Dataset columns:
  query            string  (lengths 9 to 9.05k)
  document         string  (lengths 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (lengths 4 to 10)
  document_rank    string  (2 distinct values)
Returns a list of linearly spaced values with min_value and max_value as the boundaries and the specified number (length) of entries. If roundToIntegers is True, each value will be rounded to the nearest integer.
def linear_space(min_value=0, max_value=1.0, length=10, round_op=None):
    out = []
    value = min_value
    length = max(2, length)
    delta = (float(max_value) - float(min_value)) / float(length - 1.0)
    for index in range(length - 1):
        out.append(round_op(value) if round_op else value)
        value += delta
    out.append(round_op(max_value) if round_op else max_value)
    return out
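A minimal usage sketch (not part of the dataset record); the argument values are illustrative assumptions:

# Illustrative calls, assuming the linear_space definition above:
linear_space(0, 4, length=5)                  # -> [0, 1.0, 2.0, 3.0, 4]
linear_space(0, 4, length=5, round_op=round)  # -> [0, 1, 2, 3, 4]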
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def math_map_list(values, toMin=0, toMax=1):\n minValue = min(values)\n maxValue = max(values)\n delta = maxValue - minValue\n deltaTarget = toMax - toMin\n newValues = [toMin +(value-minValue)*deltaTarget/delta for value in values]\n return newValues", "def get_numeric_intervals(self):\n intervals = []\n for num in self.numeric_col:\n\n interval_difference = self.max[num] - self.min[num]\n interval_no = min(10, interval_difference)\n\n step = math.floor(interval_difference / interval_no) + 1\n interval = list(range(math.floor(self.min[num]), math.floor(self.max[num]), step))\n interval.append(9999999)\n intervals.append(interval)\n\n return intervals", "def list_random_sample_numbers(min: int, max: int, length: int) -> List:\r\n result = random.sample(range(min, max), length)\r\n return result", "def scale_between(minval, maxval, numStops):\n\n scale = []\n\n if numStops < 2:\n return [minval, maxval]\n elif maxval < minval:\n raise ValueError()\n else:\n domain = maxval - minval\n interval = float(domain) / float(numStops)\n for i in range(numStops):\n scale.append(round(minval + interval * i, 2))\n return scale", "def _bucket_boundaries(self, max_length, min_length=8, length_bucket_step=1.1):\n assert min_length <= max_length\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def rangeLin(min, max, n):\n\n return np.arange( min, max, (max-min)/n )", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def dist_to_list(func, length, min=None, max=None):\n from scipy import inf\n from scipy.integrate import quad\n if min is None:\n min = -inf\n if max is None:\n max = inf\n total = quad(func, min, max)[0]\n step = float(total) / length\n return [intsolve(func, (0.5 + i) * step, min, max) for i in range(length)]", "def midrange(lo, hi, mid=0, scale=1.0):\n return [min(mid, (mid + lo) / (1.0 + scale)),\n max(mid, (mid + hi) / (1.0 + scale))]", "def get_bins(val: List[float]) -> List[float]:\n r_min = np.min(val)\n r_max = np.max(val)\n min_bins = 2\n max_bins = 50\n # Calculate bin width using either Freedman-Diaconis or Sturges estimator\n bin_edges = np.histogram_bin_edges(val, bins=\"auto\")\n if len(bin_edges) < min_bins:\n return list(np.linspace(start=r_min, stop=r_max, num=min_bins))\n elif len(bin_edges) <= max_bins:\n return list(bin_edges)\n # Clamp to max_bins by estimating a good bin range to be more robust to outliers\n q75, q25 = np.percentile(val, [75, 25])\n iqr = q75 - q25\n width = 2 * iqr / max_bins\n start = max((q75 + q25) / 2 - iqr, r_min)\n stop = min(start + max_bins * width, r_max)\n # Take the minimum of range and 2x IQR to account for outliers\n edges = list(np.linspace(start=start, stop=stop, num=max_bins))\n prefix = [r_min] if start > r_min else []\n suffix 
= [r_max] if stop < r_max else []\n return prefix + edges + suffix", "def linspace(start, stop, n, istart=True, istop=True):\r\n n = n-1\r\n arr = [start + ((stop-start)/n) * i for i in range(n+1)]\r\n return arr", "def val_split(a: Iterable, partitions: int, range_max: int, range_min: int = 0,\n size: bool = True) -> List[np.ndarray]:\n if size:\n n = int(np.ceil(range_max / partitions))\n splits = partitions\n else:\n n = partitions\n splits = (range_max - range_min) // partitions\n\n it = iter(a)\n it_current = next(it)\n ret_val = [[] for _ in range(n)]\n\n try:\n if isinstance(it_current, (tuple, list, np.ndarray)):\n it_current, it_value = it_current\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + j\n while it_current <= split_current:\n ret_val[i].append([it_current, it_value])\n it_current, it_value = next(it)\n continue\n return list(map(np.array, ret_val))\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + j\n while it_current <= split_current:\n ret_val[i].append(it_current)\n it_current = next(it)\n continue\n except StopIteration:\n return list(map(np.array, ret_val))", "def list_random_numbers(min: int, max: int, length: int) -> List:\r\n # Many instructions + test condition we can use random.sample()\r\n # See next function 'list_random_sample_numbers()'\r\n result = []\r\n while len(result) < length:\r\n n = randint(min, max)\r\n if n not in result:\r\n result.append(n)\r\n return result", "def get_bin_lims(n, max_value):\n return np.linspace(max_value // n, max_value, n, dtype=int)", "def genRandomIntListWithinRange(size, minLim, maxLim):\n\tvalues = set()\n\tfor i in range(size):\n\t\tval = randint(minLim, maxLim)\n\t\twhile val not in values:\n\t\t\tvalues.add(val)\n\treturn list(values)", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def linrange(start, stop=None, step=1):\n if stop is None:\n stop = start\n start = 0\n n = int(round((stop-start) / step))\n return linspace(start, stop, n+1)", "def get_range(value):\n return list(range(value))", "def get_range( value ):\n return list(range(value))", "def scale_range(data, minTo, maxTo):\n minFrom = np.min(data)\n maxFrom = np.max(data)\n \n scaled_data = []\n \n for point in data:\n new_point = minTo + (maxTo - minTo) * ((point - minFrom)/(maxFrom - minFrom))\n scaled_data.append(new_point)\n \n return scaled_data", "def get_range(lst):\n return float(max(lst)) - float(min(lst))", "def get_rangelist(start, end, count):\n if start is not None and end is not None:\n if count != 0 and not (start == 0 and count < end):\n start = int(start)\n end = int(end)\n cnt = end - start\n rangelist = []\n div = int(start) / count + 1\n multiple = round(div, 0)\n start_range = int(count * multiple)\n n = 1\n for itr in range(0, start_range + count, (end - start)):\n if itr < count:\n rangelist.append([itr, itr + cnt, n])\n n += 1\n return rangelist\n return []", "def define_intervals(self):\n i = 5 # a step of increment\n interval_sum = self.min_step\n interval_list = [self.min_step]\n while interval_sum < self.max_step:\n interval_sum += i\n interval_list.append(interval_sum)\n # interval_list.append(self.max_step)\n # print(\"Intervals\", interval_list)\n return interval_list", "def getSpansFromIntegers(ints: Iterable[int]) -> List[Tuple[int, int, int]]:\n ints = sorted(set(ints))\n if len(ints) <= 2:\n return [(x, x, 1) for x in ints]\n\n stride = min(\n (y - x if y - x == z - y else math.inf)\n 
for x, y, z in zip(ints[:-2], ints[1:-1], ints[2:])\n )\n if not (stride < math.inf):\n # There are not three consecutive elements at even intervals\n return [(x, x, 1) for x in ints]\n\n # guard both sides with sentinels\n ints = [math.nan, math.nan] + ints + [math.nan, math.nan]\n\n spans = []\n for is_evenintervals, group in itertools.groupby(\n zip(ints[:-2], ints[1:-1], ints[2:]),\n key=lambda xyz: xyz[1] - xyz[0] == xyz[2] - xyz[1] == stride\n ):\n group = list(group)\n if is_evenintervals:\n first = group[0][0]\n last = group[-1][-1]\n spans.append((first, last, stride))\n else:\n spans += getSpansFromIntegers(xyz[2] for xyz in group[:-2])\n\n return spans", "def grid(gmin, gmax, gstep):\n n_vals = int((gmax - gmin)/gstep + 1)\n my_grid = linspace(gmin, gmax, n_vals)\n return my_grid", "def ranges(int_list):\n begin = 0\n end = 0\n\n ranges = []\n\n for i in int_list:\n # At the start of iteration set the value of\n # `begin` and `end` to equal the first element\n if begin == 0:\n begin = i\n end = i\n # Set the current element as the value of `end`\n # as long as the array is in sequence\n elif i-1 == end:\n end = i\n # Reset flags to current element when iterating through\n # multiple integers that are of broken sequence\n elif begin == end:\n begin = i\n end = i\n else:\n # Sequence of array has been broken, append current range\n # to `ranges` and set the value of `begin and `end` flags to\n # equal the current element\n ranges.append(\"{0}->{1}\".format(begin, end))\n begin = i\n end = i\n # Grab the last range from the array\n if begin != end:\n ranges.append(\"{0}->{1}\".format(begin, end))\n\n return ranges", "def get_abnormal_price_values(ls_ls_prices, lower_bound, upper_bound):\n ls_abnormal_prices = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n day_ind = 0\n while day_ind < len(ls_prices):\n if (ls_prices[day_ind] < lower_bound) or (ls_prices[day_ind] > upper_bound):\n relative_day = 0\n ls_day_inds = []\n while (day_ind + relative_day < len(ls_prices)) and\\\n (ls_prices[day_ind] == ls_prices[day_ind + relative_day]):\n ls_day_inds.append(day_ind + relative_day)\n relative_day += 1\n ls_abnormal_prices.append((indiv_ind, ls_prices[day_ind], ls_day_inds))\n day_ind += relative_day\n else:\n day_ind += 1\n return ls_abnormal_prices", "def _split_chunk_bounds(\n start: int, stop: int, multiple: int,\n) -> List[Tuple[int, int]]:\n # pylint: disable=g-doc-args\n # pylint: disable=g-doc-return-or-yield\n if multiple == -1:\n return [(start, stop)]\n assert start >= 0 and stop > start and multiple > 0, (start, stop, multiple)\n first_multiple = (start // multiple + 1) * multiple\n breaks = list(range(first_multiple, stop, multiple))\n return list(zip([start] + breaks, breaks + [stop]))", "def get_range(start, stop):\n \n nums = []\n\n for num in range(start, stop):\n nums.append(num)\n\n return nums" ]
[ "0.6354057", "0.6273124", "0.603573", "0.60351825", "0.6027625", "0.59930015", "0.59687275", "0.5939713", "0.5936503", "0.5912387", "0.581435", "0.5784891", "0.5772762", "0.5737696", "0.5734971", "0.570616", "0.5631111", "0.5624516", "0.55972034", "0.55809015", "0.5568487", "0.55164886", "0.5466365", "0.5456629", "0.54488975", "0.54435134", "0.5439931", "0.5425948", "0.54232234", "0.53987104" ]
0.72541976
0
trying to delete message when authorized
def test_authorized_delete(self):
    user = User.query.filter(User.username == "testuser").first()
    message = Message(text="text", user_id=user.id, id=10000)
    self.client.post("/login", data={"username": "testuser", "password": "testuser"})
    db.session.add(message)
    db.session.commit()
    resp_delete = self.client.post("/messages/10000/delete")
    self.assertEqual(resp_delete.status_code, 302)
    deleted_message = Message.query.get(message.id)
    self.assertIsNone(deleted_message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self):\n for i, message in enumerate(self.owner.messages):\n if message == self.body:\n del self.owner.messages[i]\n break", "async def delete(self):\n return await self.set_message(text='')", "def delete_message(self, message):\r\n return self.connection.delete_message(self, message)", "def test_delete_message_logged_out(self):\n\n with self.client as c:\n resp = c.post(f\"/messages/{self.test_msg_id}/delete\")\n\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.location, 'http://localhost/')\n\n test_msg = Message.query.get(self.test_msg_id)\n\n self.assertEqual(Message.query.one(), test_msg)", "def delete_profile_msg(message_id):\n if 'username' in session:\n \n user = mongo.db.user.find_one_or_404({'username': session['username']})\n message = mongo.db.profile_msgs.find_one_or_404({'_id': ObjectId(message_id)})\n \n if request.method == 'POST':\n if user['username'] == message['from_user'] or user['username'] == message['to_user']:\n messages = mongo.db.profile_msgs\n messages.find_one_and_delete({'_id': ObjectId(message_id) })\n flash(f'{user.username}, your conversation has been deleted with {message.to_user}. ', 'success')\n return redirect(url_for('dashboard'))\n elif user['username'] == message['to_user']:\n messages = mongo.db.profile_msgs\n messages.find_one_and_delete({'_id': ObjectId(message_id) })\n flash('Your conversation has been deleted. ', 'success')\n return redirect(url_for('dashboard'))\n \n flash('You need to be logged in delete messages.', 'info')\n return redirect(url_for('login'))", "def test_unauthorized_delete_when_logged_out(self):\n\n u = User(username=\"other_user\",\n email=\"[email protected]\",\n password=\"testuser\",\n id=10000)\n\n message = Message(text=\"text\",\n user_id=10000,\n id=10000)\n\n db.session.add(u)\n db.session.add(message)\n db.session.commit()\n\n resp_delete = self.client.post(\"/messages/10000/delete\")\n resp_delete_redirected = self.client.post(\"/messages/10000/delete\",\n follow_redirects=True)\n\n self.assertEqual(resp_delete.status_code, 302)\n self.assertIn(b\"Access unauthorized.\", resp_delete_redirected.data)", "def delete(data):\n message_id = int(data['message_id'])\n message = Message.query.filter_by(id=message_id, username=session['username']).first()\n if message:\n db.session.delete(message)\n db.session.commit()\n emit('delete', {'message_id': message_id}, room=data['room'])", "def message_delete(self):\r\n SlTrace.lg(\"Destroying timed message\", \"message\")\r\n if self.cur_message is not None:\r\n SlTrace.lg(\"Found message to destroy\", \"message\")\r\n self.cur_message.destroy()\r\n self.cur_message = None", "def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return HttpTextResponse('OK')", "def test_delete_message(self):\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.test_user_id\n\n resp = c.post(f\"/messages/{self.test_msg_id}/delete\")\n\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp.location, f'http://localhost/users/{sess[CURR_USER_KEY]}')\n\n self.assertRaises(NoResultFound, Message.query.one)", "def delete_message(self, ts):\n return self(\"chat.delete\", ts=ts)", "def message_remove(token, message_id):\n verify_token(token)\n message_obj = Message.query.filter_by(id=message_id).first()\n if not message_obj:\n raise InputError(\"Message doesn't exist\")\n calling_user = get_user_from_token(token)\n if calling_user.id != message_obj.user_id:\n raise AccessError(\"You can't modify 
someone else's message\")\n\n # Removes message and saves changes\n db.session.delete(message_obj)\n db.session.commit()\n return {\n \"old_message\": message_obj.message\n }", "async def method_delete(\n self,\n request: Request,\n body: dict,\n session: DBSession,\n message_id: int,\n token: dict,\n *args,\n **kwargs,\n ) -> BaseHTTPResponse:\n try:\n db_message = delete_message(session, message_id=message_id)\n except DBMessageNotExists as e:\n raise NotFound(f\"Message {message_id} not found\") from e\n\n if token[\"sub\"] not in (db_message.sender_id, db_message.recipient_id):\n raise Forbidden(\"user is not recipient or sender\")\n\n try:\n session.commit_session()\n except (DBDataException, DBIntegrityException) as e:\n raise SanicDBException(str(e))\n\n return await self.make_response_json(status=204)", "def delete_project_msg(message_id):\n if 'username' in session:\n \n user = mongo.db.user.find_one_or_404({'username': session['username']})\n message = mongo.db.project_msgs.find_one_or_404({'_id': ObjectId(message_id)})\n \n if request.method == 'POST':\n if user['username'] == message['from_user']:\n messages = mongo.db.project_msgs\n messages.find_one_and_delete({'_id': ObjectId(message_id) })\n flash('Your conversation has been deleted. ', 'success')\n return redirect(url_for('dashboard'))\n elif user['username'] == message['to_user']:\n messages = mongo.db.project_msgs\n messages.find_one_and_delete({'_id': ObjectId(message_id) })\n flash('Your conversation has been deleted.', 'success')\n return redirect(url_for('dashboard'))\n \n flash('You need to be logged in delete messages.', 'info')\n return redirect(url_for('login'))", "def test_unauthorized_delete_when_not_author(self):\n\n # with self.client as c:\n # with c.session_transaction() as sess:\n # sess[CURR_USER_KEY] = self.testuser.id\n\n logged_in_user = self.client.post(\"/login\", data={\"username\":\"testuser\", \"password\":\"testuser\"})\n\n u = User(username=\"other_user\",\n email=\"[email protected]\",\n password=\"testuser\",\n id=10000)\n\n message = Message(text=\"text\",\n user_id=10000,\n id=10000)\n\n db.session.add(u)\n db.session.add(message)\n db.session.commit()\n\n resp_delete = self.client.post(\"/messages/10000/delete\")\n resp_delete_redirected = self.client.post(\"/messages/10000/delete\",\n follow_redirects=True)\n\n self.assertEqual(resp_delete.status_code, 302)\n self.assertIn(b\"Access unauthorized.\", resp_delete_redirected.data)", "async def on_message_delete(self, message):\n if message.author.bot:\n return\n flags = await self.parse(message)\n if flags:\n await self.detected(message, flags)", "def delete_messages(self):\n other_user_email = request.args.get('other_user_email')\n if not other_user_email:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"other_user_email\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"other_user_email\", 400\n email_token = auth.current_user()[0]\n self.friend_database.delete_conversation(email_token, other_user_email)\n return messages.SUCCESS_JSON, 200", "def delete(request, message_id):\n return HttpResponse(\"error\")", "async def close_message(client, event):\n if event.user_permissions.can_manage_messages:\n await client.interaction_component_acknowledge(event)\n await client.interaction_response_message_delete(event)", "def delete():\n name = request.form['name']\n message = request.form['message']\n\n try:\n newcurs = g.conn.execute(\"\"\"DELETE FROM record\n WHERE record.user_name = %s AND record.message = %s;\"\"\", name, 
message)\n newcurs.close()\n except Exception:\n print \"can not write record to database\"\n return redirect('/error')\n\n return \"successfully deleted the message\"", "def delete(bot, message_id, chat_id):\n\n bot.delete_message(chat_id, message_id)", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "async def on_raw_message_delete(self, payload):\n\t\tif payload.guild_id is not None:\n\t\t\tguild = self.bot.get_guild(payload.guild_id)\n\t\t\tleaderboards = self.leaderboards[str(guild.id)]\n\n\t\t\tif payload.cached_message is not None:\n\t\t\t\tmessage = payload.cached_message\n\n\t\t\t\tif not message.author.bot:\n\t\t\t\t\tleaderboards[\"messageLeaderboard\"][str(message.author.id)] -= 1\n\n\t\t\t\t\tif str(message.channel.id) == leaderboards[\"quotesChannel\"]:\n\t\t\t\t\t\tfor user in message.mentions:\n\t\t\t\t\t\t\tleaderboards[\"quotesChannel\"][str(user.id)] -= 1\n\n\t\t\t\t\tfor emoji in self.bot.emojis:\n\t\t\t\t\t\temojiName = \"<:\" + emoji.name + \":\" + str(emoji.id) + \">\"\n\t\t\t\t\t\tfor index in range(0, message.content.count(emojiName)):\n\t\t\t\t\t\t\tleaderboards[\"emojiLeaderboard\"][str(emoji.id)] -= 1\n\n\t\t\t\tleaderboards[\"lastUpdate\"] = message.created_at.isoformat()\n\t\t\t\tawait self.update_state()", "def test_delete_message_logged_out_redirect_followed(self):\n\n with self.client as c:\n resp = c.post(f\"/messages/{self.test_msg_id}/delete\",\n follow_redirects=True\n )\n\n html = resp.get_data(as_text=True)\n self.assertEqual(resp.status_code, 200)\n\n self.assertIn('Access unauthorized.', html)", "def delete_personal_message(request, pk=None):\n user = User.objects.get(email=request.user.email)\n contactuserposts = ContactUser.objects.all()\n contactuserpost = get_object_or_404(ContactUser, pk=pk)\n if request.method == \"POST\":\n contactuserpost.delete()\n messages.success(request, 'This message has been successfully deleted.')\n return redirect(user_profile)\n return render(request, \"personalmessagedelete.html\", {'contactuserposts': contactuserposts})", "def message_remove(request, undo=False):\n message_pks = request.POST.getlist('message_pks')\n redirect_to = request.REQUEST.get('next', False)\n\n if message_pks:\n # Check that all values are integers.\n valid_message_pk_list = set()\n for pk in message_pks:\n try: valid_pk = int(pk)\n except (TypeError, ValueError): pass\n else:\n valid_message_pk_list.add(valid_pk)\n\n # Delete all the messages, if they belong to the user.\n now = datetime.datetime.now()\n changed_message_list = set()\n for pk in valid_message_pk_list:\n message = get_object_or_404(Message, pk=pk)\n\n # Check if the user is the owner\n if message.sender == request.user:\n if undo:\n message.sender_deleted_at = None\n else:\n message.sender_deleted_at = now\n message.save()\n changed_message_list.add(message.pk)\n\n # Check if the user is a recipient of the message\n if request.user in message.recipients.all():\n mr = message.messagerecipient_set.get(user=request.user,\n message=message)\n if undo:\n mr.deleted_at = None\n else:\n mr.deleted_at = now\n mr.save()\n changed_message_list.add(message.pk)\n\n # Send messages\n if (len(changed_message_list) > 0):\n if undo:\n message = ungettext('Message is succesfully restored.',\n 'Messages are succesfully restored.',\n len(changed_message_list))\n else:\n message = ungettext('Message is successfully removed.',\n 'Messages are successfully removed.',\n len(changed_message_list))\n\n messages.success(request, message, 
fail_silently=True)\n\n if redirect_to: return redirect(redirect_to)\n else: return redirect(reverse('socialapps_messages_list'))", "def delete_message(id_message: int):\n mycursor.execute(f\"\"\"DELETE FROM Daily_message\n WHERE id_message = {id_message}\"\"\")\n mydb.commit()\n return f\"Le message {id_message} a été supprimé\"", "def process_quit(message):\n try:\n Resident.objects.get(phone_number=message.sender).delete()\n except Resident.DoesNotExist:\n pass\n \n # TODO - wording...\n message.respond('You have been removed from our system and will no longer get text messages.')\n \n return TropoOkResponse()", "def delete_message(self, message_timestamp: str):\n self.slack_client.api_call(\"chat.delete\", channel=self.slack_channel_id, ts=message_timestamp)", "async def delete_bot_msg(self, channel):\n await channel.purge(limit=100, check=self.is_me)" ]
[ "0.7266986", "0.71698946", "0.7003001", "0.69563955", "0.69488233", "0.69374424", "0.6878325", "0.68730885", "0.686122", "0.682144", "0.681105", "0.67818886", "0.6760031", "0.67559224", "0.6749782", "0.6714628", "0.6695347", "0.66892964", "0.6670156", "0.6666097", "0.66554487", "0.66478914", "0.6583505", "0.65476406", "0.6540335", "0.65329444", "0.6508636", "0.64943135", "0.64596707", "0.6442699" ]
0.7522226
0
Generator function to chunk through a matrix along a specified axis. Will yield blocks of length 'blocksize' along axis = 1, 2, 3
def matrix_chunker(blocksize, matrix, axis=0, offset=0):
    length = matrix.shape[axis]
    index = np.arange(0 + offset, length + offset, blocksize)
    index = np.append(index, index[-1] + (length + offset - index[-1]))
    for start, end in zip(index[:-1], index[1:]):
        yield start, end
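A minimal usage sketch (not part of the dataset record); the 6x4 matrix and blocksize below are illustrative assumptions:

import numpy as np

m = np.arange(24).reshape(6, 4)            # 6 rows, 4 columns (illustrative)
for start, end in matrix_chunker(4, m, axis=0):
    print(start, end, m[start:end].shape)  # yields (0, 4) then (4, 6)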
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toBlocks(im, blocksize):\n blocks = im.reshape(\n im.shape[0]//blocksize, \n blocksize, \n im.shape[1]//blocksize, \n blocksize, 3).swapaxes(1, 2)\n\n\n blocks = blocks.reshape(\n im.shape[0]*im.shape[1]//(blocksize**2), \n blocksize, \n 1,\n blocksize, 3).swapaxes(1, 2)\n\n return blocks", "def chunker(bitstream, chunk_size):\n\n for chunk in nslice(lazy_pad(bitstream, 2 * chunk_size), 2 * chunk_size):\n yield chunk[:chunk_size], chunk[chunk_size:]", "def blockify_chunks(chunks):\n acc = []\n size = 0\n for chunk, chunk_size in chunks:\n assert len(chunk) == CHUNK_SIZE\n assert len(acc) <= BLOCK_SIZE\n if len(acc) == BLOCK_SIZE:\n # Only the last chunk may be short.\n assert size == CHUNK_SIZE * BLOCK_SIZE\n yield acc, size\n acc = []\n size = 0\n acc.append(chunk)\n size += chunk_size\n assert acc\n yield acc, size", "def tile_iterator(im,\r\n blocksize = (64, 64),\r\n padsize = (64,64),\r\n mode = \"constant\",\r\n verbose = False):\r\n\r\n if not(im.ndim == len(blocksize) ==len(padsize)):\r\n raise ValueError(\"im.ndim (%s) != len(blocksize) (%s) != len(padsize) (%s)\"\r\n %(im.ndim , len(blocksize) , len(padsize)))\r\n\r\n subgrids = tuple([int(np.ceil(1.*n/b)) for n,b in zip(im.shape, blocksize)])\r\n\r\n\r\n #if the image dimension are not divible by the blocksize, pad it accordingly\r\n pad_mismatch = tuple([(s*b-n) for n,s, b in zip(im.shape,subgrids,blocksize)])\r\n\r\n if verbose:\r\n print(\"tile padding... \")\r\n\r\n im_pad = np.pad(im,[(p,p+pm) for pm,p in zip(pad_mismatch,padsize)], mode = mode)\r\n\r\n # iterates over cartesian product of subgrids\r\n for i,index in enumerate(product(*[range(sg) for sg in subgrids])):\r\n # the slices\r\n # if verbose:\r\n # print(\"tile %s/%s\"%(i+1,np.prod(subgrids)))\r\n\r\n # dest[s_output] is where we will write to\r\n s_input = tuple([slice(i*b,(i+1)*b) for i,b in zip(index, blocksize)])\r\n\r\n\r\n\r\n s_output = tuple([slice(p,-p-pm*(i==s-1)) for pm,p,i,s in zip(pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_output = tuple([slice(p,b+p-pm*(i==s-1)) for b,pm,p,i,s in zip(blocksize,pad_mismatch,padsize, index, subgrids)])\r\n\r\n\r\n s_padinput = tuple([slice(i*b,(i+1)*b+2*p) for i,b,p in zip(index, blocksize, padsize)])\r\n padded_block = im_pad[s_padinput]\r\n\r\n\r\n\r\n yield padded_block, s_input, s_output", "def chunk(flat, sizes):\n iter_flat = iter(flat)\n yield from (list(islice(iter_flat, 0, size)) for size in sizes)", "def block_process(a, blocksize, filt):\n block = np.empty(a.shape)\n for row in range(0, a.shape[0], blocksize):\n for col in range(0, a.shape[1], blocksize):\n block[row:row + blocksize, col:col + blocksize] = (\n filt(a[row:row + blocksize, col:col + blocksize]))\n return block", "def unblock(arr: np.ndarray, n1: int, n2: int, axis1: int = -1, axis2: int = -2, blocksize: bool = False) -> np.ndarray:\n\n \"\"\" test (stackoverflow): Ok, so considering I have N block matrices with bm x bn dimension and want to stack them in a m x n matrix, provided N = m x n, I would then have x.reshape(m,n,bm,bn).swapaxes(1,2).reshape(bm*m,-1)\n \"\"\"\n\n s = np.array(arr.shape)\n if s[axis1] % n1 != 0 or s[axis2] % n2 != 0:\n raise ValueError(f\"{s[axis1]}x{s[axis2]} does not divide by {n1}x{n2}\")\n\n if blocksize:\n n1 = s[axis1] // n1\n n2 = s[axis2] // n2\n\n # this first .split adds a new dimensions on the outside, so if a absolute index\n # is given for the second axis it must be moved one to the right\n if axis2 >= 0:\n _axis2 = axis2 + 1\n else:\n _axis2 = axis2\n\n arr = np.array(np.split(arr, n1, 
axis1))\n arr = np.array(np.split(arr, n2, _axis2))\n\n inv_blocksize = n1 * n2\n total = s[axis1] * s[axis2]\n s[axis2] = inv_blocksize\n s[axis1] = total // inv_blocksize\n\n return np.reshape(arr, s)", "def _chunks(l, ncols):\n assert isinstance(ncols, int), \"ncols must be an integer\"\n for i in range(0, len(l), ncols):\n yield l[i: i+ncols]", "def split_chunk(chunk, sizes, max_iter=1000, rng=None):\n assert len(chunk) == sum(sizes), f\"{len(chunk)} != {sum(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # Precompute neighbors for each cube in the chunk\n neighbors = dict()\n for c in chunk:\n neighbors[c] = set(c.neighbors()) & set(chunk)\n for i in range(max_iter):\n result = split_chunk_iter(chunk, sizes, neighbors, rng)\n if result != None:\n return result\n raise SplitChunkMaxIterationExceeded(\"Ran out of iterations trying to split chunk\")", "def chunks(data, rows=10000):\n\n for i in range(0, len(data), rows):\n yield data[i:i+rows]", "def yield_chunks(arr, chunk_size):\r\n larr = len(arr)\r\n if larr < chunk_size:\r\n raise ValueError(\"The array length (%d) must be larger than the chunk size (%d)\" % (len(arr), chunk_size))\r\n\r\n cursor = 0\r\n while cursor < larr:\r\n next_cursor = min(cursor + chunk_size, larr)\r\n yield arr[cursor:next_cursor]\r\n cursor = next_cursor", "def block_splitter(data, block_size):\n buf = []\n for i, datum in enumerate(data):\n buf.append(datum)\n if len(buf) == block_size:\n yield buf\n buf = []\n\n # If there's anything leftover (a partial block),\n # yield it as well.\n if buf:\n yield buf", "def split_chunk_iter(chunk, sizes, neighbors, rng=None):\n assert len(chunk) > len(sizes), f\"{len(chunk)} !> {len(sizes)}\"\n if not isinstance(rng, random.Random):\n rng = random\n # start by drawing three random items\n splits = [[c] for c in rng.sample(list(chunk), len(sizes))]\n unused = set(chunk) - set(sum(splits, []))\n max_iters = max(sizes) * len(sizes) # worst case\n for j in range(max_iters):\n i = j % len(sizes)\n size = sizes[i]\n split = splits[i]\n if len(split) == size:\n continue\n # get all of the neighbors of the split\n candidates = set()\n for c in split:\n candidates |= neighbors[c]\n # filter to unused cubes\n candidates = candidates & unused\n if not candidates:\n return None\n # Pick a candidate at random and add it\n choice = rng.choice(list(candidates))\n split.append(choice)\n unused.remove(choice)\n return splits", "def chunk_generator(input_file, chunksize = 100000, dataset_name = \"\") :\n\n with h5py.File(input_file, 'r', libver = 'latest') as f :\n dataset = f[dataset_name]\n for x in range(0, dataset.size, chunksize) :\n yield dataset[x:x+chunksize]", "def chunk(items, chunk_size):\n start_index = 0\n for start_index in xrange(0, len(items), chunk_size):\n end_index = min(start_index+chunk_size, len(items))\n yield items[start_index:end_index]", "def chunks(iterator, size):\n for index in range(0, len(iterator), size):\n yield iterator[index:index + size]", "def gen_blocks(self, count=None):\n if not count:\n count = self.num_blk\n for x in range(0, count*32, 32):\n buf = self._read(x)\n yield x, buf", "def iterate_array_in_chunks(arr, size: int):\n for i in range(0, len(arr), size):\n yield arr[i:i+size]", "def iter_slices(shape, chunk_size):\n assert len(shape) == len(chunk_size)\n num_grid_chunks = [int(ceil(s / float(c))) for s, c in zip(shape, chunk_size)]\n for grid_index in numpy.ndindex(*num_grid_chunks):\n yield tuple(\n slice(min(d * c, stop), min((d + 1) * c, stop)) for d, c, stop in 
zip(grid_index, chunk_size, shape))", "def chunks(array, size: int):\r\n for i in range(0, len(array), size):\r\n yield array[i:i + size]", "def block_split(stream, block_size=BLOCK_SIZE_IN_BYTES):\n # TODO: this could possibly be a generator\n return [stream[i:i + BLOCK_SIZE_IN_BYTES]\n for i in range(0, len(stream), BLOCK_SIZE_IN_BYTES)]", "def _chunk_data(self):\n for n in range(0, len(self.data) + 1, len(self.data) //\n self.num_of_chunks):\n yield self.data[0 + n:len(self.data) // self.num_of_chunks + n]", "def make_block(self, block_ix, downsample, ker_size, block_len):\n stride = int(downsample) + 1\n n_in_filters = self.filters[block_ix]\n n_filters = self.filters[block_ix+1]\n mult_fact = 1 if block_ix == 0 else 6\n\n block = [MBConv(n_in_filters, n_filters, ker_size, stride, mult_fact)]\n block += [MBConv(n_filters, n_filters, ker_size, 1, mult_fact) for _ in range(block_len-1)]\n return block", "def values_chunked(items, endtype, chunk_dim=10):\n ilengths = [len(x) for x in items]\n n = len(items)\n items = [np.array(x) for x in items]\n if n > chunk_dim:\n p = n - chunk_dim\n q = chunk_dim\n outer = itertools.product(*(items[0:p]))\n else:\n p = 0\n q = n\n\n def outer_iter():\n yield ()\n\n outer = outer_iter()\n\n chunk = np.zeros(\n [np.prod(ilengths[p:]), len(items)], dtype=int).view(endarray)\n chunk.endtype = endtype\n chunk[:, p:] = np.indices(ilengths[p:]).reshape(q, -1).T\n for i in range(p, n):\n chunk[:, i] = items[i][chunk[:, i]]\n for seq in outer:\n chunk[:, :p] = seq\n yield chunk", "def reshape_as_blocks(data, block_size):\n data, block_size = _process_block_inputs(data, block_size)\n\n if np.any(np.mod(data.shape, block_size) != 0):\n raise ValueError(\n \"Each dimension of block_size must divide evenly \"\n \"into the corresponding dimension of data\"\n )\n\n nblocks = np.array(data.shape) // block_size\n new_shape = tuple(k for ij in zip(nblocks, block_size) for k in ij)\n nblocks_idx = tuple(range(0, len(new_shape), 2)) # even indices\n block_idx = tuple(range(1, len(new_shape), 2)) # odd indices\n\n return data.reshape(new_shape).transpose(nblocks_idx + block_idx)", "def get_chunks(sequence, window_size, step=1):\n k = len(sequence)\n for i in range(0, k - window_size + 1, step):\n end = i + window_size\n chunk = sequence[i:i + window_size]\n assert len(chunk) == window_size\n yield chunk, end", "def iter_chunks(self, chunk_size, depths=True, step_size=None):\n step_size = step_size or chunk_size\n\n i = 0\n while i < self.height:\n if depths:\n yield self.img[i:i+chunk_size], self.depths[i:i+chunk_size]\n else:\n yield self.img[i:i+chunk_size]\n i += step_size", "def get_batch(iterator, batch_size):\n while True:\n center_batch = np.zeros(batch_size, dtype = np.uint32)\n target_batch = np.zeros((batch_size, 1), dtype = np.uint32)\n for index in range(batch_size):\n center_batch[index], target_batch[index] = next(iterator)\n\n yield center_batch, target_batch", "def chunk(list, chunksize):\n for i in range(0, len(list), chunksize):\n yield list[i:i + chunksize]", "def matrix_to_blocks(fock, frame, orbs):\n # maps atom types to different n indices\n io_base, _ = orbs_base(orbs)\n\n # prepares storage\n diaglist = {}\n offdlist_p = {}\n offdlist_m = {}\n heterolist = {}\n\n # creates storage. 
these are the blocks of the matrix we'll have to fill up later\n lorbs = []\n for el_a in orbs.keys():\n for ia, a in enumerate(orbs[el_a]):\n na, la, ma = a\n na += io_base[el_a] # adds element offset\n for el_b in orbs.keys():\n for ib, b in enumerate(orbs[el_b]):\n nb, lb, mb = b\n nb += io_base[el_b] # adds element offset\n if ( (nb>na or (nb==na and lb>=la)) and\n not (na,la,nb,lb) in lorbs ):\n orb = (na,la,nb,lb)\n lorbs.append(orb)\n if el_a == el_b:\n diaglist[orb] = []\n offdlist_p[orb] = []\n offdlist_m[orb] = []\n else:\n heterolist[orb] = []\n\n\n # reads in and partitions into blocks\n ki = 0\n nat = len(frame.numbers)\n for i in range(nat):\n el_a = frame.symbols[i]\n cur_a = ()\n for ia, oa in enumerate(orbs[el_a]):\n na, la, ma = oa\n na += io_base[el_a]\n # we read the Hamiltonian in blocks\n if (cur_a == (na,la)): continue\n cur_a = (na,la)\n kj = 0\n for j in range(nat):\n el_b = frame.symbols[j]\n cur_b = ()\n for ib, ob in enumerate(orbs[el_b]):\n nb, lb, mb = ob\n nb += io_base[el_b] # adds element offset\n if (cur_b == (nb,lb)): continue # only read at the beginning of each m block\n cur_b = (nb,lb)\n if (nb<na or (nb==na and lb<la)): continue\n orb = (na,la,nb,lb)\n blockij = fock[ki+ia:ki+ia+2*la+1, kj+ib:kj+ib+2*lb+1]\n if (i==j):\n diaglist[orb].append(blockij)\n elif (i<j and el_a == el_b):\n blockji= fock[kj+ia:kj+ia+2*la+1, ki+ib:ki+ib+2*lb+1]\n offdlist_p[orb].append((blockij+blockji)/np.sqrt(2))\n offdlist_m[orb].append((blockij-blockji)/np.sqrt(2))\n elif(el_a != el_b):\n heterolist[orb].append(blockij)\n kj += len(orbs[el_b])\n ki += len(orbs[el_a])\n\n # stores as ndarray for more flexible indexing\n for orb in lorbs:\n for d in [diaglist, offdlist_p, offdlist_m, heterolist]:\n if orb in d:\n d[orb] = np.asarray(d[orb])\n\n return dict( diag=diaglist, offd_p=offdlist_p, offd_m=offdlist_m, hete=heterolist)" ]
[ "0.66012836", "0.6529381", "0.64783555", "0.6303526", "0.6264694", "0.6173771", "0.61281383", "0.6098625", "0.60908544", "0.60895056", "0.60419375", "0.6028545", "0.60086375", "0.5985733", "0.5961891", "0.5945922", "0.5945654", "0.59103626", "0.58455825", "0.5843193", "0.58349484", "0.583134", "0.5788816", "0.5774064", "0.5773137", "0.5764202", "0.5739724", "0.5733614", "0.5700711", "0.5695567" ]
0.8451607
0
Selects a block_x x block_y subsquare in the underlying layers lying directly below the unit in the upper layer. Selects units within radius in that block.
e.g. block_x=2, block_y=2, radius=2
1 1
1 1
e.g. block_x=3, block_y=3, radius=2
1 1 1
1 1 1
1 1 1
e.g. block_x=3, block_y=3, radius=1
0 1 0
1 1 1
0 1 0
def get_fan_in(xy=(0, 0), dim_x_l=10, dim_y_l=10, dim_x_u=9, dim_y_u=9, block_x=2, block_y=2, radius=2):
    x = xy[0]
    y = xy[1]
    if dim_x_u > 1:
        factor_x = ((dim_x_l-1)-(block_x-1))/(1.0*(dim_x_u-1))
    else:
        factor_x = ((dim_x_l-1)-(block_x))/2.0
    if dim_y_u > 1:
        factor_y = ((dim_y_l-1)-(block_y-1))/(1.0*(dim_y_u-1))
    else:
        factor_y = ((dim_y_l-1)-(block_y))/2.0
    results = []
    if dim_x_u > 1 and dim_y_u > 1:
        for xx in range(block_x):
            for yy in range(block_y):
                if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:
                    continue
                results.append((int((factor_x*(x))+xx), int((factor_y*(y))+yy)))
        return results
    elif dim_x_u == 1 and dim_y_u > 1:
        for xx in range(block_x):
            for yy in range(block_y):
                if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:
                    continue
                results.append((int((dim_x_l-block_x)/2.0+xx), int((factor_y*(y)+yy))))
        return results
    elif dim_x_u > 1 and dim_y_u == 1:
        for xx in range(block_x):
            for yy in range(block_y):
                if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:
                    continue
                results.append((int((factor_x*(x)+xx)), int((dim_y_l-block_y)/2.0+yy)))
        return results
    elif dim_x_u == 1 and dim_y_u == 1:
        for xx in range(block_x):
            for yy in range(block_y):
                if (xx-(block_x-1)*0.5)**2 + (yy-(block_y-1)*0.5)**2 > radius**2:
                    continue
                results.append((int((dim_x_l-block_x)/2.0+xx), int((dim_y_l-block_y)/2.0+yy)))
        return results
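A minimal usage sketch (not part of the dataset record); the layer sizes are illustrative assumptions: fan-in for the corner unit of a 9x9 upper layer over a 10x10 lower layer.

fan_in = get_fan_in((0, 0), dim_x_l=10, dim_y_l=10, dim_x_u=9, dim_y_u=9,
                    block_x=2, block_y=2, radius=2)
print(fan_in)   # [(0, 0), (0, 1), (1, 0), (1, 1)] -- the 2x2 patch under unit (0, 0)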
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_block(x, y, input_suduko_3d):\n block_id = 100\n\n if x < 3 and y < 3: # First if statements defines into which block the element of given x,y values is, this are\n # from 0-8 representing the 9 boxes\n block_id = 0\n if 6 > x >= 3 > 0 <= y:\n block_id = 3\n if 6 <= x <= 8 and 0 <= y < 3:\n block_id = 6\n if x < 3 <= y < 6:\n block_id = 1\n if 3 <= x < 6 and 3 <= y < 6:\n block_id = 4\n if 8 >= x >= 6 > 3 <= y:\n block_id = 7\n if x < 3 and 6 <= y <= 8:\n block_id = 2\n if 3 <= x < 6 <= y <= 8:\n block_id = 5\n if 6 <= x <= 8 and 6 <= y <= 8:\n block_id = 8\n\n suduko_blocks = blockshaped(np.array(input_suduko_3d[:, :, 0])) # suduko_blocks is a 2D array that holds all of the\n # 9 constituent block elements\n suduko_blocks_flatten = suduko_blocks[block_id].flatten()\n\n value_in_block = np.in1d(input_suduko_3d[x, y, :], suduko_blocks_flatten)\n\n for i, value in enumerate(value_in_block):\n if value_in_block[i] == True and i != 0:\n input_suduko_3d[x, y, i] = 0", "def test_block_picking(multiblock_poly):\n\n pl = pyvista.Plotter()\n width, height = pl.window_size\n actor, mapper = pl.add_composite(multiblock_poly)\n\n picked_blocks = []\n\n def turn_blue(index, dataset):\n mapper.block_attr[index].color = 'blue'\n picked_blocks.append(index)\n\n pl.enable_block_picking(callback=turn_blue)\n pl.show(auto_close=False)\n\n # click in the corner\n assert not picked_blocks\n pl.iren._mouse_left_button_click(0, 0)\n assert not picked_blocks\n\n # click directly in the middle\n pl.iren._mouse_left_button_click(width // 2, height // 2)\n assert mapper.block_attr[2].color\n\n assert pl.picked_block_index == picked_blocks[0]", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def ReturnBlockOfCoordinates(x, y):\n block_x = int(x / block_size)\n block_y = int(y / block_size)\n\n if block_x == block_size:\n block_x = block_x - 1\n\n if block_y == block_size:\n block_y = block_y - 1\n\n return (block_x, block_y)", "def objects_radius(self, centre, radius):", "def clear_radius(self, radius):\n s = self\n length = self.physics.len_blocks\n for i in range(-radius, radius + 1):\n for j in range(-radius, radius + 1):\n block = Rectangle(Vector(i * length, j * length), Vector(length * (i + 1), length * (j + 1)))\n if not(block in self.physics.unavailable_blocks):\n self.physics.unavailable_blocks.append(block)", "def circle_mask(radius,size=None,offset=None,inner=0,subsample_limit=4,center=False):\n def subsample(x,y,sz,r,lim):\n d = np.hypot(x, y)\n if lim==0: #hit recursion limit\n #return area if x,y is inside circle\n return sz**2 if d < r else 0.0\n elif d + 0.70711*sz < r: #totally inside circle\n return sz**2\n elif d - 0.70711*sz > r: #totally outside 
circle\n return 0.0\n else: #on edge, recurse into quadrants\n s,o = sz/2, sz/4\n return subsample(x+o,y+o,s,r,lim-1) + \\\n subsample(x+o,y-o,s,r,lim-1) + \\\n subsample(x-o,y-o,s,r,lim-1) + \\\n subsample(x-o,y+o,s,r,lim-1)\n if offset is None:\n y0,x0 = 0,0\n else:\n y0,x0 = offset\n if size is None:\n size=2*radius+1\n if np.isscalar(size):\n size = (size,size)\n if center:\n y0 += 0.5*size[0]-0.5-radius\n x0 += 0.5*size[1]-0.5-radius\n coeffs = np.empty(size)\n for r in range(size[0]):\n for c in range(size[1]):\n x,y = c-radius,r-radius\n coeffs[r,c] = subsample(x-x0,y-y0,1,radius,subsample_limit)\n if inner > 0: \n coeffs[r,c] -= subsample(x-x0,y-y0,1,inner,subsample_limit) \n return coeffs", "def cutout(self, centre, radius):", "def build_block_cross(self):\n from ambry.geo.util import find_geo_containment, find_containment\n from geoid import civick \n\n lr = self.init_log_rate(3000)\n\n def gen_bound():\n \n boundaries = self.library.dep('blockgroups').partition\n\n # Note, ogc_fid is the primary key. The id column is created by the shapefile. \n for i,boundary in enumerate(boundaries.query(\n \"SELECT AsText(geometry) AS wkt, gvid FROM blockgroups\")):\n lr('Load rtree')\n \n yield i, boundary['wkt'] , boundary['gvid'] \n \n def gen_points():\n\n for row in self.partitions.find(table = 'facilities_addresses').rows:\n if row['longitude'] and row['latitude']:\n yield (row['longitude'], row['latitude']), row['facilities_id']\n\n\n p = self.partitions.find_or_new(table='facilities_geoids')\n p.clean()\n\n with p.inserter() as ins:\n for point, point_o, cntr_geo, cntr_o in find_containment(gen_bound(),gen_points()):\n\n blockgroup_gvid = civick.Blockgroup.parse(cntr_o)\n tract_gvid = blockgroup_gvid.convert(civick.Tract)\n county_gvid = blockgroup_gvid.convert(civick.County)\n \n ins.insert(dict(facilities_id = point_o, \n blockgroup_gvid = str(blockgroup_gvid),\n tract_gvid = str(tract_gvid),\n county_gvid = str(county_gvid)\n ))\n \n lr('Marking point containment')", "def circular_levelset(shape, center, sqradius, scalerow=1.0):\n grid = np.mgrid[list(map(slice, shape))].T - center\n phi = sqradius - np.sqrt(np.sum((grid.T)**2, 0))\n u = np.float_(phi > 0)\n return u", "def create_block():\n global BLOCK\n posx = SEG_SIZE * random.randint(1, (WIDTH-SEG_SIZE) / SEG_SIZE)\n posy = SEG_SIZE * random.randint(1, (HEIGHT-SEG_SIZE) / SEG_SIZE)\n BLOCK = c.create_oval(posx, posy,\n posx+SEG_SIZE, posy+SEG_SIZE,\n fill=\"red\")\n # print(posx, posy)\n return posx, posy", "def find_square_box(box):\n width = box['bottom_right_x'] - box['top_left_x']\n height = box['bottom_right_y'] - box['top_left_y']\n if width <= height:\n offset = int((width - height) / 2)\n box['top_left_x'] = box['top_left_x'] - offset\n box['bottom_right_x'] = box['bottom_right_x'] + offset\n else:\n offset = int((height - width) / 2)\n box['top_left_y'] = box['top_left_y'] - offset\n box['bottom_right_y'] = box['bottom_right_y'] + offset\n return box", "def square(side):\n rectangle(side,side)", "def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += 
x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)", "def solve_block(self, center_cell: tuple[int, int], first_round=True):\n block = Block(self.field, center_cell)\n action = block.solve()\n if action == 'clear':\n self.clear_queue.add_batch(block.unknown_neighbors,\n emphasis=self.emphasis[\"add_batch\"],\n color=\"new_clear\")\n if not self.clear_queue.is_busy:\n self.clear_queue.is_busy = True\n self.process(self.clear_queue)\n elif action == 'flag':\n to_flag = Queue(field=self.field, color=\"to_flag\")\n for cell in block.unknown_neighbors:\n to_flag.append(cell)\n to_flag.direction = self.direction\n to_flag.re_orient()\n if self.emphasis[\"to_flag\"].is_checked:\n self.update()\n pause(self.emphasis[\"to_flag\"].pause_time)\n while to_flag:\n new_flag = to_flag[0]\n to_flag.remove(new_flag)\n self.toggle_flag(new_flag)\n elif first_round and center_cell not in self.hyper_queue:\n self.hyper_queue.append(center_cell)", "def box(original, radius):\n batches = original.size()[0]\n num_elem = h.product(original.size()[1:])\n ei = h.getEi(batches,num_elem)\n \n if len(original.size()) > 2:\n ei = ei.contiguous().view(num_elem, *original.size())\n\n return HBox(original, None, ei * radius).checkSizes()", "def set_roi_to_volume(volume, center, sub_volume):\n volume_shape = volume.shape \n patch_shape = sub_volume.shape\n output_volume = volume\n for i in range(len(center)):\n if(center[i] >= volume_shape[i]):\n return output_volume\n r0max = [int(x/2) for x in patch_shape]\n r1max = [patch_shape[i] - r0max[i] for i in range(len(r0max))]\n r0 = [min(r0max[i], center[i]) for i in range(len(r0max))]\n r1 = [min(r1max[i], volume_shape[i] - center[i]) for i in range(len(r0max))]\n patch_center = r0max\n\n if(len(center) == 3):\n output_volume[np.ix_(range(center[0] - r0[0], center[0] + r1[0]),\n range(center[1] - r0[1], center[1] + r1[1]),\n range(center[2] - r0[2], center[2] + r1[2]))] = \\\n sub_volume[np.ix_(range(patch_center[0] - r0[0], patch_center[0] + r1[0]),\n range(patch_center[1] - r0[1], patch_center[1] + r1[1]),\n range(patch_center[2] - r0[2], patch_center[2] + r1[2]))]\n elif(len(center) == 4):\n output_volume[np.ix_(range(center[0] - r0[0], center[0] + r1[0]),\n range(center[1] - r0[1], center[1] + r1[1]),\n range(center[2] - r0[2], center[2] + r1[2]),\n range(center[3] - r0[3], center[3] + r1[3]))] = \\\n sub_volume[np.ix_(range(patch_center[0] - r0[0], patch_center[0] + r1[0]),\n range(patch_center[1] - r0[1], patch_center[1] + r1[1]),\n range(patch_center[2] - r0[2], patch_center[2] + r1[2]),\n range(patch_center[3] - r0[3], patch_center[3] + r1[3]))]\n else:\n raise ValueError(\"array 
dimension should be 3 or 4\") \n return output_volume", "def set_roi_to_volume(volume, center, sub_volume):\n volume_shape = volume.shape \n patch_shape = sub_volume.shape\n output_volume = volume\n for i in range(len(center)):\n if(center[i] >= volume_shape[i]):\n return output_volume\n r0max = [int(x/2) for x in patch_shape]\n r1max = [patch_shape[i] - r0max[i] for i in range(len(r0max))]\n r0 = [min(r0max[i], center[i]) for i in range(len(r0max))]\n r1 = [min(r1max[i], volume_shape[i] - center[i]) for i in range(len(r0max))]\n patch_center = r0max\n\n if(len(center) == 3):\n output_volume[np.ix_(range(center[0] - r0[0], center[0] + r1[0]),\n range(center[1] - r0[1], center[1] + r1[1]),\n range(center[2] - r0[2], center[2] + r1[2]))] = \\\n sub_volume[np.ix_(range(patch_center[0] - r0[0], patch_center[0] + r1[0]),\n range(patch_center[1] - r0[1], patch_center[1] + r1[1]),\n range(patch_center[2] - r0[2], patch_center[2] + r1[2]))]\n elif(len(center) == 4):\n output_volume[np.ix_(range(center[0] - r0[0], center[0] + r1[0]),\n range(center[1] - r0[1], center[1] + r1[1]),\n range(center[2] - r0[2], center[2] + r1[2]),\n range(center[3] - r0[3], center[3] + r1[3]))] = \\\n sub_volume[np.ix_(range(patch_center[0] - r0[0], patch_center[0] + r1[0]),\n range(patch_center[1] - r0[1], patch_center[1] + r1[1]),\n range(patch_center[2] - r0[2], patch_center[2] + r1[2]),\n range(patch_center[3] - r0[3], patch_center[3] + r1[3]))]\n else:\n raise ValueError(\"array dimension should be 3 or 4\") \n return output_volume", "def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)", "def get_radius(self):", "def collect_blocks():\n\n # Below are the position of (c,r) in a block.\n\n #########################\n # (0,0) # (1,0) # (2,0) #\n #########################\n #########################\n # (0,1) # (1,1) # (2,1) #\n #########################\n #########################\n # (0,2) # (1,2) # (2,2) #\n #########################\n\n for x in range(72):\n r, c = x // 9 % 3, x % 3\n if r == 0:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n yield x, x + 19\n yield x, x + 20\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n yield x, x + 17\n yield x, x + 19\n else:\n yield x, x + 7\n yield x, x + 8\n yield x, x + 16\n yield x, x + 17\n elif r == 1:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n else:\n yield x, x + 8\n yield x, x + 7", "def radius_square(self):\n try: \n return self._radius_2\n except AttributeError:\n center = self.center()\n self._radius_2 = max( (v.vector() - center).dot_product(\n v.vector() - center) for v in\n self.vertex_generator() )\n return self._radius_2", 
"def _block(self, x, y):\n\n cells = []\n\n ix = x - (x%3)\n iy = y - (y%3)\n\n for y in range(iy, iy+3):\n for x in range(ix, ix+3):\n i = self._index(x, y)\n cells.append(self.data[i])\n\n return cells", "def make_tight_box(self, tightness: float = 0.33) -> None:\r\n\r\n # Default to the plant's original box\r\n x1 = self.box[0]\r\n y1 = self.box[1]\r\n x2 = self.box[2]\r\n y2 = self.box[3]\r\n\r\n ## Find y coordinates\r\n # Initialize variables and sort pixels by x coordinate\r\n width = self.box[2] - self.box[0]\r\n cents = sorted(self.cluster, key = lambda u: u[1])\r\n clust = []\r\n curr = cents[0][1]\r\n last = 0\r\n\r\n # Split the pixels by x coordinate\r\n for p in range(len(cents)):\r\n if cents[p][1] != curr:\r\n clust.append(cents[last: p])\r\n curr = cents[p][1]\r\n last = p\r\n \r\n # Get the topmost y value which is <tightness> green\r\n for hor in clust:\r\n if len(hor) / width > tightness:\r\n y1 = hor[0][1]\r\n break\r\n\r\n # Get the bottommost y value which is <tightness> green\r\n for hor in clust[::-1]:\r\n if len(hor) / width > tightness:\r\n y2 = hor[0][1]\r\n break\r\n\r\n ## Find x coordinates\r\n # Initialize variables and sort pixels by y coordinate\r\n height = self.box[3] - self.box[1]\r\n cents = sorted(self.cluster, key = lambda u: u[0])\r\n clust = []\r\n curr = cents[0][0]\r\n last = 0\r\n\r\n # Split the pixels by y coordinate\r\n for p in range(len(cents)):\r\n if cents[p][0] != curr:\r\n clust.append(cents[last: p])\r\n curr = cents[p][0]\r\n last = p\r\n\r\n # Get the leftmost x value which is <tightness> green\r\n for ver in clust:\r\n if len(ver) / height > tightness:\r\n x1 = ver[0][0]\r\n break\r\n\r\n # Get the rightmost x value which is <tightness> green\r\n for ver in clust[::-1]:\r\n if len(ver) / height > tightness:\r\n x2 = ver[0][0]\r\n break\r\n\r\n # Default to original x values if no better estimate was found\r\n if x1 == x2:\r\n x1 = self.box[0]\r\n x2 = self.box[2]\r\n\r\n # Default to original y values if no better estimate was found\r\n if y1 == y2:\r\n y1 = self.box[1]\r\n y2 = self.box[3]\r\n\r\n self.tight_box = (x1, y1, x2, y2)", "def draw_block_element(self, cr, x, y):\n cr.rectangle(\n self.wall_width+x*self.block_size, \n (self.block_height-y-1)*self.block_size, \n self.block_size, self.block_size\n )\n \n cr.set_source_rgb(0.2, 0.25, 0.5)\n cr.fill_preserve()\n\n cr.set_source_rgb(0.8,0.8,0.8)\n cr.set_line_width(self.block_size/10)\n cr.stroke()", "def square(diameter: int, top: int = 0, left: int = 0) -> List['GridQubit']:\n return GridQubit.rect(diameter, diameter, top=top, left=left)", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def _get_sprites_block(self, block, region):\n level_data = Group()\n for idx_row, row in enumerate(block):\n for idx_elem, elem in enumerate(row):\n if elem == '-':\n x = idx_elem * BLOCK_WIDTH + SCREEN_RESOLUTION * region\n y = idx_row * BLOCK_HEIGHT\n block = Block(PATH_TO_IMAGE_GRASS, (x, y))\n level_data.add(block)\n elif elem == '+':\n x = idx_elem * WORM_WIDTH + SCREEN_RESOLUTION * region\n y = idx_row * WORM_HEIGHT\n worm = Mob(PATH_TO_IMAGE_WORM, ANIMATIONS_WORMS, (x, y), self.play_sounds)\n level_data.add(worm)\n self.sprite_level_blocks.append(level_data)", "def square(halfSideLength = 30, robotHeight = -90):\n# _______ \n# | |\n# | |\n# |_______|\n# 
\n# | a | \n# a = halfSideLength\n\n posSquare = [\n [halfSideLength,halfSideLength,robotHeight,0,0,0,'mov'],\n [-halfSideLength,halfSideLength,robotHeight,0,0,0,'lin'],\n [-halfSideLength,-halfSideLength,robotHeight,0,0,0,'lin'],\n [halfSideLength,-halfSideLength,robotHeight,0,0,0,'lin'],\n [halfSideLength,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posSquare", "def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5" ]
[ "0.57853335", "0.5534077", "0.55082244", "0.55064106", "0.5454181", "0.53618264", "0.5243425", "0.52276367", "0.5208876", "0.51775503", "0.51532876", "0.51532644", "0.5141889", "0.5138911", "0.5133598", "0.51161593", "0.5114931", "0.5114931", "0.5114019", "0.50985324", "0.5085104", "0.5084861", "0.50726396", "0.5064258", "0.5052702", "0.50412583", "0.5039118", "0.50200194", "0.50129265", "0.50054955" ]
0.6242508
0
Connect two layers with a given fan-in as defined by the square_size and radius. The forward connections are accompanied by feedback context connections back to the originating source unit.
def connect_forward_and_back(simulation_dict, (index0, blocks_per_dim0, predicted_array), (index1, blocks_per_dim1), square_size, radius, context_factor): hidden_size = simulation_dict['hidden_size'] dx = hidden_size dy = hidden_size logging.info("Connecting from index %d to index %d" % (index0, index1)) logging.info("Input layer size is %d, receiving layer size is %d" % (blocks_per_dim0, blocks_per_dim1)) logging.info("Radius of connectivity %d" % radius) for x in range(blocks_per_dim1): for y in range(blocks_per_dim1): surround = get_fan_in((x, y), dim_x_l=blocks_per_dim0, dim_y_l=blocks_per_dim0, dim_x_u=blocks_per_dim1, dim_y_u=blocks_per_dim1, block_x=square_size, block_y=square_size, radius=radius) dest = index1 + x * (blocks_per_dim1) + y # destination unit for xy in surround: source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit # Prepare the input and corresponding delta block at source input_block = simulation_dict['stage0'][source]['output_block'] delta_block = SharedArray.SharedNumpyArray_like(input_block) simulation_dict['stage0'][source]['delta_blocks'].append(delta_block) # Prepare the context and corresonding delta block at destination context_block = simulation_dict['stage0'][dest]['output_block'] delta_block2 = SharedArray.SharedNumpyArray_like(context_block) simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2) # Connect the context block to the source simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor)) # Prepare the predicted blocks xx = xy[0]*hidden_size yy = xy[1]*hidden_size assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape) predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy] if not (predicted_block.shape == (dx, dy)): print predicted_block.shape raise # Connect the input to the destination together with its predicted blocks and so on. past_block = SharedArray.SharedNumpyArray_like(input_block) derivative_block = SharedArray.SharedNumpyArray_like(input_block) integral_block = SharedArray.SharedNumpyArray_like(input_block) pred_block_local = SharedArray.SharedNumpyArray_like(input_block) simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, past_block, derivative_block, integral_block, pred_block_local))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_back(simulation_dict, (index_from, blocks_per_dim_from), (index_to, blocks_per_dim_to), square_size, radius, context_factor):\n logging.info(\"Connecting back additional context from index %d to index %d\" % (index_from, index_to))\n logging.info(\"Connecting back additional context from layer size is %d, receiving layer size is %d\" % (blocks_per_dim_from, blocks_per_dim_to))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim_from):\n for y in range(blocks_per_dim_from):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim_to,\n dim_y_l=blocks_per_dim_to,\n dim_x_u=blocks_per_dim_from,\n dim_y_u=blocks_per_dim_from,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n source = index_from + x * (blocks_per_dim_from) + y # unit in the higher layer\n for xy in surround:\n dest = index_to + xy[0] * blocks_per_dim_to + xy[1] # unit in the lower layer\n context_block = simulation_dict['stage0'][source]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][dest]['context_blocks'].append((context_block, delta_block2, context_factor))", "def connect_forward_and_back_v1(simulation_dict, (index0, blocks_per_dim0, predicted_array, predicted_array_t2), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n predicted_block2 = SharedArray.DynamicView(predicted_array_t2)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, predicted_block2))", "def fc_layer(input, size_in, 
size_out):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))\n b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1))\n return tf.nn.relu(tf.matmul(input, w) + b)", "def __init__(self, input_size, nb_action):\r\n super(Network, self).__init__()\r\n self.input_size = input_size\r\n self.nb_action = nb_action\r\n \r\n #Connection with input layer and hidden layer\r\n self.fc1 = nn.Linear(input_size, 30)\r\n #Connection with hidden layer and output layer\r\n self.fc2 = nn.Linear(30, nb_action)", "def fc_layer(input, size_in, size_out):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))\n # var name needed later for variable filtering\n b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1), name='bias')\n return tf.nn.relu(tf.matmul(input, w) + b)", "def expand_fc_layer(self, layer_int, new_size):\n backward_layer = getattr(self, \"fc{}\".format(layer_int))\n forward_layer = getattr(self, \"fc{}\".format(layer_int + 1))\n batch_norm = getattr(self, \"fc{}_bn\".format(layer_int))\n\n # Get averages, it should be transposed but we can go\n # along the other axis to make it easier\n weight_avgs = torch.mean(forward_layer.weight, dim=0)\n # Sort them for replication\n idxs = weight_avgs.argsort(descending=True)\n # Calculate multiplicative requirement\n extend_amount = (math.ceil(new_size / idxs.size()[0]))\n # Repeat the indices\n idxs = idxs.repeat(extend_amount)[:new_size]\n # Get divides\n _, inverse, ratios = idxs.unique(\n return_inverse=True, return_counts=True)\n ratios = ratios[inverse].float().repeat(extend_amount)[:new_size]\n ratios = ratios.unsqueeze(0)\n # Chunk out to be sure we keep order correct\n SIZE = forward_layer.weight.shape[1]\n chunks = [idxs[SIZE*i:SIZE*i + SIZE].sort()[1] + (SIZE*i)\n for i in range(extend_amount)]\n sorted_idxs = torch.cat(chunks)\n # Get and assign new weights\n new_l2_weights = forward_layer.weight[:, idxs]\n new_l2_weights = new_l2_weights / ratios.expand_as(new_l2_weights)\n new_l1_weights = backward_layer.weight[idxs]\n\n # Reset weight matrices\n new_backward_layer = nn.Linear(backward_layer.in_features, new_size)\n new_backward_layer.weight = nn.Parameter(new_l1_weights[sorted_idxs])\n new_backward_layer.bias = nn.Parameter(backward_layer.bias.data[idxs])\n\n new_forward_layer = nn.Linear(new_size, forward_layer.out_features)\n new_forward_layer.weight = nn.Parameter(new_l2_weights[:, sorted_idxs])\n new_forward_layer.bias = forward_layer.bias\n\n new_batch_norm = nn.BatchNorm1d(new_size)\n new_batch_norm.weight.data = batch_norm.weight.data[idxs]\n new_batch_norm.bias.data = batch_norm.bias.data[idxs]\n\n setattr(self, \"fc{}\".format(layer_int), new_backward_layer)\n setattr(self, \"fc{}\".format(layer_int + 1), new_forward_layer)\n setattr(self, \"fc{}_bn\".format(layer_int), new_batch_norm)\n\n self.cuda()\n self._optim = optim.Adam(self.parameters(), lr=0.001)", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. 
call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def __init__(self, config, input_shp):\n\n # Run initialization for super class\n super(MyNetwork, self).__init__()\n\n # Store configuration\n self.config = config\n\n # Placeholder for layers\n self.layers = {}\n indim = input_shp[0]\n\n # Retrieve Conv, Act, Pool functions from configurations. We'll use\n # these for our code below.\n if config.conv2d == \"torch\":\n self.Conv2d = nn.Conv2d\n elif config.conv2d == \"custom\":\n self.Conv2d = ConvBlock\n self.Activation = getattr(nn, config.activation)\n self.Pool2d = getattr(nn, config.pool2d)\n self.Linear = nn.Linear\n\n # Resnet Blocks, similar to slide 73 of lecture 21. However, for\n # simplicity, we'll make is slightly different. Note that we used\n # nn.Sequential this time.\n self.convs = nn.Sequential()\n cur_h, cur_w = input_shp[-2:]\n for _i in range(config.num_conv_outer):\n #\n # NOTE THE NEW LAYER ON THESE LINES!\n #\n # We have a dedicated 1x1 layer to get more channels. Note also\n # that this is a pure linear convolution layer.\n outdim = config.nchannel_base * 2 ** _i\n self.convs.add_module(\n \"conv_{}_base\".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))\n indim = outdim\n for _j in range(config.num_conv_inner):\n # We now use our selected convolution layer. Note that our\n # resnet implementation will have a different call style to\n # vanilla conv2d of torch, so we'll just do an ugly if-else\n # here.\n if config.conv2d == \"torch\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, 1))\n self.convs.add_module(\n \"act_{}_{}\".format(_i, _j),\n self.Activation())\n cur_h = cur_h - (config.ksize - 1)\n cur_w = cur_w - (config.ksize - 1)\n elif config.conv2d == \"custom\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))\n self.convs.add_module(\n \"conv_{}_pool\".format(_i), self.Pool2d(2, 2))\n cur_h = cur_h // 2\n cur_w = cur_w // 2\n\n # Final output layer. 
We'll assume that conv layer outputs are global\n # average pooled\n self.output = nn.Linear(indim, config.num_class)\n\n print(self)", "def build_2net(input_size, output_size, n_hidden=[5, 3]):\n\t# Create network and modules\n\tnet = FeedForwardNetwork()\n\tinp = LinearLayer(input_size)\n\th1 = SigmoidLayer(n_hidden[0])\n\th2 = TanhLayer(n_hidden[1])\n\toutp = LinearLayer(output_size)\n\t# Add modules\n\tnet.addOutputModule(outp)\n\tnet.addInputModule(inp)\n\tnet.addModule(h1)\n\tnet.addModule(h2)\n\t# Create connections\n\tnet.addConnection(FullConnection(inp, h1, inSliceTo=6))\n\tnet.addConnection(FullConnection(inp, h2, inSliceFrom=6))\n\tnet.addConnection(FullConnection(h1, h2))\n\tnet.addConnection(FullConnection(h2, outp))\n\t# Finish up\n\tnet.sortModules()\n\treturn net", "def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model", "def _radius_neighbors_graph(\n self,\n sf,\n label,\n distance,\n radius,\n k=None,\n src_field='query_label',\n dst_field='reference_label'):\n\n # Get a feature list.\n features = []\n [features.extend(list(i[0])) for i in distance]\n\n\n print 'Determining edges...'\n # Compute the edgelist via nearest neighbors.\n nn = gl.nearest_neighbors.create(\n sf, label=label, features=features, distance=distance)\n edgelist = nn.query(\n sf, label=label, k=k, radius=radius)\n\n # Remove loops from the edgelist.\n # edgelist = self._remove_loops(edgelist)\n\n print 'Constructing graph...'\n # Make the graph.\n g = gl.SGraph(\n sf,\n edgelist,\n vid_field=label,\n src_field=src_field,\n dst_field=dst_field)\n return g", "def draw_neuron(self, center, radius, color):\r\n self.pen.up()\r\n self.pen.color(color)\r\n self.pen.goto(center)\r\n\r\n self.pen.setheading(0)\r\n self.pen.forward(radius)\r\n self.pen.setheading(90)\r\n\r\n # draw circle\r\n self.pen.begin_fill()\r\n self.pen.pendown()\r\n self.pen.circle(radius)\r\n self.pen.end_fill()\r\n\r\n self.pen.color('black')\r\n self.pen.up()\r\n self.pen.goto(center)\r\n self.pen.setheading(0)", "def __init__(self, input_size, output_size, activation=torch.nn.functional.relu, left_to_right=True):\n super(GraphConvolutionalLayer, self).__init__()\n self.w = 
torch.nn.Parameter(torch.rand([input_size, output_size]))\n self.activation = activation\n self.left_to_right = left_to_right", "def add_connector(self):\n \n no = len(self.connectors)\n state = {}\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % no\n \n if len(self.connectors)>0:\n state = self.connectors[-1].get_state()\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % (no)\n else:\n if self.mount == self.MOUNT_THT:\n state[\"p_shape\"] = Con.SHAPE_HOLE\n elif self.mount == self.MOUNT_SMD:\n state[\"p_shape\"] = Con.SHAPE_PAD\n \n c = Con(no)\n c.set_state(state) \n \n self.sch_layers[\"pins\"].add(c.s_svg)\n self.pcb_layers[\"copper1\"].add(c.p_svg)\n self.connectors.append(c)", "def __init__(self, depth=7, latent_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList, Conv2d\r\n from CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\r\n\r\n super().__init__()\r\n\r\n assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert latent_size >= np.power(2, depth - 4), \"latent size will diminish to zero\"\r\n\r\n # state of the generator:\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.latent_size = latent_size\r\n\r\n # register the modules required for the Generator Below ...\r\n # create the ToRGB layers for various outputs:\r\n if self.use_eql:\r\n def to_rgb(in_channels):\r\n return _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\r\n else:\r\n def to_rgb(in_channels):\r\n return Conv2d(in_channels, 1, (1, 1), bias=True)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList([GenInitialBlock(self.latent_size, use_eql=self.use_eql)])\r\n self.rgb_converters = ModuleList([to_rgb(self.latent_size)])\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i <= 2:\r\n layer = GenGeneralConvBlock(self.latent_size, self.latent_size,\r\n use_eql=self.use_eql)\r\n rgb = to_rgb(self.latent_size)\r\n else:\r\n layer = GenGeneralConvBlock(\r\n int(self.latent_size // np.power(2, i - 3)),\r\n int(self.latent_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))\r\n self.layers.append(layer)\r\n self.rgb_converters.append(rgb)", "def conv_block(layer, fsize, training, name, pool=True):\n with tf.variable_scope(name):\n\n for i in range(1, 3):\n\n layer = tf.layers.conv2d(layer, filters=fsize, kernel_size=(3, 3), padding='same',\n kernel_regularizer=l2_reg(1e-1), name='conv-%i' % i)\n layer = tf.layers.batch_normalization(layer, training=training, name='norm-%s' % i)\n layer = tf.nn.relu(layer, name='relu-%i' % i)\n\n if pool:\n pool = tf.layers.max_pooling2d(layer, pool_size=(2, 2), strides=(2, 2), name='pool-%i' % i)\n\n return layer, pool", "def __init__(self, channel):\n super(CoarseFineFlownet, self).__init__()\n in_c = channel * 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 5, 2, 2), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 32, 3, 1, 1), nn.Tanh())\n up1 = nn.PixelShuffle(4)\n self.coarse_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up1)\n in_c = channel * 3 + 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), 
nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 8, 3, 1, 1), nn.Tanh())\n up2 = nn.PixelShuffle(2)\n self.fine_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up2)\n self.warp_c = STN(padding_mode='border')", "def stack_convolution(self, kernel_size, old_size, new_size, batch_normalization=True):\n weights = self.new_weight_variable([kernel_size, kernel_size, old_size, new_size])\n self.flow = tf.nn.conv2d(self.flow, weights, strides=[1, 1, 1, 1], padding=\"SAME\")\n if batch_normalization:\n self.flow = tf.layers.batch_normalization(\n self.flow,\n center=True,\n scale=True,\n training=self.is_training_ph)\n else:\n bias = self.new_bias_variable([new_size])\n self.flow = self.flow + bias # TODO: Is += equivalent?", "def ResUNetPlusPlus(input_size: tuple, test_mode=False):\n assert len(input_size) == 3, \"[ERROR]: Expected tuple of length 3 got {0}\".format(len(input_size))\n\n image_width, image_height, n_channels = input_size\n\n inp = tf.keras.layers.Input(shape=(image_width, image_height, n_channels), dtype=\"float32\", name=\"input_layer\")\n\n # starting conv\n x = layers.Conv2DBN(64, 3, padding=\"same\", activation=\"relu\", name=\"conv_start\")(inp)\n\n # Residual block 1\n x = layers.ResidualBlock(64, 3, activation=\"relu\", name=\"rb_1\")(x)\n skip1 = x\n\n x = tf.keras.layers.Conv2D(128,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=1)(x)\n\n # Residual block 2\n x = layers.ResidualBlock(128, 3, activation=\"relu\", name=\"rb_2\")(x)\n skip2 = x\n\n x = tf.keras.layers.Conv2D(256,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=2)(x)\n\n # Residual block 3\n x = layers.ResidualBlock(256, 3, activation=\"relu\", name=\"rb_3\")(x)\n skip3 = x\n\n x = tf.keras.layers.Conv2D(512,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=4)(x)\n\n # Residual block 4\n x = layers.ResidualBlock(512, 3, activation=\"relu\", name=\"rb_4\")(x)\n skip4 = x\n\n x = tf.keras.layers.Conv2D(1024,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=8)(x)\n\n # Bottleneck ASPP\n x = layers.ASPP(256, [4, 8, 12], (256, 256), 16, activation=\"relu\", name=\"aspp_bottleneck\")(x)\n x = layers.Conv2DBN(1024, 1, activation=\"relu\")(x)\n\n # Up-sample L4\n x = layers.GlobalAttentionUpsample(name=\"GAU_4\")([skip4, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip4])\n x = tf.keras.layers.Conv2D(512,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(512, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_4\")(x)\n\n # Up-sample L3\n x = layers.GlobalAttentionUpsample(name=\"GAU_3\")([skip3, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip3])\n x = tf.keras.layers.Conv2D(256,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(256, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_3\")(x)\n\n # Up-sample L2\n x = layers.GlobalAttentionUpsample(name=\"GAU_2\")([skip2, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip2])\n x = tf.keras.layers.Conv2D(128,\n 
kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(128, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_2\")(x)\n\n # Up-sample L1\n x = layers.GlobalAttentionUpsample(name=\"GAU_1\")([skip1, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip1])\n x = tf.keras.layers.Conv2D(64,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(64, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_1\")(x)\n x = layers.Conv2DBN(1, 1, activation=\"sigmoid\")(x)\n\n return tf.keras.Model(inputs=[inp], outputs=[x])", "def construct_layer(\n self,\n input_layer: \"NeuralNetworkLayer\",\n output_layer: \"NeuralNetworkLayer\",\n **kwargs\n ):\n # Add Nodes\n for node_number in range(self.num_nodes):\n node_object = Circle(\n radius=self.node_radius,\n color=self.node_color,\n stroke_width=self.node_stroke_width,\n )\n self.node_group.add(node_object)\n # Space the nodes\n # Assumes Vertical orientation\n for node_index, node_object in enumerate(self.node_group):\n location = node_index * self.node_spacing\n node_object.move_to([0, location, 0])\n # Create Surrounding Rectangle\n self.surrounding_rectangle = SurroundingRectangle(\n self.node_group,\n color=self.rectangle_color,\n fill_color=self.rectangle_fill_color,\n fill_opacity=1.0,\n buff=self.layer_buffer,\n stroke_width=self.rectangle_stroke_width,\n )\n self.surrounding_rectangle.set_z_index(1)\n # Add the objects to the class\n self.add(self.surrounding_rectangle, self.node_group)\n\n self.construct_activation_function()\n super().construct_layer(input_layer, output_layer, **kwargs)", "def build_transformation_network(n_styles, depthwise_separable_conv):\n\n image_input = Input((None, None, 3), name=\"image\")\n style_weights = Input((n_styles, ), name=\"style_weights\")\n\n net = conv_block(image_input,\n style_weights,\n filters=32,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=64,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(2, 2),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = residual_block(net,\n style_weights,\n filters=128,\n kernel_size=(3, 3),\n strides=(1, 1),\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=64,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = upsampling_block(net,\n style_weights,\n interpolation_factor=2,\n filters=32,\n kernel_size=(3, 3),\n strides=(1, 1),\n activation=\"relu\",\n 
depthwise_separable_conv=depthwise_separable_conv)\n\n net = conv_block(net,\n style_weights,\n filters=3,\n kernel_size=(9, 9),\n strides=(1, 1),\n activation=\"sigmoid\",\n depthwise_separable_conv=depthwise_separable_conv)\n\n net = Lambda(lambda t: t * 255.0, name=\"output\")(net)\n\n return Model([image_input, style_weights], net, name=\"transform_net\")", "def __init__(self, conv_features_sizes, linear_layer_sizes, connector_shape):\n super().__init__()\n \n self.conv = nn.Sequential()\n self.mlp = nn.Sequential()\n self.flat = nn.Flatten()\n\n self.conv.add_module(name=f\"e-fconv{0}\", module=_conv2d_block(1, conv_features_sizes[0], kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{0}\", module=nn.MaxPool2d(2, 2))\n for i, (in_size, out_size) in enumerate(zip(conv_features_sizes[:-1], conv_features_sizes[1:]), 1):\n self.conv.add_module(name=f\"e-fconv{i}\", module=_conv2d_block(in_size, out_size, kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{i}\", module=nn.MaxPool2d(2, 2))\n\n mlp_input_shape = int(reduce((lambda x,y: x * y), connector_shape))\n self.mlp.add_module(name=f\"e-linear{0}\", module=nn.Linear(mlp_input_shape, linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-batchnorm{0}\", module=nn.BatchNorm1d(linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-relu{0}\", module=nn.ReLU())\n for i, (in_size, out_size) in enumerate(zip(linear_layer_sizes[:-1], linear_layer_sizes[1:]), 1):\n self.mlp.add_module(name=f\"e-linear{i}\", module=nn.Linear(in_size, out_size))\n self.mlp.add_module(name=f\"e-batchnorm{i}\", module=nn.BatchNorm1d(out_size))\n self.mlp.add_module(name=f\"e-relu{i}\", module=nn.ReLU())", "def __init__(self, depth=7, feature_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList\r\n from CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\r\n from torch.nn import Conv2d\r\n\r\n super().__init__()\r\n\r\n assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert feature_size >= np.power(2, depth - 4), \\\r\n \"feature size cannot be produced\"\r\n\r\n # create state of the object\r\n self.gpu_parallelize = gpu_parallelize\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.feature_size = feature_size\r\n\r\n # create the fromRGB layers for various inputs:\r\n if self.use_eql:\r\n def from_rgb(out_channels):\r\n return _equalized_conv2d(1, out_channels, (1, 1), bias=True)\r\n else:\r\n def from_rgb(out_channels):\r\n return Conv2d(1, out_channels, (1, 1), bias=True)\r\n\r\n self.rgb_to_features = ModuleList()\r\n self.final_converter = from_rgb(self.feature_size // 2)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList()\r\n self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i > 2:\r\n layer = DisGeneralConvBlock(\r\n int(self.feature_size // np.power(2, i - 2)),\r\n int(self.feature_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = from_rgb(int(self.feature_size // np.power(2, i - 1)))\r\n else:\r\n layer = DisGeneralConvBlock(self.feature_size, self.feature_size // 2,\r\n use_eql=self.use_eql)\r\n rgb = from_rgb(self.feature_size // 2)\r\n\r\n self.layers.append(layer)\r\n self.rgb_to_features.append(rgb)\r\n\r\n # just replace the last converter\r\n self.rgb_to_features[self.depth - 2] = \\\r\n 
from_rgb(self.feature_size // np.power(2, i - 2))\r\n\r\n # parallelize the modules from the module-lists if asked to:\r\n if self.gpu_parallelize:\r\n for i in range(len(self.layers)):\r\n self.layers[i] = torch.nn.DataParallel(self.layers[i])\r\n self.rgb_to_features[i] = torch.nn.DataParallel(\r\n self.rgb_to_features[i])\r\n\r\n # Note that since the FinalBlock contains the StdDev layer,\r\n # it cannot be parallelized so easily. It will have to be parallelized\r\n # from the Lower level (from CustomLayers). This much parallelism\r\n # seems enough for me.\r", "def compile(self):\n m, n = self.input_shape[1], self.input_shape[2]\n\n inp = Input(shape=self.input_shape, traces=True)\n self.add_layer(inp, \"DoG\")\n\n s1 = LIFNodes(shape=(18, m, n), traces=True)\n self.add_layer(s1, \"conv_1\")\n c1 = LIFNodes(shape=(18, m // 2, n // 2), traces=True)\n self.add_layer(c1, \"pool_1\")\n\n s2 = LIFNodes(shape=(24, m // 2, n // 2), traces=True)\n self.add_layer(s2, \"conv_2\")\n c2 = LIFNodes(shape=(24, m // 4, n // 4), traces=True)\n self.add_layer(c2, \"pool_2\")\n\n s3 = LIFNodes(shape=(32, m // 4, n // 4), traces=True)\n self.add_layer(s3, \"conv_3\")\n f = LIFNodes(shape=(32, 1), traces=True)\n self.add_layer(f, \"global_pool\")\n\n conv1 = Conv2dConnection(inp, s1, 5, padding=2, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv1, \"DoG\", \"conv_1\")\n pool1 = MaxPool2dConnection(s1, c1, 2, 2, decay=0.5)\n self.add_connection(pool1, \"conv_1\", \"pool_1\")\n\n conv2 = Conv2dConnection(c1, s2, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv2, \"pool_1\", \"conv_2\")\n pool2 = MaxPool2dConnection(s2, c2, 2, 2, decay=0.5)\n self.add_connection(pool2, \"conv_2\", \"pool_2\")\n\n conv3 = Conv2dConnection(c2, s3, 3, padding=1, weight_decay=0.01,\n nu=0.01, update_rule=PostPre, decay=0.5)\n self.add_connection(conv3, \"pool_2\", \"conv_3\")\n global_pool = MaxPool2dConnection(s3, f, (m // 4, n // 4), decay=0.5)\n self.add_connection(global_pool, \"conv_3\", \"global_pool\")\n\n monitor = NetworkMonitor(self, layers=[\"DoG\", \"conv_1\", \"pool_1\",\n \"conv_2\", \"pool_2\",\n \"conv_3\", \"global_pool\"],\n connections=[(\"DoG\", \"conv_1\"),\n (\"pool_1\", \"conv_2\"),\n (\"pool_2\", \"conv_3\")],\n state_vars=[\"w\", \"s\"])\n self.add_monitor(monitor, \"network_monitor\")\n\n return self", "def __init__(self, inputLayerSize, outputLayerSize, \\\n hiddenLayerSize):\n #Network hyperparameters - neurons per layer - **not altered by training**\n self.inputLayerSize = inputLayerSize\n self.outputLayerSize = outputLayerSize\n self.hiddenLayerSize = hiddenLayerSize\n self.num_params = inputLayerSize * hiddenLayerSize + \\\n hiddenLayerSize * outputLayerSize + hiddenLayerSize \\\n + outputLayerSize\n #--Weights--\n #w_ih - weights of synapses linking input -> hidden\n self.w_ih = np.random.randn( self.inputLayerSize, \\\n self.hiddenLayerSize)\n #w_ho - weights of synapses linking hidden -> output\n self.w_ho = np.random.randn( self.hiddenLayerSize, \\\n self.outputLayerSize)\n \n #--Biases--\n #b_h - biases of hidden layer\n self.b_h = np.random.randn( self.hiddenLayerSize )\n #b_o - biases of output layer\n self.b_o = np.random.randn( self.outputLayerSize )", "def intermediate_layer(layer, filters, kernel_size):\n layer = Conv2D(filters=filters, kernel_size=kernel_size,\n strides=(2, 2), padding=\"same\")(layer)\n layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA)(layer)\n return layer", "def 
build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 4, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def __init__(self, layers, input_size):\n super(ConvNetMaker, self).__init__()\n self.conv_layers = []\n self.fc_layers = []\n # h, w, d = 32, 32, 3\n h, w, d = input_size, input_size, 3\n previous_layer_filter_count = 3\n previous_layer_size = h * w * d\n num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])\n for layer in layers:\n if layer.startswith('Conv'):\n filter_count = int(layer[4:])\n self.conv_layers += [\n nn.Conv2d(previous_layer_filter_count,\n filter_count,\n kernel_size=3,\n padding=1),\n nn.BatchNorm2d(filter_count),\n nn.ReLU(inplace=True)\n ]\n\n previous_layer_filter_count = filter_count\n d = filter_count\n previous_layer_size = h * w * d\n elif layer.startswith('MaxPool'):\n self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n h, w = int(h / 2.0), int(w / 2.0)\n previous_layer_size = h * w * d\n elif layer.startswith('FC'):\n num_fc_layers_remained -= 1\n current_layer_size = int(layer[2:])\n if num_fc_layers_remained == 0:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size)]\n else:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size),\n nn.ReLU(inplace=True)]\n previous_layer_size = current_layer_size\n\n conv_layers = self.conv_layers\n fc_layers = self.fc_layers\n self.conv_layers = nn.Sequential(*conv_layers)\n self.fc_layers = nn.Sequential(*fc_layers)", "def create_simple_conv2D_model(feats2d, shapes, model_settings, is_training):\n\n if is_training:\n dropout_prob = model_settings['dropout_prob'] \n\n # Input Layer\n shape = tf.shape(feats2d) # features are of shape [max seq length for batch, 40]\n input_layer = tf.reshape(feats2d,tf.stack([-1, shape[1], shape[2], 1])) # [batch_size, seq_length, 40, 1]\n\n # Convolutional Layer #1\n conv1 = tf.layers.conv2d(\n inputs=input_layer,\n filters=64,\n kernel_size=[20, 8],\n padding=\"same\",\n activation=tf.nn.relu)\n\n dropout1 = tf.layers.dropout(\n inputs=conv1, rate=dropout_prob, training=is_training)\n\n # Pooling Layer #1\n pool1 = tf.layers.max_pooling2d(inputs=dropout1, pool_size=[2, 2], strides=2)\n\n # Convolutional Layer #2 and Pooling Layer #2\n conv2 = tf.layers.conv2d(\n inputs=pool1,\n filters=64,\n kernel_size=[10, 4],\n padding=\"same\",\n activation=tf.nn.relu)\n\n dropout2 = tf.layers.dropout(\n inputs=conv2, rate=dropout_prob, 
training=is_training)\n\n pool2 = tf.layers.max_pooling2d(inputs=dropout2, pool_size=[2, 2], strides=2) # [batch_size, pool2_shape[1], pool2_shape[2], 64]\n\n # in case we want to use a flat output layer from convolutions\n # pool2_flat = tf.layers.flatten(pool2) # [batch_size, pool2_shape[1] * pool2_shape[2] * 64]\n # idem as: \n # pool2_shape = tf.shape(pool2) \n # pool2_flat = tf.reshape(pool2, [-1, pool2_shape[1] * pool2_shape[2] * 64]) \n\n # Average of the result of convolutions over 2 axes: max sequence length in the batch and dimension of sepctrogram\n pool_sum = tf.reduce_sum(pool2,[1,2],keep_dims=True) # [batch_size, 1, 1, 64]\n mean = pool_sum/tf.cast(shape[1] * shape[2], tf.float32)# [batch_size, 1, 1, 64]\n res1=tf.squeeze(mean, axis=[1,2]) # [batch_size, 64]\n\n # Logits Layer\n num_classes = model_settings['num_classes']\n logits = tf.layers.dense(inputs=res1, units=num_classes)\n\n if is_training:\n return logits, dropout_prob\n else:\n return logits", "def add_neuron(self):\n self.num_hiddens += 1\n self.input2hidden_layers[str(len(self.input2hidden_layers))] = nn.Linear(self.input_size, 1, bias=False)\n for n_connection in range(self.num_hiddens - 1):\n self.hidden2hidden_layers[str(len(self.hidden2hidden_layers))] = nn.Linear(1, 1,bias=False)\n self.hidden2output_layers[str(len(self.hidden2output_layers))] = nn.Linear(1, self.num_classes, bias=False)\n\n return" ]
[ "0.597466", "0.57517326", "0.50623995", "0.49667892", "0.49642864", "0.49607903", "0.49168962", "0.4915778", "0.4870057", "0.48660815", "0.48269352", "0.4822324", "0.48160443", "0.47990656", "0.46569383", "0.46474767", "0.4635789", "0.46261376", "0.46119142", "0.45876154", "0.45729813", "0.45672578", "0.4554374", "0.4540337", "0.45374763", "0.45279053", "0.45274502", "0.45209405", "0.45107588", "0.4500741" ]
0.5859038
1
Connect two layers with a given fan in as defined by the square_size and radius. The forward connections are accompanied by feedback context connections back to the originating source unit.
def connect_forward_and_back_v1(simulation_dict, (index0, blocks_per_dim0, predicted_array, predicted_array_t2), (index1, blocks_per_dim1), square_size, radius, context_factor): hidden_size = simulation_dict['hidden_size'] dx = hidden_size dy = hidden_size logging.info("Connecting from index %d to index %d" % (index0, index1)) logging.info("Input layer size is %d, receiving layer size is %d" % (blocks_per_dim0, blocks_per_dim1)) logging.info("Radius of connectivity %d" % radius) for x in range(blocks_per_dim1): for y in range(blocks_per_dim1): surround = get_fan_in((x, y), dim_x_l=blocks_per_dim0, dim_y_l=blocks_per_dim0, dim_x_u=blocks_per_dim1, dim_y_u=blocks_per_dim1, block_x=square_size, block_y=square_size, radius=radius) dest = index1 + x * (blocks_per_dim1) + y # destination unit for xy in surround: source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit # Prepare the input and corresponding delta block at source input_block = simulation_dict['stage0'][source]['output_block'] delta_block = SharedArray.SharedNumpyArray_like(input_block) simulation_dict['stage0'][source]['delta_blocks'].append(delta_block) # Prepare the context and corresonding delta block at destination context_block = simulation_dict['stage0'][dest]['output_block'] delta_block2 = SharedArray.SharedNumpyArray_like(context_block) simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2) # Connect the context block to the source simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor)) # Prepare the predicted blocks xx = xy[0]*hidden_size yy = xy[1]*hidden_size assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape) predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy] predicted_block2 = SharedArray.DynamicView(predicted_array_t2)[xx:xx+dx, yy:yy+dy] if not (predicted_block.shape == (dx, dy)): print predicted_block.shape raise # Connect the input to the destination together with its predicted blocks and so on. simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, predicted_block2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_forward_and_back(simulation_dict, (index0, blocks_per_dim0, predicted_array), (index1, blocks_per_dim1), square_size, radius, context_factor):\n hidden_size = simulation_dict['hidden_size']\n dx = hidden_size\n dy = hidden_size\n logging.info(\"Connecting from index %d to index %d\" % (index0, index1))\n logging.info(\"Input layer size is %d, receiving layer size is %d\" % (blocks_per_dim0, blocks_per_dim1))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim1):\n for y in range(blocks_per_dim1):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim0,\n dim_y_l=blocks_per_dim0,\n dim_x_u=blocks_per_dim1,\n dim_y_u=blocks_per_dim1,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n dest = index1 + x * (blocks_per_dim1) + y # destination unit\n for xy in surround:\n source = index0 + xy[0] * blocks_per_dim0 + xy[1] # source unit\n # Prepare the input and corresponding delta block at source\n input_block = simulation_dict['stage0'][source]['output_block']\n delta_block = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][source]['delta_blocks'].append(delta_block)\n # Prepare the context and corresonding delta block at destination\n context_block = simulation_dict['stage0'][dest]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n simulation_dict['stage0'][dest]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][source]['context_blocks'].append((context_block, delta_block2, context_factor))\n # Prepare the predicted blocks\n xx = xy[0]*hidden_size\n yy = xy[1]*hidden_size\n assert(predicted_array[xx:xx+dx, yy:yy+dy].shape == context_block.shape)\n predicted_block = SharedArray.DynamicView(predicted_array)[xx:xx+dx, yy:yy+dy]\n if not (predicted_block.shape == (dx, dy)):\n print predicted_block.shape\n raise\n # Connect the input to the destination together with its predicted blocks and so on.\n past_block = SharedArray.SharedNumpyArray_like(input_block)\n derivative_block = SharedArray.SharedNumpyArray_like(input_block)\n integral_block = SharedArray.SharedNumpyArray_like(input_block)\n pred_block_local = SharedArray.SharedNumpyArray_like(input_block)\n simulation_dict['stage0'][dest]['signal_blocks'].append((input_block, delta_block, predicted_block, past_block, derivative_block, integral_block, pred_block_local))", "def connect_back(simulation_dict, (index_from, blocks_per_dim_from), (index_to, blocks_per_dim_to), square_size, radius, context_factor):\n logging.info(\"Connecting back additional context from index %d to index %d\" % (index_from, index_to))\n logging.info(\"Connecting back additional context from layer size is %d, receiving layer size is %d\" % (blocks_per_dim_from, blocks_per_dim_to))\n logging.info(\"Radius of connectivity %d\" % radius)\n for x in range(blocks_per_dim_from):\n for y in range(blocks_per_dim_from):\n surround = get_fan_in((x, y),\n dim_x_l=blocks_per_dim_to,\n dim_y_l=blocks_per_dim_to,\n dim_x_u=blocks_per_dim_from,\n dim_y_u=blocks_per_dim_from,\n block_x=square_size,\n block_y=square_size,\n radius=radius)\n source = index_from + x * (blocks_per_dim_from) + y # unit in the higher layer\n for xy in surround:\n dest = index_to + xy[0] * blocks_per_dim_to + xy[1] # unit in the lower layer\n context_block = simulation_dict['stage0'][source]['output_block']\n delta_block2 = SharedArray.SharedNumpyArray_like(context_block)\n 
simulation_dict['stage0'][source]['delta_blocks'].append(delta_block2)\n # Connect the context block to the source\n simulation_dict['stage0'][dest]['context_blocks'].append((context_block, delta_block2, context_factor))", "def __init__(self, input_size, nb_action):\r\n super(Network, self).__init__()\r\n self.input_size = input_size\r\n self.nb_action = nb_action\r\n \r\n #Connection with input layer and hidden layer\r\n self.fc1 = nn.Linear(input_size, 30)\r\n #Connection with hidden layer and output layer\r\n self.fc2 = nn.Linear(30, nb_action)", "def _radius_neighbors_graph(\n self,\n sf,\n label,\n distance,\n radius,\n k=None,\n src_field='query_label',\n dst_field='reference_label'):\n\n # Get a feature list.\n features = []\n [features.extend(list(i[0])) for i in distance]\n\n\n print 'Determining edges...'\n # Compute the edgelist via nearest neighbors.\n nn = gl.nearest_neighbors.create(\n sf, label=label, features=features, distance=distance)\n edgelist = nn.query(\n sf, label=label, k=k, radius=radius)\n\n # Remove loops from the edgelist.\n # edgelist = self._remove_loops(edgelist)\n\n print 'Constructing graph...'\n # Make the graph.\n g = gl.SGraph(\n sf,\n edgelist,\n vid_field=label,\n src_field=src_field,\n dst_field=dst_field)\n return g", "def add_connector(self):\n \n no = len(self.connectors)\n state = {}\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % no\n \n if len(self.connectors)>0:\n state = self.connectors[-1].get_state()\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % (no)\n else:\n if self.mount == self.MOUNT_THT:\n state[\"p_shape\"] = Con.SHAPE_HOLE\n elif self.mount == self.MOUNT_SMD:\n state[\"p_shape\"] = Con.SHAPE_PAD\n \n c = Con(no)\n c.set_state(state) \n \n self.sch_layers[\"pins\"].add(c.s_svg)\n self.pcb_layers[\"copper1\"].add(c.p_svg)\n self.connectors.append(c)", "def expand_fc_layer(self, layer_int, new_size):\n backward_layer = getattr(self, \"fc{}\".format(layer_int))\n forward_layer = getattr(self, \"fc{}\".format(layer_int + 1))\n batch_norm = getattr(self, \"fc{}_bn\".format(layer_int))\n\n # Get averages, it should be transposed but we can go\n # along the other axis to make it easier\n weight_avgs = torch.mean(forward_layer.weight, dim=0)\n # Sort them for replication\n idxs = weight_avgs.argsort(descending=True)\n # Calculate multiplicative requirement\n extend_amount = (math.ceil(new_size / idxs.size()[0]))\n # Repeat the indices\n idxs = idxs.repeat(extend_amount)[:new_size]\n # Get divides\n _, inverse, ratios = idxs.unique(\n return_inverse=True, return_counts=True)\n ratios = ratios[inverse].float().repeat(extend_amount)[:new_size]\n ratios = ratios.unsqueeze(0)\n # Chunk out to be sure we keep order correct\n SIZE = forward_layer.weight.shape[1]\n chunks = [idxs[SIZE*i:SIZE*i + SIZE].sort()[1] + (SIZE*i)\n for i in range(extend_amount)]\n sorted_idxs = torch.cat(chunks)\n # Get and assign new weights\n new_l2_weights = forward_layer.weight[:, idxs]\n new_l2_weights = new_l2_weights / ratios.expand_as(new_l2_weights)\n new_l1_weights = backward_layer.weight[idxs]\n\n # Reset weight matrices\n new_backward_layer = nn.Linear(backward_layer.in_features, new_size)\n new_backward_layer.weight = nn.Parameter(new_l1_weights[sorted_idxs])\n new_backward_layer.bias = nn.Parameter(backward_layer.bias.data[idxs])\n\n new_forward_layer = nn.Linear(new_size, forward_layer.out_features)\n new_forward_layer.weight = nn.Parameter(new_l2_weights[:, sorted_idxs])\n 
new_forward_layer.bias = forward_layer.bias\n\n new_batch_norm = nn.BatchNorm1d(new_size)\n new_batch_norm.weight.data = batch_norm.weight.data[idxs]\n new_batch_norm.bias.data = batch_norm.bias.data[idxs]\n\n setattr(self, \"fc{}\".format(layer_int), new_backward_layer)\n setattr(self, \"fc{}\".format(layer_int + 1), new_forward_layer)\n setattr(self, \"fc{}_bn\".format(layer_int), new_batch_norm)\n\n self.cuda()\n self._optim = optim.Adam(self.parameters(), lr=0.001)", "def draw_neuron(self, center, radius, color):\r\n self.pen.up()\r\n self.pen.color(color)\r\n self.pen.goto(center)\r\n\r\n self.pen.setheading(0)\r\n self.pen.forward(radius)\r\n self.pen.setheading(90)\r\n\r\n # draw circle\r\n self.pen.begin_fill()\r\n self.pen.pendown()\r\n self.pen.circle(radius)\r\n self.pen.end_fill()\r\n\r\n self.pen.color('black')\r\n self.pen.up()\r\n self.pen.goto(center)\r\n self.pen.setheading(0)", "def connect_layers(self):\n if not self.check():\n msg = \"Failed to check neural network.\"\n print(msg)\n logging.error(msg)\n return\n\n # 1. set input layer\n pre_layer = self.input_layer\n for layer in self.hidden_layers:\n layer.set_input_layer(pre_layer)\n pre_layer = layer\n self.output_layer.set_input_layer(pre_layer)\n\n # 2. set output layer\n next_layer = self.output_layer\n for layer in reversed(self.hidden_layers):\n layer.set_next_layer(next_layer)\n next_layer = layer\n self.input_layer.set_next_layer(next_layer)\n\n # 3. call layer init\n self.input_layer.init()\n for layer in self.hidden_layers:\n layer.init()\n self.output_layer.init()\n\n return", "def __init__(self, depth=7, latent_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList, Conv2d\r\n from CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\r\n\r\n super().__init__()\r\n\r\n assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert latent_size >= np.power(2, depth - 4), \"latent size will diminish to zero\"\r\n\r\n # state of the generator:\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.latent_size = latent_size\r\n\r\n # register the modules required for the Generator Below ...\r\n # create the ToRGB layers for various outputs:\r\n if self.use_eql:\r\n def to_rgb(in_channels):\r\n return _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\r\n else:\r\n def to_rgb(in_channels):\r\n return Conv2d(in_channels, 1, (1, 1), bias=True)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList([GenInitialBlock(self.latent_size, use_eql=self.use_eql)])\r\n self.rgb_converters = ModuleList([to_rgb(self.latent_size)])\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i <= 2:\r\n layer = GenGeneralConvBlock(self.latent_size, self.latent_size,\r\n use_eql=self.use_eql)\r\n rgb = to_rgb(self.latent_size)\r\n else:\r\n layer = GenGeneralConvBlock(\r\n int(self.latent_size // np.power(2, i - 3)),\r\n int(self.latent_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))\r\n self.layers.append(layer)\r\n self.rgb_converters.append(rgb)", "def __call__(self, **kwargs):\n segname = 'block_{}_expand_relu'\n blocks = [13, 6, 3, 1]\n skips = [self._backbone.get_layer(segname.format(i)) for i in blocks]\n backbone_out = self._backbone.get_layer('block_16_project')\n\n p5 = self._fpn_block(backbone_out.output, skips[0].output)\n p4 = 
self._fpn_block(p5, skips[1].output)\n p3 = self._fpn_block(p4, skips[2].output)\n p2 = self._fpn_block(p3, skips[3].output)\n\n s5 = self._conv_block(p5, 128)\n s4 = self._conv_block(p4, 128)\n s3 = self._conv_block(p3, 128)\n s2 = self._conv_block(p2, 128)\n\n s5 = tf.keras.layers.UpSampling2D(\n size=(8, 8),\n interpolation='nearest'\n )(s5)\n\n s4 = tf.keras.layers.UpSampling2D(\n size=(4, 4),\n interpolation='nearest'\n )(s4)\n\n s3 = tf.keras.layers.UpSampling2D(\n size=(2, 2),\n interpolation='nearest'\n )(s3)\n\n concat = [s5, s4, s3, s2]\n x = tf.keras.layers.Concatenate()(concat)\n x = tf.keras.layers.Conv2D(\n 64,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu')(x)\n x = tf.keras.layers.UpSampling2D((2, 2))(x)\n\n x = tf.keras.layers.Conv2D(\n 1,\n kernel_size=3,\n padding='same',\n kernel_initializer='he_uniform'\n )(x)\n\n out = tf.keras.layers.Activation('sigmoid')(x)\n model = tf.keras.models.Model(\n inputs=self._backbone.input,\n outputs=out\n )\n\n return model", "def conv_block(layer, fsize, training, name, pool=True):\n with tf.variable_scope(name):\n\n for i in range(1, 3):\n\n layer = tf.layers.conv2d(layer, filters=fsize, kernel_size=(3, 3), padding='same',\n kernel_regularizer=l2_reg(1e-1), name='conv-%i' % i)\n layer = tf.layers.batch_normalization(layer, training=training, name='norm-%s' % i)\n layer = tf.nn.relu(layer, name='relu-%i' % i)\n\n if pool:\n pool = tf.layers.max_pooling2d(layer, pool_size=(2, 2), strides=(2, 2), name='pool-%i' % i)\n\n return layer, pool", "def __init__(self, depth=7, feature_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList\r\n from CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\r\n from torch.nn import Conv2d\r\n\r\n super().__init__()\r\n\r\n assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert feature_size >= np.power(2, depth - 4), \\\r\n \"feature size cannot be produced\"\r\n\r\n # create state of the object\r\n self.gpu_parallelize = gpu_parallelize\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.feature_size = feature_size\r\n\r\n # create the fromRGB layers for various inputs:\r\n if self.use_eql:\r\n def from_rgb(out_channels):\r\n return _equalized_conv2d(1, out_channels, (1, 1), bias=True)\r\n else:\r\n def from_rgb(out_channels):\r\n return Conv2d(1, out_channels, (1, 1), bias=True)\r\n\r\n self.rgb_to_features = ModuleList()\r\n self.final_converter = from_rgb(self.feature_size // 2)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList()\r\n self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i > 2:\r\n layer = DisGeneralConvBlock(\r\n int(self.feature_size // np.power(2, i - 2)),\r\n int(self.feature_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = from_rgb(int(self.feature_size // np.power(2, i - 1)))\r\n else:\r\n layer = DisGeneralConvBlock(self.feature_size, self.feature_size // 2,\r\n use_eql=self.use_eql)\r\n rgb = from_rgb(self.feature_size // 2)\r\n\r\n self.layers.append(layer)\r\n self.rgb_to_features.append(rgb)\r\n\r\n # just replace the last converter\r\n self.rgb_to_features[self.depth - 2] = \\\r\n from_rgb(self.feature_size // np.power(2, i - 2))\r\n\r\n 
# parallelize the modules from the module-lists if asked to:\r\n if self.gpu_parallelize:\r\n for i in range(len(self.layers)):\r\n self.layers[i] = torch.nn.DataParallel(self.layers[i])\r\n self.rgb_to_features[i] = torch.nn.DataParallel(\r\n self.rgb_to_features[i])\r\n\r\n # Note that since the FinalBlock contains the StdDev layer,\r\n # it cannot be parallelized so easily. It will have to be parallelized\r\n # from the Lower level (from CustomLayers). This much parallelism\r\n # seems enough for me.\r", "def fc_layer(input, size_in, size_out):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))\n b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1))\n return tf.nn.relu(tf.matmul(input, w) + b)", "def __init__(self, input_size, output_size, activation=torch.nn.functional.relu, left_to_right=True):\n super(GraphConvolutionalLayer, self).__init__()\n self.w = torch.nn.Parameter(torch.rand([input_size, output_size]))\n self.activation = activation\n self.left_to_right = left_to_right", "def build_2net(input_size, output_size, n_hidden=[5, 3]):\n\t# Create network and modules\n\tnet = FeedForwardNetwork()\n\tinp = LinearLayer(input_size)\n\th1 = SigmoidLayer(n_hidden[0])\n\th2 = TanhLayer(n_hidden[1])\n\toutp = LinearLayer(output_size)\n\t# Add modules\n\tnet.addOutputModule(outp)\n\tnet.addInputModule(inp)\n\tnet.addModule(h1)\n\tnet.addModule(h2)\n\t# Create connections\n\tnet.addConnection(FullConnection(inp, h1, inSliceTo=6))\n\tnet.addConnection(FullConnection(inp, h2, inSliceFrom=6))\n\tnet.addConnection(FullConnection(h1, h2))\n\tnet.addConnection(FullConnection(h2, outp))\n\t# Finish up\n\tnet.sortModules()\n\treturn net", "def connect(src, target, reftype):", "def __init__(self, config, input_shp):\n\n # Run initialization for super class\n super(MyNetwork, self).__init__()\n\n # Store configuration\n self.config = config\n\n # Placeholder for layers\n self.layers = {}\n indim = input_shp[0]\n\n # Retrieve Conv, Act, Pool functions from configurations. We'll use\n # these for our code below.\n if config.conv2d == \"torch\":\n self.Conv2d = nn.Conv2d\n elif config.conv2d == \"custom\":\n self.Conv2d = ConvBlock\n self.Activation = getattr(nn, config.activation)\n self.Pool2d = getattr(nn, config.pool2d)\n self.Linear = nn.Linear\n\n # Resnet Blocks, similar to slide 73 of lecture 21. However, for\n # simplicity, we'll make is slightly different. Note that we used\n # nn.Sequential this time.\n self.convs = nn.Sequential()\n cur_h, cur_w = input_shp[-2:]\n for _i in range(config.num_conv_outer):\n #\n # NOTE THE NEW LAYER ON THESE LINES!\n #\n # We have a dedicated 1x1 layer to get more channels. Note also\n # that this is a pure linear convolution layer.\n outdim = config.nchannel_base * 2 ** _i\n self.convs.add_module(\n \"conv_{}_base\".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))\n indim = outdim\n for _j in range(config.num_conv_inner):\n # We now use our selected convolution layer. 
Note that our\n # resnet implementation will have a different call style to\n # vanilla conv2d of torch, so we'll just do an ugly if-else\n # here.\n if config.conv2d == \"torch\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, 1))\n self.convs.add_module(\n \"act_{}_{}\".format(_i, _j),\n self.Activation())\n cur_h = cur_h - (config.ksize - 1)\n cur_w = cur_w - (config.ksize - 1)\n elif config.conv2d == \"custom\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))\n self.convs.add_module(\n \"conv_{}_pool\".format(_i), self.Pool2d(2, 2))\n cur_h = cur_h // 2\n cur_w = cur_w // 2\n\n # Final output layer. We'll assume that conv layer outputs are global\n # average pooled\n self.output = nn.Linear(indim, config.num_class)\n\n print(self)", "def connect(self):\n if self.pin_1.type == self.pin_2.type:\n self.pin_1.connected = True\n self.pin_2.connected = True\n else:\n raise InvalidPowerCombination(\"Not the same types\")", "def fc_layer(input, size_in, size_out):\n w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))\n # var name needed later for variable filtering\n b = tf.Variable(tf.truncated_normal([size_out], stddev=0.1), name='bias')\n return tf.nn.relu(tf.matmul(input, w) + b)", "def __init__(self, channel):\n super(CoarseFineFlownet, self).__init__()\n in_c = channel * 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 5, 2, 2), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 32, 3, 1, 1), nn.Tanh())\n up1 = nn.PixelShuffle(4)\n self.coarse_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up1)\n in_c = channel * 3 + 2\n conv1 = nn.Sequential(nn.Conv2d(in_c, 24, 5, 2, 2), nn.ReLU(True))\n conv2 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv3 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv4 = nn.Sequential(nn.Conv2d(24, 24, 3, 1, 1), nn.ReLU(True))\n conv5 = nn.Sequential(nn.Conv2d(24, 8, 3, 1, 1), nn.Tanh())\n up2 = nn.PixelShuffle(2)\n self.fine_flow = nn.Sequential(conv1, conv2, conv3, conv4, conv5, up2)\n self.warp_c = STN(padding_mode='border')", "def __init__(self, conv_features_sizes, linear_layer_sizes, connector_shape):\n super().__init__()\n \n self.conv = nn.Sequential()\n self.mlp = nn.Sequential()\n self.flat = nn.Flatten()\n\n self.conv.add_module(name=f\"e-fconv{0}\", module=_conv2d_block(1, conv_features_sizes[0], kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{0}\", module=nn.MaxPool2d(2, 2))\n for i, (in_size, out_size) in enumerate(zip(conv_features_sizes[:-1], conv_features_sizes[1:]), 1):\n self.conv.add_module(name=f\"e-fconv{i}\", module=_conv2d_block(in_size, out_size, kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{i}\", module=nn.MaxPool2d(2, 2))\n\n mlp_input_shape = int(reduce((lambda x,y: x * y), connector_shape))\n self.mlp.add_module(name=f\"e-linear{0}\", module=nn.Linear(mlp_input_shape, linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-batchnorm{0}\", module=nn.BatchNorm1d(linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-relu{0}\", module=nn.ReLU())\n for i, (in_size, out_size) in enumerate(zip(linear_layer_sizes[:-1], linear_layer_sizes[1:]), 1):\n self.mlp.add_module(name=f\"e-linear{i}\", module=nn.Linear(in_size, 
out_size))\n self.mlp.add_module(name=f\"e-batchnorm{i}\", module=nn.BatchNorm1d(out_size))\n self.mlp.add_module(name=f\"e-relu{i}\", module=nn.ReLU())", "def __init__(self, count, channels):\n super().__init__()\n self.count = count\n self.channels = channels\n # Organization of the distance data can be angles first, channels second,\n # or channels first, angles second.\n # E.g., for inputs with a shape of (angles, channels):\n # [ [23.0, 27.0], [1.0, 27.0], [23.0, 27.0] ]\n # would have three angles for rays detecting distances to two types of\n # objects.\n\n self.layers = []\n layer_width = self.count\n\n self.layers_parameters = [\n FeelersLayerParameters(kernel_size=5, kernel_count=3, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=3, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=2, pool_size=2),\n FeelersLayerParameters(kernel_size=5, kernel_count=2, pool_size=2),\n ]\n\n # Larger kernel sizes are appropriate for 1D convolutions.\n # Small number of filters to keep total parameter count low.\n for param in self.layers_parameters:\n width_after_next_layer = self._width_after_convolution(\n layer_width, param.kernel_size, param.pool_size)\n if width_after_next_layer < param.kernel_size:\n break\n self.layers.append(\n tf.keras.layers.Conv1D(\n filters=param.kernel_count, kernel_size=param.kernel_size))\n self.layers.append(\n tf.keras.layers.MaxPool1D(pool_size=param.pool_size))\n layer_width = width_after_next_layer", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. 
if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def go(self, z):\n with tf.variable_scope(self.name) as scope:\n batch_size = tf.shape(z)[0]\n fc = tf.contrib.layers.fully_connected(z, 4*4*1024, activation_fn=tf.identity)\n reshape_fc = tf.reshape(fc, [1, 4, 4, 1024])\n \n conv1 = tf.contrib.layers.conv2d_transpose(\n reshape_fc, 512, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02), #st_dev from dcgan paper\n activation_fn = leaky_relu\n )\n \n conv2 = tf.contrib.layers.conv2d_transpose(\n conv1, 256, [4, 4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n activation_fn = leaky_relu\n )\n \n conv3 = tf.contrib.layers.conv2d_transpose(\n conv2, 3, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = leaky_relu\n activation_fn = tf.tanh\n )\n \n# conv4 = tf.contrib.layers.conv2d_transpose(\n# conv3, 3, [4,4], [2,2],\n# weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = tf.tanh\n# )\n return conv3", 
"def __init__(\n self,\n wshare,\n n_feat,\n dropout_rate,\n kernel_size,\n use_kernel_mask=False,\n use_bias=False,\n ):\n super(LightweightConvolution2D, self).__init__()\n\n assert n_feat % wshare == 0\n self.wshare = wshare\n self.use_kernel_mask = use_kernel_mask\n self.dropout_rate = dropout_rate\n self.kernel_size = kernel_size\n self.padding_size = int(kernel_size / 2)\n\n # linear -> GLU -> lightconv -> linear\n self.linear1 = nn.Linear(n_feat, n_feat * 2)\n self.linear2 = nn.Linear(n_feat * 2, n_feat)\n self.act = nn.GLU()\n\n # lightconv related\n self.weight = nn.Parameter(\n torch.Tensor(self.wshare, 1, kernel_size).uniform_(0, 1)\n )\n self.weight_f = nn.Parameter(torch.Tensor(1, 1, kernel_size).uniform_(0, 1))\n self.use_bias = use_bias\n if self.use_bias:\n self.bias = nn.Parameter(torch.Tensor(n_feat))\n\n # mask of kernel\n kernel_mask0 = torch.zeros(self.wshare, int(kernel_size / 2))\n kernel_mask1 = torch.ones(self.wshare, int(kernel_size / 2 + 1))\n self.kernel_mask = torch.cat((kernel_mask1, kernel_mask0), dim=-1).unsqueeze(1)", "def __init__(self, state_size, action_dim, seed, fc1_units=64, fc2_units=64):\n super(RNetwork, self).__init__()\n self.seed = torch.manual_seed(seed)\n self.fc1 = nn.Linear(state_size + action_dim, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, 1)", "def _build_network(self):\n self.new_trainable_variable(\"w0_sin\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_sin\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_sin\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_sin\", np.zeros(config.oscillators, dtype=np.float64))\n\n self.new_trainable_variable(\"w0_cos\", np.zeros(\n (config.somites * 2 - 2, HIDDEN_LAYER_UNITS), dtype=np.float64))\n self.new_trainable_variable(\"b0_cos\", np.zeros(HIDDEN_LAYER_UNITS, dtype=np.float64))\n self.new_trainable_variable(\"w1_cos\", np.zeros(\n (HIDDEN_LAYER_UNITS, config.oscillators), dtype=np.float64))\n self.new_trainable_variable(\"b1_cos\", np.zeros(config.oscillators, dtype=np.float64))\n\n def action_infer(state: np.array) -> np.array:\n \"\"\"\n Get state and return feedback.\n\n state: [f_0, f_1, ..., phi_0, phi_1, ..., t_0, t_1, ...]\n return: [phase_feedback0, phase_feedback1, ..., angle_range0, angle_range1, ...]\n\n Discrepancy for torsion spring = alpha / 2 * k * range * T * sin(phi_i)\n \"\"\"\n forces = state[:config.somites]\n phis = state[config.somites:config.somites + config.oscillators]\n tensions = state[config.somites + config.oscillators:]\n\n f_sin, f_cos = self._calc_fs(np.concatenate((forces, tensions)))\n discrepancies = -0.5 * config.caterpillar_params[\"vertical_ts_k\"] * config.caterpillar_params[\"realtime_tunable_ts_rom\"] * tensions * np.sin(phis)\n return f_sin * np.sin(phis) + f_cos * np.cos(phis) - self.get_discrep_coeffs() * discrepancies, np.ones(config.oscillators) * config.caterpillar_params[\"realtime_tunable_ts_rom\"]\n\n return action_infer", "def send2(self, radius, gain, convergence=False):\n # dump to json format\n data = json.dumps(dict({\"gain\" : gain, \"radius\": radius, \"convergence\" : convergence})).encode()\n print(\"Sending value ({}, {}) as data {}\".format(radius, gain, data))\n self.sock.sendall(data)", "def bind(self, other):\n self.vector = vu.circular_convolution(self.vector, other.vector)", "def __init__(self, conv_block_args, 
deconv_block_args, flat_channels,\n flat_kernel_size):\n super().__init__()\n\n # Perform a number of steps validating the input arguments\n self._validate_parameters(conv_block_args, deconv_block_args,\n flat_channels, flat_kernel_size)\n\n # Create lists of conv and deconv blocks from the configurations\n # passed as arguments to this function\n self.conv_blocks = nn.ModuleList([\n ConvBlock(**args)\n for args in conv_block_args\n ])\n\n self.deconv_blocks = nn.ModuleList([\n DeconvBlock(**args)\n for args in deconv_block_args\n ])\n\n # The input and output from the flat channels must be compatible\n # with the configurations for the conv and deconv blocks\n flat_in_channels = conv_block_args[-1]['out_channels']\n flat_out_channels = deconv_block_args[0]['in_channels']\n\n # Setup the flat layers\n self.flat = nn.Conv2d(flat_in_channels, flat_channels,\n flat_kernel_size)\n self.flat2 = nn.Conv2d(flat_channels, flat_channels, 1)\n self.unflatten = nn.ConvTranspose2d(flat_channels, flat_out_channels,\n flat_kernel_size)" ]
[ "0.5964945", "0.5862508", "0.49246627", "0.49153936", "0.48418275", "0.47906286", "0.47593868", "0.4756391", "0.47359917", "0.47261834", "0.4666891", "0.46099734", "0.46059293", "0.45885086", "0.45856825", "0.4538176", "0.45291564", "0.45182663", "0.45091137", "0.4504134", "0.4502937", "0.44911504", "0.44888988", "0.4478636", "0.44704416", "0.4452877", "0.44445273", "0.44310322", "0.44298875", "0.44093698" ]
0.58673096
1
adds item to end of items, will decide to extend or append based on item iterability
def push(self, item):
    if hasattr(item, "__iter__"):
        self.items.extend(item)
    else:
        self.items.append(item)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def append(self, item):\n if self.full or self.pre_allocated:\n # overwrite\n self.data[self.cur] = item\n else:\n self.data.append(item)\n if not self.full:\n self.full = self.cur == self.max - 1\n self.cur = (self.cur + 1) % self.max", "def append(self, item):\n self.update([item])", "def append (self, item):\n pass", "def append(self, item):\n self.items.append(item)", "def add_next(self, item, index):\n if index in self.d_buffer.keys():\n return\n if len(self) == self.size:\n self.pop_first()\n self.add_item(item, index)", "def add_item(self, item):\n self.items.append(item)\n self.length += 1", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def extend(self, items):\n\t\tfor item in items:\n\t\t\tself.append(item)", "def append(self, items):\n self.__add__(items)", "def append(self, item):\n\n # resize array to 2*capacity if max capacity reached\n if self.count == self.capacity:\n self._resize(2 * self.capacity)\n\n # Append the item at the end of array\n self.the_array[self.count] = item\n self.count += 1", "def append_item(self, item):\r\n if not isinstance(item, LR0Item):\r\n raise TypeError\r\n self.itemlist.append(item)", "def add_item(self, item: _T) -> None:\n if item not in self.item_to_index:\n self.item_to_index[item] = len(self.index_to_item)\n self.index_to_item.append(item)", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def append(self, item):\n try:\n i = self.index(item)\n return self[i]\n except ValueError:\n list.append(self, item)\n return item", "def add(self, item):\r\n if len(self.buff)==self.size: self.buff.pop(0)\r\n self.buff.append(item)", "def append(self, item: Any) -> BaseList:\n super().append(item)\n return self", "def add(self, item):\n \n with self.lock:\n if isinstance(item, list):\n self.items.join(item)\n else:\n self.items.append(item)", "def append(self, value):\n assert isinstance(value, Item), type(value)\n list.append(self, value)\n self.emit('appened', value)\n self.emit('modified')", "def add_item(self, item):\n self.items.append(item)\n self.item_count += 1\n self.max_length = max(self.max_length, len(item.samples))", "def __append_to_item_list(self):\n Item.get_item_list().append(self)", "def push(self, item):\n self._tail_iters.append(iter([item]))", "def add_item(self, item):\n if self.head is None:\n # if not self.array:\n self.head = 0\n self.array = [item]\n else:\n # insert item\n self.array.insert(self.head, item)\n\n # reassign head\n self.head += 1", "def insert(self, item):\r\n if not self.is_full():\r\n for i in range(1,len(self.items)):\r\n if self.items[i] is None:\r\n self.items[i] = item\r\n self.size += 1\r\n self.perc_up(i)\r\n return True\r\n return False", "def append(self, item: T) -> None:\n self.insert(item)", "def extend(self, item: Any) -> BaseList:\n super().extend(item)\n return self", "def append(self, item: T) -> None:\n pass", "def append(self, item: T) -> None:\n pass", "def push(self, item):\n self.list.prepend(item)", "def take_item(self, item):\r\n if len(self.items) <= 2:\r\n self.items.append(item)\r\n if self.got_both():\r\n self.working = True", "def _append_with_string_merge(seq, new_item):\n if seq and isinstance(new_item, text_type) and isinstance(seq[-1], text_type):\n s = seq.pop()\n seq.append(s+new_item)\n else:\n seq.append(new_item)" ]
[ "0.7585724", "0.72042567", "0.7136644", "0.7093334", "0.70766443", "0.7074858", "0.702008", "0.69905263", "0.69694656", "0.69540036", "0.68580884", "0.68391997", "0.68382025", "0.68109816", "0.68016183", "0.67989516", "0.6775975", "0.671375", "0.6701978", "0.66974854", "0.66743577", "0.6621128", "0.6620893", "0.66018564", "0.6601427", "0.65988827", "0.65988827", "0.65733546", "0.6573261", "0.6551348" ]
0.7232399
1
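A minimal, self-contained sketch of the push behaviour described by the query above, assuming a hypothetical Stack container whose items attribute is a plain list (the class name and driver code are invented for illustration). Note that strings are themselves iterable, so pushing a string would extend the list character by character.

class Stack:
    def __init__(self):
        self.items = []

    def push(self, item):
        # Iterables are flattened into items; anything else is appended as one element.
        if hasattr(item, "__iter__"):
            self.items.extend(item)
        else:
            self.items.append(item)

s = Stack()
s.push(1)        # scalar -> appended: [1]
s.push([2, 3])   # iterable -> extended: [1, 2, 3]
print(s.items)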
Check number of tweets with a given polarities of given tweets. Check how many tweets are positive, negative or neutral
def check_polarity(df, name="Polarity"):
    polarity = df[name]
    positives = polarity[polarity == 4]
    neutrals = polarity[polarity == 2]
    negatives = polarity[polarity == 0]
    print('Positive Tweets: {}'.format(len(positives)))
    print('Neutral Tweets: {}'.format(len(neutrals)))
    print('Negative Tweets: {}'.format(len(negatives)))
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self, tweets, without_neutral=True):\n correct = 0\n total = 0\n for tweet in tweets:\n assert tweet.polarity is not None\n if tweet.is_neutral() and without_neutral:\n continue\n\n if tweet.polarity == self.predict_sentiment_enum(tweet, without_neutral):\n correct += 1\n\n total += 1\n\n print(\"correct = \", correct, \"total = \", total)\n return correct / total", "def classify(tweets, positives, negatives):\n sentiment_list = makelist(tweets, positives, negatives)\n n_positives = 0\n n_negatives = 0\n n_neutral = 0\n\n # Counts the amount of times each number is in sentiment_list\n for i in sentiment_list:\n if i == 1:\n n_positives += 1\n elif i == -1:\n n_negatives += 1\n else:\n n_neutral += 1\n\n print(\"Trump's tweets classified:\")\n print(\" positive: {}\".format(n_positives))\n print(\" negative: {}\".format(n_negatives))\n print(\" neutral : {}\".format(n_neutral))", "def filter_pos_tweets(tweets):\n\n pos_tweets = []\n\n for tweet in tweets:\n sentiment = unirest.post(\"https://japerk-text-processing.p.mashape.com/sentiment/\",\n headers={\n \"X-Mashape-Key\": os.environ['X_MASHAPE_KEY'],\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"\n },\n params={\n \"language\": \"english\",\n \"text\": tweet.text\n }\n )\n if (sentiment.body['probability']['neg'] <= max_neg) & (sentiment.body['probability']['pos'] >= min_pos):\n pos_tweets.append(tweet)\n log_sentiment(tweet, sentiment)\n\n return pos_tweets", "def stockSentiment(stockName, numTweets=100):\n\n listOfTweets = user.search(stockName, count=numTweets)\n threshold = posSentTweet = negSentTweet = 0\n\n for tweet in listOfTweets:\n analysis = TextBlob(tweet.text)\n if analysis.sentiment.polarity >= threshold:\n posSentTweet = posSentTweet + 1\n else:\n negSentTweet = negSentTweet + 1\n\n if posSentTweet > negSentTweet:\n print(\"Overall Positive\")\n return True\n else:\n print(\"Overall Negative\")\n return False", "def neut_pol_tweets(self):\n positive = sdf.loc[sdf.polarity == 0, ['text']].sample(5).values\n [print(text[0], '\\n') for text in positive];", "def analyze(self, text): #takes the text to be analyzed for sentiment\n #initialize inicial score to 0\n score = 0\n #Create tokenizer instance\n tokenizer = nltk.tokenize.TweetTokenizer()\n #create list of words in a tweets\n tokens = tokenizer.tokenize(text)\n \n #iterate over tokens(list of words)\n for word in tokens:\n #check if word is positive or negative\n if word.lower() in self.positives_words:\n score+=1\n if word.lower() in self.negatives_words:\n score-=1\n #neutral if its neither, doesnt add anything, 0\n return score", "def analyze(self, text):\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for token in tokens:\n if token in self.positives_list:\n score += 1\n elif token in self.negatives_list:\n score -= 1\n\n return score", "def analyze(self, text):\n\n tknzr = nltk.tokenize.TweetTokenizer()\n words = tknzr.tokenize(text)\n \n score = 0\n \n for word in words:\n if word.lower() in self.positives:\n score += 1\n elif word.lower() in self.negatives:\n score -= 1\n else:\n continue\n \n return score", "def aggregate_sentiment(tweets):\r\n\r\n positive = 0\r\n negative = 0\r\n neutral = 0\r\n\r\n for tweet in tweets:\r\n if tweet.sentiment_type == \"positive\":\r\n positive += 1\r\n elif tweet.sentiment_type == \"negative\":\r\n negative += 1\r\n else:\r\n neutral += 1\r\n\r\n result = [[\"Positive\", positive], [\"Neutral\", neutral], [\"Negative\", 
negative]]\r\n return result", "def analyse_tweet(self, tweet):\r\n sentiment = 0\r\n subjects = []\r\n\r\n is_comparison = False # sentiment will be the LHS of the comparison\r\n seen_not = False\r\n for word in myparser.parse(tweet,self.company_names,True):\r\n if word == \"not\" or word == \"don't\":\r\n seen_not = True\r\n elif word in self.positive_words:\r\n sentiment = sentiment + 1\r\n elif word in self.negative_words:\r\n sentiment = sentiment - 1\r\n if word in self.company_names:\r\n subjects += [word]\r\n for (p, c) in self.product_names:\r\n if word == p:\r\n subjects += [c]\r\n for (c,s) in self.comparisons:\r\n if word == c:\r\n sentiment = s\r\n is_comparison = True\r\n if seen_not:\r\n sentiment = -sentiment\r\n\r\n #print((tweet, subjects, sentiment, is_comparison))\r\n\r\n if is_comparison:\r\n subjects += [None, None]\r\n return[(subjects[0], sentiment), (subjects[1], -sentiment)]\r\n else:\r\n return [(sub, sentiment) for sub in subjects]", "def analyze(self, text):\n\n # TODO\n # tokens = tokenizer.tokenize(tweet)\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n score = 0\n\n for word in tokens:\n # iterate over tokens#str.lower\n\n if word.lower() in self.positives:\n score = score+1\n\n elif word.lower() in self.negatives:\n score = score-1\n\n else:\n continue\n return score", "def compute_rating(positive_count, neutral_count, negative_count):\n total = positive_count + neutral_count + negative_count\n if total < 5:\n return 'NEUTRAL'\n\n pos = positive_count/total\n neg = negative_count/total\n\n if pos > 0.3 and neg > 0.3:\n return 'CONTROVERSIAL'\n if pos > 0.7 or (pos > 0.5 and pos >= neg * 2):\n return 'POSITIVE'\n if neg > 0.7 or (neg > 0.5 and neg >= pos * 2):\n return 'NEGATIVE'\n return 'NEUTRAL'", "def test_tonality(self) -> None:\n tonality: float = 0.0\n\n for word in self.report.get_words():\n tonality = tonality + word.sentiment\n\n if tonality < self.rules.tonality_min:\n self.add_error(\"Tonaliteten i rapporten är för negativ.\")\n\n if tonality > self.rules.tonality_max:\n self.add_error(\"Tonaliteten i rapporten är för positiv.\")", "def analyze(self, text):\n tknzr = nltk.tokenize.casual.TweetTokenizer(preserve_case=True, reduce_len=False, strip_handles=False)\n tknTxt = tknzr.tokenize(text)\n sentiment = 0\n \n for i in range(len(tknTxt)):\n if tknTxt[i] in self.posTxt:\n #print(\"POS\")\n #print(tknTxt[i])\n sentiment += 1\n elif tknTxt[i] in self.negTxt:\n #print(\"NEG\")\n #print(tknTxt[i])\n sentiment -= 1\n \n return sentiment", "def analyze(self, text):\n score =0\n token = TweetTokenizer()\n tokens = token.tokenize(text)\n for token in tokens:\n if token.lower() in self.pos_list:\n score+=1\n elif token.lower() in self.neg_list:\n score-=1\n\n return score", "def analyze(self, text):\n #Check each word in text\n tokenizer = nltk.tokenize.TweetTokenizer()\n tokens = tokenizer.tokenize(text)\n total_score = 0\n #Sum the total score\n for token in tokens:\n token = token.lower()\n if token in self.positives:\n total_score = total_score + 1\n elif token in self.negatives:\n total_score = total_score - 1\n else:\n total_score = total_score + 0\n \n return total_score", "def statistics(all_new_tweets, all_retweets, all_quote_tweets):\n length_all_quote_tweets = len(all_quote_tweets)\n length_all_retweets = len(all_retweets)\n length_all_tweets = len(all_new_tweets)\n\n # print(db_twitter.collections.stats())\n total_tweets = length_all_quote_tweets + length_all_retweets + length_all_tweets\n print(\n f\"Number 
of all tweets via streaming collected: {total_tweets - return_rest_tweets_number()}\"\n )\n print(f\"Number of new tweets collected: {length_all_tweets}\")\n print(f\"Number of retweets collected: {length_all_retweets}\")\n print(f\"Number of quote tweets collected: {length_all_quote_tweets}\")\n print(f\"Number of tweets collected via rest is {return_rest_tweets_number()}\")\n\n # Calculates mean sentiment, where 1 is very positive, -1 is very negative\n mean_sentiment = 0.0\n\n for tweet in all_new_tweets:\n mean_sentiment += tweet[\"sentiment_polarity\"]\n mean_sentiment = mean_sentiment / length_all_tweets\n print(\"The mean sentiment of tweets is: \", mean_sentiment)\n\n # Calculates mean subjectivity, where 1 is very subjective, -1 is very objective\n mean_subjectivity = 0.0\n\n for tweet in all_new_tweets:\n mean_subjectivity += tweet[\"subjectivity\"]\n mean_subjectivity = mean_subjectivity / length_all_tweets\n print(\"The mean subjectivity of retweets is: \", mean_subjectivity)\n return mean_sentiment, mean_subjectivity, total_tweets", "def get_nTruePositive(atrank, was_retrieved, gt_ranks):\n TP = (np.logical_and(was_retrieved, gt_ranks <= atrank)).sum()\n return TP", "def analyze(self, text):\n\n sent = 0\n for word in text.split():\n # check each word in tweet\n if word.strip(\":, \").lower() in self.posWords:\n sent += 1\n elif word.strip(\":, \").lower() in self.negWords:\n sent -= 1\n\n return sent", "def high_pol_tweets(self):\n positive = sdf.loc[sdf.polarity == sdf.polarity.max(), ['text']].sample(5).values\n [print(text[0], '\\n') for text in positive];", "def classify(tweets,table,positives,negatives,p_tweets,n_tweets):\n\n\n st = LancasterStemmer()\n\n n_words = len(table)\n in_table = 0\n not_in_table = 0\n\n\n y_pred = np.zeros(len(tweets)).astype('int32')\n\n for i in range(len(tweets)):\n likelihood_pos = 0\n likelihood_neg = 0\n \n # MAP negatives and positives\n for word in tweets[i].split():\n word = st.stem(word.decode('utf-8'))\n if word in table:\n in_table += 1\n likelihood_pos += m.log((table[word][0]+1)/float(positives + 1*n_words))\n likelihood_neg += m.log((table[word][1]+1)/float(negatives + 1*n_words))\n \n else:\n not_in_table += 1\n likelihood_pos += m.log(1/float(positives + 1*n_words))\n likelihood_neg += m.log(1/float(negatives + 1*n_words))\n\n likelihood_pos += m.log(p_tweets/float(p_tweets + n_tweets))\n likelihood_neg += m.log(n_tweets/float(p_tweets + n_tweets))\n\n\n\n # Classify as positive or negative\n if likelihood_neg < likelihood_pos: \n y_pred[i] = 1\n\n prediction = np.bincount(y_pred)\n\n print \"Known words: %d\" % in_table\n print \"Unknown words %d\\n\" % not_in_table\n\n positive_ratio = prediction[1]/float(prediction[1] + prediction[0])\n\n group = \"Positive\" if positive_ratio > 0.5 else \"Negative\" \n\n\n return positive_ratio,group", "def predominant_sentiment(sentiment_aggregate_list):\r\n\r\n positive = int(sentiment_aggregate_list[0][1])\r\n neutral = int(sentiment_aggregate_list[1][1])\r\n negative = int(sentiment_aggregate_list[2][1])\r\n\r\n if positive > neutral and positive > negative:\r\n return \"positive\"\r\n elif neutral > positive and neutral > negative:\r\n return \"neutral\"\r\n elif negative > positive and negative > neutral:\r\n return \"negative\"\r\n else:\r\n return \"mixed\"", "def test_count_35(self):\n value: int = 35\n result: int = 6\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')", "def generate_tweet_scores(data):\n max_rt = 0\n max_likes = 
0\n rt = {}\n likes = {}\n for i in data:\n max_rt = max(data[i][\"retweet_count\"], max_rt)\n max_likes = max(data[i][\"favorite_count\"], max_likes)\n rt[i] = data[i][\"retweet_count\"]\n likes[i] = data[i][\"favorite_count\"]\n for i in data:\n if max_rt > 0:\n rt[i] = rt[i]/max_rt\n if max_likes > 0:\n likes[i] = likes[i]/max_likes\n return rt, likes", "def analyze(self, tweet):\n \n # keeping track of the score\n score = 0\n \n # filtering though tweets exstracting the useful words\n # preserve_case = false maks them lowercase\n tokenizer = nltk.tokenize.TweetTokenizer(preserve_case = False)\n tokens = tokenizer.tokenize(tweet)\n \n # checking word for word the intension and keeping score\n for word in tokens:\n if word in self.dic:\n if self.dic[word] == 1:\n score += 1\n else:\n score -= 1\n# score += self.dic[word]\n return score", "def get_tweet_sentiment(self, tweet):\r\n # create TextBlob object of passed tweet text\r\n polarity = TextBlob(self.clean_tweet(tweet)).sentiment.polarity\r\n if polarity > 0:\r\n return 1.0\r\n if polarity < 0:\r\n return -1.0\r\n return 0", "def evaluate_indicators(self, tweet_words):\n if tweet_words is None:\n return [0. for word in self.indicator_words]\n tweet_words = set(tweet_words)\n evaluate = vectorize(lambda _word: int(_word in tweet_words))\n indicators = evaluate(self.indicator_words)\n return indicators", "def get_tweet_sentiment(self, tweet):\n\n analyzer = SentimentIntensityAnalyzer()\n vs = analyzer.polarity_scores(tweet)\n # set sentiment\n if vs['compound'] >= 0.05:\n return 'positive'\n elif -0.5 < vs['compound'] < 0.05:\n return 'neutral'\n else:\n return 'negative'", "def count_tweets_based_on_words(word, positve_sentiment, negative_sentiment):\n tweet_counter = dict()\n index_db = couch[config.get(\"database\", \"DB_INDEX\")]\n items = index_db.view(view_text_index)\n try:\n tweet_id_holder = items[word].rows[0].value\n except Exception as e:\n return tweet_counter\n\n tweets_all = tweet_database.view(view_tweet_info)\n\n for tweet_id in tweet_id_holder:\n # check redundancy\n if tweet_id not in tweet_index:\n tweet = tweets_all[tweet_id]\n tweet_index.add(tweet_id)\n try:\n # set polarity value\n if negative_sentiment:\n if tweet.rows[0].value[3] < 0:\n tweet_counter = add_state_count(tweet, tweet_counter)\n elif positve_sentiment:\n if tweet.rows[0].value[3] > 0:\n tweet_counter = add_state_count(tweet, tweet_counter)\n else:\n tweet_counter = add_state_count(tweet, tweet_counter)\n except:\n return tweet_counter\n return tweet_counter", "def tpr(positive, negative, fpr):\n threshold = np.percentile(np.asarray(negative), 100 - fpr)\n total_true_positives = sum(positive > threshold)\n\n return total_true_positives / len(positive)" ]
[ "0.69956076", "0.6323075", "0.6051928", "0.6020553", "0.60054296", "0.5880848", "0.58020025", "0.5775723", "0.5775562", "0.57518595", "0.57133067", "0.56743443", "0.56387675", "0.5637214", "0.56288105", "0.5614385", "0.5610941", "0.55810124", "0.5555613", "0.5540526", "0.55161226", "0.54605764", "0.5454292", "0.5452991", "0.5451809", "0.54494506", "0.5429645", "0.5419093", "0.5414495", "0.54108953" ]
0.63810635
1
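A small usage sketch for the polarity-count record above, assuming pandas is installed and that polarity is encoded Sentiment140-style as 0 (negative), 2 (neutral), 4 (positive); the DataFrame contents are invented.

import pandas as pd

df = pd.DataFrame({"Polarity": [4, 4, 0, 2, 0, 4]})

polarity = df["Polarity"]
print('Positive Tweets: {}'.format((polarity == 4).sum()))
print('Neutral Tweets: {}'.format((polarity == 2).sum()))
print('Negative Tweets: {}'.format((polarity == 0).sum()))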
Clean a pandas dataframe containing several tweets for sentiment analysis.
def data_clean(df, name="Tweet"):
    tic = timer()
    twts = []
    # Define a punctuation dictionary so that we can replace each punctuation with an empty space.
    table = str.maketrans('', '', string.punctuation)
    stopWords = set(stopwords.words('senti'))  # Set stop words language to English
    for n in range(df[name].shape[0]):
        text = df[name][n]
        tokens = text.split()  # Split each tweet into list of words.
        tokens = filter(lambda x: x[0] != '@', tokens)  # Remove mentions
        tokens = [word.translate(table) for word in tokens]  # Remove punctuation marks
        tokens = [word for word in tokens if word.isalpha()]  # Remove any word that is not completely alphabetic.
        tokens = [word for word in tokens if len(word) > 1]  # Remove any word that is shorter than two letters
        tokens = [word.lower() for word in tokens]
        tokens = [word for word in tokens if not word in stopWords]  # Remove any stopwords
        # Modified for dumping data without additional commas in csv file
        token = ""
        for i in tokens:
            token += (i + " ")
        twts.append(token)
    toc = timer()
    print("Time for cleaning tweets", (toc - tic))
    return twts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleaninto_df(frame:pd) -> pd:\n # remove repeated characters EXAMPLE: DIMPLLLLEEEEE -> DIMPLE\n # nopunc = word_tokenize(nopunc) this might not work. find something else\n\n stop = stopwords.words('english')\n newStopWords = ['get', 'http','there','and','i','t','it','d']\n stop.extend(newStopWords)\n lemmatizer = WordNetLemmatizer()\n clean = []\n new_col = []\n frame['Cleaned'] = None\n for tweet in frame.content:\n if 'RT' in tweet:\n if tweet.index('RT')>5:\n tweet = tweet[:tweet.index('RT')]\n else:\n tweet = tweet[2:]\n # WHAT ARE WE TRYING TO CLEAN HERE?\n # cleaning with preprocessor library https://pypi.org/project/tweet-preprocessor/\n tweet = ' '.join(re.sub(\"(@\\w+)|([^A-Za-z]+)|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n # changes #November1 -> November: need to remove full hashtag?\n # changes @poetweatherford: -> poetweatherford\n # changes don’t -> don t, children's -> children s\n print(\"after regex:\" + str(tweet))\n clean.append(tweet.lower())\n for clean_tweet in clean:\n word_tokens = word_tokenize(clean_tweet)\n clean_tokens = [word for word in word_tokens if word not in stop]\n stems = []\n for item in clean_tokens:\n stems.append(lemmatizer.lemmatize(item))\n new_sentence = ' '.join(stems)\n new_col.append(new_sentence.lower())\n frame['Cleaned'] = new_col\n return frame", "def clean_all_tweets(self):\n clean_tweets = []\n for tweet in self.tweets:\n clean_tweets.extend(self.clean_tweet(tweet).split())\n return clean_tweets", "def clean_all(self, tweet):\n tweet = self.clean_urls(tweet)\n tweet = self.clean_hashtags(tweet)\n tweet = self.clean_mentions(tweet)\n tweet = self.clean_emojis_and_smileys(tweet)\n tweet = self.clean_unnecessary_characters(tweet)\n tweet = self.clean_reserved_words(tweet)\n\n return tweet", "def clean(df):", "def stop_words_remover(df):\n \n df['Without Stop Words'] = df['Tweets'].apply(str.lower).apply(str.split)\n\n for i in range(len(twitter_df)):\n df['Without Stop Words'][i] = [x for x in df['Without Stop Words'][i] if x not in stop_words_dict['stopwords']]\n return df\n pass", "def stop_words_remover(df):\n stop_words = stop_words_dict['stopwords']\n\n df['Without Stop Words'] = [' '.join([w for w in x.lower().split()\n if w not in stop_words])\n for x in df['Tweets'].tolist()\n ]\n result = []\n l1 = df['Without Stop Words']\n for tweet in l1:\n result.append(tweet.split(' '))\n df['Without Stop Words'] = result\n return df", "def preprocess_tweets(tweets):\n tweets = clean_html(tweets)\n tweets = detweettify(tweets)\n tweets = remove_numbers(tweets)\n tweets = remove_punctuation(tweets)\n tweets = remove_links(tweets)\n return tweets", "def clean_data(df):\n cleaned = []\n for row,i in zip(df['text'],df.index):\n # if ':' in row:\n # row = row.split(':')[1]\n text = re.sub('https:[\\w.\\/]*','',row)\n # a = re.sub(r'[\\.@]', '', row)\n cleaned.append(text)\n df['clean_text'] = pd.Series(cleaned)\n return df", "def clean_and_write_tweets(path, category):\n\n table = str.maketrans({key: None for key in string.punctuation})\n\n test_csv = pd.read_csv(path, dtype=data_columns)\n tweets = deepcopy(list(test_csv.get('text')))\n\n cleaned_tweets = []\n idx = 0\n for tweet in tweets:\n\n if type(tweet) is str:\n\n if len(tweet) == 0:\n continue\n\n # remove URL\n line = remove_url(str(tweet.strip()))\n # remove non Latin characters\n stripped_text = ''\n for c in line:\n stripped_text += c if len(c.encode(encoding='utf_8')) == 1 else ''\n\n stripped_text = (stripped_text.translate(table)).strip()\n if len(stripped_text) > 0 and 
stripped_text.lower() != 'nan':\n cleaned_tweets.append(stripped_text)\n idx += 1\n\n d = {'tweets': cleaned_tweets, 'category': [category] * len(cleaned_tweets)}\n df = pd.DataFrame(data=d)\n\n write_path = path[:path.rfind('/')]\n df.to_csv(os.path.join(write_path, 'cleaned_tweets.csv'), index=False)", "def cleanUpTweets(tweets_list):\r\n # Constants\r\n MIN_TWEET_SIZE = 30\r\n REGEX_URI = '(http|https):\\S+[/a-zA-Z0-9]'\r\n REGEX_AMPERSAND = '(&amp;)'\r\n REGEX_NEWLINE = '\\n'\r\n REGEX_EMOJIS = re.compile(\"[\"\r\n u\"\\U0001F600-\\U0001F64F\" # emoticons\r\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\r\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\r\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\r\n \"]+\", flags=re.UNICODE)\r\n \r\n # Filtered list\r\n clear_tweets = []\r\n \r\n # (ugly) Loop through the list \r\n for lst in tweets_list:\r\n inner_list = []\r\n for t in lst:\r\n if (len(t) >= MIN_TWEET_SIZE):\r\n t = re.sub(REGEX_URI, '', t, flags=re.MULTILINE)\r\n t = re.sub(REGEX_AMPERSAND, ' and ', t, flags=re.MULTILINE)\r\n t = REGEX_EMOJIS.sub('', t)\r\n t = re.sub(REGEX_NEWLINE, '', t, flags=re.MULTILINE)\r\n \r\n # Check tweet size after \"brushing up\"\r\n if (len(t) >= MIN_TWEET_SIZE):\r\n inner_list.append(t)\r\n if len(inner_list) != 0:\r\n clear_tweets.append(inner_list)\r\n \r\n \r\n # Return\r\n return clear_tweets", "def tweet_cleaner(tweets):\n n_tweets = {}\n clean = cleaner()\n for tweet in tweets:\n text = clean.clean_text(tweets[tweet][\"text\"])\n if len(text) > 15:\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def sentiment_analysis(df):\n analyzer = SentimentIntensityAnalyzer()\n polarity = []\n for tweet in df['clean_text'].astype(str):\n sentiment = analyzer.polarity_scores(tweet)\n polarity.append(sentiment['compound'])\n df['sentiment'] = pd.Series(polarity)\n return df", "def tweet_to_df(tweet):\r\n\r\n count = helper(\"./data\")\r\n\r\n dict_ = {}\r\n dict_[\"text\"] = tweet.text\r\n dict_[\"user\"] = tweet.user.description\r\n dict_[\"user_location\"] = tweet.user.location\r\n dict_[\"screem_name\"] = tweet.user.screen_name\r\n dict_[\"account_date_cr\"] = tweet.user.created_at\r\n dict_[\"nb_followers\"] = tweet.user.followers_count\r\n dict_[\"profile_color\"] = tweet.user.profile_background_color\r\n dict_[\"tweet_id\"] = tweet.id_str\r\n dict_[\"tweet_date\"] = tweet.created_at\r\n dict_[\"nb_retweeted\"] = tweet.retweet_count\r\n dict_[\"tweet coordinates\"] = tweet.coordinates\r\n\r\n tweet_data = pd.DataFrame(dict_, index=[0])\r\n return tweet_data.to_csv(f\"C:/Users/info/Desktop/projects/tweetanalyser/data/{count+1}.csv\")", "def gb_cleaner(df):\n df['tag'] = df.tags.apply(retagger)\n \n c_list = df.text.tolist()\n\n clean_corpus = []\n for docs in c_list:\n clean_corpus.append(data_cleaner(docs))\n \n df['clean'] = clean_corpus\n\n df = df.drop(['text', 'tags', 'stars'], axis= 1)\n \n return df", "def clean_data(df):\n\t# create a dataframe of the 36 individual category columns\n\tcategories = df['categories'].str.split(';', expand=True)\n\t# select the first row of the categories dataframe\n\trow = categories.loc[0]\n\t# extract a list of new column names for categories. 
Remove unnecessary chars.\n\tcategory_colnames = row.str.replace(r'-\\w','')\n\t# rename the columns of `categories`\n\tcategories.columns = category_colnames\n\t# Convert category values to just numbers 0 or 1.\n\tcategories = categories.applymap(lambda x: int(x.split('-')[1]))\n\t# drop the original categories column from `df`\n\tdf.drop(['categories'],axis=1, inplace=True)\n\t# concatenate the original dataframe with the new `categories` dataframe\n\tdf = pd.concat([df,categories],axis=1)\n\t# find duplicates\n\tdups = df.duplicated(subset=None, keep='first')\n\t# drop duplicates\n\tdf = df[~(dups)]\n\treturn df", "def cleanse_tweets(tweets):\n cleansed = []\n # set up url regex\n # regex pattern from http://stackoverflow.com/questions/6883049/regex-to-find-urls-in-string-in-python\n regURL = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')\n # for each tweet object find and replace any urls\n for tweet in tweets:\n # split tweet into list\n tList = tweet.text.split()\n for i in range(len(tList)):\n # replace \"bad\" quotes with normal quotes\n # regex pattern from http://stackoverflow.com/questions/24358361/removing-u2018-and-u2019-character\n tList[i] = re.sub(u\"(\\u2018|\\u2019|\\u201c|\\u201d)\", \"'\", tList[i])\n tList[i] = re.sub(u\"(\\xe9)\", \"e\", tList[i])\n tList[i] = re.sub(u\"(\\u2014)\", \"-\", tList[i])\n # remove other non-ascii unicode\n tList[i] = re.sub(r'[^\\x00-\\x7F]+', '', tList[i])\n match = regURL.match(tList[i])\n if match:\n #tList[i] = \"URL\"\n tList[i] = \"\"\n # rejoin updated list into string, add to cleansed list\n tweet.text = ' '.join(tList)\n cleansed.append(tweet)\n return cleansed", "def tweet_cleaner(text, unescape=True, remove_html_tags=True, accented_chars=True,\r\n contractions=True, special_chars=True, expand_hash=True, remove_mention=True,\r\n remove_links=True, convert_abbrevations=True, remove_all_emojis=True,\r\n remove_stop=False, remove_num=True, lemmatization=True, lowercase=True):\r\n if lowercase: # convert all text to lowercase\r\n text = text.lower()\r\n if unescape: # unescape tweets\r\n unescape_tweet(text)\r\n if remove_html_tags: # remove html tags\r\n text = strip_html_tags(text)\r\n if accented_chars: # remove accented characters\r\n text = convert_accented_chars(text)\r\n if contractions: # expand contractions\r\n text = expand_contractions(text)\r\n if special_chars: # convert any special characters\r\n text = replace_special(text)\r\n if expand_hash: # expand words in hashtags\r\n text = expand_tweet(text)\r\n if remove_mention: # remove twitter mentions which start with @ and hashtags\r\n text = remove_mentions(text)\r\n if remove_links: # remove all links in a tweet which start with http or https\r\n text = remove_url(text)\r\n if convert_abbrevations: # convert all abbreviations found to their normal form\r\n text = convert_abbrev_in_text(text)\r\n if remove_all_emojis: # remove all emojis from given text\r\n text = remove_emoji(text)\r\n if remove_stop: # remove stop words\r\n text = remove_stop_words(text)\r\n if lemmatization: # convert tokens to base form\r\n text = lemmatize(text)\r\n\r\n text = remove_punct(text)\r\n text = reduce_spaces(text)\r\n\r\n doc = nlp(text) # tokenize text\r\n\r\n clean_text = []\r\n\r\n for token in doc:\r\n flag = True\r\n edit = token.text\r\n # remove stop words\r\n if stop_words and token.is_stop and token.pos_ != 'NUM':\r\n flag = False\r\n # remove all numbers\r\n if remove_num and (token.pos_ == 'NUM' or token.text.isnumeric()) and 
flag:\r\n flag = False\r\n # convert tokens to base form\r\n if lemmatization and token.lemma_ != \"-PRON-\" and flag:\r\n edit = token.lemma_\r\n # append tokens edited and not removed to list\r\n if edit != \"\" and flag:\r\n clean_text.append(edit)\r\n return (\" \".join(clean_text)).strip()", "def clean_data():\n pd.set_option('display.max_columns', None)\n try:\n df = pd.read_csv('test1/movie.csv')\n except FileNotFoundError:\n df = pd.read_csv('movie.csv')\n\n df.drop(labels=[\"actor_3_facebook_likes\", \"actor_2_name\",\n \"actor_1_facebook_likes\", \"actor_1_name\",\n \"num_voted_users\",\n \"cast_total_facebook_likes\", \"actor_3_name\",\n \"facenumber_in_poster\", \"movie_imdb_link\",\n \"num_user_for_reviews\", \"actor_2_facebook_likes\",\n \"aspect_ratio\", \"color\", \"num_critic_for_reviews\",\n \"director_facebook_likes\"], axis=1, inplace=True)\n df.dropna(subset=[\"gross\"], axis=0, inplace=True)\n return df", "def clean_tweets(data):\n count = 0\n f = open(os.path.dirname(__file__) + '/../tweet_output/ft1.txt','w')\n for item in data:\n if item.get('text'):\n string=item['text'].encode('ascii','ignore')+' (timestamp: '+item['created_at']+')\\n'\n f.write(string)\n if item['text'].encode('ascii','ignore')!=item['text']:\n count=count+1\n f.write('\\n')\n string=str(count)+' tweets contained unicode.'\n f.write(string)\n f.close()", "def data_cleaning(df):\n df = df.dropna(how='all')\n \n return df", "def preprocess(df):\n drop_cols = ['duration_ms', 'key', 'mode', 'time_signature', 'popularity', 'tempo']\n drop_cols += ['track_id', 'track_name', 'artist_name']\n for col in drop_cols:\n if col in list(df.columns):\n df = df.drop(columns=col)\n return df", "def clean_data(df):\r\n \r\n # list of categories to use as column names \r\n categories_cols = [names.split('-')[0] for names in df['categories'][0].split(';')]\r\n \r\n # creating 36 individual category columns\r\n for i in range(len(categories_cols)):\r\n df[categories_cols[i]] = [int(row.split(';')[i].split('-')[1]) for row in df['categories']]\r\n \r\n # labels 0 and 2 in 'related' class are similar (refer to notebook)\r\n # change 2s into 0s to make it more simple\r\n df['related'] = df['related'].map({0:0,1:1,2:0})\r\n \r\n # drop 'categories' column\r\n df.drop('categories', axis=1, inplace=True)\r\n \r\n # drop duplicates\r\n df.drop_duplicates(inplace=True)\r\n \r\n return df", "def clean_data(df):\n # Copy dataframe to local dataframe\n df_clean = df\n # Split category into subcategories\n categories = df.categories.str.split(\";\", expand=True)\n # Label columns according to new label\n categories.columns = categories.iloc[0].str[:-2]\n # Make columns numeric, i.e. 
remove the label substring from the content\n for label, content in categories.iteritems():\n categories[label] = pd.to_numeric(content.str.replace(f\"{label}-\", \"\"))\n # Clean related category to 0/1 - there are outliers with 2s\n categories[\"related\"] = categories[\"related\"].map(lambda x: 1 if x == 2 else x)\n # Drop original category column\n df_clean = df_clean.drop(labels=\"categories\", axis=1)\n # Add categories to dataframe\n df_clean = df_clean.join(categories)\n\n return df_clean", "def preprocess(data_df, remove_stopwords=False):\n data_cp = data_df.copy()\n for i, row in tqdm(data_cp.iterrows(), total=len(data_cp), desc='Preprocessing dataframe contents'):\n\n article_content = _clean(row.article_content, remove_stopwords)\n row.article_content = _tokenize_stem_lem_join(article_content)\n\n _clean_claim = _clean(row.claim, remove_stopwords)\n row.claim = _tokenize_stem_lem_join(_clean_claim)\n\n data_cp.loc[i] = row\n\n return data_cp", "def _clean_dataset(df: pd.DataFrame) -> pd.DataFrame:\n df = df.loc[:, ~df.columns.str.contains(\"^Unnamed\")]\n df = df.dropna()\n return df", "def preprocess_tweet(tweet):\n\n\n clean_tweet, hashtags = separate_hastags_mentions_urls(tweet)\n clean_tweet = remove_emoji_punc(clean_tweet)\n return clean_tweet, hashtags", "def clean_data(dataframe):\n # split categories into seperate\n categories = dataframe.categories.str.split(';', expand=True)\n \n # select the first row&col of the categories dataframe\n row&col = categories.iloc[0]\n cate_col = row&col.apply(lambda x: x[:-2])\n cate.columns = cate_colnames\n \n #convert categories values to numeric instead of strings\n for column in categories:\n categories[column] = categories[column].str[-1]\n categories[column] = categories[column].astype(int)\n \n # replace categories column in dataframe \n dataframe.drop(columns = ['categories'], inplace=True)\n # concatenate the original dataframe with the new `categories` dataframe\n dataframe = dataframe.join(categories)\n \n #drop duplicates\n dataframe.drop_duplicates(inplace=True)\n \n return dataframe", "def dataCleaner(dataframe):\r\n dataframe = dataframe.dropna(how='all')\r\n for col in dataframe:\r\n dataframe[col] = dataframe[col].apply(lambda x : np.nan() if str(x).isspace() else x)\r\n dataframe[col] = dataframe[col].fillna(dataframe[col].mean())\r\n return dataframe", "def create_corpus(df):\r\n corpus=[]\r\n for tweet in tqdm(df['text']):\r\n words=[word.lower() for word in word_tokenize(tweet) if((word.isalpha()==1))]\r\n corpus.append(words)\r\n return corpus" ]
[ "0.72220683", "0.6873865", "0.66959065", "0.66368425", "0.65932864", "0.6456916", "0.6384063", "0.6361564", "0.6303022", "0.62216514", "0.6215786", "0.6181189", "0.60718507", "0.60612637", "0.60575706", "0.60547775", "0.6050489", "0.5991977", "0.5986532", "0.59694624", "0.5964454", "0.5956109", "0.5945414", "0.58997756", "0.5869312", "0.5868335", "0.5823554", "0.58216715", "0.5816405", "0.5753256" ]
0.74418724
0
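The cleaning routine above relies on imports that are not shown (timer, string, NLTK stopwords). A hypothetical harness is sketched below; it substitutes the standard English stopword list for the 'senti' corpus named in the record, and the sample tweets are invented.

import string
from timeit import default_timer as timer  # assumed source of timer() in the record above

import pandas as pd
from nltk.corpus import stopwords  # requires: nltk.download('stopwords')

df = pd.DataFrame({"Tweet": ["@user I love this product!!!", "Worst day ever :("]})

table = str.maketrans('', '', string.punctuation)
stop_words = set(stopwords.words('english'))

cleaned = []
for text in df["Tweet"]:
    tokens = [w for w in text.split() if not w.startswith('@')]   # drop mentions
    tokens = [w.translate(table) for w in tokens]                 # strip punctuation
    tokens = [w.lower() for w in tokens
              if w.isalpha() and len(w) > 1 and w.lower() not in stop_words]
    cleaned.append(" ".join(tokens))

print(cleaned)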
Calculates the euclidean distance between the two labels and the predictions.
def distance_metric(y_true, y_pred):
    diff = y_true - y_pred
    sqr = K.square(diff)
    total = K.sum(sqr, axis=1)
    return K.sqrt(total)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def LevDistMultilabels(y_true, y_pred):\n \n n = y_pred.shape[0]\n D = 0\n for i in range(n):\n D += LevenshteinDistance(y_pred[i,:], y_true[i,:])[-1, -1]\n return D/n", "def euclideanDistance(data1, data2):\n distance = 0\n for x in range(14):\n data1[x] = truncate(data1[x], 3)\n data2[x] = truncate(data2[x], 3)\n dist = truncate((data1[x] - data2[x]) ** 2, 3)\n distance = truncate(distance + dist, 3)\n\n # Final Euclidean distance between train poing and test point:\n distance = truncate(np.sqrt(distance), 3)\n return distance", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def euclidean_distance(a, b):\n return np.linalg.norm(a - b)", "def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def dEuclideanLoss(YPredict, YTrue):\n if YPredict.shape != YTrue.shape:\n YTrue = YTrue.reshape(YPredict.shape)\n return YPredict - YTrue", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def euclidean_dist(self, example1, example2, length):\n dist = 0\n for i in xrange(example1.size - 1):\n nominal = isinstance(self.attributes[i], NominalAttribute)\n if not nominal:\n dist += (example1[i] - example2[i])**2\n elif nominal and (example2[i] != example1[i]):\n dist += 1\n return sqrt(dist)", "def get_distance(self, resp1, resp2):\n feed_dict = {self.anchor: resp1}\n embed1 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n feed_dict = {self.anchor: resp2}\n embed2 = self.sess.run(self.embed_anchor, feed_dict=feed_dict)\n\n return np.sqrt(np.sum((embed1-embed2)**2, 1))", "def _kendall_distance(Y_true, Y_pred, normalize=True, sample_weight=None):\n (n_samples, n_classes) = Y_true.shape\n dists = np.zeros(n_samples)\n\n for sample in range(n_samples):\n for f_class in range(n_classes - 1):\n for s_class in range(f_class + 1, n_classes):\n a = Y_true[sample, f_class] - Y_true[sample, s_class]\n b = Y_pred[sample, f_class] - Y_pred[sample, s_class]\n\n if a * b < 0:\n dists[sample] += 1\n\n if normalize:\n dists[sample] /= n_classes * (n_classes-1) / 2\n\n return np.average(a=dists, weights=sample_weight)", "def _euclidian_classifier(self, X_test: np.array, y_test: np.array):\r\n dist = np.empty([X_test.shape[0], y_test.shape[0]])\r\n for index, target in enumerate(self.targets):\r\n dist[:, index] = np.array([euclidian_distance(sample, self.features[np.where(self.labels == target)])\r\n for sample in X_test])\r\n return dist", "def euclidean_distance(pred, squared=False, eps=1e-12):\n pred_square = pred.pow(2).sum(dim=-1) # (N, )\n prod = torch.mm(pred, pred.t()) # (N, N)\n distance = (pred_square.unsqueeze(1) + pred_square.unsqueeze(0) -\n 2 * prod).clamp(min=eps) # (N, N)\n\n if not squared:\n distance = distance.sqrt()\n\n distance = distance.clone()\n distance[range(len(prod)), range(len(prod))] = 0\n return distance", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def euclidean_distance(a, b):\n\n N, D = tf.shape(a)[0], tf.shape(a)[1]\n M = tf.shape(b)[0]\n a = tf.tile(tf.expand_dims(a, axis=1), (1, M, 1))\n b = tf.tile(tf.expand_dims(b, axis=0), (N, 1, 1))\n dists_to_normal_class = tf.reduce_mean(tf.square(a - b), axis=2)\n dists_to_center = tf.reduce_mean(tf.square(a - 0.0), 
axis=2)\n dists = tf.concat([dists_to_normal_class, dists_to_center], 1)\n return", "def euclidean(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( np.sqrt( ( x.flat_cpt() - y.flat_cpt() )**2 ) )\n\treturn distance", "def euclidean_distance(arr1,arr2):\n distance = np.sqrt(np.sum((arr1 - arr2)**2))\n return distance", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def average_distance(predictions, targets):\n total_distance = 0\n for prediction, target in zip(predictions, targets):\n total_distance += Levenshtein.distance(prediction, target)\n return total_distance / len(predictions)", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def _euclidean_distance(self, points_a, points_b):\n assert len(points_a.shape) == 2\n assert len(points_b.shape) == 2\n\n transpose_b = points_b.T\n dot = np.dot(points_a, transpose_b)\n\n a_mode_sq = np.tile(\n (points_a ** 2).sum(-1, keepdims=True), (1, points_b.shape[0]))\n b_mode_sq = np.tile((transpose_b ** 2).sum(0, keepdims=True),\n (points_a.shape[0], 1))\n\n distance = np.sqrt(a_mode_sq + b_mode_sq - 2 * dot)\n return distance", "def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def pairwise_euclidean_distance(tensor1, tensor2, keepdims=False):\n tensor1 = tf.convert_to_tensor(tensor1)\n tensor2 = tf.convert_to_tensor(tensor2)\n tensor1 = tf.expand_dims(tensor1, 1)\n\n distance = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=-1, keepdims=keepdims))\n\n return distance", "def euclidean_metric(x, y):\n if len(x) != len(y):\n raise ValueError(\"Incompatible dimensions.\")\n return np.linalg.norm(x - y)\n \n # Or a slightly longer way:\n return np.sqrt(np.sum(np.subtract(x, y)**2))\n # Or the longest/worst way:\n total = 0\n for i in xrange(len(x)):\n term = x[i] - y[i]\n term = term**2\n total += term\n total = np.sqrt(total)\n return total", "def euclidean_distance(tensor1, tensor2):\n tensor1 = tf.convert_to_tensor(tensor1)\n tensor2 = tf.convert_to_tensor(tensor2)\n\n distance = tf.sqrt(tf.reduce_sum(tf.square(tensor1 - tensor2), axis=-1))\n\n return distance", "def euclidian_distance(x: np.arrays, y: np.arrays):\r\n diff = x - np.mean(y, axis=0)\r\n return np.sqrt(np.dot(diff.T, diff))", "def euclidean_distance_3D(y_true, y_pred):\n ed3D = K.flatten(K.sqrt(K.sum(K.pow(y_true - y_pred, 2), axis=1)))\n return K_nanmean_infmean(ed3D)", "def euclidean_distance(cls, y, y_target):\n return np.linalg.norm(y - y_target)" ]
[ "0.7383842", "0.7026889", "0.6977112", "0.69555247", "0.6952029", "0.68401", "0.6823526", "0.67569906", "0.67405534", "0.67359006", "0.6658021", "0.66576433", "0.6644144", "0.6634221", "0.66333306", "0.6625327", "0.6588581", "0.65771985", "0.6573899", "0.65667194", "0.6559554", "0.65525323", "0.6535439", "0.6534234", "0.6517551", "0.6517088", "0.65108365", "0.6510152", "0.650657", "0.6496174" ]
0.7344262
1
Supports the following commandline arguments listed below.
    dir_name    directory to check the junit test results
    url         bitbucket/stash url
def getargs():
    parser = argparse.ArgumentParser(description='fetch all failed unit tests')
    parser.add_argument('dir_name', help='directory to check the junit results')
    parser.add_argument('url', help='bitbucket/stash url')
    args = parser.parse_args()
    return args
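A minimal usage sketch for the parser above. It mirrors the main()/fetch_junit pairing that appears in the negatives below; the script name and URL are placeholders, not from the source:

# Hypothetical invocation:
#   python fetch_failed_junit.py /path/to/junit-results https://stash.example.com/builds/42/
args = getargs()
fetch_junit(args.dir_name, args.url)  # assumed companion helper; it parses the JUnit XML and prints failed-test links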
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n args = getargs()\r\n dir_name = args.dir_name\r\n url = args.url\r\n fetch_junit(dir_name, url)", "def test_dir(command, options=\"\", dir_=\".\"):\n\n print(\n \"\"\"\nRunning pytest the test framework\n=================================\n\"\"\"\n )\n command.run(f\"python -m pytest {options} {dir_}\", echo=True, pty=POSIX)", "def fetch_junit(dir_name, url):\r\n dir_to_look = dir_name\r\n failed_junit = []\r\n onlyfiles = [f for f in listdir(dir_to_look) if isfile(join(dir_to_look, f))]\r\n \"\"\" if multiple files are there check all files \"\"\"\r\n for i in onlyfiles:\r\n update_dir = str(directory) + \"/\"\r\n xmldoc = minidom.parse(update_dir + i) # parse file\r\n testsuite = xmldoc.getElementsByTagName(\"testsuite\")[0]\r\n status = xmldoc.getElementsByTagName(\"testsuite\")[0].getAttribute(\"failures\")\r\n if status != \"0\":\r\n testcase = testsuite.getElementsByTagName(\"testcase\")\r\n t_name = testsuite.getElementsByTagName(\"testcase\")[0].getAttribute(\"name\")\r\n for test_cases in testcase:\r\n classname = test_cases.getAttribute(\"classname\")\r\n name = test_cases.getAttribute(\"name\")\r\n failure = test_cases.getElementsByTagName(\"failure\") # check for failure exception\r\n for failed_test in failure:\r\n junit_test = classname + \".\" + name\r\n failed_junit.append(junit_test) # append all tests to a list\r\n\r\n \"\"\"com.cs.tools.content.MyDecksLoaderTest.testGetSlidesXMLHasImageAndThumbnailUrls\r\n package - com.cs.tools.content\r\n group - MyDecksLoaderTest\r\n test_name - testGetSlidesXMLHasImageAndThumbnailUrls\"\"\"\r\n for j in failed_junit:\r\n \"\"\" \r\n Apply some regular expression to find test_name and group and package\r\n \"\"\"\r\n lst1 = j.split('.')\r\n test_name = lst1[-1]\r\n group = lst1[-2]\r\n val1 = re.sub(r'.[a-zA-Z]*$', \"\", j)\r\n package = re.sub(r'.[a-zA-Z]*$', \"\", val1)\r\n # Generate URL to publish failed test link in stash/bitbucket\r\n url = url + \"testReport/junit/\" + package + \"/\" + group + \"/\" + test_name\r\n print(\"[\" + j + \"] (\" + url + \")\")", "def test(command, options=\"\"):\n\n print(\n \"\"\"\nRunning pytest the test framework\n=================================\n\"\"\"\n )\n for dir_ in TEST_DIRECTORIES:\n test_dir(command, options=options, dir_=dir_)\n # command.run(f\"python -m pytest {options} {' '.join(dir_ for dir_ in TEST_DIRECTORIES)}\", echo=True, pty=POSIX)\n\n print(\n \"\"\"\nAll Testing Directories Passed Successfully\n===========================================\n\"\"\"\n )", "def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"api_base_url\", type=str,\n help=\"base url for all tests\")\n parser.add_argument(\"test_file_name\", type=str,\n help=\"name of file containing JSON array of tests\")\n parser.add_argument(\"-f\", \"--format\", default=\"json\", type=str,\n help=\"output format - must be either json or text\")\n\n args = parser.parse_args()\n\n try:\n\n run_tests_from_file(args.api_base_url, args.test_file_name, \n args.format)\n\n except KeyError as e:\n print(\"Required key '%s' not found. Check tests file.\" % str(e.args[0]))\n exit(1)\n\n except FileNotFoundError:\n print(\"Cannot open file '%s'. 
File not found.\" % args.test_file_name)\n exit(1)\n\n except ValueError:\n print(\"Cannot decode JSON from file '%s'.\" % args.test_file_name)\n exit(1)", "def main():\n argument_parser = argparse.ArgumentParser(add_help=True)\n argument_parser.add_argument(\"directory\", type=str,\n help=\"Directory to detect test smells.\")\n args = argument_parser.parse_args()\n \n if len(sys.argv) < 1:\n \n argument_parser.print_help()\n \n else:\n \n if os.path.exists(args.directory) or os.path.isdir(args.directory):\n\n #Stage 1: project level rule checking\n files = python_parser.get_python_files(os.path.abspath(args.directory))\n results_list = project_rule_runner(files)\n \n #Stage 2: test case level rule checking\n #test_case_pairs_list is a list of test cases paired with their file of origin\n filtered_files = python_parser.filter_python_files(files)\n test_case_pairs_list = python_parser.get_test_case_asts(filtered_files)\n \n for test_case_pair in test_case_pairs_list:\n results_list = results_list + test_case_rule_runner(test_case_pair)\n \n #Stage 3: test method level rule checking\n test_method_list = list()\n \n for test_case_pair in test_case_pairs_list:\n test_method_list = test_method_list + python_parser.get_test_asts(test_case_pair)\n \n for test_method in test_method_list: \n results_list = results_list + test_method_rule_runner(test_method)\n \n #Output formatting\n format_output(results_list)\n \n else:\n print(\"Invalid path given.\")", "def test_get_result_directory(self):\n pass", "def test_get_result_directories(self):\n pass", "def test_one_param_from_dir(self):\n assert tuttle_dir(\"test\") == path.join(\".tuttle\", \"test\")", "def test(self, cmdline):\n\n if tm.UPDATE_BEFORE_TEST:\n print \"Updating directory of source ...\"\n mu.update_dir(tm.SOURCE_DIR)\n\n args = mu.get_second_arg(cmdline).strip().split()\n if len(args) == 0:\n print \"Invalid command, test [sourcename] ([maxThread] ([pageLimit]))\"\n return\n elif len(args) == 1:\n self.sourcename, = args\n self.max_thread = '5'\n self.page_limit = '2'\n elif len(args) == 2:\n self.sourcename, self.max_thread = args\n self.page_limit = '2'\n elif len(args) == 3:\n self.sourcename, self.max_thread, self.page_limit = args\n\n print \"Searching directory of %s ...\" % self.sourcename\n self.sourcedir = mu.search_for_source(self.sourcename)\n if not self.sourcedir:\n print \"Directory of %s doesn't exist.\\n\" % self.sourcename\n return\n\n self.sourcetype = self.get_source_type()\n if self.sourcetype == 'blog':\n process = BlogProcess(self.sourcename, self.sourcedir)\n config_files = ('%s.xq' % self.sourcename, 'config.xml', 'globalConfig.xml', 'subSourceConfig.xml')\n elif self.sourcetype == 'forum':\n process = ForumProcess(self.sourcename, self.sourcedir, string.atoi(self.max_thread), self.page_limit)\n config_files = ('%s-url.xq' % self.sourcename, '%s-thread.xq' % self.sourcename, 'finished.xml', 'webForumConfiguration.xml')\n self.test_source(process, self.sourcedir, config_files)", "def run_tests():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Suite or test name\")\n parser.add_argument('-b','--bin-dir',help=\"Directory where Firebird binaries tools are\")\n parser.add_argument('-d','--db-dir',help=\"Directory to use for test databases\")\n parser.add_argument('--archive',action='store_true',help=\"Save last run results to archive\")\n parser.add_argument('--rerun',action='store_true',help=\"Run only tests that don't PASSed in last run\")\n 
parser.add_argument('--untested',action='store_true',help=\"Run only tests that were UNTESTED in last run\")\n parser.add_argument('-v','--verbose',action='store_true',help=\"Be more verbose\")\n parser.add_argument('--verbosity',type=int,choices=[0,1,2],default=1,help=\"Set verbosity; --verbosity=2 is the same as -v\")\n parser.add_argument('-q','--quiet',action='store_true',help=\"Be less verbose\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Provides test results also in the standard XUnit XML format\")\n parser.add_argument('-e','--expect',type=str,metavar=\"FILENAME\",help=\"Test results file to be used as expeted outcomes\")\n if rpyc_available:\n parser.add_argument('--remote',action='store_true',help=\"Connect to remote fbtest server\")\n\n parser.add_argument('-u','--update',action='store_true',help=\"Update last run results with re-run results\")\n parser.add_argument('-w','--password',help=\"SYSDBA password\")\n parser.add_argument('-o','--host',help=\"Remote Firebird or fbtest host machine identification\")\n parser.add_argument('-p','--person',help=\"QA person name\")\n parser.add_argument('-a','--arch',help=\"Firebird architecture: SS, CS, SC, EM\")\n parser.add_argument('-s','--sequence',type=int,help=\"Run sequence number for this target\")\n parser.add_argument('-k','--skip',help=\"Suite or test name or name of file with suite/test names to skip\")\n parser.add_argument('-c','--client',help=\"Use specified Firebird client library\")\n parser.set_defaults(rerun=False,untested=False,update=False,server=False,register=False,\n remote=False,host='localhost',password='masterkey',\n sequence=1,arch='SS',person=UNKNOWN)\n\n script_runner.run_tests(parser.parse_args())", "def test_parse_arguments():\n with TemporaryDirectory() as project_folder:\n args = get_args(['setup', '-d', project_folder])\n\n expected = project_folder\n actual = args.directory\n assert actual == expected\n\n # Ensure that the `setup_project_folder` function is called when `setup`\n # command is passed to the cli\n assert args.func.__name__ == 'setup_project_folder'", "def parseArgs(argv):\n global payload, clientCwd, baseDir, logFolder, testsToRun, suiteToRun, testDirectory\n if len(argv)<1:\n usage()\n sys.exit(2)\n \n suiteToRun = payload = argv[len(argv)-1]\n \n \n if len(os.path.split(payload)) == 0:\n clientCwd = \"./\"\n else:\n clientCwd = os.path.realpath(baseDir)\n \n payload = os.path.join(os.path.realpath(baseDir), payload)\n if not os.path.exists(payload):\n print \"FATAL - Pyro must be given a payload that exists: %s\\n\" % payload\n exit(1)\n \n #####\n #### logFolder = os.path.abspath(logFolder)\n logFolder = payload\n \n print \"Base dir: %s\" % baseDir;\n print \"Real Base dir: %s\" % os.path.realpath(baseDir); \n print \"Client dir: %s\" % clientCwd\n print \"Real Client Dir: %s\" % os.path.realpath(clientCwd)\n print \"Log dir: %s\" % logFolder\n print \"Payload: %s\" % payload", "def test_main():\n\n temp_dir = \"./deepreg_download_temp_dir\"\n branch = Repo(\".\").head.object.hexsha\n\n main(args=[\"--output_dir\", temp_dir, \"--branch\", branch])\n\n # Check downloading all req'd folders into temp, verify that they are the same as in main branch.\n config_dcmp = dircmp(\"./config\", os.path.join(temp_dir, \"config\"))\n assert not has_diff_files(config_dcmp)\n\n data_dcmp = dircmp(\"./data\", os.path.join(temp_dir, \"data\"))\n assert not has_diff_files(data_dcmp)\n\n demos_dcmp = dircmp(\"./demos\", os.path.join(temp_dir, \"demos\"))\n assert not 
has_diff_files(demos_dcmp)\n\n shutil.rmtree(temp_dir)", "def main(args):\n\n if 'log' in args and args['log'] is not None:\n logging.basicConfig(level=LOGGING_LEVELS.get(args['log'].lower(), logging.NOTSET))\n\n test_structure = read_test_file(args['test'])\n tests = build_testsets(args['url'], test_structure)\n\n # Override configs from command line if config set\n for t in tests:\n if 'print_bodies' in args and args['print_bodies'] is not None:\n t.config.print_bodies = safe_to_bool(args['print_bodies'])\n\n if 'interactive' in args and args['interactive'] is not None:\n t.config.interactive = safe_to_bool(args['interactive'])\n\n # Execute all testsets\n failures = execute_testsets(tests)\n\n sys.exit(failures)", "def main(\n workdir: Path = typer.Argument(\".\", help=\"a directory path for working directory\"),\n url: Optional[str] = typer.Option(None, help=\"a download URL\"),\n directory: Path = typer.Option(None, help=\"a directory path for test cases\"),\n no_store: bool = typer.Option(False, help=\"testcases is shown but not saved\"),\n format: str = typer.Option(\"sample-%i.%e\", help=\"custom filename format\"),\n login: bool = typer.Option(False, help=\"login into target service\"),\n cookie: Path = typer.Option(utils.default_cookie_path, help=\"directory for cookie\"),\n) -> None:\n typer.echo(\"Load configuration...\")\n\n if not workdir.exists():\n typer.secho(f\"Not exists: {str(workdir.resolve())}\", fg=typer.colors.BRIGHT_RED)\n raise typer.Abort()\n\n try:\n _config = JudgeConfig.from_toml(workdir)\n except KeyError as e:\n typer.secho(str(e), fg=typer.colors.BRIGHT_RED)\n raise typer.Abort()\n\n __config = _config.dict()\n\n if url or directory:\n # check arguments\n if url:\n __config[\"URL\"] = url\n if directory:\n __config[\"testdir\"] = directory.resolve()\n try:\n config = DownloadJudgeConfig(**__config)\n except KeyError as e:\n typer.secho(str(e), fg=typer.colors.BRIGHT_RED)\n raise typer.Abort()\n\n typer.echo(f\"Download {config.URL}\")\n\n try:\n login_form: Optional[LoginForm] = None\n if login:\n login_form = CLILoginForm()\n testcases = download_tool(\n DownloadArgs(\n url=config.URL,\n login_form=login_form,\n cookie=cookie,\n )\n )\n except Exception as e:\n typer.secho(str(e), fg=typer.colors.BRIGHT_RED)\n raise typer.Abort()\n\n if not no_store:\n try:\n save_tool(\n testcases,\n SaveArgs(\n format=format,\n directory=Path(config.testdir),\n ),\n )\n except Exception as e:\n typer.secho(str(e), fg=typer.colors.BRIGHT_RED)\n raise typer.Abort()", "def _main(compare_with: str | None, directory: str | None, config: str | None) -> None:\n __main(compare_with, directory, config)", "def testParseArguments(self):\n de_object = de.DockerExplorerTool()\n\n prog = sys.argv[0]\n\n expected_docker_root = os.path.join('test_data', 'docker')\n\n args = [prog, '-r', expected_docker_root, 'list', 'repositories']\n sys.argv = args\n\n options = de_object.ParseArguments()\n usage_string = de_object._argument_parser.format_usage()\n expected_usage = '[-h] [-d] [-r DOCKER_DIRECTORY] [-V]'\n expected_usage_commands = '{download,mount,list,history}'\n self.assertIn(expected_usage, usage_string)\n self.assertIn(expected_usage_commands, usage_string)\n self.assertEqual(expected_docker_root, options.docker_directory)", "def cmd_directory(args):\n comp = DirectoryComparator(\n args.source,\n args.target,\n args.tests,\n args.kernels,\n args.codenames,\n )\n comp.compare(args.auxiliary)\n comp.dump_json(args.output)", "def main():\n import argparse\n\n # parse sys.argv\n parser 
= argparse.ArgumentParser(description='stylecheck')\n parser.add_argument('-v', '--version', action='version',\n version=('%(prog)s ' + __version__))\n parser.add_argument('-r', '--root_dir', type=str, default='../../ken3/',\n help='root dir path (default: \\'../../ken3/\\')')\n args = parser.parse_args()\n\n # run each test\n result = list(run(pick_names(args.root_dir), args.root_dir))\n if result:\n print(result)\n return len(result)", "def testGetOutput(self):\n #f = open(\"src_output.root\", 'w')\n #f.close()\n\n #1) missing required -d option (the other required option, -r, is ignored)\n go = getoutput(self.logger, self.maplistopt)\n res = go()\n expRes = CommandResult(2001, 'ERROR: Task option is required')\n self.assertEquals(expRes, res)\n\n #2) -d option is present but -r is missing\n analysisDir = self.reqarea\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir])\n res = go()\n expRes = CommandResult(2002, 'ERROR: Range option is required')\n self.assertEquals(expRes, res)\n\n #3) request passed with the -d option does not exist\n #res = go([\"-d\", analysisDir + \"asdf\"])\n #TODO we expect an appropriate answer from the server.\n #By now, the server just answer an empty list\n\n #4) check correct behaviour without specifying output directory\n #N.B.: -p options is required for tests to skip proxy creation and delegation\n destDir = os.path.join(analysisDir, 'results')\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir(destDir))\n self.assertTrue(os.path.isfile(os.path.join(destDir, '1.root')))\n #Remove the directory\n shutil.rmtree(destDir)\n self.assertFalse(os.path.isdir(destDir))\n self.assertEquals(expRes, res)\n\n #5) correct behavior and output directory specified which exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp'))\n destFile = os.path.join('/tmp', '1.root')\n self.assertTrue(os.path.isfile(destFile))\n os.remove(destFile)\n self.assertFalse(os.path.isfile(destFile))\n self.assertEquals(expRes, res)\n\n #6) correct behavior and output directory specified which does not exists\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"/tmp/asdf/qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('/tmp/asdf/qwerty'))\n #Remove the directory\n shutil.rmtree('/tmp/asdf/qwerty')\n self.assertEquals(expRes, res)\n\n #7) correct behavior and output directory specified which does not exists (relative path)\n go = getoutput(self.logger, self.maplistopt + [\"-d\", analysisDir, \"-r\", \"1\", \"-o\", \"qwerty\", \"-p\"])\n res = go()\n expRes = CommandResult(0, '\\n')\n #check if the result directory has been created\n self.assertTrue(os.path.isdir('qwerty'))\n #Remove the directory\n shutil.rmtree('qwerty')\n self.assertEquals(expRes, res)", "def test_two_params_from_dir(self):\n assert tuttle_dir(\"test1\", \"test2\") == path.join(\".tuttle\", \"test1\", \"test2\")", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", 
\"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def dir_tests():\n return abspath('tests')", "def main():\r\n args = getargs()\r\n testng_file = args.testng_file\r\n url = args.url\r\n fetch_testng(testng_file, url)", "def run(args=None, config=None):\n\tif args:\n\t\tparser = AnalysisParser('config')\n\t\targs = parser.parse_analysis_args(args)\n\t\tconfig = args.config\n\tif not config:\n\t\traise Exception(\"Missing `config` dict argument.\")\n\n\tnumpatches = config['numpatches']\n\tchangesets_list = config['changesets']\n\toutputdir = config['outputdir']\n\tanalyze_all = config['analyze_all'] if 'analyze_all' in config else False\n\tmozcentral_path = config['mozcentral_path'] if 'mozcentral_path' in config else None\n\trunname = config['runname'] if 'runname' in config else None\n\tinclude_guaranteed = config['include_guaranteed'] if 'include_guaranteed' in config else False\n\tuse_active_data = config['use_active_data'] if 'use_active_data' in config else False\n\tskip_py = config['skip_py'] if 'skip_py' in config else True\n\n\tsuites_to_analyze = config['suites_to_analyze']\n\tplatforms_to_analyze = config['platforms_to_analyze']\n\tfrom_date = config['from_date']\n\n\ttimestr = str(int(time.time()))\n\n\tcustom_script = config['custom_scheduling']\n\tcustom_classname = config['custom_classname']\n\tcustom_class = import_class(custom_script, custom_classname)\n\tcustom_class_obj = custom_class(config)\n\n\tfailed_tests_query_json = {\n\t\t\"from\":\"unittest\",\n\t\t\"where\":{\n\t\t\t\"and\":[\n\t\t\t\t{\"eq\":{\"repo.changeset.id12\":None}},\n\t\t\t\t{\"eq\":{\"repo.branch.name\":None}},\n\t\t\t\t{\"eq\":{\"task.state\":\"failed\"}},\n\t\t\t\t{\"eq\":{\"result.ok\":\"false\"}},\n\t\t\t\t{\"or\":[\n\t\t\t\t\t{\"regex\":{\"job.type.name\":\".*%s.*\" % suite}}\n\t\t\t\t\tfor suite in suites_to_analyze\n\t\t\t\t]},\n\t\t\t\t{\"or\": [\n\t\t\t\t\t{\"regex\":{\"job.type.name\":\".*%s.*\" % platform}}\n\t\t\t\t\tfor platform in platforms_to_analyze\n\t\t\t\t]},\n\t\t\t]\n\t\t},\n\t\t\"limit\":100000,\n\t\t\"select\":[{\"name\":\"test\",\"value\":\"result.test\"}]\n\t}\n\n\tlog.info(\"Getting FBC entries...\")\n\n\tchangesets = get_fixed_by_commit_entries(\n\t\tlocaldata=not use_active_data,\n\t\tactivedata=use_active_data,\n\t\tsuites_to_analyze=suites_to_analyze,\n\t\tplatforms_to_analyze=platforms_to_analyze,\n\t\tfrom_date=from_date,\n\t\tlocal_datasets_list=changesets_list,\n\t\tsave_fbc_entries=outputdir\n\t)\n\n\t# For each patch\n\thistogram1_datalist = []\n\ttests_for_changeset = {}\n\tchangesets_counts = {}\n\tcount_changesets_processed = 0\n\tall_changesets = []\n\n\tfor count, tp in enumerate(changesets):\n\t\tif count_changesets_processed >= numpatches:\n\t\t\tcontinue\n\n\t\tif len(tp) == 4:\n\t\t\tchangeset, suite, repo, test_fixed = tp\n\t\telse:\n\t\t\tcontinue\n\n\t\torig_test_fixed = test_fixed\n\t\ttest_fixed = test_fixed.split('ini:')[-1]\n\t\tif 'mochitest' not in suite and 'xpcshell' not in suite:\n\t\t\ttest_fixed = format_testname(test_fixed)\n\n\t\tchangeset = changeset[:12]\n\n\t\tlog.info(\"\")\n\t\tlog.info(\"On changeset \" + \"(\" + str(count) + \"): \" + changeset)\n\t\tlog.info(\"Running analysis: %s\" % str(runname))\n\t\tlog.info(\"Test name: %s\" % test_fixed)\n\n\t\t# Get patch\n\t\tcurrhg_analysisbranch = hg_branch(repo)\n\t\tfiles_url = HG_URL + currhg_analysisbranch + \"json-info/\" + changeset\n\t\tdata = get_http_json(files_url)\n\t\tfiles_modified = data[changeset]['files']\n\t\torig_files_modified = 
files_modified.copy()\n\n\t\t# Get tests that use this patch\n\t\tfailed_tests_query_json['where']['and'][0] = {\"eq\": {\"repo.changeset.id12\": changeset}}\n\t\tfailed_tests_query_json['where']['and'][1] = {\"eq\": {\"repo.branch.name\": repo}}\n\n\t\tlog.info(\"Checking for test failures...\")\n\n\t\tall_tests = []\n\t\tfailed_tests = []\n\t\ttry:\n\t\t\tfailed_tests = query_activedata(failed_tests_query_json)\n\t\texcept Exception as e:\n\t\t\tlog.info(\"Error running query: \" + str(failed_tests_query_json))\n\n\t\tall_failed_tests = []\n\t\tif 'test' in failed_tests:\n\t\t\tall_failed_tests = [test for test in failed_tests['test']]\n\n\t\tif pattern_find(test_fixed, all_failed_tests):\n\t\t\tlog.info(\"Test was not completely fixed by commit: \" + str(test_fixed))\n\t\t\tcontinue\n\n\t\tlog.info(\"Test was truly fixed. Failed tests: \" + str(all_failed_tests))\n\n\t\t# Perform scheduling\n\t\tall_tests_not_run = []\n\t\treturned_data = custom_class_obj.analyze_fbc_entry(\n\t\t\t(changeset, suite, repo, orig_test_fixed),\n\t\t\ttest_fixed\n\t\t)\n\n\t\tif 'skip' in returned_data and returned_data['skip']:\n\t\t\tcontinue\n\t\tif not returned_data['success']:\n\t\t\tall_tests_not_run.append(test_fixed)\n\n\t\tlog.info(\"Number of tests: \" + str(len(all_tests)))\n\t\tlog.info(\"Number of failed tests: \" + str(len([test_fixed])))\n\t\tlog.info(\"Number of files: \" + str(len(files_modified)))\n\t\tlog.info(\"Number of tests not scheduled by per-test: \" + str(len(all_tests_not_run)))\n\t\tlog.info(\"Tests not scheduled: \\n\" + str(all_tests_not_run))\n\n\t\tcset_count = 1\n\t\tif changeset not in changesets_counts:\n\t\t\tchangesets_counts[changeset] = cset_count\n\t\telse:\n\t\t\tchangesets_counts[changeset] += 1\n\t\t\tcset_count = changesets_counts[changeset]\n\n\t\tchangeset_name = changeset + \"_\" + str(cset_count)\n\t\ttests_for_changeset[changeset_name] = {\n\t\t\t'patch-link': HG_URL + currhg_analysisbranch + \"rev/\" + changeset,\n\t\t\t'numfiles': len(files_modified),\n\t\t\t'numtests': len(all_tests),\n\t\t\t'numtestsfailed': 1,\n\t\t\t'numtestsnotrun': len(all_tests_not_run),\n\t\t\t'files_modified': files_modified,\n\t\t\t'suite': suite,\n\t\t\t'runname': runname,\n\t\t\t'orig-test-related': orig_test_fixed,\n\t\t\t'test-related': test_fixed,\n\t\t\t'testsnotrun': all_tests_not_run,\n\t\t}\n\n\t\tfor entry in returned_data:\n\t\t\ttests_for_changeset[entry] = returned_data[entry]\n\n\t\tall_changesets.append(changeset)\n\t\thistogram1_datalist.append((1, 1-len(all_tests_not_run), changeset))\n\t\tcount_changesets_processed += 1\n\n\t\tnumchangesets = len(all_changesets)\n\t\ttotal_correct = sum([\n\t\t\t\t1 if not tests_for_changeset[cset + \"_\" + str(cset_count)]['testsnotrun'] else 0\n\t\t\t\tfor cset in all_changesets\n\t\t])\n\t\tlog.info(\"Running success rate = {:3.2f}%\".format(float((100 * (total_correct/numchangesets)))))\n\n\tlog.info(\"\")\n\n\t## Save results (number, and all tests scheduled)\n\tif outputdir:\n\t\tlog.info(\"\\nSaving results to output directory: \" + outputdir)\n\t\ttimestr = str(int(time.time()))\n\t\tsave_json(tests_for_changeset, outputdir, timestr + '_per_changeset_breakdown.json')\n\n\tf = plt.figure()\n\n\tnumchangesets = len(all_changesets)\n\ttotal_correct = sum([\n\t\t\t1 if not tests_for_changeset[cset + \"_1\"]['testsnotrun'] else 0\n\t\t\tfor cset in all_changesets\n\t])\n\ttotal_incorrect = sum([\n\t\t\t1 if tests_for_changeset[cset + \"_1\"]['testsnotrun'] else 0\n\t\t\tfor cset in all_changesets\n\t])\n\n\tb2 = 
plt.pie(\n\t\t[\n\t\t\t100 * (total_correct/numchangesets),\n\t\t\t100 * (total_no_coverage_data/numchangesets)\n\t\t],\n\t\tcolors=['green', 'red'],\n\t\tlabels=[\n\t\t\t'Successfully scheduled',\n\t\t\t'Not successfully scheduled'\n\t\t],\n\t\tautopct='%1.1f%%'\n\t)\n\n\tplt.legend()\n\n\tlog.info(\"Completed analysis for run: %s\" % str(runname))\n\n\tlog.info(\"Total number of changesets in pie chart: \" + str(numchangesets))\n\n\tlog.info(\"Close figures to end analysis.\")\n\tlog.info(\"Changesets analyzed (use these in other analysis types if possible): \\n\" + str(all_changesets))\n\tplt.show()", "def run_view():\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Results file or directory with result files\")\n parser.add_argument('-x','--xunit',action='store_true',help=\"Save test results in the standard XUnit XML format\")\n parser.add_argument('-c','--cause',action='store_true',help=\"Print cause of fails and errors.\")\n parser.add_argument('-d','--details',action='store_true',help=\"Print details for fails and errors.\")\n #parser.add_argument('-o','--output',matavar='FILENAME',help=\"Save output to file.\")\n\n script_runner.run_view(parser.parse_args())", "def parse_args():\n\n op = OptionParser(usage=\"usage: %prog [opts] input_folder\")\n\n op.add_option(\"--test\",\n dest=\"test\",\n default=False,\n action=\"store_true\",\n help=\"executes the test suite\")\n\n return op.parse_args()", "def main():\n # Disable *.pyc files\n sys.dont_write_bytecode = True\n\n # Add \"..\" to module search path\n cur_dir = os.path.dirname(os.path.realpath(__file__))\n top_dir = os.path.abspath(os.path.join(cur_dir, os.pardir))\n sys.path.append(top_dir)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"-v\", \"--verbose\", action=\"count\", default=0,\n help=\"verbosity level, use: [-v | -vv | -vvv]\")\n parser.add_argument(\"-s\", \"--start-directory\", default=None,\n help=\"directory to start discovery\")\n parser.add_argument(\"-p\", \"--pattern\", default=\"test*.py\",\n help=\"pattern to match test files ('test*.py' default)\")\n parser.add_argument(\"test\", nargs=\"*\",\n help=\"test specs (e.g. 
module.TestCase.test_func)\")\n args = parser.parse_args()\n\n if not args.start_directory:\n args.start_directory = cur_dir\n\n if args.verbose > 2:\n logging.basicConfig(level=logging.DEBUG, format=\"DEBUG: %(message)s\")\n\n loader = unittest.TestLoader()\n if args.test:\n # Add particular tests\n for test in args.test:\n suite = unittest.TestSuite()\n suite.addTests(loader.loadTestsFromName(test))\n else:\n # Find all tests\n suite = loader.discover(args.start_directory, args.pattern)\n\n runner = unittest.TextTestRunner(verbosity=args.verbose)\n result = runner.run(suite)\n return result.wasSuccessful()", "def run_tests():\n os.environ['WORKDIR'] = CONFIG['workdir']\n os.environ['REPORTDIR'] = CONFIG['reportFolder']\n stdout = subprocess.DEVNULL\n if CONFIG['verbose']:\n stdout = None\n # cycle throught version\n total = 0\n valid = 0\n start = time.time()\n for version in utils.get_dirs(CONFIG['versionsFolder']):\n os.environ['VERSION'] = version\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version), CONFIG['workdir']\\\n , CONFIG['clearWorkdir'])\n # cycle throught use case\n for usecase in utils.get_dirs(CONFIG['testsFolder']):\n os.environ['TESTDIR'] = usecase\n if not CONFIG['quiet']:\n print('UseCase test: {}'.format(usecase))\n log_msg('info', 'UseCase test: {}'.format(usecase))\n try:\n folder = os.path.join(CONFIG['testsFolder'], usecase)\n with open(os.path.join(folder, CONFIG['useConfig'])) as usefp:\n jconfig = json.load(usefp)\n # clear workdir if desired\n if 'clearWorkdir' in jconfig.keys() and jconfig['clearWorkdir']:\n utils.copy_dir(os.path.join(CONFIG['versionsFolder'], version)\\\n , CONFIG['workdir'], CONFIG['clearWorkdir'])\n # print('clearing')\n # raise\n cmd = ['py', os.path.join(folder, jconfig['entrypoint'])]\n total += 1\n if jconfig['runType'] == 'single':\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n else:\n for step in range(jconfig['numRuns']):\n if not CONFIG['quiet']:\n print('\\r >Step {}/{} '.format(step+1, jconfig['numRuns'])\\\n , end='', flush=True)\n log_msg('info', 'Step {}/{}'.format(step+1, jconfig['numRuns']))\n subprocess.run(cmd, stdout=stdout, stderr=subprocess.PIPE, check=True)\n if step+1 != jconfig['numRuns']:\n time.sleep(jconfig['interval'])\n except subprocess.CalledProcessError as excp:\n if not CONFIG['quiet']:\n print('Error msg:{}'\\\n .format(excp.stderr.decode().replace('\\r', '').replace('\\n', '|')))\n log_msg('error', excp.stderr.decode())\n else:\n valid += 1\n if not CONFIG['quiet']:\n print('{}.....Passed'.format(usecase))\n log_msg('info', '{} Passed'.format(usecase))\n\n elapse = time.time()-start\n log_msg('info', 'Ran {} tests in {:.3f}s with {} passed'.format(total, elapse, valid))\n print('-'*20)\n print('Ran {} tests in {:.3f}s with {} passed.'.format(total, elapse, valid))\n return total-valid" ]
[ "0.77802515", "0.6323875", "0.6216804", "0.6210566", "0.61194", "0.61187565", "0.6011419", "0.590582", "0.5868668", "0.58155215", "0.58143663", "0.58103776", "0.5791339", "0.57763976", "0.5775211", "0.5773462", "0.57602936", "0.56986207", "0.5685014", "0.5645336", "0.56211627", "0.5607782", "0.557341", "0.5567461", "0.5551245", "0.55491376", "0.554798", "0.55404866", "0.5509348", "0.5500294" ]
0.6335797
1
Counts variants in vcf file and outputs summary dataframe.
def count_variants(vcf_list, sample_list):
    df_lst = []
    sample_vcf_dct = dict(zip(sample_list,vcf_list))
    for s in sample_vcf_dct.keys():
        vcf_in = sample_vcf_dct[s]
        vcf = VariantFile(vcf_in)
        snv = 0
        indel = 0
        for rec in vcf:
            ref_len = len(rec.ref)
            for a in rec.alts:
                if len(a) > 1 or ref_len > 1:
                    indel +=1
                else:
                    snv +=1
        df_lst.append([s,snv,indel])
    out_df = pd.DataFrame(df_lst, columns=['sample','snvs','indels'])
    return out_df
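A minimal usage sketch for the function above, assuming VariantFile is pysam's VCF reader and pd is pandas, both imported in the module that defines count_variants; the file and sample names are placeholders:

vcf_files = ['tumor.vcf.gz', 'normal.vcf.gz']   # placeholder VCF paths
sample_names = ['tumor', 'normal']
summary_df = count_variants(vcf_files, sample_names)
print(summary_df)
#    sample  snvs  indels   <- one row per sample, columns as defined above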
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_summary_statistics(self):\n # Get log 10 total mutation count\n self.log_mut_count = np.log10(self.variant_df.shape[0])\n\n # Get the number of variants stratified by functional location of variant\n # E.g. Exon, Intron, 5'UTR, etc.\n self.functional_counts = pd.DataFrame(self.variant_df['Func.refGene'].value_counts())\n self.functional_counts.columns = [self.sample_name]\n \n # Get the number of variants stratified by exonic functional outcome of variant\n # E.g. Silent, Nonsense, Missense, etc.\n self.mutational_class_counts = (\n pd.DataFrame(self.variant_df['ExonicFunc.refGene'].value_counts())\n )\n self.mutational_class_counts.columns = [self.sample_name]\n \n # Get number of COSMIC curated events\n self.cosmic_variants = self.variant_df[self.variant_df['cosmic70'] != '.']\n self.cosmic_variants = self.cosmic_variants.assign(sample_name = self.sample_name,\n final_id = self.final_id)\n self.cosmic_variant_counts = self.cosmic_variants.shape[0]\n \n # Get depth summary\n self.depth_summary = pd.DataFrame(self.variant_df['depth'].astype(int).describe())\n self.depth_summary.columns = [self.sample_name]\n \n return self.functional_counts, self.mutational_class_counts, self.depth_summary", "def create_variant_counting_workflow(\n vcfs,\n tumour_cell_bams,\n results_h5,\n config,\n):\n\n workflow = pypeliner.workflow.Workflow()\n\n workflow.setobj(\n obj=mgd.OutputChunks('cell_id'),\n value=tumour_cell_bams.keys(),\n )\n\n workflow.transform(\n name='merge_snvs',\n func='biowrappers.components.io.vcf.tasks.merge_vcfs',\n args=(\n [mgd.InputFile(vcf) for vcf in vcfs],\n mgd.TempOutputFile('all.snv.vcf')\n )\n )\n\n workflow.transform(\n name='finalise_snvs',\n func=\"biowrappers.components.io.vcf.tasks.finalise_vcf\",\n args=(\n mgd.TempInputFile('all.snv.vcf'),\n mgd.TempOutputFile('all.snv.vcf.gz', extensions=['.tbi'])\n ),\n kwargs={'docker_config': helpers.get_container_ctx(config['containers'], 'vcftools')}\n )\n\n workflow.subworkflow(\n name='count_alleles',\n func=create_snv_allele_counts_for_vcf_targets_workflow,\n args=(\n config,\n mgd.InputFile('tumour_cells.bam', 'cell_id', extensions=['.bai'], fnames=tumour_cell_bams),\n mgd.TempInputFile('all.snv.vcf.gz'),\n mgd.OutputFile(results_h5),\n ),\n kwargs={\n 'docker_config': helpers.get_container_ctx(config['containers'], 'single_cell_pipeline')\n },\n )\n\n return workflow", "def write_to_vcf(self):\n\n # 1. 
Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n '##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. 
A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' 
% (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)", "def process_VCF(input_vcf, targets_file, out_vcf = None) :\n\n\tfVCF_OUT = None\n\tif out_vcf is not None :\n\t\tfVCF_OUT = open(out_vcf, 'w')\n\tfDUP_OUT = open(targets_file, 'w')\n\n\tvariants_dict = {}\n\tvariants_list = []\n\tnum_redundant, num_kept = 0, 0\n\tfINVCF = open(input_vcf, 'r')\n\tfor line in fINVCF :\n\t\tif line.startswith('#') :\n\t\t\tif line.startswith(\"#CHROM\") :\n\t\t\t\tindividuals = re.split('\\t', line.strip())[9:]\n\t\t\t\tstdout.write(\"%d individuals included in the VCF file: %s\\n\" %(len(individuals), input_vcf))\n\t\t\tif fVCF_OUT :\n\t\t\t\tfVCF_OUT.write(line)\n\t\telse :\n\t\t\ttmp_line = re.split('\\t', line.strip())\n\t\t\tref_base = tmp_line[3]\n\t\t\talt_base = tmp_line[4]\n\t\t\tchrom_id = tmp_line[0]\n\t\t\tchrom_pos = tmp_line[1]\n\t\t\tqual = tmp_line[5]\n\t\t\tfilter = tmp_line[6]\t\t\t\t\t# PASS or FILTERED by VQSR #\n\t\t\t# fix sites having different types of calls: redundant calls #\n\t\t\tif not variants_dict.has_key(chrom_id+':'+chrom_pos) :\n\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\tvariants_list.append(chrom_id+':'+chrom_pos)\n\t\t\telse :\n\t\t\t\tnum_redundant += 1\n\t\t\t\tsame_site_diff_call = re.split('\\t', variants_dict[chrom_id+':'+chrom_pos])\n\t\t\t\ttmp_qual = same_site_diff_call[5]\n\t\t\t\ttmp_filter = same_site_diff_call[6]\n\t\t\t\ttmp_alt_base = same_site_diff_call[4]\n\t\t\t\tfDUP_OUT.write(\"%s\\n%s\\n\" %(variants_dict[chrom_id+':'+chrom_pos], line.strip()))\n\t\t\t\tif (tmp_filter != \"PASS\" and filter != \"PASS\") or (filter == \"PASS\" and tmp_filter == \"PASS\") :\t\t# if two different call both passed the VQSR or both not, we remove it from the final call set #\t\n\t\t\t\t\tvariants_dict.pop(chrom_id+':'+chrom_pos)\n\t\t\t\t\tvariants_list.remove(chrom_id+':'+chrom_pos)\n\t\t\t\t\tif filter == \"PASS\" :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both pass\\n\")\n\t\t\t\t\telse :\n\t\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos+\" both filtered\\n\")\n\t\t\t\telif filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" second kept\\n\")\n\t\t\t\t\tvariants_dict[chrom_id+':'+chrom_pos] = line.strip()\n\t\t\t\t\tnum_kept += 1\n\t\t\t\telif tmp_filter == \"PASS\" and tmp_filter != filter :\n\t\t\t\t\tstdout.write(chrom_id+\" \"+chrom_pos + \" first kept\\n\")\n\t\t\t\t\tnum_kept += 1\n\tstdout.write(\"%d\\t%d\\n\" %(num_redundant, num_kept))\n\n\tif fVCF_OUT :\n\t\tfor i in 
range(len(variants_list)) :\n\t\t\tfVCF_OUT.write(\"%s\\n\" %(variants_dict[variants_list[i]]))\n\t\tfVCF_OUT.close()\n\tfINVCF.close()", "def load_variants_from_vcf( vcf_file ):\n\t\n\tsnps_per_chr = {}\n\tindels_per_chr = {}\n\t\n\ttri_counter = 0\n\t\n\twith open( vcf_file, \"r\" ) as f:\n\t\tline = f.readline()\n\t\twhile line:\n\t\t\tif line[0] != '#':\n\t\t\t\tparts = line.strip().split('\\t')\n\t\t\t\tif not \",\" in parts[4]:\t#only biallelic variants\n\t\t\t\t\tif len( parts[3] ) == len( parts[4] ) and len( parts[3] ) == 1:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tsnps_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tsnps_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\t\t\t\n\t\t\t\t\telif len( parts[3] ) != len( parts[4] ):\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tindels_per_chr[ parts[0] ].append( parts[1] )\n\t\t\t\t\t\texcept KeyError:\n\t\t\t\t\t\t\tindels_per_chr.update( { parts[0]: [ parts[1] ] } )\n\t\t\t\telse:\t#count triallelic variants\n\t\t\t\t\ttri_counter += 1\n\t\t\t\t\t\t\n\t\t\tline = f.readline()\n\tprint \"number of triallelic variants: \" + str( tri_counter )\n\t\n\treturn snps_per_chr, indels_per_chr", "def summary(args):\n from jcvi.formats.base import DictFile\n from jcvi.utils.cbook import percentage, Registry\n\n p = OptionParser(summary.__doc__)\n p.add_option(\"--extra\", help=\"Cross with extra tsv file\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n frfile, statusfile = args\n status = DictFile(statusfile)\n fp = open(frfile)\n registry = Registry() # keeps all the tags for any given gene\n for row in fp:\n seqid, gene, tag = row.split()\n if tag == \".\":\n registry[gene].append(\"outside\")\n else:\n registry[gene].append(\"inside\")\n if tag[0] == \"[\":\n registry[gene].append(\"no_syntenic_model\")\n if tag.startswith(\"[S]\"):\n registry[gene].append(\"[S]\")\n gstatus = status.get(gene, None)\n if gstatus == \"complete\":\n registry[gene].append(\"complete\")\n elif gstatus == \"pseudogene\":\n registry[gene].append(\"pseudogene\")\n elif gstatus == \"partial\":\n registry[gene].append(\"partial\")\n else:\n registry[gene].append(\"gmap_fail\")\n elif tag.startswith(\"[NS]\"):\n registry[gene].append(\"[NS]\")\n if \"random\" in tag or \"Scaffold\" in tag:\n registry[gene].append(\"random\")\n else:\n registry[gene].append(\"real_ns\")\n elif tag.startswith(\"[NF]\"):\n registry[gene].append(\"[NF]\")\n else:\n registry[gene].append(\"syntenic_model\")\n\n inside = registry.count(\"inside\")\n outside = registry.count(\"outside\")\n syntenic = registry.count(\"syntenic_model\")\n non_syntenic = registry.count(\"no_syntenic_model\")\n s = registry.count(\"[S]\")\n ns = registry.count(\"[NS]\")\n nf = registry.count(\"[NF]\")\n complete = registry.count(\"complete\")\n pseudogene = registry.count(\"pseudogene\")\n partial = registry.count(\"partial\")\n gmap_fail = registry.count(\"gmap_fail\")\n random = registry.count(\"random\")\n real_ns = registry.count(\"real_ns\")\n\n complete_models = registry.get_tag(\"complete\")\n pseudogenes = registry.get_tag(\"pseudogene\")\n partial_deletions = registry.get_tag(\"partial\")\n\n m = \"{0} inside synteny blocks\\n\".format(inside)\n m += \"{0} outside synteny blocks\\n\".format(outside)\n m += \"{0} has syntenic gene\\n\".format(syntenic)\n m += \"{0} lack syntenic gene\\n\".format(non_syntenic)\n m += \"{0} has sequence match in syntenic location\\n\".format(s)\n m += \"{0} has sequence match in non-syntenic 
location\\n\".format(ns)\n m += \"{0} has sequence match in un-ordered scaffolds\\n\".format(random)\n m += \"{0} has sequence match in real non-syntenic location\\n\".format(real_ns)\n m += \"{0} has no sequence match\\n\".format(nf)\n m += \"{0} syntenic sequence - complete model\\n\".format(percentage(complete, s))\n m += \"{0} syntenic sequence - partial model\\n\".format(percentage(partial, s))\n m += \"{0} syntenic sequence - pseudogene\\n\".format(percentage(pseudogene, s))\n m += \"{0} syntenic sequence - gmap fail\\n\".format(percentage(gmap_fail, s))\n print(m, file=sys.stderr)\n\n aa = [\"complete_models\", \"partial_deletions\", \"pseudogenes\"]\n bb = [complete_models, partial_deletions, pseudogenes]\n for a, b in zip(aa, bb):\n fw = open(a, \"w\")\n print(\"\\n\".join(b), file=fw)\n fw.close()\n\n extra = opts.extra\n if extra:\n registry.update_from(extra)\n\n fp.seek(0)\n fw = open(\"registry\", \"w\")\n for row in fp:\n seqid, gene, tag = row.split()\n ts = registry[gene]\n print(\"\\t\".join((seqid, gene, tag, \"-\".join(ts))), file=fw)\n fw.close()\n\n logging.debug(\"Registry written.\")", "def load_vcf_data(vcf_file):\n \n if(vcf_file[-3:]==\".gz\"):\n vcf_data=gzip.open(vcf_file, \"r\")\n else:\n vcf_data=open(vcf_file, \"r\")\n \n snp_names=[]\n snp_pos=[]\n genotype_data=[]\n\n missing=0\n \n for line in vcf_data:\n\n if line[0:2] == '##':\n continue\n elif line[0:1] == '#':\n data=line[1:-1]\n data=data.split(\"\\t\")\n if data[0:9]==[\"CHROM\", \"POS\", \"ID\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\", \"FORMAT\"]:\n sample_names=data[9:]\n else:\n print data[0:9]\n raise Exception(\"Bad vcf header line\")\n else:\n data=line[:-1]\n data=data.split(\"\\t\")\n\n if len(data[4].split(\",\"))>1: \n print \"Warning: ignoring multi alleleic site at \" + data[0]+\":\"+data[1] \n continue # multi-allelic sites. 
\n\n if data[2] != \".\":\n snp_names.append(data[2])\n else:\n snp_names.append(data[0]+\":\"+data[1])\n\n snp_pos.append(int(data[1]))\n\n if not all([(x[0]==\".\" and x[2]==\".\") or (x[0] in [\"0\", \"1\"] and x[2] in [\"0\", \"1\"]) for x in data[9:]]):\n raise Exception(\"Could not read line: \" + line) \n \n genotype_data.append([ 3 if x[0]==\".\" and x[2]==\".\" else int(x[0])+int(x[2]) for x in data[9:] ])\n\n return {\"sample_names\":sample_names, \"snp_names\":snp_names, \"snp_pos\":snp_pos, \"genotype_data\":genotype_data}", "def sample_vcf():\n file_content = b\"\"\"##fileformat=VCFv4.2\n##hailversion=0.2.100-2ea2615a797a\n##INFO=<ID=QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=SB,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_pab_max,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQ,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=QD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_MQRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_FS,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=ReadPosRankSum,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=AS_QUALapprox,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SB_TABLE,Number=.,Type=Integer,Description=\"\">\n##INFO=<ID=AS_VarDP,Number=1,Type=Integer,Description=\"\">\n##INFO=<ID=AS_SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=SOR,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=transmitted_singleton,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=omni,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=mills,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=monoallelic,Number=0,Type=Flag,Description=\"\">\n##INFO=<ID=AS_VQSLOD,Number=1,Type=Float,Description=\"\">\n##INFO=<ID=InbreedingCoeff,Number=1,Type=Float,Description=\"\">\n##FILTER=<ID=AC0,Description=\"Allele count is zero after filtering out low-confidence genotypes (GQ < 20; DP < 10; and AB < 0.2 for het calls)\">\n##FILTER=<ID=AS_VQSR,Description=\"Failed VQSR filtering thresholds of -2.7739 for SNPs and -1.0606 for indels\">\n##contig=<ID=chr1,length=248956422,assembly=GRCh38>\n#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\nchr1\t10330\t.\tCCCCTAACCCTAACCCTAACCCTACCCTAACCCTAACCCTAACCCTAACCCTAA\tC\t.\tPASS\tQUALapprox=21493;SB=325,1077,113,694;MQ=32.1327;MQRankSum=0.720000;VarDP=2236;AS_ReadPosRankSum=-0.736000;AS_pab_max=1.00000;AS_QD=5.17857;AS_MQ=29.5449;QD=9.61225;AS_MQRankSum=0.00000;FS=8.55065;AS_FS=.;ReadPosRankSum=0.727000;AS_QUALapprox=145;AS_SB_TABLE=325,1077,2,5;AS_VarDP=28;AS_SOR=0.311749;SOR=1.48100;singleton;AS_VQSLOD=13.4641;InbreedingCoeff=-0.000517845\"\"\"\n file = io.BytesIO(file_content)\n return file", "def write_vcf(snps_dict):\n # Header of the vcf file\n header = f\"\"\"#REF: {REFERENCE_FILE}\n#READS: {READS_FILE}\n#K: {K_VALUE}\n#MAX_SUBST: {H_VALUE}\n#MIN_ABUNDANCE: {M_VALUE}\n\"\"\"\n\n with open(OUTPUT_FILE, 'w') as vcf:\n vcf.write(header)\n for position in sorted(snps_dict.keys()): # For each snp position found,\n # count for each nucleotid the number of time it was found in reads mapped\n # at this position\n nA = 0\n nT = 0\n nC = 0\n nG = 0\n for nucleotid in 
snps_dict[position]:\n if nucleotid == \"A\":\n nA += 1\n elif nucleotid == \"T\":\n nT += 1\n elif nucleotid == \"G\":\n nG += 1\n else:\n nC += 1\n if nA >= int(M_VALUE): # If the same nucleotid was found more than M_VALUE time\n # in reads mapped at this position, write it in the vcf file.\n vcf.write(f\"{position}\\t{GENOME[position]}\\tA\\t{nA}\\n\")\n if nT >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tT\\t{nT}\\n\")\n if nG >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tG\\t{nG}\\n\")\n if nC >= int(M_VALUE):\n vcf.write(f\"{position}\\t{GENOME[position]}\\tC\\t{nC}\\n\")", "def finalize(param, input_files='count_files'):\n\n import csv\n HELPER.writeLog('Collecting featureCount raw counts ... \\n', param)\n\n #check which of these files are actually available\n working_files = [iFile for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get feature ID using the first column in the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #For featureCount output, we want to skip the first two lines as they\n #include the featureCount call and the headers which we don't want\n next(csv_reader, None)\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n counts = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the expression values\n header = 'ID'\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n csv_file = open(param[input_files][idx])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n\n #Here too we want to skip the first two lines, before getting the counts\n next(csv_reader, None)\n next(csv_reader, None)\n #Now start getting the counts (row[6]) and add in the ID (counts[i]) before it\n idx = 0\n for row in csv_reader:\n counts[idx] = counts[idx]+'\\t'+row[6]\n idx += 1\n csv_file.close()\n\n #output the file\n out_file = param['working_dir']+'deliverables/featureCount_raw_counts.txt'\n out_handle = open(out_file, 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(counts)):\n out_handle.write(counts[i]+'\\n')\n out_handle.close()\n\n #output_phenotype_file\n HELPER.writeLog('Writing phenotype data ... 
\\n', param)\n MODULE_HELPER.output_sample_info(param)\n\n #write summary stats\n #featureCount does this on its own so we can just fetch each summary file\n #check which of these files are actually available\n working_files = [iFile+'.summary' for iFile in param[input_files] if iFile != '']\n\n if len(working_files) > 0:\n #get Status column from summary file using the first column in\n #the first file in the list of working files\n csv_file = open(working_files[0])\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Here, we want to skip the first line, as it simply points to the\n #alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start by taking the list of identifier,\n #which is the first column in the file\n entry = [row[0] for row in csv_reader]\n csv_file.close()\n\n #get all the summary stats for each sample\n header = 'Status'\n\n for idx in range(param['num_samples']):\n if param[input_files] != '':\n header = header+'\\t'+param['stub'][idx]\n #Fetch the corresponding sample's summary file\n csv_file = open(param[input_files][idx]+'.summary')\n csv_reader = csv.reader(csv_file, delimiter='\\t')\n #Again, we want to skip the first line, as it simply points\n #to the alignment file used when running featureCount\n next(csv_reader, None)\n\n #Now start getting the stats (row[1]) and add in the Status\n # (counts[i]) before it\n i = 0\n for row in csv_reader:\n entry[i] = entry[i]+'\\t'+row[1]\n i += 1\n csv_file.close()\n #output the file\n out_handle = open(param['working_dir']+\n 'results/featureCount/featureCount_stats.txt',\n 'w')\n out_handle.write(header+'\\n')\n\n for i in range(len(entry)):\n out_handle.write(entry[i]+'\\n')\n out_handle.close()\n else:\n print 'featureCount was not run successfully on any of the files..\\n'", "def getQVcounts(genotypesFilename, caseNames, controlNames):\r\n\r\n\tif \"genotypes\" in genotypesFilename:\r\n\t\tcaseCounts, controlCounts = getQVcountsForDominantModel(genotypesFilename, caseNames, controlNames)\r\n\telif \"comphet\" in genotypesFilename:\r\n\t\tcaseCounts, controlCounts = getQVsForComphetModel(genotypesFilename, caseNames, controlNames)\r\n\telse:\r\n\t\traise NameError(\"Unclear whether variants file is dominant or comphet.\\nRename with 'genotypes' or 'comphet'.\")\r\n\t\t\r\n\treturn caseCounts, controlCounts", "def count_variants(filename, content=None):\n open_fn = gzip.open if is_gz_file(filename) else open\n count = 0\n with open_fn(filename, \"rt\") as ifile:\n for line in ifile:\n if not line.startswith(\"#\"):\n if content:\n if content in line:\n count += 1\n else:\n count += 1\n return count", "def to_countvectors(self):\n if hasattr(self, \"ifp\"):\n df = self.to_dataframe()\n return to_countvectors(df)\n raise AttributeError(\"Please use the `run` method before\")", "def read_vcf(vcf):\n vcfdata = {}\n with open(vcf, 'r') as f:\n for line in f:\n if line.startswith('#'):\n continue\n else:\n tmp = line.strip().split()\n pos = int(tmp[1])\n sample_info = tmp[9:]\n # Get the genotype calls from the sample info fields\n gt = [t.split(':')[0] for t in sample_info]\n hap_calls = []\n for g in gt:\n if g == '0/0':\n hap_calls.append(0)\n elif g == '1/1':\n hap_calls.append(1)\n else:\n hap_calls.append('NA')\n site_pi = pairwise_diversity(hap_calls)\n vcfdata[pos] = site_pi\n return vcfdata", "def _load_vessel_counts(self, crs: str = 'epsg:4326') -> gpd.GeoDataFrame:\n with rasterio.open(self.path) as raster:\n vessel_counts = raster.read(1)\n\n # get locations where there were vessels 
(non-zero counts)\n non_zero_ix = np.argwhere(vessel_counts > 0)\n lon, lat = raster.xy(non_zero_ix[:, 0], non_zero_ix[:, 1])\n counts = vessel_counts[non_zero_ix[:, 0], non_zero_ix[:, 1]]\n\n gdf = gpd.GeoDataFrame(\n {\n 'counts': counts,\n 'lon': lon,\n 'lat': lat\n },\n geometry=gpd.points_from_xy(lon, lat)\n )\n return gdf.set_crs(crs)", "def variants(context, collaborator):\n LOG.info(\"Running scout export variants\")\n adapter = context.obj['adapter']\n \n header = [\"#Chrom\\tStart\\tEnd\\tTranscript\\tRefSeq\\tHgncSymbol\\tHgncID\"]\n\n if not collaborator:\n click.echo(\"Please provide a collaborator to export variants\")\n context.abort()\n\n header = [\n \"##fileformat=VCFv4.2\",\n \"#CHROM\\tPOS\\tID\\tREF\\tALT\\tQUAL\\tFILTER\\tINFO\"\n ]\n\n for line in header:\n click.echo(line)\n\n for variant in export_causatives(adapter, collaborator):\n click.echo(variant)", "def export(ctx, outfile):\n adapter = ctx.obj['adapter']\n \n logger.info(\"Export the variants from {0}\".format(adapter))\n nr_cases = 0\n \n existing_chromosomes = set(adapter.get_chromosomes())\n \n ordered_chromosomes = []\n for chrom in CHROMOSOME_ORDER:\n if chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n existing_chromosomes.remove(chrom)\n for chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n \n nr_cases = adapter.cases().count()\n logger.info(\"Found {0} cases in database\".format(nr_cases))\n\n head = HeaderParser()\n head.add_fileformat(\"VCFv4.3\")\n head.add_meta_line(\"NrCases\", nr_cases)\n head.add_info(\"Obs\", '1', 'Integer', \"The number of observations for the variant\")\n head.add_info(\"Hom\", '1', 'Integer', \"The number of observed homozygotes\")\n head.add_info(\"Hem\", '1', 'Integer', \"The number of observed hemizygotes\")\n head.add_version_tracking(\"loqusdb\", __version__, datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n for chrom in ordered_chromosomes:\n length = adapter.get_max_position(chrom)\n head.add_contig(contig_id=chrom, length=str(length))\n\n print_headers(head, outfile=outfile)\n \n for chrom in ordered_chromosomes:\n for variant in adapter.get_variants(chromosome=chrom):\n chrom = variant['chrom']\n pos = variant['start']\n ref = variant['ref']\n alt = variant['alt']\n observations = variant['observations']\n homozygotes = variant['homozygote']\n hemizygotes = variant['hemizygote']\n info = \"Obs={0}\".format(observations)\n if homozygotes:\n info += \";Hom={0}\".format(homozygotes)\n if hemizygotes:\n info += \";Hem={0}\".format(hemizygotes)\n variant_line = \"{0}\\t{1}\\t.\\t{2}\\t{3}\\t.\\t.\\t{4}\\n\".format(\n chrom, pos, ref, alt, info)\n print_variant(variant_line=variant_line, outfile=outfile)", "def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file", "def Add_in_vcf_SO(infofile,\n vcf_path,\n output_vcf, ):\n ori_format2info = ['AF', 'AD']\n\n if type(vcf_path) == str:\n vcf_readed = vcf.Reader(open(vcf_path, 'r'))\n else:\n try:\n vcf_readed = vcf.Reader(fsock=vcf_path)\n except:\n raise IOError('Wrong vcf, it is a %s' % str(type(vcf_path)))\n\n info_df = 
pd.read_csv(infofile, sep='\\t')\n info_df.index = info_df.iloc[:, [1, 2, 3]].astype(str).sum(1)\n samples = count_sample(vcf_readed.samples)\n\n if len(samples) != 1:\n return\n\n new_infos = vcf_readed.infos\n machine = list(new_infos.values())[0]\n\n field1 = \"SAD\"\n field2 = \"SAF\"\n field3 = \"PoS\"\n field1_info = [field1, 'R', 'Integer',\n \"(REF base count, alt base count). Self cal coverage from bam file. It is different from AD. It is rawer than AD.\"]\n field2_info = [field2, 'R', 'Float',\n \"Alt base count divide the total reads number in this pos. Self cal frequency from bam file. It is different from AF. It is rawer than AF.\"]\n field3_info = [field3, '.', 'Integer',\n \"A field which describe this file is single only analysis or pair analysis. 1 for single analysis, 2 for pair analysis.\"]\n new_infos[field1] = machine._make(field1_info + [None, None])\n new_infos[field2] = machine._make(field2_info + [None, None])\n new_infos[field3] = machine._make(field3_info + [None, None])\n for ori_format in ori_format2info:\n if ori_format not in new_infos.keys():\n new_infos[ori_format] = list(new_infos.values())[0]._make(\n list(vcf_readed.formats[ori_format]._asdict().values()) + [None, None])\n\n vcf_readed.infos = new_infos\n\n vcf_writer = vcf.Writer(open(output_vcf, 'w'),\n vcf_readed)\n\n for record in tqdm(vcf_readed):\n if record.is_snp:\n # SNP instead of indel\n query_index = record.CHROM + str(record.POS - 1) + str(record.REF)\n if query_index not in info_df.index:\n # it means there are np reads here\n ref_cov, alt_cov = 0, 0\n else:\n row = info_df.loc[query_index, :]\n if len(row.shape) == 2:\n # if multiple index occur\n row = row.iloc[0, :]\n\n ref_base = row[\"Reference\"] # should same as record.REF\n ref_cov = row[ref_base.upper()]\n alt_cov = row[str(record.ALT[0]).upper()]\n\n SAD = [int(ref_cov),\n int(alt_cov)]\n try:\n SAF = round(float(alt_cov) / sum(SAD), 4)\n except ZeroDivisionError:\n SAF = 0\n\n record.INFO[field1] = tuple(SAD)\n record.INFO[field2] = SAF\n record.INFO[field3] = 1\n else:\n # for indel we just ignore it.\n # write the original info\n pass\n\n for sample in record.samples:\n data = dict(sample.data._asdict())\n for ori_format in ori_format2info:\n if data.get(ori_format,''):\n record.INFO[ori_format] = data[ori_format]\n\n vcf_writer.write_record(record)\n vcf_writer.close()", "def read_annovar_vcf(input_vcf):\n hash_table = {}\n vcf_reader = vcf.Reader(filename=input_vcf)\n\n for i, r in enumerate(vcf_reader):\n hash_variant = {}\n\n hash_fields = dict(r.INFO)\n hash_fields.update(dict(zip(r.samples[0].data._fields, r.samples[0].data)))\n\n chrom = r.CHROM\n pos = str(r.POS)\n ref = str(r.REF)\n alt = str(r.ALT[0])\n l_samples = len(r.samples)\n\n if r.FILTER == []:\n hash_variant['FILTER'] = \"PASS\"\n else:\n hash_variant['FILTER'] = str(r.FILTER)\n\n hash_variant['QUAL'] = str(r.QUAL)\n\n hash_variant['chr'] = chrom.strip()\n hash_variant['pos'] = pos.strip()\n hash_variant['ref'] = ref.strip()\n hash_variant['alt'] = alt.strip()\n hash_variant['Func.refGene'] = str(hash_fields.get('Func.refGene', '.')[0])\n hash_variant['Gene.refGene'] = str(hash_fields.get('Gene.refGene', '.')[0])\n hash_variant['GeneDetail.refGene'] = str(hash_fields.get('GeneDetail.refGene', '.')[0])\n hash_variant['ExonicFunc.refGene'] = str(hash_fields.get('ExonicFunc.refGene', '.')[0])\n hash_variant['AAChange.refGene'] = str(hash_fields.get('AAChange.refGene', '.')[0])\n hash_variant['cytoBand'] = str(hash_fields.get('cytoBand', '.')[0])\n 
hash_variant['ExAC_ALL'] = str(hash_fields.get('ExAC_ALL', '.'))\n hash_variant['ExAC_AFR'] = str(hash_fields.get('ExAC_AFR', '.'))\n hash_variant['ExAC_AMR'] = str(hash_fields.get('ExAC_AMR', '.'))\n hash_variant['ExAC_EAS'] = str(hash_fields.get('ExAC_EAS', '.'))\n hash_variant['ExAC_FIN'] = str(hash_fields.get('ExAC_FIN', '.'))\n hash_variant['ExAC_NFE'] = str(hash_fields.get('ExAC_NFE', '.'))\n hash_variant['ExAC_OTH'] = str(hash_fields.get('ExAC_OTH', '.'))\n hash_variant['ExAC_SAS'] = str(hash_fields.get('ExAC_SAS', '.'))\n hash_variant['avsnp147'] = str(hash_fields.get('avsnp147', '.')[0])\n hash_variant['SIFT_score'] = str(hash_fields.get('SIFT_score', '.')[0])\n hash_variant['SIFT_pred'] = str(hash_fields.get('SIFT_pred', '.')[0])\n hash_variant['Polyphen2_HDIV_score'] = str(hash_fields.get('Polyphen2_HDIV_score', '.')[0])\n hash_variant['Polyphen2_HDIV_pred'] = str(hash_fields.get('Polyphen2_HDIV_pred', '.')[0])\n hash_variant['Polyphen2_HVAR_score'] = str(hash_fields.get('Polyphen2_HVAR_score', '.')[0])\n hash_variant['Polyphen2_HVAR_pred'] = str(hash_fields.get('Polyphen2_HVAR_pred', '.')[0])\n hash_variant['LRT_score'] = str(hash_fields.get('LRT_score', '.')[0])\n hash_variant['LRT_pred'] = str(hash_fields.get('LRT_pred', '.')[0])\n hash_variant['MutationTaster_score'] = str(hash_fields.get('MutationTaster_score', '.')[0])\n hash_variant['MutationTaster_pred'] = str(hash_fields.get('MutationTaster_pred', '.')[0])\n hash_variant['MutationAssessor_score'] = str(hash_fields.get('MutationAssessor_score', '.')[0])\n hash_variant['MutationAssessor_pred'] = str(hash_fields.get('MutationAssessor_pred', '.')[0])\n hash_variant['FATHMM_score'] = str(hash_fields.get('FATHMM_score', '.')[0])\n hash_variant['FATHMM_pred'] = str(hash_fields.get('FATHMM_pred', '.')[0])\n hash_variant['PROVEAN_score'] = str(hash_fields.get('PROVEAN_score', '.')[0])\n hash_variant['PROVEAN_pred'] = str(hash_fields.get('PROVEAN_pred', '.')[0])\n hash_variant['VEST3_score'] = str(hash_fields.get('VEST3_score', '.')[0])\n hash_variant['CADD_raw'] = str(hash_fields.get('CADD_raw', '.')[0])\n hash_variant['CADD_phred'] = str(hash_fields.get('CADD_phred', '.')[0])\n hash_variant['DANN_score'] = str(hash_fields.get('DANN_score', '.')[0])\n hash_variant['fathmm-MKL_coding_score'] = str(hash_fields.get('fathmm-MKL_coding_score', '.')[0])\n hash_variant['fathmm-MKL_coding_pred'] = str(hash_fields.get('fathmm-MKL_coding_pred', '.')[0])\n hash_variant['MetaSVM_score'] = str(hash_fields.get('MetaSVM_score', '.')[0])\n hash_variant['MetaSVM_pred'] = str(hash_fields.get('MetaSVM_pred', '.')[0])\n hash_variant['MetaLR_score'] = str(hash_fields.get('MetaLR_score', '.')[0])\n hash_variant['MetaLR_pred'] = str(hash_fields.get('MetaLR_pred', '.')[0])\n hash_variant['integrated_fitCons_score'] = str(hash_fields.get('integrated_fitCons_score', '.')[0])\n hash_variant['integrated_confidence_value'] = str(hash_fields.get('integrated_confidence_value', '.')[0])\n hash_variant['GERP++_RS'] = str(hash_fields.get('GERP++_RS', '.')[0])\n hash_variant['phyloP7way_vertebrate'] = str(hash_fields.get('phyloP7way_vertebrate', '.')[0])\n hash_variant['phyloP20way_mammalian'] = str(hash_fields.get('phyloP20way_mammalian', '.')[0])\n hash_variant['phastCons7way_vertebrate'] = str(hash_fields.get('phastCons7way_vertebrate', '.')[0])\n hash_variant['phastCons20way_mammalian'] = str(hash_fields.get('phastCons20way_mammalian', '.')[0])\n hash_variant['SiPhy_29way_logOdds'] = str(hash_fields.get('SiPhy_29way_logOdds', '.')[0])\n\n l_samples = 
r.samples[::]\n l_sample_ids = []\n for sample in l_samples:\n sample_id = sample.sample\n sample_gt = sample.data.GT\n hash_variant[sample_id] = sample_gt\n l_sample_ids.append(sample_id)\n\n hash_table[(chrom, pos, alt)] = hash_variant\n\n return hash_table, l_sample_ids", "def count_sites_under_condition_vcf(vcf_file,chrom,start,end,mincov=0,maxcov=10000,inds=\"all\",bgzip=True,nb_ind_with_min_cov=\"all\",nalleles=[1,2],snps=False):\n\tprint vcf_file\n\tinput_vcf=vcf.Reader(fsock=None, filename=vcf_file, compressed=bgzip, prepend_chr=\"False\", strict_whitespace=False)#open the vcf parser\n\tnsites_OK=0\n\tnsites_total=0\n\t#print \"in count_sites_under_condition_vcf nb_ind_with_min_cov :\",nb_ind_with_min_cov, \" inds\", ind\n\tif chrom!=\"all\":\n\t\t\tprint chrom,start,end\n\t\t\tcheck=len(sh.tabix(vcf_file,str(chrom)+\":\"+str(start)+\"-\"+str(end)))\n\t\t\t#print check\n\t\t\t#print \"check;' \",check,\"'\"\n\t\t\tif check==0: \n\t\t\t\treturn [0,0]\n\t\t\tfor record in input_vcf.fetch(chrom,start,end):\n\t\t\t\t#print record# for every site\n\t\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=nalleles,nb_ind_with_min_cov=nb_ind_with_min_cov,snps=snps)# check if the site respect our condition\n\t\t\t\tnsites_total+=1\n\t\t\t\tif cond:# if it does\n\t\t\t\t\t#raise Exception\n\t\t\t\t\t#if any([int(sample['DP'])<5 for sample in record.samples]): print [int(sample['DP']) for sample in record.samples] # to check this argument nb_ind_with_min_cov\n\t\t\t\t\tnsites_OK+=1\n\telif chrom==\"all\":\n\t\tfor record in input_vcf:# for every site\n\t\t\tcond=checkSnp_Cov(input_vcf,record,mincov,maxcov,inds=inds,nalleles=nalleles,nb_ind_with_min_cov=nb_ind_with_min_cov,snps=snps)# check if the site respect our condition\n\t\t\tnsites_total+=1\n\t\t\tif cond:# if it does\n\t\t\t\tnsites_OK+=1\n\treturn [nsites_OK,nsites_total]", "def getTotalCaseAndControlCounts(genotypesFilename):\r\n\r\n\tcomphetSuffix = \"\"\r\n\tif \"comphet\" in genotypesFilename:\r\n\t\tcomphetSuffix = \" (#1)\"\r\n\r\n\t# We read through the whole file. 
Might take a while, but easier than dealing with all edge cases.\r\n\tmaxCoveredCasePercentage = 0\r\n\tmaxCoveredControlPercentage = 0\r\n\treader = csv.reader(open(genotypesFilename, \"r\"))\r\n\theader = next(reader)\r\n\r\n\tfor variant in reader:\r\n\r\n\t\tvariant = dict(zip(header, variant))\r\n\t\tcasePercentage = float(variant[\"Covered Case Percentage\" + comphetSuffix])/100.0\r\n\t\tif casePercentage > maxCoveredCasePercentage:\r\n\t\t\tmaxCoveredCasePercentage = casePercentage\r\n\t\t\tcoveredCases = int(variant[\"Covered Case\" + comphetSuffix])\r\n\t\t\ttotalCases = int(round(coveredCases/casePercentage))\r\n\r\n\t\tcontrolPercentage = float(variant[\"Covered Ctrl Percentage\" + comphetSuffix])/100.0\r\n\t\tif controlPercentage > maxCoveredControlPercentage:\r\n\t\t\tmaxCoveredControlPercentage = controlPercentage\r\n\t\t\tcoveredControls = int(variant[\"Covered Ctrl\" + comphetSuffix])\r\n\t\t\ttotalControls = int(round(coveredControls/controlPercentage))\r\n\treturn totalCases, totalControls", "def _print_summary_counts(\n self, out_file, categories, result_events_by_status, extra_rows):\n\n # Get max length for category printed name\n category_with_max_printed_name = max(\n categories, key=lambda x: len(x[1]))\n max_category_name_length = len(category_with_max_printed_name[1])\n\n # If we are provided with extra rows, consider these row name lengths.\n if extra_rows is not None:\n for row in extra_rows:\n name_length = len(row[0])\n if name_length > max_category_name_length:\n max_category_name_length = name_length\n\n self._print_banner(out_file, \"Test Result Summary\")\n\n # Prepend extra rows\n if extra_rows is not None:\n for row in extra_rows:\n extra_label = \"{}:\".format(row[0]).ljust(\n max_category_name_length + 1)\n out_file.write(\"{} {:4}\\n\".format(extra_label, row[1]))\n\n for category in categories:\n result_status_id = category[0]\n result_label = \"{}:\".format(category[1]).ljust(\n max_category_name_length + 1)\n count = len(result_events_by_status[result_status_id])\n out_file.write(\"{} {:4}\\n\".format(\n result_label,\n count))", "def samples(app, args):\n engine = create_engine(args.datafile)\n meta = MetaData()\n meta.reflect(engine)\n print(\"\\t\".join([str(x).replace('counts.', '')\n for x in meta.tables['counts'].columns\n if not x == 'counts.index']))", "def summarize_tsvs(\n self,\n tsv_dir,\n dd,\n prefix=\"\",\n outlier_threshold=10,\n omit_props=[\n \"project_id\",\n \"type\",\n \"id\",\n \"submitter_id\",\n \"case_submitter_id\",\n \"case_ids\",\n \"visit_id\",\n \"sample_id\",\n \"md5sum\",\n \"file_name\",\n \"object_id\",\n \"series_uid\",\n \"study_uid\",\n \"token_record_id\"\n ],\n omit_nodes=[\"metaschema\", \"root\", \"program\", \"project\", \"data_release\"],\n outdir=\".\",\n bin_limit=False,\n write_report=True,\n report_null=True,\n ):\n\n summary = {}\n\n report = pd.DataFrame(\n columns=[\n \"prop_id\",\n \"project_id\",\n \"node\",\n \"property\",\n \"type\",\n \"N\",\n \"nn\",\n \"null\",\n \"perc_null\",\n \"all_null\",\n \"min\",\n \"max\",\n \"median\",\n \"mean\",\n \"stdev\",\n \"outliers\",\n \"bin_number\",\n \"bins\",\n ]\n )\n report[\"all_null\"] = report[\"all_null\"].astype(bool)\n\n dir_pattern = \"{}*{}\".format(prefix, \"tsvs\")\n project_dirs = glob.glob(\"{}/{}\".format(tsv_dir, dir_pattern))\n\n nn_nodes, nn_props, null_nodes, null_props, all_prop_ids = [], [], [], [], []\n\n msg = \"Summarizing TSVs in '{}':\\n\".format(tsv_dir)\n print(\"\\n\\n{}\".format(msg))\n\n for project_dir in project_dirs: # 
project_dir=project_dirs[0]\n\n try:\n project_id = re.search(\n r\"^{}/?([A-Za-z0-9_-]+)_tsvs$\".format(tsv_dir), project_dir\n ).group(1)\n except:\n print(\n \"Couldn't extract the project_id from project_dir '{}'!\".format(\n project_dir\n )\n )\n\n fpattern = \"{}*{}\".format(prefix, \".tsv\")\n fnames = glob.glob(\"{}/{}\".format(project_dir, fpattern))\n\n # msg = \"\\t\\tFound the following {} TSVs: {}\".format(len(fnames),fnames)\n # sys.stdout.write(\"\\r\" + str(msg))\n\n # print(fnames) # trouble-shooting\n if len(fnames) == 0:\n continue\n\n for (\n fname\n ) in (\n fnames\n ): # Each node with data in the project is in one TSV file so len(fnames) is the number of nodes in the project with data.\n\n # print(\"\\n\\t\\t{}\".format(fname)) # trouble-shooting\n\n node_regex = (\n re.escape(project_id) + r\"_([a-zA-Z0-9_]+)\\.tsv$\"\n ) # node = re.search(r'^([a-zA-Z0-9_]+)-([a-zA-Z0-9]+)_([a-zA-Z0-9_]+)\\.tsv$',fname).group(3)\n\n try:\n node = re.search(node_regex, fname, re.IGNORECASE).group(1)\n\n except Exception as e:\n print(\n \"\\n\\nCouldn't set node with node_regex on '{}':\\n\\t{}\".format(\n fname, e\n )\n )\n node = fname\n\n df = pd.read_csv(fname, sep=\"\\t\", header=0, dtype=str)\n\n if df.empty:\n print(\"\\t\\t'{}' TSV is empty. No data to summarize.\\n\".format(node))\n\n else:\n nn_nodes.append(node)\n prop_regex = re.compile(\n r\"^[A-Za-z0-9_]*[^.]$\"\n ) # drop the links, e.g., cases.submitter_id or diagnoses.id (matches all properties with no \".\")\n props = list(\n filter(prop_regex.match, list(df))\n ) # properties in this TSV to summarize\n props = [\n prop for prop in props if prop not in omit_props\n ] # omit_props=['project_id','type','id','submitter_id','case_submitter_id','case_ids','visit_id','sample_id','md5sum','file_name','object_id']\n\n # msg = \"\\t\\tTotal of {} records in '{}' TSV with {} properties.\".format(len(df),node,len(props))\n # sys.stdout.write(\"\\r\"+str(msg))\n\n for prop in props: # prop=props[0]\n\n prop_name = \"{}.{}\".format(node, prop)\n prop_id = \"{}.{}\".format(project_id, prop_name)\n print(prop_name)\n\n # because of sheepdog bug, need to inclue \"None\" in \"null\" (:facepalm:) https://ctds-planx.atlassian.net/browse/PXP-5663\n #df.at[df[prop] == \"None\", prop] = np.nan\n\n null = df.loc[df[prop].isnull()]\n nn = df.loc[df[prop].notnull()]\n perc_null = len(null)/len(df)\n ptype = self.get_prop_type(node, prop, dd)\n\n # dict for the prop's row in report dataframe\n prop_stats = {\n \"prop_id\": prop_id,\n \"project_id\": project_id,\n \"node\": node,\n \"property\": prop,\n \"type\": ptype,\n \"N\": len(df),\n \"nn\": len(nn),\n \"null\": len(null),\n \"perc_null\": perc_null,\n \"all_null\": np.nan,\n \"min\": np.nan,\n \"max\": np.nan,\n \"median\": np.nan,\n \"mean\": np.nan,\n \"stdev\": np.nan,\n \"outliers\": np.nan,\n \"bin_number\": np.nan,\n \"bins\": np.nan,\n }\n\n if nn.empty:\n null_props.append(prop_name)\n prop_stats[\"all_null\"] = True\n\n else:\n nn_props.append(prop_name)\n all_prop_ids.append(prop_id)\n prop_stats[\"all_null\"] = False\n\n msg = \"\\t'{}'\".format(prop_id)\n sys.stdout.write(\"\\r\" + str(msg).ljust(200, \" \"))\n\n if ptype in [\"string\", \"enum\", \"array\", \"boolean\", \"date\"]:\n\n if ptype == \"array\":\n\n all_bins = list(nn[prop])\n bin_list = [\n bin_txt.split(\",\") for bin_txt in list(nn[prop])\n ]\n counts = Counter(\n [\n item\n for sublist in bin_list\n for item in sublist\n ]\n )\n\n elif ptype in [\"string\", \"enum\", \"boolean\", \"date\"]:\n\n counts = 
Counter(nn[prop])\n\n df1 = pd.DataFrame.from_dict(\n counts, orient=\"index\"\n ).reset_index()\n bins = [tuple(x) for x in df1.values]\n bins = sorted(\n sorted(bins, key=lambda x: (x[0])),\n key=lambda x: (x[1]),\n reverse=True,\n ) # sort first by name, then by value. This way, names with same value are in same order.\n\n prop_stats[\"bins\"] = bins\n prop_stats[\"bin_number\"] = len(bins)\n\n # Get stats for numbers\n elif ptype in [\"number\", \"integer\"]: # prop='concentration'\n\n # make a list of the data values as floats (converted from strings)\n nn_all = nn[prop]\n d_all = list(nn_all)\n\n nn_num = (\n nn[prop]\n .apply(pd.to_numeric, errors=\"coerce\")\n .dropna()\n )\n d = list(nn_num)\n\n nn_string = nn.loc[~nn[prop].isin(list(map(str, d)))]\n non_numbers = list(nn_string[prop])\n\n if (\n len(d) > 0\n ): # if there are numbers in the data, calculate numeric stats\n\n # calculate summary stats using the float list d\n mean = statistics.mean(d)\n median = statistics.median(d)\n minimum = min(d)\n maximum = max(d)\n\n if (\n len(d) == 1\n ): # if only one value, no stdev and no outliers\n std = \"NA\"\n outliers = []\n else:\n std = statistics.stdev(d)\n # Get outliers by mean +/- outlier_threshold * stdev\n cutoff = (\n std * outlier_threshold\n ) # three times the standard deviation is default\n lower, upper = (\n mean - cutoff,\n mean + cutoff,\n ) # cut-offs for outliers is 3 times the stdev below and above the mean\n outliers = sorted(\n list(\n set(\n [\n x\n for x in d\n if x < lower or x > upper\n ]\n )\n )\n )\n\n # if property type is 'integer', change min, max, median to int type\n if ptype == \"integer\":\n median = int(median) # median\n minimum = int(minimum) # min\n maximum = int(maximum) # max\n outliers = [\n int(i) for i in outliers\n ] # convert outliers from float to int\n\n prop_stats[\"stdev\"] = std\n prop_stats[\"mean\"] = mean\n prop_stats[\"median\"] = median\n prop_stats[\"min\"] = minimum\n prop_stats[\"max\"] = maximum\n prop_stats[\"outliers\"] = outliers\n\n # check if numeric property is mixed with strings, and if so, summarize the string data\n if len(d_all) > len(d):\n\n msg = \"\\t\\tFound {} string values among the {} records of prop '{}' with value(s): {}. Calculating stats only for the {} numeric values.\".format(\n len(non_numbers),\n len(nn),\n prop,\n list(set(non_numbers)),\n len(d),\n )\n print(\"\\n\\t{}\\n\".format(msg))\n\n prop_stats[\"type\"] = \"mixed {},string\".format(ptype)\n\n counts = Counter(nn_string[prop])\n df1 = pd.DataFrame.from_dict(\n counts, orient=\"index\"\n ).reset_index()\n bins = [tuple(x) for x in df1.values]\n bins = sorted(\n sorted(bins, key=lambda x: (x[0])),\n key=lambda x: (x[1]),\n reverse=True,\n )\n prop_stats[\"bins\"] = bins\n prop_stats[\"bin_number\"] = len(bins)\n\n else: # If its not in the list of ptypes, exit. 
Need to add array handling.\n print(\n \"\\t\\t\\n\\n\\n\\nUnhandled property type!\\n\\n '{}': {}\\n\\n\\n\\n\".format(\n prop_id, ptype\n )\n )\n exit()\n\n if bin_limit and isinstance(prop_stats[\"bins\"], list): # if bin_limit != False\n prop_stats[\"bins\"] = prop_stats[\"bins\"][: int(bin_limit)]\n\n #report = report.append(prop_stats, ignore_index=True)\n # print(\"\\n{}\\n\".format(report))\n # print(\"\\n{}\\n\".format(prop_stats))\n pdf = pd.DataFrame.from_records([prop_stats])\n pdf['all_null'] = pdf['all_null'].astype(bool)\n report = pd.concat([report,pdf])\n\n\n if not report_null: # if report_null == False\n report = report.loc[report[\"all_null\"] != True]\n\n # strip the col names so we can sort the report\n report.columns = report.columns.str.strip()\n report.sort_values(by=[\"all_null\", \"node\", \"property\"], inplace=True)\n\n summary[\"report\"] = report\n summary[\"all_prop_ids\"] = all_prop_ids\n\n # summarize all properties\n nn_props = sorted(list(set(nn_props)))\n summary[\"nn_props\"] = nn_props\n\n null_props = [prop for prop in null_props if prop not in nn_props]\n summary[\"null_props\"] = sorted(list(set(null_props)))\n\n # summarize all nodes\n nn_nodes = sorted(list(set(nn_nodes)))\n summary[\"nn_nodes\"] = nn_nodes\n\n dd_regex = re.compile(r\"[^_][A-Za-z0-9_]+\")\n dd_nodes = list(filter(dd_regex.match, list(dd)))\n dd_nodes = [node for node in dd_nodes if node not in omit_nodes]\n null_nodes = [node for node in dd_nodes if node not in nn_nodes]\n\n summary[\"null_nodes\"] = null_nodes\n\n if write_report: # write_report == True\n\n self.create_output_dir(outdir=outdir)\n\n if \"/\" in tsv_dir:\n names = tsv_dir.split(\"/\")\n names = [name for name in names if name != \"\"]\n name = names[-1]\n else:\n name = tsv_dir\n\n outname = \"data_summary_{}.tsv\".format(name)\n outname = \"{}/{}\".format(\n outdir, outname\n ) # ./data_summary_prod_tsvs_04272020.tsv\n\n report.to_csv(outname, sep=\"\\t\", index=False, encoding=\"utf-8\")\n sys.stdout.write(\"\\rReport written to file:\".ljust(200, \" \"))\n print(\"\\n\\t{}\".format(outname))\n\n return summary", "def load_vcf(vcf_fn, asm_dat, state=\"UNK\", sample='unknown'):\n logging.info(\"Loading Variants from %s\", vcf_fn)\n asm_header, asms = asm_dat\n ret_header = build_var_header()\n ret_header.update(asm_header)\n asmheadidx = list(asm_header.keys())\n ret = []\n with pysam.VariantFile(vcf_fn) as fh:\n for var in fh:\n cur_data = [sample]\n cur_data.append(var.chrom)\n cur_data.append(var.start)\n cur_data.append(var.stop)\n\n var_type, var_len = get_type_lens(var)\n \n cur_data.append(var_type)\n cur_data.append(state)\n \n baid = None # best aid\n blen = 0 # best aid length\n best = None # pulled data\n num_asms = 0\n for aid in var.info[\"AID\"]:\n if aid not in asms:\n continue\n num_asms += 1\n dat = asms[aid]\n alen = dat[asmheadidx.index(\"ASMLEN\")]\n if alen > blen:\n baid = aid\n blen = alen\n best = dat\n \n cur_data.append(var.info[\"POP\"])\n cur_data.append(var_len)\n cur_data.append(num_asms)\n\n cur_data.extend(parse_format(var.samples[0]))\n \n if baid is not None:\n cur_data.extend(best)\n else:\n cur_data.extend(([0] * (len(asmheadidx) - 1)) + [\"\"])\n\n\n ret.append(cur_data)\n logging.info(\"Loaded %d variants\", len(ret))\n return pd.DataFrame(ret, columns = ret_header.keys())", "def strain_variant_stats(strains=None, verbose=True):\n verbose = ast.literal_eval(str(verbose))\n ROW = 'StrainID'\n strains = get_required_strains(strains)\n header = \"StrainID,Total 
Variants,Substitution,Insertion,Deletion\"\n results = []\n results.append(header)\n if verbose:\n print header\n with database.make_connection() as connection:\n for strain in strains:\n tmp = []\n tmp.append(r.table(TABLE).filter({'StrainID': strain}).count().run(connection))\n classes = ['substitution', 'insertion', 'deletion']\n for c in classes:\n tmp.append(r.table(TABLE).filter({'StrainID': strain,\n 'Class': c}).count().run(connection))\n cur = \"%s,%i,%i,%i,%i\" % (strain, tmp[0], tmp[1], tmp[2], tmp[3])\n if verbose:\n print cur\n results.append(cur)\n return results", "def read_varscan_vcf(vcf_file, min_depth):\n vcf = {}\n with open(vcf_file) as fh:\n for line in fh:\n row = line.split('\\t')\n chrom = row[0]\n bp = row[1]\n ref = row[3]\n alt = row[4]\n info = row[7]\n\n read_depth = int(info.partition('DP=')[-1].partition(';')[0])\n allele_freq = float(info.partition('AF1=')[-1].partition(';')[0])\n\n if read_depth >= min_depth:\n vcf['{0} {1}'.format(chrom, bp)] = '{0}:{1}:{2}'.format(allele_freq, ref, alt)\n\n else:\n vcf['{0} {1}'.format(chrom, bp)] = 'NA'\n\n return vcf", "def add_gtcnt(vcf, out, n_header=None):\n if n_header is None:\n n_header = edit_header(vcf)\n for entry in vcf:\n cnt = [0, 0, 0, 0]\n #cnt = {\"UNK\": 0, \"REF\": 0, \"HET\": 0, \"HOM\": 0}\n for sample in entry.samples:\n gt = entry.samples[sample][\"GT\"]\n if None in gt or len(gt) != 2:\n cnt[0] += 1\n elif gt[0] == gt[1] and gt[0] == 0:\n cnt[1] += 1\n elif gt[0] == gt[1]:\n cnt[3] += 1\n elif gt[0] != gt[1]:\n cnt[2] += 1\n else:\n cnt[0] += 1\n try:\n nentry = truvari.copy_entry(entry, n_header)\n except TypeError:\n yield entry\n continue\n nentry.info[\"GTCNT\"] = cnt \n yield nentry", "def GatherVcfs(\n b: hb.Batch,\n input_vcfs: List,\n disk_size: int,\n output_vcf_path: str = None,\n) -> Job:\n j = b.new_job('VQSR: FinalGatherVcf')\n j.image(utils.GATK_IMAGE)\n j.memory(f'16G')\n j.storage(f'{disk_size}G')\n j.declare_resource_group(\n output_vcf={'vcf.gz': f'{NAME}_gathered.vcf.gz', 'vcf.gz.tbi': f'{NAME}_gathered.vcf.gz.tbi'}\n )\n\n input_cmdl = ' '.join([f'--input {v}' for v in input_vcfs])\n j.command(\n f\"\"\"set -euo pipefail\n # --ignore-safety-checks makes a big performance difference so we include it in \n # our invocation. This argument disables expensive checks that the file headers \n # contain the same set of genotyped samples and that files are in order \n # by position of first record.\n gatk --java-options -Xms6g \\\\\n GatherVcfsCloud \\\\\n --gather-type BLOCK \\\\\n {input_cmdl} \\\\\n --output {j.output_vcf['vcf.gz']}\n tabix {j.output_vcf['vcf.gz']}\"\"\"\n )\n if output_vcf_path:\n b.write_output(j.output_vcf, f'{output_vcf_path}{NAME}_gathered{LABEL}')\n return j" ]
[ "0.58822435", "0.57890356", "0.5787876", "0.5740579", "0.5731693", "0.5716607", "0.5684999", "0.5680566", "0.56145954", "0.55977553", "0.5555825", "0.554939", "0.5524379", "0.5494294", "0.5492469", "0.5470732", "0.5433212", "0.54157346", "0.54056317", "0.5402928", "0.5400513", "0.53945225", "0.5384676", "0.5377839", "0.536223", "0.5353831", "0.53330815", "0.53322357", "0.53321606", "0.53046674" ]
0.7263814
0
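The 0 above closes out one record of this dump: thirty negative code snippets, their scores, and the positive document's score (0.7263814) and rank. The records that follow repeat the same layout of query, document, metadata, negatives, negative_scores, document_score, and document_rank, with the metadata marking a (query, document, negatives) triplet objective. The sketch below shows one way such a record could be unpacked for contrastive training; the Record container, the hard-negative cutoff, and the choice to sort by score are illustrative assumptions, not something the dataset prescribes.

# Illustrative sketch only: unpack one record of this dump into
# (query, positive, negative) triplets, keeping the highest-scored
# negatives as "hard" negatives. Field names mirror the record layout
# shown in this dump; everything else is an assumption.
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Record:
    query: str                     # natural-language description
    document: str                  # the positive code snippet
    negatives: List[str]           # the 30 mined negative snippets
    negative_scores: List[float]   # one similarity score per negative

def to_triplets(rec: Record, num_hard: int = 5) -> List[Tuple[str, str, str]]:
    # Rank negatives by score (highest first) and keep the top num_hard.
    ranked = sorted(zip(rec.negatives, rec.negative_scores),
                    key=lambda item: item[1], reverse=True)
    return [(rec.query, rec.document, neg) for neg, _ in ranked[:num_hard]]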
Sends email according to the provided form data. Returns HTTP 200 if the mail is sent regardless of the provider it used. Returns HTTP 500 if an error occurred and logs the error.
def send(): try: message = get_message_from_request(request) logging.info('Sending message {}'.format(message)) except KeyError: logging.error('Missing mandatory fields in form request.') return 'sender, recipients, subject and body fields are mandatory.', 500 try: client.send(message) logging.info('Sent message {}'.format(message)) return 'Message was sent successfully', 200 except SendError as e: logging.error('Message could not be sent: {}'.format(e)) return 'Something wrong happened, email could not be sent.', 500
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_email(request):\n # send emails and return some manner of success response\n send(**request.params)\n return {'success': 'mail sent!'}", "def send():\n try:\n data = request.get_json()\n if data['authkey'] != os.environ.get('MAIL_AUTHKEY'): \n return \"Ooops. Wrong `authkey`.\"\n msg = Message(data['subject'],\n sender=os.environ.get('MAIL_USERNAME'),\n recipients=[data['recipient']])\n msg.body = data['body'] \n mail.send(msg)\n return 'Mail sent!'\n except Exception as e:\n print('We got an error at ' + httpdate(datetime.datetime.now()))\n print(str(e)) \n return 'There was an error with that request.'", "def contact():\n if request.method == 'POST':\n send_email()\n return \"\"", "def send_email(formData):\r\n # The subject line for the email.\r\n emailSubject = \"Form Submitted\"\r\n formDataAsHTML = \"\"\r\n formDataAsText = \"\"\r\n for item in formData:\r\n # Exclude the Google form field\r\n if item == \"g-recaptcha-response\":\r\n continue\r\n formDataAsHTML += \"<p>The form field with the name \" + item + \" had the value \" + formData[item] + \"</p>\"\r\n formDataAsText += \"The form field with the name \" + item + \" had the value \" + formData[item] + \"\\r\\n\"\r\n\r\n # The email body for recipients with non-HTML email clients.\r\n emailTextBody = (\"Form Submission Data\\r\\n\" + formDataAsText)\r\n # The HTML body of the email.\r\n emailHTMLBody = \"\"\"<html>\r\n <head></head>\r\n <body>\r\n <h1>Form Submission Data</h1>\"\"\" + formDataAsHTML + \"\"\"\r\n </body>\r\n </html>\r\n \"\"\" \r\n # The character encoding for the email.\r\n charset = \"UTF-8\"\r\n \r\n # Create a new SES resource and specify a region.\r\n client = boto3.client('ses',region_name=\"eu-west-2\")\r\n\r\n #Provide the contents of the email.\r\n response = client.send_email(\r\n Destination={\r\n 'ToAddresses': environ[\"emailTo\"].split(\",\"),\r\n },\r\n Message={\r\n 'Body': {\r\n 'Html': {\r\n 'Charset': charset,\r\n 'Data': emailHTMLBody,\r\n },\r\n 'Text': {\r\n 'Charset': charset,\r\n 'Data': emailTextBody,\r\n },\r\n },\r\n 'Subject': {\r\n 'Charset': charset,\r\n 'Data': emailSubject,\r\n },\r\n },\r\n Source=environ[\"emailFrom\"]\r\n )", "def send_mail():\n email_address = request.args.get('emailAddress') # get email address from the form\n response = call_sendmail_endpoint(session['access_token'], session['alias'], email_address)\n print(session)\n if response == 'SUCCESS':\n show_success = 'true'\n show_error = 'false'\n else:\n print(response)\n show_success = 'false'\n show_error = 'true'\n\n session['pageRefresh'] = 'false'\n return render_template('main.html', name=session['alias'],\n emailAddress=email_address, showSuccess=show_success,\n showError=show_error)", "def send_email(form_instance, **kwargs):\n cleaned_data = form_instance.cleaned_data\n\n try:\n from_email = cleaned_data.pop(kwargs[\"from_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"from_email_field\")\n try:\n to_email = cleaned_data.pop(kwargs[\"to_email_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"to_email_field\")\n try:\n subject = cleaned_data.pop(kwargs[\"subject_field\"])\n except KeyError:\n raise exceptions.MissingActionParam(\"send_email\", \"subject_field\")\n\n if \"uuid\" in cleaned_data:\n del cleaned_data[\"uuid\"]\n\n if \"form_id\" in cleaned_data:\n del cleaned_data[\"form_id\"]\n\n email_body = \"\".join([\n \"%s: %s\\n\\r\" % (get_label(form_instance, label), value)\n for label, value in 
cleaned_data.items()\n ])\n send_mail(subject, email_body, from_email, [to_email])", "def post(self):\n return send_email(request.args)", "def sendTheDamnEmail(f):\n \n subject = f[\"subject\"].value\n toEmails = f[\"toEmail\"].value\n msg = f[\"msg\"].value\n \n #try:\n #mimeMsg = MIMEText(msg, \"plain\", \"utf-8\")\n #mimeMsg['Subject'] = subject\n #mimeMsg['From'] = fromEmail\n #mimeMsg['To'] = toEmails\n \n mimeMsg = MIMEMultipart('alternative')\n mimeMsg['Subject'] = Header(subject, 'UTF-8').encode()\n mimeMsg['To'] = Header(toEmails, 'UTF-8').encode()\n mimeMsg['From'] = Header(fromEmail, 'UTF-8').encode()\n\t\n part1 = MIMEText(msg, 'plain', \"utf-8\")\n #part2 = MIMEText(msg, 'html') # If you want to send a fancy HTML email, use this one also\n\t\n mimeMsg.attach(part1)\n\n sendEmail.sendEmail(fromEmail, password, toEmails,\\\n smtp, port=port, msg=mimeMsg)\n\n if logPath!=\"null\":\n logger = logEmail.EmailLogger(logPath)\n stored = logger.storePost(ip, msg, toEmails)\n\tprint \"stored\"\n print \"success\"", "def email_page(data):\n subject = f\"Inkbusters form contact: {data['title']}\"\n sender = current_app.config[\"MAIL_USERNAME\"]\n recipients= ['[email protected]']\n text_body=render_template('email/email_contact.txt', data=data)\n html_body=render_template('email/email_contact.html', data=data)\n\n send_email(\n subject=subject,\n sender=sender,\n recipients=recipients,\n text_body=text_body,\n html_body=html_body\n )", "def sendEmail(_name, _email, _body):\n\n _mailer = app.config['MAIL_USERNAME']\n msg = Message(\"Contact Form\", sender=('iSOLveIT Contact', f'{_mailer}'), recipients=[f'{_mailer}'])\n msg.body = f'''{_body}\n\n\nSender's Name: {_name}\nSender's Email: {_email}\nDate Sent: {dt.now(tz=GMT_tz).strftime('%B %d, %Y, %H:%M ') + 'GMT'}\n'''\n mail.send(msg)\n return 'OK'", "def send_mail(email):\n return email.send()", "def send(cls, data):\n if settings.SENDINBLUE[\"API_KEY\"]:\n requests.request(\n \"POST\",\n cls.send_email_url,\n data=json.dumps(data),\n headers=cls.default_headers,\n )", "def send_form(self, subject='no-subject'):\n\t\tname = self.cleaned_data.get('name', 'no-name')\n\t\tmessage = self.cleaned_data.get('message', 'no-message')\n\t\tsender = self.cleaned_data.get('sender', 'no-email')\n\t\ttry:\n\t\t\tmail_managers(subject, message)\n\t\texcept:\n\t\t\tpass", "def send_application_email():\n # pprint(request.json)\n \n TEXT = ('Dear ' + str(request.json['email']) + ',\\n\\nThank you for expressing your interest in' +\n ' becoming a mentor for theRightFit.\\n\\nAs a next step, we would like' +\n ' to invite you to fill in our mentor application form at https://goo.gl/forms/ft1mzyiQbJkazhkQ2' +\n '\\n\\nIf you have any questions, feel free to reach out to us directly ' +\n 'at [email protected].\\n\\nThanks,\\n\\ntheRightFit Team')\n \n sg = sendgrid.SendGridAPIClient(apikey='')\n from_email = Email(\"[email protected]\")\n to_email = Email(str(request.json['email']))\n subject = \"theRightFit Mentor Application\"\n content = Content(\"text/plain\", TEXT)\n mail = Mail(from_email, subject, to_email, content)\n response = sg.client.mail.send.post(request_body=mail.get())\n print(response.status_code)\n print(response.body)\n print(response.headers)\n return 'Application email sent!', 200", "def form_valid(self, form):\n\n try:\n # send_mail(subject, message, from_email, [ADMIN_EMAIL])\n form.send_email()\n except Exception:\n message = _(u'Під час відправки листа виникла непередбачувана '\n u'помилка. 
Спробуйте скористатись даною формою пізніше.')\n messages.info(self.request, message)\n else:\n message = _(u'Повідомлення успішно надіслане!')\n messages.success(self.request, message)\n\n # redirect to same contact page with success message\n return HttpResponseRedirect(reverse('contact_admin'))", "def contact(request, contact_form=ContactForm, template_name='contact/form.html',\n success_url=None, failure_url=None):\n if request.method == 'POST':\n form = contact_form(request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n sender_name = form.cleaned_data['sender_name']\n sender_email = form.cleaned_data['sender_email']\n cc_myself = form.cleaned_data['cc_myself']\n\n admin_email = settings.ADMINS[0][1]\n recipients = [admin_email,]\n if cc_myself:\n recipients.append(sender_email)\n\n site_name = Site.objects.get(pk=settings.SITE_ID).name\n\n try:\n send_mail(subject, _(\"Mail from %s %s\\n\\n %s\" % (site_name, message, sender_name)), sender_email, recipients)\n except BadHeaderError:\n return HttpResponseRedirect(reverse('contact-error') if failure_url is None else failure_url)\n return HttpResponseRedirect(reverse('contact-confirmation') if success_url is None else success_url)\n else:\n form = contact_form()\n\n return render_to_response(template_name,\n {'form': form},\n context_instance=RequestContext(request))", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text = message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n 
(username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def contactView(request):\n submitSuccess = False\n if request.method == 'GET':\n form = ContactForm()\n else:\n form = ContactForm(request.POST)\n if form.is_valid():\n fromEmail = form.cleaned_data['fromEmail']\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n message = \"From user : \" + str(request.user.username) + \"\\n\\n\" + (\"-\" * 40) + \"\\n\\n\" + message\n\n # Send the mail\n try:\n send_mail(subject, message, fromEmail, ['[email protected]'])\n except BadHeaderError:\n return HttpResponse('Invalid e-mail header found.')\n submitSuccess = True\n\n context = {\n 'form' : form,\n 'submitSuccess' : submitSuccess\n }\n return render(request, 'contact.html', context)", "def post():\n contactus_json = request.get_json()\n\n try:\n dict_data = ContactUsSchema().load(contactus_json)\n dict_data['description'] = escape(dict_data['description'])\n EmailService.save_and_send(EmailType.CONTACT_US, dict_data)\n response, status = 'Received successfully', HTTPStatus.OK\n except ValidationError as project_err:\n response, status = {'systemErrors': project_err.messages}, \\\n HTTPStatus.BAD_REQUEST\n return response, status", "def contact_us(request):\n\n if request.method == \"POST\":\n if request.POST.get(\"fname\") and request.POST.get(\"emailadd\"):\n post = Contact()\n post.full_name = request.POST.get(\"fname\")\n post.email = request.POST.get(\"emailadd\")\n post.phone_number = request.POST.get(\"pnumber\")\n post.message = request.POST.get(\"cmessage\")\n post.save()\n\n subject = \"Althea Store Inquiry\"\n message = post.message = request.POST.get(\n \"cmessage\") + \" From: \" + post.full_name + \" Sender's Email Address \" + post.email + post.phone_number\n from_email = \"[email protected]\"\n if subject and message and from_email:\n try:\n send_mail(\n subject, message,\n from_email, ['[email protected]'])\n except BadHeaderError:\n return HttpResponse(\"Invalid Header Found\")\n return render(request, \"contact/contact_success.html\")\n return HttpResponse(\"Make sure all fields are entered and valid.\")\n return render(request, \"contact/contact_success.html\")\n return render(request, \"contact/contact_us.html\")", "def send(self):\n if self._valid:\n try:\n self.connection.sendmail(self.sender, self.receiver, self.formatted_message)\n return 0, f'Successfully sent email {self.sender} -> {self.receiver}'\n except Exception as e:\n return 2, str(e)\n else:\n return 1, 'Invalid email formatting, message not sent'", "def feedback(request):\n\tif request.method == 'POST':\n\t\tsubject = 
request.POST.get(\"subject\", \"\")\n\t\tmessage = request.POST.get(\"message\", \"\")\n\t\t#from_email = request.POST.get(\"mail\", \"\")\n\t\tfrom_email = \"[email protected]\"\n\t\trecipient_list = [\"[email protected]\"]\n\t\tsend_mail(subject, \n\t\t\tmessage, \n\t\t\tfrom_email, \n\t\t\trecipient_list, \n\t\t\tfail_silently=False, \n\t\t\tauth_user=\"[email protected]\", \n\t\t\tauth_password= \"bancas1314\")\n\t\treturn HttpResponse(\"Envio satisfactorio\")\n\treturn HttpResponse(\"Envio erroneo\")", "def SendDynamic(SENDER_EMAIL_ADDRESS, RECIPIENT_EMAIL_ADDRESS, birth_date, chart_type, chart_for_email): #SENDER_EMAIL_ADDRESS, RECIPIENT_EMAIL_ADDRESS, chart_type, birth_date, chart_for_email\n # create Mail object and populate\n message = Mail(\n from_email=SENDER_EMAIL_ADDRESS,\n to_emails=RECIPIENT_EMAIL_ADDRESS)\n # pass custom values for our HTML placeholders\n message.dynamic_template_data = {\n 'subject': 'Billboard Chart on Your Birthday!',\n 'birth_date': birth_date,\n 'chart_type': chart_type,\n 'chart_for_email': chart_for_email\n }\n message.template_id = TEMPLATE_ID\n # create our sendgrid client object, pass it our key, then send and return our response objects\n try:\n sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))\n response = sg.send(message)\n code, body, headers = response.status_code, response.body, response.headers\n print(\"Response code:\", code)\n print(\"Response headers:\", headers)\n print(\"Response body:\", body)\n print(\"Dynamic Messages Sent!\")\n except Exception as e:\n print(\"Error: {0}\".format(e))\n #return str(response.status_code) #HERE", "def postprocess():\n if ERRORS:\n address = '[email protected]'\n body = '\\n\\n'.join( ERRORS )\n msg = create_message( body, address )\n send_mail( msg, address )", "def incoming_mail(request, recipients):\n try:\n _process_incoming_mail(request.raw_post_data, recipients)\n except InvalidIncomingEmailError as err:\n logging.debug(str(err))\n return HttpTextResponse('')", "def send_email():\n send_mail(\"You've got some problem.\", 'REPAIR IT', '[email protected]',\n ['[email protected]'], fail_silently=False,)", "def\ttry_send(key, form):\n\tdata2sent = dict(SEND_DATA_FMT)\n\tfor i in SEND_DATA_FMT[key]:\n\t\tif i not in form:\n\t\t\tprint(i)\n\t\t\treturn flask.make_response((\"ERROR 1\", 500))\n\t\tdata2sent[key][i] = form[i]\n\tfor i in data2sent:\n\t\tif i != key:\n\t\t\tdata2sent[i] = None\n\tif not send_info(data2sent):\n\t\t\treturn flask.make_response((\"ERROR 2\", 500))\n\treturn flask.make_response((\"OK\", 200))", "def send_email(self, to, content):\r\n server = smtplib.SMTP(\"smtp.gmail.com\", 587)\r\n server.ehlo()\r\n server.starttls()\r\n server.login(self.from_, self.password)\r\n server.sendmail(self.from_, to, content)\r\n speak(\"Email has been sent Succesfully!\")\r\n return \"None\"", "def send_message_to_email():\n json_values = request.json\n values = {\"email\", \"messageSend\"}\n response = Response(json.dumps(json_error(ResponsesREST.INVALID_INPUT.value)),\n status=ResponsesREST.INVALID_INPUT.value, mimetype=\"application/json\")\n if all(key in json_values for key in values):\n if validator_email_account.is_valid(json_values):\n result = Email.send_message_email(json_values[\"messageSend\"], json_values[\"email\"])\n if result == ResponsesREST.CREATED.value:\n response = Response(json.dumps({\"email\": json_values[\"email\"],\n \"messageSend\": json_values[\"messageSend\"]}),\n status=result, mimetype=\"application/json\")\n else:\n response = 
Response(json.dumps(json_error(result)), status=result,\n mimetype=\"application/json\")\n return response", "def send_mail_when_failed(self, body):\r\n pass" ]
[ "0.69340616", "0.67932796", "0.67354476", "0.65309924", "0.6504864", "0.65033066", "0.6437234", "0.6408452", "0.63727117", "0.62697357", "0.614304", "0.6036675", "0.59977025", "0.5985223", "0.5973128", "0.59453726", "0.59409326", "0.59167147", "0.59006274", "0.58988357", "0.5875025", "0.58640885", "0.585118", "0.5851054", "0.5831658", "0.5824523", "0.5800354", "0.57929134", "0.57695895", "0.57654357" ]
0.7023996
0
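Both complete records shown here rank their true document first (document_rank 0, with document_score 0.7263814 and 0.7023996). Assuming those ranks are 0-based, which is how the 0 values read, per-record ranks could be rolled up into standard retrieval metrics. The sketch below is illustrative only and is not part of the dataset.

# Illustrative sketch: aggregate 0-based document_rank values into
# retrieval metrics. The 0-based interpretation is an assumption.
from typing import Iterable

def mean_reciprocal_rank(ranks: Iterable[int]) -> float:
    # Rank 0 contributes 1.0, rank 1 contributes 0.5, and so on.
    ranks = list(ranks)
    return sum(1.0 / (r + 1) for r in ranks) / len(ranks) if ranks else 0.0

def recall_at_k(ranks: Iterable[int], k: int = 5) -> float:
    # Fraction of records whose true document lands in the top k.
    ranks = list(ranks)
    return sum(1 for r in ranks if r < k) / len(ranks) if ranks else 0.0

# For the two records above: mean_reciprocal_rank([0, 0]) == 1.0
# and recall_at_k([0, 0]) == 1.0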
Parses the attached files in the request as an Attachment array
def parse_attachments(request): attachments = [] for attachment in request.files.getlist('attachment'): attachments.append(Attachment(attachment.filename, attachment)) return attachments
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_attachbox_attachments(self, post_html):\n if 'attachbox' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_attachbox_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }", "def parse_inline_attachments(self, post_html):\n if 'inline-attachment' not in post_html:\n return []\n self.post_html = post_html\n self.p = PyQuery(self.post_html)\n\n attachment_dicts = []\n attachment_dicts += self.parse_s_thumbnails()\n attachment_dicts += self.parse_s_image()\n attachment_dicts += self.parse_s_file()\n attachment_dicts += self.parse_s_wm_file()\n attachment_dicts += self.parse_s_flash_file()\n attachment_dicts += self.parse_s_quicktime_file()\n attachment_dicts += self.parse_s_rm_file()\n\n #print('parse_inline_attachments() attachment_dicts: {0!r}'.format(attachment_dicts))\n return attachment_dicts", "def attachments(self):\n return self._attachments", "def test_attachments(self):\n data = mailgun_payload\n attachment_1 = open(self.test_upload_txt, 'r').read()\n attachment_2 = open(self.test_upload_png, 'rb').read()\n data['attachment-1'] = open(self.test_upload_txt, 'r')\n data['attachment-2'] = open(self.test_upload_png, 'rb')\n request = self.factory.post(self.url, data=data)\n email = self.parser.parse(request)\n\n self._assertEmailParsedCorrectly(email, data)\n\n # for each attachmen, check the contents match the input\n self.assertEqual(len(email.attachments), 2)\n\n # convert list of 3-tuples into dict so we can lookup by filename\n attachments = {k[0]: (k[1], k[2]) for k in email.attachments}\n self.assertEqual(smart_bytes(attachments['attachment-1'][0]), smart_bytes(attachment_1))\n self.assertEqual(attachments['attachment-1'][1], 'text/plain')\n self.assertEqual(attachments['attachment-2'][0], attachment_2)\n self.assertEqual(attachments['attachment-2'][1], 'image/jpeg')", "def attachments(self):\n return [Attachment(part) for part in self._parts]", "def attachments(self, val: list):\n self._attachments = []\n if val is not None:\n for item in val:\n if isinstance(item, Attachment):\n self._attachments.append(item)", "def attachments(self):\r\n return Attachments(self)", "def parse_attachment(message_part):\n content_disposition = message_part.get(\"Content-Disposition\", None)\n if content_disposition:\n dispositions = content_disposition.strip().split(\";\")\n if bool(content_disposition and\n dispositions[0].lower() == \"attachment\"):\n\n file_data = message_part.get_payload(decode=True)\n attachment = StringIO(file_data)\n attachment.content_type = message_part.get_content_type()\n attachment.size = len(file_data)\n attachment.name = None\n attachment.create_date = None\n attachment.mod_date = None\n attachment.read_date = None\n\n for param in dispositions[1:]:\n name, value = param.split(\"=\")\n name = name.lower()\n\n if name == \"filename\":\n attachment.name = value\n elif name == \"create-date\":\n 
attachment.create_date = value # TODO: datetime\n elif name == \"modification-date\":\n attachment.mod_date = value # TODO: datetime\n elif name == \"read-date\":\n attachment.read_date = value # TODO: datetime\n return attachment\n # no attachment\n return None", "def extract(request):\n try:\n files = request.FILES.getlist('myFile')\n msg_data = []\n fs = FileSystemStorage()\n for file in files:\n name = file.name.replace(\" \", \"_\")\n if os.path.exists(settings.MEDIA_ROOT + \"\\\\\" + name):\n os.remove(settings.MEDIA_ROOT + \"\\\\\" + name)\n fs.save(settings.MEDIA_ROOT + \"\\\\\" + name, file)\n msg = extract_msg.Message(settings.MEDIA_ROOT + \"\\\\\" + name)\n msg.save_attachments(customPath=settings.MEDIA_ROOT + \"\\\\\")\n attachments = []\n for i in range(0, len(msg.attachments)):\n attachments.append({\n \"filename\": msg.attachments[i].shortFilename,\n \"filepath\": \"/media/\" + msg.attachments[i].shortFilename\n })\n msg_data.append({\n # \"mainProperties\": msg.mainProperties,\n # \"header\": msg.header,\n \"attachments\": attachments,\n \"filename\": file.name,\n \"filepath\": \"/media/\" + name,\n \"from\": msg.sender,\n \"to\": msg.to,\n \"cc\": msg.cc,\n \"subject\": msg.subject,\n \"date\": msg.date,\n \"body\": msg.body,\n })\n msg.close()\n response = {\n \"response\": \"SUCCESS\",\n \"message\": \"File Uploaded!\",\n \"data\": msg_data\n }\n except:\n response = {\n \"response\": \"FAIL\",\n \"message\": \"Erorr in file uploading!\",\n \"data\": msg_data\n }\n return Response(response)", "def l10n_mx_edi_retrieve_attachments(self):\n self.ensure_one()\n if not self.l10n_mx_edi_cfdi_name:\n return []\n domain = [\n ('res_id', '=', self.id),\n ('res_model', '=', self._name),\n ('name', '=', self.l10n_mx_edi_cfdi_name )]\n return self.env['ir.attachment'].search(domain)", "def attachments(self):\n return self.properties.get('attachments',\n AttachmentCollection(self.context, ResourcePath(\"attachments\", self.resource_path)))", "def get_attachments_for(parser, token):\n def next_bit_for(bits, key, if_none=None):\n try:\n return bits[bits.index(key)+1]\n except ValueError:\n return if_none\n\n bits = token.contents.split()\n args = {\n 'obj': next_bit_for(bits, 'get_attachments_for'),\n 'var_name': next_bit_for(bits, 'as', '\"attachments\"'),\n }\n return AttachmentsForObjectNode(**args)", "def _extract_inline_attachments(doc, files):\n for attr, f in files.items():\n if f.b64:\n data = f.file.replace('\\n', '')\n else:\n data = base64.encodestring(f.file.read()).replace('\\n','')\n f.file.close()\n del f.file\n del f.b64\n del f.inline\n del f.doc_id\n doc.setdefault('_attachments',{})[f.id] = {'content_type': f.mimetype,'data': data}", "def set_attachments(self):\n self.response['attachments'] = []", "def parse_content(content):\n attachments = []\n body = None\n html = None\n\n for part in content.walk():\n if part.get('Content-Disposition') is not None:\n decoded_data = decode_attachment(part)\n\n attachment = parse_attachment(part)\n if attachment:\n attachments.append(attachment)\n elif part.get_content_type() == \"text/plain\":\n if body is None:\n body = \"\"\n body += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n elif part.get_content_type() == \"text/html\":\n if html is None:\n html = \"\"\n html += unicode(\n part.get_payload(decode=True),\n part.get_content_charset(),\n 'replace'\n ).encode('utf8', 'replace')\n # return the parsed data\n return {\n 'body': body,\n 'html': html,\n 
'filename': decoded_data['filename']\n # 'attachments': attachments\n }", "def parse_attachments(root, nodes, cfg):\n\n if ignore_attachments(cfg):\n return\n\n attachments = root.find('Attachments').findall('Attachment')\n\n logger.info('Parsing Attachments.')\n for attachment in attachments:\n attachment_type = attachment.find('attachmentType').text\n location = attachment.find('location').text\n object_id = attachment.find('objectID').text\n\n if is_url(attachment_type):\n nodes[object_id]['URL'] = location\n elif is_path(attachment_type):\n nodes[object_id]['path'] = location", "def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)", "def getAttachment(mail, directory=detach_dir):#Download attachment to directory & return filename\n filename = []\n for part in mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n\n filename = part.get_filename()\n att_path = os.path.join(directory, filename)\n\n if not os.path.isfile(att_path) :\n fp = open(att_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n return filename", "def get_attachments(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_ATTACHMENTS.format(expense_id))", "def volume_attachments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"volume_attachments\")", "def Get_Attachments(service, userId, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! 
size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)", "def attachments(self):\n return attachment_collection.AttachmentCollectionRequestBuilder(self.append_to_request_url(\"attachments\"), self._client)", "def attachments(self):\n if \"attachments\" in self._prop_dict:\n return AttachmentsCollectionPage(self._prop_dict[\"attachments\"])\n else:\n return None", "def check_attachment_fields(self):\n for field_name, field in self.fields.items():\n if isinstance(field, serializers.ListSerializer):\n if hasattr(field.child, \"field\"):\n for child_name, child in field.child.field.items():\n self.handle_attachment_field(child, child_name)\n else:\n self.handle_attachment_field(field, field_name)", "def find_video_attachments(document_attachments):\n if isinstance(document_attachments, dict):\n document_attachments = [document_attachments]\n video_info_list = []\n for collection in document_attachments:\n if \"video\" in collection['contentType']:\n size = round(collection['size']/1048576, 2)\n video_info_list.append({\"download_url\": collection['url'], \"size\": size})\n return video_info_list", "def collect_attachments(self, paths_or_urls: Iterable[str]) -> List[Tuple[str, str, str, bytes]]:\n attachments = []\n same_content = [] # type: List[bytes]\n for src in paths_or_urls:\n try:\n content = self.load_file(src)\n except ImageNotFound as err:\n self.log_error(err)\n self.conditionally_raise(err)\n continue\n content_hash = hashlib.md5(content).digest()\n if content_hash in same_content:\n continue\n same_content.append(content_hash)\n maintype, subtype = self._get_mime_type(src)\n filename = os.path.basename(src)\n attachments.append((maintype, subtype, filename, content))\n return attachments", "def payload_parse(self, mail):\n\t\tif mail.is_multipart():\n\t\t\tfor payload in mail.get_payload():\n\t\t\t\tif payload.get_content_maintype() == \"multipart\":\n\t\t\t\t\tself.payload_parse(payload)\n\t\t\t\telse:\n\t\t\t\t\tself.payload_handle(payload, mail)\n\t\t\t# Post deletion of payloads:\n\t\t\tself.payload_delete(mail)", "def save_attachments_in_doc(self, doc):\n\t\tsaved_attachments = []\n\n\t\tfor attachment in self.attachments:\n\t\t\ttry:\n\t\t\t\tfile_data = save_file(attachment['fname'], attachment['fcontent'],\n\t\t\t\t\tdoc.doctype, doc.name, is_private=1)\n\t\t\t\tsaved_attachments.append(file_data)\n\n\t\t\t\tif attachment['fname'] in self.cid_map:\n\t\t\t\t\tself.cid_map[file_data.name] = self.cid_map[attachment['fname']]\n\n\t\t\texcept MaxFileSizeReachedError:\n\t\t\t\t# WARNING: bypass max file size exception\n\t\t\t\tpass\n\t\t\texcept frappe.DuplicateEntryError:\n\t\t\t\t# same file attached twice??\n\t\t\t\tpass\n\n\t\treturn saved_attachments", "def body_parts(self):\n return_vals = {'files': []}\n\n for part in self.email.walk():\n maintype, subtype = part.get_content_type().split('/')\n # Multipart/* are containers, so we skip it\n if maintype == 'multipart':\n continue\n # Get Text and HTML\n filename = part.get_filename()\n if filename:\n return_vals['files'].append(filename)\n elif maintype == 'text':\n if subtype in ['plain', 'html']:\n encoder = part.get_content_charset() or 'utf-8'\n return_vals.update(\n {subtype:part.get_payload(decode=True).decode(encoder)})\n return return_vals" ]
[ "0.6827068", "0.6735421", "0.661379", "0.646597", "0.64154345", "0.63524157", "0.631864", "0.6259333", "0.62079674", "0.61960596", "0.6189419", "0.6186647", "0.6141066", "0.6133313", "0.61162454", "0.6106934", "0.6051867", "0.6045327", "0.6036548", "0.60318506", "0.5932437", "0.5915495", "0.585645", "0.58492357", "0.5823654", "0.58043885", "0.5789521", "0.57515365", "0.57449484", "0.57085764" ]
0.83019084
0
Initializes the main client on flask.g and registers providers to it.
def initialize_client():
    logging.info('Initializing Sendgrid provider')
    sendgrid_authentication, sendgrid_username = get_provider_credentials('sendgrid')
    sendgrid_provider = SendGridProvider(sendgrid_authentication, sendgrid_username)

    logging.info('Initializing Mailgun provider')
    mailgun_authentication, mailgun_domain = get_provider_credentials('mailgun')
    mailgun_provider = MailGunProvider(mailgun_authentication, mailgun_domain)

    logging.info('Registering providers')
    client.register_provider(sendgrid_provider, 10)
    client.register_provider(mailgun_provider, 20)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_client():\n #creating new flask app and a test client\n app = create_app('test')\n client = app.test_client()\n\n #creating the application context and\n #allowing test functions to run by calling test client\n #and finally cleaning house\n ctx = app.app_context()\n ctx.push()\n yield client\n ctx.pop()", "def init():\n server = Flask(__name__)\n \n return server", "def init_app(app):\n\n # pylint: disable=import-outside-toplevel\n\n from flask_cors import CORS\n from flask_jwt_extended import JWTManager\n\n allowed_origins = app.config['ALLOWED_ORIGINS']\n CORS(app, supports_credentials=True, origins=allowed_origins)\n\n jwt = JWTManager()\n jwt.init_app(app)\n\n import json\n import firebase_admin\n\n creds = app.config['FIREBASE_CREDENTIAL']\n\n try:\n credential = firebase_admin.credentials.Certificate(cert=creds)\n except FileNotFoundError:\n creds_escaped = creds.encode().decode('unicode_escape')\n creds_dict = json.loads(creds_escaped, strict=False)\n credential = firebase_admin.credentials.Certificate(cert=creds_dict)\n\n firebase_admin.initialize_app(credential=credential)\n\n from .auth import Token, TokenRevoke, TokenRefresh\n from .quote import Quotes, Quote, Random as RandomQuote, Contributor\n from .like import Likes, Like\n from .user import User, CurrentUser\n from .quote_status import QuoteStatus\n\n api.init_app(bp)\n api.add_resource(Token, '/auth/token')\n api.add_resource(TokenRefresh, '/auth/refresh')\n api.add_resource(TokenRevoke, '/auth/revoke')\n api.add_resource(Quotes, '/quotes')\n api.add_resource(Quote, '/quotes/<int:quote_id>')\n api.add_resource(RandomQuote, '/quotes/random')\n api.add_resource(Likes, '/likes')\n api.add_resource(Like, '/likes/<int:quote_id>')\n api.add_resource(User, '/users/<int:user_id>')\n api.add_resource(CurrentUser, '/users/me')\n api.add_resource(Contributor, '/quotes/<int:quote_id>/contributor')\n api.add_resource(QuoteStatus, '/quote-statuses')\n\n app.register_blueprint(bp, url_prefix='/v1')", "def client_setup(self):\n self.client = Client()", "def init(app):\n from sirepo import feature_config\n from sirepo import simulation_db\n\n if _uri_to_route:\n return\n global _app\n _app = app\n for n in _REQUIRED_MODULES + feature_config.cfg.api_modules:\n register_api_module(importlib.import_module('sirepo.' 
+ n))\n _init_uris(app, simulation_db)", "async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)", "def setUp(self):\n self.app = Flask(__name__)\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()", "def service_client_initialization(self) -> global___Snippet.ClientInitialization:", "def init_app(self, app):\n\n self.client = _Minio(\n app.config.get('MINIO_URL'),\n access_key=app.config.get('MINIO_ACCESS_KEY'),\n secret_key=app.config.get('MINIO_SECRET_KEY'),\n session_token=app.config.get('MINIO_SESSION_TOKEN'),\n secure=app.config.get('MINIO_SECURE_CONNECTION') or False,\n region=app.config.get('MINIO_REGION'),\n http_client=app.config.get('MINIO_HTTP_CLIENT'),\n credentials=app.config.get('MINIO_CREDENTIALS'),\n )\n\n for bucket in app.config.get('MINIO_BUCKETS', []):\n if not self.client.bucket_exists(bucket):\n self.client.make_bucket(bucket)\n\n app.extensions = getattr(app, \"extensions\", {})\n app.extensions[\"minio\"] = self.client", "def setup(self):\n # Load application default credentials if they're available.\n self.credentials = self._load_application_default_credentials()\n\n # Otherwise, load credentials from the provided client secrets file.\n # Name of a file containing the OAuth 2.0 information for this\n # application, including client_id and client_secret, which are found\n # on the Credentials tab on the Google Developers Console.\n self.client_secrets = os.path.join(os.path.dirname(__file__),\n self.client_secrets)\n\n credential_store_file = os.path.join(os.path.dirname(__file__),\n self.credential_store_file)\n\n storage = oauthFile.Storage(credential_store_file)\n\n if self.credentials is None or self.credentials.invalid:\n self.credentials = self._load_user_credentials(storage)\n\n # Authorize HTTP object with the prepared credentials.\n http = self.credentials.authorize(http=httplib2.Http())\n\n # Construct and return a service object via the discovery service.\n self.service = discovery.build(self.api_name, self.api_version, http=http)\n return self.service", "def on_startup():\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler", "def initialize():\n app = Flask(__name__)\n # Load private config at instance/config.py\n config_path = 'instance/config.py'\n if os.path.exists(config_path):\n app.config.from_pyfile(os.path.abspath(config_path))\n\n # Initialize database\n init_db(\n app.config['DB_USERNAME'],\n app.config['DB_PASSWORD'],\n 
app.config['DB_NAME']\n )\n\n return app", "def init_compute_clients(self):\n\n print \"\\t* instantiating clients\"\n # instantiate nova client\n self.gen_nova_client()\n\n # instantiate neutron client\n self.gen_neutron_client()\n\n # instantiate heat client (used to validate templates)\n self.gen_heat_client()", "def create_app(self):\r\n self.app = Flask(__name__, instance_relative_config=True)\r\n\r\n # Init the secret key of the app -it is a must for flask to run\r\n self.app.config.from_mapping(\r\n SECRET_KEY='!ZNeverSayNever116Z!',\r\n MONGODB_SETTINGS= {'host': 'mongodb://localhost/opc_integrity'}\r\n )\r\n initialize_db(self.app)\r\n\r\n\r\n # Init the app with core routes\r\n routes.init_app(self.app)", "def __init__(self):\n self.config = get_config()\n self.log = get_logger(self)\n\n self.factory = SugarServerFactory(\"wss://*:5505\")\n self.factory.protocol = SugarServerProtocol\n\n self.console_factory = SugarConsoleServerFactory(\"wss://localhost:5507\")\n self.console_factory.protocol = SugarConsoleServerProtocol\n\n self.api = APIService(self.config)", "def __init__(self):\n #self._app = Flask(__name__) # imports the named package, in this case this file\n self.__load_config()\n self._app = Flask(__name__.split(\".\")[-1], template_folder = self.template_folder)\n self._app.mongo = db_sync_manager #PyMongo(self._app)\n self._app.db = \"felix_mro\" if self.mro_enabled else \"felix_ro\"\n # Added in order to be able to execute \"before_request\" method\n app = self._app\n\n # Setup debugging for app\n cDebug = self.general_section.get(\"debug\")\n if cDebug: # log all actions on the XML-RPC interface\n def log_request(sender, **extra):\n logger.info(\">>> REQUEST %s:\\n%s\" % (request.path, request.data))\n request_started.connect(log_request, self._app)\n def log_response(sender, response, **extra):\n logger.info(\">>> RESPONSE %s:\\n%s\" % (response.status, response.data))\n request_finished.connect(log_response, self._app)\n\n @app.before_request\n def before_request():\n # \"Attach\" objects within the \"g\" object. 
This is passed to each view method\n g.mongo = self._app.mongo", "def initialize_app(flask_app):\n # Create a blueprint to house the API, swagger can be reached from /api\n # and each of the models from /api/[model]\n blueprint = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint)\n\n # Configure namespaces per model on the API.\n api.add_namespace(noms_namespace)\n\n flask_app.register_blueprint(blueprint)\n db.init_app(flask_app)\n\n with flask_app.app_context():\n db.create_all()", "def setup_provider(self):\n pass", "def init_app(self):\n LOGGER.info('Launching the init app for the producer')\n\n # Open channel to set the exchange\n channel_handler = ChannelHandler(self._connection)\n channel_handler.open_channel()\n self._channel = channel_handler.get_channel()\n\n # Set the default exchange to use\n exchange_name = 'SIEF'\n exchange_handler = ExchangeHandler(self._channel, exchange_name)\n exchange_handler.setup_exchange()\n self._exchange_name = exchange_handler.get_exchange_name()\n\n channel_handler.close_channel()", "def init_app(app):\n api.add_namespace(ns)\n app.register_blueprint(bp, url_prefix='/api/v1')", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def initService(self):", "def __init__(self):\n\n self.loop = asyncio.get_event_loop()\n self.aiohttp = web.Application(\n loop=self.loop,\n middlewares=[unhandled_route],\n )\n self.client = ClientSession()\n self.ws = WebSocketHandler(self)\n self.cert = self._load_ssl_certificate()\n\n self.config()", "def init_app():\r\n LOG.info('Initialising web server.')\r\n app = web.Application(middlewares=[api_key()])\r\n app.router.add_routes(routes)\r\n set_cors(app)\r\n app.on_startup.append(init_db)\r\n app.on_cleanup.append(close_db)\r\n return app", "async def init_app():\n app = web.Application()\n\n # And... 
here our routes\n app.router.add_route(\n \"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_ASTERISK_INIT}\", asterisk_init\n )\n app.router.add_route(\"POST\", f\"/{ASTERISK_CALL_APP_ROUTE_PLAY}\", asterisk_play)\n return app", "def init_app(app):\n app.register_blueprint(index_bl)\n app.register_blueprint(main_bl, url_prefix=\"/main\")\n app.register_blueprint(map_bl, url_prefix=\"/map\")\n app.register_blueprint(login_bl, url_prefix=\"/login\")\n app.register_blueprint(prof_bl, url_prefix=\"/profile\")\n app.register_blueprint(average_bl, url_prefix=\"/average\")", "def create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.init_app(app)\n \"\"\" Initialize plugins \"\"\"\n\n login_manager.login_message = 'You must be logged in to access this page'\n login_manager.login_message_category = 'info'\n login_manager.session_protection = 'strong'\n login_manager.login_view = 'auth_bp.login'\n\n # from .modules.user.models import User\n from .modules.user.methods import UserMethod\n @login_manager.user_loader\n def load_user(session_token):\n # def load_user(user_id):\n print('load_user - user_id - session_token: ', session_token)\n print('loading auth...')\n # since the user_id is just the primary key of our auth table, auth it in the query for the auth\n return UserMethod.get_user_session_token(session_token)\n\n with app.app_context():\n \"\"\" Blueprints \"\"\"\n from .modules.auth.views import auth_bp\n \"\"\" Blueprint for Auth routes in App \"\"\"\n from .modules.catalog.views import catalog_bp\n \"\"\" Blueprint for Catalog routes in App \"\"\"\n from .modules.category.views import category_bp\n \"\"\" Blueprint for Category routes in App \"\"\"\n from .modules.item.views import item_bp\n \"\"\" Blueprint for Item routes in App \"\"\"\n from .modules.user.views import user_bp\n \"\"\" Blueprint for User routes in App \"\"\"\n\n \"\"\"\" Register Blueprints \"\"\"\n app.register_blueprint(auth_bp)\n app.register_blueprint(catalog_bp)\n app.register_blueprint(category_bp)\n app.register_blueprint(item_bp)\n app.register_blueprint(user_bp)\n\n from .modules.catalog.models import Catalog\n from .modules.category.models import Category\n from .modules.item.models import Item\n \"\"\"Import the models so that sqlalchemy can detect them and create the DB \"\"\"\n\n db.create_all()\n \"\"\" Create the DB \"\"\"\n return app", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def initialize_app(app):\n # configure_app(app)\n # log.info(\"> Starting development server at http://%s/api/ <<<<<\" %\n # app.config[\"SERVER_NAME\"])\n\n blueprint_api = Blueprint('api', __name__, url_prefix='/api')\n api.init_app(blueprint_api)\n app.register_blueprint(blueprint_api)\n\n api.add_namespace(task_namespace)\n api.add_namespace(chain_namespace)\n\n Bootstrap(app)\n nav.init_app(app)\n app.register_blueprint(frontend_blueprint)\n app.register_blueprint(processors_blueprint)\n app.register_blueprint(chains_blueprint)\n app.register_blueprint(tasks_blueprint)\n app.register_blueprint(compare_blueprint)\n\n db.init_app(app)\n db.create_all(app=app)\n\n if not os.path.exists(app.config[\"OCRD_BUTLER_RESULTS\"]):\n os.makedirs(app.config[\"OCRD_BUTLER_RESULTS\"])", "async def init_provider(self):\n self.dsp_name = \"OpenStack\"\n await self._provider.init(image_names=self.config[\"images\"].values())" ]
[ "0.6897568", "0.68206555", "0.6573613", "0.65611285", "0.645763", "0.6242648", "0.62383497", "0.62190175", "0.6214114", "0.61999184", "0.6143029", "0.6102105", "0.60550094", "0.5952441", "0.594646", "0.59292895", "0.5918728", "0.5914333", "0.5908699", "0.5902818", "0.59021896", "0.5883405", "0.5870065", "0.58673644", "0.5850693", "0.5841212", "0.583458", "0.58306897", "0.5830209", "0.5826748" ]
0.7383164
0
Provider credentials should be injected in the deployed instance as environment variables. It gets them as PROVIDER_USERNAME and PROVIDER_AUTHENTICATION. PROVIDER_USERNAME is optional in case the provider uses an API key. For example, for Sendgrid it would be SENDGRID_USERNAME and SENDGRID_AUTHENTICATION.
def get_provider_credentials(provider):
    logging.info('Getting provider credentials for {}'.format(provider))
    uppercase_provider = provider.upper()
    username_variable = '{}_USERNAME'.format(uppercase_provider)
    authentication_variable = '{}_AUTHENTICATION'.format(uppercase_provider)
    username = os.environ.get(username_variable, '')
    authentication = os.environ[authentication_variable]
    return authentication, username
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['REGION'] = 'region'", "def get_credentials(env=\"development\") -> dict:\n load_dotenv()\n credentials = {}\n\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"DEV_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"DEV_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"DEV_AWS_REGION\")\n\n if env == \"production\":\n credentials[\"AWS_ACCESS_KEY_ID\"] = os.getenv(\"PROD_AWS_ACCESS_KEY_ID\")\n credentials[\"AWS_SECRET_ACCESS_KEY\"] = os.getenv(\n \"PROD_AWS_SECRET_ACCESS_KEY\")\n credentials[\"AWS_REGION\"] = os.getenv(\"PROD_AWS_REGION\")\n\n return credentials", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'\n os.environ['AWS_DEFAULT_REGION'] = 'us-west-2'", "def _make_sure_credentials_are_set(self):\n if self.backend_options:\n if not os.environ.get('APCA_API_KEY_ID') and \\\n self.backend_options['key_id']:\n os.environ['APCA_API_KEY_ID'] = self.backend_options['key_id']\n if not os.environ.get('APCA_API_SECRET_KEY') and \\\n self.backend_options['secret']:\n os.environ['APCA_API_SECRET_KEY'] = self.backend_options[\n 'secret']\n if not os.environ.get('APCA_API_BASE_URL') and \\\n self.backend_options['base_url']:\n os.environ['APCA_API_BASE_URL'] = self.backend_options[\n 'base_url']", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def aws_credentials():\n os.environ['AWS_ACCESS_KEY_ID'] = 'testing'\n os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'\n os.environ['AWS_SECURITY_TOKEN'] = 'testing'\n os.environ['AWS_SESSION_TOKEN'] = 'testing'", "def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }", "def read_environment(self):\n # Setup credentials\n if os.getenv(\"DO_API_TOKEN\"):\n self.api_token = os.getenv(\"DO_API_TOKEN\")\n if os.getenv(\"DO_API_KEY\"):\n self.api_token = os.getenv(\"DO_API_KEY\")", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n 
os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }", "def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False", "def aws_credentials() -> None:\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SECURITY_TOKEN\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def get_zia_partner_creds_from_env(self, debug):\n\t\tif debug:\n\t\t\tself.debug = debug\n\t\telse:\n\t\t\tself.debug = DEBUG_DEFAULT\n\n\t\t# Partner username should be stored as an environmental variable named \"PARTNER_USERNAME\"\n\t\tif os.environ.get('ZIA_PARTNER_USERNAME') is not None:\n\t\t\tself.partner_username = os.environ.get('ZIA_PARTNER_USERNAME')\n\t\telse:\n\t\t\tlogging.debug(\"ENV IMPORT ERROR: {}\".format(\"ZIA_PARTNER_USERNAME not found\"))\n\t\t\texit()\n\n\t\t# Partner password should be stored as an environmental variable named \"PARTNER_PASSWORD\"\n\t\tif os.environ.get('ZIA_PARTNER_PASSWORD') is not None:\n\t\t\tself.partner_password = os.environ.get('ZIA_PARTNER_PASSWORD')\n\t\telse:\n\t\t\tlogging.debug(\"ENV IMPORT ERROR: {}\".format(\"ZIA_PARTNER_PASSWORD not found\"))\n\t\t\texit()\n\n\t\t# Partner API Key should be stored as an environmental variable named \"PARTNER_API\"\n\t\tif os.environ.get('ZIA_PARTNER_API') is not None:\n\t\t\tself.partner_api_key = os.environ.get('ZIA_PARTNER_API')\n\t\t\tif len(self.partner_api_key) < MIN_API_KEY_LENGTH:\n\t\t\t\tlogging.debug(\"Partner API Key must be %{} characters\".format(MIN_API_KEY_LENGTH))\n\t\t\t\texit()\n\t\telse:\n\t\t\tlogging.debug(\"ENV IMPORT ERROR: %{}\".format(\"ZIA_PARTNER_API not found\"))\n\t\t\texit()", "def get_appengine_credentials():\n return get_credentials()", "def aws_credentials():\n os.environ[\"AWS_ACCESS_KEY_ID\"] = \"testing\"\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = \"testing\"\n os.environ[\"AWS_SESSION_TOKEN\"] = \"testing\"", "def _get_consumer_info_for(self, provider):\n return secrets.AUTH_CONFIG[provider]", "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "def _env_vars(self, cred_file=None, section='default'):\n if cred_file:\n parser = ConfigParser.SafeConfigParser()\n parser.optionxform = str\n parser.read(os.path.expanduser(cred_file))\n for name, value in parser.items(section):\n if name == 'OS_AUTH_URL':\n if not self.module.params.get('login_url'):\n self.module.params['login_url'] = value\n if name == 'OS_USERNAME':\n 
if not self.module.params.get('login_user'):\n self.module.params['login_user'] = value\n if name == 'OS_PASSWORD':\n if not self.module.params.get('login_password'):\n self.module.params['login_password'] = value\n if name == 'OS_TENANT_ID':\n if not self.module.params.get('login_tenant_name'):\n self.module.params['login_tenant_name'] = value\n else:\n if not self.module.params.get('login_url'):\n authurl = os.getenv('OS_AUTH_URL')\n self.module.params['login_url'] = authurl\n\n if not self.module.params.get('login_user'):\n username = os.getenv('OS_USERNAME')\n self.module.params['login_user'] = username\n\n if not self.module.params.get('login_password'):\n password = os.getenv('OS_PASSWORD')\n self.module.params['login_password'] = password\n\n if not self.module.params.get('login_tenant_name'):\n tenant = os.getenv('OS_TENANT_ID')\n self.module.params['login_tenant_name'] = tenant", "def get_saucelabs_username_and_key():\r\n return {\"username\": settings.SAUCE.get('USERNAME'), \"access-key\": settings.SAUCE.get('ACCESS_ID')}", "def setup_provider(self):\n pass", "def set_credentials():", "def loadenv(self):\n logging.debug('Loading OpenStack authentication information from environment')\n # Grab any OS_ found in environment\n for var in os.environ:\n if var[0:3] == 'OS_':\n value = os.environ[var]\n # Don't print out password or token to debug\n if 'PASSWORD' not in var or 'TOKEN' not in var:\n logging.debug('Using %s from environment for %s', value, var)\n self.creds[var[3:].lower()] = value", "def __init__(self, sendgrid_email_env_name: str, sendgrid_api_key_env_name: str):\n try:\n self.sendgrid_email = os.environ[sendgrid_email_env_name]\n self.sendgrid_api_key = os.environ[sendgrid_api_key_env_name]\n except KeyError:\n self.sendgrid_email = None\n self.sendgrid_api_key = None\n self.logger.error(\"Failed to initialize email service\")\n return\n self.logger.info(\"Email service initialized\")", "def environment_vars_set():\n os.environ[\"YESSSSMS_LOGIN\"] = \"03211234567\"\n os.environ[\"YESSSSMS_PASSWD\"] = \"MySecr3t\"\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n os.environ[\"YESSSSMS_RECIPIENT\"] = \"066356789789\"", "def provider(self, provider):\n if self.local_vars_configuration.client_side_validation and provider is None: # noqa: E501\n raise ValueError(\"Invalid value for `provider`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n provider is not None and len(provider) < 1):\n raise ValueError(\"Invalid value for `provider`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._provider = provider", "def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']", "def account_from_env(cls):\n return os.getenv(\"OIO_ACCOUNT\", \"myaccount\")" ]
[ "0.6156303", "0.61343807", "0.6078328", "0.60325223", "0.60214007", "0.6018666", "0.6018666", "0.6018666", "0.59797263", "0.59468067", "0.5937931", "0.5937931", "0.58606076", "0.58562934", "0.5853367", "0.5835138", "0.5820004", "0.58101356", "0.5797626", "0.57737917", "0.5765363", "0.57540286", "0.5744873", "0.5703612", "0.56870604", "0.5680869", "0.56484467", "0.56163776", "0.55878484", "0.55762404" ]
0.7291802
0
Building the event by hand, but eliminating the meta fields
def build_event(
    self,
    type: EventType,
    fqid: FullQualifiedId,
    fields: Optional[Dict[str, Any]] = None,
    list_fields: Optional[ListFields] = None,
) -> Event:
    if type == EventType.Update and fields:
        fields = {
            k: v
            for k, v in fields.items()
            if k != "id" and not k.startswith("meta_")
        }
    return super().build_event(type, fqid, fields, list_fields)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildEvent(data):", "def __init__(self, event_list):\n event_list = [AttribDict(event) for event in event_list]\n for event in event_list:\n event.datetime = UTC(event.datetime)\n if feregion is not None:\n event.flynn_region = feregion(event.latitude, event.longitude)\n for item in ['datetime_quality', 'depth_quality', 'magnitude_type', 'author', 'quality', 'information', 'origin_id', 'flynn_region']:\n if event.get(item) == None:\n event[item] = ''\n #if not event.get('magnitude_type'):\n # event['magnitude_type'] = 'xx'\n if event.get('id') == None:\n event['id'] = (str(event['datetime']))[:-4].replace('-', '').replace(':', '').replace('.', '')\n super(Events, self).__init__(event_list)", "def _save_extra_fields(self, event):\n\n term = self.cleaned_data[\"term_name\"]\n week = self.cleaned_data[\"term_week\"]\n day = self.cleaned_data[\"day_of_week\"]\n\n year = int(settings.DEFAULT_ACADEMIC_YEAR)\n date = datetimes.termweek_to_date(year, term, week, day)\n \n start_hour = self.cleaned_data[\"start_hour\"]\n start_minute = self.cleaned_data[\"start_minute\"]\n end_hour = self.cleaned_data[\"end_hour\"]\n end_minute = self.cleaned_data[\"end_minute\"]\n\n tz = timezone.get_current_timezone()\n\n start_naive = datetime.datetime(date.year, date.month, date.day,\n start_hour, start_minute)\n event.start = tz.localize(start_naive)\n\n end_naive = datetime.datetime(date.year, date.month, date.day,\n end_hour, end_minute)\n event.end = tz.localize(end_naive)\n\n event.metadata[\"people\"] = self.cleaned_data[\"people\"]\n event.metadata[\"type\"] = self.cleaned_data[\"event_type\"]\n\n if self.cleaned_data[\"cancel\"] is True:\n event.status = models.Event.STATUS_CANCELLED\n else:\n event.status = models.Event.STATUS_LIVE", "def test_make_event_extra_fields(self):\n msg_helper = MessageHelper()\n event = msg_helper.make_event(\n 'ack', 'abc123', sent_message_id='sent', foo='bar', baz='quux')\n self.assert_message_fields(event, {'foo': 'bar', 'baz': 'quux'})", "def _create_event(self, ph, category, name, pid, tid, timestamp):\n event = {}\n event['ph'] = ph\n event['cat'] = category\n event['name'] = name\n event['pid'] = pid\n event['tid'] = tid\n event['ts'] = timestamp\n return event", "def _empty_event(self):\n event = self.event_from_template(self._fields)\n event.protocol = self._protocol\n event.subject = self._subject\n event.montage = self._montage\n event.experiment = self._experiment\n event.session = self._session\n return event", "def create_event(self, **kwargs):\n events = self.variables['events']\n events.append(kwargs)\n self.variables['events'] = events", "def create_new_event(self):\n pass", "def test_make_event_all_fields(self):\n msg_helper = MessageHelper()\n event_fields = {\n 'event_type': 'ack',\n 'user_message_id': 'abc123',\n 'sent_message_id': '123abc',\n 'transport_type': 'irc',\n 'transport_name': 'vuminet',\n 'transport_metadata': {'foo': 'bar'},\n 'helper_metadata': {'foo': {}},\n\n 'timestamp': datetime.utcnow(),\n 'event_id': 'e6b7efecda8e42988b1e6905ad40fae1',\n 'endpoint': 'foo_ep',\n }\n event = msg_helper.make_event(**event_fields)\n expected_fields = event_fields.copy()\n expected_fields.update({\n 'message_type': TransportEvent.MESSAGE_TYPE,\n 'message_version': TransportEvent.MESSAGE_VERSION,\n 'routing_metadata': {\n 'endpoint_name': expected_fields.pop('endpoint'),\n }\n })\n self.assertEqual(expected_fields, event.payload)", "def __init__(self):\n super(EventObject, self).__init__()\n self._event_data_identifier = None\n 
self._event_data_row_identifier = None\n self.date_time = None\n self.parser = None\n self.timestamp = None\n # TODO: rename timestamp_desc to timestamp_description\n self.timestamp_desc = None", "def __init__(self):\n super(EventTag, self).__init__()\n self._event_identifier = None\n self._event_row_identifier = None\n self.labels = []", "def CreateNewEvent(arguments: List[Tuple[str, type]] = [], event_name: str = '') -> Event:\n pass", "def __init__(self,\n event_id: str,\n event_type: str,\n event_data: dict = None,\n event_origin: str = None,\n event_timestamp: datetime.datetime = None,\n object_type: str = None,\n object_id: str = None,\n object_key: str = None):\n if event_timestamp is None:\n event_timestamp = datetime.datetime.utcnow().isoformat()\n self._event = dict(\n id=event_id,\n type=event_type,\n data=event_data,\n origin=event_origin,\n timestamp=event_timestamp,\n object_type=object_type,\n object_id=object_id,\n object_key=object_key\n )", "def _construct_event_obj(self,entity_id, timestamp, uuid, value, tags, revenue):\n\n event_obj = {\n \"entity_id\": str(entity_id), \n \"timestamp\": int(timestamp), \n \"uuid\": str(uuid), \n }\n if revenue is not False: \n event_obj['revenue'] = int(revenue) \n if value is not False: \n event_obj['value'] = float(value)\n if tags is not False:\n event_obj['tags'] = json.loads(tags)\n \n return event_obj", "def add_event_from_info(db, event_info, event_id, tag):\n\n if 'description' not in event_info.keys():\n return False\n\n if len(event_info['description']) < MIN_CHARS_DESC:\n if VERBOSE:\n print('Failure: event description too short \\\n (>={} chars needed)'.format(MIN_CHARS_DESC))\n return False\n\n if 'name' in event_info.keys():\n ename = event_info['name']\n else:\n ename = None\n\n if 'venue' in event_info.keys():\n if 'name' in event_info['venue'].keys() and event_info['venue']['name']:\n lname = event_info['venue']['name']\n else:\n lname = None\n\n if 'lon' in event_info['venue'].keys() and event_info['venue']['lon']:\n lon = event_info['venue']['lon']\n else:\n lon = None\n\n if 'lat' in event_info['venue'].keys() and event_info['venue']['lat']:\n lat = event_info['venue']['lat']\n else:\n lat = None\n\n if 'address_1' in event_info['venue'].keys() \\\n and event_info['venue']['address_1']:\n address_1 = event_info['venue']['address_1']\n else:\n address_1 = None\n\n if 'zip' in event_info['venue'].keys() and event_info['venue']['zip']:\n zipcode = event_info['venue']['zip']\n else:\n zipcode = None\n\n if 'city' in event_info['venue'].keys() and event_info['venue']['city']:\n city = event_info['venue']['city']\n else:\n city = None\n\n if 'state' in event_info['venue'].keys() \\\n and event_info['venue']['state']:\n state = event_info['venue']['state']\n else:\n state = None\n else:\n lname = lon = lat = address_1 = zipcode = city = state = None\n\n if 'time' in event_info.keys() and event_info['time']:\n start_time = event_info['time']\n else:\n start_time = None\n\n if 'duration' in event_info.keys() and event_info['duration']:\n duration = event_info['duration']\n else:\n duration = None\n\n if 'description' in event_info.keys() and event_info['description']:\n description = event_info['description']\n else:\n description = None\n\n # taglist = []\n # for t in TAGS:\n # if t in description.lower() or t in ename.lower():\n # taglist.append(t)\n #\n # if len(taglist) > 0:\n # print(ename, taglist)\n # else:\n # return\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE mid = %s\n 
\"\"\",\n (event_id, ))\n\n result = cursor.fetchone()\n\n if result:\n print('Event already in database.')\n return\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE ename = %s\n \"\"\",\n (ename, ))\n if result:\n print('Event already in database.')\n return\n\n loc_query = \\\n \"\"\"\n INSERT\n INTO Locations(lname, lat, lon, address_1, zip, city, state)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor.execute(loc_query, (\n lname,\n lon,\n lat,\n address_1,\n zipcode,\n city,\n state,\n ))\n\n db.commit()\n\n print('Inserted into Locations.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n lid = cursor.fetchone()\n\n start_date = str(datetime.fromtimestamp(start_time / 1000))\n\n if start_date and duration:\n end_date = str(datetime.fromtimestamp((start_time + duration) / 1000))\n else:\n end_date = None\n\n ev_query = \\\n \"\"\"\n INSERT\n INTO Events(ename, start_date, end_date,\n num_attending, lid, description, mid)\n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(ev_query, (\n ename.encode('ascii', 'ignore'),\n start_date,\n end_date,\n 0,\n lid,\n description.encode('ascii', 'ignore'),\n event_id,\n ))\n\n db.commit()\n\n print('Inserted into Events.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n eid = cursor.fetchone()\n\n # for tag in taglist:\n # category = None\n # for c in CATEGORIES:\n # if tag in CATEGORIES[c]:\n # category = c\n\n et_query = \\\n \"\"\"\n INSERT\n INTO EventTags(eid, tag, category)\n VALUES (%s, %s, %s)\n \"\"\"\n\n cursor.execute(et_query, (eid, tag, tag))\n\n db.commit()\n\n print('Inserted into EventTags.')\n\n if VERBOSE:\n print('Finished.')\n return True", "async def createEvent(self, event: Event) -> None:", "def generate(self):\n\n self.event_type = random.choice(self.TYPES)\n self.event_item = EventGenerator.ITEM_GEN.generate_item()\n\n event_details = random.choice(self.DETAIL_DICT[self.event_type])\n\n self.event_message = event_details[0]\n self.event_title = event_details[1]\n self.event_text = event_details[2]\n\n self.event_widgets = event_details[3]\n\n self.accomplished = event_details[4]", "def makeNewEvent( desc, oldEvent, xtra, data=None ):\n newOd = dict()\n oldOd = None\n if oldEvent:\n oldOd = oldEvent.getPayload()\n \n # has incoming stuff got any % symbols in it, attempt substitution\n if oldOd != '':\n if desc.has_key(\"type\"):\n if '%' in desc[\"type\"]:\n desc[\"type\"] = desc[\"type\"] % oldEvent.getPayload()\n if desc.has_key(\"source\"):\n if '%' in desc[\"source\"]:\n desc[\"source\"] = desc[\"source\"] % oldEvent.getPayload() \n \n if desc.has_key(\"other_data\"):\n for v in desc[\"other_data\"]:\n if '%' in desc[\"other_data\"][v]:\n newOd[v] = desc[\"other_data\"][v] % oldEvent.getPayload()\n else:\n newOd[v] = desc[\"other_data\"][v] \n\n # empty.\n #attempt string substitution here too\n if desc.has_key(\"copy_other_data\"):\n cpList = desc[\"copy_other_data\"]\n for key in cpList:\n if '%' in cpList[key]:\n if xtra and xtra.has_key( cpList[key] ):\n newOd[key] = xtra[ cpList[key] % oldEvent.getPayload() ] \n elif oldOd and oldOd.has_key( cpList[key] ):\n newOd[key] = oldOd[ cpList[key] % oldEvent.getPayload() ] \n else:\n if xtra and xtra.has_key( cpList[key] ):\n newOd[key] = xtra[ cpList[key] ]\n elif oldOd and oldOd.has_key( cpList[key] ):\n newOd[key] = oldOd[ cpList[key] ]\n\n # append/update payload \n if data:\n newOd.update(data)\n \n # may be empty.\n if newOd and len(newOd) == 0:\n newOd = None\n\n return Event( desc[\"type\"], desc[\"source\"], newOd )", "def 
_create_event_data(index):\n time_stamp = str(datetime.utcnow() + timedelta(seconds=index))\n name = str(index)\n metric = random.randint(0, 1000)\n\n return {\"timeStamp\": time_stamp, \"name\": name, \"metric\": metric, \"source\": \"pyeventhub\"}", "def __init__(self): # , eid='', desc='', handle=''):\n Event.__init__(self)\n # TODO Remove: Event_combo.clearnames is not set or in use\n self.clearnames = [] # filled by models.gen.place.Place.show_names_list\n # to show names list\n self.role = \"\" # role of event from EVENT relation, if available\n self.note_ref = [] # Note uniq_ids (previous noteref_hlink had\n # only the first one)\n self.citation_ref = [] # uniq_ids (previous citationref_hlink = '')\n self.place_ref = [] # uniq_ids (previous placeref_hlink = '')\n self.place = None # Place node, if included\n self.media_ref = [] # uniq_ids (preovious self.objref_hlink had gramps handles)\n self.note_ref = [] # uniq_ids (previously note[])\n\n self.citations = [] # For creating display sets\n self.person = None # Persons names connected; for creating display\n # self.notes = [] # For creating display sets\n # self.places = [] # Places of the event (for person list)", "def create_and_add_event(self, event_data):\n event = event_from_dict(event_data)\n self.add_event(event)", "def buildEventFromDict(evdict):\n evclass = evdict.get(\"eventClass\", Unknown)\n if evclass == Heartbeat:\n for field in (\"device\", \"component\", \"timeout\"):\n if field not in evdict:\n raise ZenEventError(\"Required event field %s not found: %s\" % (field, evdict))\n evt = EventHeartbeat(evdict['device'], evdict['component'], \n evdict['timeout'])\n else:\n evt = Event(**evdict)\n return evt", "def prune_event(event):\n\n # Remove all extraneous fields.\n event.unrecognized_keys = {}\n\n new_content = {}\n\n def add_fields(*fields):\n for field in fields:\n if field in event.content:\n new_content[field] = event.content[field]\n\n if event.type == RoomMemberEvent.TYPE:\n add_fields(\"membership\")\n elif event.type == RoomCreateEvent.TYPE:\n add_fields(\"creator\")\n elif event.type == RoomJoinRulesEvent.TYPE:\n add_fields(\"join_rule\")\n elif event.type == RoomPowerLevelsEvent.TYPE:\n # TODO: Actually check these are valid user_ids etc.\n add_fields(\"default\")\n for k, v in event.content.items():\n if k.startswith(\"@\") and isinstance(v, (int, long)):\n new_content[k] = v\n elif event.type == RoomAddStateLevelEvent.TYPE:\n add_fields(\"level\")\n elif event.type == RoomSendEventLevelEvent.TYPE:\n add_fields(\"level\")\n elif event.type == RoomOpsPowerLevelsEvent.TYPE:\n add_fields(\"kick_level\", \"ban_level\", \"redact_level\")\n elif event.type == RoomAliasesEvent.TYPE:\n add_fields(\"aliases\")\n\n event.content = new_content\n\n return event", "def create_event(klass, form, creator):\n\n if form.is_recurring.data:\n # Series\n return klass.create_series(form, creator)\n # Single event\n return klass.create_single_event(form, creator)", "def _event_builder(self, events, event_codes):\n for ev in events:\n event_out = lems.EventOut(ev) # output (e.g. 
spike)\n oc = lems.OnCondition(renderer.render_expr(events[ev]))\n oc.add_action(event_out)\n # if event is not in model ports we should add it\n if ev not in self._component_type.event_ports:\n self._component_type.add(lems.EventPort(name=ev, direction='out'))\n if ev in event_codes:\n for ec in re.split(';|\\n', event_codes[ev]):\n event_eq = _equation_separator(ec)\n oc.add_action(lems.StateAssignment(event_eq[0], event_eq[1]))\n spike_flag = False\n if ev == SPIKE:\n spike_flag = True\n yield (spike_flag, oc)", "def format_event(event):\n del event['period']\n try:\n name= \"\"\n for course in event['courses']:\n name = \"%s \"%(course['name'])\n del event['courses']\n event['courses'] = name\n except:\n pass\n\n try:\n name= event['course']['name']\n del event['course']\n event['course'] = name\n except:\n pass\n\n return event", "def populate_create_event_form(form, venue, event):\n # Venue info\n venue = event.venue\n form.venue_name.data = venue.name\n form.address.data = venue.address\n form.city.data = venue.city\n form.state.data = CreateEventForm.convert_choice_to_id(venue.state, \"STATES\")\n form.zip_code.data = venue.zip_code\n\n # Event Info\n form.title.data = event.title\n form.event_type.data = event.event_type.id\n form.category.data = event.event_category.id\n form.start_date.data = event.start_date()\n form.end_date.data = event.end_date()\n form.start_time.data = CreateEventForm.convert_choice_to_id(\n event.start_time(), \"TIMES\"\n )\n form.end_time.data = CreateEventForm.convert_choice_to_id(event.end_time(), \"TIMES\")", "def create_event(wrapped, instance, args, kwargs, start_time, response,\n exception):\n event = PyMongoEvent(\n wrapped,\n instance,\n args,\n kwargs,\n start_time,\n response,\n exception\n )\n trace_factory.add_event(event)", "def createEvent(event):\n event = {\n 'summary': event.description,\n 'location': \"\",\n 'description': \"\",\n 'start': {\n 'dateTime': event.datetime_start,\n 'timeZone': \"America/Los_Angeles\"\n },\n 'end': {\n 'dateTime': event.datetime_end,\n 'timeZone': \"America/Los_Angeles\"\n },\n }\n\n event = service.events().insert(calendarId=SF_FUNCHEAP_CAL_ID, body=event).execute()", "def convert_event(event: dict) -> dict:\n # TODO: Check issues around leap year.\n date_object = datetime.datetime(2020, event['month'], event['day'],\n tzinfo=datetime.timezone(datetime.timedelta(hours=event['timezone'])))\n utc_time_tuple = date_object.utctimetuple()\n month, day = utc_time_tuple[1], utc_time_tuple[2]\n hour, minute = utc_time_tuple[3], utc_time_tuple[4]\n fields = {'FirstName': event['firstName'], 'LastName': event['lastName'], 'Email': event['email'],\n 'Subtext': event['subtext'], 'UnsubscribeKey': uuid.uuid4().hex,\n 'DateKey': '{:02d}-{:02d}'.format(month, day),\n 'TimeName': '{:02d}:{:02d}-{} {}'.format(hour, minute, event['firstName'], event['lastName'])}\n return fields" ]
[ "0.7753694", "0.6666498", "0.64480186", "0.6430229", "0.64239025", "0.63877994", "0.63057095", "0.63010573", "0.62411094", "0.6236453", "0.61496586", "0.6063579", "0.6033859", "0.6013378", "0.60089743", "0.598756", "0.59403574", "0.5883825", "0.5802795", "0.5795733", "0.57948947", "0.5777839", "0.57666874", "0.57536614", "0.5752358", "0.5734527", "0.5702379", "0.5668213", "0.5656433", "0.56498116" ]
0.7140484
1
Retrieves the name of the component
def get_name(self):
    return COMPONENT_LIST[self.index][0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_component_name(self):\n return self._name", "def getComponentName(self):\n if self.componentName:\n return self.componentName\n else:\n raise Warning(\"No component name has been currently defined\")", "def name(self):\r\n return self.component.get(\"Name\", \"\")", "def name(self):\r\n return self.component.get(\"Name\", \"\")", "def get_component_name(arguments, environ, shell_runner):\n arg = arguments.get('--component-name')\n if arg is not None:\n return arg\n env_var = environ.get('COMPONENT_NAME', None)\n if env_var is not None:\n return env_var\n return get_component_name_from_git(shell_runner)", "def get_name(self):\n\t\treturn self.__name", "def getname(self):\n return self.__name", "def get_name(self):\n\n\t\treturn self.__name", "def getName(self):\n dataDict = self.__dict__\n result = self.varName\n if result is None:\n result = self.chemComp.name\n return result", "def name(self):\n cSld = self._element.cSld\n return cSld.get('name', default='')", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\r\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def get_name(self):\n return self.__name", "def getname(self):\n return self.__class__.__name__", "def get_name(self) -> str:\n pass", "def getName(self):\n return self._get_name( )", "def getName(self):\n return self.__name__", "def component(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"component\")", "def get_name(self) -> str:\n return self.__name", "def get_name(self):\n pass", "def get_name(self):\n pass", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name(self):\n\t\treturn self.name", "def get_name() -> str:\n pass", "def get_name(self):\n return self.__name" ]
[ "0.8993381", "0.8578204", "0.832196", "0.832196", "0.7564453", "0.752911", "0.7503306", "0.74896145", "0.7467424", "0.7465566", "0.74547154", "0.74547154", "0.74547154", "0.74547154", "0.7447792", "0.7447792", "0.7447792", "0.7416273", "0.74120075", "0.7411486", "0.73846656", "0.737401", "0.736648", "0.73562354", "0.73562354", "0.7337242", "0.7337242", "0.7337242", "0.7334736", "0.7334698" ]
0.8755263
1
Retrieves the description of the component
def get_description(self):
    return COMPONENT_LIST[self.index][1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_description(self):\r\n return self.__description", "def _get_description(self):\n return self.__description", "def _get_description(self):\n return self.__description", "def get_description():\n raise NotImplementedError", "def get_description(self):\n pass", "def getDescription(self):\n return self.description", "def get_description(self):\n return self.__description", "def get_description(self):\n raise NotImplementedError", "def get_description(self) -> str:\n pass", "def getDescription(self):\n return self._description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def get_description(self):\n return self.description", "def getDescription(self):\n raise NotImplementedError", "def get_description(self):\n return self._description", "def get_description(self):", "def get_description(self):\n return self._java_ref.getDescription()", "def getDescription(self):\n return self.base.get(\"description\", [])", "def get_desc(self):\n return self._desc", "def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e", "def description(self) :\n\t\ttry :\n\t\t\treturn self._description\n\t\texcept Exception as e:\n\t\t\traise e", "def description(self):\n if \"description\" in self._prop_dict:\n return self._prop_dict[\"description\"]\n else:\n return None", "def get_description(self):\n\n return self._description", "def description(self):\n if self._description is None:\n self._load()\n return self._description", "def _get_desc(self):\n return self.__desc", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")", "def description(self) -> str:\n return pulumi.get(self, \"description\")" ]
[ "0.8060678", "0.80528724", "0.80528724", "0.8034792", "0.801626", "0.79952645", "0.79841787", "0.79818565", "0.7962023", "0.79614246", "0.7942446", "0.7942446", "0.7942446", "0.7942446", "0.7909087", "0.7892895", "0.7890679", "0.78325516", "0.7744791", "0.77443457", "0.7720087", "0.7720087", "0.76956534", "0.76728696", "0.76679766", "0.76365954", "0.76333416", "0.76333416", "0.76333416", "0.76333416" ]
0.859268
0
Install firmware to module
def install_firmware(self, image_path):
    """Not Implement"""
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def install_firmware(self, firmware_file_path: str) -> None:\n raise NotImplementedError()", "def install_firmware(self, pbz_path, recovery=False):\n\n\t\tresources = None\n\t\twith zipfile.ZipFile(pbz_path) as pbz:\n\t\t\tbinary = pbz.read(\"tintin_fw.bin\")\n\t\t\tif not recovery:\n\t\t\t\tresources = pbz.read(\"system_resources.pbpack\")\n\n\t\tself.system_message(\"FIRMWARE_START\")\n\t\ttime.sleep(2)\n\n\t\tif resources:\n\t\t\tclient = PutBytesClient(self, 0, \"SYS_RESOURCES\", resources)\n\t\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\t\tclient.init()\n\t\t\twhile not client._done and not client._error:\n\t\t\t\tpass\n\t\t\tif client._error:\n\t\t\t\traise PebbleError(self.id, \"Failed to send firmware resources %s/system_resources.pbpack\" % pbz_path)\n\n\n\t\tclient = PutBytesClient(self, 0, \"RECOVERY\" if recovery else \"FIRMWARE\", binary)\n\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\tclient.init()\n\t\twhile not client._done and not client._error:\n\t\t\tpass\n\t\tif client._error:\n\t\t\traise PebbleError(self.id, \"Failed to send firmware binary %s/tintin_fw.bin\" % pbz_path)\n\n\t\tself.system_message(\"FIRMWARE_COMPLETE\")", "def install():\n PackCommandExecutor().pack()\n InstallCommandExecutor().install()", "def install_component_firmware(self, component_name, image_path):\n raise NotImplementedError", "def update_firmware(self):\n self.execute_command(CMD_UPDATE_FIRMWARE)", "def install_package(self, module: str, **kwargs):\n logging.message('Installing module from %s %s' % (module, str(kwargs)))\n package = importlib.import_module(module)\n if kwargs.get('package'):\n kwargs.pop('package')\n setup_return = package.Setup(self, module, **kwargs)\n ff_id = kwargs.get('ff_id')\n initial_values = kwargs.get('initial_values')\n if ff_id and initial_values:\n self.device_initial_values[ff_id] = initial_values\n scheduler.runInS(10, self.refresh_firebase, job_id='FIREBASE_REFRESH_CORE')\n scheduler.runInS(15, self.export_all_components, job_id='CORE_EXPORT_ALL')\n return setup_return", "def do(self):\r\n parameters = ParametersParserStr(self.args_parameters).get()\r\n self.core.install(self.product_names, parameters, with_dependencies=True)", "def update_firmware(self) -> str:", "def update_firmware(firmware_path, script_path):\n\n args = ['uflash', '-r', firmware_path, script_path]\n subprocess.call(args)", "def assemble_firmware(self):\n\n # Check that the layout is available from the firmware configuration file\n if \"layout\" not in self.project.firmware_definition:\n self.project.logging.critical(\"The firmware layout is not defined in configuration file\")\n exit(1)\n\n # Check that the stacking method is available from the firmware configuration file\n if \"method\" not in self.project.firmware_definition[\"layout\"]:\n self.project.logging.critical(\"The firmware stacking method is not defined\")\n exit(1)\n\n # Ensure firmware generation path exists and is a dir\n if not os.path.isdir(self.project.firmware_directory):\n os.makedirs(self.project.firmware_directory)\n\n # Ensure firmware exists\n # TODO : iterate the list of squashfs files\n if not os.path.isfile(self.project.firmware_filename):\n logging.critical(\"The firmware does not exist (\" +\n self.project.firmware_filename + \")\")\n exit(1)\n\n # Remove existing initscript if needed\n if os.path.isfile(self.project.init_filename):\n os.remove(self.project.init_filename)\n\n # Copy the init script to the target directory\n\n # Generate the stacking script\n 
self.generate_stack_script()", "def do_workload(self):\n module_manager = self._core.get_module_manager()\n module_manager.install_module(self.get_meta())", "def _install(self):\n\n pass", "def software_api(self, install_params):\n try:\n self.sw = jnpr.junos.utils.sw.SW(self.dev)\n ok, msg_ret = self.sw.install(**install_params)\n if ok is not True:\n raise AnsibleError('Unable to install the software %s' % msg_ret)\n msg = 'Package %s successfully installed. Response from device is: %s' % (\n install_params.get('package') or\n install_params.get('pkg_set'),\n msg_ret)\n self.queue_message(\"log\", \"%s\" % msg)\n return msg\n except (self.pyez_exception.ConnectError,\n self.pyez_exception.RpcError) as ex:\n raise AnsibleError('Installation failed. Error: %s' % str(ex))", "def install():\n execute(generate)\n execute(upload)", "def on_install(self, event):\n unit = self.model.unit\n\n # Install your software and its dependencies\n\n unit.status = ActiveStatus()", "def update_firmware(self) -> None:\n\n BROADCAST_ID = 0xFFF\n firmware_update_message = self.__set_module_state(\n BROADCAST_ID, Module.State.UPDATE_FIRMWARE, Module.State.PNP_OFF\n )\n self._send_q.put(firmware_update_message)\n self.__delay()", "def pack_firmware(self, work_dir, jobclient, version_string=\"\"):\n raise NotImplementedError(\"Abstract method not implemented\")", "def install_device_software(self, path, image, target_version):\n\n package = os.path.join(os.getcwd(), path)\n\n if c.SERVICEPLUGIN_OSSH in self.sample_device.deviceServicePlugin:\n\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CLEANUP_STORAGE)\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CLEANUP_STORAGE)\n self.sample_device.deviceConnection.rpc.request_system_storage_cleanup()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_COPY_IMG.format(image))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_COPY_IMG.format(image))\n # progress = SoftwareTask.copy_progress\n with SCPClient(transport=self.sample_device.deviceConnection._conn._session.transport) as scp:\n scp.put(package, remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir)\n\n except (BadHostKeyException, AuthenticationException) as e:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_COPY_IMG_NOK.format(e.message))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_COPY_IMG_NOK.format(e.message))\n return self.sample_device\n\n try:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_VERS.format(target_version))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_VERS.format(target_version))\n result = self.sample_device.deviceConnection.sw.pkgadd(\n self.grp_cfg.TASKS.Provision.Software.RemoteDir + image,\n dev_timeout=self.grp_cfg.TASKS.Provision.Software.PkgAddDevTimeout)\n\n except Exception as err:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(err)))\n return self.sample_device\n\n if result is True:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n 
message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))\n time.sleep(3)\n return self.sample_device\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\\n', \" \")))\n self.sample_device.deviceConnection.close()\n self.sample_device.deviceIsRebooted = True\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message='Rebooting...')\n c.oss_seen_devices_lck.acquire()\n\n try:\n if self.sample_device.deviceIP in c.oss_seen_devices:\n c.oss_seen_devices.pop(self.sample_device.deviceIP, None)\n finally:\n c.oss_seen_devices_lck.release()\n\n return self.sample_device\n\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)\n return self.sample_device\n\n else:\n\n try:\n result = self.sample_device.deviceConnection.sw.install(package=package,\n remote_path=self.grp_cfg.TASKS.Provision.Software.RemoteDir,\n cleanfs=True, no_copy=False,\n progress=SoftwareTask.install_progress)\n except Exception as err:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(err)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED, task_state_message=str(err))\n return self.sample_device\n\n if result is True:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_INSTALL_OK.format(self.sample_device.deviceIP))\n\n else:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_INSTALL_NOK.format(str(result)))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_INSTALL_NOK.format(str(result)))\n time.sleep(3)\n return self.sample_device\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_REBOOT.format(self.sample_device.deviceIP))\n\n try:\n rsp = self.sample_device.deviceConnection.sw.reboot()\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_REBOOT_DEV_RESP.format(rsp.replace('\\n', \" \")))\n # self.sample_device.deviceConnection.close()\n\n except exception.ConnectClosedError:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n 
message=logmsg.SW_CONN_LOOSE_REBOOT)\n self.update_task_state(new_task_state=c.TASK_STATE_REBOOTING,\n task_state_message=logmsg.SW_CONN_LOOSE_REBOOT)\n finally:\n\n alive = self.probe_device_not_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RetryProbeCounter)\n\n if not alive:\n self.sample_device.deviceIsRebooted = True\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_WAKEUP.format(self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self.sample_device, connect=False)\n\n if status:\n\n alive = self.probe_device_alive(self.sample_device,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeTimeout)\n\n if alive:\n\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_WAKUP_OK.format(self.sample_device.deviceIP))\n self.sample_device.deviceIsRebooted = False\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_PROBE_WAKUP_OK.format(\n self.sample_device.deviceIP))\n status, self.sample_device = Tools.create_dev_conn(self.sample_device)\n\n if status:\n\n self.sample_device.deviceConnection.bind(cu=Config, sw=SW)\n # Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n # message=logmsg.SW_CONN_OK.format(self.sample_device.deviceIP))\n self.update_task_state(new_task_state=c.TASK_STATE_PROGRESS,\n task_state_message=logmsg.SW_CONN_OK.format(\n self.sample_device.deviceIP))\n\n return self.sample_device\n\n else:\n return self.sample_device\n\n else:\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=c.TASK_STATE_MSG_FAILED)\n self.sample_device.deviceConnection = None\n return self.sample_device\n\n else:\n Tools.emit_log(task_name=self.task_name, sample_device=self.sample_device,\n message=logmsg.SW_PROBE_DEV_NOK.format(self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))\n self.update_task_state(new_task_state=c.TASK_STATE_FAILED,\n task_state_message=logmsg.SW_PROBE_DEV_NOK.format(\n self.sample_device.deviceIP,\n self.grp_cfg.TASKS.Provision.Software.RebootProbeCounter))", "def install():\n deploy()\n configure()", "def install(i):\n\n cm_kernel.print_for_con('***********************************************')\n cm_kernel.print_for_con('Installing code ...')\n\n # Check vars\n if 'target_os_uoa' not in i: return {'cm_return':1, 'cm_error':'\"target_os_uoa\" is not defined in \"code install\"'}\n\n # Create entry\n ii={'cm_run_module_uoa':ini['cm_module_uid'],\n 'cm_action':'update'}\n if 'install_data_uid' in i and i['install_data_uid']!='': \n ii['cm_data_uid']=i['install_data_uid']\n if 'install_data_alias' in i and i['install_data_alias']!='': \n ii['cm_data_uoa']=i['install_data_alias']\n if 'install_data_display_as_alias' in i: \n ii['cm_display_as_alias']=i['install_data_display_as_alias']\n if 'install_module_uoa' in i and i['install_module_uoa']!='':\n ii['cm_run_module_uoa']=i['install_module_uoa']\n if 'cm_array' in i and len(i['cm_array'])>0: ii['cm_array']=i['cm_array']\n if 'install_repo_uoa' in i and i['install_repo_uoa']!='': \n ii['cm_repo_uoa']=i['install_repo_uoa']\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n target_path=r['cm_path']\n target_uid=r['cm_uid']\n target_alias=r['cm_alias']\n\n # Prepare script\n rx=get_env({'cm_data_uoa':target_uid,\n 'os_uoa':i['target_os_uoa']})\n if rx['cm_return']>0: return rx\n\n script=rx['cm_string']\n\n ii={'script_name':script,\n 
'skip_extension':'yes',\n 'target_os_uoa':i['target_os_uoa'],\n 'cm_path':target_path}\n if 'code_deps' in i and i.get('skip_code_deps','')!='yes':\n ii['code_deps']=i['code_deps']\n\n # Add remark about how code was built\n if 'add_rem_to_script' in i:\n run_commands_before=[]\n run_commands_before.append('')\n for x in i['add_rem_to_script']:\n run_commands_before.append(x)\n ii['run_commands_before']=run_commands_before\n\n rx=prepare_script(ii)\n if rx['cm_return']>0: return rx\n\n r['script_name']=rx['cm_path']\n r['script_filename']=script\n\n return r", "def update_firmware(self, node, port):\n return hpsum_controller.update_firmware(node)", "def _install(self, host):\n pass", "def install(self):\n PiService.install(self)\n self.sudo('svn co https://svn.code.sf.net/p/mjpg-streamer/code /etc/mjpg-streamer')\n self.run('cd /etc/mjpg-streamer/mjpg-streamer && sudo make USE_LIB4VL=true clean all && sudo make DESTDIR=/usr install')", "def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)", "def __do_single_module_install(item):\n\n name = item.name\n local_name = item.local_name\n install_name = item.install_name\n\n # First copy the new file.\n if copy_file(local_name, install_name, DTF_MODULES_DIR) != 0:\n log.e(TAG, \"Error copying module '%s'\" % (local_name))\n return -1\n\n # Update database\n if __update_module(item) == 0:\n log.e(TAG, \"Failed to update module '%s' details in database.\"\n % (name))\n return -2\n\n log.i(TAG, \"Module '%s' installed successfully!\" % name)\n return 0", "def __do_single_binary_install(item):\n\n name = item.name\n local_name = item.local_name\n install_name = item.install_name\n\n # First copy the new file.\n if copy_file(local_name, install_name, DTF_BINARIES_DIR) != 0:\n log.e(TAG, \"Error copying binary '%s'\" % (local_name))\n return -1\n\n # Update database\n if __update_binary(item) == 0:\n log.e(TAG, \"Failed to update binary '%s' details in database.\"\n % (name))\n return -2\n\n log.i(TAG, \"Binary '%s' installed successfully!\" % name)\n return 0", "def install(self):\n raise NotImplementedError", "def setup_module(module):\n print(\"Start rishabhSetupModule of Program\")", "def bootloader() -> NoReturn:", "def fusion_api_li_upgrade_firmware(self, body=None, uri=None, api=None, param='', headers=None):\n param = '/firmware'\n return self.li.update(body=body, uri=uri, api=api, headers=headers, param=param)" ]
[ "0.7427774", "0.6969059", "0.69159126", "0.6860279", "0.6850534", "0.6627969", "0.65738577", "0.6535344", "0.64783776", "0.64002633", "0.6383176", "0.63472944", "0.63452154", "0.6341543", "0.6337719", "0.6332141", "0.62691337", "0.6253833", "0.62381494", "0.61869246", "0.6147193", "0.61109257", "0.6090754", "0.60821944", "0.60463035", "0.59614456", "0.59243625", "0.59219766", "0.5913908", "0.58923835" ]
0.7309129
1
Convert a numpy array to a TF placeholder.
def _build_tensor(self, ndarray): ndarray = np.asarray(ndarray).astype(self.dtype) return tf1.placeholder_with_default( ndarray, shape=ndarray.shape if self.use_static_shape else None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _from_numpy(array):\n return tf.constant(array)", "def transpose_to_tensorflow(array: np.ndarray) -> np.ndarray:\n shape = (\n DIM_PT['batch'],\n DIM_PT['depth'],\n DIM_PT['height'],\n DIM_PT['width'],\n DIM_PT['channels'],\n )\n array = np.transpose(array, shape)\n return array", "def to_tf_placeholder(self, name, batch_dims):\n return tuple(\n s.to_tf_placeholder(type(s).__name__ + '-' + name, batch_dims)\n for s in self.spaces)", "def np2tensor(array, device=None):\n tensor = torch.from_numpy(array)\n return tensor", "def tt(ndarray):\n\n\tif not isinstance(ndarray, torch.Tensor):\n\n\t\tif not isinstance(ndarray, np.ndarray):\n\t\t\tndarray = np.array(ndarray)\n\n\t\tif torch.cuda.is_available():\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float().cuda(), requires_grad=False)\n\t\telse:\n\t\t\tndarray = Variable(torch.from_numpy(ndarray).float(), requires_grad=False)\n\n\treturn ndarray", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def _img_array_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value.ravel()))", "def np_to_torch(array):\n tensor = torch.from_numpy(array)\n if tensor.dtype != torch.float32:\n tensor = tensor.float()\n return tensor", "def array2var(array):\n if array.ndim == 3:\n array = array.transpose((2, 0, 1))\n var = Variable(torch.from_numpy(array))\n while len(var.size()) < 4:\n var = var.unsqueeze(0)\n return var", "def convert_array(self, v, t):\n return interpreter.TensorValue(tvm.ndarray.array(v, self.context))", "def np2tensor(array, device_id=-1):\n tensor = torch.from_numpy(array)\n if device_id >= 0:\n tensor = tensor.cuda(device_id)\n return tensor", "def _ListArrayToTensor(\n self, list_array: pa.Array,\n produce_eager_tensors: bool) -> Union[np.ndarray, tf.Tensor]:\n values = list_array.flatten()\n batch_size = len(list_array)\n expected_num_elements = batch_size * self._unbatched_flat_len\n if len(values) != expected_num_elements:\n raise ValueError(\n \"Unable to convert a {} to a tensor of type spec {}: size mismatch. \"\n \"Expected {} elements but got {}. \"\n \"If your data type is tf.Example, make sure that the feature \"\n \"is always present, and have the same length in all the examples. \"\n \"TFX users should make sure there is no data anomaly for the feature.\"\n .format(\n type(list_array), self.type_spec, expected_num_elements,\n len(values)))\n actual_shape = list(self._shape)\n actual_shape[0] = batch_size\n if self._convert_to_binary_fn is not None:\n values = self._convert_to_binary_fn(values)\n values_np = np.asarray(values).reshape(actual_shape)\n if produce_eager_tensors:\n return tf.convert_to_tensor(values_np)\n\n return values_np", "def list_to_backend_type(data: List) -> TTensor:", "def convert(self, example):\n tf_example = _convert_to_tf_example(example, self.tokenizer, self.rules,\n self.config, self.max_sizes)\n return tf_example", "def as_tf(self, expr):\n if isinstance(expr, NeuralQueryExpression):\n return expr.tf\n else:\n return expr", "def collate_tf_no_tokenize(arr, numericalize, tf_dict=None,\n pad=\"<pad>\", device='cuda'):\n arr = [[\"<s>\"] + a + [\"</s>\"] for a in arr]\n arr = [numericalize(a) for a in arr]\n\n if tf_dict is None:\n tf_matrix = [[1. 
for i in a] for a in arr]\n else:\n tf_matrix = [[tf_dict[i] for i in a] for a in arr]\n\n pad_token = numericalize([pad])[0]\n\n # masked_pos =\n\n padded, lens, mask = padding(arr, pad_token, dtype=torch.long)\n padded_tf, _, _ = padding(tf_matrix, pad_token, dtype=torch.float)\n\n padded = padded.to(device=device)\n mask = mask.to(device=device)\n lens = lens.to(device=device)\n return padded, padded_tf, lens, mask", "def create_from_array(cls, array: np.ndarray) -> \"TensorImage\":\n if array.dtype != np.uint8:\n raise ValueError(\"Expect numpy array with dtype=uint8.\")\n\n image_data = image_utils.ImageData(np.squeeze(array))\n return cls(image_data)", "def extract_into_tensor(arr, timesteps, broadcast_shape):\n device = timesteps.device\n assert arr.device == device\n res = arr[timesteps].float()\n new_dims = [1] * (len(broadcast_shape) - res.ndim)\n res = res.view(*res.shape, *new_dims)\n return torch.broadcast_to(res, broadcast_shape)", "def _to_tensor(x, dtype):\n x = tf.convert_to_tensor(x)\n if x.dtype != dtype:\n x = tf.cast(x, dtype)\n return x", "def do_decode(self, value, decode_fn):\n del decode_fn\n tensor_proto = value.tensor_value\n tensor = constant(tensor_util.MakeNdarray(tensor_proto))\n return tensor", "def create_variable(arr, dtype='float32', device=None, requires_grad=True, backend='autograd'):\n args = {}\n if backend == 'autograd':\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = anp.array(arr, **args)\n elif backend == 'pytorch':\n if dtype is not None:\n args['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n if device is not None:\n args['device'] = device\n args['requires_grad'] = requires_grad\n var = tc.tensor(arr, **args)\n return var", "def tensor(self, X):\n return tf.convert_to_tensor(X, dtype=self.dtype)", "def convert_raw_arrays(x, f):\n try:\n # Tensor, TensorNetwork...\n x = x.copy()\n x.apply_to_arrays(f)\n return x\n except AttributeError:\n pass\n\n try:\n # raw structured arrays that provide the {get|set}_params interface\n x = x.copy()\n x.set_params(tree_map(f, x.get_params()))\n return x\n except AttributeError:\n pass\n\n # other raw arrays\n return f(x)", "def _to_tf_timestep(time_step: ts.TimeStep) -> ts.TimeStep:\n\n time_step = tf_agents.utils.nest_utils.batch_nested_array(time_step)\n return tf.contrib.framework.nest.map_structure(tf.convert_to_tensor, time_step)", "def array_input_fn(array: np.ndarray, mode: str, batch_size: int):\n if len(array.shape) != 3:\n raise ValueError(\"`array` must have shape [n_samples, height, width].\")\n _assert_valid_mode(mode)\n\n dataset = tf.data.Dataset.from_tensor_slices(array)\n\n if mode == _TRAIN:\n dataset = dataset.shuffle(1000).repeat()\n\n dataset = dataset.batch(batch_size)\n\n return dataset", "def _to_tensor(cls, tensor):\n if isinstance(tensor, Tensor):\n return tensor\n return Tensor(data=tensor)", "def prepare_label(input_batch, new_size, num_classes, one_hot=True, task='seg'):\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.\n if task == 'seg':\n input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.\n if one_hot:\n input_batch = tf.one_hot(input_batch, depth=num_classes)\n return input_batch", "def create_constant(arr, dtype='float32', device=None, backend='autograd'):\n args = {}\n if backend == 'autograd':\n if dtype is not None:\n args['dtype'] = 
dtype_mapping_dict[dtype]['autograd']\n var = np.array(arr, **args)\n elif backend == 'pytorch':\n if dtype is not None:\n args['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n if device is not None:\n args['device'] = device\n args['requires_grad'] = False\n var = tc.tensor(arr, **args)\n else:\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = np.array(arr, **args)\n return var", "def make_tflite_inference(ndvi_img_array, model_interpreter):\n # Get input and output tensors.\n input_details = model_interpreter.get_input_details()\n output_details = model_interpreter.get_output_details()\n\n # Get Input shape\n input_shape = input_details[0]['shape']\n input_data = ndvi_img_array.reshape(input_shape)\n\n model_interpreter.set_tensor(input_details[0]['index'], input_data)\n model_interpreter.invoke()\n\n outputs = []\n\n for tensor in output_details:\n output_data = model_interpreter.get_tensor(tensor['index'])\n outputs.append(output_data[0][0])\n\n prediction = outputs[0]\n\n return prediction", "def create_placeholder(tensor, dtype=None):\r\n if isinstance(tensor, np.ndarray):\r\n if dtype is None:\r\n if tensor.dtype in {np.float32, np.float64, np.float16, np.float}:\r\n placeholder = tf.placeholder(tf.float32, tensor.shape)\r\n elif tensor.dtype in {np.int, np.int32, np.int64}:\r\n placeholder = tf.placeholder(tf.int32, tensor.shape)\r\n else:\r\n raise NotImplementedError('The dtype {} is not implemented.'.format(tensor.dtype))\r\n else:\r\n placeholder = tf.placeholder(dtype, tensor.shape)\r\n elif isinstance(tensor, tf.Tensor):\r\n raise TypeError('The input to placeholder cannot be tf.Tensor.')\r\n elif isinstance(tensor, (list, tuple)):\r\n if isinstance(dtype, (list, tuple)):\r\n placeholder = tuple([create_placeholder(\r\n single_tensor, single_dtype) for single_tensor, single_dtype in zip(tensor, dtype)])\r\n else:\r\n placeholder = tuple([create_placeholder(\r\n single_tensor, dtype) for single_tensor in tensor])\r\n else:\r\n raise NotImplementedError(\r\n 'Placeholder can only be created for numpy array, tf.Tensor, list or tuple')\r\n\r\n return placeholder" ]
[ "0.79168755", "0.6447744", "0.59538347", "0.59502006", "0.5948754", "0.5824443", "0.5824443", "0.57951134", "0.5754777", "0.5732912", "0.57207906", "0.57052535", "0.5687597", "0.5686336", "0.55867285", "0.5567659", "0.55511665", "0.55475926", "0.55343866", "0.5527259", "0.5490072", "0.5486798", "0.54864806", "0.5451805", "0.5420305", "0.5414768", "0.5385764", "0.53840655", "0.5371174", "0.5361851" ]
0.6905999
1
The mod_radial_average class constructor stores the parameters passed from the pyana configuration file in instance variables. All parameters, except address, are optional, and hence need not be defined in pyana.cfg. address Address string XXX Que?! out_dirname Optional directory portion of output average pathname out_basename Optional filename prefix of output average pathname xtal_target Phil file with target parameters, including metrology corrections two_theta_low Optional two theta value of interest two_theta_high Optional two theta value of interest
def __init__(self, address, out_dirname = None, out_basename = None, xtal_target = None, two_theta_low = None, two_theta_high = None, **kwds): super(mod_radial_average, self).__init__(address=address, **kwds) self.m_xtal_target = cspad_tbx.getOptString(xtal_target) self._basename = cspad_tbx.getOptString(out_basename) self._dirname = cspad_tbx.getOptString(out_dirname) self._two_theta_low = cspad_tbx.getOptFloat(two_theta_low) self._two_theta_high = cspad_tbx.getOptFloat(two_theta_high) if self._dirname is not None or self._basename is not None: assert self._dirname is not None and self._basename is not None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, mean, config):\n self.lb = config.get('lb', 0)\n self.ub = config.get('ub', sys.maxint)\n self.a = float(config['a'])", "def radial(*args, attenuation: Union[float, bool]=0.0, magnitude: Union[float, bool]=0.0,\n maxDistance: Union[float, bool]=0.0, name: Union[AnyStr, bool]=\"\", perVertex:\n bool=True, position: Union[List[float, float, float], List[List[float, float,\n float]], bool]=None, torusSectionRadius: Union[float, bool]=0.0, type: Union[float,\n bool]=0.0, volumeExclusion: bool=True, volumeOffset: Union[List[float, float, float],\n bool]=None, volumeShape: Union[AnyStr, bool]=\"\", volumeSweep: Union[float, bool]=0.0,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def __init__(self):\n\n self._parser = configargparse.ArgParser(\n description='ANN-SoLo: Approximate nearest neighbor spectral '\n 'library searching\\n'\n '================================================='\n '================\\n\\n'\n 'Bittremieux et al. Fast open modification '\n 'spectral library searching through approximate '\n 'nearest neighbor indexing. TODO: publication '\n 'information.\\n\\n'\n 'Official code website: '\n 'https://github.com/bittremieux/ANN-SoLo\\n\\n',\n default_config_files=['config.ini'],\n args_for_setting_config_path=['-c', '--config'],\n formatter_class=NewlineTextHelpFormatter)\n\n # IO\n self._parser.add_argument(\n 'spectral_library_filename', help='spectral library file')\n self._parser.add_argument(\n 'query_filename', help='query mgf file')\n self._parser.add_argument(\n 'out_filename',\n help='name of the mzTab output file containing the search '\n 'results')\n\n # PREPROCESSING\n # spectral library resolution to round mass values\n self._parser.add_argument(\n '--resolution', default=None, type=int,\n help='spectral library resolution; masses will be rounded to '\n 'the given number of decimals (default: no rounding)')\n\n # minimum and maximum fragment peak mass values\n self._parser.add_argument(\n '--min_mz', default=11, type=int,\n help='minimum m/z value (inclusive, default: %(default)s m/z)')\n self._parser.add_argument(\n '--max_mz', default=2010, type=int,\n help='maximum m/z value (inclusive, default: %(default)s m/z)')\n\n # remove peaks around the precursor mass from fragment spectra\n self._parser.add_argument(\n '--remove_precursor', action='store_true',\n help='remove peaks around the precursor mass '\n '(default: no peaks are removed)')\n self._parser.add_argument(\n '--remove_precursor_tolerance', default=0, type=float,\n help='the window (in m/z) around the precursor mass to remove '\n 'peaks (default: %(default)s m/z)')\n\n # minimum fragment peak intensity to filter out noise peaks\n self._parser.add_argument(\n '--min_intensity', default=0.01, type=float,\n help='remove peaks with a lower intensity relative to the '\n 'maximum intensity (default: %(default)s)')\n\n # minimum number of fragment peaks or mass range (Dalton)\n self._parser.add_argument(\n '--min_peaks', default=10, type=int,\n help='discard spectra with less peaks (default: %(default)s)')\n self._parser.add_argument(\n '--min_mz_range', default=250, type=int,\n help='discard spectra with a smaller mass range '\n '(default: %(default)s m/z)')\n\n # maximum number of fragment peaks to use for each spectrum\n self._parser.add_argument(\n '--max_peaks_used', default=50, type=int,\n help='only use the specified most intense peaks '\n '(default: %(default)s)')\n\n # manner in which to scale the peak intensities\n self._parser.add_argument(\n 
'--scaling', default='rank', type=str,\n choices=['sqrt', 'rank'],\n help='to reduce the influence of very intense peaks, scale the'\n ' peaks by their square root or by their rank '\n '(default: %(default)s)')\n\n # MATCHING\n # maximum SSM precursor mass tolerance\n self._parser.add_argument(\n '--precursor_tolerance_mass', type=float, required=True,\n help='precursor mass tolerance (small window for the first '\n 'level of the cascade search)')\n self._parser.add_argument(\n '--precursor_tolerance_mode', type=str,\n choices=['Da', 'ppm'], required=True,\n help='precursor mass tolerance unit (options: %(choices)s)')\n self._parser.add_argument(\n '--precursor_tolerance_mass_open', type=float,\n help='precursor mass tolerance (wide window for the second '\n 'level of the cascade search)')\n self._parser.add_argument(\n '--precursor_tolerance_mode_open', type=str,\n choices=['Da', 'ppm'],\n help='precursor mass tolerance unit (options: %(choices)s)')\n\n # fragment peak matching\n self._parser.add_argument(\n '--fragment_mz_tolerance', type=float, required=True,\n help='fragment mass tolerance (m/z)')\n\n # shifted dot product\n self._parser.add_argument(\n '--allow_peak_shifts', action='store_true',\n help='use the shifted dot product instead of the standard dot '\n 'product')\n \n # maximum FDR\n self._parser.add_argument(\n '--fdr', default=0.01, type=float,\n help='FDR threshold to accept identifications during the '\n 'cascade search (default: %(default)s)')\n\n self._parser.add_argument(\n '--fdr_tolerance_mass', default=0.1, type=float,\n help='mass difference bin width for the group FDR calculation '\n 'during the second cascade level (default: %(default)s '\n 'Da)')\n self._parser.add_argument(\n '--fdr_tolerance_mode', default='Da', type=str,\n choices=['Da', 'ppm'],\n help='mass difference bin unit for the group FDR calculation '\n 'during the second cascade level (default: %(default)s)')\n self._parser.add_argument(\n '--fdr_min_group_size', default=20, type=int,\n help='minimum group size for the group FDR calculation '\n 'during the second cascade level (default: %(default)s)')\n\n # MODE\n # use an ANN index or the conventional brute-force mode\n self._parser.add_argument(\n '--mode', default='ann', type=str, choices=['ann', 'bf'],\n help=\"search using an approximate nearest neighbors or the \"\n \"traditional (brute-force) mode; 'bf': brute-force, \"\n \"'ann': approximate nearest neighbors (default: \"\n \"%(default)s)\")\n\n # bin size for the ANN index (Dalton)\n self._parser.add_argument(\n '--bin_size', default=1.0, type=float,\n help='ANN vector bin width (default: %(default)s Da)')\n\n # number of candidates to retrieve from the ANN index for each query\n self._parser.add_argument(\n '--num_candidates', default=5000, type=int,\n help='number of candidates to retrieve from the ANN index for '\n 'each query (default: %(default)s)')\n\n # minimum number of candidates for a query before ANN indexing is used\n self._parser.add_argument(\n '--ann_cutoff', default=5000, type=int,\n help='minimum number of candidates for a query before ANN '\n 'indexing is used to refine the candidates '\n '(default: %(default)s)')\n\n # custom Annoy parameters\n # number of ANN trees\n self._parser.add_argument(\n '--num_trees', default=1000, type=int,\n help='number of trees in the ANN index (default: %(default)s)')\n\n # number of nodes to explore during ANN searching\n self._parser.add_argument(\n '--search_k', default=50000, type=int,\n help='number of nodes to explore in the ANN index 
during '\n 'searching (default: %(default)s)')\n\n # filled in 'parse', contains the specified settings\n self._namespace = None", "def __init__(self, center, initialRadius, finalRadius, initialAngle, finalAngle, cartesianImageSize,\n polarImageSize):\n self.center = center\n self.initialRadius = initialRadius\n self.finalRadius = finalRadius\n self.initialAngle = initialAngle\n self.finalAngle = finalAngle\n self.cartesianImageSize = cartesianImageSize\n self.polarImageSize = polarImageSize", "def setup_antenna(self, response_data, center_frequency=500e6,\n bandwidth=800e6, temperature=325, resistance=50,\n orientation=(0,0,1), efficiency=1, noisy=True,\n unique_noise_waveforms=10, **kwargs):\n # Noise rms should be about 40 mV (after filtering with gain of ~5000).\n # This is mostly satisfied by using the default noise temperature from\n # AraSim, 325 K, along with a 50 ohm resistance\n # Additionally, the bandwidth of the antenna is set slightly larger\n # than the nominal bandwidth of the true ARA antenna system (700 MHz),\n # but the extra frequencies should be killed by the front-end filter\n super().setup_antenna(response_data=response_data,\n position=self.position,\n center_frequency=center_frequency,\n bandwidth=bandwidth,\n temperature=temperature,\n resistance=resistance,\n orientation=orientation,\n efficiency=efficiency,\n noisy=noisy,\n unique_noise_waveforms=unique_noise_waveforms,\n **kwargs)", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def execute(self,callback=None):\n\t\toptions=self.options\n\n\t\tif options[\"verbose\"]>0 : print \"Start averaging class \",options[\"n\"]\n\n\t\ttry: ref=EMData(self.data[\"ref\"][1],self.data[\"ref\"][2])\n\t\texcept: ref=None\n\n#\t\tprint [self.data[\"images\"][1]]+self.data[\"images\"][2]\n\n\t\t# make the class-average\n\t\ttry:\n\t\t\tavg,ptcl_info=class_average([self.data[\"usefilt\"][1]]+self.data[\"usefilt\"][2],ref,options[\"niter\"],options[\"normproc\"],options[\"prefilt\"],options[\"align\"],\n\t\t\t\toptions[\"aligncmp\"],options[\"ralign\"],options[\"raligncmp\"],options[\"averager\"],options[\"scmp\"],options[\"keep\"],options[\"keepsig\"],\n\t\t\t\toptions[\"automask\"],options[\"saveali\"],options[\"verbose\"],callback)\n\t\texcept KeyboardInterrupt: return None\n\t\texcept SystemExit: return None\n\t\texcept:\n\t\t\treturn {\"average\":None,\"info\":None,\"n\":self.options[\"n\"]}\n\n\t\ttry: ref_orient=avg[\"xform.projection\"]\n\t\texcept: ref_orient=None\n\n\t\ttry: ref_model=avg[\"model_id\"]\n\t\texcept: ref_model=0\n\n\t\t# Final alignment to the reference (if there is one)\n\t\tif ref!=None :\n\t\t\t#ref.process_inplace(\"normalize.edgemean\")\n\t\t\t# This was commented out because the class-average doesn't have CTF parameters (or shouldn't) and\n\t\t\t# often will be using a comparator which makes use of CTF. Hard-coding the aligner for now\n\t\t\t#ali=align_one(avg,ref,True,self.options[\"align\"],self.options[\"aligncmp\"],self.options[\"ralign\"],self.options[\"raligncmp\"])\n#\t\t\tali=align_one(avg,ref,True,(\"rotate_translate_flip_iterative\",{}),(\"ccc\",{}),(\"refine\",{}),(\"ccc\",{}))\n\t\t\t# changed to this in 3/6/14 because it was causing class-averages done without flipping to sometimes become flipped. 
Also not sure if I trust the _iterative aligner\n\t\t\tali=align_one(avg,ref,True,(\"rotate_translate\",{}),(\"ccc\",{}),(\"refine\",{}),(\"ccc\",{}))\n\t\t\tfxf=ali[\"xform.align2d\"]\n\t\t\tavg1=avg\n\t\t\tif options[\"verbose\"]>0 : print \"Final realign:\",fxf\n#\t\t\tavg=class_average_withali([self.data[\"images\"][1]]+self.data[\"images\"][2],ptcl_info,Transform(),options[\"averager\"],options[\"normproc\"],options[\"verbose\"])\n#\t\t\tavg.write_image(\"bdb:xf\",-1)\n\t\t\tavg=class_average_withali([self.data[\"images\"][1]]+self.data[\"images\"][2],ptcl_info,fxf,ref,options[\"averager\"],options[\"normproc\"],options[\"setsfref\"],options[\"verbose\"])\n#\t\t\tavg.write_image(\"bdb:xf\",-1)\n\n\t\t\t#self.data[\"ref\"].write_image(\"tst.hdf\",-1)\n\t\t\t#avg1.write_image(\"tst.hdf\",-1)\n\t\t\t#avg.write_image(\"tst.hdf\",-1)\n\t\telse :\n\t\t\t# Nothing to align to, so we just try to center the final average\n\t\t\t#gmw=max(5,avg[\"nx\"]/16)\t\t# gaussian mask width\n\t\t\t#avg.process_inplace(\"filter.highpass.gauss\",{\"cutoff_pixels\":min(avg[\"nx\"]/10,5)})\t# highpass to reduce gradient issues\n\t\t\t#avg.process_inplace(\"normalize.circlemean\")\n\t\t\t#avg.process_inplace(\"mask.gaussian\",{\"inner_radius\":avg[\"nx\"]/2-gmw,\"outer_radius\":gmw/1.3})\n\t\t\t#avg.process_inplace(\"filter.lowpass.gauss\",{\"cutoff_abs\":0.07})\n\t\t\t#avg.process_inplace(\"normalize.circlemean\")\n\t\t\t#ali=avg.process(\"threshold.binary\",{\"value\":avg[\"mean\"]+avg[\"sigma\"]*1.5})\n\t\t\t#ali.process_inplace(\"xform.centerofmass\",{\"threshold\":0.5})\n\t\t\t\n\t\t\tfxf = Transform() #jesus\n\t\t\tif self.center:\t #jesus\n\t\t\t\t#print \"self.center is\", self.center, type(self.center)\n\t\t\t\tali=avg.process(self.center)\n\t\t\t\tfxf=ali[\"xform.align2d\"] #jesus\n\t\t\t\n\t\t\tif options[\"verbose\"]>0 : print \"Final center:\",fxf.get_trans_2d()\n\t\t\tavg1=avg\n\t\t\tavg=class_average_withali([self.data[\"images\"][1]]+self.data[\"images\"][2],ptcl_info,fxf,None,options[\"averager\"],options[\"normproc\"],options[\"setsfref\"],options[\"verbose\"])\n\t\ttry:\n\t\t\tavg[\"class_ptcl_qual\"]=avg1[\"class_ptcl_qual\"]\n\t\t\tavg[\"class_ptcl_qual_sigma\"]=avg1[\"class_ptcl_qual_sigma\"]\n\t\texcept: pass\n\n\t\tif ref_orient!=None:\n\t\t\tavg[\"xform.projection\"]=ref_orient\n\t\t\tavg[\"model_id\"]=ref_model\n\t\t\ttry: avg[\"projection_image_idx\"]=self.data[\"ref\"][2]\n\t\t\texcept: pass\n\n\t\treturn {\"average\":avg,\"info\":ptcl_info,\"n\":options[\"n\"]}", "def event(self, evt, env):\n\n super(mod_radial_average, self).event(evt, env)\n if (evt.get(\"skip_event\")):\n return\n\n # This module only applies to detectors for which a distance is\n # available.\n distance = cspad_tbx.env_distance(self.address, env, self._detz_offset)\n if distance is None:\n self.nfail += 1\n self.logger.warning(\"event(): no distance, shot skipped\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n\n # See r17537 of mod_average.py.\n device = cspad_tbx.address_split(self.address)[2]\n if device == 'Cspad':\n pixel_size = cspad_tbx.pixel_size\n saturated_value = cspad_tbx.cspad_saturated_value\n elif device == 'marccd':\n pixel_size = 0.079346\n saturated_value = 2**16 - 1\n elif device == 'Rayonix':\n pixel_size = rayonix_tbx.get_rayonix_pixel_size(self.bin_size)\n saturated_value = rayonix_tbx.rayonix_saturated_value\n\n d = cspad_tbx.dpack(\n active_areas=self.active_areas,\n address=self.address,\n beam_center_x=pixel_size * self.beam_center[0],\n beam_center_y=pixel_size * 
self.beam_center[1],\n data=self.cspad_img.iround(), # XXX ouch!\n distance=distance,\n pixel_size=pixel_size,\n saturated_value=saturated_value,\n timestamp=self.timestamp,\n wavelength=self.wavelength,\n xtal_target=self.m_xtal_target)\n\n from xfel.command_line.radial_average import run\n args = [\n \"file_path=XTC stream\",\n \"xfel_target=%s\"%self.m_xtal_target,\n \"verbose=False\"\n ]\n\n t = self.timestamp\n s = t[0:4] + t[5:7] + t[8:10] + t[11:13] + t[14:16] + t[17:19] + t[20:23]\n\n if self._dirname is not None:\n dest_path = os.path.join(self._dirname, self._basename + s + \".txt\")\n args.append(\"output_file=%s\"%dest_path)\n\n self.logger.info(\"Calculating radial average for image %s\"%s)\n xvals, results = run(args, d)\n\n evt.put(xvals, \"cctbx.xfel.radial_average.xvals\")\n evt.put(results, \"cctbx.xfel.radial_average.results\")\n\n def get_closest_idx(data, val):\n from scitbx.array_family import flex\n deltas = flex.abs(data - val)\n return flex.first_index(deltas, flex.min(deltas))\n\n if self._two_theta_low is not None:\n i_low = results[get_closest_idx(xvals, self._two_theta_low)]\n evt.put(i_low, \"cctbx.xfel.radial_average.two_theta_low\")\n\n if self._two_theta_high is not None:\n i_high = results[get_closest_idx(xvals, self._two_theta_high)]\n evt.put(i_high, \"cctbx.xfel.radial_average.two_theta_high\")", "def __init__(self, optimization_options, network, *args, **kwargs):\n\n self._params = Parameters()\n for path, param in network.get_variables().items():\n self._params.add(path + '_gradient',\n numpy.zeros_like(param.get_value()))\n self._params.add(path + '_mean_sqr_gradient',\n numpy.zeros_like(param.get_value()))\n self._params.add(path + '_mean_sqr_velocity',\n numpy.zeros_like(param.get_value()))\n\n # geometric rate for averaging gradients\n if 'gradient_decay_rate' not in optimization_options:\n raise ValueError(\"Gradient decay rate is not given in optimization \"\n \"options.\")\n self._gamma = optimization_options['gradient_decay_rate']\n\n super().__init__(optimization_options, network, *args, **kwargs)", "def __init__(self, amplitude, centerpt, inner_rad, outer_rad, color):\n self._amplitude = amplitude\n self._centerpt = centerpt\n self._inner_rad = inner_rad\n self._outer_rad = outer_rad\n self._color = color\n # Find the amplitude -> alpha conversion factor\n maxampl = numpy.nanmax(amplitude)\n self._amplfactor = 256 / maxampl * 50", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material", "def __init__(self, params):\r\n _params = {\r\n 'min_pct': 75.0,\r\n 'min_len': 150,\r\n 'blast_db': None,\r\n 'template_filepath': None,\r\n 'pairwise_alignment_method': 'blast',\r\n 'Application': 'PyNAST',\r\n 'Algorithm': 'NAST',\r\n }\r\n _params.update(params)\r\n Aligner.__init__(self, _params)", "def __init__(self, **kwargs):\n\n self.path = kwargs['path']\n self.prefix = kwargs['prefix']\n self.num_points = kwargs['num_points']\n self.blur_radius = kwargs['blur_radius']\n self.num_colors = kwargs['num_colors']", "def __init__(self, number_radial, number_angles, max_peak, force=False, addition_random=False):\n self.number_radial = number_radial\n self.number_angles = number_angles\n self.array = np.zeros((self.number_radial, self.number_angles))\n self.max_peak = max_peak\n self.topple_count = 0\n self.mass_count = 0\n self.mass_fallen_count = 0\n 
self.mass_left_count = 0\n self.mass_when_iteration = []\n self.when_topple = []\n\n self.number_of_simulations = 0\n\n self.force = force\n self.addition_random = addition_random\n\n self.angles_array = np.linspace(0, 2 * np.pi, number_angles + 1)\n self.radial_array = np.linspace(0, number_radial, number_radial + 1)", "def radial_graph(self):\n \n if self['M_RADIAL']['intens'] != None:\n name = self['name']\n id = self._getGraphId()\n figname = 'RADIAL_%s.eps' % id\n sxlabel = 'Pixel Radius' ; sylabel = 'Intens' \n title = 'Radial profile, %s' % (name,)\n y = self['M_RADIAL']['intens']\n x = self['M_RADIAL']['radii']\n xy = ((x,y),)\n Plot(xy,figname,sxlabel,sylabel,title)\n self['figures']['radial'] = figname\n else : pass", "def __init__(self,average,height):\n super().__init__(average)\n self.heightAboveGround = height\n self.__altitude = collections.deque('', average)\n self.__vertspeed = Parameter(10)", "def average( names):\n\n rs = radioastronomy.Spectrum() # create input and average structures\n asum = radioastronomy.Spectrum()\n nsum = 0\n\n # now average coldest data for calibration\n for filename in names:\n\n rs.read_spec_ast(filename)\n rs.azel2radec() # compute ra,dec from az,el\n\n if nsum == 0:\n asum = copy.deepcopy( rs)\n firstlon = rs.gallon\n asum.ydataA = rs.ydataA * rs.durationSec\n asum.gallat = rs.gallat * rs.durationSec\n asum.gallon = rs.gallon * rs.durationSec\n nsum = 1\n firstutc = rs.utc\n lastutc = rs.utc\n else:\n asum.ydataA = asum.ydataA + (rs.ydataA * rs.durationSec)\n asum.count = asum.count + rs.count\n asum.durationSec = asum.durationSec + rs.durationSec\n # fix wrap of longitudes\n if abs(rs.gallon - firstlon) > 180:\n crossZero = True\n if rs.gallon > firstlon:\n rs.gallon = rs.gallon - 360.\n else:\n rs.gallon = rs.gallon + 360.\n asum.gallon = asum.gallon + (rs.gallon * rs.durationSec)\n asum.gallat = asum.gallat + (rs.gallat * rs.durationSec)\n # keep track of observing time for weighted sum\n lastutc = rs.utc\n nsum = nsum + 1\n #end for all files loop\n\n if nsum < 1:\n print \"No acceptable files in average list\"\n else:\n asum.ydataA = asum.ydataA/float(asum.durationSec)\n asum.gallon = asum.gallon/float(asum.durationSec)\n asum.gallat = asum.gallat/float(asum.durationSec)\n aveutc,duration = radioastronomy.aveutcs( firstutc, lastutc)\n asum.utc = aveutc\n if (duration < 1.):\n print 'hotcold.average: very short average interval: ',duration\n return nsum, asum", "def __init__ (self, config, logger):\n self.logger = logger\n self.logger.add('loading AREA')\n config['data_type'] = np.float32\n self.area = AreaGrid(config,logger = self.logger)\n self.area.config['dataset_name'] = 'Area Data'\n self.area.config['description'] = \\\n \"\"\"Area Data contains fractional cohort data for each year the ATM\n was run. 
\n \"\"\"\n self.logger.add('performing post AREA setup')\n self.shape = self.area.config['grid_shape']\n self.aoi = self.area.area_of_interest()\n config['shape'] = self.shape\n config['grid_shape'] = self.area.config['grid_shape']\n config['AOI mask'] = self.aoi\n config['cohort list'] = self.area.get_cohort_list()\n self.logger.add('loading ALD')\n self.ald = ALDGrid(config,logger = self.logger)\n self.ald.config['dataset_name'] = 'ALD Data'\n self.ald.config['description'] = \\\n \"\"\"ALD Data contains ALD, and Protective Layer data for each year \n the ATM was run.\n \"\"\"\n self.logger.add('loading POI')\n self.poi = POIGrid(config,logger = self.logger)\n self.poi.config['dataset_name'] = 'POI Data'\n self.poi.config['description'] = \\\n \"\"\"POI Data contains Poi data for each year the ATM was run. \n \"\"\"\n self.logger.add('loading ICE')\n self.ice = IceGrid(config,logger = self.logger)\n self.ice.config['dataset_name'] = 'Ice Data'\n self.ice.config['description'] = \\\n \"\"\"\n Ice Data contains the ice content grid for the ATM model run\n \"\"\"\n self.logger.add('loading LAKE POND')\n self.lake_pond = LakePondGrid(config,logger = self.logger)\n self.lake_pond.config['dataset_name'] = 'Lake Pond Data'\n self.lake_pond.config['description'] = \\\n \"\"\"Lake-Pond Data contains Lake and Pond depth and count data for \n each year the ATM was run. \n \"\"\"\n self.logger.add('loading CLIMATE EVENT')\n self.climate_event = ClimateEventGrid(config,logger = self.logger)\n self.climate_event.config['dataset_name'] = 'Climate Event Data'\n self.climate_event.config['description'] = \\\n \"\"\"Climate Event Data contains climate event data for each \n year the ATM was run. \n \"\"\"\n ## TODO:redo masks here\n # for lpt in config['pond types'] + config['lake types']:\n # #~ print lpt\n # mask = self.area[lpt][0] > 0 # all cells in first ts > 0\n # self.lake_pond.apply_mask(lpt, mask)\n self.logger.add('loading DRAINGAGE')\n self.drainage = DrainageGrid(config,logger = self.logger)\n self.drainage.config['dataset_name'] = 'Drainage Data'\n self.drainage.config['description'] = \"\"\"\n Drainage contains the drainage grid for the ATM model run\n \"\"\"\n \n self.logger.add('loading DEGREE DAY')\n self.degreedays = DegreeDayGrids(\n os.path.join(\n config['Input_dir'], config['Met_Control']['FDD_file']),\n os.path.join(\n config['Input_dir'], config['Met_Control']['TDD_file'])\n )\n \n ## what does this do?\n self.ald.setup_ald_constants(\n self.degreedays.thawing[config['start year']]\n )", "def __init__(self, cfg=None):\n if cfg is None:\n cfg = (1, 1, \"persist\")\n self.cfg = cfg\n self.shift, self.offset, self.avg_type = cfg\n self.forecast = self.average_forecast\n self.averager = None\n if self.avg_type == \"mean\":\n self.averager = np.mean\n elif self.avg_type == \"median\":\n self.averager = np.median\n elif self.avg_type == \"persist\":\n self.forecast = self.persistence_forecast\n elif self.avg_type == \"drift\":\n self.forecast = self.drift_forecast", "def __init__(self, mean, var=2):\n\n self.mean = mean\n self.var = var", "def __init__(self, cfg):\r\n\r\n\t\tself.image_size = cfg.MODEL.INPUT.IMAGE_SIZE\r\n\t\tanchor_config = cfg.MODEL.ANCHORS\r\n\t\tself.feature_maps = anchor_config.FEATURE_MAPS\r\n\t\tself.min_sizes = anchor_config.MIN_SIZES\r\n\t\tself.max_sizes = anchor_config.MAX_SIZES \r\n\t\tself.aspect_ratios = anchor_config.ASPECT_RATIOS\r\n\t\tself.clip = anchor_config.CLIP", "def mean_radius(self, mean_radius):\n\n self._mean_radius = mean_radius", "def 
__init__(self, avg_rate, max_rate):\n self.average = avg_rate\n self.maximum = max_rate", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def __init__(self, optimization_options, network, *args, **kwargs):\n\n self._params = Parameters()\n for path, param in network.get_variables().items():\n self._params.add(path + '_gradient',\n numpy.zeros_like(param.get_value()))\n self._params.add(path + '_sum_sqr_gradient',\n numpy.zeros_like(param.get_value()))\n\n super().__init__(optimization_options, network, *args, **kwargs)", "def __init__(self,r1,r2):\n self.r1 = r1\n self.r2 = r2\n self.a = (r1+r2)/2", "def __init__(self, nrange=75, nbeam=15, r0=180, dr=45, dtheta=3.24, theta0=None):\n # Set member variables\n self.nrange = int(nrange)\n self.nbeam = int(nbeam+1)\n self.r0 = r0\n self.dr = dr\n self.dtheta = dtheta\n # Initial angle (from X, polar coordinates) for beam 0\n if theta0 == None:\n self.theta0 = (90 - dtheta * nbeam / 2) # By default, point fanplot towards 90 deg\n else:\n self.theta0 = theta0\n return", "def __init__(self, **kwargs):\n self.max_samples = get_value(kwargs, 'max_samples', types=[int], range_min=0, default_value=0)\n self.obs_filename = get_value(kwargs, 'lba_obs_file', types=[str, None], default_value=None)\n self.sample_rate = get_value(kwargs, 'sample_rate', types=[int, None], default_value=None)\n if self.sample_rate is None:\n LOG.warning(\"No sample rate provided, defaulting to {0}\".format(self._default_sample_rate))\n self.sample_rate = self._default_sample_rate\n self.antenna_name = get_value(kwargs, 'lba_antenna_name', types=[str, None], default_value=None)\n if self.obs_filename is not None and self.antenna_name is None:\n raise RuntimeError(\"LBA file is missing --lba_antenna_name parameter which is needed when using \"\n \"--lba_obs_file\")\n self.chunk_size = get_value(kwargs, 'chunk_size', types=[int], range_min=0, default_value=4096)", "def __init__(self):\n\n self._P = 0 # number of pixels\n self._x = 0.0 # x-coordinate of center of mass, i.e.\n # the avg x-coordinate\n self._y = 0.0 # y-coordinate of center of mass, i.e.\n # the avg y-coordinate" ]
[ "0.58823407", "0.5639091", "0.563347", "0.55161", "0.53948456", "0.53743595", "0.5315376", "0.53023726", "0.5255032", "0.5187186", "0.5184907", "0.5184907", "0.5175284", "0.51109797", "0.50662816", "0.504376", "0.5026226", "0.4991186", "0.49473262", "0.49444625", "0.49436808", "0.492361", "0.49202937", "0.49113482", "0.4907126", "0.49058497", "0.48933753", "0.48907882", "0.4863774", "0.48567486" ]
0.85535985
0
The beginjob() function does one-time initialisation from event or environment data. It is called at an XTC configure transition. evt Event data object, a configure object env Environment object
def beginjob(self, evt, env): super(mod_radial_average, self).beginjob(evt, env)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def begin(self):\n\n env = self.context.lookup(\"/environment\")\n\n self._test_results_dir = env[\"output_directory\"]\n self._starttime = env[\"starttime\"]\n self._runid = env[\"runid\"]\n\n self._result_filename = os.path.join(self._test_results_dir, \"testrun_results.jsos\")\n self._summary_filename = os.path.join(self._test_results_dir, \"testrun_summary.json\")\n self._import_errors_filename = os.path.join(self._test_results_dir, \"import_errors.jsos\")\n\n return", "def _do_begin(self):\n self.backend.begin()", "def beginJob(self, histFile, histDirName):\n Module.beginJob(self, histFile, histDirName)\n for itrig,jtrig in zip(self.triggerpaths,self.triggerprescale):\n self.addObject (ROOT.TH1F('h_ak4pt_' + itrig, 'h_ak4pt_' + itrig, 5, 0, 1000) )\n self.addObject (ROOT.TH1F('h_ak4pt_' + itrig + '_eff_' + jtrig, 'h_ak4pt_' + jtrig, 5, 0, 1000) )", "def _initJobs(self):\n pass", "def jobPreRun(job):\n\tif 'b' in job.proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying job begins')\n\t\tEMAIL.send('job', job, 'begin')", "def initialize(context): \n log.info(\"initialzing\")\n context.prime = False\n \n # Rebalance every day, 1 hour after market open.\n schedule_function(my_rebalance, date_rules.month_start(), time_rules.market_open(hours=1))\n \n # Record tracking variables at the end of each day.\n schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())\n \n\n # Create our dynamic stock selector.\n attach_pipeline(make_pipeline(), 'my_pipeline')", "def initiate(self):\n self._load_parameters()\n self._initiate_region_dict()\n self._initiate_parameter_dict()\n self.initiated = True", "def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n \n self._dts = rift.tasklets.DTS(self.tasklet_info,\n UtCompositeYang.get_schema(),\n self._loop,\n self.on_dts_state_change) \n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.log.debug(\"Starting TestDriverTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING", "def tpc_begin(self, transaction):\n raise NotImplementedError", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def __initialize__(self, component_id, invocation_q, start_time=0.0):\n #timer = pytau.profileTimer(\"component.__initialize__\", \"\", str(os.getpid()))\n #pytau.start(timer)\n self.component_id = component_id\n self.invocation_q = invocation_q\n self.start_time = start_time\n# setattr(sys, 'exit', sys.exit)\n\n #pytau.stop(timer)\n return", "def init_workflow():\n pass", "def _call_initialization(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def beginExecution(self):\n self.setup = self.am_getOption(\"Setup\", self.setup)\n self.enabled = self.am_getOption(\"EnableFlag\", self.enabled)\n self.restartAgents = self.am_getOption(\"RestartAgents\", self.restartAgents)\n self.restartExecutors = self.am_getOption(\"RestartExecutors\", self.restartExecutors)\n self.restartServices = self.am_getOption(\"RestartServices\", self.restartServices)\n self.diracLocation = 
os.environ.get(\"DIRAC\", self.diracLocation)\n self.addressTo = self.am_getOption('MailTo', self.addressTo)\n self.addressFrom = self.am_getOption('MailFrom', self.addressFrom)\n self.controlComponents = self.am_getOption('ControlComponents', self.controlComponents)\n self.commitURLs = self.am_getOption('CommitURLs', self.commitURLs)\n\n self.csAPI = CSAPI()\n\n res = self.getRunningInstances(instanceType='Agents')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running agents\")\n self.agents = res[\"Value\"]\n\n res = self.getRunningInstances(instanceType='Executors')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running executors\")\n self.executors = res[\"Value\"]\n\n res = self.getRunningInstances(instanceType='Services')\n if not res[\"OK\"]:\n return S_ERROR(\"Failure to get running services\")\n self.services = res[\"Value\"]\n\n self.accounting.clear()\n return S_OK()", "def init(self, job_start):\n self.server_addr = self.server.start(self)\n self.job_start = job_start\n self._start_worker()", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n\n self._dts = rift.tasklets.DTS(\n self.tasklet_info,\n composite.get_schema(),\n self._loop,\n self.on_dts_state_change)\n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.interfaces = {}\n self.log.debug(\"Starting TestTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "def test_init(self):\n self.assertEqual(self.job.InputArgs, {'ParamFile': 'test.txt', \n 'SubmitToTestDB': 'True', \n 'UserId': '1', 'Mapping': '5', \n 'ProcessOnly': 'True', \n 'StudyID': '2', 'SFF': '10', \n 'SeqPlatform': 'FLX'})\n self.assertEqual(self.job.OracleJobName, 'jobname')", "def do_begin(begin):\n if begin:\n do_action(begin)", "def __init__(self, runId, pipelineName, topic, brokerHost,brokerPort=None):\n JobOfficeClient.__init__(self, runId, pipelineName, \n brokerHost, brokerPort=brokerPort)\n \n self.jobSender = utils.EventSender(self.runId, topic, brokerHost,\n self.getOriginatorId(), brokerPort)", "def startup(self,context):\n master_socket = int(12345)\n self.task_queue = context.InputQueue\n self.result_queue = context.OutputQueue\n manager = Manager()\n self.dict_position = manager.dict()\n self.dict_cycle = manager.dict()\n self.dict_worker_info = manager.dict()\n\n TaskManager.register('get_job_queue',\n callable = lambda:self.task_queue)\n TaskManager.register('get_result_queue',\n callable = lambda:self.result_queue)\n TaskManager.register('get_data',\n callable = lambda:self.dict_position)\n TaskManager.register('get_cycle',\n callable = lambda:self.dict_cycle)\n TaskManager.register('set_worker_info',\n callable = lambda:self.dict_worker_info)\n self.m = TaskManager(address = ('', master_socket),\n authkey = 
b'secret')\n\n\n thread = Thread(target=self.runServer)\n thread.start()", "def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')", "def on_start(self):\n self.id = str(self.locust).split('object at')[1].strip().replace('>','')\n\n # get data from common config\n self.read_locust_config()\n\n # get data from tempest config\n self.keystone_user = self.get_tempest_config_value('identity','username')\n self.keystone_tenant = self.get_tempest_config_value('identity','tenant_name')\n self.keystone_pw = self.get_tempest_config_value('identity','password')\n self.keystone_uri = self.get_tempest_config_value('identity','uri')\n\n self.output(\"Prepare to be rannsaka'd...\")", "def do_instance_start(self, component_handle, instance_handle):\n logger.debug(\"RwdtstaskletPython: do_instance_start function called\")\n\n # Create an instance of DTS API - This object is needed by all DTS\n # member and query APIs directly or indirectly.\n # DTS invokes the callback to notify the tasklet that the DTS API instance is ready \n # for use.\n\n foo = Callback()\n #sub = SubscribeInsideXactExample(self)\n self.dts_api = RwDts.Api.new(self.taskletinfo, # tasklet object\n RwDtsToyTaskletYang.get_schema(), # Schema object\n foo.rwdts_tasklet_state_change_cb, # The callback for DTS state change\n #sub.rwdts_tasklet_state_change_cb,\n self) # user data in the callback - in this case self", "def autonomousInit(self):\n '''\n self.cumulativeTime=0\n self.totalTime=0\n self.dataSet=[[-0.5,0,1,-1.0],[0.3,0.4,1,1.0],[-0.5,0,1,-1.0]]\n for i in self.dataSet:\n self.totalTime+=i[2]\n self.intervals = 0\n self.currentTime = 0\n for i in range(0,len(self.dataSet)):\n self.dataSet[i].append([self.currentTime,self.currentTime+self.dataSet[i][2]])\n self.currentTime+=self.dataSet[i][2]\n for i in self.dataSet:\n if i[3]==1.0:\n i.append(\"Forward\")\n if i[3]==-1.0:\n i.append(\"Backward\")\n \n self.timer.reset()\n self.timer.start()\n '''\n self.timer.reset()\n self.timer.start()\n\n #self.auto = self.chooser.getSelected()\n self.auto = 6\n self.autoState = 0\n #self.auto = 1\n\n self.EC1.reset()\n \n\n #self.auto = self.chooser.getSelected()", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()", "def startup(self, event):\n # The engine is starting up. The main task is to do a catch up on any\n # data still on the station, but not yet put in the database. Not\n # all consoles can do this, so be prepared to catch the exception:\n try:\n self._catchup(self.engine.console.genStartupRecords)\n except NotImplementedError:\n pass" ]
[ "0.57861096", "0.56085485", "0.5528966", "0.54996747", "0.5454728", "0.54298323", "0.54231644", "0.54102683", "0.53629297", "0.53195006", "0.53149587", "0.5282009", "0.5270217", "0.5252762", "0.5218137", "0.52084434", "0.51983976", "0.5182246", "0.5144393", "0.5140691", "0.51076174", "0.5082076", "0.50806403", "0.50766003", "0.5072547", "0.5066937", "0.5060767", "0.5047144", "0.5018676", "0.5010813" ]
0.5729683
1
The endjob() function logs the number of processed shots. evt: Event object (psana only); env: Environment object
def endjob(self, obj1, obj2=None): if obj2 is None: env = obj1 else: evt = obj1 env = obj2 super(mod_radial_average, self).endjob(env) if (env.subprocess() >= 0): self.logger.info("Subprocess %02d: processed %d shots" % (env.subprocess(), self.nshots)) else: self.logger.info("Processed %d shots" % self.nshots)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def endjob(self, obj1, obj2=None):\n\n if obj2 is None:\n env = obj1\n else:\n evt = obj1\n env = obj2\n\n #self.logger.info(\"Saw %d shots, two_color %d, nodata %d \" % (self.nshots, self.naccepted, self.nnodata))", "def on_eval_batch_end(self, step, logs=None):", "def end():\n logging.info(\"Execution Ended\")", "def logFinished(build, step, log):", "def job_completed(self,event):\n if event.exception:\n logger.worker.warning('The job crashed :(')\n else:\n logger.worker.warning(self.task_id+'The job finished ')\n # set job complete to true, will display complete in web interface \n self.job_complete_status[self.task_id] = True", "def on_batch_end(self, batch, logs=None):", "def on_test_batch_end(self, batch, logs=None):", "def on_phase_end(\n self, task: \"tasks.ClassyTask\", local_variables: Dict[str, Any]\n ) -> None:\n batches = len(task.losses)\n if batches:\n self._log_performance_metrics(task, local_variables)", "def onEnd(self, agent):\n\n pass", "def quit(self, evt=None):\n self.log2Stdout()\n self.jobManager.stopLogging() \n self.mainwin.destroy()\n print 'closing plugin'\n return", "def notify_end(self, status, objective):\n pass # pragma: no cover", "def generateFinishOutput(self, job):\n input_dur = job.demand.data_in / self.system.io.to_cpu\n output_dur = job.demand.data_out / self.system.cpu.to_io\n run_dur = job.runtime + job.demand.data_run / self.system.cpu.to_io\n job.ts.finish_in = job.ts.start_in + input_dur\n job.ts.start_run = job.ts.finish_in\n job.ts.finish_run = job.ts.start_run + run_dur\n job.ts.start_out = job.ts.finish_run\n job.ts.finish_out = job.ts.start_out + output_dur\n evt = BBEvent(job, job.ts.finish_out, BBEventType.FinishOut)\n return evt", "def beginjob(self, evt, env):\n\n super(mod_radial_average, self).beginjob(evt, env)", "def stepFinished(build, step, results):", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def on_train_batch_end(self, step, logs=None):", "def stat_end_file(self, status, nbytes=0, task_id=None):\n\n self.filevals['end_time'] = time.time()\n self.filevals['status'] = status\n\n if nbytes != 0:\n self.filevals['numbytes'] = nbytes\n self.batchvals['totbytes'] += nbytes\n\n if self.transfer_stats_per_file:\n self.print_file_stats()", "def quitme(self, evt=None):\n if evt:\n self.dbgprint(\"too much for testing: so-long\")\n sys.exit()", "def on_batch_end(self, batch, logs={}):\n if self.mode == 0:\n self.current_progress += logs.get(\"size\")\n self.total_progress += logs.get(\"size\")\n else:\n self.current_progress += 1\n self.total_progress += 1\n self.loss = logs.get(\"loss\")\n loss_item = {\"samples\": self.total_progress, \"loss\": self.loss}\n self.accuracy = logs.get(\"acc\")\n accuracy_item = {\"samples\": self.total_progress, \"accuracy\": self.accuracy}\n self.total_runtime = time() - self.start_time\n self.update_data(loss_item, accuracy_item)", "def on_test_end(self, logs=None):", "def jobPostRun(job):\n\tif 'e' in job.proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying job ends')\n\t\tEMAIL.send('job', job, 'end')", "def generateFinishOutput(self, job):\n input_dur = job.demand.data_in / self.system.io.to_bb\n output_dur = job.demand.data_out / self.system.bb.to_io\n run_dur = job.runtime + job.demand.data_run / self.system.cpu.to_bb\n run_dur += job.demand.data_in / self.system.bb.to_cpu\n run_dur += job.demand.data_out / self.system.cpu.to_bb\n job.ts.finish_in = job.ts.start_in + input_dur\n job.ts.start_run = job.ts.finish_in\n job.ts.finish_run = job.ts.start_run + run_dur\n 
job.ts.start_out = job.ts.finish_run\n job.ts.finish_out = job.ts.start_out + output_dur\n evt = BBEvent(job, job.ts.finish_out, BBEventType.FinishOut)\n return evt", "def end_episode(self, interaction, agent, env, history):\n\n history.add('episode_length', interaction.episode_timestep)\n history.add('reward', interaction.episode_rewards)\n\n if interaction.verbose:\n print_episode_statistics(history)", "def end(self) -> None:\n self.process_event(\n PipelineEvent(\n PipelineEventType.RUN_END,\n )\n )", "def finished(self):\n\t\telog(\"finished\")", "def send_finish_event(self):\n self.status['type'] = '__end__'\n self._send()", "def on_test_batch_end(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def RunEnd(ss):\n ss.LogRun(ss.RunLog)", "def processEnded(self, status):\r\n self.pid = None\r\n statusMap = {\r\n 0: ProcessDone,\r\n 1: ProcessTerminated,\r\n }\r\n self.proto.processEnded(Failure(statusMap[status](status)))", "def on_terminate(self, agentName, process):\n self.log.info(\"%s's process with ID: %s has been terminated successfully\" % (agentName, process.pid))" ]
[ "0.6863275", "0.55696213", "0.5516178", "0.54396594", "0.5429018", "0.5396587", "0.5374466", "0.529632", "0.5290271", "0.52774626", "0.52727425", "0.52209705", "0.5210125", "0.51715285", "0.5167377", "0.5102996", "0.50926626", "0.5086865", "0.5086229", "0.5083143", "0.50818604", "0.5069985", "0.5048407", "0.5046363", "0.50235194", "0.502157", "0.5002113", "0.49724966", "0.4927292", "0.49090496" ]
0.70764345
0
Test inputs are deduplicated for the encoding function.
def test_cache_duplicate_inputs(self): random_encoding = mock.Mock(side_effect=_random_encoding) cached_random_encoding = encoder_client.cache_encodings( random_encoding, cache_size=100) encodings = cached_random_encoding(["hello"] * 10) self.assertEqual([10, 7], list(encodings.shape)) for i in range(1, 10): np.testing.assert_allclose(encodings[0], encodings[i]) random_encoding.assert_called_once_with(["hello"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testDuplicate(self,permutations=True):\n # This algorithm is faster than encode,\n # but for nplex=2 enmagic2 would probably still be faster.\n if permutations:\n C = self.copy()\n C.sort(axis=1)\n else:\n C = self\n ind = sortByColumns(C)\n C = C.take(ind,axis=0)\n ok = (C != roll(C,1,axis=0)).any(axis=1)\n if not ok[0]: # all duplicates -> should result in one unique element\n ok[0] = True\n return ind,ok", "def test_check_bc_duplicates_default_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = ['Duplicate barcode ACGT found.\\t1,1',\r\n 'Duplicate barcode ACGT found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_bc_duplicates_added_demultiplex_finds_dups(self):\r\n\r\n # Should find duplicates\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'CGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '1', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Duplicate barcode and added demultiplex field CGTA1 found.\\t1,1',\r\n 'Duplicate barcode and added demultiplex field CGTA1 found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find duplicates with var length turned on\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'CGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTAA', 'AAAA', '1', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Duplicate barcode and added demultiplex field CGTAA1 found.\\t1,1',\r\n 'Duplicate barcode and added demultiplex field CGTAA1 found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find duplicates when just using added fields\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'CGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '1', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = [\r\n 'Duplicate added demultiplex field 1 found.\\t1,3',\r\n 'Duplicate added demultiplex field 1 found.\\t2,3']\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_bc_duplicates_var_len_no_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n 
# combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_bc_duplicates_var_len_dupes(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field=None)\r\n\r\n # Barcode 1 is the largest, with 5 nts, is sequence ACGTA. When the\r\n # last base at 5' end of primer is added to barcode 2, there is a\r\n # duplicate, as this is also ACGTA.\r\n expected_errors = [\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t1,1',\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_added_demultiplex_dups(self):\r\n\r\n # Should not find any duplicates\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_added_demultiplex_dups(header, mapping_data, errors,\r\n has_barcodes=True, added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find any duplicates with var length turned on.\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_added_demultiplex_dups(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find errors when only looking at added field\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_added_demultiplex_dups(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_bc_duplicates_disable_bcs_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n # Should return an error if more than one SampleID present and\r\n # barcodes disabled and no added_demultiplex_field\r\n expected_errors = [\r\n \"If no barcodes are present, and the added_demultiplex_field option isn't used, only a single SampleID can be present.\\t-1,-1\"]\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # If a single sample present, should not raise any errors\r\n\r\n header =\\\r\n ['SampleID', 
'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_bc_duplicates_default_correct(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def test():\n # Check all numbers are unique\n from collections import defaultdict\n a = defaultdict()\n for i in range(SIZE+1):\n if encode(i) in a:\n return False\n a[encode(i)] = i", "def check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field=None):\r\n\r\n if (has_barcodes and not variable_len_barcodes\r\n and not added_demultiplex_field):\r\n errors = check_fixed_len_bcs_dups(header, mapping_data, errors)\r\n if (has_barcodes and variable_len_barcodes\r\n and not added_demultiplex_field):\r\n errors = check_variable_len_bcs_dups(header, mapping_data, errors)\r\n if added_demultiplex_field:\r\n errors = check_added_demultiplex_dups(header, mapping_data, errors,\r\n has_barcodes, added_demultiplex_field)\r\n # Special case of has_barcodes = False and no added_demultiplex_field,\r\n # need to check that only a single SampleID is passed in this case so\r\n # we have \"unique\" demultiplexing.\r\n if (not has_barcodes and not added_demultiplex_field):\r\n # only one line of mapping data for one sample\r\n if len(mapping_data) != 1:\r\n errors.append(\"If no barcodes are present, and the \" +\r\n \"added_demultiplex_field option isn't used, only a single \" +\r\n \"SampleID can be present.\\t-1,-1\")\r\n return errors", "def test_identical(self):\n write this test!", "def test_check_bc_duplicates_added_demultiplex(self):\r\n\r\n # Should not find any duplicates\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find any duplicates with var length turned on.\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=True,\r\n variable_len_barcodes=True,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should not find errors when only looking at added field\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 
'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_bc_duplicates(header,\r\n mapping_data,\r\n errors,\r\n has_barcodes=False,\r\n variable_len_barcodes=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)", "def test_check_variable_len_bcs_dups(self):\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'TAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_variable_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n # combination of primer seq and barcodes to match largest barcode\r\n # present is ACGTA and ACGTT, so should not get a duplicate hit.\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'ACGT', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_variable_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n # Barcode 1 is the largest, with 5 nts, is sequence ACGTA. When the\r\n # last base at 5' end of primer is added to barcode 2, there is a\r\n # duplicate, as this is also ACGTA.\r\n expected_errors = [\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t1,1',\r\n 'Duplicate barcode and primer fragment sequence ACGTA found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def removeDuplicates(seq):\n\n pass", "def _test_sampdup(t):\n return t.shape[1] != len(set(t.ids(axis='sample')))", "def _encode_check_unknown(values, uniques, return_mask=False):\n uniques_set = set(uniques)\n diff = list(set(values) - uniques_set)\n if return_mask:\n if diff:\n valid_mask = [val in uniques_set for val in values]\n else:\n valid_mask = [True] * len(values)\n return diff, valid_mask\n else:\n return diff", "def isdistinct(seq):\n return len(seq) == len(set(seq))", "def check_duplicate(self, state):\n pass", "def test_check_fixed_len_bcs_dups(self):\r\n\r\n # Should not find any duplicates\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'ACGT', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_fixed_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n expected_errors = []\r\n\r\n self.assertEqual(errors, expected_errors)\r\n\r\n # Should find duplicates\r\n\r\n header =\\\r\n ['SampleID', 'BarcodeSequence', 'LinkerPrimerSequence', 'run_prefix',\r\n 'Description']\r\n mapping_data = [['s-1', 'CGTA', 'AAAA', '1', 's1&data'],\r\n ['s2', 'CGTA', 'AAAA', '2', 's2_data']]\r\n errors = []\r\n\r\n errors = check_fixed_len_bcs_dups(header,\r\n mapping_data,\r\n errors)\r\n\r\n expected_errors = [\r\n 'Duplicate barcode CGTA found.\\t1,1',\r\n 'Duplicate barcode CGTA found.\\t2,1']\r\n\r\n self.assertEqual(errors, expected_errors)", "def is_unique3(a_string):\n\n if len(a_string) is 0:\n print \"String is empty.\"\n return False\n\n charset = [False] * 256\n\n for char in a_string:\n print char\n if charset[ord(char)]:\n return False\n charset[ord(char)] = True\n return True", "def test_duplicate_entries(self):", "def 
testDegenerate(self):\n srt = asarray(self.copy())\n srt.sort(axis=1)\n return (srt[:,:-1] == srt[:,1:]).any(axis=1)", "def test__remove_duplicates(self):\n\n result = deduped_list\n expected = [\n 'Fred',\n 'Dave',\n 'Sarah',\n 'John',\n 'Matthew',\n 'Joanna',\n 'Marjorie',\n 'Anna',\n 'Tony',\n 'Sam',\n 'Eric',\n 'Susan',\n 'Arthur',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def is_unique(w):\n chars = {}\n for c in w:\n if c in chars:\n return False\n chars[c] = True\n return True", "def check_unique(self):\n pass", "def clean_duplicate(self):\r\n self.elements = list(set(self.elements))\r\n self.elements = [e for e in self.elements if e != '']", "def verifyDistinct( options, data ):\n tot = 0\n for c in data.chrNames:\n s = set()\n d = mafDataOrNone( data.mafBlocksByChrom, c )\n if d is None:\n continue\n for mb in d:\n for i in xrange( mb.refStart, mb.refEnd + 1):\n if i in s:\n sys.stderr.write('duplicate base found! %s %d [%d-%d], %s [%d-%d]\\n'\n % (mb.refChr, i, mb.refStart, mb.refEnd, \n mb.pairChr, mb.pairStart, mb.pairEnd ))\n sys.exit( 1 )\n else:\n s.add( i )\n tot += len( s )\n sys.stderr.write( 'Verify all bases sent to be binned are distinct: Found %s distinct bases in the alignment to the reference genome, no duplicates, OK.\\n' % tot)", "def handle_same_length(self, a, b):\n found = False\n for i, j in zip(a, b):\n if i == j:\n continue\n elif found:\n return False # this case is the second found edit, thus return false\n else:\n found = True\n return True", "def _remove_duplicates(seq):\n d = {}\n for item in seq:\n item = item.lower()\n if item not in d:\n d[item] = True\n yield item", "def deduped(items):\n\n # # create an empty dictionary\n # # create an emtpy list that we will return \n # # Loop through the items in the list, if the item is not in the dict, add item to the list, and to the dict\n # # If the item is in the dict, increase the count by 1\n # # If the item is in the dict already, dont add the item to the list\n # # return list\n\n\n # duplicate_counts = {}\n\n # deduped = []\n\n # for item in items:\n # duplicate_counts[item] = duplicate_counts.get(item, 0) + 1\n\n\n # if duplicate_counts[item] == 1:\n # deduped.append(item)\n\n # return deduped\n\n ##################### HB SOLUTION ####################################\n\n # # sets are great for de-duplicating lists:\n # # sets dont maintain oder though, so if we want our answer to be in order\n # # we have to do the de-duplicating by hand\n # # however... this runtime would be O(n^2) becaause we have a for loop\n # # and nested inside that, we have an in which is a hidden for-loop\n # # for every charecter that we are looping over, we have to loop in deduped\n # # to check if that charecter is in there\n # # we dont want this \n\n # deduped = []\n\n # for char in items:\n # if char not in deduped:\n # deduped.append(char)\n \n # return deduped\n\n # instead we can use use a set to keep track of what we have seen and use a list\n # to hold the final results\n\n # keep track of what we have seen\n seen = set()\n\n # deduped will be what we return \n deduped = []\n\n for item in items:\n if item not in seen:\n deduped.append(item)\n seen.add(item)\n\n return deduped" ]
[ "0.6525442", "0.62831426", "0.62768084", "0.6242956", "0.61193717", "0.6068965", "0.6045352", "0.6022538", "0.5994702", "0.5966574", "0.59489924", "0.59229994", "0.58942944", "0.5864432", "0.57975835", "0.5708045", "0.56232053", "0.55745244", "0.55653274", "0.55564296", "0.55429673", "0.55237436", "0.54848284", "0.54750425", "0.54724336", "0.5468432", "0.5458243", "0.5455661", "0.54382694", "0.54189295" ]
0.6329421
1
Test the least recently used input is forgotten.
def test_least_recently_used_forgotten(self): cached_random_encoding = encoder_client.cache_encodings( _random_encoding, cache_size=10) encoding = cached_random_encoding(["to be forgotten"]) cached_random_encoding(list(range(10))) encoding_1 = cached_random_encoding(["to be forgotten"]) # Check the two encodings are different, as the old one should have # been forgotten. np.testing.assert_raises( AssertionError, np.testing.assert_allclose, encoding, encoding_1) self.assertEqual(0, cached_random_encoding.cache_hits())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_too_frequent_password_resets(self):\r\n student = self._user_factory_with_history()\r\n grandfathered_student = self._user_factory_with_history(set_initial_history=False)\r\n\r\n self.assertTrue(PasswordHistory.is_password_reset_too_soon(student))\r\n self.assertFalse(PasswordHistory.is_password_reset_too_soon(grandfathered_student))\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=100)\r\n with freeze_time(staff_reset_time):\r\n self.assertFalse(PasswordHistory.is_password_reset_too_soon(student))", "def is_fresh(self):\n return not self.used", "def has_buffered_inputs(self):", "def test_disabled_too_frequent_password_resets(self):\r\n student = self._user_factory_with_history()\r\n\r\n self.assertFalse(PasswordHistory.is_password_reset_too_soon(student))", "def isFresh(self, timestamp):\n pass;", "def test_gethint_unparsable(self):\r\n mock_module = CHModuleFactory.create()\r\n old_answers = copy.deepcopy(mock_module.previous_answers)\r\n json_in = 'blah'\r\n out = mock_module.get_hint(json_in)\r\n self.assertTrue(out is None)\r\n self.assertTrue(mock_module.previous_answers == old_answers)", "def was_used(self):\r\n return self.circ_chosen != 0", "def _check_for_incomplete_input(self):\n pass", "def ask_again(self, wrong_info):\n return input(f\"Wrong {wrong_info}, please try again: \")", "def check_allow_reset(self):\r\n if not self.ready_to_reset:\r\n if self.current_task_number > 0:\r\n last_response_data = self.get_last_response(self.current_task_number - 1)\r\n current_response_data = self.get_current_attributes(self.current_task_number)\r\n\r\n if (current_response_data['min_score_to_attempt'] > last_response_data['score']\r\n or current_response_data['max_score_to_attempt'] < last_response_data['score']):\r\n self.state = self.DONE\r\n self.ready_to_reset = True\r\n\r\n return self.ready_to_reset", "def reset():\n return True", "def test_never_same():\n g = RG.larger_random()\n hundred_calls = set([next(g) for _ in range(20)])\n assert len(hundred_calls) == 20", "def confused(self, rand):\n return rand > 0", "def test_no_forced_password_change(self):\r\n student = self._user_factory_with_history()\r\n staff = self._user_factory_with_history(is_staff=True)\r\n\r\n # also create a user who doesn't have any history\r\n grandfathered_student = UserFactory()\r\n grandfathered_student.date_joined = timezone.now()\r\n\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(student))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(staff))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(grandfathered_student))\r\n\r\n staff_reset_time = timezone.now() + timedelta(days=100)\r\n with freeze_time(staff_reset_time):\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(student))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(grandfathered_student))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(staff))", "def should_reset(self, current_time_step: ts.TimeStep) -> bool:\n handle_auto_reset = getattr(self, '_handle_auto_reset', False)\n return handle_auto_reset and np.all(current_time_step.is_last())", "def test_allow_all_password_reuse(self):\r\n student_email, _ = self._setup_user()\r\n user = User.objects.get(email=student_email)\r\n\r\n err_msg = 'You are re-using a password that you have used recently.'\r\n\r\n token = default_token_generator.make_token(user)\r\n uidb36 = int_to_base36(user.id)\r\n\r\n # try to do a password reset with the same password as 
before\r\n resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {\r\n 'new_password1': 'foo',\r\n 'new_password2': 'foo'\r\n }, follow=True)\r\n\r\n self.assertNotIn(\r\n err_msg,\r\n resp.content\r\n )", "def IsRerun(self):\n return self.prev_test_context is not None", "def is_input_order_important(self):", "def test_prompt_msg_confirm_invalid_repeats(self):\n global counter\n counter = 0\n\n def return_helper(*args, **kwargs):\n \"\"\"\n Returns a different value the second time called.\n \"\"\"\n global counter\n\n counter = counter + 1\n if counter > 1:\n return \"Y\"\n\n return \"foobar\"\n\n with mock.patch('__builtin__.raw_input', side_effect=return_helper) as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"Continue\",\n \"ask\": \"result\",\n \"confirm\": False\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"Continue [yN]? \", args[0])\n self.assertEqual(mockinput.call_count, 2)\n self.assertEquals(result['ansible_facts']['result'], True)", "def test_prompt_msg_ask_repeats(self):\n global counter\n counter = 0\n\n def return_helper(*args, **kwargs):\n \"\"\"\n Returns a different value the second time called.\n \"\"\"\n global counter\n\n counter = counter + 1\n if counter > 1:\n return \"foobar\"\n\n return \"\"\n\n with mock.patch('__builtin__.raw_input', side_effect=return_helper) as mockinput:\n result = self.prompt._prompt({}, {\n 'ask': 'varname'\n })\n\n self.assertEqual(mockinput.call_count, 2)\n self.assertEquals(result['ansible_facts']['varname'], 'foobar')", "def test_9999_last(self):\n self.lasttest = True", "def test_play_again_no(self):\n # Prepare test\n choice = random.choice(i18n.IN_NO_LIST)\n self.console.out_farewell = MagicMock()\n self.console.input = MagicMock(side_effect=[choice, StopIteration])\n\n # Run test\n self.console.ask_play_again()\n\n # Evaluate test\n calls = [call(FORMAT_NEWLINE_END(i18n.IN_MSG_REPLAY))]\n self.console.input.assert_has_calls(calls)\n self.console.out_farewell.assert_called_once()", "def test_011(self):\n user_input = [\"0\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def check_guess_if_previous(self): # is a helper function to add_previous_guess()\n if self.guess in self.past_guesses:\n return False\n else:\n return True", "def test_000(self):\n user_input = [\"0\",\"0\",\"0\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def check_duplicate(self, state):\n pass", "def NoPrompt(self) -> bool:", "def only_once(self) -> bool:\n return self.times == 1", "def test_101(self):\n user_input = [\"1\",\"0\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")", "def test_111(self):\n user_input = [\"1\",\"1\",\"1\"]\n with patch(\"builtins.input\", side_effect=user_input) as input_call:\n with patch(\"sys.stdout\", new=StringIO()) as output:\n import attempt\n self.assertEqual(output.getvalue().strip(),\"You cannot take this course, sorry!\")" ]
[ "0.617269", "0.5939254", "0.5901139", "0.5857087", "0.5765842", "0.5716414", "0.5697374", "0.568397", "0.56715894", "0.5663747", "0.5653324", "0.5643554", "0.562245", "0.5551021", "0.5533676", "0.5533053", "0.55282557", "0.5516118", "0.5479338", "0.54770094", "0.54523236", "0.5443735", "0.5438346", "0.54348785", "0.54317003", "0.54306865", "0.54033095", "0.5401888", "0.5397091", "0.5389497" ]
0.65517306
0
Test that inputs with nested lists are cached correctly.
def test_nested_lists(self): cached_random_encoding = encoder_client.cache_encodings( _random_encoding, cache_size=100) example_1 = ["hello", ["context 1", "context 2"]] example_2 = ["hi", ["context 1", "context 2", "context 3"]] encodings_1 = cached_random_encoding([example_1, example_2]) encodings_2 = cached_random_encoding([example_2, example_1]) np.testing.assert_allclose(encodings_1[0], encodings_2[1]) np.testing.assert_allclose(encodings_1[1], encodings_2[0]) self.assertEqual(2, cached_random_encoding.cache_hits())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_shallow_deep_object(self):\n # cache params\n cache_key = 'test_shallow_deep_object'\n cache_len = 60\n num_items = 3\n num_sub_items = 20000\n\n # prepare cache data and save\n cache_data = {}\n for n in range(num_items):\n cache_data[n] = self.get_cache_data(num_sub_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data[2].items().sort(),\n retrieved_data[2].items().sort())\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def testRemovingDuplicates(self):\n\n item1 = {KEY: 'one', 'name': 'foo'}\n item2 = {KEY: 'two', 'name': 'bar'}\n item3 = {KEY: 'three', 'name': 'baz'}\n dup_item1 = {KEY: 'one', 'name': 'foo'}\n dup_item2 = {KEY: 'two', 'name': 'qux'}\n\n list_with_duplicates = [item1, item2, item3, dup_item1, dup_item2]\n # duplicate items should not be present in the cached list\n expected_list = [item1, item2, item3]\n\n cached_list_logic.setCacheItems('test_list', list_with_duplicates)\n cached_list = cached_list_model.CachedList.get_by_id('test_list')\n self.assertListEqual(cached_list.list_data, expected_list)", "def test_complex_multi_cache(self):\n # cache params\n cache_key = 'test_complex_multi_cache'\n cache_len = 60\n num_items = 5000\n num_sub_items = 20\n\n # prepare cache data and save\n cache_data = {}\n for n in range(num_items):\n cache_data[n] = self.get_cache_data(num_sub_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n logging.info([cache_data[1000], retrieved_data[1000]])\n self.assertEqual(cache_data[1000].items().sort(),\n retrieved_data[1000].items().sort())\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def testUpdatingAfterCaching(self):\n valid_period = datetime.timedelta(2, 4, 6)\n cached_list_logic.setCacheItems(\n 'test_list', [{KEY: 'foo'}, {KEY: 'bar'}], valid_period)\n cached_list = cached_list_model.CachedList.get_by_id('test_list')\n\n self.assertAlmostEqual(cached_list.valid_through,\n datetime.datetime.now() + valid_period,\n delta=datetime.timedelta(seconds=5))\n\n self.assertFalse(cached_list.is_processing)", "def test_file_list_cache():\n from nose.tools import raises\n\n tmp = FileListCache()\n\n @raises(TypeError)\n def test_tmp():\n \"\"\" nost test \"\"\"\n tmp.cache_file_list_dict = 0\n\n test_tmp()", "def test_generate_cache_key_from_query_string_repeated_paramaters(app, cache):\n\n @app.route('/works')\n @cache.cached(query_string=True)\n def view_works():\n flatted_values = sum(request.args.listvalues(), [])\n return str(sorted(flatted_values)) + str(time.time())\n\n tc = app.test_client()\n\n # Make our first query...\n first_response = tc.get(\n '/works?mock=true&offset=20&limit=15&user[]=123&user[]=124'\n )\n first_time = first_response.get_data(as_text=True)\n\n # Make the second query...\n second_response = tc.get(\n '/works?mock=true&offset=20&limit=15&user[]=124&user[]=123'\n )\n second_time = second_response.get_data(as_text=True)\n\n # Now make sure the time for the first and second\n # query are the same!\n assert second_time == first_time\n\n # Last/third query with different parameters/values should\n # produce a different time.\n third_response = tc.get(\n '/works?mock=true&offset=20&limit=15&user[]=125&user[]=124'\n )\n third_time = third_response.get_data(as_text=True)\n\n # ... 
making sure that different query parameter values\n # don't yield the same cache!\n assert not third_time == second_time", "def test_cache_results(self):\n env = pike.Environment()\n value = [1]\n with pike.Graph('g') as graph:\n n = ParrotNode(value)\n env.add(graph)\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n n.value = [1, 2]\n\n # We mutated value, but the return value should be cached\n ret = env.run('g')\n self.assertEqual(ret, {'default': [1]})\n\n # Busting cache should return new value\n ret = env.run('g', True)\n self.assertEqual(ret, {'default': [1, 2]})", "def test_argument_change():\r\n mem = Memory(cachedir=env['dir'], verbose=0)\r\n func = mem.cache(count_and_append)\r\n # call the function for the first time, is should cache it with\r\n # argument x=[]\r\n assert func() == 0\r\n # the second time the argument is x=[None], which is not cached\r\n # yet, so the functions should be called a second time\r\n assert func() == 1", "def test_nested_list_arg(self):\r\n myNestedType = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)), 3)\r\n\r\n myType = TypedListType(T.TensorType(theano.config.floatX,\r\n (False, False)))\r\n\r\n myManualNestedType = TypedListType(TypedListType(\r\n TypedListType(myType)))\r\n\r\n self.assertTrue(myNestedType == myManualNestedType)", "def test_list_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_list()\r\n\r\n assert isinstance(nested, list)\r\n assert nested[0] == None\r\n assert nested[1] == 0\r\n assert nested[2] == 1\r\n\r\n assert isinstance(nested[3], list)\r\n assert nested[3][0] == 2\r\n assert nested[3][1] == original\r\n assert nested[3][2] == 3\r\n\r\n assert nested[4] == 5", "def test_argument_change(tmpdir):\n memory = Memory(location=tmpdir.strpath, verbose=0)\n func = memory.cache(count_and_append)\n # call the function for the first time, is should cache it with\n # argument x=[]\n assert func() == 0\n # the second time the argument is x=[None], which is not cached\n # yet, so the functions should be called a second time\n assert func() == 1", "def test_key_for_list_of_cacheable_objects(self):\n Meat.get_protein_sum([self.chicken, self.steak])\n expected_cache_key = 'tests.Meat.get_protein_sum;,Chicken:20,Steak:26;'\n self.assertExpectedKeyInCache(expected_cache_key)", "def test_cache_overflow_default(method):\n if method == \"init\":\n cache = CacheDict([(\"one\", 1), (\"two\", 2), (\"three\", 3)], cache_len=2)\n elif method == \"assign\":\n cache = CacheDict(cache_len=2)\n cache[\"one\"] = 1\n cache[\"two\"] = 2\n cache[\"three\"] = 3\n else:\n assert False\n\n assert \"one\" not in cache.keys()\n assert \"two\" in cache.keys()\n assert \"three\" in cache.keys()", "def test_cache(self):\r\n locator = CourseLocator(org='testx', offering='GreekHero', branch='draft')\r\n course = modulestore().get_course(locator)\r\n block_map = modulestore().cache_items(\r\n course.system, [child.block_id for child in course.children], course.id, depth=3\r\n )\r\n self.assertIn('chapter1', block_map)\r\n self.assertIn('problem3_2', block_map)", "def test_local_cache():", "def testMulti(self):\n\n memcache.set_multi({'map_key_one': 1, 'map_key_two': u'some value'})\n values = memcache.get_multi(['map_key_one', 'map_key_two'])\n assert {'map_key_one': 1, 'map_key_two': u'some value'} == values\n\n memcache.add_multi(\n {'map_key_one': 'one', 'map_key_two': 2, 'three': u'trois'})\n values = memcache.get_multi(['map_key_two', 'three'])\n 
assert {'map_key_two': u'some value', 'three': u'trois'} == values", "def testReplaceWithList(self):\n\n # Bypass setter\n self.node._desc = [\n 'first description',\n 'second description',\n 'third description'\n ]\n\n self.node.desc = [\n 'forth description',\n 'fifth description',\n 'sixth description'\n ]\n\n self.assertEqual(\n [\n 'forth description',\n 'fifth description',\n 'sixth description'\n ],\n self.node.desc\n )", "def test_multi_cache(self):\n # cache params\n cache_key = 'test_multi_cache'\n cache_len = 60\n num_items = 20000\n\n # prepare cache data and save\n cache_data = self.get_cache_data(num_items)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def test_list_2f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test9\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_cache_lru_overflow(mode, add_third):\n\n cache = CacheDict([(\"one\", 1), (\"two\", 2)], cache_len=2)\n\n if mode == \"get\":\n dummy = cache[\"one\"]\n elif mode == \"set\":\n cache[\"one\"] = 1\n else:\n assert False\n\n if add_third:\n cache[\"three\"] = 3\n\n assert \"one\" in cache.keys()\n assert \"two\" not in cache.keys()\n assert \"three\" in cache.keys()\n else:\n assert \"one\" in cache.keys()\n assert \"two\" in cache.keys()\n assert \"three\" not in cache.keys()", "def test_removes_old_cached_values(self):\n lru = LRUCache(2)\n lru.put(\"3\", 3)\n lru.put(\"2\", 2)\n lru.get(\"3\")\n lru.put(\"1\", 1)\n self.assertEqual(lru.length, 2)\n self.assertEqual(lru.head.value, 1)\n self.assertEqual(lru.tail.value, 3)\n self.assertEqual(lru.get(\"2\"), -1)", "def test_refetch_precomputed_valid_cache():\n ident = _id()\n res1 = proj.fetch('test', ident)\n res2 = proj.fetch('test', ident)\n # As these are timestamps, they would not be the same if this were recomputed\n assert res1.result['val'] == res2.result['val']", "def test_simple_multi_cache(self):\n # cache params\n cache_key = 'test_simple_multi_cache'\n cache_len = 60\n\n # prepare cache data and save\n cache_data = self.get_cache_data(5000)\n multicache.set(cache_key, cache_data, cache_len)\n\n # retrieve data\n retrieved_data = multicache.get(cache_key)\n\n # test\n self.assertEqual(cache_data.keys().sort(), retrieved_data.keys().sort())", "def test_listf(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[]')\n self.assertFalse(check_json_array(jdic, jobj))", "def test_untimed(self):\n cache = TimedCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def testForExistence(self):\n self.assertTrue(cached_list_logic.isCachedListExists('cached_list'))\n self.assertFalse(cached_list_logic.isCachedListExists('none_existent'))", "def test_unsized(self):\n cache = LRUCache()\n for i in range(500):\n cache[i] = i\n for i in range(500):\n assert i in cache\n assert cache[i] == i", "def test_cache(self):\n response = self.make_call().json[0]\n self.assertFalse(response['cached']) # a call has ben made to Google API\n # each step is saved\n self.assertEqual(len(r.keys(pattern=r'step*')), int(r.get('counter')))\n self.assertEqual(int(r.get('counter')), len(response['steps']))\n pairs = set((i, j) for (i, o), (j, d) in 
combinations_with_replacement(list(enumerate(response['steps'])), 2) if i <= j)\n self.assertEqual(len(r.keys(pattern=r'origin*')), len(pairs)) # each combination is cached\n for i, j in pairs:\n origin, destination = response['steps'][i], response['steps'][j]\n resp = self.make_call(origin=f\"{origin['start_lat']},{origin['start_lng']}\",\n destination=f\"{destination['end_lat']},{destination['end_lng']}\").json[0]\n # No new API calls are made, cached results are returned for each possible combination of origin/dest\n self.assertEqual(origin['start_lat'], resp['start_lat']) # all coordinates should match\n self.assertEqual(origin['start_lng'], resp['start_lng'])\n self.assertEqual(destination['end_lat'], resp['end_lat'])\n self.assertEqual(destination['end_lng'], resp['end_lng'])\n self.assertTrue(resp['cached'])\n # New API call is made for transit directions. We can't recycle driving directions for this one.\n response = self.make_call(mode='transit').json\n self.assertFalse(response[0]['cached'])\n self.assertTrue(len(response) > 1) # when asking for transit directions it should yield multiple alternatives\n # driving directions should be cached already\n response = self.make_call().json[0]\n self.assertTrue(response['cached'])\n # Walking directions should not be cached\n walking = self.make_call(mode='walking').json[0]\n self.assertFalse(walking['cached'])\n # Bicycling should be treated as walking but 3 times as fast\n bicycling = self.make_call(mode='bicycling').json[0]\n self.assertTrue(bicycling['cached'])\n self.assertEqual(walking['duration'], 3 * bicycling['duration'])", "def test_cache_dataset():\n train = (\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5),\n (\"Lorem ipsum dolor sit amet\", 3, 4.5),\n (\"Sed ut perspiciatis unde\", 5, 5.5))\n\n t = TabularDataset(train, cache=True)\n\n assert len(t.train.cached_data) == 0\n for i, _ in enumerate(t.train):\n assert len(t.train.cached_data) == i + 1", "def test_KanePage_cached(self):\n kane_page = KanePage(mocked=True)\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert not from_cache\n\n # 2nd read from cache!\n kane_page.ssml_taplist() # this puts it in the cache\n from_cache = kane_page.fetch_taplist(brewery=\"Kane\")\n assert from_cache" ]
[ "0.6522186", "0.64140004", "0.6132525", "0.61043525", "0.601811", "0.59138286", "0.58714175", "0.5837765", "0.5805114", "0.57767415", "0.5767161", "0.5762907", "0.5743672", "0.57405716", "0.569464", "0.5663124", "0.56611806", "0.56382847", "0.5629299", "0.5627038", "0.5612794", "0.5599503", "0.5594117", "0.55784667", "0.5575885", "0.5571019", "0.5570601", "0.5562359", "0.55601156", "0.5535925" ]
0.6845108
0
Get value of DateEditField
def getValue(self): return qDate2Date(self.field.date())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_date(self):\n return self.cleaned_data['date']", "def date(self):\n return self.date_value", "def getValue(self):\n return self.field.text()", "def getValue(self):\n return self.field.currentText()", "def getValue(self):\n return self.field.value()", "def getValue(self):\n return self.field.value()", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def get_date(self):\n return self.date", "def field(self):\r\n return self.value", "def date(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"date\")", "def dateB(self):\r\n self.date = self.cal.selectedDate()\r\n self.lineEditWidgets[\"CUMPLEAÑOS\"].setText(\r\n self.date.toString(\"yyyy-MM-dd\"))", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def getDate(self):\n return self.date", "def edit_date(entry):\n entry.date = get_date()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def date(self):", "def getValue(self):\n return qDateTime2Datetime(self.field.dateTime())", "def date(self):\n return self._parse_date(self.value)", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. (Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def _event_to_val(self, evt):\n return evt.val", "def get_dtval(record, field_name):\n val = recordval(record, field_name)\n if (val != \"\" and not re.match(r'\\d\\d?/\\d\\d?/\\d\\d\\d\\d', val)):\n parser_error(\"bad value in \"+field_name+\": '\"+val+\"'-- try MM/DD/YYYY\")\n return val", "def get_btn_value(self, inst):\n\n self.active_date[0] = int(inst.text)\n\n selected = [self.active_date[0], self.active_date[1], self.active_date[2]]\n\n global selectedDates\n\n if selected in selectedDates:\n selectedDates.remove(selected)\n else:\n selectedDates.append(selected)\n\n if self.as_popup:\n self.parent_popup.dismiss()\n\n #getInfo.openPopup()", "def date(self):\n return self._date", "def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"", "def Date(self, default=None):\n return self.data.get('date', default)", "def Date(self, default=None):\n return self.data.get('date', default)", "def Date(self, default=None):\n return self.data.get('date', default)", "def date(self):\n return self._date", "def date(self):\n return self._date" ]
[ "0.7726011", "0.68309325", "0.65818435", "0.63797146", "0.63699603", "0.63699603", "0.6341612", "0.6341612", "0.6341612", "0.6341612", "0.6116081", "0.6042181", "0.5984255", "0.5964071", "0.59639466", "0.59452707", "0.59399724", "0.5923637", "0.5918697", "0.5900223", "0.5868414", "0.5860174", "0.57946694", "0.57933474", "0.57749504", "0.57529753", "0.57529753", "0.57529753", "0.57393765", "0.57393765" ]
0.7341388
1
Function for reading a response file
def readResponseFile(self): resp_filename = QFileDialog.getOpenFileName(self, "Open Response File", str(Path.home()), '') try: resp_file = open(resp_filename[0], 'r') except: print("Couldn't get any files from QFileDialog") return try: response = readResponseArrayToResponse(resp_file.read().split('\n'), resp_filename[0].split('/')[-1]) except Exception as e: response = None print("Not a valid response file: {0}".format(e)) resp_file.close() if response is not None: self.setResponse(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_response():\n result = ''\n line = ''\n while line != '\\n':\n result += line\n line = FROMFILE.readline()\n #print(\" I read line:[\"+line+\"]\")\n return result", "def recv_read_response(self, recv_payload): \n\t#Only unpack the headers because we want to store the file data as binary\n\tunpacked_payload = struct.unpack('!H3IQ', recv_payload[:22])\n\tstatus = unpacked_payload[0:1][0]\n\tepoch_no = unpacked_payload[1:2][0]\n\thandle_no = unpacked_payload[2:3][0]\t\n\t\n\t#Check that file handle is the same, to make sure it is the same file request.\n\tif (self.epoch_no == epoch_no and self.handle_no == handle_no):\n\t start_position = unpacked_payload[3:4][0]\n\t num_bytes_been_read = unpacked_payload[4:5][0] \n\t # If we receive less bytes than the number we requested to read, this means that\n\t # end of file has been reached\n\t if (num_bytes_been_read < self.NUM_BYTES_TO_READ):\n\t\tself.eof = True\n\t data_to_write = recv_payload[22:]\t \n\t #If status field says that response contains real data: Append to file. Otherwise react \n\t #depending on error code received.\n\t #Status 00 = OK\n\t #Status 01 = Epoch no. of file handle doesnt match epoch no. of current invocation\n\t #Status 10 = No context found for file-handle and no data has been read\n\t #Status 11 = Context could be found but start position out of range\n\t if (status == 0b00):\n\t\tself.file_append.seek(start_position)\n\t\tself.file_append.write(data_to_write)\n\t elif (status == 0b01):\n\t\tprint(\"Error: Epoch no. of file handle doesnt match epoch no. of current invocation\")\n\t\tsys.exit()\n\t elif (status == 0b10):\n\t\tprint(\"Error: No context found for file-handle and no data has been read\")\n\t\tsys.exit()\n\t elif(status == 0b11):\n\t\tprint(\"Error: Context could be found but start position out of range\")\n\t\tsys.exit()\n\telse:\n\t print(\"Error: File handle does not match file handle stored in client. Wrong file received.\")\n\t sys.exit() \t \n\t#Then return control to read_service_loop() method so that next iteration of send_read_request \n\t#from new start position is called.\n return", "def test_read_file():\n response = echo_client(\"GET webroot/sample.txt HTTP/1.1\")\n test_file = open('webroot/sample.txt', 'r')\n body = test_file.read()\n line_by_line = body.splitlines()\n test_file.close()\n for item in line_by_line:\n assert item in response", "def read_file(file_path):\n try:\n with open(file_path, \"r\") as file_obj:\n data = file_obj.read()\n code_type = classify_response(data)\n return data, code_type\n\n except FileNotFoundError:\n writer(f\"\\nerror: Unable to read file {file_path}\\n\", FORMAT[\"ERROR\"])\n sys.exit(1)", "def test_read_from_file(self):\n test = Server()\n test.cur_dir = os.getcwd()\n inputs = [['read_file', 'test_file.txt'],\n ['read_file', 'test_file.txt'],\n ['read_file', None],\n ['read_file', 'test_file.txt'] ]\n response = ['Hello, this is a test file.',\n 'Message:The file is read completely. 
Nothing more to read from this file',\n 'name of the file should be given',\n 'Hello, this is a test file.']\n res = []\n for val in inputs:\n res.append(test.read_from_file(val))\n # print(\"****************************************\")\n # print(res)\n self.assertListEqual(res, response)", "def read_file(file_directory, file_path, login_request, user_id):\n\n data, headers, server_host, server_port = process_request_header(file_directory, file_path, login_request, user_id)\n\n request = requests.post(\"http://\" + server_host + \":\" + server_port + \"/fileOperations/readFile\",\n headers=headers)\n return request.text", "def read_dir():\n request()\n sleep_ms( 100 ) # give sometime for the buffer to get data\n try:\n read_response()\n except Exception as err:\n print( 'Error decoding response' )\n print( '[ERROR]', err )\n sleep_ms( 1000 )", "def read(self, filename):\n pass", "def read(self, filename):\n pass", "def recv_open_response(self, recv_payload):\n\n\tunpacked_payload = struct.unpack(\"!?Q2I\", recv_payload)\n # Read status field. If set to False, ignore remaining fields and \n\t# generate error msg (file not found) before exiting. \n\t# Each unpacked value is a tuple, so [0] accesses the value that we want\n\tstatus = unpacked_payload[0:1][0]\n\tif status == False:\n\t print \"Error: File not found.\"\n\t sys.exit()\n\t\n\t#If set to True, read remaining fields.\n\telif status == True:\n\t print(\"File found.\")\n\t self.file_length = unpacked_payload[1:2][0]\n\t self.epoch_no = unpacked_payload[2:3][0]\n\t self.handle_no = unpacked_payload[3:][0]\t \t \n\treturn", "def load_mock_response(file_path: str) -> str:\n with open(file_path, encoding='utf-8') as mock_file:\n return mock_file.read()", "def request_file(self, path: str, token: str) -> Tuple[IO[bytes], dict]:\n response = self.request('get', path, token, stream=True)\n stream = ReadWrapper(response.iter_content,\n int(response.headers['Content-Length']))\n return stream, response.headers", "def load_mock_response(file_name: str) -> str:\n with open(f'test_data/{file_name}', mode='r', encoding='utf-8') as mock_file:\n return mock_file.read()", "def load_mock_response(file_name: str) -> str:\n with open(f'test_data/{file_name}', encoding='utf-8') as mock_file:\n return mock_file.read()", "def getFrom(self,conn,fn,printHeaders=False):\n #log(\"getFrom: \"+str(conn))\n conn.request(self.command,self.path,headers=self.headers)\n resp = conn.getresponse()\n log(\"getFrom: \"+str(resp.status)+\", \"+str(resp.reason)+\", \"+str(resp.version))\n if \"Content-Length\" in resp.headers:\n cl = resp.headers[\"Content-Length\"]\n log(\"getFrom: response length: \"+cl)\n cl = int(cl)\n else:\n log(\"getFrom: no response length\")\n cl = None\n printHeaders = True\n if printHeaders:\n for h in resp.headers:\n log(\"getFrom: resp.hdr: \"+h+\" = \"+resp.headers[h])\n resp.debuglevel=1\n if None != cl:\n try:\n data = resp.read(cl)\n except (Exception,Error) as err:\n log(\"getFrom: read: error: \"+str(err))\n else:\n data = resp.read()\n resp.debuglevel=0\n log(\"getFrom: \"+str(len(data))+\" bytes of response read\")\n if \"Transfer-encoding\" in resp.headers:\n trenc = resp.headers[\"Transfer-encoding\"]\n if \"chunked\"==trenc:\n log(\"getFrom: chunked -> fixed\")\n del resp.headers[\"Transfer-encoding\"]\n resp.headers[\"Content-Length\"] = str(len(data))\n p = os.path.dirname(fn)\n os.makedirs(p,exist_ok=True)\n w2 = 0\n try:\n f2 = open(fn+\".headers\",\"t+w\")\n for h in resp.headers:\n w2 += f2.write(h+\": 
\"+resp.headers[h]+\"\\n\")\n f2.close()\n except (Exception,Error) as err:\n log(\"getFrom: header: error: \"+str(err))\n log(\"getFrom: \"+str(w2)+\" bytes of headers written to file\")\n try:\n f = open(fn,\"b+w\")\n w = f.write(data)\n f.close()\n except (Exception,Error) as err:\n log(\"getFrom: body: error: \"+str(err))\n log(\"getFrom: \"+str(w)+\" bytes of body written to file\")\n return (resp.headers,data)", "def read(path):", "def handle(self):\n\t\ttry:\n\t\t\trequest_line = self.rfile.readline().decode(\"ascii\")\n\t\t\tassert request_line.endswith(\"\\r\\n\"), \"Request line must end in CRLF\"\n\t\t\tparts = request_line.strip().split()\n\t\t\tassert len(parts)==3, \"Invalid request line\"\n\t\t\thost, path, content_length = parts\n\t\t\tif (content_length:=int(content_length))>0:\n\t\t\t\tdata = self.rfile.read(content_length)\n\t\t\telse:\n\t\t\t\tdata = b''\n\t\t\tself.handle_request(host,path,data)\n\t\texcept AssertionError as e:\n\t\t\tself.response_code(4,e.args[0])", "def __read(self, bytes=31):\n raw_data = self.file_read.read(bytes)\n response = self.__get_response(raw_data)\n is_valid, error_code = self.__is_response_valid(response)\n if is_valid:\n char_list = self.__handle_raspi_glitch(response[1:])\n return str(''.join(char_list)), is_valid\n else:\n return error_code, is_valid", "def read_path(self, path):\n relative_path = path.strip('/')\n if os.path.exists(relative_path):\n try:\n message = open(relative_path).read()\n self.send_response(200)\n except Exception, e:\n message = \"Request failed\", e\n self.send_response(501)\n print message\n else:\n message = 'File not found'\n self.send_response(404)\n return message", "def read(self, filename):\n raise NotImplementedError", "def handle_file(filename,operation = 'r'):\n with open(filename,operation) as f:\n data = f.readlines()\n return data", "def __read_response(self, nblines=-1):\n resp, code, data = (b\"\", None, None)\n cpt = 0\n while True:\n try:\n line = self.__read_line()\n except Response as inst:\n code = inst.code\n data = inst.data\n break\n except Literal as inst:\n resp += self.__read_block(inst.value)\n if not resp.endswith(CRLF):\n resp += self.__read_line() + CRLF\n continue\n if not len(line):\n continue\n resp += line + CRLF\n cpt += 1\n if nblines != -1 and cpt == nblines:\n break\n\n return (code, data, resp)", "def read_file(root, file_name, file_type='t'):\r\n\r\n page_dir = handle_path(main_directory + '/' + root + '/' + file_name)\r\n\r\n try:\r\n # if type != 'b' and type != 't':\r\n # raise\r\n with open(page_dir, 'r' + file_type) as page_reader:\r\n return str(page_reader.read())\r\n except FileNotFoundError:\r\n print(\"The file the user requested doesn't exist\")\r\n raise_http_error(\"Not Found\")\r\n except OSError:\r\n print(\"The server couldn't get the page file\")\r\n raise_http_error(\"Internal Server Error\")", "def test_result_file_path_get(self):\n headers = { \n 'Accept': 'application/zip',\n }\n response = self.client.open(\n '/v1/result/{file_path}'.format(file_path='file_path_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def __read_file(self, filename):\n with open(filename) as f:\n content = f.readlines()\n \n return content", "def load_mock_response(file_name):\n with open('test_data/' + file_name, mode='r') as f:\n return json.loads(f.read())", "def readFile(self, path):\n return self.session.request('diag/files/?q=%s'\n % (path))", "def 
read_ocsp_response_cache_file(filename, ocsp_validation_cache):\n logger = getLogger(__name__)\n if check_ocsp_response_cache_lock_file(filename) and path.exists(filename):\n with codecs.open(filename, 'r', encoding='utf-8', errors='ignore') as f:\n _decode_ocsp_response_cache(json.load(f), ocsp_validation_cache)\n logger.debug(\"Read OCSP response cache file: %s, count=%s\",\n filename, len(OCSP_VALIDATION_CACHE))\n else:\n logger.info(\n \"Failed to locate OCSP response cache file. \"\n \"No worry. It will validate with OCSP server: %s\",\n filename\n )", "def read_from_file(self, filename: str) -> None:", "def read(filename, conn):\n\n conn.send(open(filename).read())" ]
[ "0.6882571", "0.67764246", "0.6689666", "0.663254", "0.66002065", "0.65235007", "0.6518528", "0.6441972", "0.6441972", "0.63936675", "0.63545483", "0.6312358", "0.6307773", "0.6301515", "0.62988454", "0.623506", "0.6195498", "0.6167778", "0.60561365", "0.60506356", "0.60334563", "0.60083264", "0.60058856", "0.60033447", "0.6002928", "0.59849334", "0.5963999", "0.59446126", "0.5929134", "0.59204036" ]
0.78609484
0
Clear string field value
def clearField(self): self.field.setText("")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\r\n self.room_value.set('')", "def clearField(self):\n self.field.setValue(self.default_val)", "def clearField(self):\n self.field.setValue(self.default_val)", "def clear(self):\r\n self.firstname_value.set('')\r\n self.lastname_value.set('')\r\n self.id_number_value.set('')\r\n self.country_value.set('')", "def _clear( self ):\n self.value = ( '', ) # *Not* '()', which won't do at all!\n self.operator = None", "def clearField(self):\n self.field.clearFields()", "def clearValue(self):\n self.clear()", "def clear_edit(self, value=None):\n self.my_text.edit_reset()", "def clearData(self):\r\n self.title.setVal(\"\"),\r\n self.first.setVal(\"\"),\r\n self.middle.setVal(\"\"),\r\n self.last.setVal(\"\"),\r\n self.suffix.setVal(\"\"),\r\n self.phone.setVal(\"\"),\r\n self.ext.setVal(\"\"),\r\n self.email.setVal(\"\"),\r\n self.affiliation.setVal(\"\")\r\n self.fullName.setVal(\"\")", "def reset(self):\n self.descripcionString.set(\"\")\n self.tituloString.set(\"\")", "def clearField(self):\n self.field.setDate(datetime.now().date())", "def clearField(self):\n raise Exception(\"Default clearing method called! Please implement clearing for {0}\".format(self.__class__.__name__))", "def clear(self) -> None:\n logging.info(f\"Clear input field. {self.desc}\")\n js = f\"\"\"var elm = document.querySelectorAll(\"{self.css}\")[{self.index}];\n elm.style.border=\"2px solid red\";\n elm.value = \"\";\"\"\"\n self._execute_javascript(js)", "def clear(self):\n logging.getLogger(__name__).info(\"Element input field cleared\\nby = {}\\nvalue = {}\".format(self.by, self.value))\n self.driver.find_element(self.by, self.value).clear()", "def Clear_input(self):\r\n self.root.ids.place_name.text = '' # Clear input\r\n self.root.ids.place_country.text = ''\r\n self.root.ids.place_priority.text = ''", "def clearField(self):\n self.field.setDateTime(datetime.now())", "def actionClear(self):\n self.setText(\"\")", "def reset(self):\n self.string = self.axiom", "def clearValue(self, d, name):\r\n \r\n if name in d:\r\n d[name] = None", "def clearEntry(*args, **kwargs):\n\targs[0].set_text('')", "def clearSetting(self, name: unicode) -> None:\n ...", "def delval(self):\r\n self.value = None", "def clearField(self):\n self.field.setCurrentIndex(0)", "def _force_clear(self, element):\n value = element.get_attribute(\"value\")\n actions = ActionChains(self.selenium.driver)\n actions.move_to_element(element).click().send_keys(Keys.END)\n for character in value:\n actions.send_keys(Keys.BACKSPACE)\n actions.perform()", "def clearValue(self):\n self.data = []", "def unset(self) -> None:\n self.val = None\n self.notes = []", "def clear(self, name):\n pass", "def clear_field():\n try:\n focused_element = driver.switch_to.active_element\n focused_element.clear()\n wait()\n except Exception as e:\n return \"Error: \" + str(e)\n return \"Success\"", "def clearOld(self):\n self.monitorTextBox.setPlainText(\"\")", "def clear_result(self):\n\n self.ui.plainTextEdit.clear()" ]
[ "0.749064", "0.7427105", "0.7427105", "0.7367588", "0.7103921", "0.7024007", "0.697663", "0.6923305", "0.6922619", "0.68387425", "0.68102473", "0.6765182", "0.6753275", "0.6720608", "0.66926175", "0.6682789", "0.6559357", "0.6558444", "0.65180975", "0.64615583", "0.6446874", "0.6430662", "0.63769203", "0.6349216", "0.62773955", "0.6261633", "0.62394726", "0.62210923", "0.62066954", "0.61785877" ]
0.78305686
0
Get the value of StringEditField
def getValue(self): return self.field.text()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getValue(self):\n return self.field.currentText()", "def value(self):\n return str(self.input.text())", "def value(self):\n s = str(self.input.text())\n if self._is_string_:\n return s\n else:\n return eval(s)", "def value(self):\n s = str(self.input.toPlainText())\n if self._is_string_:\n return s\n else:\n return eval(s)", "def string_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"string_value\")", "def value(self):\n return str(self.input.currentText())", "def getValue(self):\n return self.field.value()", "def getValue(self):\n return self.field.value()", "def get(self):\n # We use here the fact, that when used in a widget, the value will be\n # retrieved directly instead through .get(). Thus the widget will always \"see\" the str representation.\n value = self._tk.globalgetvar(self._name)\n try:\n value = self.convert(value)\n except Exception as e:\n value = Invalid\n if self._validated_hook:\n self._validated_hook(value is not Invalid)\n return value", "def get_row_input_text(self, row_idx):\n return self.row_items[row_idx][1].get()", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def get_string_value(self, obj, field):\n return smart_unicode(field.value_to_string(obj))", "def text_field(self):\n return self.properties.get('TextField', None)", "def getValue(self):\n return self.text()", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def value(self):\r\n v = None\r\n if not self.field.is_readonly() and self.params is not None:\r\n # submitted value. do not deserialize here since that requires\r\n # valid data, which we might not have\r\n try:\r\n v = self._serialized_value()\r\n except formalchemy.fields.FieldNotFoundError, e:\r\n pass\r\n if v:\r\n return v\r\n\r\n return \"\"", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")", "def value(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"value\")" ]
[ "0.7251396", "0.711177", "0.6903616", "0.6872598", "0.67954826", "0.6794966", "0.6647003", "0.6647003", "0.6558761", "0.6513615", "0.6455087", "0.64429784", "0.64178675", "0.6397171", "0.6364782", "0.6331894", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638", "0.6302638" ]
0.7493002
0
Get all values from the checkboxes
def getValues(self): result = [] for cbox in self.checkboxes: if cbox.isChecked(): result.append(cbox.text()) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCheck(self) -> list:\n results = []\n for i in self.checkboxs:\n if i.isChecked():\n results.append(i)\n return results", "def selectedValues(self):\n list_selected = []\n minLevel = float(self.minLevelLineEdit.text() or 0)\n maxLevel = float(self.maxLevelLineEdit.text() or 99)\n for text, checkbox in self.filter_checkBoxs.items():\n if checkbox.isChecked():\n list_selected.append(text)\n print(minLevel, maxLevel, list_selected)\n return minLevel, maxLevel, list_selected", "def getResult(self):\n self.show(modal=True)\n self.exec_()\n b = self.clickedButton()\n if not b: # b == 0 or b is None\n b = self.defaultButton()\n if b:\n res = str(b.text())\n else:\n res = ''\n if self.checks:\n return res,[c.isChecked() for c in self.checks]\n else:\n return res", "def processCheckboxes(checkboxes, options):\n\n tag_selection_bin = list()\n tag_selection = list()\n for checkbox in checkboxes:\n tag_selection_bin += list(checkbox.state())\n\n for i, tag in enumerate(tag_selection_bin):\n if tag:\n tag_selection.append(options[i])\n\n return tag_selection", "def value(self):\n for rb in self.rb:\n if rb.isChecked():\n return str(rb.text())\n return ''", "def get_selected_values(self, selection):\n return [b for b, v in self._choices if b & selection]", "def all_selected_values(self):\n values = self.browser.execute_script(\n self.SELECTED_OPTIONS_VALUE, self.browser.element(self)\n )\n return [value for value in values if value is not None]", "def get_checked_labels(self):\r\n checked_labels = []\r\n item_count = self.count()\r\n if item_count < 1:\r\n return checked_labels\r\n\r\n for item_index in xrange(item_count):\r\n item = self.item(item_index)\r\n if item is None or item.checkState() == Qt.Unchecked:\r\n continue\r\n checked_labels.append(str(item.text()))\r\n return checked_labels", "def get_checked_status_list(self):\r\n checked_status_list = []\r\n for item_index in xrange(self.count()):\r\n item = self.item(item_index)\r\n if not item is None:\r\n checked_status_list.append(item.checkState())\r\n return checked_status_list", "def get_inputs(self):\n inputs = self.view.main_panel.get_inputs()\n result = {}\n for _input in inputs:\n value = inputs[_input]\n if \"bool\" in _input and not isinstance(value, bool):\n value = value.get()\n result[_input] = value\n return result", "def __get_selected_lists(self):\n selected = []\n for i, l in enumerate(self.lists):\n if self.cb_values[i].get():\n selected.append(l) \n return selected", "def get_data(self):\n temp = []\n for item in self.list_of_ctrl:\n cb, data = item\n if cb.GetValue():\n temp.append(data)\n return temp", "def allSelect(self):\n for i in range(len(self.__controlsChecks)):\n self.__controlsChecks[i].setChecked(True)", "def _generateMenuItemCheckedState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'checkbox'\n indicators = self._script.formatting.getString(**args)\n if obj.getState().contains(pyatspi.STATE_CHECKED):\n result.append(indicators[1])\n return result", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def selectAll(self, value):\n for item in self.getItemsToModify():\n item.setCheckState(Qt.Checked if value else Qt.Unchecked)", "def _generateCheckedState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'checkbox'\n indicators = self._script.formatting.getString(**args)\n state = 
obj.getState()\n if state.contains(pyatspi.STATE_CHECKED):\n result.append(indicators[1])\n elif state.contains(pyatspi.STATE_INDETERMINATE):\n result.append(indicators[2])\n else:\n result.append(indicators[0])\n return result", "def _generateCellCheckedState(self, obj, **args):\n result = []\n if self._script.utilities.hasMeaningfulToggleAction(obj):\n oldRole = self._overrideRole(pyatspi.ROLE_CHECK_BOX, args)\n result.extend(self.generate(obj, **args))\n self._restoreRole(oldRole, args)\n\n return result", "def get_box_value(self, *value_sig):\n\n # If solely the value of the checkbox was requested, return it\n if bool in value_sig:\n return(get_box_value(self.checkbox))\n # Else, return the checkbox and widget values\n else:\n return(get_box_value(self.checkbox),\n get_box_value(self.widget, *value_sig))", "def get_checked_status_map(self):\r\n checked_status = {} # {'BJT':True, ...}\r\n for item_index in xrange(self.count()):\r\n item = self.item(item_index)\r\n if item is None:\r\n continue\r\n item_text = str(item.text())\r\n if item.checkState() != Qt.Checked:\r\n checked_status[item_text] = False\r\n else:\r\n checked_status[item_text] = True\r\n return checked_status", "def init_all_checkboxes(self) -> bool:\n raise NotImplementedError", "def GetValues(self):", "def get_choicesdata(self):\n selected_value = self.get_cleaned_value()\n choicesdata = []\n for value, label in self.get_choices_cached():\n is_selected = value == selected_value\n url = self.build_set_values_url(values=[value])\n choicesdata.append({\n 'url': url,\n 'label': label,\n 'is_selected': is_selected,\n 'dom_id': '{}_{}'.format(self.get_inputfield_dom_id(), value)\n })\n return choicesdata", "def get_values(self):\n \n return []", "def get_choicesdata(self):\n # selected_value = self.get_cleaned_value()\n # choicesdata = []\n # found_selected_value = False\n # for value, label in self.get_choices():\n # is_selected = value == selected_value\n # if is_selected:\n # found_selected_value = True\n # url = self.build_set_values_url(values=[value])\n # choicesdata.append({\n # 'url': url,\n # 'label': label,\n # 'is_selected': is_selected\n # })\n choicesdata, found_selected_value = self.__make_choicesdata_list(\n choices=self.get_choices(),\n selected_value=self.get_cleaned_value())\n if not found_selected_value and len(choicesdata) > 0:\n selected_index = self.get_default_is_selected_index(choicesdata=choicesdata)\n choicesdata[selected_index]['is_selected'] = True\n return choicesdata", "def storeCheckBoxValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.getCheckBoxValue(self.sender())\n\t\tself.storeValue(category, attr, value)", "def valuerefs(self):\r\n return self.data.values()", "def gatherSelected(self):\n self.selected_list = []\n for element in self.hand_info:\n if element.status == 1:\n self.selected_list.append(element)\n return self.selected_list", "def values(self):\n\t\treturn self.myVals", "def get_filter_values(self):\n return [f.get() for f in self._filters[:-1]] # Ignore placeholder" ]
[ "0.754504", "0.6864356", "0.6775087", "0.66403234", "0.6456823", "0.63692284", "0.62996274", "0.61769706", "0.61111045", "0.6079384", "0.60151803", "0.59716076", "0.58443946", "0.5843741", "0.5835723", "0.5835723", "0.58272696", "0.579749", "0.5724495", "0.57017225", "0.56901675", "0.5684649", "0.5624378", "0.56100893", "0.5599934", "0.5592102", "0.5581898", "0.55782914", "0.55623305", "0.55477643" ]
0.8536971
0
Perform `n_iter` iterations of Gibbs sampling on the CRPMM. A record dict is constructed over the iterations; it contains several fields describing the sampling process. Each field is identified by its key, and its statistics are given in a list that covers the Gibbs sampling iterations. This dict is returned. A distribution dict is also constructed when the component number equals 'num_saved', and it is returned as well.
def collapsed_gibbs_sampler(self, n_iter, true_assignments, num_saved=3, weight_first=True): # Setup record dictionary record_dict = self.setup_record_dict() start_time = time.time() distribution_dict = self.setup_distribution_dict(num_saved) # Loop over iterations for i_iter in range(n_iter): ## save the wanted distribution if num_saved == self.components.K and i_iter > 1: distribution_dict = self.update_distribution_dict(distribution_dict, weight_first) ## Loop over data items # import random # permuted = range(self.components.N) # random.shuffle(permuted) # for i in permuted: for i in xrange(self.components.N): # Cache some old values for possible future use k_old = self.components.assignments[i] K_old = self.components.K stats_old = self.components.cache_component_stats(k_old) # Remove data vector `X[i]` from its current component self.components.del_item(i) # Compute log probability of `X[i]` belonging to each component log_prob_z = np.zeros(self.components.K + 1, np.float) # (25.35) in Murphy, p. 886 log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K]) # (25.33) in Murphy, p. 886 log_prob_z[:self.components.K] += self.components.log_post_pred(i) # Add one component to which nothing has been assigned log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i] prob_z = np.exp(log_prob_z - logsumexp(log_prob_z)) # Sample the new component assignment for `X[i]` k = utils.draw(prob_z) # logger.debug("Sampled k = " + str(k) + " from " + str(prob_z) + ".") # Add data item X[i] into its component `k` if k == k_old and self.components.K == K_old: # Assignment same and no components have been removed self.components.restore_component_from_stats(k_old, *stats_old) self.components.assignments[i] = k_old else: # Add data item X[i] into its new component `k` self.components.add_item(i, k) # Update record record_dict = self.update_record_dict(record_dict, i_iter, true_assignments, start_time) start_time = time.time() return record_dict, distribution_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gibbs_sample(self):\n # Initialize the initial state of Markov Chain.\n self.initialize()\n # Gibbs Sampling.\n for iteration_index in range(0, self.iteration_number, 1):\n for m in range(0,self.document_number,1):\n for n in range(0, len(self.documents[m]), 1):\n # Change the state of word_m_n according to it's full conditional probability.\n self.sample_by_full_condition(m=m,n=n)\n print 'iteration:', iteration_index,datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n if iteration_index > self.burn_in and iteration_index % self.update_cycle == 0:\n # Update the distribution after burn in.\n self.update_distribution()\n else:\n pass\n # calculate the final distribution.\n self.get_distribution()", "def _gibbs_sampling_iteration(self):\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n k = self.z_mn[m, n]\n self.n_mk[m, k] -= 1\n self.n_m[m] -= 1\n self.n_kt[k, w_mn] -= 1\n self.n_k[k] -= 1\n k = self._conditional_z(\n self.n_components, self.alpha, self.beta,\n self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)\n self.z_mn[m, n] = k\n self.n_mk[m, k] += 1\n self.n_m[m] += 1\n self.n_kt[k, w_mn] += 1\n self.n_k[k] += 1", "def resampleParams(self, caliStep, iterNO=-1):\n names = self.getNames()\n smcSamples = self.smcSamples[iterNO]\n numSamples = self.numSamples\n numThreads = self.threads if self.threads else cpu_count()\n # posterior probability at caliStep is used as the proposal distribution\n proposal = self.posterior[:, caliStep]\n newSmcSamples, newparamsFile, gmm, maxNumComponents = \\\n resampledParamsTable(keys=names, smcSamples=smcSamples, proposal=proposal, num=numSamples,\n threads=numThreads,\n maxNumComponents=self.__maxNumComponents, priorWeight=self.__priorWeight,\n covType=self.__covType,\n tableName='smcTable%i.txt' % (iterNO + 1))\n self.smcSamples.append(newSmcSamples)\n self.paramsFiles.append(newparamsFile)\n return gmm, maxNumComponents", "def run_bp(self, niter):\n for v in self.vs.values():\n v.init_received()\n for f in self.fs:\n f.init_received()\n marg = {v: self.get_marginal(v) for v in self.vs}\n for it in range(niter):\n for v in self.vs.values():\n v.send()\n for f in self.fs:\n f.send()\n for v in self.vs:\n marg[v] = np.vstack((marg[v], self.get_marginal(v)))\n domains = {v.name: v.orig_domain for v in self.vs.values()}\n return (marg, domains, self.vobs)", "def gibbs_sample(self, trial_count):\n values = {}\n count = total_trials = 0\n\n # Initialize\n for letter in self.letters:\n if (letter in self.query.evidence):\n # Fix evidence variables\n values[letter] = self.query.evidence[letter]\n else:\n # Initialize non-evidence to True\n values[letter] = True\n\n # Collect non-evidence variables\n non_evidence_letters = []\n for letter in self.letters:\n if (letter not in self.query.evidence):\n non_evidence_letters.append(letter)\n\n for i in xrange(trial_count):\n for letter in non_evidence_letters:\n\n # Probability of x, given its parents\n pos_prob = self.variables[letter].get_prob(values)\n # Probability of x's children, given their parents\n values[letter] = True # FIX TO BE TRUE\n for child in self.variables[letter].children:\n child_prob = self.variables[child].get_prob(values)\n\n if (values[child]):\n pos_prob *= child_prob\n else:\n pos_prob *= (1 - child_prob)\n\n ### DO SAME THING FOR FALSE PROB\n\n # Probability of x, given its parents\n neg_prob = 1 - self.variables[letter].get_prob(values)\n # Probability of x's children, given their parents\n values[letter] = False # FIX TO BE FALSE\n for child in 
self.variables[letter].children:\n child_prob = self.variables[child].get_prob(values)\n\n if (values[child]):\n neg_prob *= child_prob\n else:\n neg_prob *= (1 - child_prob)\n\n ### NORMALIZE\n prob = pos_prob / (pos_prob + neg_prob)\n\n ### SAMPLE\n values[letter] = self.sample(prob)\n\n if values[self.query.variable]:\n count += 1\n\n total_trials += 1\n\n return float(count) / total_trials", "def gibbs_sample(G, M, num_iters):\n\n # number of games\n N = G.shape[0]\n # Array containing mean skills of each player, set to prior mean\n w = np.zeros((M, 1))\n # Array that will contain skill samples\n skill_samples = np.zeros((M, num_iters))\n # Array containing skill variance for each player, set to prior variance\n pv = 0.5 * np.ones(M)\n\n # number of iterations of Gibbs\n for i in tqdm(range(num_iters)):\n # sample performance given differences in skills and outcomes\n t = np.zeros((N, 1))\n for g in range(N):\n\n s = w[G[g, 0]] - w[G[g, 1]] # difference in skills\n t[g] = s + np.random.randn() # Sample performance\n while t[g] < 0: # rejection step\n t[g] = s + np.random.randn() # resample if rejected\n\n # Jointly sample skills given performance differences\n m = np.zeros((M, 1))\n for p in range(M):\n # fill in m[p] prediction (natural param conditional)\n wins_array = np.array(G[:, 0] == p).astype(int)\n loss_array = np.array(G[:, 1] == p).astype(int)\n m[p] = np.dot(t[:,0], (wins_array - loss_array))\n\n iS = np.zeros((M, M)) # Container for sum of precision matrices (likelihood terms)\n for g in range(N):\n # Build the iS matrix\n winner = G[g, 0]\n loser = G[g, 1]\n\n iS[winner, winner] += 1\n iS[winner, loser] -= 1\n iS[loser, winner] -= 1\n iS[loser, loser] += 1\n\n # Posterior precision matrix\n iSS = iS + np.diag(1. / pv)\n\n # Use Cholesky decomposition to sample from a multivariate Gaussian\n iR = scipy.linalg.cho_factor(iSS) # Cholesky decomposition of the posterior precision matrix\n mu = scipy.linalg.cho_solve(iR, m, check_finite=False) # uses cholesky factor to compute inv(iSS) @ m\n\n # sample from N(mu, inv(iSS))\n w = mu + scipy.linalg.solve_triangular(iR[0], np.random.randn(M, 1), check_finite=False)\n skill_samples[:, i] = w[:, 0]\n\n return skill_samples", "def run_model_sampler(Y, latent_dim, n_iter):\n\tF_sample = []\n\tloading_sample = []\n\tvariance_sample = []\n\ttrace_sample = []\n\tmse_history = []\n\tF = initiate_factors(Y, latent_dim)\n\tfor i in tqdm(range(n_iter)):\n\t\tF, loading_matrix, Y_variance, gp_traces, mse = gibbs_sampling(F, Y)\n\t\tF_sample.append(F)\n\t\tloading_sample.append(loading_matrix)\n\t\tvariance_sample.append(Y_variance)\n\t\ttrace_sample.append(gp_traces)\n\t\tmse_history.append(mse)\n\treturn F_sample, loading_sample, variance_sample, trace_sample, mse_history", "def fisher_iterate(\n self,\n cbl,\n map_tag=None,\n iter_max=200,\n converge_criteria=0.005,\n qb_start=None,\n transfer_run=False,\n save_iters=False,\n null_first_cmb=False,\n delta_beta_prior=None,\n cond_noise=None,\n cond_criteria=None,\n like_profiles=False,\n like_profile_sigma=3.0,\n like_profile_points=100,\n file_tag=None,\n ):\n\n save_name = \"transfer\" if transfer_run else \"bandpowers\"\n\n if transfer_run:\n null_first_cmb = False\n\n # previous fqb iterations to monitor convergence and adjust conditioning\n prev_fqb = []\n cond_adjusted = False\n\n if qb_start is None:\n qb = OrderedDict()\n for k, v in self.bin_def.items():\n if transfer_run:\n if \"cmb\" not in k or \"eb\" in k or \"tb\" in k:\n continue\n if k == \"delta_beta\":\n # qb_delta beta is 
a coefficient on the change from beta,\n # so expect that it should be small if beta_ref is close\n # (zeroes cause singular matrix problems)\n qb[k] = [self.delta_beta_fix]\n elif k.startswith(\"res_\") or k.startswith(\"fg_\"):\n # res qb=0 means noise model is 100% accurate.\n qb[k] = 1e-5 * np.ones(len(v))\n else:\n # start by assuming model is 100% accurate\n qb[k] = np.ones(len(v))\n else:\n qb = qb_start\n\n obs, nell, debias = self.get_data_spectra(\n map_tag=map_tag, transfer_run=transfer_run\n )\n\n bin_index = pt.dict_to_index(self.bin_def)\n\n success = False\n for iter_idx in range(iter_max):\n self.log(\n \"Doing Fisher step {}/{}...\".format(iter_idx + 1, iter_max), \"info\"\n )\n\n qb_new, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=cond_noise,\n delta_beta_prior=delta_beta_prior,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n )\n\n qb_arr = pt.dict_to_arr(qb, flatten=True)\n qb_new_arr = pt.dict_to_arr(qb_new, flatten=True)\n dqb = qb_new_arr - qb_arr\n fqb = dqb / qb_arr\n max_fqb = np.nanmax(np.abs(fqb))\n\n prev_fqb.append(max_fqb)\n\n fnan = np.isnan(fqb)\n if fnan.any():\n (nanidx,) = np.where(fnan)\n self.log(\n \"Iter {}: Ignoring {} bins with fqb=nan: bins={}, qb_new={}, \"\n \"qb={}\".format(\n iter_idx,\n len(nanidx),\n nanidx,\n qb_new_arr[nanidx],\n qb_arr[nanidx],\n ),\n \"warning\",\n )\n\n self.log(\"Max fractional change in qb: {}\".format(max_fqb), \"info\")\n\n # put qb_new in original dict\n qb = copy.deepcopy(qb_new)\n cls_model = self.get_model_spectra(\n qb, cbl, delta=True, cls_noise=nell, cond_noise=None\n )\n\n if \"delta_beta\" in qb:\n # get beta fit and beta error\n beta_fit = qb[\"delta_beta\"][0] + self.beta_ref\n db_idx = slice(*bin_index[\"delta_beta\"])\n beta_err = np.sqrt(np.diag(inv_fish[db_idx, db_idx]))[0]\n else:\n beta_fit = None\n beta_err = None\n\n if save_iters:\n # save only the quantities that change with each iteration\n out = dict(\n map_tag=map_tag,\n map_tags=self.map_tags,\n iter_index=iter_idx,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n cls_shape=self.cls_shape,\n cls_obs=obs,\n qb=qb,\n fqb=fqb,\n inv_fish=inv_fish,\n cls_model=cls_model,\n cbl=cbl,\n map_freqs=self.map_freqs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n Dmat_obs=self.Dmat_obs,\n gmat_ell=self.gmat_ell,\n extra_tag=file_tag,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n self.save_data(save_name, bp_opts=not transfer_run, **out)\n\n (nans,) = np.where(np.isnan(qb_new_arr))\n if len(nans):\n msg = \"Found NaN values in qb bins {} at iter {}\".format(nans, iter_idx)\n break\n\n if fnan.all():\n msg = (\n \"All bins have fqb=NaN at iter {}, \"\n \"something has gone horribly wrong.\".format(iter_idx)\n )\n break\n\n negs = np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Iter {}: Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(iter_idx, negs),\n \"warning\",\n )\n\n if np.nanmax(np.abs(fqb)) < converge_criteria:\n if not transfer_run:\n # Calculate final fisher matrix without conditioning\n self.log(\"Calculating final Fisher matrix.\", \"info\")\n _, inv_fish = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n )\n\n # If any diagonals of inv_fisher are negative, something went wrong\n negs = 
np.where(np.diag(inv_fish) < 0)[0]\n if len(negs):\n self.log(\n \"Found negatives in inv_fish diagonal at locations \"\n \"{}\".format(negs),\n \"warning\",\n )\n\n success = True\n break\n\n else:\n msg = \"{} {} did not converge in {} iterations\".format(\n \"Multi-map\" if map_tag is None else \"Map {}\".format(map_tag),\n \"transfer function\" if transfer_run else \"spectrum\",\n iter_max,\n )\n # Check the slope of the last ten fqb_maxpoints.\n # If there's not a downward trend, adjust conditioning\n # criteria to help convergence.\n if len(prev_fqb) <= 10 or transfer_run:\n continue\n m, b = np.polyfit(np.arange(10), prev_fqb[-10:], 1)\n if m > 0: # Not converging\n # First, start from very little conditioning\n if not cond_adjusted:\n cond_criteria = 5e3\n cond_adjusted = True\n self.log(\n \"Iter {}: Not converging. Setting cond_criteria={}\".format(\n iter_idx, cond_criteria\n ),\n \"warning\",\n )\n\n elif cond_criteria > 100:\n cond_criteria /= 2.0\n self.log(\n \"Iter {}: Tightening condition criteria to help convergence. \"\n \"cond_criteria={}\".format(iter_idx, cond_criteria),\n \"warning\",\n )\n else:\n self.log(\n \"Iter {}: Can't reduce cond_criteria any more.\".format(\n iter_idx\n ),\n \"warning\",\n )\n # give it ten tries to start converging\n prev_fqb = []\n\n # save and return\n out = dict(\n qb=qb,\n inv_fish=inv_fish,\n fqb=fqb,\n bin_def=self.bin_def,\n bin_weights=self.bin_weights,\n iters=iter_idx,\n success=success,\n map_tags=self.map_tags,\n map_freqs=self.map_freqs,\n converge_criteria=converge_criteria,\n cond_noise=cond_noise,\n cond_criteria=cond_criteria,\n null_first_cmb=null_first_cmb,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n )\n\n if \"fg_tt\" in self.bin_def:\n out.update(\n delta_beta_prior=delta_beta_prior,\n beta_fit=beta_fit,\n beta_err=beta_err,\n ref_freq=self.ref_freq,\n beta_ref=self.beta_ref,\n )\n\n if self.debug:\n out.update(\n cbl=cbl,\n cls_obs=obs,\n cls_signal=self.cls_signal,\n cls_noise=self.cls_noise,\n cls_model=cls_model,\n cls_shape=self.cls_shape,\n cond_noise=cond_noise,\n Dmat_obs=self.Dmat_obs,\n )\n\n if not transfer_run:\n out.update(qb_transfer=self.qb_transfer)\n if self.template_cleaned:\n out.update(template_alpha=self.template_alpha)\n\n if success and not transfer_run:\n # do one more fisher calc that doesn't include sample variance\n # set qb=very close to 0. 
0 causes singular matrix problems.\n # don't do this for noise residual bins\n self.log(\"Calculating final Fisher matrix without sample variance.\", \"info\")\n qb_zeroed = copy.deepcopy(qb)\n qb_new_ns = copy.deepcopy(qb)\n for comp in [\"cmb\", \"fg\"]:\n for spec in self.specs:\n stag = \"{}_{}\".format(comp, spec)\n if stag not in qb_zeroed:\n continue\n qb_zeroed[stag][:] = 1e-20\n qb_new_ns[stag][:] = 1.0\n if \"delta_beta\" in qb:\n qb_zeroed[\"delta_beta\"][:] = 1e-20\n qb_new_ns[\"delta_beta\"][:] = 0\n\n _, inv_fish_ns = self.fisher_calc(\n qb_zeroed,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=debias,\n cond_noise=None,\n delta_beta_prior=None,\n null_first_cmb=null_first_cmb,\n )\n\n out.update(\n invfish_nosampvar=inv_fish_ns,\n )\n\n # compute window functions for CMB bins\n self.log(\"Calculating window functions for CMB bins\", \"info\")\n wbl_qb = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cls_debias=None,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n windows=True,\n inv_fish=inv_fish,\n )\n out.update(wbl_qb=wbl_qb)\n\n # compute bandpowers and covariances\n cb, dcb, ellb, cov, qb2cb, wbl_cb = self.do_qb2cb(qb, inv_fish, wbl_qb)\n _, dcb_ns, _, cov_ns, _, _ = self.do_qb2cb(qb, inv_fish_ns, wbl_qb)\n\n out.update(\n cb=cb,\n dcb=dcb,\n ellb=ellb,\n cov=cov,\n qb2cb=qb2cb,\n wbl_cb=wbl_cb,\n dcb_nosampvar=dcb_ns,\n cov_nosampvar=cov_ns,\n )\n\n if like_profiles:\n # compute bandpower likelihoods\n self.log(\"Calculating bandpower profile likelihoods\", \"info\")\n max_like = self.fisher_calc(\n qb,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n\n dqb = pt.arr_to_dict(np.sqrt(np.abs(np.diag(inv_fish))), qb)\n qb_like = OrderedDict()\n\n for stag, qbs in qb.items():\n qb_like[stag] = np.zeros(\n (len(qbs), 2, like_profile_points), dtype=float\n )\n\n for ibin, q in enumerate(qbs):\n qb1 = copy.deepcopy(qb)\n dq = dqb[stag][ibin] * like_profile_sigma\n q_arr = np.linspace(q - dq, q + dq, like_profile_points)\n like_arr = np.zeros_like(q_arr)\n\n for iq, q1 in enumerate(q_arr):\n qb1[stag][ibin] = q1\n try:\n like = self.fisher_calc(\n qb1,\n cbl,\n obs,\n cls_noise=nell,\n cond_noise=None,\n delta_beta_prior=delta_beta_prior,\n null_first_cmb=null_first_cmb,\n likelihood=True,\n )\n except np.linalg.LinAlgError:\n like = np.nan\n\n like_arr[iq] = like\n\n self.log(\n \"{} bin {} delta qb {} delta like: {}\".format(\n stag, ibin, q1 - q, like - max_like\n ),\n \"debug\",\n )\n\n qb_like[stag][ibin] = np.vstack([q_arr, like_arr])\n\n out.update(max_like=max_like, qb_like=qb_like)\n\n if not success:\n save_name = \"ERROR_{}\".format(save_name)\n self.log(msg, \"error\")\n self.warn(msg)\n\n return self.save_data(\n save_name, map_tag=map_tag, bp_opts=True, extra_tag=file_tag, **out\n )", "def gibbs(train_data, train_label, w_init, num_iterations, display_frequency, collect_final_sample_frequency): \n dim = train_data.shape[1]\n w_covariance = np.eye(dim) + np.sum(np.matmul(train_data[:, :, np.newaxis], train_data[:, np.newaxis, :]), axis= 0)\n w_covariance = np.linalg.inv(w_covariance)\n \n w = w_init\n _, z = get_output(w, train_data)\n sigma = 1\n \n sampled = [] # Keeps track of all samples\n final = [] # Keeps track of final samples which are sampled in a cyclic manner\n\n for i in range(num_iterations):\n # Sample weight\n w_new_mean = np.matmul(w_covariance, np.sum(z*train_data, axis= 0)[:, np.newaxis]) # dim x 1\n 
w = np.random.multivariate_normal(w_new_mean[:, 0], w_covariance, 1).T # dim x 1\n \n # Now sample hidden variable\n _, z_new = get_output(w, train_data)\n lower= np.zeros((train_data.shape[0], 1))\n upper= INFINITY*np.ones ((train_data.shape[0], 1)) \n lower[train_label < 0.5, :] = -INFINITY\n upper[train_label < 0.5, :] = 0\n\n\n X = stats.truncnorm((lower - z_new) / sigma, (upper - z_new) / sigma, loc= z_new, scale= sigma) \n z_new = X.rvs((train_data.shape[0],1))\n \n z = copy.deepcopy(z_new)\n \n if i % collect_final_sample_frequency == 0:\n # Sample from the current parameters\n final.append(w)\n\n if (i+1) % display_frequency == 0 or i == num_iterations-1:\n print(\"Iter {:6d} done\".format(i+1))\n \n return np.array(final), w", "def run(self, niter, calc_moments=True, save_last_param=None, verbose=True,\n return_analytics=False, seed=None):\n if niter < 1:\n if verbose:\n print(\"Nothing to do here as provided arg. `niter` is {}\" \\\n .format(niter))\n # return with desired args\n out = [self.INFO_OK]\n if calc_moments:\n out.append((None, None))\n if return_analytics:\n out.append((None, None, None))\n return out if len(out) > 1 else out[0]\n\n # Get seeds for sampling in workers for each iteration\n if isinstance(seed, np.random.RandomState):\n rng = seed\n else:\n rng = np.random.RandomState(seed=seed)\n seeds = rng.randint(0, pystan_max_uint, size=(niter, self.K))\n\n # Localise some instance variables\n # Mean and cov of the posterior approximation\n S = self.S\n m = self.m\n # Natural parameters of the approximation\n Q = self.Q\n r = self.r\n # Natural site parameters\n Qi = self.Qi\n ri = self.ri\n # Natural site proposal parameters\n Qi2 = self.Qi2\n ri2 = self.ri2\n # Site parameter updates\n dQi = self.dQi\n dri = self.dri\n\n # Array for positive definitness checking of each cavity distribution\n posdefs = np.empty(self.K, dtype=bool)\n\n if calc_moments:\n # Allocate memory for results\n m_phi_s = np.zeros((niter, self.dphi))\n cov_phi_s = np.zeros((niter, self.dphi, self.dphi))\n\n # monitor sampling times, mean stepsizes, and max rhats, and other times\n stimes = np.zeros(niter)\n msteps = np.zeros(niter)\n mrhats = np.zeros(niter)\n othertimes = np.zeros(niter)\n\n # Iterate niter rounds\n for cur_iter in range(niter):\n self.iter += 1\n\n # Tilted distributions (parallelisable)\n # -------------------------------------\n\n if verbose:\n print(\n \"Iter {} starting. 
Process tilted distributions\"\n .format(self.iter)\n )\n for k in range(self.K):\n if verbose:\n sys.stdout.write(\"\\r site {}\".format(k+1)+' '*10+'\\b'*9)\n # Force flush here as it is not done automatically\n sys.stdout.flush()\n # Process the site\n if save_last_param:\n posdefs[k] = self.workers[k].tilted(\n dQi[:,:,k],\n dri[:,k],\n save_samples = save_last_param,\n seed = seeds[cur_iter, k]\n )\n else:\n posdefs[k] = self.workers[k].tilted(\n dQi[:,:,k],\n dri[:,k],\n seed = seeds[cur_iter, k]\n )\n if verbose and not posdefs[k]:\n sys.stdout.write(\"fail\\n\")\n if verbose:\n if np.all(posdefs):\n print(\"\\rAll sites ok\")\n elif np.any(posdefs):\n print(\"\\rSome sites failed and are not updated\")\n else:\n print(\"\\rEvery site failed\")\n if not np.any(posdefs):\n # all sites failed, return with desired args\n out = [self.INFO_ALL_SITES_FAIL]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n\n # Store max sampling time\n stimes[cur_iter] = max([w.last_time for w in self.workers])\n msteps[cur_iter] = max([w.last_msteps for w in self.workers])\n mrhats[cur_iter] = max([w.last_mrhat for w in self.workers])\n\n if verbose:\n print(\n \"Sampling done, max sampling time {}\"\n .format(stimes[cur_iter])\n )\n\n # measure elapsed time for othertimes\n start_othertime = time.time()\n\n # Update global approx\n # --------------------\n\n # Initial dampig factor\n df = self.df0(self.iter)\n if verbose:\n print(\"Iter {}, starting df {:.3g}\".format(self.iter, df))\n fail_printline_pos = False\n fail_printline_cov = False\n # Fail flag for pos.def enforcing\n failed_force_pos_def = False\n while True:\n # Try to update the global posterior approximation\n\n # These 4 lines could be run in parallel also\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, out=ri2), out=ri2)\n np.add(Qi2.sum(2, out=Q), self.Q0, out=Q)\n np.add(ri2.sum(1, out=r), self.r0, out=r)\n\n # Check for positive definiteness\n cho_Q = S\n np.copyto(cho_Q, Q)\n try:\n linalg.cho_factor(cho_Q, overwrite_a=True)\n except linalg.LinAlgError:\n # Not positive definite -> reduce damping factor\n df *= self.df_decay\n if verbose:\n fail_printline_pos = True\n sys.stdout.write(\n \"\\rNon pos. def. 
posterior cov, \" +\n \"reducing df to {:.3}\".format(df) +\n \" \"*5 + \"\\b\"*5\n )\n sys.stdout.flush()\n if self.iter == 1:\n if verbose:\n print(\"\\nInvalid prior.\")\n # return with desired args\n out = [self.INFO_INVALID_PRIOR]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n if df < self.df_treshold:\n if verbose:\n print(\"\\nDamping factor reached minimum.\")\n df = self.df0(self.iter)\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, out=ri2), out=ri2)\n if failed_force_pos_def:\n if verbose:\n print(\"Failed to force pos_def global.\")\n # return with desired args\n out = [self.INFO_DF_TRESHOLD_REACHED_CAVITY]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n failed_force_pos_def = True\n # Try to fix by forcing improper sites to proper\n posdefs.fill(0)\n for k in range(self.K):\n # Set min eigenvalue to MIN_EIG by adding to the\n # diagonal if it is smaller than MIN_EIG_TRESHOLD\n min_eig = linalg.eigvalsh(\n Qi2[:,:,k], eigvals=(0,0))[0]\n if min_eig < self.MIN_EIG_TRESHOLD:\n Qi[:,:,k].flat[::self.dphi+1] += (\n self.MIN_EIG - min_eig)\n posdefs[k] = 1\n if verbose:\n print(\"Force sites {} pos_def.\".format(\n np.nonzero(posdefs)[0]))\n continue\n\n # Cavity distributions (parallelisable)\n # -------------------------------------\n # Check positive definitness for each cavity distribution\n for k in range(self.K):\n posdefs[k] = \\\n self.workers[k].cavity(Q, r, Qi2[:,:,k], ri2[:,k])\n # Early stopping criterion (when in serial)\n if not posdefs[k]:\n break\n\n if np.all(posdefs):\n # All cavity distributions are positive definite.\n # Accept step (switch Qi-Qi2 and ri-ri2)\n temp = Qi\n Qi = Qi2\n Qi2 = temp\n temp = ri\n ri = ri2\n ri2 = temp\n self.Qi = Qi\n self.Qi2 = Qi2\n self.ri = ri\n self.ri2 = ri2\n break\n\n else:\n # Not all cavity distributions are positive definite ...\n # reduce the damping factor\n df *= self.df_decay\n if verbose:\n if fail_printline_pos:\n fail_printline_pos = False\n print()\n fail_printline_cov = True\n sys.stdout.write(\n \"\\rNon pos. def. 
cavity, \" +\n \"(first encountered in site {}), \"\n .format(np.nonzero(~posdefs)[0][0]) +\n \"reducing df to {:.3}\".format(df) +\n \" \"*5 + \"\\b\"*5\n )\n sys.stdout.flush()\n if df < self.df_treshold:\n if verbose:\n print(\"\\nDamping factor reached minimum.\")\n df = self.df0(self.iter)\n np.add(Qi, np.multiply(df, dQi, out=Qi2), out=Qi2)\n np.add(ri, np.multiply(df, dri, out=ri2), out=ri2)\n if failed_force_pos_def:\n if verbose:\n print(\"Failed to force pos_def cavities.\")\n # return with desired args\n out = [self.INFO_DF_TRESHOLD_REACHED_CAVITY]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return out if len(out) > 1 else out[0]\n failed_force_pos_def = True\n # Try to fix by forcing improper sites to proper\n posdefs.fill(0)\n for k in range(self.K):\n # Set min eigenvalue to MIN_EIG by adding to the\n # diagonal if it is smaller than MIN_EIG_TRESHOLD\n min_eig = linalg.eigvalsh(\n Qi2[:,:,k], eigvals=(0,0))[0]\n if min_eig < self.MIN_EIG_TRESHOLD:\n Qi[:,:,k].flat[::self.dphi+1] += (\n self.MIN_EIG - min_eig)\n posdefs[k] = 1\n if verbose:\n print(\"Force sites {} pos_def.\".format(\n np.nonzero(posdefs)[0]))\n if verbose and (fail_printline_pos or fail_printline_cov):\n print()\n\n if calc_moments:\n # Invert Q (chol was already calculated)\n # N.B. The following inversion could be done while\n # parallel jobs are running, thus saving time.\n invert_normal_params(cho_Q, r, out_A='in-place', out_b=m,\n cho_form=True)\n # Store the approximation moments\n np.copyto(m_phi_s[cur_iter], m)\n np.copyto(cov_phi_s[cur_iter], S.T)\n if verbose:\n print(\n \"Mean and std of phi[0]: {:.3}, {:.3}\"\n .format(\n m_phi_s[cur_iter,0],\n np.sqrt(cov_phi_s[cur_iter,0,0])\n )\n )\n\n # measure total time - tilted time\n othertimes[cur_iter] = time.time() - start_othertime\n\n if verbose:\n print(\"Iter {} done.\".format(self.iter))\n\n if verbose:\n print(\n \"{} iterations done\\nTotal limiting sampling time: {}\"\n .format(niter, stimes.sum())\n )\n\n # return with desired args\n out = [self.INFO_OK]\n if calc_moments:\n out.append((m_phi_s, cov_phi_s))\n if return_analytics:\n out.append((stimes, msteps, mrhats, othertimes))\n return tuple(out) if len(out) > 1 else out[0]", "def sample_occr_individual(self, iters=2000, nwalkers=2,\n save=True, plot=False):\n\n \n # TODO: make an invert_poisson function that does the MCMC.\n # will need to be transposed to samples of 2d grid\n # But this shape will make it easier to sub the data in\n samples = np.zeros([np.shape(self)[0],\n np.shape(self)[1],\n iters*nwalkers])\n\n volumised_cpf = self._cpf_grid * self.calc_bin_volumes()\n\n for ix, iy in np.ndindex(*np.shape(self)):\n rm = volumised_cpf[ix, iy] * self._N_stars\n\n if self._event_values is not None:\n nevents = np.sum(\n (self._event_values.T[0] > self._R_boundaries[ix]) \\\n & (self._event_values.T[0] < self._R_boundaries[ix+1]) \\\n & (self._event_values.T[1] > self._P_boundaries[iy]) \\\n & (self._event_values.T[1] < self._P_boundaries[iy+1]))\n else:\n nevents = 0\n\n samples[ix, iy, :] = sample_poisson_rate_pymc(rate_multiplier=rm,\n num_events=nevents,\n iters=iters,\n nchains=nwalkers)\n\n samples = samples.swapaxes(0, -1).swapaxes(-1, 1)\n\n if save:\n self._sample_storage = samples\n\n if plot:\n medians = np.median(samples, axis=0)\n hfig = corner(samples, labels=self.occr_r_names, truths=medians)\n hfig.suptitle(\"Occurrence hyperparameters\")\n hfig.show()\n\n # The occurrences marginalised 
over period bins\n moccr_samples = self.volumise_occr(samples).sum(axis=-1)\n moccr_medians = np.median(moccr_samples, axis=0)\n mfig = corner(moccr_samples, labels=self.occr_r_names,\n truths=moccr_medians)\n mfig.suptitle(\"Marginalised occurrences\")\n mfig.show()\n\n return samples", "def gibbs_sampler(dna, k, t, N):\n\n motifs = random_kmers(k, t, dna)\n best_motifs = motifs.copy()\n\n for j in range(N):\n random_dna_seq_index = random.randrange(t)\n random_dna_seq = dna[random_dna_seq_index, :]\n mask = np.ones(t, dtype=bool)\n mask[random_dna_seq_index] = False\n\n count_mat = count_nucleotides(motifs[mask, :])\n prof_mat = profile(count_mat + 1, t - 1 + 4)\n\n mpk = profile_most_probable_kmer(random_dna_seq, k, prof_mat)\n motifs[random_dna_seq_index, :] = mpk\n\n if score_motif(motifs) < score_motif(best_motifs):\n best_motifs = motifs.copy()\n\n return best_motifs, score_motif(best_motifs)", "def run(self, r, niters=10000):\n validator.validate_type(r, rng, param_name='r')\n validator.validate_positive(niters, param_name='niters')\n model = bind(self._latent, self._view)\n for _ in xrange(niters):\n for name, config in self._kernel_config:\n if name == 'assign':\n gibbs.assign(model, r)\n elif name == 'assign_resample':\n gibbs.assign_resample(model, config['m'], r)\n elif name == 'grid_feature_hp':\n gibbs.hp(model, config, r)\n elif name == 'slice_feature_hp':\n slice.hp(model, r, hparams=config['hparams'])\n elif name == 'slice_cluster_hp':\n slice.hp(model, r, cparam=config['cparam'])\n elif name == 'theta':\n slice.theta(model, r, tparams=config['tparams'])\n else:\n assert False, \"should not be reach\"", "def sample_from_bm(self,\n num_chains, \n num_samples,\n num_steps,\n save_to_path,\n num_burn_in,\n test_inputs = None,\n print_p_tilda = False,\n print_gibbs = False):\n \n if type(test_inputs) is np.ndarray:\n \n print(\"Will initialize gibbs chains with dataset images\\n\")\n \n num_test_examples = test_inputs.shape[0]\n \n self.test_inputs = theano.shared(np.asarray(test_inputs,\n dtype=theano.config.floatX),\n borrow= True) \n \n select_examples = np.random.choice(num_test_examples, \n num_chains, \n replace=False)\n \n init_chains = np.asarray(\n self.test_inputs.get_value(borrow=True)[select_examples,:],\n dtype=theano.config.floatX)\n \n else:\n \n print(\"Will initialize gibbs chains with random images\\n\")\n init_chains = self.np_rand_gen.binomial(n=1,p=0.5, \n size = (num_chains, self.num_vars))\n \n images = np.zeros([num_chains*num_samples+num_chains, self.num_vars])\n \n images[0:num_chains,:] = init_chains\n \n theano.config.exception_verbosity = 'high'\n \n self.x_gibbs = theano.shared(init_chains, name= \"x_gibbs\")\n \n if self.num_hidden > 0:\n print(\"Running gibbs chains for RBM ...\\n\")\n \n (\n [ _,\n _,\n _,\n x_inputs,\n p_xi_given_x_,\n x_samples\n ],\n updates\n ) = theano.scan(\n self.gibbs_step_rbm_vis,\n outputs_info=[None, None, None, None, None, self.x_gibbs],\n n_steps= num_steps)\n \n output_vars = [p_xi_given_x_[-1], x_samples[-1]]\n \n updates.update({self.x_gibbs: x_samples[-1]})\n \n else:\n \n print(\"Running gibbs chains for BM ...\\n\")\n \n (p_xi_given_x_, x_samples), updates =\\\n theano.scan(self.gibbs_step_fully_visible, n_steps = num_steps)\n \n output_vars = [p_xi_given_x_[num_burn_in:],\n x_samples[num_burn_in:]]\n \n take_step = (num_steps - num_burn_in) // self.num_vars \n \n if take_step == 0:\n \n take_step = 1\n \n get_samples = theano.function(inputs = [],\n outputs = output_vars, \n updates = updates)\n \n for ind 
in range(num_samples):\n \n p_all, samples_all = get_samples()\n \n if num_steps != 1 and self.num_hidden == 0:\n \n p_out, samples_out = self.assemble_image(p_all, \n samples_all,\n num_chains,\n step = take_step)\n \n elif num_steps ==1 and self.num_hidden == 0:\n \n p_out = p_all[-1]\n \n samples_out = samples_all[-1]\n \n elif self.num_hidden > 0:\n \n p_out = p_all\n \n samples_out = samples_all\n \n if self.num_hidden == 0:\n \n p_out = np.transpose(p_out) \n \n # without resetting the chains are persistent for\n # fully visible Boltzmann Machines\n # (self.x_gibbs are modified continuously)\n # self.x_gibbs.set_value(init_chains)\n \n print(\"Sample %d -- max pixel activations for %d gibbs chains:\"%\n (ind, num_chains))\n print(np.max(p_out, axis= 1))\n print(\"\")\n \n if print_gibbs:\n self.print_gibbs_conditionals(p_vals = p_all)\n \n if print_p_tilda: \n is_samples = self.np_rand_gen.binomial(n=1, \n p=0.5, \n size =(10000, self.num_vars))\n \n gibbs_p_tilda, rand_p_tilda = \\\n self.test_p_tilda(np.transpose(samples_out), \n is_samples,\n training = False)\n \n print(\"p_tilda values for gibbs samples:\")\n print(gibbs_p_tilda)\n print(\"\")\n print(\"p_tilda values for randomly chosen importance samples:\")\n print(rand_p_tilda)\n print(\"\")\n \n images[num_chains*(ind+1):num_chains*(ind+2),:] = np.round(p_out)\n \n make_raster_plots(images, \n num_samples, \n num_chains, \n reshape_to = [self.side, self.side], \n save_to_path = save_to_path)", "def bunched_hist_sample(dfr, n_trials=5000, bunchsize=20):\r\n # convert to array to avoid indexing trouble\r\n aa = np.array(dfr)\r\n\r\n # number of bunches in the dataframe dfr\r\n nb = dfr.shape[0] // bunchsize\r\n\r\n # number of bunches to generate\r\n nbg = n_trials // bunchsize\r\n\r\n # numbers of bunches selected for sampling\r\n bind = np.random.choice(range(nb), size=nbg)\r\n\r\n # indices of records for sampling\r\n ind = np.zeros((nbg, bunchsize))\r\n\r\n # fill indices of records\r\n for i in range(nbg):\r\n st = bind[i] * bunchsize\r\n ind[i, :] = range(st, st + bunchsize)\r\n\r\n # convert indices to integer type\r\n indr = ind.reshape((nbg * bunchsize, )).astype('int')\r\n\r\n # make actual sampling\r\n aaa = aa[indr, :]\r\n\r\n # make up a dataframe for output\r\n dfb = pd.DataFrame(aaa, columns=dfr.columns)\r\n return dfb", "def __init__(self, n=5, factors=50, learning_rate=0.001, bias_regularization=0.001, user_regularization=0.001,\n positive_item_regularization=0.001, negative_item_regularization=0.001,iter = 50):\n print 'bpr begin'\n self.n = n\n self.factors = factors\n self.learning_rate = learning_rate\n self.bias_regularization = bias_regularization\n self.user_regularization = user_regularization\n self.positive_item_regularization = positive_item_regularization\n self.negative_item_regularization = negative_item_regularization\n self.iter = iter", "def gibbs_init(self, sigma2_s_param=None, sigma2_g_param=None):\n #Gibbs : Initialization step\n self.gibbs_init_step(self.nb_days, self.nb_particles, sigma2_s_param, sigma2_g_param)\n\n #Gibbs : step t > 0\n for j in range(1, self.nb_particles):\n if(j%(self.nb_particles/10)==0 or j==1):\n print(\"Gibbs sampling for particle \" + str(j) + \"/\" + str(self.nb_particles))\n\n\n self.s[:,j] = self.s[:,j-1]\n self.g_heat[:,j] = self.g_heat[:,j-1]\n self.sigma_s_star_2[:,j] = self.sigma_s_star_2[:,j-1]\n self.sigma_g_star_2[:,j] = self.sigma_g_star_2[:,j-1]\n\n # Compute s[0] for particle j (j>0)\n self.compute_s_0(j)\n\n # Compute s[n] for particle j (n>0 and 
j>0)\n for i in range(1, self.nb_days):\n self.compute_s(i,j)\n\n # Compute g_heat[O] for particle j (and j>0)\n self.compute_g_0(j)\n\n # Compute g_heat[n] for particle j (n>0 and j>0)\n for i in range(1, self.nb_days):\n self.compute_g(i,j)\n\n shape = 0.01 + ((self.nb_days - 1)/2)\n # Compute the new sigma_s_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.s, j)\n\n # Compute the new sigma_g_star2 for particle j (j>0) (follow Inverse Gamma)\n self.sigma_s_star_2[0, j] = self.compute_sigma_star_2(shape, self.g_heat, j)\n\n #Compute x\n self.compute_x()\n #Compute w\n self.compute_w()", "def baum_welch(self,\n observations,\n iter_limit=100,\n threshold=1e-5,\n pseudocounts=[0, 0, 0],\n do_logging=True,\n do_debug=False,\n **args):\n # Make 1-of-K representation\n # This is used to update emission probs in maximization step\n if do_logging:\n logging.info(\"Baum Welch Algorithm started.\")\n x_digits = [np.array(\n [[x[n] == i for i in range(self._M)]\n for n in range(len(x))]\n ).T\n for x in observations]\n if do_logging is True:\n logging.info(\"1 of K representation has been made.\")\n l_prev = 0\n for n in range(iter_limit):\n if do_logging:\n logging.info(\"Estimation step began.\")\n ## gammas: array of R elements. Each element is also an array\n ## (matrix) of N x K (N is length)\n gammas, xisums, cs = np.array([self.estimate(x) for x in observations]).T\n if do_debug:\n return gammas, xisums, cs, x_digits\n if do_logging:\n logging.info(\"Estimation step ended.\")\n l = self.maximize(gammas, xisums, cs, x_digits)\n if np.isnan(l):\n logging.error(\"Log Likelihood is nan. Here are some information:\")\n logging.error(\"Parameters are pickled into params.pickle\")\n pickle.dump({'t': self._t, 'e': self._e, 'i': self._i, 'xisums': xisums},\n open('params.pickle', 'wb'))\n raise ValueError(\"nan detected. 
The scaling factors are: %s\" % cs)\n #if pseudocounts != [0, 0, 0]: # At least one pseudocount is set\n if np.any(pseudocounts):\n self.add_pseudocounts(pseudocounts)\n dif = l - l_prev\n if do_logging:\n logging.info(\"iter: \" + str(n))\n logging.info(\"Likelihood: \" + str(l))\n logging.info(\"Delta: \" + str(dif))\n l_prev = l\n pickle.dump({'t': self._t, 'e': self._e, 'i': self._i},\n open('params.pickle', 'wb'))\n if n > 0 and dif < threshold:\n break", "def eachDigitGMM(data, cfg):\r\n\r\n models = {}\r\n for j in range(len(data)):\r\n train_set = data[j][0]\r\n for i in range(1, len(data[j])):\r\n train_set = np.concatenate((train_set, data[j][i]), axis=0)\r\n\r\n estimator = GaussianMixture(n_components=cfg['components'], max_iter=cfg['max_iterations'],\r\n tol=cfg['tolerance'], covariance_type=cfg['covariance_type'])\r\n models[j] = estimator.fit(train_set)\r\n\r\n return models", "def run_gibbs(expt, save=True, show_progress=False):\n if isinstance(expt, str):\n expt = get_experiment(expt)\n tr_expt = get_training_expt(expt)\n\n for it in tr_expt.save_after:\n for avg in AVG_VALS:\n print 'Iteration', it, avg\n try:\n rbm = load_rbm(expt, it, avg)\n except:\n continue\n log_Z = storage.load(expt.log_Z_file(it, avg)).as_numpy_array()\n final_states = storage.load(expt.final_states_file(it, avg))\n\n # sample the states proportionally to the Z estimates\n p = log_Z - np.logaddexp.reduce(log_Z)\n p /= p.sum() # not needed in theory, but numpy complains if it doesn't sum exactly to 1\n idxs = np.random.multinomial(1, p, size=expt.annealing.num_particles).argmax(1)\n states = binary_rbms.RBMState(final_states.v[idxs, :], final_states.h[idxs, :])\n\n if show_progress:\n pbar = misc.pbar(expt.gibbs_steps)\n\n for st in range(expt.gibbs_steps):\n states = rbm.step(states)\n\n if show_progress:\n pbar.update(st)\n\n if show_progress:\n pbar.finish()\n\n if save:\n storage.dump(states, expt.gibbs_states_file(it, avg))", "def gibbs_sampler(self, data=None, parameters=None, save_name='test', save_path='outfiles/', plot_status=False, log_file=False, **kwargs):\n print('\\n{}\\n{}\\n{}'.format('-'*len(save_name), save_name, '-'*len(save_name)))\n\n # creates a log file if specified\n if log_file:\n log = save_name + '.log'\n with open(log, 'w') as handle:\n handle.write('[[[[{}]]]]\\n'.format(save_name))\n handle.write('starting Gibbs sampler\\n')\n\n # extract values\n if parameters is None:\n parameters = {}\n parameters = {**PARAMETERS, **parameters, **kwargs}\n\n # data should be a 2d array where each row is the brightness of a different ROI\n data = np.atleast_2d(data)\n self.data = data\n\n # set variables for gibbs sampler\n np.random.seed(parameters['seed']) # set RNG\n val = self.initialize_variables(data, parameters)\n num_iter = val.num_iter\n\n # set history\n self.history = HistoryH5(\n save_name=save_name,\n path=save_path,\n variables=val,\n num_iter=num_iter,\n fields=[\n 'num_flor',\n 'mu_flor',\n 'mu_back',\n 'transitions',\n 'P',\n ],\n )\n\n # run the gibbs sampler\n print('starting Gibbs sampler')\n print('parameters:')\n for key in parameters:\n text = str(getattr(val, key)).replace('\\n', ', ')\n print('--{} = {}'.format(key, text))\n if log_file:\n with open(log, 'a') as handle:\n handle.write('--{} = {}\\n'.format(key, text))\n for iter_num in range(num_iter):\n print('iteration {} of {} ['.format(iter_num + 1, num_iter), end='')\n t = time.time()\n self.sample_states(val)\n print('%', end='')\n self.sample_mu(val)\n print('%', end='')\n 
self.sample_transitions(val)\n print('%', end='')\n if plot_status:\n self.plot_variables(val)\n print('%', end='')\n self.history.checkpoint(val, iter_num)\n print('%', end='')\n print('] ({} s)'.format(round(time.time()-t, 2)))\n print('num_flors=[{}]'.format(','.join(str(num_flor) for num_flor in val.num_flor)))\n if log_file:\n with open(log, 'a') as handle:\n handle.write('iteration {} of {} ({}s)\\n'.format(iter_num + 1, num_iter, round(time.time()-t, 2)))\n handle.write('num_flors=[{}]\\n'.format(','.join(str(num_flor) for num_flor in val.num_flor)))\n\n print('sampling complete')\n if log_file:\n with open(log, 'a') as handle:\n handle.write('sampling complete\\n')\n\n return", "def gaModel(NGEN,\n CXPB,\n MUTPB,\n modelOmega,\n year,\n region,\n mean,\n tournsize,\n n_aval\n ):\n start = time.clock()\n # Attribute generator\n toolbox.register(\"attr_float\", random.random)\n toolbox.register(\"mate\", tools.cxOnePoint)\n # operator for selecting individuals for breeding the next\n # generation: each individual of the current generation\n # is replaced by the 'fittest' (best) of three individuals\n # drawn randomly from the current generation.\n toolbox.register(\"select\", tools.selTournament, tournsize=tournsize)\n toolbox.register(\"mutate\", tools.mutPolynomialBounded,\n indpb=0.1, eta=1, low=0, up=1)\n\n stats = tools.Statistics(key=lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean)\n stats.register(\"std\", numpy.std)\n stats.register(\"min\", numpy.min)\n stats.register(\"max\", numpy.max)\n\n # calculating the number of individuals of the\n # populations based on the number of executions\n y = int(n_aval / NGEN)\n x = n_aval - y * NGEN\n n = x + y\n\n toolbox.register(\"evaluate\", evaluationFunction,\n modelOmega=modelOmega, mean=mean)\n toolbox.register(\"individual\",\n tools.initRepeat,\n creator.Individual,\n toolbox.attr_float,\n len(modelOmega[0].bins)\n )\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n logbook = tools.Logbook()\n logbook.header = \"gen\", \"min\", \"avg\", \"max\", \"std\"\n\n pop = toolbox.population(n)\n # Evaluate the entire population\n # 2 model.bins: real data, generated model\n fitnesses = list(map(toolbox.evaluate, pop))\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n for g in range(NGEN):\n print(g)\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n offspring = list(map(toolbox.clone, offspring))\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < CXPB:\n toolbox.mate(child1, child2)\n del child1.fitness.values\n del child2.fitness.values\n for mutant in offspring:\n if random.random() < MUTPB:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n # The population is entirely replaced by the offspring,\n # but the last pop best_pop\n # Elitism\n best_pop = tools.selBest(pop, 1)[0]\n offspring = sorted(offspring, key=attrgetter(\"fitness\"), reverse=True)\n offspring[len(offspring) - 1] = best_pop\n random.shuffle(offspring)\n pop[:] = offspring\n # logBook\n record = stats.compile(pop)\n logbook.record(gen=g, **record)\n end = time.clock()\n generatedModel = 
models.model.newModel(modelOmega[0].definitions)\n # conferir se e bins o best_pop\n generatedModel.prob = best_pop\n generatedModel.bins = calcNumberBins(list(best_pop), mean)\n generatedModel.loglikelihood = best_pop.fitness.values\n generatedModel.definitions = modelOmega[0].definitions\n generatedModel.time = start - end\n generatedModel.logbook = logbook\n # output = generatedModel.loglikelihood\n # return((-1)*output[0])\n return generatedModel", "def gibbs_sampler(dna, k, n, seed=None):\n if seed is not None:\n random.seed(seed)\n t = len(dna) # No. of strings in dna\n s = len(dna[0]) # Length of every string in dna\n assert s >= k\n\n nucleotides_freq = nucleotides_frequency(dna)\n\n # Randomly select one motif per dna string, each of length k, as a starting set of motifs\n motifs = [dna[row][i:i + k] for row, i in zip(range(0, t), [randint(0, s - k - 1) for _ in range(0, t)])]\n # Initialise the needful to keep track of the best motifs discovered so far\n best_motifs = deepcopy(motifs) # Deep copy needed to prevent updates to motifs[i] from changing best_motifs[i]\n best_motif_score = score_motif(best_motifs)\n scores = []\n for _ in range(0, n): # Basic stopping criteria\n # Randomly choose one of the motifs\n i = randint(0, t - 1)\n # Find the probability motifs_profile of the motifs, without the randomly choosen one\n motif_ex_i = motifs[:i] + motifs[i + 1:]\n the_profile = motifs_profile(motif_ex_i, pseudocount=1)\n # Determine the likelihood of every k-mer in the i-th string of dna, based on the just calculated profile\n proportions = []\n for kmer in kmers_from_dna(dna[i], k):\n prop = functools.reduce(operator.mul,\n [the_profile[nucleotide][col] for nucleotide, col in\n zip(kmer, range(0, len(kmer)))], 1)\n proportions.append(prop)\n sum_proportions = sum(proportions)\n prob_distr = [prop / sum_proportions for prop in proportions]\n assert len(prob_distr) == s - k + 1\n # Sample one kmer from the i-th string in dna, based on the just calculated probability distribution\n drafted_kmer_i = choices(range(0, len(prob_distr)), weights=prob_distr)[0]\n # Replace the i-th motif in the current set of motifs with the sampled one\n motifs[i] = dna[i][drafted_kmer_i:drafted_kmer_i + k]\n # Check if the obtained motif is the best so far\n # score = score_motif(motifs)\n score = relative_entropy(motifs, nucleotides_freq)\n scores.append(score)\n if score < best_motif_score:\n # Deep copy needed, or else code above that updates motifs[i] will overwrite best_motifs[i]\n best_motifs = deepcopy(motifs)\n best_motif_score = score\n return best_motifs, scores", "def sample(\n self,\n repetitions,\n nChains=3,\n burnIn=100,\n thin=1,\n convergenceCriteria=0.8,\n variables_of_interest=None,\n DEpairs=2,\n adaptationRate=\"auto\",\n eps=5e-2,\n mConvergence=True,\n mAccept=True,\n ):\n\n self.set_repetiton(repetitions)\n print(\n \"Starting the DEMCz algotrithm with \" + str(repetitions) + \" repetitions...\"\n )\n\n self.min_bound, self.max_bound = (\n self.parameter()[\"minbound\"],\n self.parameter()[\"maxbound\"],\n )\n repetitions = int(repetitions / nChains)\n ndraw_max = repetitions * nChains\n maxChainDraws = int(ndraw_max / nChains)\n\n dimensions = len(self.parameter()[\"random\"])\n\n # minbound,maxbound=self.find_min_max()\n # select variables if necessary\n if variables_of_interest is not None:\n slices = []\n for var in variables_of_interest:\n slices.append(self.slices[var])\n else:\n slices = [slice(None, None)]\n\n # make a list of starting chains that at least span the 
dimension space\n # in this case it will be of size 2*dim\n nSeedIterations = max(int(np.ceil(dimensions * 2 / nChains)), 2)\n\n # init a simulationhistory instance\n history = _SimulationHistory(\n maxChainDraws + nSeedIterations, nChains, dimensions\n )\n history.add_group(\"interest\", slices)\n\n ### BURN_IN\n burnInpar = [np.zeros((nChains, dimensions))] * nSeedIterations\n for i in range(nSeedIterations):\n self._logPs = []\n simulationlist = []\n old_like = np.empty(nChains)\n param_generator = (\n (rep, self.parameter()[\"random\"]) for rep in range(int(nChains))\n )\n\n for rep, vector, simulations in self.repeat(param_generator):\n burnInpar[i][rep] = vector\n likelist = self.postprocessing(i, vector, simulations, chains=rep)\n simulationlist.append(simulations)\n self._logPs.append(likelist)\n old_like[rep] = likelist\n burnInpar[i][rep] = vector\n if self.status.stop:\n break\n if not self.status.stop:\n history.record(burnInpar[i], self._logPs, 1)\n\n gamma = None\n self.accepts_ratio = 0.000001\n\n # initilize the convergence diagnostic object\n grConvergence = _GRConvergence()\n covConvergence = _CovarianceConvergence()\n\n # get the starting log objectivefunction and position for each of the\n # chains\n currentVectors = burnInpar[-1]\n currentLogPs = self._logPs[-1]\n\n # 2)now loop through and sample\n cur_iter = 0\n accepts_ratio_weighting = 1 - np.exp(-1.0 / 30)\n lastRecalculation = 0\n # continue sampling if:\n # 1) we have not drawn enough samples to satisfy the minimum number of iterations\n # 2) or any of the dimensions have not converged\n # 3) and we have not done more than the maximum number of iterations\n\n while cur_iter < maxChainDraws:\n print(cur_iter, burnIn)\n if cur_iter == burnIn:\n print(\"starting\")\n history.start_sampling()\n\n # every5th iteration allow a big jump\n if np.random.randint(5) == 0.0:\n gamma = np.array([1.0])\n else:\n gamma = np.array([2.38 / np.sqrt(2 * DEpairs * dimensions)])\n\n if cur_iter >= burnIn:\n proposalVectors = _dream_proposals(\n currentVectors,\n history,\n dimensions,\n nChains,\n DEpairs,\n gamma,\n 0.05,\n eps,\n )\n for i in range(len(proposalVectors)):\n proposalVectors[i] = self.check_par_validity(proposalVectors[i])\n # print proposalVectors\n else:\n proposalVectors = []\n for i in range(nChains):\n proposalVectors.append(self.parameter()[\"random\"])\n proposalVectors[i] = self.check_par_validity(proposalVectors[i])\n\n # if self.bounds_ok(minbound,maxbound,proposalVectors,nChains):\n proposalLogPs = []\n old_simulationlist = simulationlist\n old_likelist = self._logPs[-1]\n new_simulationlist = []\n new_likelist = []\n\n param_generator = (\n (rep, list(proposalVectors[rep])) for rep in range(int(nChains))\n )\n for rep, vector, simulations in self.repeat(param_generator):\n new_simulationlist.append(simulations)\n like = self.postprocessing(\n cur_iter + nSeedIterations,\n list(vector),\n simulations,\n chains=rep,\n )\n self._logPs.append(like)\n new_likelist.append(like)\n proposalLogPs.append(like)\n if self.status.stop:\n cur_iter = maxChainDraws\n break\n\n if not self.status.stop:\n # apply the metrop decision to decide whether to accept or reject\n # each chain proposal\n decisions, acceptance = self._metropolis_hastings(\n currentLogPs, proposalLogPs, nChains\n )\n self._update_accepts_ratio(accepts_ratio_weighting, acceptance)\n # choose from list of possible choices if 1d_decision is True at\n # specific index, else use default choice\n # np.choose(1d_decision[:,None], (list of possible choices, 
default\n # choice)\n save_likes = []\n save_pars = []\n save_sims = []\n\n for curchain in range(nChains):\n if decisions[curchain]:\n save_likes.append(float(new_likelist[curchain]))\n old_like[curchain] = float(new_likelist[curchain])\n save_pars.append(proposalVectors[curchain])\n save_sims.append(new_simulationlist[curchain])\n else:\n save_likes.append(old_like[curchain])\n save_pars.append(currentVectors[curchain])\n save_sims.append(old_simulationlist[curchain])\n\n currentVectors = np.choose(\n decisions[:, np.newaxis], (currentVectors, proposalVectors)\n )\n currentLogPs = np.choose(decisions, (currentLogPs, proposalLogPs))\n\n simulationlist = [\n [new_simulationlist, old_simulationlist][int(x)][ix]\n for ix, x in enumerate(decisions)\n ]\n\n likelist = list(\n np.choose(\n decisions[:, np.newaxis], (new_likelist, old_likelist)\n )\n )\n\n # we only want to recalculate convergence criteria when we are past\n # the burn in period\n\n if cur_iter % thin == 0:\n\n historyStartMovementRate = adaptationRate\n # try to adapt more when the acceptance rate is low and less\n # when it is high\n if adaptationRate == \"auto\":\n historyStartMovementRate = min(\n (0.234 / self.accepts_ratio) * 0.5, 0.95\n )\n\n history.record(\n currentVectors,\n currentLogPs,\n historyStartMovementRate,\n grConvergence=grConvergence.R,\n )\n\n if (\n history.nsamples > 0\n and cur_iter > lastRecalculation * 1.1\n and history.nsequence_histories > dimensions\n ):\n lastRecalculation = cur_iter\n grConvergence.update(history)\n covConvergence.update(history, \"all\")\n covConvergence.update(history, \"interest\")\n if all(grConvergence.R < convergenceCriteria):\n cur_iter = maxChainDraws\n print(\n \"All chains fullfil the convergence criteria. Sampling stopped.\"\n )\n cur_iter += 1\n\n # 3) finalize\n # only make the second half of draws available because that's the only\n # part used by the convergence diagnostic\n self.history = history.samples\n self.histo = history\n self.iter = cur_iter\n self.burnIn = burnIn\n self.R = grConvergence.R\n text = \"Gelman Rubin R=\" + str(self.R)\n print(text)\n self.status.rep = self.status.repetitions\n self.final_call()", "def gibbs_segmentation(image, burn_in, collect_frequency, n_samples):\n (Nx, Ny, _) = image.shape\n\n distribution = np.zeros( (Nx, Ny) )\n\n # Initialize binary estimates at every pixel randomly. 
\n estimates = (np.random.random( (Nx, Ny) ) > .5).astype(int)\n\n total_iterations = burn_in + (collect_frequency * (n_samples - 1) + 1)\n pixel_indices = list(itertools.product(range(Nx),range(Ny)))\n\n for iteration in range(total_iterations):\n\n # Loop over entire grid, using a random order for faster convergence\n random.shuffle(pixel_indices)\n for (i,j) in pixel_indices:\n xf = observation_model(image[i,j,:], 0)\n xb = observation_model(image[i,j,:], 1)\n for neighbor in get_neighbors(estimates, i, j):\n xf *= edge_model(0, neighbor)\n xb *= edge_model(1, neighbor)\n pb = xb / (xf + xb)\n estimates[i,j] = (np.random.random() < pb).astype(int)\n if iteration > burn_in and (iteration - burn_in + collect_frequency)%collect_frequency == 1:\n distribution += estimates\n \n distribution /= n_samples\n\n return distribution", "def fit(self, channel, noise_var, noised_signal, stop_iter=10):\n for i in range(stop_iter):\n self.global_iter_num = i\n \n self.get_parameter_s_from_q(channel, noise_var, noised_signal)\n self.update_r()\n self.get_parameter_s_from_r(channel, noise_var, noised_signal)\n self.update_q()", "def test_m2b_via_uniform (self):\n nt = 5\n ns = 1\n num_giter = 100\n net = self.m2b\n\n tmr = mytime.timeit()\n\n # For this test, each sample is tested independently rather than aggregated\n for i in xrange(ns):\n arrv = net.sample (nt)\n print arrv\n obs = arrv.subset (lambda a,e: a.is_initial (e), copy_evt)\n gsmp = net.gibbs_resample (obs, 0, num_giter, sample_final=False)\n for tid in xrange(nt):\n # For each task, check that the Gibbs distribution is correctly uniform\n times = []\n for smp_id in xrange(1,len(gsmp)):\n byt = gsmp[smp_id].events_of_task (tid)\n self.assertEquals (3, len(byt))\n times.append (byt[1].d)\n \n # examine gibbs function\n e0 = arrv.events_of_task (tid)[1]\n e1 = arrv.events_of_task (tid)[2]\n L = e0.a\n U = e1.d\n cdist = net.gibbs_for_departure (obs, e0)\n xs = [ L+ i*(U-L)/10 for i in xrange(10) ]\n for x in xs: print \" x %.4f p(d = x | A) %.4f\" % (x, cdist(x))\n \n # generate true sample\n s = [ numpy.random.uniform (L, U) for i in xrange(num_giter) ] \n\n # now check the cdfs\n s.sort()\n times.sort()\n print summarize (times)\n netutils.check_quantiles (self, s, times, num_giter)\n\n elapsed = tmr.total() \n print \"Events resampled per sec = \", (nt * 2 * ns * num_giter) / elapsed", "def pvalue_bkgtoys_runs(model, signal_process_groups = None, n_runs = 10, n = 10000, nuisance_constraint = None, nuisance_prior_toys = None, seed_min = 1, ts_method = deltanll):\n if signal_process_groups is None: signal_process_groups = model.signal_process_groups\n result = {}\n for i_run in range(n_runs):\n res = ts_method(model, 'toys:0.0', n, signal_process_groups = signal_process_groups, nuisance_constraint = nuisance_constraint,\n nuisance_prior_toys = nuisance_prior_toys, run_theta = False, seed = seed_min + i_run)\n for spid in res:\n if not spid in result: result[spid] = []\n result[spid].append(res[spid])\n return result", "def run_estimator(partial_order,\n num_iter=1000,\n p0=0.1): \n cover = compute_minimal_chain_cover(partial_order)\n size = partial_order.shape[0]\n p0_thresh = int(num_iter*p0)\n chain_state_dict = {}\n chain_size = {}\n chain_ordered_list = {}\n num_chains = len(cover)\n chain = []\n chain_index = [1]\n\n for i in range(num_chains):\n chain_size[i] = len(cover[i])\n chain_state_dict[i] = chain_to_states(partial_order, cover[i])\n chain_ordered_list[i] = []\n for j in range(chain_size[i]):\n # Add 1 since indices will be used 
in Fortran arrays\n chain_ordered_list[i].append(np.setdiff1d(\n chain_state_dict[i][j + 1], chain_state_dict[i][j])[0] + 1)\n if i > 0:\n chain_index.append(len(chain) + 1)\n chain.extend(chain_ordered_list[i])\n chain = np.array(chain, dtype='int')\n chain_index = np.array(chain_index, dtype='int')\n\n Ei = size - 1\n Eii = Ei \n curr_set = np.random.randint(0, 2, size) # Random starting set\n Ei_dict = {}\n card_list = []\n card_diff = np.zeros(1, dtype='int')\n last_iter = False\n while True:\n print(\"Sampling with Ei = \" + str(Ei) + \"...\")\n Ei_list = [] \n diff_list = []\n\n for i in range(num_iter):\n card_diff[0] = int(0)\n # After running gibbs_sampler, curr_set is updated with the newly\n # sampled set from E_i, while card_diff contains the distance from\n # curr_set to the smallest knowledge state containing it\n gibbs_sampler(curr_set, partial_order, chain, chain_index, Ei,\n card_diff, num_chains, size) \n Ei_list.append(curr_set.copy())\n diff_list.append(card_diff[0])\n\n # Update E_ii value using p0 threshold\n Eii = np.sort(diff_list)[p0_thresh]\n if Eii == Ei:\n Eii = Ei - 1\n # Find all sets in E_ii (i.e., the sets with a distance at most Eii from\n # the nearest knowledge state)\n seed_list = np.array(Ei_list)[np.flatnonzero(diff_list <= Eii), :]\n # Compute the proportion of sets in E_ii\n Ei_dict[Eii] = len(seed_list)/num_iter\n\n if Ei == 1:\n break\n # Update family of sets and randomly choose one of the sets in E_ii\n Ei = Eii \n curr_set = seed_list[np.random.randint(len(seed_list))].copy()\n\n # Compute estimated probability of sampling a knowledge state \n prob_prod = 1.0\n for prob in list(Ei_dict.values()):\n prob_prod *= prob\n # Compute size of the Cartesian product of the chains\n card_prod = 1.0\n for i in range(len(cover)):\n card_prod *= (len(cover[i]) + 1)\n # Estimate of the knowledge state size\n num_states = prob_prod * card_prod\n \n return num_states, Ei_dict", "def sample_pagerank(corpus, damping_factor, n):\n probabilities = dict()\n samples = []\n\n # Random first sample\n page = random.choice(list(corpus.keys()))\n samples.append(page)\n \n # Remaining samples after first\n for i in range(n-1):\n p = transition_model(corpus, page, damping_factor)\n page = random.choices(list(p.keys()), weights=list(p.values()), k=1)[0]\n samples.append(page)\n\n # Count\n for p in corpus.keys():\n probabilities[p] = samples.count(p) / n\n\n return probabilities" ]
[ "0.6322436", "0.6285666", "0.60211504", "0.5784153", "0.57558376", "0.5718077", "0.5679642", "0.5619398", "0.5619151", "0.5593346", "0.5539781", "0.5460076", "0.5412505", "0.5410561", "0.5342371", "0.5312312", "0.52381414", "0.5194906", "0.51755226", "0.5162795", "0.5161995", "0.513872", "0.5100643", "0.50901926", "0.5086468", "0.5084529", "0.5064273", "0.5060239", "0.5053658", "0.5050017" ]
0.6786249
0
For each label, extract the features from its segment and return the list with the features from all of them.
def features_from_labels(audio_file, segments): segments_features = [] #for each segment for segment in segments: features = features_from_label(audio_file, segment) #and append it to the list segments_features.append(features) return segments_features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_from_label(audio_file, segment):\n duration = segment['end'] - segment['start']\n audio, sample_rate = librosa.core.load(\n audio_file,\n duration=duration,\n offset=segment['start']\n )\n features = fe.get_features(audio, sample_rate)\n return features", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n #print(labels[start:end])\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def batch_features_labels(features, labels, batch_size):\r\n for start in range(0, len(features), batch_size):\r\n end = min(start + batch_size, len(features))\r\n yield features[start:end], labels[start:end]", "def expand_features_and_labels(x_feat, y_labels):\n x_expanded = []\n y_expanded = []\n for x, y in zip(x_feat, y_labels):\n for segment in x:\n x_expanded.append(segment)\n y_expanded.append(y)\n return x_expanded, y_expanded", "def _collect_features(self, save=None):\n makedir(self.modeldir)\n if save is None:\n save = '{:s}/all.fts'.format(self.modeldir)\n \n feats = []\n fls = glob('{:s}/*.fts'.format(self.modeldir))\n for i,fl in enumerate(fls):\n if fl.split(os.sep)[-1].split('.')[0] in ['all','ranked']: continue\n with open(fl) as fp:\n lns = fp.readlines()\n feats += [' '.join(ln.rstrip().split()[1:]) for ln in lns] \n\n labels = list(set(feats))\n freqs = [feats.count(label) for label in labels]\n labels = [label for _,label in sorted(zip(freqs,labels))][::-1]\n freqs = sorted(freqs)[::-1]\n # write out feature frequencies\n with open(save, 'w') as fp:\n _ = [fp.write('{:d},{:s}\\n'.format(freq,ft)) for freq,ft in zip(freqs,labels)]\n return labels, freqs", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list, 1)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n textlist = example.text_a.split(' ')\n labellist = example.label\n tokens = []\n labels = []\n valid = []\n label_mask = []\n for i, word in enumerate(textlist):\n token = tokenizer.tokenize(word)\n tokens.extend(token)\n label_1 = labellist[i]\n for m in range(len(token)):\n if m == 0:\n labels.append(label_1)\n valid.append(1)\n label_mask.append(True)\n else:\n valid.append(0)\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n labels = labels[0:(max_seq_length - 2)]\n valid = valid[0:(max_seq_length - 2)]\n label_mask = label_mask[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n label_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n valid.insert(0, 1)\n label_mask.insert(0, True)\n label_ids.append(label_map[\"[CLS]\"])\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n if 
len(labels) > i:\n label_ids.append(label_map[labels[i]])\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n valid.append(1)\n label_mask.append(True)\n label_ids.append(label_map[\"[SEP]\"])\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n label_mask = [True] * len(label_ids)\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(0)\n valid.append(1)\n label_mask.append(False)\n while len(label_ids) < max_seq_length:\n label_ids.append(0)\n label_mask.append(False)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(valid) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_ids,\n valid_ids=valid,\n label_mask=label_mask))\n return features", "def labeledfeatures(eqdata, featurefunc, labelfunc):\n _size = len(eqdata.index)\n _labels, _skipatend = labelfunc(eqdata)\n _features, _skipatstart = featurefunc(eqdata.iloc[:(_size - _skipatend), :])\n return _features, _labels.iloc[_skipatstart:, :]", "def get_features_and_labels(self, dataframe):\n features = dataframe.drop(columns=self._label, axis=1)\n labels = dataframe[self._label]\n\n return features, labels", "def extract_samples(image, labels):\n faces = []\n for label in labels:\n faces.append(_extract_samples_with_padding(image, label))\n return faces", "def batch_features_labels(features, labels, batch_size):\n # 用 yield迭代器。\n for start in range(0, len(features), batch_size):\n end = min(start + batch_size, len(features))\n yield features[start:end], labels[start:end]", "def generate_feats_and_labels(features_without_activity, feature_with_activity):\n num_time_steps = 64\n step = 6\n segments = []\n labels = []\n for i in range(0, len(features_without_activity) - num_time_steps, step):\n xlist = [features_without_activity[cols].values[i: i + num_time_steps] for cols in features_without_activity.columns]\n label = stats.mode(feature_with_activity['activity'][i: i + num_time_steps])[0][0]\n segments.append(xlist)\n labels.append(label)\n shape_of_segment = np.array(segments).shape\n labels = np.asarray(pd.get_dummies(labels), dtype=np.float32)\n num_features = shape_of_segment[1]\n reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, num_time_steps, num_features)\n return num_features, reshaped_segments, labels", "def convert_examples_to_features_for_train(examples, label_list, max_seq_length, tokenizer):\r\n label_map = {label : i for i, label in enumerate(label_list)} #label -> i index dictionary\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n label_list = example.label.split(' ')\r\n\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(example.text_a.split(' ')): #textlist\r\n token_wordpiece = tokenizer.tokenize(word)\r\n tokens.extend(token_wordpiece)\r\n label_current = label_list[i]\r\n for m in range(len(token_wordpiece)):\r\n if m == 0:\r\n labels.append(label_current)\r\n else:\r\n labels.append('X')\r\n\r\n # max_seq_length-1\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n\r\n ntokens.append('[CLS]')\r\n segment_ids.append(0)\r\n 
label_ids.append(label_map['[CLS]'])\r\n # print(tokens, labels)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(label_map[labels[i]])\r\n\r\n ntokens.append('[SEP]')\r\n segment_ids.append(0)\r\n label_ids.append(label_map['[SEP]'])\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n\r\n #if the length is short, tianbu 0\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n #we do not concerned about it\r\n label_ids.append(0)\r\n ntokens.append('NULL')\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n assert len(label_ids) == max_seq_length\r\n\r\n features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids))\r\n return features", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def extract_features(data_dir,mode='train'):\n files = get_files(data_dir)\n t0 = time.time()\n features = list()\n labels = list()\n for f in files:\n freq = get_frequencies(f)\n if mode=='train':\n sents = corpus_reader(f)\n labels.extend(d2l(sents,f,freq))\n elif mode=='decode':\n sents = corpus_reader(f,tag='pos')\n else:\n print('Invalid mode!')\n break\n features.extend(d2f(sents,f,freq)) \n dt = time.time() - t0\n print('Total feature extraction time: %d seconds' % dt)\n return features,labels", "def feature_extract(self, CT_pairs):\n instances = []\n for pair in CT_pairs:\n config = pair[0]\n label = pair[1]\n data = []\n featureset = {}\n \n # for nltk NaiveBayes feature selection stuff when doing MaxEnt decoding parser commit this\n# featureset[\"topOfBuffer\"] = self.token_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.token_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = (self.token_dict[config.sigma.top()], self.token_dict[config.beta.top()])\n# featureset[\"topOfBuffer\"] = self.POS_dict[config.beta.top()]\n# featureset[\"topOfStack\"] = self.POS_dict[config.sigma.top()]\n# featureset[\"bufferStackPair\"] = tuple((self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]))\n \n # add the (StackTopPOS,BufferTopPOS,bufferchildren_POS) feature\n #value_set = tuple([self.POS_dict[config.sigma.top()], self.POS_dict[config.beta.top()]] + [self.POS_dict[child] for child in self.getBufferChildren(config.beta.top())])\n #featureset[\"bufferStackbufferChildrenPair\"] = value_set\n \n # for MaxEnt decoding stuff\n # token variants\n data.append((\"topOfBuffer\",self.token_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.token_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.token_dict[config.sigma.top()],self.token_dict[config.beta.top()]))\n #POS variants\n data.append((\"topOfBuffer\",self.POS_dict[config.beta.top()]))\n data.append((\"topOfStack\",self.POS_dict[config.sigma.top()]))\n data.append((\"bufferStackPair\",self.POS_dict[config.sigma.top()],self.POS_dict[config.beta.top()]))\n ins = Instance(label=label, data=data)\n #ins = Instance(label=label, data=featureset)\n instances.append(ins)\n \n return instances", "def get_transformer_splits(loader_cls, tokenizer, return_intent_labels=True):\n datasets = []\n for subset in 
SUBSETS:\n dataset = OODDataset(loader_cls(subset=subset), tokenizer.tokenize,\n return_intent_labels)\n dataset.vectorize_texts(tokenizer)\n datasets.append(dataset)\n return datasets", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X", "def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n label_map = {label: i for i, label in enumerate(label_list)}\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n ####\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n ####\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n ####\n input_mask = [1] * len(input_ids)\n ####\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n ####\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n ####\n label_id = label_map[example.label]\n in_f = InputFeatures(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id\n )\n in_f.tokens = tokens\n 
features.append(in_f)\n return features", "def extract_features(ds, config):\n feature_type = tf.constant(config[\"type\"], tf.string)\n args = _feature_extraction_kwargs_to_args(config)\n tf_device = _get_device_or_default(config)\n\n logger.info(\"Extracting '%s' features on device '%s' with arguments:\\n %s\", config[\"type\"], tf_device, \"\\n \".join(repr(a) for a in args[1:]))\n\n def _append_features(x):\n with tf.device(tf_device):\n features = tf_utils.extract_features(x[\"signal\"], x[\"sample_rate\"], *args)\n feature_types = tf.repeat(feature_type, tf.shape(features)[0])\n return dict(x, input=features, feature_type=feature_types)\n\n if \"group_by_input_length\" in config:\n max_batch_size = config[\"group_by_input_length\"][\"max_batch_size\"]\n logger.info(\"Grouping signals by length, creating batches of max size %d from each group\", max_batch_size)\n ds = group_by_axis_length(ds, \"signal\", max_batch_size, axis=0)\n else:\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching signals with batch size %s, extracting features in batches.\", batch_size.numpy())\n ds = ds.batch(batch_size)\n\n return (ds.prefetch(TF_AUTOTUNE)\n .map(_append_features, num_parallel_calls=TF_AUTOTUNE)\n .unbatch())", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing \n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n \n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, mode): #check later if we can merge this function with the SQuAD preprocessing\n label_map = {}\n for (i, label) in enumerate(label_list):\n label_map[label] = i\n\n features = []\n for (ex_index, example) in enumerate(examples):\n if mode!=\"ae\":\n tokens_a = tokenizer.tokenize(example.text_a)\n else: #only do subword tokenization.\n tokens_a, labels_a, example.idx_map= tokenizer.subword_tokenize([token.lower() for token in example.text_a], example.label )\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n\n if tokens_b:\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n tokens = []\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in tokens_a:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n if tokens_b:\n for token in tokens_b:\n tokens.append(token)\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n target_indices = find_target_indices(tokens_a, tokens)\n if target_indices is None:\n target_indices = (1, 1 + len(tokens_a))\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if mode!=\"ae\":\n label_id = label_map[example.label]\n else:\n label_id = [-1] * len(input_ids) #-1 is the index to ignore\n #truncate the label length if it exceeds the limit.\n lb=[label_map[label] for label in labels_a]\n if len(lb) > max_seq_length - 2:\n lb = lb[0:(max_seq_length - 2)]\n label_id[1:len(lb)+1] = lb\n\n features.append(\n InputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n target_indices=target_indices))\n return features", "def sentences_to_features(self, sentences, labels):\n\n input_examples = [run_classifier.InputExample(guid=\"\", text_a=s, text_b=None, label=l) for s, l in\n zip(sentences, labels)] # here, \"\" is just a dummy label\n input_features = run_classifier.convert_examples_to_features(input_examples, self.label_list,\n self.params[\"MAX_SEQ_LENGTH\"],\n self.tokenizer)\n return input_features", "def flatten_features_and_labels(self, features, labels, signals=None):\n flattened_inputs = []\n if self._feature_names:\n # We need a fixed ordering for enqueueing and dequeueing.\n flattened_inputs.extend(\n [features[name] for name in self._feature_names])\n else:\n flattened_inputs.append(features)\n\n if labels is not None:\n if self._label_names:\n # We need a fixed ordering for enqueueing and dequeueing.\n flattened_inputs.extend([labels[name] for name in self._label_names])\n else:\n flattened_inputs.append(labels)\n\n if signals is not None:\n flattened_inputs.extend(_SignalsHelper.as_tensor_list(signals))\n return flattened_inputs" ]
[ "0.69634634", "0.656113", "0.64468277", "0.63244975", "0.63244975", "0.63244975", "0.6308826", "0.6191861", "0.6083943", "0.60839206", "0.6039162", "0.60375446", "0.5988831", "0.59702706", "0.5932434", "0.59319234", "0.5916766", "0.5876143", "0.58422124", "0.5826482", "0.58198714", "0.581", "0.57981205", "0.5773342", "0.5754759", "0.574714", "0.5739721", "0.57360536", "0.5723661", "0.5712878" ]
0.81941056
0
Using the label, extract the features from the segment defined by the label.
def features_from_label(audio_file, segment): duration = segment['end'] - segment['start'] audio, sample_rate = librosa.core.load( audio_file, duration=duration, offset=segment['start'] ) features = fe.get_features(audio, sample_rate) return features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_from_labels(audio_file, segments):\n segments_features = []\n #for each segment\n for segment in segments:\n features = features_from_label(audio_file, segment)\n #and append it to the list\n segments_features.append(features)\n return segments_features", "def get_features_and_labels(self, dataframe):\n features = dataframe.drop(columns=self._label, axis=1)\n labels = dataframe[self._label]\n\n return features, labels", "def extract(self, label):\n self._validate_labels(label, must_exist=True)\n with self._h5file('r') as h5file:\n return extract(h5file, label)", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def labeledfeatures(eqdata, featurefunc, labelfunc):\n _size = len(eqdata.index)\n _labels, _skipatend = labelfunc(eqdata)\n _features, _skipatstart = featurefunc(eqdata.iloc[:(_size - _skipatend), :])\n return _features, _labels.iloc[_skipatstart:, :]", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def feature_label(features):\n f=[]\n l=[]\n for item in features:\n f.append(item[0])\n l.append(item[1])\n return f,l", "def _get_seg_features(string):\n seg_feature = []\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def _parse_fn(serialized):\n features = parse_example(serialized)\n label = features.pop('clicked')\n return features, label", "def get_dataset_features(text):\n return model.extract(text)", "def get_features_from_segment_raw(seg_raw_df, feature_func_dict):\n # parse input\n if type(feature_func_dict) == str: # it's a json filename\n import json\n feature_func_str = open(feature_func_dict).read()\n feature_func_dict = json.loads(feature_func_str)\n print \"===========start computing features=================\"\n print \"===========feature function dictionary==============\"\n print feature_func_dict\n grouped = seg_raw_df.groupby(s_info.segment_col)\n # parse feature function dictionary\n result = {}\n for feature_name in feature_func_dict:\n print \"==========compute \" + feature_name + \"================\"\n feature = feature_func_dict[feature_name]\n if len(feature['paras']) == 0: # no parameter need to be set, easiest case\n # find out the function\n func_name = feature['handler']\n if hasattr(np, func_name):\n func = getattr(np, func_name)\n elif hasattr(sp_stats, func_name):\n func = getattr(sp_stats, func_name)\n elif hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n else:\n func = func_name\n # prepare columns\n temp = grouped[feature['apply']].aggregate(func)\n result[feature_name] = temp\n else: # has parameters, will compute column one by one\n paras = feature['paras']\n print paras\n # find out the function\n func_name = feature['handler']\n if hasattr(s_feature, func_name):\n func = getattr(s_feature, func_name)\n elif hasattr(np, func_name):\n func = getattr(np, func_name)\n else:\n print func_name + \" can't be found, ignore this 
feature\"\n continue\n # iterate over columns\n temp = {}\n c = 0\n for col in feature['apply']:\n if paras.has_key('with'): # need another column\n paras['another'] = grouped[paras['with'][c]].copy(True)\n temp[col] = grouped[col].aggregate(func, paras)\n c += 1\n # construct DataFrame\n result[feature_name] = pd.DataFrame(temp)\n print \"Inf values: %s\" % np.any(np.isinf(result[feature_name]))\n print \"NaN values: %s\" % np.any(np.isnan(result[feature_name]))\n feature_raw_df = pd.concat(result, axis=1)\n # feature_raw_df = feature_raw_df.reset_index(drop=True)\n return feature_raw_df", "def extract_features(self, clip):\n #sr, clip_array = wav_read(io.BytesIO(clip.data))\n sr = 16000\n # clip_decoded = base64.decodestring(clip.data)\n # clip_array = np.frombuffer(clip_decoded, dtype=np.float16)\n clip_array = np.array(clip.data)\n if clip_array.ndim > 1:\n clip_array = clip_array[:, 0]\n segments = frame_breaker.get_frames(clip_array, sample_rate=sr)\n segments_encoded = [self.np2base64(s, sr) for s in segments]\n segment_features = [\n [f.feature_value for f in self.extract_feats_for_segment(s).features]\n for s in segments_encoded\n ]\n return segment_features", "def get_seg_features(string):\n seg_feature = []\n\n for word in jieba.cut(string):\n if len(word) == 1:\n seg_feature.append(0)\n else:\n tmp = [2] * len(word)\n tmp[0] = 1\n tmp[-1] = 3\n seg_feature.extend(tmp)\n return seg_feature", "def features_and_labels(soundfile, frag_length=128):\n label = soundfile.split('\\\\')[-1].split('_')[0]\n waveform, sample_rate = torchaudio.load(soundfile)\n MFCCs = transforms.MFCC(n_mfcc=128, melkwargs={'n_mels':128, 'win_length':320, 'hop_length':160, 'n_fft':1024 })(waveform[0][:])\n MFCCs = MFCCs.T.view((-1, frag_length, 128)) # transform the shape into (index, time_representation, melbands)\n\n frag_nums = MFCCs.shape[0]\n labels = int(label)*np.ones(frag_nums, dtype=np.int8)\n labels = torch.from_numpy(labels)\n\n return MFCCs, labels", "def collect_features(self, wav_path, label_path):\n n_fft = 512\n window_length = 20\n\n sound, fs = librosa.core.load(wav_path, sr=16000)\n\n if fs != 16000:\n print(wav_path)\n\n # Preemphasis\n preemp_sound = np.append(sound[0], sound[1:] - 0.97 * sound[:-1])\n\n # STFT\n spect = librosa.core.stft(preemp_sound,\n n_fft=n_fft,\n win_length=window_length * int(fs / 1000),\n hop_length=window_length * int(fs / 2000),\n window=scipy.signal.hamming,\n center=True)\n\n spect = np.log10(np.transpose(abs(spect[:, 1:]) ** 2) + 1e-16)\n\n return spect", "def __getitem__(self, label_value: int) -> 'SegmentInfo':\n return self.infos[label_value]", "def generate_feats_and_labels(features_without_activity, feature_with_activity):\n num_time_steps = 64\n step = 6\n segments = []\n labels = []\n for i in range(0, len(features_without_activity) - num_time_steps, step):\n xlist = [features_without_activity[cols].values[i: i + num_time_steps] for cols in features_without_activity.columns]\n label = stats.mode(feature_with_activity['activity'][i: i + num_time_steps])[0][0]\n segments.append(xlist)\n labels.append(label)\n shape_of_segment = np.array(segments).shape\n labels = np.asarray(pd.get_dummies(labels), dtype=np.float32)\n num_features = shape_of_segment[1]\n reshaped_segments = np.asarray(segments, dtype=np.float32).reshape(-1, num_time_steps, num_features)\n return num_features, reshaped_segments, labels", "def split_features_labels(self, batch):\n return batch if not self.unsupervised else (batch, batch)", "def getFeatures2(url, label):\r\n result = 
[]\r\n url = str(url)\r\n \r\n #add the url to feature set\r\n result.append(url)\r\n \r\n #parse the URL and extract the domain information\r\n path = urlparse(url)\r\n ext = tldextract.extract(url)\r\n \r\n #counting number of dots in subdomain \r\n result.append(countdots(ext.subdomain))\r\n \r\n #checking hyphen in domain \r\n result.append(CountSoftHyphen(path.netloc))\r\n \r\n #length of URL \r\n result.append(length(url))\r\n \r\n #checking @ in the url \r\n result.append(CountAt(path.netloc))\r\n \r\n #checking presence of double slash \r\n result.append(CountDSlash(path.path))\r\n \r\n #Count number of subdir \r\n result.append(countSubDir(path.path))\r\n \r\n #number of sub domain \r\n result.append(countSubDomain(ext.subdomain))\r\n \r\n #length of domain name \r\n path2 = urlparse(url_format(url))\r\n result.append(len(path2.netloc)) \r\n \r\n #count number of queries \r\n result.append(len(path.query))\r\n \r\n #Adding domain information\r\n \r\n #if IP address is being used as a URL \r\n result.append(containsip(ext.domain))\r\n \r\n #presence of Suspicious_TLD\r\n result.append(1 if ext.suffix in Suspicious_TLD else 0)\r\n \r\n #append default for create_age(months)country\r\n result.append(-1)\r\n \r\n #append default for expiry_age(months)\r\n result.append(-1)\r\n \r\n #append default for update_age(days)\r\n result.append(-1)\r\n \r\n #append default for country\r\n result.append('None')\r\n \r\n #append extension\r\n path = urlparse(url)\r\n \r\n if get_ext(path.path) == '':\r\n result.append('None')\r\n else:\r\n result.append(get_ext(path.path))\r\n \r\n #append label\r\n result.append(str(label))\r\n \r\n return result", "def read_data(feature_file, label_file):", "def _fetch_features(self, X: np.ndarray, model: CnnModel, output_path: str, subset) -> np.ndarray:\n\n file_helper.guarantee_path_preconditions(output_path)\n\n file_path = join(output_path, subset + '.npy')\n if self._are_features_already_extracted(output_path, subset):\n print('Features already present on: ', file_path)\n features = np.load(file_path)\n else:\n print('Features not present yet, predicting now..')\n features = model.predict(X)\n return features", "def prepare_label_feature(self, label2id: dict):\n text, wp_text, label, wp_label, wp_mark = [], [], [], [], []\n sorted_labels = sorted(label2id.items(), key=lambda x: x[1])\n for label_name, label_id in sorted_labels:\n if label_name == '[PAD]':\n continue\n tmp_text = self.convert_label_name(label_name)\n tmp_wp_text = self.tokenizer.tokenize(' '.join(tmp_text))\n text.extend(tmp_text)\n wp_text.extend(tmp_wp_text)\n label.extend(['O'] * len(tmp_text))\n wp_label.extend(['O'] * len(tmp_wp_text))\n wp_mark.extend([0] + [1] * (len(tmp_wp_text) - 1))\n label_item = self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0)\n label_input = self.get_test_model_input(label_item)\n return label_input, label_item", "def extractFeatures(self, datum):\n abstract", "def from_labeled_point(rdd: RDD, categorical: bool = False, nb_classes: int = None):\n features = np.asarray(\n rdd.map(lambda lp: from_vector(lp.features)).collect())\n labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')\n if categorical:\n if not nb_classes:\n nb_classes = np.max(labels) + 1\n temp = np.zeros((len(labels), nb_classes))\n for i, label in enumerate(labels):\n temp[i, label] = 1.\n labels = temp\n return features, labels", "def generateFeatures(src_image, label, knn=None):\n \n # computes the features\n f_vec = 
extractFeatures(src_image, args.features)\n \n # quantize, if codebook is present\n if not (knn == None):\n # implementation using opencv\n f_vec1 = getHistogramOfVisualWords(f_vec, knn)\n #print f_vec1[0]\n \n# # alternative implementation using scipy, results in the same numbers\n# codebook = loadCodebook()\n# codes,dist = vq.vq(f_vec, codebook)\n# f_vec2, bin_edges = histogram(codes,\n# bins=range(codebook.shape[0]+1),\n# normed=True)\n# print f_vec2[0]\n \n f_vec = f_vec1\n else:\n # flatten the array\n f_vec = np.reshape(f_vec, (1,f_vec.size))\n \n # prepend the label\n f_vec = np.insert(f_vec, 0, label)\n \n return f_vec", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def _labels_of_sentence(self, sentence, split):\n labels = torch.ones(1)\n labels[0] = self.category_int_of_label_string(sentence[0][self.name_to_index_dict['label']]) #\n return labels", "def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)", "def prepare_sep_label_feature(self, label2id):\n label_items = []\n for label_name in label2id:\n if label_name == '[PAD]':\n continue\n text = self.convert_label_name(label_name)\n wp_text = self.tokenizer.tokenize(' '.join(text))\n wp_label = ['O'] * len(wp_text)\n label = ['O'] * len(wp_text)\n wp_mark = [0] + [1] * (len(wp_text) - 1)\n label_items.append(self.data_item2feature_item(DataItem(text, label, wp_text, wp_label, wp_mark), 0))\n label_input = self.get_support_model_input(label_items, len(label2id) - 1) # no pad, so - 1\n return label_input, label_items", "def findHighWeightFeatures(self, label):\n featuresWeights = []\n\n \"*** YOUR CODE HERE ***\"\n\n return featuresWeights" ]
[ "0.73163855", "0.5836902", "0.5791848", "0.5791834", "0.56940895", "0.56239414", "0.56133085", "0.56110984", "0.5592654", "0.5533984", "0.5508768", "0.5500992", "0.5496361", "0.5473228", "0.5451627", "0.54497445", "0.54106414", "0.5410236", "0.54029137", "0.5383069", "0.5349353", "0.53469014", "0.53374934", "0.53068167", "0.5298993", "0.52942234", "0.52416706", "0.52389544", "0.52277577", "0.52188945" ]
0.7549193
0
This function reads all the label files from the label_folder, extracts the features from the audio with a matching name in audio_folder, and saves all the features into the output_folder.
def features_from_folder(label_folder, audio_folder, output_folder): print('Listing label files from folder.') #scan labels folder labels_list = os.listdir(label_folder) label_files = [] for filename in labels_list: #get its extension file_extension = filename.split('.')[-1] if file_extension != 'txt': continue #save to without its extension label_files.append(filename[:-4]) print('Listing audio files from folder.') #scan audio folder audios_list = os.listdir(audio_folder) audio_files = [] for filename in audios_list: #get its extension file_extension = filename.split('.')[-1] if file_extension != 'wav': continue #save to without its extension audio_files.append(filename[:-4]) print('Removing files without matches') #use only the files with matching audio/label files_to_process = [] for label_file in label_files: if label_file in audio_files: files_to_process.append(label_file) print('Processing each file...') i = 1 class_count = {} total_f = len(files_to_process) #for each file for processing in files_to_process: print('File', str(i) + '/' + str(total_f)) i += 1 # label_file = os.path.join(label_folder, processing + ".txt") audio_file = os.path.join(audio_folder, processing + ".wav") #get the segments from the corresponding label file segments = get_segments(label_file) # total_s = len(segments) j = 1 #for each segment for segment in segments: print('\tSegment', str(j) + '/' + str(total_s), segment['class']) j += 1 if class_count.get(segment['class']) is None: class_count[segment['class']] = 1 else: class_count[segment['class']] += 1 output_filename = segment['class'] output_filename += '-' + format(class_count[segment['class']], '04d') output_filename = os.path.join(output_folder, output_filename) #get its features segment_features = features_from_label(audio_file, segment) #save it to a file fe.write_as_bin(output_filename, segment_features)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def read_process_labelled(src_dir, window=0.2, overlap=0.5, debug=True):\n\n arr_features = []\n\n # Read files from the folders\n for x, _ in GENRES.items():\n folder = src_dir + x\n \n for root, subdirs, files in os.walk(folder):\n for file in files:\n # Read the audio file\n file_name = folder + \"/\" + file\n signal, sr = librosa.load(file_name)\n signal = signal[:660000]\n \n # Debug process\n if debug:\n print(f\"Reading file: {file_name}\")\n \n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n features['genre'] = GENRES[x]\n arr_features.append(features)\n\n return arr_features", "def load_data(folders):\n features, labels = np.zeros(0), np.zeros(0, dtype=int)\n for folder_id in folders:\n folder = \"fold%d\"%(folder_id)\n for fn in glob.glob(os.path.join(RAW_DATA_DIR, folder, \"*.wav\")):\n just_fn_name = fn.split('/')[-1]\n class_id = just_fn_name.split('-')[1]\n #print(\"fn\", fn, just_fn_name, class_id)\n mfcc2 = _extract_features_from_one_file(fn)\n if mfcc2 is None:\n continue\n features = np.append(features, mfcc2)\n labels= np.append(labels, int(class_id))\n features = features.reshape(-1, N_MFCC)\n #labels = labels.reshape(-1, 1)\n #print(\"features.shape\", features.shape, \"labels.shape\", labels.shape)\n labels = one_hot_encode(labels)\n return features, labels", "def collect_and_rename() -> None:\n image_source_folder = 'image_dir'\n label_source_folder = 'annotation_dir'\n image_target_folder = 'images'\n label_target_folder = 'labels'\n for i, (subdir, _, files) in enumerate(os.walk(image_source_folder), -1):\n # it walks the parent folder first, not a file\n if i == -1: \n continue\n subdir_name = subdir.split('\\\\')[1]\n for file_name in files:\n with open(f'{image_source_folder}/{subdir_name}/{file_name}') as image_file, \\\n open(f'{label_source_folder}/{subdir_name}/{file_name}'.split('.')[0] + '.txt') as label_file:\n shutil.copy2(image_file.name, f'{image_target_folder}/{\"%06d\" % i}.jpg')\n shutil.copy2(label_file.name, f'{label_target_folder}/{\"%06d\" % i}.txt')\n print(f'Processed {i} images')", "def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]", "def generateFeatureData(directory, outFileName='tmp/features.txt', isClassifying=False):\n\n audioList = getAudioFiles(directory)\n\n outFile = open(outFileName, \"w\")\n\n for audio in audioList:\n features = audio.getFeatures()\n \n if isClassifying: # We are 
classifying, we don't know type\n audioType = '0'\n else: # We are generating training data. Try to predict using file name\n audioType = '1' if audio.predictType() == 'Music' else '-1'\n \n outFile.write(audioType + ' ' + features + ' # ' + audio.name + '\\n')\n\n outFile.close()\n\n return audioList", "def process_files(lab_dir, wav_dir, id_list, out_dir, state_level, question_file, subphone_feat_type, calc_mvn=False):\n file_ids = utils.get_file_ids(lab_dir, id_list)\n _file_ids = utils.get_file_ids(wav_dir, id_list)\n\n if len(file_ids) != len(_file_ids) or sorted(file_ids) != sorted(_file_ids):\n raise ValueError(\"Please provide id_list, or ensure that wav_dir and lab_dir contain the same files.\")\n\n os.makedirs(out_dir, exist_ok=True)\n\n # Linguistic feature directories.\n os.makedirs(os.path.join(out_dir, 'lab'), exist_ok=True)\n if subphone_feat_type is not None:\n os.makedirs(os.path.join(out_dir, 'counters'), exist_ok=True)\n\n # Acoustic feature directories.\n os.makedirs(os.path.join(out_dir, 'f0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'lf0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'vuv'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'sp'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'ap'), exist_ok=True)\n\n # Sequence length feature directories.\n os.makedirs(os.path.join(out_dir, 'dur'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'n_frames'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'n_phones'), exist_ok=True)\n\n questions = lab_features.QuestionSet(question_file)\n subphone_features = lab_features.SubphoneFeatureSet(subphone_feat_type)\n\n @utils.multithread\n def save_lab_and_wav_to_files(file_id):\n lab_path = os.path.join(lab_dir, '{}.lab'.format(file_id))\n label = lab_features.Label(lab_path, state_level)\n\n if subphone_feat_type is None:\n numerical_labels = label.normalise(questions, upsample_to_frame_level=False)\n else:\n numerical_labels, counter_features = label.normalise(questions, subphone_features, False)\n\n wav_path = os.path.join(wav_dir, '{}.wav'.format(file_id))\n wav = wav_features.Wav(wav_path)\n f0, vuv, sp, ap = wav.extract_features()\n lf0 = np.log(f0)\n\n # Often there is a small difference in number of frames between labels and vocoder features.\n durations = label.phone_durations\n n_frames = sum(durations)\n diff = n_frames - f0.shape[0]\n\n if diff > len(durations):\n raise ValueError(\"Number of label frames and vocoder frames is too different for {name}\\n\"\n \"\\tvocoder frames {voc}\\n\"\n \"\\tlabel frames {lab}\\n\"\n \"\\tnumber of phones {phones}\"\n .format(name=file_id, voc=f0.shape[0], lab=n_frames, phones=len(durations)))\n\n # Remove excess durations if there is a shape mismatch.\n if diff > 0:\n # Remove 1 frame from each phone's duration starting at the end of the sequence.\n durations[-diff:] -= 1\n n_frames = f0.shape[0]\n\n assert n_frames == sum(durations)\n\n make_feature_path = lambda name: os.path.join(out_dir, name, '{}.{}'.format(file_id, name))\n\n # Save linguistic features in binary .npy files.\n file_io.save_bin(numerical_labels, make_feature_path('lab'))\n if subphone_feat_type is not None:\n file_io.save_bin(counter_features[:n_frames], make_feature_path('counters'))\n\n # Save acoustic features in binary .npy files.\n file_io.save_bin(f0[:n_frames], make_feature_path('f0'))\n file_io.save_bin(lf0[:n_frames], make_feature_path('lf0'))\n file_io.save_bin(vuv[:n_frames], make_feature_path('vuv'))\n file_io.save_bin(sp[:n_frames], 
make_feature_path('sp'))\n file_io.save_bin(ap[:n_frames], make_feature_path('ap'))\n\n # Save sequence length features in text files.\n file_io.save_txt(durations, make_feature_path('dur'))\n file_io.save_txt(n_frames, make_feature_path('n_frames'))\n file_io.save_txt(len(label.phones), make_feature_path('n_phones'))\n\n # Save dimensionality of linguistic and acoustic features to text files.\n make_dim_path = lambda name: os.path.join(out_dir, '{}.dim'.format(name))\n\n file_io.save_txt(numerical_labels.shape[1], make_dim_path('lab'))\n if subphone_feat_type is not None:\n file_io.save_txt(counter_features.shape[1], make_dim_path('counters'))\n\n file_io.save_txt(f0.shape[1], make_dim_path('f0'))\n file_io.save_txt(lf0.shape[1], make_dim_path('lf0'))\n file_io.save_txt(vuv.shape[1], make_dim_path('vuv'))\n file_io.save_txt(sp.shape[1], make_dim_path('sp'))\n file_io.save_txt(ap.shape[1], make_dim_path('ap'))\n\n save_lab_and_wav_to_files(file_ids)\n\n if calc_mvn:\n calclate_mvn_parameters(out_dir, 'dur', id_list=id_list, is_npy=False)\n calclate_mvn_parameters(out_dir, 'f0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'lf0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'vuv', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'sp', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'ap', id_list=id_list, dtype=np.float32)", "def categorize_classifier_files(out_dir):\n\n #sort all of the classifier files into a dictionary\n class_files = glob.glob(\"feature_extraction_m*\")\n class_file_dict = {\"positive\":[], \"negative\":[]}\n class_cand_dict = {\"m1\":class_file_dict, \"m2\":class_file_dict, \"m3\":class_file_dict, \"m4\":class_file_dict, \"m5\":class_file_dict}\n\n for filename in class_files:\n split_name = filename.split(\"_\")[-1].split(\".\")\n model_num = split_name[0]\n det = split_name[-1]\n class_cand_dict[model_num][det].append(filename)\n\n #get all of the pfd files into a list\n class_file_m1 = glob.glob(\"feature_extraction_m1*\")\n pfd_files = []\n for afile in class_file_m1:\n f = open(afile, \"r\")\n for line in f.readlines():\n pfd_files.append(line)\n f.close()\n\n #fill a dictionary with pfds and a value for how many positive IDs each pfd has\n pulsar_pfds={}\n for key in pfd_files:\n pulsar_pfds[key]=0\n for model_num in class_cand_dict.keys():\n if class_cand_dict[model_num][\"positive\"]:\n print(class_cand_dict[model_num][\"positive\"])\n f = open(class_cand_dict[model_num][\"positive\"][0], \"r\")\n for line in f.readlines():\n pulsar_pfds[line]+=1\n f.close()\n\n #For each pfd with >=3 positive IDs, write that pfd to 'positive' file, else write to 'negative' file\n pos_f = open(os.path.join(out_dir, \"LOTAAS_positive_detections.txt\"), \"w+\")\n neg_f = open(os.path.join(out_dir, \"LOTAAS_negative_detections.txt\"), \"w+\")\n for pfd_key in pulsar_pfds.keys():\n if pulsar_pfds[pfd_key]>=3:\n print(\"detected pulsar: {}\".format(pfd_key))\n pos_f.write(pfd_key.split(\"/\")[-1])\n else:\n neg_f.write(pfd_key.split(\"/\")[-1])\n pos_f.close()\n neg_f.close()", "def process_elements(feature_name, new_labels, out_path, source_path, fun=None,\n model_name=VOICE_DETECTION_MODEL_NAME, existing_labels=None,\n **kwargs):\n\n def __process_elements(data):\n \"\"\"\n :param data: shape (#_songs, 2) the axis 1 corresponds to the filename/label pair\n :return:\n \"\"\"\n x = data[:, 0]\n y = data[:, 1]\n print('loaded metadata in {}'.format(data))\n\n import torch\n no_cuda = True # no 
cabe en mi gpu :c\n use_cuda = not no_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n sr = OUNMIX_SAMPLE_RATE\n\n model_manager = OpenUnmixManager()\n for idx, x_i in enumerate(x):\n # for each filename in data\n # this is kind-of standard\n y_i = y[idx]\n ext = 'mp3'\n file_name = FeatureExtractor.get_file_name(x_i, feature_name, ext=ext)\n try:\n # try to load if file already exist\n print('info: trying to load {}'.format(out_path / file_name))\n if existing_labels is not None and file_name in existing_labels['filename'].values:\n new_labels.append([file_name, y_i])\n continue\n librosa.load(str(out_path / file_name), sr=sr)\n new_labels.append([file_name, y_i])\n except (FileNotFoundError, OSError, EOFError, audioread.NoBackendError):\n # OSError and EOFError are raised if file are inconsistent\n # final_shape: (#_hops, #_mel_filters, #_window)\n print('info: processing {}'.format(x_i))\n audio, rate = librosa.core.load(source_path / x_i, mono=False, sr=sr)\n SingingVoiceSeparationOpenUnmixFeatureExtractor.process_x_i(\n device,\n file_name,\n x_i, # as filename is specified manually is just useful for logs\n y_i,\n source_path,\n out_path,\n new_labels,\n sr,\n audio,\n rate,\n model_manager\n )\n\n return __process_elements", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def read_classified_data(root_path, to_size = (200,200), transformation = transforms.ToTensor()):\n label_dict = {}\n # for each folder in the dataset\n # get the label\n for i, label in tqdm(enumerate(sorted(os.listdir(root_path))), desc = \"Read in...\", leave = False):\n if len(os.listdir(sub_path)) == 0:\n continue\n sub_path = os.path.join(root_path, label)\n # write the label in the label dict\n label_dict[i] = label\n # find the csv, there should be one and only one csv\n csv_path = glob.glob(os.path.join(sub_path,\"*.csv\"))[0]\n df = pd.read_csv(csv_path)\n # the csv should have a image_name list indicating the 1-1 correspondense\n image_origin = df[\"image_name\"]\n # get the rest and the features\n df.drop(labels = \"image_name\", axis = \"columns\", inplace = True)\n # concate them to our dataset\n if i == 0:\n features = torch.from_numpy(df.to_numpy())\n images = torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin])\n labels = torch.ones(image_origin.shape[0])*label\n else:\n features = torch.cat((features,torch.from_numpy(df.to_numpy())))\n images = torch.cat(images,torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n 
transformation = transformation) for i in image_origin]))\n labels = torch.cat(labels, torch.ones(image_origin.shape[0])*label)\n # return the dataset with our label_dict\n return TensorDataset(images,features, labels),label_dict", "def process_elements(feature_name, new_labels, out_path, source_path, fun=None,\n model_name=VOICE_DETECTION_MODEL_NAME,\n **kwargs):\n\n def __process_elements(data):\n \"\"\"\n :param data: shape (#_songs, 2) the axis 1 corresponds to the filename/label pair\n :return:\n \"\"\"\n x = data[:, 0]\n y = data[:, 1]\n print('loaded metadata in {}'.format(data))\n\n from keras.models import load_model\n from keras import backend\n\n if len(backend.tensorflow_backend._get_available_gpus()) > 0:\n # set gpu number\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\n # load mode\n loaded_model = load_model(str(MODELS_DATA_PATH / feature_name / 'latest.h5'.format(model_name)))\n print(\"loaded model\")\n print(loaded_model.summary())\n\n for idx, x_i in enumerate(x):\n # this is kind-of standard\n y_i = y[idx]\n file_name = FeatureExtractor.get_file_name(x_i, feature_name, ext='wav')\n try:\n # try to load if file already exist\n librosa.load(str(out_path / file_name), sr=MAGPHASE_SAMPLE_RATE)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n # final_shape: (#_hops, #_mel_filters, #_window)\n print('info: loading magphase data for {}'.format(x_i))\n magphase = np.load(source_path / x_i) # _data, #_coefs, #_samples)\n print('info: formatting data')\n try:\n # magphase shape (2, 513, #_windows (~4000))\n assert len(magphase.shape) == 3\n\n # padding the last dim to fit window_size strictly\n number_of_windows = ceil(magphase.shape[2] / MAGPHASE_PATCH_SIZE)\n padding = number_of_windows * MAGPHASE_PATCH_SIZE - magphase.shape[2]\n if padding > 0:\n magphase = np.pad(magphase, ((0, 0), (0, 0), (0, padding)), mode='constant')\n # discard first coeficient in both\n mag = magphase[0, 1:, :] # shape (512, #_windows)\n phase = magphase[1, :, :] # shape (513, #_windows)\n x = np.array([mag[:, i * MAGPHASE_PATCH_SIZE:(i + 1) + MAGPHASE_PATCH_SIZE] for i in\n range(number_of_windows)])\n\n x = x.reshape(-1, int(MAGPHASE_WINDOW_SIZE / 2), MAGPHASE_PATCH_SIZE, 1)\n\n # stack in a batch of size (512, 128)\n\n print('info: predicting')\n y_pred = loaded_model.predict(x, verbose=1) # Shape=(total_frames,)\n\n target_pred_mag = np.vstack((np.zeros((128)), y_pred.reshape(512, 128)))\n out_wav = istft(\n target_pred_mag * phase\n # (mix_wav_mag * target_pred_mag) * mix_wav_phase\n , win_length=MAGPHASE_WINDOW_SIZE,\n hop_length=MAGPHASE_HOP_LENGTH)\n\n FeatureExtractor.save_audio(out_wav, feature_name, out_path, x_i, y_i, new_labels,\n sr=MAGPHASE_SAMPLE_RATE)\n except MemoryError as e:\n print('error: memory error while proccessing {}. 
Ignoring...'.format(x_i))\n print(e)\n\n return __process_elements", "def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")", "def extract_features(root_dir: str,\n settings_data: MutableMapping[str, Any],\n settings_features: MutableMapping[str, Any])\\\n -> None:\n # Get the root directory.\n dir_root = Path(root_dir)\n\n # Get the directories of files.\n dir_output = dir_root.joinpath(settings_data['audio_dirs']['output'])\n\n dir_dev = dir_output.joinpath(\n settings_data['audio_dirs']['development'])\n dir_eva = dir_output.joinpath(\n settings_data['audio_dirs']['evaluation'])\n\n # Get the directories for output.\n dir_output_dev = dir_root.joinpath(\n settings_data['features_dirs']['output'],\n settings_data['features_dirs']['development'])\n dir_output_eva = dir_root.joinpath(\n settings_data['features_dirs']['output'],\n settings_data['features_dirs']['evaluation'])\n\n # Create the directories.\n dir_output_dev.mkdir(parents=True, exist_ok=True)\n dir_output_eva.mkdir(parents=True, exist_ok=True)\n\n # Apply the function to each file and save the result.\n for data_file_name in filter(\n lambda _x: _x.suffix == '.npy',\n chain(dir_dev.iterdir(), dir_eva.iterdir())):\n\n # Load the data file.\n data_file = load_numpy_object(data_file_name)\n\n # Extract the features.\n features = feature_extraction(\n data_file['audio_data'].item(),\n **settings_features['process'])\n\n # Populate the recarray data and dtypes.\n array_data = (data_file['file_name'].item(), )\n dtypes = [('file_name', data_file['file_name'].dtype)]\n\n # Check if we keeping the raw audio data.\n if settings_features['keep_raw_audio_data']:\n # And add them to the recarray data and dtypes.\n array_data += (data_file['audio_data'].item(), )\n dtypes.append(('audio_data', data_file['audio_data'].dtype))\n\n # Add the rest to the recarray.\n array_data += (\n features,\n data_file['caption'].item(),\n data_file['caption_ind'].item(),\n data_file['words_ind'].item(),\n data_file['chars_ind'].item())\n dtypes.extend([\n ('features', np.dtype(object)),\n ('caption', data_file['caption'].dtype),\n ('caption_ind', data_file['caption_ind'].dtype),\n ('words_ind', data_file['words_ind'].dtype),\n ('chars_ind', data_file['chars_ind'].dtype)\n ])\n\n # Make the recarray\n np_rec_array = np.rec.array([array_data], dtype=dtypes)\n\n # Make the path for serializing the recarray.\n parent_path = dir_output_dev \\\n if data_file_name.parent.name == settings_data['audio_dirs']['development'] \\\n else dir_output_eva\n\n file_path = parent_path.joinpath(data_file_name.name)\n\n # Dump it.\n dump_numpy_object(np_rec_array, file_path)", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n 
data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def process_element(feature_name, new_labels, out_path, source_path, **kwargs):\n\n def __process_element(data):\n \"\"\"\n Compute double stage HPSS for the given audio file\n extracted from https://github.com/kyungyunlee/ismir2018-revisiting-svd/blob/master/leglaive_lstm/audio_processor.py\n :param x: filename (str)\n :param y: label (str)\n :return: mel_D2_total : concatenated melspectrogram of percussive, harmonic components of double stage HPSS. Shape=(2 * n_bins, total_frames) ex. (80, 2004)\n \"\"\"\n print('processing {}'.format(data))\n x_i = data[0]\n y_i = data[1]\n\n file_name = FeatureExtractor.get_file_name(x_i, feature_name)\n try:\n # try to load if file already exist\n np.load(out_path / file_name, allow_pickle=True)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n audio_src, _ = librosa.load(str(source_path / x_i), sr=MAGPHASE_SAMPLE_RATE)\n\n mix_wav_mag, mix_wav_phase = magphase(\n stft(\n audio_src,\n n_fft=MAGPHASE_WINDOW_SIZE,\n hop_length=MAGPHASE_HOP_LENGTH\n ))\n\n # mix_wav_mag = mix_wav_mag[:, START:END] # 513, SR * Duracion en segundos de x_i\n # mix_wav_phase = mix_wav_phase[:, START:END] # ~\n # mix_wav_mag = mix_wav_mag[1:].reshape(1, 512, 128, 1) # reshape to match train data\n array = np.stack((mix_wav_mag, mix_wav_phase))\n\n # stacks the magnitude and phase,\n # final shape should be (2, 513 (n_fft/2 + 1), 128 (patchsize), 1 (dummy channels)\n\n # this is kind-of standard\n FeatureExtractor.save_feature(array, feature_name, out_path, x_i, y_i, new_labels)\n\n return __process_element", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n 
print \"Saved \" + labels_file", "def main(args):\n data_transform = transforms.Compose([\n transforms.Scale((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = datasets.ImageFolder(root=args.root_dir, transform=data_transform)\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, \n shuffle=False, num_workers=0, pin_memory=True)\n net = get_feature_extractor()\n\n if torch.cuda.is_available():\n net = net.cuda()\n\n features_out = np.zeros((len(dataset), 4096))\n labels_out = np.zeros(len(dataset))\n \n p = progressbar.ProgressBar(widgets=[progressbar.ETA(), ' ', progressbar.Percentage()])\n for i, samples in p(enumerate(dataloader)):\n images, labels = samples\n if torch.cuda.is_available():\n images = images.cuda()\n images = Variable(images)\n features = net(images).cpu().data.numpy()\n features_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = features\n labels_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = labels.int().numpy()\n print(i)\n\n with open(os.path.join(args.out, 'features.pickle'),'wb') as f:\n pickle.dump(features_out, f)\n with open(os.path.join(args.out, 'labels.pickle'),'wb') as f:\n pickle.dump(labels_out, f)", "def read_data(feature_file, label_file):", "def _parse_data_dir(self, data_dir):\n categories = os.listdir(data_dir)\n for folder_name in categories:\n all_fnames_list_fname = os.path.join(data_dir, folder_name,\n folder_name + \".bmf\")\n if not os.path.isfile(all_fnames_list_fname):\n raise IOError(\"Not found file {}\".format(all_fnames_list_fname))\n all_fnames_list = np.loadtxt(all_fnames_list_fname, dtype=np.str,\n skiprows=1)\n # Correct from pgm to jpg\n all_fnames_list = [f.split('.')[0]+'.jpg' for f in all_fnames_list]\n\n all_fnames_list = [os.path.join(data_dir, folder_name, f) for f \\\n in all_fnames_list]\n\n self.samples += len(all_fnames_list)\n # Append the last\n self.image_filenames.append(all_fnames_list)", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def process_elements(feature_name, new_labels, out_path, source_path, fun=None,\n model_name=VOICE_DETECTION_MODEL_NAME,\n **kwargs):\n\n def __process_elements(data):\n \"\"\"\n :param data: shape (#_songs, 2) the axis 1 corresponds to the filename/label pair\n :return:\n \"\"\"\n x = data[:, 0]\n y = data[:, 1]\n print('loaded metadata in {}'.format(data))\n\n from keras.models import load_model\n from keras import backend\n\n if len(backend.tensorflow_backend._get_available_gpus()) > 0:\n # This NN use more that 2 GB of VRAM. 
So i disable GPU for my local environment with\n # print(backend.tensorflow_backend._get_available_gpus())\n # set gpu number\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(len(backend.tensorflow_backend._get_available_gpus())) # \"0\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n # load mode\n loaded_model = load_model(str(MODELS_DATA_PATH / 'leglaive' / 'rnn_{}.h5'.format(model_name)))\n print(\"loaded model\")\n print(loaded_model.summary())\n\n mean_std = np.load(MODELS_DATA_PATH / 'leglaive' / 'train_mean_std_{}.npy'.format(model_name))\n mean = mean_std[0]\n std = mean_std[1]\n\n for idx, x_i in enumerate(x):\n # this is kind-of standard\n y_i = y[idx]\n file_name = FeatureExtractor.get_file_name(x_i, feature_name)\n try:\n # try to load if file already exist\n np.load(out_path / file_name, allow_pickle=True)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n # final_shape: (#_hops, #_mel_filters, #_window)\n print('info: loading hpss data for {}'.format(x_i))\n hpss = np.load(source_path / x_i) # _data, #_coefs, #_samples)\n print('info: formatting data')\n try:\n print(\"debug: hpss shape is {}\".format(hpss.shape))\n print('debug: size of hpss is {} KB'.format(sys.getsizeof(hpss) / 1024))\n padding = RNN_INPUT_SIZE_VOICE_ACTIVATION - hpss.shape[1]\n if padding > 0:\n # if hpss is shorter that RNN input shape, then add padding on axis=1\n hpss = np.pad(hpss, ((0, 0), (0, padding)), mode='constant')\n number_of_mel_samples = hpss.shape[1]\n # at least should have 1 window\n number_of_steps = max(number_of_mel_samples - RNN_INPUT_SIZE_VOICE_ACTIVATION, 1)\n total_x = np.array([hpss[:, i: i + RNN_INPUT_SIZE_VOICE_ACTIVATION]\n for i in range(0, number_of_steps, 1)])\n # final_shape: (#_hops, #_mel_filters, #_window)\n\n total_x_norm = (total_x - mean) / std\n total_x_norm = np.swapaxes(total_x_norm, 1, 2)\n # final_shape: (#_hops, #_window, #_mel_filters)\n\n x_test = total_x_norm\n print('info: predicting')\n y_pred = loaded_model.predict(x_test, verbose=1) # Shape=(total_frames,)\n time, aligned_y_pred = VoiceActivationFeatureExtractor.post_process(y_pred,\n number_of_mel_samples)\n print('info: predicted!')\n\n result_array = np.asarray([time, aligned_y_pred])\n\n print('info: reducing dimensionality')\n\n FeatureExtractor.save_feature(result_array, feature_name, out_path, x_i, y_i, new_labels)\n except MemoryError as e:\n print('error: memory error while proccessing {}. 
Ignoring...'.format(x_i))\n print(e)\n\n return __process_elements", "def dataset_files_labels(folders, db_root):\n fl = []\n for f in folders:\n\n fo = open(db_root + '/sample labels ' + f + '.txt', 'r')\n dialect = csv.Sniffer().sniff(fo.read(1024), delimiters=\"\\t \")\n fo.seek(0)\n for x in csv.reader(fo, dialect):\n fl.append([db_root + '/' + f + '/' + x[0], x[1]])\n return fl", "def __create_label_file(self, species_list: List[str]) -> None:\n\n nips4bplus_filtered_audio_folder = self.file_manager.data_folder(\"nips4bplus_filtered\", \"audio\")\n nips4bplus_audio_folder = self.file_manager.data_folder(\"nips4bplus\", \"audio\")\n\n nips4b_species_list = self.download_nips4b_species_list()\n\n nips4bplus_selected_labels = []\n nips4bplus_labels = []\n\n species_to_sound_types = self._parse_species_list(species_list, {\"song\", \"call\"})\n\n for file in os.listdir(self.extracted_nips_annotations_folder):\n label_file_path = os.path.join(self.extracted_nips_annotations_folder, file)\n\n def map_class_names(row):\n if row[\"label\"] in ('Unknown', 'Human'):\n return \"noise\"\n\n nips4b_class_name = nips4b_species_list[nips4b_species_list[\"nips4b_class_name\"] == row[\"label\"]]\n scientific_n = nips4b_class_name[\"Scientific_name\"].item()\n sound_t = nips4b_class_name[\"sound_type\"].item()\n\n if len(nips4b_class_name) != 1:\n raise NameError(f\"No unique label found for class {row['label']}\")\n\n if scientific_n not in species_to_sound_types or sound_t not in species_to_sound_types[scientific_n]:\n return \"noise\"\n else:\n return nips4b_class_name[\"class name\"].item()\n\n if file.endswith(\".csv\"):\n try:\n labels = pd.read_csv(label_file_path, names=[\"start\", \"duration\", \"label\"])\n labels[\"label\"] = labels.apply(map_class_names, axis=1)\n except pd.errors.EmptyDataError:\n labels = pd.DataFrame([0, 5, \"noise\"], columns=[\"start\", \"duration\", \"label\"])\n\n file_id = file.lstrip(\"annotation_train\").rstrip(\".csv\")\n\n labels[\"id\"] = f\"nips4b_birds_trainfile{file_id}\"\n labels[\"file_path\"] = f\"nips4b_birds_trainfile{file_id}.wav\"\n labels[\"start\"] = labels[\"start\"] * 1000\n labels[\"end\"] = labels[\"start\"] + labels[\"duration\"] * 1000\n\n contains_selected_species = False\n for idx, label in labels.iterrows():\n class_name = nips4b_species_list[nips4b_species_list[\"class name\"] == label[\"label\"]]\n\n if label[\"label\"] != \"noise\" and class_name[\"Scientific_name\"].item() in species_to_sound_types:\n contains_selected_species = True\n if contains_selected_species:\n nips4bplus_selected_labels.append(labels)\n\n labels = labels[[\"id\", \"file_path\", \"start\", \"end\", \"label\"]]\n\n self.append = nips4bplus_labels.append(labels)\n\n nips4bplus_labels = pd.concat(nips4bplus_labels)\n self._save_label_file(nips4bplus_labels, \"nips4bplus\")\n if len(nips4bplus_selected_labels) > 0:\n nips4bplus_selected_labels = pd.concat(nips4bplus_selected_labels)\n else:\n nips4bplus_selected_labels = pd.DataFrame(columns=[\"id\", \"file_path\", \"label\", \"start\", \"end\"])\n\n self._save_label_file(nips4bplus_selected_labels, \"nips4bplus_filtered\")\n\n for dataset in [\"train\", \"test\"]:\n folder_path = os.path.join(self.extracted_nips_audio_folder, dataset)\n FileManager.copytree(folder_path, nips4bplus_filtered_audio_folder)\n FileManager.copytree(folder_path, nips4bplus_audio_folder)\n\n # remove audio files without labels\n for file in os.listdir(nips4bplus_filtered_audio_folder):\n if 
nips4bplus_selected_labels[nips4bplus_selected_labels[\"file_path\"] == file].empty:\n os.remove(os.path.join(nips4bplus_filtered_audio_folder, file))\n for file in os.listdir(nips4bplus_audio_folder):\n if nips4bplus_labels[nips4bplus_labels[\"file_path\"] == file].empty:\n os.remove(os.path.join(nips4bplus_audio_folder, file))", "def prepare_training_data(self, data_folder_path):\n\n #get the directories (one directory for each subject) in data folder\n dirs = os.listdir(data_folder_path)\n\n #list to hold all subject faces\n faces = []\n #list to hold labels for all subjects\n labels = []\n #List to hold subject names\n subjects = []\n\n label = -1;\n #let's go through each directory and read images within it\n for dir_name in dirs:\n\n #ignore system files like .DS_Store\n if dir_name.startswith(\".\"):\n continue;\n\n label += 1\n subjects.append(dir_name)\n logger.info(\"label=%d subject=%s\" %(label, dir_name))\n\n #build path of directory containing images for current subject subject\n #sample subject_dir_path = \"training-data/Bruce\"\n subject_dir_path = data_folder_path + \"/\" + dir_name\n\n #get the images names that are inside the given subject directory\n subject_images_names = os.listdir(subject_dir_path)\n\n #go through each image name, read image,\n #detect face and add face to list of faces\n for image_name in subject_images_names:\n\n #ignore system files like .DS_Store\n if image_name.startswith(\".\"):\n continue;\n\n #sample image path = training-data/Bruce/face1.png\n image_path = subject_dir_path + \"/\" + image_name\n image = cv2.imread(image_path)\n logger.info(\"file size: %d. numpy image size: %d\" %(os.path.getsize(image_path), len(image)))\n face, rect = self.detect_face(image)\n\n #we will ignore faces that are not detected\n if face is not None:\n #add face to list of faces\n faces.append(face)\n #add label for this face\n labels.append(label)\n\n return faces, labels, subjects", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. 
/ 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': \"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train", "def convert_labels() -> None:\n data_folder = 'images'\n validation_split = 0.10\n\n # Convert annotations and split into validation and train set\n number_images = int(len(os.listdir(data_folder)) / 2)\n train_size = int(number_images * (1 - validation_split))\n val_size = number_images - train_size\n\n print(f'Training dataset size: {train_size}')\n print(f'Validation dataset size: {val_size}')\n\n with open('train.txt', 'w') as train_file, open('val.txt', 'w') as val_file:\n files = os.listdir(data_folder)\n print(len(files))\n # shuffle otherwise validation is from the same session\n random.shuffle(files)\n processed = 0\n for file_name in files:\n if file_name.split('.')[1] == 'jpg':\n # if image has no labels\n write = False\n if processed < train_size:\n file_to_write = train_file\n else:\n file_to_write = val_file\n\n with open(f'{data_folder}/{file_name}'.split('.')[0] + '.txt') as label_file:\n labels = []\n for line in label_file:\n line = line.split(' ')\n line[-1] = line[-1].rstrip()\n\n img = cv2.imread(f'{data_folder}/{file_name}')\n img_height = img.shape[0]\n img_width = img.shape[1]\n \n x = float(line[1]) * img_width\n y = float(line[2]) * img_height\n w = float(line[3]) * img_width\n h = float(line[4]) * img_height\n\n xmin = int(x - w/2)\n ymin = int(y - 
h/2)\n xmax = int(x + w/2)\n ymax = int(y + h/2)\n\n labels.append(f' {xmin},{ymin},{xmax},{ymax},{line[0]}')\n if len(labels) > 0:\n write = True\n file_to_write.write(f'{data_folder}/{file_name}')\n for label in labels:\n file_to_write.write(label)\n if write:\n file_to_write.write('\\n') \n processed += 1\n print(f'[{processed}/{number_images}] Processed {file_name}')", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def getFiles(folder, pattern, labelfile):\n # read labelfile\n with open(labelfile, 'r') as f:\n all_lines = f.readlines()\n \n # get filenames from labelfile\n all_files = []\n labels = []\n check = True\n for line in all_lines:\n # using shlex we also allow spaces in filenames when escaped w. \"\"\n splits = shlex.split(line)\n file_name = splits[0]\n class_id = splits[1]\n\n # strip all known endings, note: os.path.splitext() doesnt work for\n # '.' in the filenames, so let's do it this way...\n for p in ['.pkl.gz', '.txt', '.png', '.jpg', '.tif', '.ocvmb','.csv']:\n if file_name.endswith(p):\n file_name = file_name.replace(p,'')\n\n # get now new file name\n true_file_name = os.path.join(folder, file_name + pattern)\n all_files.append(true_file_name)\n labels.append(class_id)\n\n return all_files, labels" ]
[ "0.6931007", "0.68347615", "0.6515901", "0.6497116", "0.64797205", "0.636615", "0.6359229", "0.63424253", "0.6320269", "0.6292423", "0.627479", "0.62713116", "0.62389636", "0.6136188", "0.6114869", "0.6111111", "0.61073345", "0.6093371", "0.6091186", "0.60888165", "0.60680854", "0.60617614", "0.60454684", "0.60348165", "0.6029651", "0.6024965", "0.59977233", "0.59894085", "0.59560835", "0.59417963" ]
0.8482654
0
Iterator to consume the messages available on this consumer
def __iter__(self): # Trigger the consumer procs to start off. # We will iterate till there are no more messages available self.size.value = 0 self.events.pause.set() while True: self.events.start.set() try: # We will block for a small while so that the consumers get # a chance to run and put some messages in the queue # TODO: This is a hack and will make the consumer block for # at least one second. Need to find a better way of doing this partition, message = self.queue.get(block=True, timeout=1) except queue.Empty: break # Count, check and commit messages if necessary self.offsets[partition] = message.offset + 1 self.events.start.clear() self.count_since_commit += 1 self._auto_commit() yield message self.events.start.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n # Trigger the consumer procs to start off.\n # We will iterate till there are no more messages available\n self.size.value = 0\n self.pause.set()\n\n while True:\n self.start.set()\n try:\n # We will block for a small while so that the consumers get\n # a chance to run and put some messages in the queue\n # TODO: This is a hack and will make the consumer block for\n # at least one second. Need to find a better way of doing this\n meta, message = self.queue.get(block=True, timeout=1)\n except Empty:\n break\n\n # Count, check and commit messages if necessary\n self.offsets[meta.partition] = message.offset + 1\n self.start.clear()\n self.count_since_commit += 1\n self._auto_commit()\n yield message\n\n self.start.clear()", "def __iter__(self):\n while True:\n m = self.recv(timeout=1.0)\n if m is not None:\n yield m\n logger.debug(\"done iterating over bus messages\")", "def next_message(self):\n while self.queue.consuming:\n yield self.queue.channel._consume_message()", "def _consume(self):\n # HACK: run_in_executor is used as a workaround to use boto\n # inside a coroutine. This is a stopgap solution that should be\n # replaced once boto has support for asyncio or aiobotocore has\n # a stable release.\n loop = asyncio.get_event_loop()\n receive_message = partial(\n self.client.receive_message,\n QueueUrl=self.app.settings['SQS_INBOUND_QUEUE_URL'],\n AttributeNames=self.app.settings['SQS_ATTRIBUTE_NAMES'],\n MessageAttributeNames=self.app.settings['SQS_MESSAGE_ATTRIBUTES'],\n MaxNumberOfMessages=self.app.settings['SQS_MESSAGE_BATCH_SIZE'],\n VisibilityTimeout=self.app.settings['SQS_VISIBILITY_TIMEOUT'],\n WaitTimeSeconds=self.app.settings['SQS_WAIT_TIME'],\n )\n while True:\n future = loop.run_in_executor(None, receive_message)\n messages = yield from future\n for message in messages.get('Messages', []):\n message['Body'] = json.loads(message['Body'])\n yield from self._message_queue.put(message)", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "def retrieve(self) -> Iterator[SQSMessage]:\n while True:\n try:\n sqs = SQSClientFactory(boto3).from_env()\n\n res = sqs.receive_message(\n QueueUrl=self.queue_url,\n WaitTimeSeconds=3,\n MaxNumberOfMessages=10,\n )\n\n messages = res.get(\"Messages\", [])\n if not messages:\n LOGGER.info(\"queue was empty\")\n\n s3_events = [SQSMessage(msg) for msg in messages]\n for sqs_message in s3_events:\n yield sqs_message\n\n sqs.delete_message(\n QueueUrl=self.queue_url,\n ReceiptHandle=sqs_message.receipt_handle,\n )\n\n except Exception as e:\n LOGGER.error(traceback.format_exc())\n time.sleep(2)", "def __iter__(self):\n self.enable_receiving()\n with closing(select.epoll()) as notifier:\n notifier.register(self, select.EPOLLIN)\n while True:\n events = eintr_retry_call(notifier.poll)\n for event in events:\n yield self.receive_device()", "def read(self):\n if not self._consuming:\n yield from self._begin_consuming()\n return (yield from self._message_queue.get())", "def consumer (self):\n try:\n while True:\n try:\n data = os.read (self.read_fd, 65536)\n if not data:\n break\n except OSError as error:\n if error.errno not in BlockingErrorSet:\n break\n yield self.core.Poll (self.read_fd, POLL_READ)\n finally:\n self.Dispose ()", "def server_message_iterator(self) -> Iterator[ServerMessage]:\n while not self._is_closed():\n with self._cv:\n self._cv.wait_for(\n lambda: self._status\n in [Status.CLOSED, 
Status.SERVER_MESSAGE_AVAILABLE]\n )\n\n self._raise_if_closed()\n\n server_message = self._server_message # Read\n self._server_message = None # Reset\n\n # Transition before yielding as after the yield the execution of this\n # function is paused and will resume when next is called again.\n # Also release condition variable by exiting the context\n self._transition(Status.AWAITING_CLIENT_MESSAGE)\n\n if server_message is None:\n raise Exception(\"Server message can not be None\")\n\n yield server_message", "def _generator(self):\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tm = self.messages.pop(0) # pop the first Flash2Message in the list\n\t\t\t\tyield m\n\t\t\texcept IndexError:\n\t\t\t\traise StopIteration", "def subscribe(self):\n pubsub = self.redis_client.pubsub()\n pubsub.subscribe(self.message_channel)\n for item in pubsub.listen():\n if item.get(\"data\") not in (1, None):\n yield item", "def consume_messages(process_func: Callable[[str], None]):\n consumer = get_consumer()\n\n for message in consumer:\n log.debug(f'Received a message: {message}')\n try:\n process_func(message.value)\n except Exception as e:\n log.error(f'Failed to process a message: {message.value}')\n log.exception(e)", "def next(self): # wait for 5 minutes after sending message\n if self.queue:\n messages = self.queue.get_messages(1,visibility_timeout=self.visibility_timeout)\n if messages:\n for m in messages:\n return m\n raise StopIteration", "def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n app.logger.info('Queue empty. Ending stream')\n keep_running = False", "def __iter__(self):\n for item in self._reader:\n yield item", "def __iter__(self):\n return iter(self.queue)", "def consume(self):\n if not self.__events:\n raise StopIteration\n\n if self.__instance.id is None:\n raise ImplementationError(\n \"Can not consume event stream before aggregate has\"\n \"been assigned an identifier.\")\n\n cid = str(uuid.UUID(bytes=os.urandom(16)))\n while self.__events:\n event = self.__events.pop(0)\n event.aggregate_id = self.__instance.id\n event.correlation_id = cid\n event.timestamp = sq.timezone.now()\n yield ImmutableDTO(event).as_dto()", "def __iter__(self):\n return iter([self.format_message(record) for record in self._messages])", "def data(self) -> Generator:\n # Not using a consumer group and setting partitions manually so it's a smaller\n # jump to make this deterministic/repeatable with multiple workers later on.\n\n self.connect()\n\n self.approx_position = 0\n for partition_id, start_offset, end_offset in self._partition_ranges():\n # TODO - confirm this can never jump to another partition\n tp = TopicPartition(topic=self.topic, partition=partition_id)\n self.client.assign([tp])\n\n self.items_to_fetch = end_offset - start_offset\n self.client.seek(tp, start_offset)\n\n if self.items_to_fetch <= 0:\n msg = f\"Invalid offsets {start_offset}:{end_offset} for partition {partition_id}\"\n raise ValueError(msg)\n\n for m in self.client:\n self.approx_position += 1\n yield Pinnate(data=m.value)\n\n if end_offset is not None and m.offset >= end_offset:\n break", "def consume(self, timeout=None):\n\n def _raise_timeout(exc):\n raise driver_common.Timeout(str(exc))\n\n timer = driver_common.DecayingTimer(duration=timeout)\n timer.start()\n\n poll_timeout = (self.consumer_timeout if timeout is None\n else min(timeout, self.consumer_timeout))\n\n while True:\n if 
self._consume_loop_stopped:\n return\n try:\n return self._poll_messages(poll_timeout)\n except kafka.errors.ConsumerTimeout as exc:\n poll_timeout = timer.check_return(\n _raise_timeout, exc, maximum=self.consumer_timeout)\n except Exception:\n LOG.exception(_LE(\"Failed to consume messages\"))\n return", "def message_listener(self, topic, timeout):\n \"\"\"\n demo_message = [\n {'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}\n ]\n\n for message in demo_message:\n yield ERROR_CODE_ZERO, \"\", message\n \"\"\"\n\n while True:\n for error_code, error_message, message in self._consumer.subscribe(topic, timeout):\n yield error_code, error_message, message\n if error_code == 1:\n break", "def receive_messages(self):\n messages = self.incoming_messages\n self.incoming_messages = []\n return messages", "def test_consumer_read_messages(self):\n try:\n test_consumer = TestConsumer(self.msg_queue, self.queue_lock, self.topic, self.properties_file)\n test_consumer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n producer_msg_queue = queue.Queue()\n producer_queue_lock = threading.Lock()\n try:\n test_producer = Producer(producer_msg_queue, producer_queue_lock, self.topic, self.producer_properties_file)\n test_producer.start()\n except Exception as e:\n self.fail(f\"test_consumer_read_messages() failed with exception: {e}\")\n\n msgs = []\n\n for i in range(1, 4):\n msg = f\"Message number {i}\"\n\n producer_queue_lock.acquire()\n producer_msg_queue.put_nowait(msg)\n producer_queue_lock.release()\n\n msgs.append(msg)\n\n # Sleep for few seconds seconds to allow the consumer thread to process all the messages.\n time.sleep(20)\n\n self.assertEqual(test_consumer.dequeue_msgs(), msgs)\n\n test_producer.stop()\n test_consumer.stop()\n test_producer.join()\n test_consumer.join()", "def consume(self):\n LOGGER.debug('Consumer Initialized')\n # self.connect()\n channel = self.get_channel()\n self._bind_things(channel)\n\n try:\n LOGGER.info('Start consuming')\n channel.start_consuming()\n except ConnectionClosed:\n LOGGER.exception('Pika connection closed detected. 
Will attempt to start consuming again')\n self.consume()\n except KeyboardInterrupt as e:\n LOGGER.info('Keyboard interrupt, stop consuming')\n self.shutdown()\n raise e\n except Exception as e:\n LOGGER.exception(\"'%s\" % str(e))\n self.shutdown()\n if self.settings.CONSUMER['RAISE_EXCEPTION']:\n LOGGER.info(\"CONSUMER RAISED EXCEPTION\")\n raise e", "def stream(self):\n\n iterator = iter(self._stream)\n while self._state < Computation.STATE_COMPLETED:\n try:\n message = next(iterator)\n except StopIteration:\n if self._state < Computation.STATE_COMPLETED:\n self._stream = self._execute()\n iterator = iter(self._stream)\n continue\n break\n\n if isinstance(message, messages.StreamStartMessage):\n self._state = Computation.STATE_STREAM_STARTED\n continue\n\n if isinstance(message, messages.JobStartMessage):\n self._state = Computation.STATE_COMPUTATION_STARTED\n self._id = message.handle\n yield message\n continue\n\n if isinstance(message, messages.JobProgressMessage):\n yield message\n continue\n\n if isinstance(message, messages.ChannelAbortMessage):\n self._state = Computation.STATE_ABORTED\n raise errors.ComputationAborted(message.abort_info)\n\n if isinstance(message, messages.EndOfChannelMessage):\n self._state = Computation.STATE_COMPLETED\n continue\n\n # Intercept metadata messages to accumulate received metadata...\n if isinstance(message, messages.MetadataMessage):\n self._metadata[message.tsid] = message.properties\n yield message\n continue\n\n # ...as well as expired-tsid messages to clean it up.\n if isinstance(message, messages.ExpiredTsIdMessage):\n if message.tsid in self._metadata:\n del self._metadata[message.tsid]\n yield message\n continue\n\n if isinstance(message, messages.InfoMessage):\n self._process_info_message(message.message)\n self._batch_count_detected = True\n yield message\n if self._current_batch_message:\n yield self._get_batch_to_yield()\n continue\n\n # Accumulate data messages and release them when we have received\n # all batches for the same logical timestamp.\n if isinstance(message, messages.DataMessage):\n self._state = Computation.STATE_DATA_RECEIVED\n\n if not self._batch_count_detected:\n self._expected_batches += 1\n\n if not self._current_batch_message:\n self._current_batch_message = message\n self._current_batch_count = 1\n elif (message.logical_timestamp_ms ==\n self._current_batch_message.logical_timestamp_ms):\n self._current_batch_message.add_data(message.data)\n self._current_batch_count += 1\n else:\n self._batch_count_detected = True\n\n if (self._batch_count_detected and\n self._current_batch_count == self._expected_batches):\n yield self._get_batch_to_yield()\n continue\n\n if isinstance(message, messages.EventMessage):\n yield message\n continue\n\n if isinstance(message, messages.ErrorMessage):\n raise errors.ComputationFailed(message.errors)\n\n # Yield last batch, even if potentially incomplete.\n if self._current_batch_message:\n yield self._get_batch_to_yield()", "def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return", "def consume(self, **kwargs):\n kwargs.setdefault('block', True)\n try:\n while True:\n msg = self.get(**kwargs)\n if msg is None:\n break\n yield msg\n except KeyboardInterrupt: # pragma: no cover\n print()\n return", "def consume(self):\n\n self.consumer = self.getConsumer(self.client.topics[self.topic])\n\n # create splunk hec instance\n splunk_hec = hec(self.splunk_server,\n 
self.splunk_hec_port,\n self.splunk_hec_channel,\n self.splunk_hec_token,\n self.splunk_sourcetype,\n self.splunk_source,\n self.use_https,\n self.verify_ssl,\n self.use_compression,\n self.compresslevel)\n while(True):\n m = self.consumer.consume()\n \n # Append messages to list until we've hit self.batch_size\n if(len(self.messages) <= self.batch_size):\n self.messages.append(m.value)\n\n # Send messages to Splunk HEC\n if(len(self.messages) == self.batch_size):\n retry(self.sendToSplunk,\n attempts=self.retry_attempts,\n sleeptime=self.sleeptime,\n max_sleeptime=self.max_sleeptime,\n sleepscale=self.sleepscale,\n jitter=self.jitter,\n retry_exceptions=(Exception,),\n args=(splunk_hec,))", "def receive(self):\n while True:\n if self.pending_request:\n request = self.unpack(self.pending_request)\n self.pending_request = None\n else: \n request = self.unpack(self.mh.receive_message())\n if request:\n yield request\n else: break" ]
[ "0.7876559", "0.7503774", "0.73663545", "0.7265061", "0.70114726", "0.69939095", "0.6885746", "0.68245155", "0.6739083", "0.66143394", "0.6587319", "0.65804994", "0.655792", "0.6548321", "0.6544168", "0.6527674", "0.6492114", "0.6468669", "0.6443228", "0.6439748", "0.63970184", "0.63683367", "0.63447726", "0.6334904", "0.6332139", "0.6327359", "0.63055587", "0.62904406", "0.62783307", "0.62653947" ]
0.7853538
1
Inspect the supplied auth token to determine the user's IPAM server
def _user_ipam_server(token): logger.info('Looking up IPAM server') try: header, payload, signature = token.split(b'.') except (ValueError, AttributeError, TypeError) as doh: # Mangled or missing JSON Web Token logger.exception(doh) user = None else: padding_needed = len(payload) % 4 if padding_needed: payload += b'=' * (4 - padding_needed) decoded_payload = base64.urlsafe_b64decode(payload) try: username = ujson.loads(decoded_payload)['username'] user = '{}.{}'.format(username, const.VLAB_FQDN) except ValueError: # bad json logger.error('invalid JSON for token payload') user = None return user
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ilo_access(remote_console):\n url = remote_console.get('remoteConsoleUrl')\n url_parse = parse.urlparse(url)\n host_ip = parse.parse_qs(url_parse.netloc).get('addr')[0]\n token = parse.parse_qs(url_parse.netloc).get('sessionkey')[0]\n return host_ip, token", "def get_token_info_remote(self, token_info_url):", "def check_token(token):\n return conn.hget('login:', token)", "def check_auth():", "def getUser(self, authenticationToken):\r\n pass", "def _lookup_token(self):\n path = '/authn/{account}/{login}/authenticate'.format(\n account=self.account, login='admin'\n )\n res = self._post(path, data=self.api_token, skip_auth=True)\n return base64.b64encode(res.text)", "def auth_token(self):", "def getUser():\n\n if 'token' in session:\n return \"Authenticated\"\n else:\n return \"Unauthenticated\"", "def _get_address():\n ret = subprocess.getoutput([\"swift auth\"])\n ret = ret.split(\"\\n\")[0]\n ret = ret.split(\"=\")[1]\n return ret", "def get_auth_token():\n if CFG.auth_enabled:\n auth_token = get_keystone_token()\n else:\n auth_token = 'notrealtoken'\n\n return auth_token", "def authcheck():\n user = get_user()\n return jsonify({'current_identity': user.username})", "def auth(self):\r\n basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))\r\n if basic: return basic\r\n ruser = self.environ.get('REMOTE_USER')\r\n if ruser: return (ruser, None)\r\n return None", "def _get_auth_string(self):", "def get_token(self):\n auth_data = {\"auth\": {\"tenantName\": 'service',\n \"passwordCredentials\":{ \"username\": 'vsm',\n \"password\": self._password}}}\n\n auth_request = urllib2.Request(self._auth_url)\n auth_request.add_header(\"content-type\", \"application/json\")\n auth_request.add_header('Accept', 'application/json')\n auth_request.add_header('User-Agent', 'python-mikeyp')\n auth_request.add_data(json.dumps(auth_data))\n auth_response = urllib2.urlopen(auth_request)\n response_data = json.loads(auth_response.read())\n\n self._token = response_data['access']['token']['id']\n\n service_list = response_data['access']['serviceCatalog']\n for s in service_list:\n if s['type'] == 'vsm' and s['name'] == 'vsm':\n self._vsm_url = s['endpoints'][0]['publicURL']\n break\n\n url_id = self._vsm_url.split('/')[-1]\n return self._token + \"-\" + url_id", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def get_oauth_token():\n return session.get('remote_oauth')", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def gen_ip(self):\n\n try:\n self.ip = self.auth_url.split(\":\")[1].strip(\"//\")\n except Exception:\n self.ip = socket.gethostbyname(socket.gethostname())\n print \"\\t! Error obtaining ip address from cred file. 
Using %s\" % (self.ip)", "def whoami():\n try:\n\n token = request.headers['token']\n username, uid, wid = read_auth_token(token)\n return dict(username=username, uid=uid, wid=wid)\n\n except SignatureExpired as e:\n return dict(error=str(e)), 401\n except BadSignature as e:\n return dict(error=str(e)), 401\n except Exception as e:\n return dict(error=str(e)), 500", "def auth_server_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"auth_server_id\")", "def auth_server_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"auth_server_id\")", "def get_user_info(self, token):\n\n user_info_resp = get_remote(get_config('login.live.user_info_url') + token)\n user_info = json.loads(user_info_resp)\n\n if user_info.get(\"error\") is not None:\n raise Exception(user_info)\n\n return user_info", "def identify_auth():\r\n from requests.auth import HTTPBasicAuth\r\n from requests.auth import HTTPDigestAuth\r\n # HTTPBasicAuth Auth Method\r\n response = requests.get(base_url + '/basic-auth/51zxw/8888', auth=HTTPBasicAuth('51zxw', '8888'))\r\n print(response.status_code)\r\n print(response.text)\r\n\r\n # HTTPDigestAuth Auth Method\r\n response = requests.get(base_url + '/digest-auth/auth/zwx/6666', auth=HTTPDigestAuth('zwx', '6666'))\r\n print(response.status_code)\r\n print(response.text)\r\n print(response.json())", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def verify_token(vial_http: urllib3.connectionpool.ConnectionPool) -> bool:\n verify_resp = vial_http.request(\"GET\", \"/api/verifyToken\")\n return verify_resp.status == 200", "def auth_server_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"auth_server_id\")", "def get_client_login_token_string(http_body):\n for response_line in http_body.splitlines():\n if response_line.startswith('Auth='):\n # Strip off the leading Auth= and return the Authorization value.\n return response_line[5:]\n return None", "def get_token(self):\n message = {\n \"request\": \"access_token\",\n \"account\": self.account,\n \"min_valid_period\": self.validity,\n \"application_hint\": \"orpy\",\n }\n try:\n self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self._sock.connect(self.socket_path)\n self._sock.sendall(json.dumps(message).encode())\n\n data = \"\"\n while True:\n recv = self._sock.recv(16).decode()\n if recv:\n data += recv\n else:\n break\n except socket.error as err:\n raise exceptions.AuthExceptiob(\n err=\"Cannot communicate with the \" \"oidc-agent: %s\" % err\n )\n finally:\n self._sock.close()\n\n token = json.loads(data)\n if token.get(\"status\") == \"failure\":\n raise exceptions.AuthError(err=token.get(\"error\"))\n return token", "def _authenticate(self):\n\t\tfrom getpass import getpass\n\t\tpassword = getpass()\n\t\tself.msg('nickserv', 'identify %s' % password)", "def authTest(token=None):\n if not token:\n token = bottle.request.get_header('X-Auth-Token')\n\n data = bottle.request.json\n if not token:\n user = data.get('user')\n password = data.get('password')\n\n query = odict(bottle.request.query.items())\n if not user or not password:\n user = query.get('user')\n password = query.get('password')\n\n if not token and (not user or not password):\n bottle.abort(400, \"Authentication credentials missing.\")\n\n result = odict(token=token,\n user=user,\n password=password,\n headers=odict(bottle.request.headers.items()),\n query=query,\n data=data,\n )\n return result" ]
[ "0.61946833", "0.6118207", "0.60206807", "0.5868862", "0.5855231", "0.58198047", "0.5806784", "0.57857645", "0.5719891", "0.56942225", "0.5674779", "0.565937", "0.5640423", "0.56349134", "0.5603921", "0.55928963", "0.55651516", "0.55602133", "0.55421984", "0.55298877", "0.5526956", "0.5521031", "0.55166495", "0.5504239", "0.5499721", "0.5498109", "0.54956675", "0.54545397", "0.54425466", "0.543871" ]
0.76831394
0
Set a high water mark on the zmq socket. Do so in a way that is cross-compatible with zeromq2 and zeromq3.
def set_high_water_mark(socket, config): if config['high_water_mark']: if hasattr(zmq, 'HWM'): # zeromq2 socket.setsockopt(zmq.HWM, config['high_water_mark']) else: # zeromq3 socket.setsockopt(zmq.SNDHWM, config['high_water_mark']) socket.setsockopt(zmq.RCVHWM, config['high_water_mark'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, ip='127.0.0.1', port='50020'):\n self.ip = ip \n self.port = port\n self.ctx = zmq.Context()\n self.socket = zmq.Socket(self.ctx, zmq.REQ) # this is pub socket", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def __sendHeartbeat(self):\n \n while not rospy.is_shutdown():\n rospy.sleep(5)\n self.setOutput(self.write_start+1,0)", "def feltBump(self):\n self.stamp = rospy.Time.now()\n self.ready_to_publish = True", "def set_pin_high(pin):\n HIGH_PINS.append(pin)", "def wake_up(self):\r\n self._write.send('1')", "def set_tcp_keepalive(socket, config):\n\n keepalive_options = {\n # Map fedmsg config keys to zeromq socket constants\n 'zmq_tcp_keepalive': 'TCP_KEEPALIVE',\n 'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT',\n 'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE',\n 'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL',\n }\n for key, const in keepalive_options.items():\n if key in config:\n attr = getattr(zmq, const, None)\n if attr:\n _log.debug(\"Setting %r %r\" % (const, config[key]))\n socket.setsockopt(attr, config[key])", "def zmq_version():\n return \"%i.%i.%i\" % zmq_version_info()", "def zmq_version():\n return \"%i.%i.%i\" % zmq_version_info()", "def _send_heartbeat(self):\n if time.time() > self.__heartbeat + self.__hb_interval:\n self.__pipe.send({\"command\": \"heartbeat\"})\n self.__heartbeat = time.time()", "def setkeepalives(sock):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)", "def fix_zmq_exit():\n import zmq\n ctx = zmq.Context.instance()\n ctx.term()", "def zpipe(ctx):\n a = ctx.socket(zmq.PAIR)\n b = ctx.socket(zmq.PAIR)\n a.linger = b.linger = 0\n a.hwm = b.hwm = 1\n iface = f\"inproc://{binascii.hexlify(os.urandom(8))}\"\n a.bind(iface)\n b.connect(iface)\n return a, b", "def wake_up(self):\n self._write.send(b'1')", "def transmitPollAck(): \n global data\n DW1000.newTransmit()\n data[0] = C.POLL_ACK\n data[17] = anchorID #data[17] is tag Id data[18] is anchor Id\n data[18] = tagID #data[17] is tag Id data[18] is anchor Id\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\n DW1000.setData(data, LEN_DATA)\n DW1000.startTransmit()", "def test_overlongWrite(self):\n buf = imap4.WriteBuffer(self.transport)\n data = b'x' * (buf.bufferSize + 1)\n\n buf.write(data)\n\n self.assertEqual(self.transport.value(), data)", "def set_id(zsocket):\n identity = f\"{randint(0, 
0x10000):04x}-{randint(0, 0x10000):04x}\"\n zsocket.setsockopt_string(zmq.IDENTITY, identity)", "def broker_null(self, data):\n\n print(\"Heartbeat\")\n #TODO: Reset heartbeat timer or something like that", "def transmitPollAck(): \r\n global data\r\n DW1000.newTransmit()\r\n data[0] = C.POLL_ACK\r\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\r\n DW1000.setData(data, LEN_DATA)\r\n DW1000.startTransmit()", "def _create_socket(self, socket_type, linger_value):\n socket = zmq.Context.instance().socket(socket_type)\n socket.setsockopt(zmq.LINGER, linger_value)\n socket.set_hwm(0)\n port_number = socket.bind_to_random_port(LOCAL_ADDR)\n self.poller.register(socket, zmq.POLLIN)\n self.logger.debug(\"bind to \" + LOCAL_ADDR + ':' + str(port_number))\n return (socket, port_number)", "def test_deflate_on_oom(test_microvm_with_api, deflate_on_oom):\n test_microvm = test_microvm_with_api\n test_microvm.spawn()\n test_microvm.basic_config()\n test_microvm.add_net_iface()\n\n # Add a deflated memory balloon.\n test_microvm.api.balloon.put(\n amount_mib=0, deflate_on_oom=deflate_on_oom, stats_polling_interval_s=0\n )\n\n # Start the microvm.\n test_microvm.start()\n firecracker_pid = test_microvm.jailer_clone_pid\n\n # We get an initial reading of the RSS, then calculate the amount\n # we need to inflate the balloon with by subtracting it from the\n # VM size and adding an offset of 10 MiB in order to make sure we\n # get a lower reading than the initial one.\n initial_rss = get_stable_rss_mem_by_pid(firecracker_pid)\n inflate_size = 256 - int(initial_rss / 1024) + 10\n\n # Inflate the balloon\n test_microvm.api.balloon.patch(amount_mib=inflate_size)\n # This call will internally wait for rss to become stable.\n _ = get_stable_rss_mem_by_pid(firecracker_pid)\n\n # Check that using memory leads an out of memory error (or not).\n make_guest_dirty_memory(test_microvm.ssh, should_oom=not deflate_on_oom)", "def socket_set_hwm(socket, hwm=-1):\n try:\n socket.sndhwm = socket.rcvhwm = hwm\n except AttributeError:\n socket.hwm = hwm", "def setSlowConsumerWarningHiWaterMark(self, hiWaterMark):\n err = internals.blpapi_SessionOptions_setSlowConsumerWarningHiWaterMark(\n self.__handle, hiWaterMark)\n _ExceptionUtil.raiseOnError(err)", "def high(self, high):\n\n self._high = high", "def writable(self):\n now = int(time.time())\n if now - self.lastest_keepalive >= 40:\n msg = \"type@=keeplive/tick@={}/\".format(now)\n self.push_with_producer(MessageProducer(msg))\n self.lastest_keepalive = now\n return asynchat.async_chat.writable(self)", "def FlushCluster(config):\n ctx = zmq.Context()\n request_socket = config.request_receiver.MakeSocket(ctx, type = zmq.PULL)\n result_socket = config.result_receiver.MakeSocket(ctx, type = zmq.PULL)\n time.sleep(1) # wait for connections\n FlushSocket(request_socket)\n FlushSocket(result_socket)", "def setsockopt(self, *args):\r\n self._fd.setsockopt(*args)", "def __sync_z(self) -> None:\n if self.__peer is not None:\n try:\n self.__peer.send_command(MicrobitAccelerometerGetZ(z=self.__z))\n except CommunicationClosedError:\n self.__peer = None", "def on_write_needed(self, nbytes, underflow):", "async def ack(self, offset: int):" ]
[ "0.51234335", "0.5065628", "0.50134385", "0.49544883", "0.49474162", "0.49436027", "0.4931907", "0.488579", "0.488579", "0.48692766", "0.48552787", "0.48457694", "0.48174453", "0.48031065", "0.47897077", "0.47836304", "0.4770771", "0.4770241", "0.4731286", "0.4699968", "0.46963012", "0.46895176", "0.46677175", "0.46539402", "0.46521732", "0.46471888", "0.46171784", "0.4559776", "0.45597732", "0.45536464" ]
0.80861634
0
Set a series of TCP keepalive options on the socket if and only if 1) they are specified explicitly in the config and 2) the version of pyzmq has been compiled with support. We ran into a problem in FedoraInfrastructure where long-standing connections between some hosts would suddenly drop off the map silently. Because PUB/SUB sockets don't communicate regularly, nothing in the TCP stack would automatically try and fix the connection. With TCP_KEEPALIVE options (introduced in libzmq 3.2 and pyzmq 2.2.0.1) hopefully that will be fixed. See the following
def set_tcp_keepalive(socket, config): keepalive_options = { # Map fedmsg config keys to zeromq socket constants 'zmq_tcp_keepalive': 'TCP_KEEPALIVE', 'zmq_tcp_keepalive_cnt': 'TCP_KEEPALIVE_CNT', 'zmq_tcp_keepalive_idle': 'TCP_KEEPALIVE_IDLE', 'zmq_tcp_keepalive_intvl': 'TCP_KEEPALIVE_INTVL', } for key, const in keepalive_options.items(): if key in config: attr = getattr(zmq, const, None) if attr: _log.debug("Setting %r %r" % (const, config[key])) socket.setsockopt(attr, config[key])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_tcp_keepalive(sock, opts):\n if hasattr(socket, \"SO_KEEPALIVE\"):\n if opts.get(\"tcp_keepalive\", False):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n if hasattr(socket, \"SOL_TCP\"):\n if hasattr(socket, \"TCP_KEEPIDLE\"):\n tcp_keepalive_idle = opts.get(\"tcp_keepalive_idle\", -1)\n if tcp_keepalive_idle > 0:\n sock.setsockopt(\n socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)\n )\n if hasattr(socket, \"TCP_KEEPCNT\"):\n tcp_keepalive_cnt = opts.get(\"tcp_keepalive_cnt\", -1)\n if tcp_keepalive_cnt > 0:\n sock.setsockopt(\n socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)\n )\n if hasattr(socket, \"TCP_KEEPINTVL\"):\n tcp_keepalive_intvl = opts.get(\"tcp_keepalive_intvl\", -1)\n if tcp_keepalive_intvl > 0:\n sock.setsockopt(\n socket.SOL_TCP,\n socket.TCP_KEEPINTVL,\n int(tcp_keepalive_intvl),\n )\n if hasattr(socket, \"SIO_KEEPALIVE_VALS\"):\n # Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor\n # TCP_KEEPINTVL. Instead, it has its own proprietary\n # SIO_KEEPALIVE_VALS.\n tcp_keepalive_idle = opts.get(\"tcp_keepalive_idle\", -1)\n tcp_keepalive_intvl = opts.get(\"tcp_keepalive_intvl\", -1)\n # Windows doesn't support changing something equivalent to\n # TCP_KEEPCNT.\n if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:\n # Windows defaults may be found by using the link below.\n # Search for 'KeepAliveTime' and 'KeepAliveInterval'.\n # https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA\n # If one value is set and the other isn't, we still need\n # to send both values to SIO_KEEPALIVE_VALS and they both\n # need to be valid. So in that case, use the Windows\n # default.\n if tcp_keepalive_idle <= 0:\n tcp_keepalive_idle = 7200\n if tcp_keepalive_intvl <= 0:\n tcp_keepalive_intvl = 1\n # The values expected are in milliseconds, so multiply by\n # 1000.\n sock.ioctl(\n socket.SIO_KEEPALIVE_VALS,\n (\n 1,\n int(tcp_keepalive_idle * 1000),\n int(tcp_keepalive_intvl * 1000),\n ),\n )\n else:\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)", "def setkeepalives(sock):\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)", "def _activate_keepalive(self, s, after_idle_sec=30, interval_sec=10,\n max_fails=5):\n s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)\n s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)", "def set_keepalive(self):\n self._session._transport.set_keepalive(60)", "def keepalive(self):\n return f'PersistentKeepalive = {self._peer.keepalive}'", "def setKeepAliveEnabled(self, isEnabled):\n keepAliveValue = 1 if isEnabled else 0\n err = internals.blpapi_SessionOptions_setKeepAliveEnabled(\n self.__handle, keepAliveValue)\n _ExceptionUtil.raiseOnError(err)", "def keepalive_received(self, peer, timestamp):\n\n if peer.msg_recv_stat['Keepalives'] == 1:\n # do something with the connection establish event\n pass\n\n if CONF.message.write_keepalive:\n # write bgp message\n self.write_msg(\n peer=peer.factory.peer_addr,\n timestamp=timestamp,\n msg_type=4,\n msg={\"msg\": None}\n )", "def add_http_keepalive_requests(self, value):\n path = [u\"http\", u\"keepalive_requests\"]\n self.add_config_item(self._nodeconfig, value, path)", "def keep_alive(self):\n req = BFGlobalFactory.create(\"ns1:KeepAliveReq\")\n rsp = self._soapcall(BFGlobalService.keepAlive, req)\n if rsp.header.errorCode != APIErrorEnum.OK:\n 
logger.error(\"{keepAlive} failed with error {%s}\",\n rsp.header.errorCode)", "def setUp(self) -> None:\n local_sock, remote_sock = socketpair()\n local_sock.settimeout(1.0)\n remote_sock.settimeout(1.0)\n self.inverter = KeepAliveInverter(local_sock, None, keep_alive=0.01)\n self.sock = remote_sock", "def add_http_keepalive_timeout(self, value):\n path = [u\"http\", u\"keepalive_timeout\"]\n self.add_config_item(self._nodeconfig, value, path)", "def set_high_water_mark(socket, config):\n\n if config['high_water_mark']:\n if hasattr(zmq, 'HWM'):\n # zeromq2\n socket.setsockopt(zmq.HWM, config['high_water_mark'])\n else:\n # zeromq3\n socket.setsockopt(zmq.SNDHWM, config['high_water_mark'])\n socket.setsockopt(zmq.RCVHWM, config['high_water_mark'])", "def start(self):\n zmq_uri = (\n \"{protocol}://{address}:{port}\".format(\n protocol=self.protocol, address=self.address, port=self.port\n )\n if self.port\n else \"{protocol}://{address}\".format( # noqa\n protocol=self.protocol, address=self.address\n )\n )\n log.debug(\"ZMQ URI: %s\", zmq_uri)\n self.ctx = zmq.Context()\n if hasattr(zmq, self.type):\n skt_type = getattr(zmq, self.type)\n else:\n skt_type = zmq.PULL\n self.sub = self.ctx.socket(skt_type)\n self.sub.connect(zmq_uri)\n if self.hwm is not None:\n self.sub.setsockopt(zmq.RCVHWM, self.hwm)\n if self.recvtimeout is not None:\n log.debug(\"Setting RCVTIMEO to %d\", self.recvtimeout)\n self.sub.setsockopt(zmq.RCVTIMEO, self.recvtimeout)\n if self.keepalive is not None:\n log.debug(\"Setting TCP_KEEPALIVE to %d\", self.keepalive)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE, self.keepalive)\n if self.keepalive_idle is not None:\n log.debug(\"Setting TCP_KEEPALIVE_IDLE to %d\", self.keepalive_idle)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_IDLE, self.keepalive_idle)\n if self.keepalive_interval is not None:\n log.debug(\"Setting TCP_KEEPALIVE_INTVL to %d\", self.keepalive_interval)\n self.sub.setsockopt(zmq.TCP_KEEPALIVE_INTVL, self.keepalive_interval)", "def keep_alive_time(self, keep_alive_time: ConfigNodePropertyInteger):\n\n self._keep_alive_time = keep_alive_time", "def setDefaultKeepAliveResponseTimeout(self, timeoutMsecs):\n err = internals.blpapi_SessionOptions_setDefaultKeepAliveResponseTimeout(\n self.__handle, timeoutMsecs)\n _ExceptionUtil.raiseOnError(err)", "async def peers_keepalive(\n *,\n ourselves: Peer,\n):\n try:\n while True:\n logger.debug(f\"Peering keep-alive update for {ourselves.id} (priority {ourselves.priority})\")\n ourselves.keepalive()\n\n # How often do we update. 
Keep limited to avoid k8s api flooding.\n # Should be slightly less than the lifetime, enough for a patch request to finish.\n await asyncio.sleep(max(1, int(ourselves.lifetime.total_seconds() - 10)))\n finally:\n try:\n ourselves.disappear()\n except:\n pass", "async def keep_alive(self):\n self._keepalive = True\n while True:\n await gen.sleep(self.KEEPALIVE_INTERVAL)\n if not self._keepalive:\n return\n try:\n # lines that start with : are comments\n # and should be ignored by event consumers\n self.write(\":keepalive\\n\\n\")\n await self.flush()\n except StreamClosedError:\n return", "def setDefaultKeepAliveInactivityTime(self, inactivityMsecs):\n err = internals.blpapi_SessionOptions_setDefaultKeepAliveInactivityTime(\n self.__handle, inactivityMsecs)\n _ExceptionUtil.raiseOnError(err)", "def change_session_timeout(duthost1, duthost2, keep_and_peer_link_member):\n cmd = 'config mclag session-timeout {} {}'\n keep_alive_interface = keep_and_peer_link_member[duthost1.hostname]['keepalive']\n duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))\n duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, NEW_SESSION_TIMEOUT))\n duthost1.shutdown(keep_alive_interface)\n\n yield\n\n duthost1.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))\n duthost2.shell(cmd.format(MCLAG_DOMAINE_ID, DEFAULT_SESSION_TIMEOUT))\n duthost1.no_shutdown(keep_alive_interface)", "def test_keep_alive_sent(self):\n # Receive keep-alive message\n msg = self.sock.recv(4096)\n self.assertTrue(msg.startswith(b\"\\x55\\xaa\"))\n # Send some arbitrary response\n self.sock.send(bytes.fromhex(\"55 aa 01 02 02 00 00 01 04\"))\n # Receive another keep-alive message\n msg = self.sock.recv(4096)\n self.assertTrue(msg.startswith(b\"\\x55\\xaa\"))\n # Send some arbitrary response\n self.sock.send(bytes.fromhex(\"55 aa 01 02 02 00 00 01 04\"))", "def _is_tcp_synack(tcp_flags):\n if tcp_flags == 0x12:\n return 1\n else:\n return 0", "def setProtocolOptions(self,\n version=None,\n utf8validateIncoming=None,\n acceptMaskedServerFrames=None,\n maskClientFrames=None,\n applyMask=None,\n maxFramePayloadSize=None,\n maxMessagePayloadSize=None,\n autoFragmentSize=None,\n failByDrop=None,\n echoCloseCodeReason=None,\n serverConnectionDropTimeout=None,\n openHandshakeTimeout=None,\n closeHandshakeTimeout=None,\n tcpNoDelay=None,\n perMessageCompressionOffers=None,\n perMessageCompressionAccept=None,\n autoPingInterval=None,\n autoPingTimeout=None,\n autoPingSize=None):", "def keepAliveReceived(self):", "def _updateHeartbeat (self) :\r\n for pw, conn in self.clients :\r\n if conn : # we do have at least one client, enable heartbeat if needed\r\n self.have_clients = True\r\n return\r\n \r\n self.have_clients = False", "def _validate_conn(self, conn):\n # Call the method on the base class\n super()._validate_conn(conn)\n\n # Set up TCP Keep Alive probes, this is the only line added to this function\n TCPKeepAliveValidationMethods.adjust_connection_socket(conn)", "def _validate_conn(self, conn):\n # Call the method on the base class\n super()._validate_conn(conn)\n\n # Set up TCP Keep Alive probes, this is the only line added to this function\n TCPKeepAliveValidationMethods.adjust_connection_socket(conn)", "def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2", "def tcp_timeout(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n seconds = 86400\n\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"nat_tcp_timeout\": 
seconds})", "def client_heartbeat(self, ip):\n for cli in self.clients:\n if cli.ip == ip:\n cli.set_heartbeat()\n for cli in self.cooplist:\n if cli.ip == ip:\n cli.set_heartbeat()", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")" ]
[ "0.75030094", "0.6916339", "0.66866887", "0.55712473", "0.5282807", "0.5062797", "0.5035763", "0.50337327", "0.5011154", "0.5004305", "0.49721786", "0.49601498", "0.49445322", "0.49265453", "0.4898219", "0.48714292", "0.48604316", "0.48500705", "0.48452032", "0.48276287", "0.47639203", "0.4757217", "0.46955028", "0.46898732", "0.46840355", "0.46840355", "0.46702337", "0.46501103", "0.45786354", "0.4562134" ]
0.8027103
0
Override to use ACLESAggregator.
def _setup_aggregation(self, aggregator=None, **kwargs): return super(ACLFilterViewMixin, self)._setup_aggregation( aggregator=ACLESAggregator, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, aggregator: af.Aggregator):\r\n self.aggregator = aggregator", "def _aggregation_target(self):\n ...", "def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )", "def ADP (self):", "def __call__(self, seq_path, result_path=None, log_path=None):\r\n raise NotImplementedError(\"Aligner is an abstract class\")", "def __init__(self, AEs):\n \"\"\" the default view of the stacked autoencoders\"\"\"\n sa = AEs\n \"\"\" the encoder view of the stacked autoencoders \"\"\"\n ec = Cat([a.ec for a in sa])\n \"\"\" the decoder view of the stacked autoencoders \"\"\"\n dc = Cat([a.dc for a in reversed(sa)])\n\n self.sa = sa # default view\n self.ec = ec # encoder view\n self.dc = dc # decoder view\n\n nts = []\n nts.extend(ec)\n nts.extend(dc)\n super(SAE, self).__init__(nts)", "def syncADC(self):\n pass", "def create_accumulator(self):\n raise NotImplementedError", "def aggregate(self, arg):\n return self.agg(arg)", "def adc(self, signal):", "def __init__(self):\n super(LinearAggregationLayer, self).__init__()", "def acc_a(self):\n return self._acc_a", "def _add_aggregator_logger(master_logger):\n agg_logger = logging.getLogger('aggregator.aggregators')\n agg_logger.setLevel(master_logger.getEffectiveLevel())\n for handler in master_logger.handlers:\n agg_logger.addHandler(handler)", "def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)", "def alpha(self):\r\n f, s, t, v_atm_n = self.f, self.shift, self.t, self.v_atm_n\r\n beta, rho, volvol = self.beta, self.rho, self.volvol\r\n # Convert ATM normal vol to ATM shifted lognormal\r\n return alpha(f+s, t, v_atm_n, beta, rho, volvol)", "def on_a(self):\r\n self.log()", "def fAT(self):\n pass", "def setup(self, args):\n for key, ags in self._mapp.items():\n arg = args.get(key)\n\n if arg: #if exist, turn aggregator actived and create a new instance a new aggregator class\n self.active = True\n return ags(arg)", "def __aiter__(self):\n return self", "async def __aenter__(self) -> 'Batch':\n return self", "def add_aggregation_data(self, payload):\n raise NotImplementedError()", "def aic(self, X):\n raise NotImplementedError", "def group_by(self, *args) -> B[B, E]:", "def _aggregate(self, method_name, *args, as_index=None, **kwargs):\n res = self._groupby_obj._wrap_aggregation(\n qc_method=type(self._query_compiler).groupby_rolling,\n numeric_only=False,\n agg_args=args,\n agg_kwargs=kwargs,\n agg_func=method_name,\n rolling_kwargs=self.rolling_kwargs,\n )\n\n if as_index is None:\n as_index = self._as_index\n\n if not as_index:\n res = res.reset_index(\n level=[i for i in range(len(self._groupby_obj._internal_by))],\n drop=False,\n )\n\n return res", "def __gia(self, *args, **kwargs):\n pass", "def __init__(self, *args, **kwargs):\n super(DateWindowEOCMeasure, self).__init__(*args, **kwargs)\n self.fields_to_group_by = ['bene_sk']", "def __init__(__self__,\n resource_name: str,\n args: AggregatorArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def attach_AC(self):\n n = self.pC - 1\n self.A[n] = self._mps_AC(self.A[n], self.C)", "def a(self):\n pass", "def a(self):\n pass" ]
[ "0.58657575", "0.53307277", "0.5028247", "0.50152344", "0.49879622", "0.4987954", "0.49778157", "0.49720332", "0.49186838", "0.49104032", "0.49044472", "0.49029803", "0.48971638", "0.48956355", "0.4886094", "0.4840375", "0.48350465", "0.4830124", "0.48059037", "0.47872633", "0.47509605", "0.47351375", "0.46795392", "0.46540543", "0.46539846", "0.46521977", "0.46504325", "0.46267852", "0.46251544", "0.46251544" ]
0.60906595
0
Preprocess a dataset before training NER. Assuming that a clean dataset of entities should not contain verbs, adverbs, adjectives and random symbols.
def dataset_NER_prepocess(dataset): preprocessed = [] try: preprocessed = stop_word_remove(dataset) if not preprocessed: preprocessed = dataset preprocessed = adverb_remove(dataset) if not preprocessed: preprocessed = dataset preprocessed = verb_remove(dataset) if not preprocessed: preprocessed = dataset preprocessed = adjective_remove(dataset) if not preprocessed: preprocessed = dataset preprocessed = special_symbols_remove(dataset) except Exception as e: print(e) return None return preprocessed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def preprocess_data(self):\n\n self._preprocess_train_data()\n self._preprocess_test_data()", "def pre_process_dataset(self):\n sentences = []\n idx = 1\n # Iterates of dataframe to collect sentences and labels\n for index, row in self.df.iterrows():\n # Normalizing and separate words of each sentence\n norm_sentence = self.norm_text(row['comment_text'])\n word_sentences = re.sub(\"[^\\w]\", \" \", norm_sentence).split()\n sentences.append(word_sentences)\n # Creating a word dictionary\n for word in word_sentences:\n if word not in self.word_2_idx:\n self.word_2_idx[word] = idx\n idx += 1\n # Getting all labels and creates a one-hot vector\n row_label = row[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].values\n self.labels.append(row_label)\n\n # Collect word indexes from prepared word dictionary\n for words_sentence in sentences:\n self.input_data.append([self.word_2_idx[w] for w in words_sentence])", "def preprocess_dataset(self, dataset, params=None):\n if params is None:\n assert self.params_loaded, (\n \"You must either provide parameters or load the model params before preprocessing.\")\n params = self.params\n for key in dataset.keys():\n if dataset[key] is None:\n continue\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\"):\n if params.whiten_method == \"FT\": # other methods require patching first\n if hasattr(params, \"whiten_batch_size\"):\n batch_size = params.whiten_batch_size\n else:\n batch_size = None\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data_batch(dataset[key].images, method=params.whiten_method,\n batch_size=batch_size)\n print(\"INFO:preprocessing:FT Whitened \"+key+\" data\")\n if hasattr(params, \"lpf_data\") and params.lpf_data:\n dataset[key].images, dataset[key].data_mean, dataset[key].lpf_filter = \\\n dp.lpf_data(dataset[key].images, cutoff=params.lpf_cutoff)\n print(\"INFO:preprocessing:Low pass filtered \"+key+\" data\")\n if hasattr(params, \"contrast_normalize\") and params.contrast_normalize:\n if hasattr(params, \"gauss_patch_size\"):\n dataset[key].images = dp.contrast_normalize(dataset[key].images,\n params.gauss_patch_size)\n else:\n dataset[key].images = dp.contrast_normalize(dataset[key].images)\n print(\"INFO:preprocessing:Contrast normalized \"+key+\" data\")\n if hasattr(params, \"standardize_data\") and params.standardize_data:\n if params.data_type == \"mnist\":\n eps = 1e-5\n else:\n eps = None\n dataset[key].images, dataset[key].data_mean, dataset[key].data_std = \\\n dp.standardize_data(dataset[key].images, eps)\n self.data_mean = dataset[key].data_mean\n self.data_std = dataset[key].data_std\n print(\"INFO:preprocessing:Standardized \"+key+\" data\")\n if hasattr(params, \"extract_patches\") and params.extract_patches:\n assert all(key in params.__dict__.keys()\n for key in [\"num_patches\", \"patch_edge_size\", \"overlapping_patches\",\n \"randomize_patches\"]), (\"Insufficient params for patches.\")\n out_shape = (int(params.num_patches), int(params.patch_edge_size),\n int(params.patch_edge_size), dataset[key].num_channels)\n dataset[key].num_examples = out_shape[0]\n dataset[key].reset_counters()\n if 
hasattr(params, \"patch_variance_threshold\"):\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n params.patch_variance_threshold, dataset[key].rand_state)\n else:\n dataset[key].images = dp.extract_patches(dataset[key].images, out_shape,\n params.overlapping_patches, params.randomize_patches,\n var_thresh=0, rand_state=dataset[key].rand_state)\n dataset[key].shape = dataset[key].images.shape\n dataset[key].num_rows = dataset[key].shape[1]\n dataset[key].num_cols = dataset[key].shape[2]\n dataset[key].num_channels = dataset[key].shape[3]\n dataset[key].num_pixels = np.prod(dataset[key].shape[1:])\n print(\"INFO:preprocessing:Extracted patches from \"+key+\" data\")\n if hasattr(params, \"whiten_data\") and params.whiten_data:\n if hasattr(params, \"whiten_method\") and params.whiten_method != \"FT\":\n dataset[key].images, dataset[key].data_mean, dataset[key].w_filter = \\\n dp.whiten_data(dataset[key].images, method=params.whiten_method)\n print(\"INFO:preprocessing:Whitened \"+key+\" data\")\n if hasattr(params, \"norm_data\") and params.norm_data:\n dataset[key].images, dataset[key].data_max = dp.normalize_data_with_max(dataset[key].images)\n self.data_max = dataset[key].data_max\n print(\"INFO:preprocessing:Normalized \"+key+\" data with maximum\")\n if hasattr(params, \"rescale_data\") and params.rescale_data:\n dataset[key].images, dataset[key].data_min, dataset[key].data_max = dp.rescale_data_to_one(dataset[key].images)\n self.data_max = dataset[key].data_max\n self.data_min = dataset[key].data_min\n print(\"INFO:preprocessing:Rescaled each \"+key+\" datapoint to one\")\n if hasattr(params, \"center_data\") and params.center_data:\n dataset[key].images, dataset[key].data_mean = dp.center_data(dataset[key].images,\n use_dataset_mean=True)\n self.data_mean = dataset[key].data_mean\n print(\"INFO:preprocessing:Centered \"+key+\" data\")\n return dataset", "def preprocess_sentences(train, val, test):\r\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\r\n\r\n train_data = convert_examples_to_features(train, tokenizer, pad_token_label_id=pad_id)\r\n val_data = convert_examples_to_features(val, tokenizer, pad_token_label_id=pad_id)\r\n test_data = convert_examples_to_features(test, tokenizer, pad_token_label_id=pad_id)\r\n\r\n train_dataloader = create_dataloader(train_data)\r\n val_dataloader = create_dataloader(val_data)\r\n test_dataloader = create_dataloader(test_data)\r\n\r\n return train_dataloader, val_dataloader, test_dataloader", "def preprocess(data):\n raise NotImplementedError", "def _preprocess_training_model(self, data):\n def _pre_process(raw_data):\n \"\"\" Pre-process raw data. 
\"\"\"\n pattern = re.compile(\n r\"((?<=')\\w\\d.*?(?=')|(?<=\\\")\\w\\d.*?(?=\\\")|[\\w\\d]+)\")\n words = re.findall(pattern, raw_data)\n return ' '.join(list(map(string_utils.snake_case_to_camel, words)))\n\n data_list = []\n # Preprocess the dataset with naming convention, etc.\n with Progress() as progress:\n preprocess_task = progress.add_task('Pre-processing dataset...',\n total=data.shape[0])\n for idx, row in data.iterrows():\n row_data = {}\n for column in ['text', 'key', 'value']:\n row_data[column] = _pre_process(row[column])\n data_list.append(row_data)\n progress.update(preprocess_task, advance=1)\n return pd.DataFrame(data=data_list)", "def pre_train(self, dataset, **kwargs):\n\n pass", "def pre_process_data(self, all_labels, all_data):\n\n # [1] Normalizes data\n all_data = self.pre_precess_manager.normalization(all_data)\n\n data_train, data_test, label_train, label_test = train_test_split(all_data, all_labels, test_size=0.1,\n shuffle=True)\n\n return data_train, data_test, label_train, label_test", "def preprocessing(self, dataset: tf.data.Dataset) -> tf.data.Dataset:\n # Teacher forcing.\n def preprocess(input_sentence, output_sentence):\n return ((input_sentence, output_sentence[:-1]), output_sentence[1:])\n\n return dataset.map(preprocess)", "def preprocess_train_dataset(dataset):\n return (dataset\n # Shuffle according to the largest client dataset\n .shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)\n # Repeat to do multiple local epochs\n .repeat(CLIENT_EPOCHS_PER_ROUND)\n # Batch to a fixed client batch size\n .batch(CLIENT_BATCH_SIZE, drop_remainder=False)\n # Preprocessing step\n .map(reshape_emnist_element))", "def prepare_data():\n user_name = os.environ.get('USER')\n traintest_corpus = ResumeCorpus('/Users/' + user_name + '/Documents/Data')\n random.shuffle(traintest_corpus.resumes)\n\n for resume in traintest_corpus.resumes:\n try:\n review_text = pre_processing(resume[0])\n review_text = \" \".join(review_text)\n data_dict['data'].append(review_text)\n data_dict['label'].append(resume[1])\n except:\n pass", "def preprocess(dataset):\n preped_dataset = {}\n preped_dataset['c'] = preprocess_set(dataset['c'])\n preped_dataset['r'] = preprocess_set(dataset['r'])\n preped_dataset['y'] = dataset['y']\n return preped_dataset", "def normalize_dataset(self):", "def preprocess(self, impute=True, all_null='raise', normalize=True,\n use_ents=True, ohc_ents=True, use_cats=True, ohc_cats=True,\n remove_cold_start=True):\n # might be iterable, in which case any elements would make it true\n if remove_cold_start == True:\n self.remove_cold_start()\n elif remove_cold_start:\n self.remove_cold_start(remove_cold_start)\n\n train_eids = pd.DataFrame()\n test_eids = pd.DataFrame()\n for entity in self.fguide.entities:\n self.map_column_to_index(entity)\n train_eids[entity] = self.train[entity].values\n test_eids[entity] = self.test[entity].values\n\n # Z-score scaling of real-valued features.\n self.impute_reals(all_null=all_null)\n if normalize:\n self.scale_reals()\n # TODO: if already normalized, normalize=False will have no effect.\n\n # One-hot encoding of categorical and possibly entity features.\n to_ohc = OrderedSet()\n if use_ents and ohc_ents:\n to_ohc |= self.fguide.entities\n to_ohc |= self.fguide.categoricals\n # TODO: implement use_cats and ohc_cats args.\n # if use_cats and ohc_cats:\n # to_ohc |= self.fguide.categoricals\n\n to_ohc = list(to_ohc)\n if to_ohc:\n train_enc_cats, test_enc_cats, fmap, encoder = self.one_hot_encode(to_ohc)\n n_ohc_total = 
sum(encoder.n_values_)\n nf_ohc = len(fmap)\n else:\n # TODO: implement use_cats and ohc_cats args.\n n_ohc_total = 0\n nf_ohc = 0\n fmap = []\n\n # Update feature matrices to include entities if they were not included\n # in the one-hot encoding.\n # Also update the feature map while we're at it.\n n_ents = len(self.fguide.entities)\n if use_ents:\n if ohc_ents: # already included in feature map\n last_ent = self.fguide.entities[-1]\n pairs = zip(fmap, range(nf_ohc))\n for name, i in pairs[::-1]:\n if last_ent in name:\n break\n\n nf_ents = i + 1 # if 10 active entities, last is at index 9\n n_ents_total = sum(encoder.n_values_[i] for i in range(n_ents))\n n_cats_total = n_ohc_total - n_ents_total\n else: # need to update feature map\n nf_ents = n_ents\n n_ents_total = n_ents\n n_cats_total = n_ohc_total\n\n # Add to beginning of feature map.\n labels = list(self.fguide.entities)\n fmap = labels + fmap\n\n # Add entities onto the beginning of the feature matrices.\n train_enc_cats = sp.sparse.hstack((\n self.train[labels].values, train_enc_cats))\n test_enc_cats = sp.sparse.hstack((\n self.test[labels].values, test_enc_cats))\n else: # not using ents\n nf_ents = 0\n n_ents_total = 0\n n_cats_total = n_ohc_total\n\n # How many features of each type do we have after one-hot-encoding?\n nf_cats = nf_ohc - nf_ents\n nf_real = len(self.fguide.real_valueds)\n nf = nf_ents + nf_cats + nf_real\n\n # Add in the real-valued features.\n only_reals = not use_ents and not use_cats\n fmap += self.fguide.real_valueds\n\n if self.train_reals.shape[1] == 0:\n if only_reals:\n raise ValueError(\n \"no real values and not using ents or cats\")\n train_X = train_enc_cats.tocsr()\n test_X = test_enc_cats.tocsr()\n else:\n if only_reals:\n train_X = self.train_reals.values\n test_X = self.test_reals.values\n else:\n train_X = sp.sparse.hstack((\n train_enc_cats, self.train_reals.values)).tocsr()\n test_X = sp.sparse.hstack((\n test_enc_cats, self.test_reals.values)).tocsr()\n\n # Log information regarding encoded features.\n logging.info('number of active entity features: %d of %d' % (\n nf_ents, n_ents_total))\n logging.info('number of active categorical features: %d of %d' % (\n nf_cats, n_cats_total))\n logging.info('number of real-valued features: %d' % nf_real)\n logging.info('Total of %d features after encoding' % nf)\n\n train_y = np.squeeze(self.train_target.values)\n test_y = np.squeeze(self.test_target.values)\n\n return (train_X, train_y, train_eids,\n test_X, test_y, test_eids,\n fmap, nf_ents)", "def preprocessing_for_bert(self, data):\n # Create empty lists to store outputs\n input_ids = []\n attention_masks = []\n MAX_LEN = self.opt.MAX_QUESTION_LEN\n # For every sentence...\n for sent in data:\n \n encoded_sent = self.tokenizer.encode_plus(\n text=self.text_preprocessing(sent), # Preprocess sentence\n add_special_tokens=True, # Add `[CLS]` and `[SEP]`\n max_length=MAX_LEN, # Max length to truncate/pad\n pad_to_max_length=True, # Pad sentence to max length\n #return_tensors='pt', # Return PyTorch tensor\n truncation=True,\n return_attention_mask=True # Return attention mask\n )\n \n # Add the outputs to the lists\n input_ids.append(encoded_sent.get('input_ids'))\n attention_masks.append(encoded_sent.get('attention_mask'))\n\n # Convert lists to tensors\n input_ids = torch.tensor(input_ids)\n attention_masks = torch.tensor(attention_masks)\n\n return input_ids, attention_masks", "def preprocess_train_data(self):\r\n print(\"* Preprocessing training data.\", flush=True)\r\n 
prep.create_HDF_file(self.C.training_set, is_training_set=True)\r\n\r\n self.print_time_elapsed()", "def preprocess(self):\n lines = [line.rstrip() for line in open(self.attr_path, 'r')]\n all_attr_names = lines[1].split()\n for i, attr_name in enumerate(all_attr_names):\n self.attr2idx[attr_name] = i\n self.idx2attr[i] = attr_name\n\n lines = lines[2:]\n random.seed(1234)\n random.shuffle(lines)\n for i, line in enumerate(lines):\n split = line.split()\n filename = split[0]\n values = split[1:]\n\n label = []\n for attr_name in self.selected_attrs:\n idx = self.attr2idx[attr_name]\n label.append(values[idx] == '1')\n\n if (i+1) < 4:\n self.test_dataset.append([filename, label])\n else:\n self.train_dataset.append([filename, label])", "def preprocess(self):", "def set_up_data():\r\n \r\n X, Y = pretreatment.import_dataset()\r\n \r\n print('Applying cleansing...')\r\n X = pretreatment.pretreatment(X)\r\n Y = pretreatment.pretreatment(Y)\r\n \r\n indice = [i for i in range(len(X)) if (len(X[i]) > SENTENCE_LENGTH-2 and len(X[i]) < SENTENCE_LENGTH+1 and len(Y[i]) > SENTENCE_LENGTH-2 and len(Y[i]) < SENTENCE_LENGTH+1)]#(len(X[i]) > SENTENCE_LENGTH and len(X[i]) < 2 * SENTENCE_LENGTH and len(Y[i]) > SENTENCE_LENGTH and len(Y[i]) < 2 * SENTENCE_LENGTH)]\r\n X = [X[i] for i in indice]\r\n Y = [Y[i] for i in indice]\r\n \r\n X = pretreatment.standardize_sentence_length(X)\r\n Y = pretreatment.standardize_sentence_length(Y)\r\n \r\n print('Computing the corpus sizes...')\r\n compute_T(X, 'english')\r\n compute_T(Y, 'french')\r\n compute_S(X, 'english')\r\n compute_S(Y, 'french')\r\n compute_N(X, 'french')\r\n compute_N(Y, 'english')\r\n \r\n print('English corpus: %d tokens' % T_ENGLISH)\r\n print('French corpus: %d tokens' % T_FRENCH)\r\n print('English sentence length: %d' % S_ENGLISH)\r\n print('French sentence length: %d' % S_FRENCH)\r\n print('Number of sentences (both english and french): %d / %d' % (N_ENGLISH, N_FRENCH))\r\n \r\n print('Converting in one hot vectors')\r\n global CORPUS_ENGLISH, CORPUS_FRENCH\r\n params_ENGLISH = (N_ENGLISH, S_ENGLISH, T_ENGLISH)\r\n params_FRENCH = (N_FRENCH, S_FRENCH, T_FRENCH)\r\n X, CORPUS_ENGLISH= treatment.convert_to_one_hot(X, params_ENGLISH)\r\n Y, CORPUS_FRENCH= treatment.convert_to_one_hot(Y, params_FRENCH)\r\n \r\n return (X, Y)", "def preprocess(self, data, label):\n\t\traise NotImplementedError", "def preprocess_dataset(dataset, tokenizer):\n eos = torch.tensor([tokenizer.eos_token_id], dtype=torch.long)\n q_start = torch.tensor(tokenizer.encode('question:'), dtype=torch.long)\n q_end = torch.tensor(tokenizer.encode(':question'), dtype=torch.long)\n\n tensors = [[] for i in range(3)]\n for i in trange(len(dataset)):\n example = dataset[i]\n\n context_start_idx = (example[2] == 1).nonzero()[0].item()\n if example[1][-1] == 1:\n context_end_idx = len(example[1]) - 1\n else:\n context_end_idx = (example[1] == 0).nonzero()[0].item()\n ans_start = example[3] - context_start_idx\n ans_end = example[4] - context_start_idx\n\n context = example[0][context_start_idx: context_end_idx]\n question = example[0][: context_start_idx]\n answer = example[0][example[3]: example[4] + 1]\n\n input_ids = torch.cat([\n context,\n eos,\n answer,\n eos,\n q_start,\n question,\n q_end,\n eos\n ])\n\n attention_mask = torch.ones_like(input_ids, dtype=torch.long)\n token_type_ids = torch.cat([\n torch.zeros(len(context) + 1, dtype=torch.long),\n torch.ones(len(answer) + 1, dtype=torch.long),\n 2 * torch.ones(len(question) + 3, dtype=torch.long)\n ])\n 
token_type_ids[ans_start: ans_end + 1] = 1\n\n tensors[0].append(input_ids)\n tensors[1].append(attention_mask)\n tensors[2].append(token_type_ids)\n\n tensors_padded = []\n for i, sequences in enumerate(tqdm(tensors)):\n padded = pad_sequence(sequences, batch_first=True)\n tensors_padded.append(padded)\n\n new_dataset = TensorDataset(*tensors_padded)\n return new_dataset", "def preprocess_csv():\n filename = DATA_DIR + 'text_classification/codi/intents.csv'\n df = pd.read_csv(filename, header=None)\n df = df.dropna()\n classes = df[1].unique()\n class_list = classes.tolist()\n df[0] = df[0].apply(clean_text)\n df[1] = df[1].apply(lambda x: class_list.index(x))\n counts = df[1].value_counts()\n\n # omit classes with too few examples\n omit = counts[counts < 2].index.values\n omitted = df[df[1].isin(omit)]\n included = df[~df[1].isin(omit)]\n y = included.pop(1)\n\n x_train, x_test, y_train, y_test = train_test_split(included, y, test_size=0.1, stratify=y, random_state=42)\n x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.1, stratify=y_train,\n random_state=42)\n train_df: pd.DataFrame = pd.concat([x_train, y_train], axis=1)\n val_df: pd.DataFrame = pd.concat([y_val, x_val], axis=1)\n test_df: pd.DataFrame = pd.concat([y_test, x_test], axis=1)\n\n # add omitted examples back to training sets\n train_df: pd.DataFrame = pd.concat([train_df, omitted], axis=0)\n train_df = train_df.reindex(columns=[1, 0])\n x_train: pd.DataFrame = pd.concat([x_train, omitted[0]], axis=0)\n y_train: pd.DataFrame = pd.concat([y_train, omitted[1]], axis=0)\n\n # save to file\n train_df.to_csv('train.csv', header=False, index=False)\n val_df.to_csv('val.csv', header=False, index=False)\n test_df.to_csv('test.csv', header=False, index=False)\n np.savetxt('classes.txt', classes, fmt='%s')\n\n return (train_df, val_df, test_df,\n x_train.values, y_train.values, x_val.values, y_val.values, x_test.values, y_test.values, classes)", "def preprocess(sent):\n return sent", "def preprocess(args: argparse.Namespace) -> None:\n data_dir = os.path.join(args.data_dir, args.corpus)\n train_file = os.path.join(data_dir, 'train.jsonl')\n train_instances = load_jsonl(train_file, max_instances=args.max_instances)\n precompute_ngrams(train_instances)\n text1_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=True)\n text1_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=True)\n text1_gram3 = compute_most_freq_ngrams(train_instances, max_number=args.max_3gram,\n length=3, target=True)\n text2_gram1 = compute_most_freq_ngrams(train_instances, max_number=args.max_1gram,\n length=1, target=False)\n text2_gram2 = compute_most_freq_ngrams(train_instances, max_number=args.max_2gram,\n length=2, target=False)\n text2_gram3 = compute_most_freq_ngrams(train_instances, max_number=args.max_3gram,\n length=3, target=False)\n all_ngrams = list(set(text1_gram1 + text1_gram2 + text1_gram3 + text2_gram1 + text2_gram2 +\n text2_gram3))\n gram_to_dim_mapping = {ng: i for i, ng in enumerate(all_ngrams)}\n label_to_dim_mapping = map_labels_to_dims(train_instances)\n save_to_pickle(data=train_instances, fpath_out=os.path.join(\n args.serialization_dir, 'train_instances.pickle'))\n save_dict(data=gram_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'gram_mapping.json'))\n save_dict(data=label_to_dim_mapping, fpath_out=os.path.join(args.serialization_dir,\n 'label_mapping.json'))\n # save_dict(data=gram1, 
fpath_out=os.path.join(args.serialization_dir, '1grams.json'))\n # save_dict(data=gram2, fpath_out=os.path.join(args.serialization_dir, '2grams.json'))\n # save_dict(data=gram3, fpath_out=os.path.join(args.serialization_dir, '3grams.json'))", "def fit(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n self.normalization_n = []\n self.normalization_d = []\n self.first_title = list(self.dataset.keys())[0]\n for ind in range(len(self.dataset[self.first_title])):\n self.normalize_features(self.dataset, ind)", "def pre_train_person(self, dataset, **kwargs):\n \n pass", "def data_preprocessing(dat: pd.DataFrame, art='C', y=None, logger=None, remove=True):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n logger.info('Start data preprocessing')\n # replace original indeices with default ones\n dat = dat.reset_index(drop=True)\n\n if art == 'C':\n logger.info('Start to label target feature y for classification task')\n dat.iloc[:, -1] = LabelEncoder().fit_transform(dat.iloc[:, -1])\n logger.info('End with label encoding the target feature')\n if remove:\n # remove columns with more than 1/2 na\n dat = dat.loc[:, dat.isna().sum()/len(dat) < .5]\n logger.info('Following features are removed from the dataframe because half of their value are NA: %s' %\n (dat.columns[dat.isna().sum()/len(dat) > .5].to_list()))\n # Encoding\n oe = OneHotEncoder(drop='first')\n # get categorical columns\n if y:\n dat_y = dat[[y]]\n cols = dat.columns.to_list()\n cols.remove(y)\n dat_x = dat[cols]\n else:\n dat_y = dat[[dat.columns[-1]]]\n dat_x = dat[dat.columns[:-1]]\n dat_categ = dat_x.select_dtypes(include=['object'])\n # get kterm of categ features\n for i in dat_categ.columns:\n # save output to dat\n tmp = dat_x[i].value_counts()\n dat_x[i + '_kterm'] = dat_x[i].map(lambda x: tmp[x] if x in tmp.index else 0)\n # float columns including the k term cols\n dat_numeric = dat_x.select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n # onehot encoding and label encoding\n dat_categ_onehot = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values < 8]\n dat_categ_label = dat_categ.iloc[:, dat_categ.apply(lambda x: len(x.unique())).values >= 8]\n flag_onehot = False\n flag_label = False\n # oe\n if dat_categ_onehot.shape[1] > 0:\n logger.info('Start to do onehot to the following categoric features: %s' %\n (str(dat_categ_onehot.columns.to_list())))\n dat_onehot = pd.DataFrame(oe.fit_transform(dat_categ_onehot.astype(str)).toarray(),\n columns=oe.get_feature_names(dat_categ_onehot.columns))\n logger.info('End with onehot')\n flag_onehot = True\n else:\n dat_onehot = None\n # le\n if dat_categ_label.shape[1] > 0:\n logger.info('Start to do label encoding to the following categoric features: %s' %\n (str(dat_categ_label.columns.to_list())))\n dat_categ_label = dat_categ_label.fillna('NULL')\n dat_label = pd.DataFrame(columns=dat_categ_label.columns)\n for i in dat_categ_label.columns:\n dat_label[i] = LabelEncoder().fit_transform(dat_categ_label[i].astype(str))\n flag_label = True\n logger.info('End with label encoding')\n else:\n dat_label = None\n # scaling\n # combine\n dat_new = pd.DataFrame()\n if flag_onehot and flag_label:\n dat_new = pd.concat([dat_numeric, dat_onehot, dat_label], axis=1)\n elif flag_onehot:\n dat_new = pd.concat([dat_numeric, dat_onehot], axis=1)\n elif flag_label:\n dat_new = pd.concat([dat_numeric, dat_label], axis=1)\n else:\n 
dat_new = dat_numeric\n dat_new = pd.concat([dat_new, dat_y], axis=1)\n # imputation\n dat_new = dat_new.dropna(axis=1, how='all')\n if dat_new.isna().sum().sum() > 0:\n logger.info('Nan value exist, start to fill na with iterative imputer: ' +\n str(dat_new.isna().sum().sum()))\n # include na value, impute with iterative Imputer or simple imputer\n columns = dat_new.columns\n imp = IterativeImputer(max_iter=10, random_state=0)\n # imp = SimpleImputer(missing_values=np.nan, strategy='mean')\n dat_new = imp.fit_transform(dat_new)\n dat_new = pd.DataFrame(dat_new, columns=columns)\n dat_numeric = dat_new.iloc[:, :-1].select_dtypes(include=['float32', 'float64', 'int32', 'int64'])\n logger.info('End with fill nan')\n return dat_new, dat_numeric.columns", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def preprocess(self):\n pass" ]
[ "0.7355754", "0.6839999", "0.6822704", "0.6733091", "0.6711649", "0.6583312", "0.6569212", "0.6514732", "0.6510468", "0.64858013", "0.64236563", "0.6358511", "0.63251275", "0.63248324", "0.6324185", "0.63179976", "0.631258", "0.62988704", "0.62934196", "0.62665325", "0.6261226", "0.62581235", "0.62575257", "0.6219485", "0.6211554", "0.6200378", "0.6178308", "0.61683536", "0.6157403", "0.61535126" ]
0.6929304
1
get needed data about the group of contacts specified with groupName
def getGroupData(service, groupName, attList): # import IPython ; IPython.embed() ; exit(); groupsDataList = service.contactGroups().list().execute()["contactGroups"] for group in groupsDataList: if group["name"] == groupName: groupData = [] for att in attList: groupData.append(group[att]) return groupData
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getContactsData(service, groupResourceName, maxMembers):\n # get the ids of the contacts inside the specified group\n contactsIDs = service.contactGroups().get(\n resourceName=groupResourceName, \n maxMembers=maxMembers).execute()[\"memberResourceNames\"]\n\n # get data of the contacts that correspond to the ids obtained\n contactsData = service.people().getBatchGet(\n resourceNames=contactsIDs,\n personFields='names,emailAddresses').execute()[\"responses\"]\n\n # extract the names and the emailAddresses of the contacts\n namessList = [] \n mailsList = []\n for contact in contactsData:\n try:\n namessList.append(contact[\"person\"][\"names\"][0][\"displayName\"])\n except:\n raise Exception(\"All contacts must have a name associated\")\n mailsList.append(contact[\"person\"][\"emailAddresses\"][0][\"value\"])\n return namessList, mailsList", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def getPeopleInAddressBook(group_name=None):\n ab = ABAddressBook.sharedAddressBook()\n people = None\n if not group_name:\n people = ab.people()\n else:\n for group in ab.groups():\n if group.name() == group_name:\n people = group.members()\n if people == None:\n print \"No contacts could be found for given group\"\n return _clist(people)", "def get_contactgroup(self, object_name, user_key = None):\n\t\treturn self.get_object('contactgroup',object_name, user_key = user_key)", "async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, active from contact_groups where id=%s\"\"\"\n row = await dbcon.fetch_row(q, (id,))\n contact = None\n if row:\n contact = object_models.ContactGroup(*row)\n return contact", "def get_groups_details(self, groups):\n assert isinstance(groups, list)\n # It may be require we request the API by splitting the names list\n # If the list is too long to be handled by the Gerrit server (URI)\n query_args = \"?%s\" % \"&\".join([\"q=%s\" % g for g in groups])\n query_args += \"&o=MEMBERS\" if groups else \"o=MEMBERS\"\n\n try:\n ret = self.g.get('groups/%s' % query_args)\n except HTTPError as e:\n return self._manage_errors(e)\n\n return ret", "def get_group_by_name_get(self, groupName, groupType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/GroupV2/Name/{groupName}/{groupType}/\"))", "def getGroupInfo(groupId):\n url = f\"https://groups.roblox.com/v1/groups/{groupId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n 
return contact_data", "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('GET', url)", "def groupfinder(name, request):\n #FIXME: Implement\n return ()\n return request.context.get_groups(name)", "def get_group_info(groupname):\n return jsonify(admin.get_group_info(current_app.scoped_session(), groupname))", "def GetGroupMembers(self, group):\n return []", "def test_api_v1_groups_names_get(self):\n pass", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` \n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. 
Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def createGroupWithContacts(self, name, contacts=[]):\n try:\n contact_ids = []\n for contact in contacts:\n contact_ids.append(contact.id)\n\n group = LineGroup(self, self._createGroup(name, contact_ids))\n self.groups.append(group)\n\n return group\n except Exception as e:\n self.raise_error(e)\n\n return None", "def test_function(self):\n self.ms_client.http_request(method='GET', url_suffix='groups', params={'$orderby': 'displayName'})\n demisto.results('ok')", "def get_group(self, group_name):\n\n return self._group[group_name]", "def test_04_get_group_of_person(self):\n p1 = Person.query.first().mongo_id\n rv = self.app.get('person/' + str(p1) + '/groups')\n data = json.loads(rv.data)\n self.assertEqual(data[\"status\"], \"success\")", "def set_selected_group(self, group_id):\n self.contact_list = self.contacts_by_group_list[group_id - 1][1][1]\n\n\t# Return the contact list so far", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def test_get_group(self):\n pass", "def add_to_group(self, org, contact, group):\n pass", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret" ]
[ "0.6783003", "0.66426826", "0.66284263", "0.6583931", "0.6508952", "0.64133483", "0.6247849", "0.6223199", "0.61684126", "0.6117678", "0.6113146", "0.6007093", "0.6001018", "0.59909797", "0.59482914", "0.58569443", "0.58345133", "0.5827508", "0.5820286", "0.5813939", "0.58055633", "0.57988966", "0.5778508", "0.57765627", "0.57331586", "0.5729625", "0.5715613", "0.57095194", "0.5703677", "0.5676752" ]
0.745583
0
get names and mails of the contacts inside the specified group
def getContactsData(service, groupResourceName, maxMembers): # get the ids of the contacts inside the specified group contactsIDs = service.contactGroups().get( resourceName=groupResourceName, maxMembers=maxMembers).execute()["memberResourceNames"] # get data of the contacts that correspond to the ids obtained contactsData = service.people().getBatchGet( resourceNames=contactsIDs, personFields='names,emailAddresses').execute()["responses"] # extract the names and the emailAddresses of the contacts namessList = [] mailsList = [] for contact in contactsData: try: namessList.append(contact["person"]["names"][0]["displayName"]) except: raise Exception("All contacts must have a name associated") mailsList.append(contact["person"]["emailAddresses"][0]["value"]) return namessList, mailsList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPeopleInAddressBook(group_name=None):\n ab = ABAddressBook.sharedAddressBook()\n people = None\n if not group_name:\n people = ab.people()\n else:\n for group in ab.groups():\n if group.name() == group_name:\n people = group.members()\n if people == None:\n print \"No contacts could be found for given group\"\n return _clist(people)", "async def get_contacts_for_contact_group(dbcon: DBConnection, contact_group_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from contact_group_contacts, contacts\n where contact_group_contacts.contact_group_id = %s\n and contact_group_contacts.contact_id = contacts.id\"\"\"\n return [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (contact_group_id,))]", "def getGroupData(service, groupName, attList):\n # import IPython ; IPython.embed() ; exit(); \n groupsDataList = service.contactGroups().list().execute()[\"contactGroups\"]\n for group in groupsDataList:\n if group[\"name\"] == groupName:\n groupData = []\n for att in attList:\n groupData.append(group[att])\n return groupData", "def get_group_list(self):\n return [(item[0], item[1][0]) for item in self.contacts_by_group_list]", "def get_contactgroup(self, object_name, user_key = None):\n\t\treturn self.get_object('contactgroup',object_name, user_key = user_key)", "def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise", "def contact(self, request, **kwargs):\n group_obj = self.get_object()\n contact_data = group_obj.contacts.all()\n if contact_data is not None:\n serializer_data = ContactSerializer(contact_data, many=True)\n return Response(serializer_data.data)\n else:\n return Response({'message': 'No details found for contact of this group'}, status=status.HTTP_404_NOT_FOUND)", "def GetGroupMembers(self, group):\n return []", "def get_groups_details(self, groups):\n assert isinstance(groups, list)\n # It may be require we request the API by splitting the names list\n # If the list is too long to be handled by the Gerrit server (URI)\n query_args = \"?%s\" % \"&\".join([\"q=%s\" % g for g in groups])\n query_args += \"&o=MEMBERS\" if groups else \"o=MEMBERS\"\n\n try:\n ret = self.g.get('groups/%s' % query_args)\n except HTTPError as e:\n return self._manage_errors(e)\n\n return ret", "def list_group(group):\n\n members = group_members(group)\n ret = {}\n if members:\n for member in members:\n info = get(member)\n if info:\n ret[uid2dn(member)] = info\n return ret", "def search_contact_list(self):\n\n search_db = Database()\n result = search_db.contact_search(self.name)\n if not result:\n print Fore.YELLOW + ' No such contact'\n return None\n if result > 1:\n print ' Which contact ??'\n for items in result:\n if items[2] > 1:\n print Fore.BLUE + ' %s %s %s' % ([items[0]], items[1], items[2])\n else:\n print str(items[1]), items[2]\n\n return result", "def get_group_of_emails(M):\n print \"Try to access group of emails\"\n data = search_email_advanced(M)\n if data is None:\n return\n # print \"Got data as \", data\n ids = data[0]\n id_list = ids.split()\n for id_num in id_list:\n rv, data = M.uid('fetch', id_num, \"(RFC822)\")\n if rv != \"OK\":\n print \"Error getting message\"\n return\n # get raw text of the whole email\n raw_email = 
data[0][1]\n content = email.message_from_string(raw_email)\n # print raw_email\n p = EmailParser()\n # print sender and receivers\n print \"To: \", content['To'], \"\\n\"\n print \"From: \", email.utils.parseaddr(content['From']), \"\\n\"\n print \"Date: \", content['Date'], \"\\n\"\n print \"Subject: \", p.parsestr(raw_email).get('Subject'), \\\n \"\\n\"\n result = parse_content(content)\n # print results\n printData(result)", "def get_queryset(self):\n contact_data = Contact.objects.filter(contact_groups__in=Member.objects.filter(\n user=self.request.user).values('group_id').distinct())\n\n return contact_data", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "async def get_contact_group(dbcon: DBConnection, id: int) -> Any: # Use any because optional returns suck.\n q = \"\"\"select id, name, active from contact_groups where id=%s\"\"\"\n row = await dbcon.fetch_row(q, (id,))\n contact = None\n if row:\n contact = object_models.ContactGroup(*row)\n return contact", "def inviteIntoGroup(self, group, contacts=[]):\n contact_ids = [contact.id for contact in contacts]\n self._inviteIntoGroup(group.id, contact_ids)", "def pull_one_contact(self, name):\n contact = []\n for x in self.contacts:\n if x[0] == name:\n contact_name = x[0]\n number = x[1]\n email = x[2]\n zipcode = x[3]\n contact = [contact_name, number, email, zipcode]\n print(contact)\n return contact, self.contacts.index(x)", "def group(self, group_cn):\n group = self.search(base=GROUPS, cn=group_cn)\n\n if len(group) == 0:\n return []\n else:\n group_members = group[0]['attributes']['member']\n\n members = []\n for member in group_members:\n members.append(self.search(dn=member))\n\n if self.objects:\n return self.member_objects(members)\n\n return members", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def get_contacts_list(self):\n contacts = self.driver.find_elements_by_class_name(\"_1wjpf\")\n s= [contact.text for contact in contacts] #extracts chats and last messsages\n print (\"get contacts: \"+str(s)) #print only chat names\n return s[::2] #returns only chat names", "def _find_groups_for_user(email):\n return [g['name'] for g in groups.find({\"users\":email})]", "def getGroup(group: int, name=\"\") -> list:\n groups = mongo.db.groups.find({'id':group},{'_id':0})\n userID_list = []\n user_list = []\n for entry in groups:\n if entry[\"id\"] == group:\n userID_list = userID_list + entry[\"members\"]\n if len(userID_list) != 0:\n for entry in userID_list:\n x = fetchUser(userId=entry)\n user_list = user_list + x\n return user_list", "def set_selected_group(self, group_id):\n self.contact_list = self.contacts_by_group_list[group_id - 1][1][1]\n\n\t# Return the contact list so far", "def customer_group_get_related(group_id):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `groups`.`company_name` = (\n SELECT `asshole`.`company_name` 
\n FROM \n (\n SELECT * \n FROM `groups` \n WHERE `group_id` = \"%s\"\n ) AS `asshole`\n )\n \"\"\" %(group_id)\n \n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def list_contacts(self):\n return self.contacts", "def customer_group_get(group_id=None):\n db_conn = DB_Conn()\n db = db_conn.db_connect()\n\n query = \"\"\"\n SELECT \n `group_id`,\n `group_name`,\n `description`,\n `timestamp`,\n `created_by`,\n `creation_time`,\n `is_deleted`,\n `updated_by`,\n `role_id`,\n `is_default`,\n `is_customer`,\n `company_name`,\n `company_address`,\n `company_telephone`,\n `company_fax`,\n `company_website`,\n `company_sales_contact`,\n `company_purchase_contact`,\n `company_business`,\n `company_business_type`,\n `company_sales_email`,\n `company_purchase_email`,\n `company_reg_number`,\n `company_vat_number` \n FROM `groups` \n WHERE `is_customer` = 1\n \"\"\"\n\n if group_id:\n query += \"\"\"\n AND `group_id` = \\\"%s\\\"\n \"\"\" % (group_id)\n\n group_details = None\n cursor = db.cursor()\n\n if cursor.execute(query) != 0:\n group_details = cursor.fetchall()\n\n cursor.close()\n db.close()\n\n return group_details", "def get_membersof(self, kwargs):\n group = kwargs[\"group\"]\n verbose = kwargs.get(\"verbose\", False)\n\n results = list(self.engine.query(self.engine.GROUP_DN_FILTER(group), [\"distinguishedName\", \"objectSid\"]))\n if results:\n group_dn = results[0][\"distinguishedName\"]\n else:\n error(\"Group {group} does not exists\".format(group=group))\n\n primary_group_id = results[0][\"objectSid\"].split('-')[-1]\n results = self.engine.query(self.engine.ACCOUNTS_IN_GROUP_FILTER(primary_group_id, group_dn))\n self.display(results, verbose)", "def get_contacts(self, uuid=None, urn=None, group=None, deleted=None, before=None, after=None):\n params = self._build_params(uuid=uuid, urn=urn, group=group, deleted=deleted, before=before, after=after)\n return self._get_query('contacts', params, Contact)", "def get_contact_email():\n from shotglass2.shotglass import get_site_config\n \n site_config = get_site_config()\n \n to = None\n to_name = None\n to_addr = None\n \n \n rec = Pref(g.db).get(\"Contact Name\",user_name=site_config.get(\"HOST_NAME\"),default=site_config.get(\"CONTACT_NAME\",site_config.get(\"MAIL_DEFAULT_SENDER\",\"Site Contact\")))\n if rec:\n to_name = rec.value\n \n if site_config['TESTING']:\n rec = Pref(g.db).select_one(where=\"name='Contact Email Address' and user_name='test'\")\n else:\n rec = Pref(g.db).get(\"Contact Email Address\",user_name=site_config.get(\"HOST_NAME\"),\n default=site_config.get(\"CONTACT_EMAIL_ADDR\",\n site_config.get(\"MAIL_DEFAULT_ADDR\",\"info@{}\".format(site_config.get(\"HOST_NAME\",\"example.com\")))))\n if rec:\n to_addr = rec.value\n # split the addresses into a list if there are commas\n temp_addr_list = to_addr.split(',')\n if len(temp_addr_list) > 1:\n to = []\n for index, val in enumerate(temp_addr_list):\n if index == 0:\n to.append((to_name,val,))\n else:\n to.append((None,val,)) \n else:\n to = (to_name,to_addr,)\n \n return to" ]
[ "0.72366965", "0.72157335", "0.68207794", "0.66384506", "0.6626178", "0.6562149", "0.65262914", "0.64727664", "0.6297635", "0.628419", "0.6233957", "0.6177852", "0.61701566", "0.6106635", "0.6102098", "0.6078653", "0.60124636", "0.5987554", "0.59866476", "0.5976168", "0.5919961", "0.58431965", "0.5834403", "0.5803522", "0.57902443", "0.5789157", "0.57758087", "0.5769377", "0.5766294", "0.5714408" ]
0.7256055
0
This function is used to create a binary raster mask from polygons in a given geojson file, so as to label the pixels in the image as either background or target.
def training_mask_generation(img_pan_filename, input_geojson_filename, labels): with rasterio.open(img_pan_filename) as f: metadata_pan = f.profile img_pan = f.read(1) mask = np.zeros((img_pan.shape[0], img_pan.shape[1])) xres = metadata_pan['transform'][0] ulx = metadata_pan['transform'][2] yres = metadata_pan['transform'][4] uly = metadata_pan['transform'][5] lrx = ulx + (metadata_pan['width'] * xres) lry = uly - (metadata_pan['height'] * abs(yres)) polygons = json.load(open(input_geojson_filename)) for polygon in range(len(polygons['features'])): layer_num = labels.index(str(polygons['features'][polygon]['properties']['Label'])) coords = np.array(polygons['features'][polygon]['geometry']['coordinates'][0][0]) xf = ((metadata_pan['width']) ** 2 / (metadata_pan['width'] + 1)) / (lrx - ulx) yf = ((metadata_pan['height']) ** 2 / (metadata_pan['height'] + 1)) / (lry - uly) coords[:, 1] = yf * (coords[:, 1] - uly) coords[:, 0] = xf * (coords[:, 0] - ulx) position = np.round(coords).astype(np.int32) cv2.fillConvexPoly(mask, position, layer_num) return np.expand_dims(mask, axis = 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def geojsons_to_masks_and_fill_nodata(rtiler, vtiler, label_tile_dir, fill_value=0):\n rasterized_label_paths = []\n print(\"starting label mask generation\")\n if not os.path.exists(label_tile_dir):\n os.mkdir(label_tile_dir)\n for img_tile, geojson_tile in tqdm(zip(sorted(rtiler.tile_paths), sorted(vtiler.tile_paths))):\n fid = os.path.basename(geojson_tile).split(\".geojson\")[0]\n rasterized_label_path = os.path.join(label_tile_dir, fid + \".tif\")\n rasterized_label_paths.append(rasterized_label_path)\n gdf = gpd.read_file(geojson_tile)\n # gdf.crs = rtiler.raster_bounds_crs # add this because gdfs can't be saved with wkt crs\n arr = instance_mask(gdf, out_file=rasterized_label_path, reference_im=img_tile, \n geom_col='geometry', do_transform=None,\n out_type='int', burn_value=1, burn_field=None) # this saves the file, unless it is empty in which case we deal with it below.\n if not arr.any(): # in case no instances in a tile we save it with \"empty\" at the front of the basename\n with rasterio.open(img_tile) as reference_im:\n meta = reference_im.meta.copy()\n reference_im.close()\n meta.update(count=1)\n meta.update(dtype='uint8')\n if isinstance(meta['nodata'], float):\n meta.update(nodata=0)\n rasterized_label_path = os.path.join(label_tile_dir, \"empty_\" + fid + \".tif\")\n with rasterio.open(rasterized_label_path, 'w', **meta) as dst:\n dst.write(np.expand_dims(arr, axis=0))\n dst.close()\n rtiler.fill_all_nodata(nodata_fill=fill_value)\n return rasterized_label_paths", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def mask_to_poly_geojson(pred_arr, channel_scaling=None, reference_im=None,\n output_path=None, output_type='geojson', min_area=40,\n bg_threshold=0, do_transform=None, simplify=False,\n tolerance=0.5, **kwargs):\n\n mask_arr = preds_to_binary(pred_arr, channel_scaling, bg_threshold)\n\n if do_transform and reference_im is None:\n raise ValueError(\n 'Coordinate transformation requires a reference image.')\n\n if do_transform:\n with 
rasterio.open(reference_im) as ref:\n transform = ref.transform\n crs = ref.crs\n ref.close()\n else:\n transform = Affine(1, 0, 0, 0, 1, 0) # identity transform\n crs = rasterio.crs.CRS()\n\n mask = mask_arr > bg_threshold\n mask = mask.astype('uint8')\n\n polygon_generator = features.shapes(mask_arr,\n transform=transform,\n mask=mask)\n polygons = []\n values = [] # pixel values for the polygon in mask_arr\n for polygon, value in polygon_generator:\n p = shape(polygon).buffer(0.0)\n if p.area >= min_area:\n polygons.append(shape(polygon).buffer(0.0))\n values.append(value)\n\n polygon_gdf = gpd.GeoDataFrame({'geometry': polygons, 'value': values},\n crs=crs.to_wkt())\n if simplify:\n polygon_gdf['geometry'] = polygon_gdf['geometry'].apply(\n lambda x: x.simplify(tolerance=tolerance)\n )\n # save output files\n if output_path is not None:\n if output_type.lower() == 'geojson':\n if len(polygon_gdf) > 0:\n polygon_gdf.to_file(output_path, driver='GeoJSON')\n else:\n save_empty_geojson(output_path, polygon_gdf.crs.to_epsg())\n elif output_type.lower() == 'csv':\n polygon_gdf.to_csv(output_path, index=False)\n\n return polygon_gdf", "def binary_mask_to_polygon(binary_mask, tolerance=0):\r\n\r\n polygons = []\r\n if isinstance(binary_mask, torch.Tensor):\r\n binary_mask = binary_mask.cpu().numpy()\r\n # pad mask to close contours of shapes which start and end at an edge\r\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\r\n contours = measure.find_contours(padded_binary_mask, 0.5)\r\n contours = np.subtract(contours, 1)\r\n for contour in contours:\r\n contour = close_contour(contour)\r\n contour = measure.approximate_polygon(contour, tolerance)\r\n if len(contour) < 3:\r\n continue\r\n contour = np.flip(contour, axis=1) # x, y\r\n polygon = np.maximum(contour, 0)\r\n #segmentation = contour.ravel().tolist()\r\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\r\n #segmentation = [0 if i < 0 else i for i in segmentation]\r\n polygons.append(polygon)\r\n\r\n return polygons", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour\n # after padding and subtracting 1 we may get -0.5 points in our segmentation\n segmentation = [np.clip(i,0.0,i).tolist() for i in segmentation]\n polygons.append(segmentation)\n\n return polygons", "def load_mask(self, image_id):\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"face\":\n return super(self.__class__, self).load_mask(image_id)\n info = self.image_info[image_id]\n mask = np.zeros([info['height'], info['width'], len(info['boundingbox'])], dtype=np.uint8)\n for i, p in enumerate(info['boundingbox'].values()):\n rr, cc = skimage.draw.polygon(p['y'], p['x'])\n mask[rr, cc, i] = 1\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def binary_mask_to_polygon(binary_mask, tolerance=0):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(\n binary_mask, pad_width=1, mode='constant', constant_values=0)\n 
contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour.ravel().tolist()\n # after padding and subtracting 1 we may\n # get -0.5 points in our segmentation\n segmentation = [0 if i < 0 else i for i in segmentation]\n polygons.append(segmentation)\n return polygons", "def test_make_binary_and_fp(self):\n output_mask = boundary_mask(df=os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_b_mask_inner.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def create_binary_masks(image_path):\n mask = cv2.imread(image_path, cv2.IMREAD_ANYDEPTH)\n size = mask.shape\n for row_pixel in range(0, size[0]):\n for column_pixel in range(0, size[1]):\n if mask[row_pixel, column_pixel] == 0:\n mask[row_pixel, column_pixel] = 65535\n\n else:\n mask[row_pixel, column_pixel] = 0\n\n cv2.imwrite(image_path[:-4]+'_binary.png', mask)", "def load_mask(self, image_id):\n # If not homeobject dataset, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != 'homeobject':\n print(\n \"Warn: \\'{}\\' label not found. Processing with parent load_mask.\".format(image_info[\"source\"]))\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n class_ids = image_info['class_ids']\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])], dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n\n # modify dirt mask if it resides outside of image boundary\n rr[rr > mask.shape[0] - 1] = mask.shape[0] - 1\n cc[cc > mask.shape[1] - 1] = mask.shape[1] - 1\n\n mask[rr, cc, i] = 1\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n class_ids = np.array(class_ids, dtype=np.int32)\n # return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)\n return mask, class_ids", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n\n shapes = info['polygons']\n\n for i, p in enumerate(info['polygons']):\n shape = p['shape_attributes']['name']\n mask[:, :, i:i + 1] = self.draw_shape(mask[:, :, i:i + 1].copy(),\n shape, p, 1)\n\n # Map class names to class IDs.\n if (self.config.MODE == \"Combined\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['element_type'])\n if 'element_type' in s['region_attributes'].keys() else self.class_names.index('door') for s in shapes])\n elif (self.config.MODE == \"Separate\"):\n class_ids = np.array([self.class_names.index(s['region_attributes']['Class']) if 'Class' in s['region_attributes'].keys(\n ) else self.class_names.index('Door (Curve)') for s in shapes])\n\n return mask, class_ids.astype(np.int32)", "def load_mask(self, image_id):\n\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"pcb\":\n return super(self.__class__, self).load_mask(image_id)\n\n # convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n \n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n \n for i, p in enumerate(info[\"polygons\"]):\n # get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # return mask, and array of class IDs of each instance.\n # since we have one class ID only, we return an array of 1s\n return mask.astype(np.bool), info[\"class_ids\"]", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def getGeocodedMask ( self, image, intersects, out_path, fill=0 ):\n\n # construct filename\n _, extension = os.path.splitext( image.pathname )\n filename = os.path.basename( image.pathname )\n filename = filename.replace( extension, '-mask.tif' )\n\n # delete label pathname if exists\n label_pathname = os.path.join( out_path, filename )\n if not os.path.exists( out_path ):\n os.makedirs( out_path )\n\n # create mask with lossless compression\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create( label_pathname, \n image.cols, \n image.rows, \n 1, \n gdal.GDT_Byte, \n options=[ 'TILED=YES', 'COMPRESS=DEFLATE' ] )\n\n if ds is not None:\n\n # copy image geocoding to mask\n ds.SetProjection( image.projection )\n ds.SetGeoTransform( image.transform ) \n ds.GetRasterBand(1).Fill( fill )\n\n # add polygon(s) to new label image\n self.addPolygonsToMask( ds, intersects, 255-fill )\n ds = None\n\n return", "def test_make_mask(self):\n output_mask = footprint_mask(os.path.join(data_dir, 'sample.csv'),\n geom_col=\"PolygonWKT_Pix\")\n truth_mask = skimage.io.imread(os.path.join(data_dir,\n 'sample_fp_mask.tif'))\n\n assert np.array_equal(output_mask, truth_mask)", "def mapping_image_to_label (self, labels_df, polygons, fpath_tiff): \n \n unread_tiff = rasterio.open(fpath_tiff)\n\n #Projecting the coordinates to that CRS \n proj = Proj(init='epsg:32618')\n data = []\n labels = 
[]\n failed = []\n \n src = rasterio.open(fpath_tiff, 'r')\n outfolder = '/train/batch'\n \n print (\"Hold on tight! Mapping each image to its respective label...\")\n \n \n for num, row in labels_df.iterrows():\n try:\n \n \n roof_material_num = 0\n polygon0 = polygons [num]\n polygon0['coordinates'] = self.transforming_coordinates(polygon0['coordinates'], proj)\n masked_image, out_transform = rasterio.mask.mask(src,[polygon0], filled = True, crop=True, nodata = 0)\n img_image = reshape_as_image (masked_image)\n \n #Defining the name of the image file as \"buildingID+roofMaterial+png\" and its path \n img_path = os.path.join (outfolder, str (row['id'])+'-'+ str (row['roof_material'])+'.png')\n \n #swapping the color channels from RGB2BGR\n img_image = cv2.cvtColor (img_image, cv2.COLOR_RGB2BGR) #img_image is a numpy array\n \n #resizing the image dimensions to 128x128 to match ImageNet dimensions\n img_image = cv2.resize(img_image, (128, 128))\n \n #writing the image in the file\n #cv2.imwrite (img_path, img_image)\n # update the data and labels lists, respectively\n data.append(img_image) #data is a list\n labels.append(row['roof_material'])\n \n except Exception as e:\n print (e)\n failed.append (num)\n \n \n #print number of images we failed to crop and write \n print (\"Bad News First: Failed to write\", len(failed), \"Images.\")\n print (\"Good News: Successfully mapped\", len (data), \"Images.\")\n data = np.array(data)\n labels = np.array(labels)\n #batch = data.sample(frac=0.5, replace=False, random_state=1)\n #print(\"Size and shape of validY: {}\\n\".format(batch.shape))\n return data, labels", "def clip_image(coords_poly, fname):\n with rio.open(\"%s.tif\" % fname) as src:\n out_image, out_transform = mask.mask(src, [to_geojson(coords_poly)],\n crop=True, nodata=-9999)\n masked_image = ma.masked_equal(out_image, -9999)\n return masked_image", "def test_make_mask_w_file_and_transform(self):\n output_mask = footprint_mask(\n os.path.join(data_dir, 'geotiff_labels.geojson'),\n reference_im=os.path.join(data_dir, 'sample_geotiff.tif'),\n do_transform=True,\n out_file=os.path.join(data_dir, 'test_out.tif')\n )\n truth_mask = skimage.io.imread(\n os.path.join(data_dir, 'sample_fp_mask_from_geojson.tif')\n )\n saved_output_mask = skimage.io.imread(os.path.join(data_dir,\n 'test_out.tif'))\n\n assert np.array_equal(output_mask, truth_mask)\n assert np.array_equal(saved_output_mask, truth_mask)\n # clean up\n os.remove(os.path.join(data_dir, 'test_out.tif'))", "def generate_effective_mask(self, mask_size: tuple, polygons_ignore):\n mask = np.ones(mask_size, dtype=np.uint8)\n\n for poly in polygons_ignore:\n instance = poly.astype(np.int32).reshape(1, -1, 2)\n cv2.fillPoly(mask, instance, 0)\n\n return mask", "def mask(self, polygon: Union[Polygon, MultiPolygon], srs=\"EPSG:4326\") -> 'ImageCollection':\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': srs\n }\n }\n\n process_id = 'mask'\n\n args = {\n 'imagery': self.graph,\n 'mask_shape': geojson\n }\n\n return self.graph_add_process(process_id, args)", "def mask(self, polygon: Union[Polygon, MultiPolygon], srs=\"EPSG:4326\") -> 'ImageCollection':\n geojson = mapping(polygon)\n geojson['crs'] = {\n 'type': 'name',\n 'properties': {\n 'name': srs\n }\n }\n\n process_id = 'mask'\n\n args = {\n 'imagery': self.graph,\n 'mask_shape': geojson\n }\n\n return self.graph_add_process(process_id, args)", "def polys_to_mask(polygons, height, width):\n rle = mask_util.frPyObjects(polygons, height, 
width)\n mask = np.array(mask_util.decode(rle), dtype=np.float32)\n # Flatten in case polygons was a list\n mask = np.sum(mask, axis=2)\n mask = np.array(mask > 0, dtype=np.float32)\n return mask", "def load_mask(self, image_id):\n info = self.image_info[image_id]\n label_path = info['path']\n\n # 读取json文件\n with open(os.path.join(self.DATA_ROOT_DIR, label_path), encoding='utf-8') as json_file:\n labelmeJson = json.load(json_file)\n height = labelmeJson['imageHeight']\n width = labelmeJson['imageWidth']\n shapes = labelmeJson['shapes']\n\n count = len(shapes)\n mask = np.zeros([height, width, count], dtype=np.uint8)\n\n for i, shape in enumerate(shapes):\n mask[:, :, i] = self.shape_to_mask(mask.shape, shape['points'], shape['shape_type'])\n\n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(shape['label']) if shape['label'] in self.class_names else self.class_names.index('undefined') for shape in shapes])\n #print('class_ids:', class_ids)\n #input()\n return mask.astype(np.bool), class_ids.astype(np.int32)", "def create_mask(frame):\n \n # detect ridges\n ridges = enhance_ridges(frame)\n\n # threshold ridge image\n thresh = filters.threshold_otsu(ridges)\n thresh_factor = 1.1\n prominent_ridges = ridges > thresh_factor*thresh\n prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=128)\n\n # the mask contains the prominent ridges\n mask = morphology.convex_hull_image(prominent_ridges)\n mask = morphology.binary_erosion(mask, disk(10))\n return mask", "def load_mask(self, image_id):\n # If not a balloon dataset image, delegate to parent class.\n image_info = self.image_info[image_id]\n if image_info[\"source\"] != \"glomerulus\":\n return super(self.__class__, self).load_mask(image_id)\n\n # Convert polygons to a bitmap mask of shape\n # [height, width, instance_count]\n info = self.image_info[image_id]\n mask = np.zeros([info[\"height\"], info[\"width\"], len(info[\"polygons\"])],\n dtype=np.uint8)\n for i, p in enumerate(info[\"polygons\"]):\n # Get indexes of pixels inside the polygon and set them to 1\n rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])\n mask[rr, cc, i] = 1\n\n # Return mask, and array of class IDs of each instance. 
Since we have\n # one class ID only, we return an array of 1s\n return mask, np.ones([mask.shape[-1]], dtype=np.int32)", "def maskall(image_input_path,shapeloc_input_path,txtloc_input_path, comploc_input_path, output_path):\n print(\"========== mask shapes and text ==============\")\n if os.path.exists(output_path):\n shutil.rmtree(output_path)\n os.makedirs(output_path)\n img_fn_list = get_images(image_input_path)\n wl = 4\n for img_fn in img_fn_list:\n print(img_fn)\n img_raw = cv2.imread(img_fn)\n img_gray = cv2.cvtColor(img_raw, cv2.COLOR_BGR2GRAY)\n img_binary = cv2.threshold(img_gray, 128, 255, cv2.THRESH_BINARY)[1]\n h, w = img_binary.shape[:2]\n img_blank = np.ones(shape=[h, w], dtype=np.uint8)*255\n\n with open(os.path.join (shapeloc_input_path, os.path.splitext(os.path.basename(img_fn))[0]) + \"_loc.txt\", 'r') as file1:\n for line in file1:\n cnt = [i for i in line.split()]\n loc = [int(i) for i in cnt[1:]]\n cv2.drawContours(img_blank, [np.array(loc).reshape((-1,1,2))], 0, (0), thickness = -1, lineType=8)\n\n with open(os.path.join (comploc_input_path, os.path.splitext(os.path.basename(img_fn))[0]) + \"_loc.txt\", 'r') as file2:\n for line in file2:\n cnt = [i for i in line.split()]\n loc = [int(i) for i in cnt[1:]]\n cv2.drawContours(img_blank, [np.array(loc).reshape((-1,1,2))], 0, (0), thickness = -1, lineType=8)\n\n with open(os.path.join (txtloc_input_path, os.path.splitext(os.path.basename(img_fn))[0]) + \"_loc.txt\", 'r') as file3:\n for line in file3:\n if bool(line and line.strip()):\n cnt = [i for i in line.split()] \n loc = [int(i) for i in cnt[1:]]\n # crop image with rectangle box and save\n x0,y0,w0,h0 = cv2.boundingRect(np.array(loc[:8]).astype(np.int32).reshape((-1, 2)))\n img_crop = img_blank[y0:y0+h0,x0:x0+w0].copy()\n hc, wc = img_crop.shape[:2]\n countzero = hc*wc - cv2.countNonZero(img_crop)\n if countzero *1.0 / (hc*wc) <= 0.25:\n # if new area is less than 25% of overlap with other proposed masked area\n cv2.drawContours(img_blank, [np.array(loc[:8]).astype(np.int32).reshape((-1, 1, 2))], 0, (0), thickness=-1, lineType=8)\n \n #img_mask = cv2.bitwise_and(img_blank, img_binary, mask = None)\n img_mask = cv2.bitwise_or(cv2.bitwise_not(img_blank), img_binary, mask = None)\n cv2.imwrite(os.path.join(output_path, os.path.basename(img_fn)), img_mask)", "def gen_background_mask( img ):\n\t\t\n\tif len( img.shape ) == 3: t = img[0]\n\telif len( img.shape ) == 2: t = img\n\n\tmask = img > filters.threshold_li(t)\n\n\treturn mask", "def json2mask(txt, mattr, filepath):\n img = np.zeros((2048, 2448, 3),\n dtype=np.uint8)\n info = json.loads(txt)['codes']\n for code in info:\n barcode_area = (slice(code['y0'], code['y1']),\n slice(code['x0'], code['x1']), slice(0, 3))\n leny = barcode_area[0].stop - barcode_area[0].start\n lenx = barcode_area[1].stop - barcode_area[1].start\n img[barcode_area] = 1\n if leny * lenx > (2048 * 2448) / 16: # if barcodearea larger than a\n # 16th of the original image\n return None\n return img", "def get_regions_mask(self, input):", "def geojson_to_polygons_groundtruth(js_):\n\n burnt_polys = []\n building_polys = []\n for i, feat in enumerate(js_['features']):\n o = {\n \"coordinates\": feat['geometry']['coordinates'],\n \"type\": feat['geometry']['type']\n }\n s = json.dumps(o)\n\n # convert to geojson.geometry.Polygon\n g1 = geojson.loads(s)\n\n # covert to shapely.geometry.polygon.Polygon\n g2 = shape(g1)\n\n if feat['properties']['color'] == 'red': # red for the burnt region\n burnt_polys.append(g2)\n else: # for the building poly\n 
if feat['properties']['Burnt_Label']:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'blue',\n True]]) # mark building polygons as 'blue' for burnt for now\n else:\n building_polys.append([g2, [feat['properties']['BuildingID'], 'yellow',\n False]]) # mark building polygons as 'yellow' for non-burnt for now\n return burnt_polys, building_polys" ]
[ "0.6391694", "0.6338343", "0.6338343", "0.62419814", "0.6236292", "0.616786", "0.61537826", "0.61002177", "0.603093", "0.6000926", "0.60001945", "0.5966524", "0.5924841", "0.5906967", "0.5888724", "0.58342755", "0.5831503", "0.58054227", "0.57415473", "0.5734043", "0.5734043", "0.5724179", "0.5721656", "0.5718938", "0.5718646", "0.5705016", "0.5701449", "0.5689511", "0.56842893", "0.56825256" ]
0.7429962
0
This function is used to convert image files and their respective polygon training masks into numpy arrays, so as to facilitate their use for model training.
def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list): img_ms_files = glob.glob(DATA_DIR + '\\Train_MS' + '\\Train_*.tif') img_pan_files = glob.glob(DATA_DIR + '\\Train_Pan' + '\\Train_*.tif') polygon_files = glob.glob(DATA_DIR + '\\Train_Polygons' + '\\Train_*.geojson') img_ms_array_list = [] img_pan_array_list = [] mask_array_list = [] for file in range(len(img_ms_files)): with rasterio.open(img_ms_files[file]) as f: metadata = f.profile img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0]) with rasterio.open(img_pan_files[file]) as g: metadata_pan = g.profile img_pan = np.expand_dims(g.read(1), axis = 2) ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0] if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0: raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio))) mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list) img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, img_height_size, img_width_size) img_ms_array_list.append(img_ms_array) img_pan_array_list.append(img_pan_array) mask_array_list.append(mask_array) img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0) img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0) mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list)) return img_ms_full_array, img_pan_full_array, mask_full_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images_from_folder(folder, n_cases,patch_size, mask_path, mask_type, mask_name,normalize=False, imrotate=False):\n\n# # Initialize the arrays:\n# if imrotate: # number of images is 4 * n_im\n# bigy = np.empty((n_im * 4, 64, 64))\n# bigx = np.empty((n_im * 4, 64, 64, 2))\n# else:\n# bigy = np.empty((n_im, 64, 64))\n# bigx = np.empty((n_im, 64, 64, 2))\n\n# im = 0 # image counter\n bigy = []\n filenames = os.listdir(folder)\n\n for filename in filenames[n_cases[0]:n_cases[1]]:\n if not filename.startswith('.'):\n temp = loadmat(os.path.join(folder, filename))['res']\n print temp.shape\n # Clean the STONE sense recon data\n row, col = temp.shape\n temp = np.reshape(temp, (row, col, -1))\n #valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0)\n #final_images = temp[:,:,valid_mask]\n final_images = temp\n \n# # Resize images\n #final_images = np.abs(final_images)\n final_images_resized = np.zeros((patch_size,patch_size,final_images.shape[2]))\n for i in range(final_images.shape[2]):\n final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (patch_size,patch_size))\n \n# # Only take a small part of the data\n# final_images = final_images[140:180,140:180,:]\n \n# # Convert to abs values\n# final_images = np.abs(final_images)\n# \n# # Normalize based on single patient case\n# final_images = (final_images - np.mean(final_images)) / np.std(final_images)\n \n# bigy_temp = cv2.imread(os.path.join(folder, filename),\n# cv2.IMREAD_GRAYSCALE)\n \n \n bigy.append(final_images_resized)\n \n bigy = np.asarray(bigy)\n cases, row, col, imgs = bigy.shape\n bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1))\n \n # convert to k-space\n imgs, row, col = bigy.shape\n bigx = np.empty((imgs, row, col, 2))\n mask = read_mask(mask_path=mask_path,mask_type=mask_type,mask_name=mask_name,patch_size=patch_size,show_image=False)\n for i in range(imgs):\n bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]),mask)\n \n # convert bigx from complex to abs values\n bigy = np.abs(bigy)\n \n# im += 1\n# if imrotate:\n# for angle in [90, 180, 270]:\n# bigy_rot = im_rotate(bigy_temp, angle)\n# bigx_rot = create_x(bigy_rot, normalize)\n# bigy[im, :, :] = bigy_rot\n# bigx[im, :, :, :] = bigx_rot\n# im += 1\n\n# if imrotate:\n# if im > (n_im * 4 - 1): # how many images to load\n# break\n# else:\n# if im > (n_im - 1): # how many images to load\n# break\n\n# if normalize:\n# bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx))\n\n return bigx, bigy", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return 
np.array(images) # todo back to array\n return images", "def read_training_pixels(image_path, label_path):\n\n if io_function.is_file_exist(image_path) is False or io_function.is_file_exist(label_path) is False:\n return False\n\n # check: they are from the same polygons\n polygon_index_img = os.path.basename(image_path).split('_')[-3]\n polygon_index_label = os.path.basename(label_path).split('_')[-3]\n if polygon_index_img != polygon_index_label:\n raise ValueError(\"%s and %s are not from the same training polygons\" % (image_path, label_path))\n\n with rasterio.open(image_path) as img_obj:\n # read the all bands\n indexes = img_obj.indexes\n nbands = len(indexes)\n img_data = img_obj.read(indexes)\n\n with rasterio.open(label_path) as img_obj:\n # read the all bands (only have one band)\n indexes = img_obj.indexes\n if len(indexes) != 1:\n raise ValueError('error, the label should only have one band')\n\n label_data = img_obj.read(indexes)\n\n # check the size\n # print(img_data.shape)\n # print(label_data.shape)\n if img_data.shape[1] != label_data.shape[1] or img_data.shape[2] != label_data.shape[2]:\n raise ValueError('the image and label have different size')\n\n X_arr = img_data.reshape(nbands, -1)\n y_arr = label_data.reshape(-1)\n\n basic.outputlogMessage(str(X_arr.shape))\n basic.outputlogMessage(str(y_arr.shape))\n # sys.exit(1)\n\n return X_arr, y_arr", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def create_masks(image_folder: str, annotation_path: str, 
outpath: str):\n\n train_reader = ReaderAnnotation(annotation_path)\n\n all_images = os.listdir(image_folder)\n annotated_images = train_reader.annotation.keys()\n\n creator = MaskCreator()\n\n for key in annotated_images:\n file_extension = \".JPG\"\n if not os.path.isfile(\n os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n ):\n file_extension = file_extension.lower()\n\n image_name = os.path.join(\n image_folder,\n key.split(\".\")[0] + file_extension,\n )\n print(image_name)\n\n out_image_path = os.path.join(outpath, os.path.split(image_name)[-1])\n assert os.path.exists(out_image_path), \"Out image path doesn't exist\"\n\n image = plt.imread(image_name)\n h, w, c = image.shape\n\n regions = train_reader.get(key)[\"regions\"]\n # less than minimal distance\n radius = int(train_reader.get_radius_min(regions=regions) * 0.9)\n\n masks = []\n for _, center in regions.items():\n masks.append(\n creator.create_circular_mask(\n h=h,\n w=w,\n center=(\n int(center[\"shape_attributes\"][\"cx\"]),\n int(center[\"shape_attributes\"][\"cy\"]),\n ),\n radius=radius,\n )\n )\n\n if len(masks) > 50:\n masks = [creator._unite_masks(masks)]\n\n if masks:\n creator.visualize(\n image=image,\n masks=masks,\n filename=out_image_path,\n use_image=False,\n )\n else:\n creator._create_empty_mask(image=image, filename=out_image_path)\n\n print(\"Empty images:\")\n for empty_image in list(set(all_images) - set(annotated_images)):\n if os.path.exists(out_image_path):\n continue\n empty_image = os.path.join(image_folder, empty_image)\n print(empty_image)\n image = plt.imread(empty_image)\n creator._create_empty_mask(\n image=image,\n filename=os.path.join(\n outpath,\n os.path.split(empty_image)[-1],\n ),\n )", "def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y", "def training_mask_generation(img_pan_filename, input_geojson_filename, labels):\r\n with rasterio.open(img_pan_filename) as f:\r\n metadata_pan = f.profile\r\n img_pan = f.read(1)\r\n \r\n mask = np.zeros((img_pan.shape[0], img_pan.shape[1]))\r\n \r\n xres = metadata_pan['transform'][0]\r\n ulx = metadata_pan['transform'][2]\r\n yres = metadata_pan['transform'][4]\r\n uly = metadata_pan['transform'][5]\r\n \r\n lrx = ulx + (metadata_pan['width'] * xres) \r\n lry = uly - (metadata_pan['height'] * abs(yres))\r\n\r\n polygons = json.load(open(input_geojson_filename))\r\n \r\n for polygon in range(len(polygons['features'])):\r\n layer_num = labels.index(str(polygons['features'][polygon]['properties']['Label']))\r\n coords = np.array(polygons['features'][polygon]['geometry']['coordinates'][0][0]) \r\n xf = ((metadata_pan['width']) ** 2 / (metadata_pan['width'] + 1)) / (lrx - ulx)\r\n yf = ((metadata_pan['height']) ** 2 / (metadata_pan['height'] + 1)) / (lry - uly)\r\n coords[:, 1] = yf * (coords[:, 1] - uly)\r\n coords[:, 0] = xf * (coords[:, 0] - ulx) \r\n position = np.round(coords).astype(np.int32)\r\n cv2.fillConvexPoly(mask, 
position, layer_num)\r\n \r\n return np.expand_dims(mask, axis = 2)", "def get_data(path):\n all_images_as_array=[]\n label=[]\n for filename in os.listdir(path):\n try:\n if re.match(r'positive',filename):\n label.append(1)\n else:\n label.append(0)\n img=cv2.imread(path + filename)\n (b, g, r)=cv2.split(img)\n img=cv2.merge([r,g,b])\n np_array = np.asarray(img)\n l,b,c = np_array.shape\n np_array = np_array.reshape(l*b*c,)\n all_images_as_array.append(np_array)\n except:\n continue\n return np.array(all_images_as_array), np.array(label)", "def create_GT_masks(root_dir, background_dir, intrinsic_matrix,classes):\n list_all_images = load_obj(root_dir + \"all_images_adr\")\n training_images_idx = load_obj(root_dir + \"train_images_indices\")\n for i in range(len(training_images_idx)):\n img_adr = list_all_images[training_images_idx[i]]\n label = os.path.split(os.path.split(os.path.dirname(img_adr))[0])[1]\n regex = re.compile(r'\\d+')\n idx = regex.findall(os.path.split(img_adr)[1])[0]\n\n if i % 1000 == 0:\n print(str(i) + \"/\" + str(len(training_images_idx)) + \" finished!\")\n\n image = cv2.imread(img_adr)\n ID_mask = np.zeros((image.shape[0], image.shape[1]))\n U_mask = np.zeros((image.shape[0], image.shape[1]))\n V_mask = np.zeros((image.shape[0], image.shape[1]))\n\n ID_mask_file = root_dir + label + \\\n \"/ground_truth/IDmasks/color\" + str(idx) + \".png\"\n U_mask_file = root_dir + label + \\\n \"/ground_truth/Umasks/color\" + str(idx) + \".png\"\n V_mask_file = root_dir + label + \\\n \"/ground_truth/Vmasks/color\" + str(idx) + \".png\"\n\n tra_adr = root_dir + label + \"/data/tra\" + str(idx) + \".tra\"\n rot_adr = root_dir + label + \"/data/rot\" + str(idx) + \".rot\"\n rigid_transformation = get_rot_tra(rot_adr, tra_adr)\n\n # Read point Point Cloud Data\n ptcld_file = root_dir + label + \"/object.xyz\"\n pt_cld_data = np.loadtxt(ptcld_file, skiprows=1, usecols=(0, 1, 2))\n ones = np.ones((pt_cld_data.shape[0], 1))\n homogenous_coordinate = np.append(pt_cld_data[:, :3], ones, axis=1)\n\n # Perspective Projection to obtain 2D coordinates for masks\n homogenous_2D = intrinsic_matrix @ (rigid_transformation @ homogenous_coordinate.T)\n coord_2D = homogenous_2D[:2, :] / homogenous_2D[2, :]\n coord_2D = ((np.floor(coord_2D)).T).astype(int)\n x_2d = np.clip(coord_2D[:, 0], 0, 639)\n y_2d = np.clip(coord_2D[:, 1], 0, 479)\n ID_mask[y_2d, x_2d] = classes[label]\n\n if i % 100 != 0: # change background for every 99/100 images\n background_img_adr = background_dir + random.choice(os.listdir(background_dir))\n background_img = cv2.imread(background_img_adr)\n background_img = cv2.resize(background_img, (image.shape[1], image.shape[0]), interpolation=cv2.INTER_AREA)\n background_img[y_2d, x_2d, :] = image[y_2d, x_2d, :]\n background_adr = root_dir + label + \"/changed_background/color\" + str(idx) + \".png\"\n mpimg.imsave(background_adr, background_img)\n\n # Generate Ground Truth UV Maps\n centre = np.mean(pt_cld_data, axis=0)\n length = np.sqrt((centre[0]-pt_cld_data[:, 0])**2 + (centre[1] -\n pt_cld_data[:, 1])**2 + (centre[2]-pt_cld_data[:, 2])**2)\n unit_vector = [(pt_cld_data[:, 0]-centre[0])/length, (pt_cld_data[:,\n 1]-centre[1])/length, (pt_cld_data[:, 2]-centre[2])/length]\n U = 0.5 + (np.arctan2(unit_vector[2], unit_vector[0])/(2*np.pi))\n V = 0.5 - (np.arcsin(unit_vector[1])/np.pi)\n U_mask[y_2d, x_2d] = U\n V_mask[y_2d, x_2d] = V\n\n # Saving ID, U and V masks after using the fill holes function\n ID_mask, U_mask, V_mask = fill_holes(ID_mask, U_mask, V_mask)\n 
cv2.imwrite(ID_mask_file, ID_mask)\n mpimg.imsave(U_mask_file, U_mask, cmap='gray')\n mpimg.imsave(V_mask_file, V_mask, cmap='gray')", "def _preprocessing(self, path: str) -> np.array:\n if Checker.check_input_type_bool(path, 'nii'):\n image = sitk.ReadImage(path)\n self.space = image.GetSpacing()\n image = sitk.GetArrayFromImage(image).astype('float32')\n\n elif Checker.check_input_type_bool(path, 'npy'):\n image = np.load(path)\n self.space = [1., 1., 1.]\n warnings.warn(\n '.npy is not recommended as an image format.'\n 'Since spacing cannot be identified from .npy, spacing is set as [1., 1., 1.].', UserWarning)\n\n elif Checker.check_input_type_bool(path, 'dcm'):\n raise ValueError(\n '.dcm is not supported.'\n 'Please convert dcm dummies to nii format.')\n\n else:\n input_ext = path.split('.')[-1]\n raise ValueError(\n f'.{input_ext} format is not supported.')\n\n self.img_shape = image.shape\n\n # normalize\n windowing_range = [-40., 120.]\n windowing_min = windowing_range[0] - windowing_range[1] // 2\n windowing_max = windowing_range[0] + windowing_range[1] // 2\n image = ndimage.zoom(image, [.5, .5, .5], order=1, mode='constant')\n image = np.clip(image, windowing_min, windowing_max)\n image = (image - windowing_min) / (windowing_max - windowing_min)\n image = image[np.newaxis, ..., np.newaxis]\n return image", "def loadData(image, mask, im_shape):\r\n X, y = [], []\r\n\r\n img = transform.resize(image, im_shape, mode='constant')\r\n img = np.expand_dims(img, -1)\r\n mask = transform.resize(mask, im_shape, mode='constant')\r\n mask = np.expand_dims(mask, -1)\r\n X.append(img)\r\n y.append(mask)\r\n X = np.array(X)\r\n y = np.array(y)\r\n X -= X.mean()\r\n X /= X.std()\r\n\r\n return X, y", "def preprocess_image(image: Image) -> np.ndarray:\n return np.array(image.convert('L'))", "def get_data(folder):\n X = []\n y = []\n\n for seismic_type in os.listdir(folder):\n if not seismic_type.startswith('.'):\n if seismic_type in ['Class1']:\n label = '0'\n else:\n label = '1'\n for image_filename in os.listdir(folder + seismic_type):\n img_file = cv2.imread(folder + seismic_type + '/' + image_filename)\n if img_file is not None:\n # Downsample the image to 120, 160, 3\n #img_file = scipy.misc.imresize(arr=img_file, size=(120, 160, 3))\n img_arr = np.asarray(img_file)\n # img_arr = image.img_to_array(img_arr)\n X.append(img_arr)\n y.append(label)\n X = np.asarray(X)\n y = np.asarray(y)\n return X,y", "def convert_masks():\n for fn in sorted(glob.glob('../input/extra_data/*/masks/*.png')):\n print(fn)\n img = skimage.io.imread(fn)\n # utils.print_stats('mask', img)\n img[img > 0] = 255\n skimage.io.imsave(fn, img)", "def extract_images(f):\r\n if f == \"train\":\r\n D_train = numpy_array_stick(\"/home/luzihao/xiaoluo/xiyuan/CNN/standard2/\",10)\r\n #D_train = numpy_array_stick(\"D:\\\\FDU\\\\Template\\\\FDUROP\\\\face_detection_and_recognition\\\\standard2\\\\\",10)\r\n #\"X\" means data,\"Y\" means label\r\n Y_train = D_train[1]\r\n X_train = D_train[0].reshape(people*10,80,80,1)\r\n return X_train,Y_train\r\n elif f == \"test\":\r\n D_test = numpy_array_stick(\"/home/luzihao/xiaoluo/xiyuan/CNN/standard2/\",1)\r\n #D_test = numpy_array_stick(\"D:\\\\FDU\\\\Template\\\\FDUROP\\\\face_detection_and_recognition\\\\standard2\\\\\",1)\r\n #\"X\" means data,\"Y\" means label\r\n Y_test = D_test[1]\r\n X_test = D_test[0].reshape(people*1,80,80,1)\r\n return X_test,Y_test", "def load_Data(img_path, mask_path):\n image_files = glob(os.path.join(img_path, '*.*'))\n mask_files = 
glob(os.path.join(mask_path, '*.*'))\n image_files.sort()\n mask_files.sort()\n images_list = []\n masks_list = []\n\n for _ in range(len(image_files)):\n image = cv2.imread(image_files[_])\n mask = cv2.imread(mask_files[_])\n images_list.append(image)\n masks_list.append(mask)\n\n return images_list, masks_list", "def create_images_as_numpy(idx, out_dir, model_outs, K):\n masks = model_outs['pis'].exp()\n sub_images = model_outs['x_loc']\n\n images = []\n all_masks = []\n all_subis = []\n for i in range(K):\n images += [masks[i,0] * sub_images[i,0]]\n all_masks += [masks[i,0]]\n all_subis += [sub_images[i,0]]\n\n images = torch.stack(images)\n all_masks = torch.stack(all_masks)\n all_subis = torch.stack(all_subis)\n whole_image = images.sum(0)\n\n all_masks_grid = torchvision.utils.make_grid(all_masks, nrow=K)\n all_subis_grid = torchvision.utils.make_grid(all_subis, nrow=K)\n all_images_grid = torchvision.utils.make_grid(images, nrow=K)\n \n filepath = out_dir / f'whole_image_{idx}'\n np.save(filepath, whole_image.data.cpu().numpy())\n filepath = out_dir / f'all_images_{idx}'\n np.save(filepath, all_images_grid.data.cpu().numpy())\n filepath = out_dir / f'masks_{idx}'\n np.save(filepath, all_masks_grid.data.cpu().numpy())\n filepath = out_dir / f'sub_images_{idx}'\n np.save(filepath, all_subis_grid.data.cpu().numpy())", "def _convert_images(raw):\n # Convert the raw images from the data-files to floating-points.\n #raw_float = np.array(raw, dtype=float) / 255.0\n\n # Reshape the array to 4-dimensions.\n images = raw.reshape([-1, num_channels, img_size, img_size])\n\n # Reorder the indices of the array.\n images = images.transpose([0, 2, 3, 1])\n\n return images", "def _populate_mask_data(self, filename: str) -> None:\n if self.seg_images.get(filename) is None:\n return None\n\n mask = cv2.imread(self.seg_targets[filename])\n mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)\n\n # convert pixel masks to multidimentional\n height, width = mask.shape[:2]\n segmentation_mask = np.zeros((height, width, len(VOC_COLORMAP)), dtype=np.float32)\n for label_index, label in enumerate(VOC_COLORMAP):\n segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(float)\n\n return segmentation_mask", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' 
+ image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def load_dataset(image_home, mask_home, patient_list, \n size = 512, \n downsample = 0.5, \n overlap = 1.5, \n verbose=False):\n\n image_list = np.concatenate([sorted(glob.glob(f'{image_home}/{p}/*')) for p in patient_list])\n mask_list = np.concatenate([sorted(glob.glob(f'{mask_home}/{p}/*')) for p in patient_list])\n\n if verbose:\n for i, (im, m) in enumerate(zip(image_list, mask_list)):\n print(i, im, m)\n\n x = []\n y = [] \n\n for im, m in zip(image_list, mask_list):\n image = cv2.imread(im)[:,:,::-1]\n mask = cv2.imread(m, -1)\n mask = squash_labels(mask)\n \n image = cv2.resize(image, dsize=(0,0), fx=downsample, fy=downsample)\n mask = cv2.resize(mask, dsize=(0,0), fx=downsample, fy=downsample,\n interpolation=cv2.INTER_NEAREST)\n\n # assert (image.shape == mask.shape).all()\n split_x , split_y = split(image, mask, int(size * downsample), overlap)\n\n x.append(split_x)\n y.append(split_y)\n\n\n x = np.concatenate(x, axis=0)\n y = np.concatenate(y, axis=0)\n y = np.eye(N=y.shape[0], M=4)[y]\n\n shuffle = np.arange(x.shape[0]).astype(np.int)\n np.random.shuffle(shuffle)\n x = x[shuffle, :]\n y = y[shuffle, :]\n\n x = (x / 255.).astype(np.float32)\n\n print('split_datasets returning x:', x.shape, x.dtype, x.min(), x.max())\n print('split_datasets returning y:', y.shape, y.dtype)\n return x, y", "def load_images(self, image_path):\n X_train = []\n\n # Load all files from the image path using Image.open.\n for i in recursive_list(image_path):\n # Open images as ???\n img = Image.open(i)\n # Convert to NP array.\n img = np.asarray(img)\n # Append them into higher order array.\n if img.shape == (128, 128, 3):\n X_train.append(img)\n\n # return all the images concatenated as a 4D array\n return np.asarray(X_train)", "def get_data(input_path):\n all_imgs = []\n classes_count = {}\n class_mapping = {}\n\n # parsing Flag\n visualise = False\n\n # MSCOCO directory\n data_path = input_path\n\n print('Parsing annotation files')\n annot_path = os.path.join(data_path, 'annotations_bbox')\n imgs_path = os.path.join(data_path, 'images')\n\n # images directory (train, val, trainval, test)\n imgsets_path_trainval = os.path.join(data_path, 'images', 'trainval.txt')\n imgsets_path_train = os.path.join(data_path, 'images', 'train.txt')\n imgsets_path_val = os.path.join(data_path, 'images', 'val.txt')\n imgsets_path_test = os.path.join(data_path, 'images', 'test.txt')\n\n trainval_files = []\n train_files = []\n val_files = []\n test_files = []\n\n with open(imgsets_path_trainval) as f:\n for line in f:\n trainval_files.append(line.strip())\n\n with open(imgsets_path_train) as f:\n for line in f:\n train_files.append(line.strip())\n\n with open(imgsets_path_val) as f:\n for line in f:\n val_files.append(line.strip())\n\n # test-set (default) not included in MSCOCO\n if os.path.isfile(imgsets_path_test):\n with open(imgsets_path_test) as f:\n for line in f:\n test_files.append(line.strip())\n\n # annotation read\n annots_train = json.load(open(os.path.join(annot_path, 
'bbox_train2017.json'), 'r'))\n annots_val = json.load(open(os.path.join(annot_path, 'bbox_val2017.json'), 'r'))\n annots = dict()\n annots['train'] = annots_train\n annots['val'] = annots_val\n\n for part in ['train', 'val']:\n annots_keys = tqdm(annots[part].keys())\n for img_name in annots_keys:\n annots_keys.set_description(\"Processing %s\" % img_name)\n for bbox in annots[part][img_name]:\n class_name = bbox['label'].replace(' ', '')\n all_imgs.append({\n \"filepath\": os.path.join(data_path, 'images', '%s2017' % part, \"%s.jpg\" % img_name),\n \"width\": None,\n \"height\": None,\n \"bboxes\": [{\n \"class\": class_name,\n \"x1\": bbox['bbox']['x1'],\n \"y1\": bbox['bbox']['x2'],\n \"x2\": bbox['bbox']['y1'],\n \"y2\": bbox['bbox']['y2'],\n \"difficult\": False\n }],\n \"image_id\": img_name,\n \"imageset\": part\n })\n if class_name not in classes_count:\n classes_count[class_name] = 1\n else:\n classes_count[class_name] += 1\n if class_name not in class_mapping:\n class_mapping[class_name] = len(class_mapping)\n\n # visualise bounding boxes\n if visualise:\n img = cv2.imread(annotation_data['filepath'])\n for bbox in annotation_data['bboxes']:\n cv2.rectangle(img, (bbox['x1'], bbox['y1']), (bbox['x2'], bbox['y2']), (0, 0, 255))\n cv2.imshow('img', img)\n print(annotation_data['imageset'])\n cv2.waitKey(0)\n\n return all_imgs, classes_count, class_mapping", "def load_images(self, target = \"standard\", path=OMNIGLOT_DATAPATH):\n X = []\n Y = []\n folderName = {}\n if target == \"standard\":\n trainFolders = [\"images_background\"]\n testFolders = [\"images_evaluation\"]\n elif target == \"minimal\":\n trainFolders = [\"images_background_small1\", \"images_background_small2\"]\n testFolders = [\"images_evaluation\"]\n \n if self.train:\n for trainFolder in trainFolders:\n folderPath = os.path.join(path, trainFolder)\n imgAllCount = 0 # this is counted for the whole images in all alphabet\n chaAllCount = 0 # this is counted for the whole characters in all alphabet\n\n for alphabet in sorted(os.listdir(folderPath)):\n alphabetPath = os.path.join(folderPath, alphabet)\n folderName[alphabet] = {'totalChar': 0, 'charIndex': [], 'totalImg': 0, 'imgIndex': []}\n \n imgAlphabetCount = 0 # this is counted for the number of images in this alphabet\n chaAlphabetCount = 0 # this is counted for the number of character in this alphabet\n\n folderName[alphabet]['charIndex'].append(chaAllCount)\n folderName[alphabet]['imgIndex'].append(imgAllCount)\n \n for letter in sorted(os.listdir(alphabetPath)):\n letterPath = os.path.join(alphabetPath, letter)\n \n for letterImage in os.listdir(letterPath):\n imagePath = os.path.join(letterPath, letterImage)\n image = mpimg.imread(imagePath)\n X.append(image)\n Y.append(chaAllCount)\n \n imgAlphabetCount += 1\n imgAllCount += 1\n\n chaAlphabetCount += 1\n chaAllCount += 1\n \n folderName[alphabet]['totalChar'] = chaAlphabetCount\n folderName[alphabet]['totalImg'] = imgAlphabetCount\n folderName[alphabet]['charIndex'].append(chaAllCount-1)\n folderName[alphabet]['imgIndex'].append(imgAllCount-1)\n \n X = np.stack(X) \n X = X.reshape(-1, IMAGES_PER_CHARACTER, X.shape[1], X.shape[2])\n return X, np.stack(Y), folderName", "def Read_Raw_Images(path_data,path_labels):\n \n data = skimage.io.imread(path_data).astype(np.float32)\n for i in range(data.shape[0]):\n data[i,...] 
= skimage.exposure.rescale_intensity(data[i,...], out_range=(0,1))\n data_labels = skimage.io.imread(path_labels) > 0\n \n training_data=data[0:25,:,:]\n training_labels=data_labels[0:25,:,:]\n \n testing_data=data[25:data.shape[0],:,:]\n testing_labels=data_labels[25:data.shape[0],:,:]\n \n np.save(\"data.npy\",training_data)\n np.save(\"labels.npy\",training_labels)\n np.save(\"data_validation.npy\",testing_data)\n np.save(\"labels_validation.npy\",testing_labels)\n \n return()", "def _preprocess(self):\n print(\"Note: if root path is changed, the previously generated json files need to be re-generated (delete them first)\")\n if osp.exists(self.imgs_labeled_dir) and \\\n osp.exists(self.imgs_detected_dir) and \\\n osp.exists(self.split_classic_det_json_path) and \\\n osp.exists(self.split_classic_lab_json_path) and \\\n osp.exists(self.split_new_det_json_path) and \\\n osp.exists(self.split_new_lab_json_path):\n return\n\n mkdir_if_missing(self.imgs_detected_dir)\n mkdir_if_missing(self.imgs_labeled_dir)\n\n print(\"Extract image data from {} and save as png\".format(self.raw_mat_path))\n mat = h5py.File(self.raw_mat_path, 'r')\n\n def _deref(ref):\n return mat[ref][:].T\n\n def _process_images(img_refs, campid, pid, save_dir):\n img_paths = [] # Note: some persons only have images for one view\n for imgid, img_ref in enumerate(img_refs):\n img = _deref(img_ref)\n # skip empty cell\n if img.size == 0 or img.ndim < 3: continue\n # images are saved with the following format, index-1 (ensure uniqueness)\n # campid: index of camera pair (1-5)\n # pid: index of person in 'campid'-th camera pair\n # viewid: index of view, {1, 2}\n # imgid: index of image, (1-10)\n viewid = 1 if imgid < 5 else 2\n img_name = '{:01d}_{:03d}_{:01d}_{:02d}.png'.format(campid+1, pid+1, viewid, imgid+1)\n img_path = osp.join(save_dir, img_name)\n imageio.imwrite(img_path, img)\n img_paths.append(img_path)\n return img_paths\n\n def _extract_img(name):\n print(\"Processing {} images (extract and save) ...\".format(name))\n meta_data = []\n imgs_dir = self.imgs_detected_dir if name == 'detected' else self.imgs_labeled_dir\n for campid, camp_ref in enumerate(mat[name][0]):\n camp = _deref(camp_ref)\n num_pids = camp.shape[0]\n for pid in range(num_pids):\n img_paths = _process_images(camp[pid,:], campid, pid, imgs_dir)\n assert len(img_paths) > 0, \"campid{}-pid{} has no images\".format(campid, pid)\n meta_data.append((campid+1, pid+1, img_paths))\n print(\"done camera pair {} with {} identities\".format(campid+1, num_pids))\n return meta_data\n\n meta_detected = _extract_img('detected')\n meta_labeled = _extract_img('labeled')\n\n def _extract_classic_split(meta_data, test_split):\n train, test = [], []\n num_train_pids, num_test_pids = 0, 0\n num_train_imgs, num_test_imgs = 0, 0\n for i, (campid, pid, img_paths) in enumerate(meta_data):\n \n if [campid, pid] in test_split:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n test.append((img_path, num_test_pids, camid))\n num_test_pids += 1\n num_test_imgs += len(img_paths)\n else:\n for img_path in img_paths:\n camid = int(osp.basename(img_path).split('_')[2])\n train.append((img_path, num_train_pids, camid))\n num_train_pids += 1\n num_train_imgs += len(img_paths)\n return train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs\n\n print(\"Creating classic splits (# = 20) ...\")\n splits_classic_det, splits_classic_lab = [], []\n for split_ref in mat['testsets'][0]:\n test_split = _deref(split_ref).tolist()\n\n # 
create split for detected images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_detected, test_split)\n splits_classic_det.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n\n # create split for labeled images\n train, num_train_pids, num_train_imgs, test, num_test_pids, num_test_imgs = \\\n _extract_classic_split(meta_labeled, test_split)\n splits_classic_lab.append({\n 'train': train, 'query': test, 'gallery': test,\n 'num_train_pids': num_train_pids, 'num_train_imgs': num_train_imgs,\n 'num_query_pids': num_test_pids, 'num_query_imgs': num_test_imgs,\n 'num_gallery_pids': num_test_pids, 'num_gallery_imgs': num_test_imgs,\n })\n \n write_json(splits_classic_det, self.split_classic_det_json_path)\n write_json(splits_classic_lab, self.split_classic_lab_json_path)\n\n def _extract_set(filelist, pids, pid2label, idxs, img_dir, relabel):\n tmp_set = []\n unique_pids = set()\n for idx in idxs:\n img_name = filelist[idx][0]\n camid = int(img_name.split('_')[2])\n pid = pids[idx]\n if relabel: pid = pid2label[pid]\n img_path = osp.join(img_dir, img_name)\n tmp_set.append((img_path, int(pid), camid))\n unique_pids.add(pid)\n return tmp_set, len(unique_pids), len(idxs)\n\n def _extract_new_split(split_dict, img_dir):\n train_idxs = split_dict['train_idx'].flatten() - 1 # index-0\n pids = split_dict['labels'].flatten()\n train_pids = set(pids[train_idxs])\n pid2label = {pid: label for label, pid in enumerate(train_pids)}\n query_idxs = split_dict['query_idx'].flatten() - 1\n gallery_idxs = split_dict['gallery_idx'].flatten() - 1\n filelist = split_dict['filelist'].flatten()\n train_info = _extract_set(filelist, pids, pid2label, train_idxs, img_dir, relabel=True)\n query_info = _extract_set(filelist, pids, pid2label, query_idxs, img_dir, relabel=False)\n gallery_info = _extract_set(filelist, pids, pid2label, gallery_idxs, img_dir, relabel=False)\n return train_info, query_info, gallery_info\n\n print(\"Creating new splits for detected images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_det_mat_path),\n self.imgs_detected_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_det_json_path)\n\n print(\"Creating new splits for labeled images (767/700) ...\")\n train_info, query_info, gallery_info = _extract_new_split(\n loadmat(self.split_new_lab_mat_path),\n self.imgs_labeled_dir,\n )\n splits = [{\n 'train': train_info[0], 'query': query_info[0], 'gallery': gallery_info[0],\n 'num_train_pids': train_info[1], 'num_train_imgs': train_info[2],\n 'num_query_pids': query_info[1], 'num_query_imgs': query_info[2],\n 'num_gallery_pids': gallery_info[1], 'num_gallery_imgs': gallery_info[2],\n }]\n write_json(splits, self.split_new_lab_json_path)", "def read_training_pixels_from_multi_images(input, subImg_folder, subLabel_folder):\n img_list = io_function.get_file_list_by_ext('.tif', subImg_folder, bsub_folder=False)\n label_list = io_function.get_file_list_by_ext('.tif', 
subLabel_folder, bsub_folder=False)\n img_list.sort()\n label_list.sort()\n\n if len(img_list) < 1 or len(label_list) < 1:\n raise IOError('No tif images or labels in folder %s or %s' % (subImg_folder, subLabel_folder))\n if len(img_list) != len(label_list):\n raise ValueError('the number of images is not equal to the one of labels')\n\n # read them one by one\n Xs, ys = [], []\n for img, label in zip(img_list, label_list):\n # # test by hlc\n # polygon_index_img = os.path.basename(img).split('_')[-3]\n # # print(polygon_index_img)\n # if polygon_index_img not in [str(83), str(86)] :\n # continue\n\n X_aImg, y_a = read_training_pixels(img, label)\n Xs.append(X_aImg)\n ys.append(y_a)\n\n X_pixels = np.concatenate(Xs, axis=1)\n y_pixels = np.concatenate(ys, axis=0)\n X_pixels = np.transpose(X_pixels, (1, 0))\n basic.outputlogMessage(str(X_pixels.shape))\n basic.outputlogMessage(str(y_pixels.shape))\n\n return X_pixels, y_pixels", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces", "def create_image_array(files_list):\n im_array = np.array([np.array(cv2.imread(file)) for file in files_list])\n return im_array" ]
[ "0.67383176", "0.6712333", "0.66240686", "0.6617843", "0.6607581", "0.652733", "0.64513606", "0.6360706", "0.6346707", "0.63367313", "0.63006884", "0.627508", "0.62277615", "0.6199671", "0.61431366", "0.6130395", "0.61136776", "0.6061919", "0.60611165", "0.60610926", "0.6056419", "0.6034468", "0.60323936", "0.6019369", "0.6018865", "0.6017226", "0.6016271", "0.59962845", "0.598461", "0.5981686" ]
0.7334974
0
This function generates the Two-Branch Land Cover Classification Convolutional Neural Network (TBLCCCNN) proposed in the paper 'A Two-Branch CNN Architecture for Land Cover Classification of PAN and MS Imagery' by Gaetano R., Ienco D., Ose K., Cresson R. (2018)
def TBLCCCNN_Model(pan_image_height_size, pan_image_width_size, ms_to_pan_ratio, n_bands, n1_pan, n2_pan, n3_pan, n1_ms, n2_ms, n3_ms, dropout_rate, n_classes, l_r): if (pan_image_height_size % ms_to_pan_ratio) != 0 or (pan_image_width_size % ms_to_pan_ratio) != 0: raise ValueError('Please make sure that both pan_image_height_size and pan_image_width_size can be divided by {}'.format(int(ms_to_pan_ratio))) pan_img_input = Input(shape = (pan_image_height_size, pan_image_width_size, 1)) conv_1_pan = Conv2D(n1_pan, (7, 7), padding = 'same', activation = 'relu')(pan_img_input) max_pool_1_pan = MaxPooling2D(pool_size = (2, 2))(conv_1_pan) conv_2_pan = Conv2D(n2_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_1_pan) max_pool_2_pan = MaxPooling2D(pool_size = (2, 2))(conv_2_pan) conv_3_pan = Conv2D(n3_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_2_pan) glob_max_pool_pan = GlobalMaxPooling2D()(conv_3_pan) glob_max_pool_pan = Dropout(dropout_rate)(glob_max_pool_pan) ms_img_input = Input(shape = (int(pan_image_height_size / ms_to_pan_ratio), int(pan_image_width_size / ms_to_pan_ratio), n_bands)) conv_1_ms = Conv2D(n1_ms, (3, 3), padding = 'same', activation = 'relu')(ms_img_input) conv_2_ms = Conv2D(n2_ms, (3, 3), padding = 'same', activation = 'relu')(conv_1_ms) conv_3_ms = Conv2D(n3_ms, (3, 3), padding = 'same', activation = 'relu')(conv_2_ms) glob_max_pool_ms = GlobalMaxPooling2D()(conv_3_ms) glob_max_pool_ms = Dropout(dropout_rate)(glob_max_pool_ms) all_features = concatenate([glob_max_pool_pan, glob_max_pool_ms]) pred_layer = Dense(n_classes, activation = 'softmax')(all_features) tblcccnn_model = Model(inputs = [ms_img_input, pan_img_input], outputs = pred_layer) tblcccnn_model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = l_r), metrics = ['categorical_crossentropy']) return tblcccnn_model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cifar10_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n \n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n #x._train shape: (50000, 32, 32, 3)\n #input shape (32, 32, 3)\n input_shape = x_train.shape[1:]\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n #print('input shape', input_shape)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def blackbox_network():\n num_nodes = 6\n num_states = 2 ** num_nodes\n tpm = np.zeros((num_states, num_nodes))\n\n for index, previous_state in enumerate(all_states(num_nodes)):\n current_state = [0 for i in range(num_nodes)]\n if previous_state[5] == 1:\n current_state[0] = 1\n current_state[1] = 1\n if previous_state[0] == 1 and previous_state[1]:\n current_state[2] = 1\n if previous_state[2] == 1:\n current_state[3] = 1\n current_state[4] = 1\n if previous_state[3] == 1 and previous_state[4] == 1:\n current_state[5] = 1\n tpm[index, :] = current_state\n\n # fmt: off\n cm = np.array([\n [0, 0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 1, 0],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0, 0],\n ])\n # fmt: on\n\n return Network(tpm, cm, node_labels=LABELS[:tpm.shape[1]])", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', 
optimizer=optimizer,metrics=['accuracy'])\n return model", "def make_coco_labels(real_c):\n y = np.eye(real_c.size(1))\n\n fixed_c_list = []\n\n # single object addition and removal\n for i in range(2*real_c.size(1)):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i%2:\n c[i//2] = 0.\n else:\n c[i//2] = 1.\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n\n # multi-attribute transfer (H+G, H+A, G+A, H+G+A)\n #if self.dataset == 'CelebA':\n # for i in range(4):\n # fixed_c = real_c.clone()\n # for c in fixed_c:\n # if i in [0, 1, 3]: # Hair color to brown\n # c[:3] = y[2]\n # if i in [0, 2, 3]: # Gender\n # c[3] = 0 if c[3] == 1 else 1\n # if i in [1, 2, 3]: # Aged\n # c[4] = 0 if c[4] == 1 else 1\n # fixed_c_list.append(self.to_var(fixed_c, volatile=True))\n return fixed_c_list", "def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)", "def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = 
coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict", "def TCN_V5(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 32\n\n config = [ \n [(1,8,32)],\n [(1,8,32)],\n [(1,8,32)],\n [(2,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def create_cnn(num_half_rows, num_half_columns, num_channels):\n\n error_checking.assert_is_integer(num_half_rows)\n error_checking.assert_is_integer(num_half_columns)\n error_checking.assert_is_integer(num_channels)\n\n error_checking.assert_is_greater(num_half_rows, 0)\n error_checking.assert_is_greater(num_half_columns, 0)\n error_checking.assert_is_greater(num_channels, 0)\n\n regularizer_object = keras.regularizers.l1_l2(l1=L1_WEIGHT, l2=L2_WEIGHT)\n\n num_grid_rows = 2 * num_half_rows + 1\n num_grid_columns = 2 * num_half_columns + 1\n input_layer_object = keras.layers.Input(\n shape=(num_grid_rows, 
num_grid_columns, num_channels)\n )\n\n current_num_filters = None\n current_layer_object = None\n\n # Add convolutional layers.\n for _ in range(NUM_CONV_LAYER_SETS):\n for _ in range(NUM_CONV_LAYERS_PER_SET):\n\n if current_num_filters is None:\n current_num_filters = (\n num_channels * NUM_CHANNELS_TO_FIRST_NUM_FILTERS)\n this_input_layer_object = input_layer_object\n\n else:\n current_num_filters *= 2\n this_input_layer_object = current_layer_object\n\n current_layer_object = keras.layers.Conv2D(\n filters=current_num_filters,\n kernel_size=(NUM_CONV_FILTER_ROWS, NUM_CONV_FILTER_COLUMNS),\n strides=(1, 1), padding='valid', data_format='channels_last',\n dilation_rate=(1, 1), activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(this_input_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if CONV_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=CONV_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n current_layer_object = keras.layers.MaxPooling2D(\n pool_size=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n strides=(NUM_POOLING_ROWS, NUM_POOLING_COLUMNS),\n padding='valid', data_format='channels_last'\n )(current_layer_object)\n\n these_dimensions = numpy.array(\n current_layer_object.get_shape().as_list()[1:], dtype=int)\n num_features = numpy.prod(these_dimensions)\n\n current_layer_object = keras.layers.Flatten()(current_layer_object)\n\n # Add intermediate dense layers.\n _, num_outputs_by_dense_layer = (\n architecture_utils.get_dense_layer_dimensions(\n num_input_units=num_features, num_classes=NUM_CLASSES,\n num_dense_layers=NUM_DENSE_LAYERS)\n )\n\n for k in range(NUM_DENSE_LAYERS - 1):\n current_layer_object = keras.layers.Dense(\n num_outputs_by_dense_layer[k], activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.LeakyReLU(\n alpha=SLOPE_FOR_RELU\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n if USE_BATCH_NORMALIZATION:\n current_layer_object = keras.layers.BatchNormalization(\n axis=-1, center=True, scale=True\n )(current_layer_object)\n\n # Add output layer (also dense).\n current_layer_object = keras.layers.Dense(\n NUM_CLASSES, activation=None, use_bias=True,\n kernel_initializer='glorot_uniform', bias_initializer='zeros',\n kernel_regularizer=regularizer_object\n )(current_layer_object)\n\n current_layer_object = keras.layers.Activation(\n 'softmax'\n )(current_layer_object)\n\n if DENSE_LAYER_DROPOUT_FRACTION is not None and NUM_DENSE_LAYERS == 1:\n current_layer_object = keras.layers.Dropout(\n rate=DENSE_LAYER_DROPOUT_FRACTION\n )(current_layer_object)\n\n # Put the whole thing together and compile.\n cnn_model_object = keras.models.Model(\n inputs=input_layer_object, outputs=current_layer_object)\n cnn_model_object.compile(\n loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adam(),\n metrics=LIST_OF_METRIC_FUNCTIONS)\n\n cnn_model_object.summary()\n return cnn_model_object", "def TCN_V1(\n n_classes, \n feat_dim,\n 
max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def get_classification_simulator(self, image):\n\n r_channel = image[:,:,2]\n g_channel = image[:,:,1]\n\n\n\n # Threshold color channel\n s_rgy_min = 50\n s_thresh_min = 245\n s_thresh_max = 255\n \n #s_binary = np.zeros_like(r_channel)\n r_binary = np.zeros_like(r_channel)\n g_binary = np.zeros_like(r_channel)\n y_binary = np.zeros_like(r_channel)\n \n #s_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) | ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n \n r_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & (g_channel <= s_rgy_min)] = 1\n g_binary[((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max)) & (r_channel <= s_rgy_min)] = 1\n y_binary[((r_channel >= s_thresh_min) & (r_channel <= s_thresh_max)) & ((g_channel >= s_thresh_min) & (g_channel <= s_thresh_max))] = 1\n \n\n #res = cv2.bitwise_and(img,img,mask = s_binary)\n \n #maxx=image.shape[1]\n maxy=image.shape[0]\n \n y_top=0\n window_size_y=50\n y_bottom=y_top+window_size_y\n \n max_color=0\n tf_color=TrafficLight.UNKNOWN\n \n while (y_bottom< maxy):\n #print(img[y_top:y_bottom,:,:])\n rs= r_binary[y_top:y_bottom,:].sum()\n gs= g_binary[y_top:y_bottom,:].sum()\n ys= y_binary[y_top:y_bottom,:].sum()\n if (rs>max_color):\n max_color=rs\n tf_color=TrafficLight.RED\n if (gs>max_color):\n max_color=gs\n tf_color=TrafficLight.GREEN\n if (ys>max_color):\n max_color=ys\n tf_color=TrafficLight.YELLOW\n y_top+=window_size_y\n y_bottom+=window_size_y\n \n if (max_color<100):\n tf_color=TrafficLight.UNKNOWN\n \n\n\n return tf_color", "def compile_model_cnn(genome, nb_classes, input_shape):\n # Get our network parameters.\n nb_layers = genome.geneparam['nb_layers' ]\n 
nb_neurons = genome.nb_neurons()\n activation = genome.geneparam['activation']\n optimizer = genome.geneparam['optimizer' ]\n\n logging.info(\"Architecture:%s,%s,%s,%d\" % (str(nb_neurons), activation, optimizer, nb_layers))\n\n model = Sequential()\n\n # Add each layer.\n for i in range(0,nb_layers):\n # Need input shape for first layer.\n if i == 0:\n model.add(Conv2D(nb_neurons[i], kernel_size = (3, 3), activation = activation, padding='same', input_shape = input_shape))\n else:\n model.add(Conv2D(nb_neurons[i], kernel_size = (3, 3), activation = activation))\n \n if i < 2: #otherwise we hit zero\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Dropout(0.2))\n\n model.add(Flatten())\n # always use last nb_neurons value for dense layer\n model.add(Dense(nb_neurons[len(nb_neurons) - 1], activation = activation))\n model.add(Dropout(0.5))\n model.add(Dense(nb_classes, activation = 'softmax'))\n\n #BAYESIAN CONVOLUTIONAL NEURAL NETWORKS WITH BERNOULLI APPROXIMATE VARIATIONAL INFERENCE\n #need to read this paper\n\n model.compile(loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n\n return model", "def load_data5():\n# dirname = 'cifar-10-batches-py'\n# origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n# path = get_file(dirname, origin=origin, untar=True)\n# path= './cifar-10-batches-py'\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n# Below shows a test class has 999 examples instead of the claimed 1000\n# tclasscount=np.zeros((10,), dtype=int)\n# for i in range(0, len(y_test)-1):\n# tclasscount[y_test[i][0]]= tclasscount[y_test[i][0]] + 1\n# print('Test class count',tclasscount)\n num_train_samples = 50000\n num_5_class = 25000\n num_5_test = 4999 # should be 5000 if all the categories had 1000 in them but they do not. One is missing.\n print('x_train shape orig:', x_train.shape)\n print('More:', x_train.shape[1:])\n print('y_test shape',y_test.shape)\n\n x5_train = np.empty((num_5_class, 32, 32, 3), dtype='uint8')\n y5_train = np.empty((num_5_class,), dtype='uint8')\n\n count=0\n\n for i in range(0, len(y_train)-1):\n if (y_train[i][0] == 2) or (y_train[i][0] == 3) or (y_train[i][0] == 4) or (y_train[i][0] == 5) or (y_train[i][0] == 7):\n x5_train[count]=x_train[i]\n y5_train[count]=y_train[i]\n count=count+1\n \n # find test data of interest\n count=0\n x5_test=np.empty((num_5_test, 32, 32, 3), dtype='uint8')\n y5_test= np.empty((num_5_test,), dtype='uint8')\n\n for i in range(0, len(y_test)-1):\n if (y_test[i][0] == 2) or (y_test[i][0] == 3) or (y_test[i][0] == 4) or (y_test[i][0] == 5) or (y_test[i][0] == 7):\n x5_test[count]=x_test[i]\n y5_test[count]=y_test[i]\n count=count+1\n# Below shows class 7 is only 999 and not 1000 examples!!! 
One horse got away it seems.\n# if(y_test[i][0] == 2):\n# c2=c2+1\n# if(y_test[i][0] == 3):\n# c3=c3+1\n# if(y_test[i][0] == 4):\n# c4=c4+1\n# if(y_test[i][0] == 5):\n# c5=c5+1\n# if(y_test[i][0] == 7):\n# c7=c7+1\n# print('c2count, c3count, c4count, c5count, c7count',c2,c3,c3,c5,c7)\n# print('y5tstshape',y5_test.shape, count)\n# print('y5tst',y5_test)\n# return (x_train, y_train), (x_test, y_test)\n return (x5_train, y5_train), (x5_test, y5_test)", "def build_cnn(input_var=None):\n\n # input layer\n network = lasagne.layers.InputLayer(\n shape=(\n None,\n 1,\n 128,\n 129\n ),\n input_var=input_var\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(5, 5),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(5, 5),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(3, 3),\n stride=(1, 1), # Stride fo (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(3, 3),\n #stride=(1, 1), # Stride fo (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # Fully-connected layer of 256 units with 50% dropout on its inputs\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.HeUniform() # W initialization\n )\n\n # Finally add a 1-unit softmax output layer\n network = lasagne.layers.DenseLayer(\n network,\n num_units=1,\n nonlinearity=lasagne.nonlinearities.sigmoid\n )\n\n return network", "def actual_causation():\n # fmt: off\n tpm = np.array([\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 0, 1],\n ])\n cm = np.array([\n [1, 1],\n [1, 1],\n ])\n # fmt: on\n return Network(tpm, cm, node_labels=('OR', 'AND'))", "def test_two_layer_classifier(caplog):\n caplog.set_level(logging.WARNING, logger=Logger.name)\n\n # Input X specification\n D = 2 # Dimension of X WITHOUT bias\n\n # Layer 01. Output [email protected] of shape (N,M1)\n M1 = 4 # Nodes in the matmul 01\n W1 = weights.he(M1, D+1) # Weights in the matmul 01 WITH bias (D+1)\n\n # Layer 02. 
Input A01 of shape (N,M1).\n # Output [email protected] of shape (N,M2)\n M2: int = 3 # Number of categories to classify\n W2 = weights.he(M2, M1+1) # Weights in the matmul 02 WITH bias (M1+1)\n\n optimizer = SGD(lr=TYPE_FLOAT(0.2))\n\n # X data\n # X, T, V = linear_separable_sectors(n=N, d=D, m=M)\n X, T = venn_of_circle_a_not_b(\n radius=TYPE_FLOAT(1.0),\n ratio=TYPE_FLOAT(1.3),\n m=M2,\n n=10\n )\n N = X.shape[0]\n assert X.shape[0] > 0 and X.shape == (N, D)\n X, T = transform_X_T(X, T)\n\n def callback(W1, W2):\n \"\"\"Dummy callback\"\"\"\n pass\n\n profiler = cProfile.Profile()\n profiler.enable()\n\n train_two_layer_classifier(\n N=N,\n D=D,\n X=X,\n T=T,\n M1=M1,\n W1=W1,\n M2=M2,\n W2=W2,\n log_loss_function=softmax_cross_entropy_log_loss,\n optimizer=optimizer,\n num_epochs=10,\n test_numerical_gradient=True,\n log_level=logging.DEBUG,\n callback=callback\n )\n\n profiler.disable()\n profiler.print_stats(sort=\"cumtime\")", "def get_cifar10(self):\n\t\t# Get the data.\n\t\tself.x_train = self.x_train.reshape(self.nb_train, self.input_dim)\n\t\tself.x_test = self.x_test.reshape(self.nb_test, self.input_dim)\n\t\tself.x_train = self.x_train.astype('float32')\n\t\tself.x_test = self.x_test.astype('float32')\n\t\tself.x_train /= 255\n\t\tself.x_test /= 255\n\n\t\t# convert class vectors to binary class matrices\n\t\tself.y_train = np_utils.to_categorical(self.y_train, self.nb_classes)\n\t\tself.y_test = np_utils.to_categorical(self.y_test, self.nb_classes)", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer 
group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n \n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = 
NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def TCN_V3(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n 
ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 128\n\n config = [ \n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def __init__(self):\n super(DLStudio.ExperimentsWithCIFAR.Net2, self).__init__()\n self.relu = nn.ReLU()\n strides = []\n patch_size = 2\n ## conv1:\n out_ch, ker_size, conv_stride, pool_stride = 128,5,1,2\n self.conv1 = nn.Conv2d(3, out_ch, (ker_size,ker_size), padding=(ker_size-1)//2) \n self.pool1 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv2:\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = 128,3,1,2\n self.conv2 = nn.Conv2d(in_ch, out_ch, ker_size, padding=(ker_size-1)//2)\n self.pool2 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv3: \n ## meant for repeated invocation, must have same in_ch, out_ch and strides of 1\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = in_ch,2,1,1\n self.conv3 = nn.Conv2d(in_ch, out_ch, ker_size, padding=1)\n self.pool3 = nn.MaxPool2d(patch_size, pool_stride) \n# strides += (conv_stride, pool_stride)\n ## figure out the number of nodes needed for entry into fc:\n in_size_for_fc = out_ch * (32 // np.prod(strides)) ** 2 ## (A)\n self.in_size_for_fc = in_size_for_fc\n self.fc1 = nn.Linear(in_size_for_fc, 150)\n self.fc2 = nn.Linear(150, 100)\n self.fc3 = nn.Linear(100, 10)", "def load_breast_cancer():\n bc_data_train = np.load(_BREAST_CANCER_FOLDER+'bc_data.train')\n bc_data_test = np.load(_BREAST_CANCER_FOLDER+'bc_data.test')\n bc_target_train = np.load(_BREAST_CANCER_FOLDER+'bc_target.train')\n bc_target_test = np.load(_BREAST_CANCER_FOLDER+'bc_target.test')\n for i in range(len(bc_target_test)):\n if bc_target_test[i] == 2:\n bc_target_test[i] = 0\n elif bc_target_test[i] == 4:\n bc_target_test[i] = 1\n for i in range(len(bc_target_train)):\n if bc_target_train[i] == 2:\n bc_target_train[i] = 0\n 
elif bc_target_train[i] == 4:\n bc_target_train[i] = 1\n return (bc_data_train, bc_target_train.reshape(-1, 1), bc_data_test, bc_target_test.reshape(-1, 1))", "def TCN_V4(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 4\n initial_conv_num = 64\n\n config = [ \n [(1,4,64)],\n [(1,4,64)],\n [(1,4,64)],\n [(2,4,128)],\n [(1,4,128)],\n [(1,4,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def cnn(train_X, train_y, test_X, n_epochs =50, batch_size = 100, eps = 0.01):\n \n def get_onehot(x):\n onehot=np.zeros((len(x),10))\n onehot[np.arange(len(x)),x]=1\n return onehot\n \n def f_props(layers, x):\n for layer in layers:\n x = layer.f_prop(x)\n return x\n \n layers = [ # (縦の次元数)x(横の次元数)x(チャネル数)\n Conv((5, 5, 1, 20), tf.nn.relu), # 28x28x 1 -> 24x24x20\n Pooling((1, 2, 2, 1)), # 24x24x20 -> 12x12x20\n Conv((5, 5, 20, 50), tf.nn.relu), # 12x12x20 -> 8x 8x50\n Pooling((1, 2, 2, 1)), # 8x 8x50 -> 4x 4x50\n Flatten(),\n Dense(4*4*50, 10, tf.nn.softmax)\n ]\n\n x = tf.placeholder(tf.float32, [None, 28, 28, 1])\n t = tf.placeholder(tf.float32, [None, 10])\n\n y = f_props(layers, x)\n cost = -tf.reduce_mean(tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), axis=1))\n train = tf.train.GradientDescentOptimizer(eps).minimize(cost)\n valid = tf.argmax(y, 1)\n \n\n print(\"BEGIN: CNN learning with n_epochs = {0}, batch_size = {1}, eps = {2}\".format(n_epochs, batch_size, eps))\n \n train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))\n test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))\n train_y=get_onehot(train_y)\n \n train_X, valid_X, train_y, valid_y = train_test_split(train_X, train_y, test_size=0.1, random_state=42)\n n_batches = train_X.shape[0]//batch_size\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n train_X, train_y = shuffle(train_X, train_y, random_state=random_state)\n for i in 
range(n_batches):\n start = i * batch_size\n end = start + batch_size\n sess.run(train, feed_dict={x: train_X[start:end], t: train_y[start:end]})\n pred_y, valid_cost = sess.run([valid, cost], feed_dict={x: valid_X, t: valid_y})\n print('\\tEPOCH:: %i, Validation cost: %.3f, Validation F1: %.3f' % (epoch + 1, valid_cost, f1_score(np.argmax(valid_y, 1).astype('int32'), pred_y, average='macro')))\n \n pred_y= sess.run(valid, feed_dict={x: test_X})\n return pred_y", "def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data", "def build_dc_classifier():\n # return nn.Sequential(\n # Unflatten(Batch_size, 1, 28, 28),\n # nn.Conv2d(1, 32, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(2, stride=2),\n # nn.Conv2d(32, 64, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(kernel_size=2, stride=2),\n # Flatten(),\n # nn.Linear(4 * 4 * 64, 4 * 4 * 64),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.Linear(4 * 4 * 64, 1)\n # )\n\n return nn.Sequential(\n Unflatten(Batch_size, 1, 128, 128), #28,28,32 #128,128,16\n nn.Conv2d(1, 16,kernel_size=5, stride=1), #24,24,32 #124,124,16\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(2, stride=2), #12,12,32 #62,62,16\n nn.Conv2d(16, 32,kernel_size=5, stride=1), # 8, 8,64 #58,58,32\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,64 #29,29,32\n nn.Conv2d(32, 64, kernel_size=5, stride=1), #25,25,64\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), #12,12,64\n nn.Conv2d(64, 128, kernel_size=5, stride=1), # 8, 8,128\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,128\n Flatten(),\n 
nn.Linear(4*4*128, 4*4*128), # 4*4*64 # 4*4*128\n nn.LeakyReLU(negative_slope=0.01),\n nn.Linear(4*4*128,1) # 4*4*64 # 4*4*128\n )", "def unet_network(input_tensor, nb_classes):\n # contraction 1\n conv1 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv1')(\n input_tensor) # (batch_size, ?, ?, 64)\n conv2 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv2')(\n conv1) # (batch_size, ?, ?, 64)\n crop2 = Cropping2D(\n cropping=((88, 88), (88, 88)),\n name=\"crop2\")(\n conv2) # (batch_size, ?, ?, 64)\n maxpool2 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool2\")(\n conv2) # (batch_size, ?, ?, 64)\n\n # contraction 2\n conv3 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv3')(\n maxpool2) # (batch_size, ?, ?, 128)\n conv4 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv4')(\n conv3) # (batch_size, ?, ?, 128)\n crop4 = Cropping2D(\n cropping=((40, 40), (40, 40)),\n name=\"crop4\")(\n conv4) # (batch_size, ?, ?, 128)\n maxpool4 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool4\")(\n conv4) # ((batch_size, ?, ?, 128)\n\n # contraction 3\n conv5 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv5')(\n maxpool4) # (batch_size, ?, ?, 256)\n conv6 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv6')(\n conv5) # (batch_size, ?, ?, 256)\n crop6 = Cropping2D(\n cropping=((16, 16), (16, 16)),\n name=\"crop6\")(\n conv6) # (batch_size, ?, ?, 256)\n maxpool6 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool6\")(\n conv6) # (batch_size, ?, ?, 256)\n\n # contraction 4\n conv7 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv7')(\n maxpool6) # (batch_size, ?, ?, 512)\n conv8 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv8')(\n conv7) # (batch_size, ?, ?, 512)\n crop8 = Cropping2D(\n cropping=((4, 4), (4, 4)),\n name=\"crop8\")(\n conv8) # (batch_size, ?, ?, 512)\n maxpool8 = MaxPooling2D(\n pool_size=(3, 3),\n strides=(2, 2),\n name=\"maxpool8\")(\n conv8) # (batch_size, ?, ?, 512)\n\n # bottom\n conv9 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv9')(\n maxpool8) # (batch_size, ?, ?, 1024)\n conv10 = Conv2D(\n filters=1024,\n kernel_size=(3, 3),\n activation='relu',\n name='conv10')(\n conv9) # (batch_size, ?, ?, 1024)\n\n # expansion 1\n upconv11 = up_conv_2d(\n input_tensor=conv10,\n nb_filters=512,\n name='upconv11') # (batch_size, ?, ?, 512)\n concat11 = tf.concat(\n values=[crop8, upconv11],\n axis=-1,\n name='concat11') # (batch_size, ?, ?, 1024)\n conv12 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv12')(\n concat11) # (batch_size, ?, ?, 512)\n conv13 = Conv2D(\n filters=512,\n kernel_size=(3, 3),\n activation='relu',\n name='conv13')(\n conv12) # (batch_size, ?, ?, 512)\n\n # expansion 2\n upconv14 = up_conv_2d(\n input_tensor=conv13,\n nb_filters=256,\n name='upconv14') # (batch_size, ?, ?, 256)\n concat14 = tf.concat(\n values=[crop6, upconv14],\n axis=-1,\n name='concat14') # (batch_size, ?, ?, 512)\n conv15 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv15')(\n concat14) # (batch_size, ?, ?, 256)\n conv16 = Conv2D(\n filters=256,\n kernel_size=(3, 3),\n activation='relu',\n name='conv16')(\n conv15) # (batch_size, ?, ?, 256)\n\n # expansion 3\n upconv17 = up_conv_2d(\n 
input_tensor=conv16,\n nb_filters=128,\n name='upconv17') # (batch_size, ?, ?, 128)\n concat17 = tf.concat(\n values=[crop4, upconv17],\n axis=-1,\n name='concat17') # (batch_size, ?, ?, 256)\n conv18 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv18')(\n concat17) # (batch_size, ?, ?, 128)\n conv19 = Conv2D(\n filters=128,\n kernel_size=(3, 3),\n activation='relu',\n name='conv19')(\n conv18) # (batch_size, ?, ?, 128)\n\n # expansion 4\n upconv20 = up_conv_2d(\n input_tensor=conv19,\n nb_filters=64,\n name='upconv20') # (batch_size, ?, ?, 64)\n concat20 = tf.concat(\n values=[crop2, upconv20],\n axis=-1,\n name='concat20') # (batch_size, ?, ?, 128)\n conv21 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv21')(\n concat20) # (batch_size, ?, ?, 64)\n conv22 = Conv2D(\n filters=64,\n kernel_size=(3, 3),\n activation='relu',\n name='conv22')(\n conv21) # (batch_size, ?, ?, 64)\n conv23 = Conv2D(\n filters=nb_classes,\n kernel_size=(1, 1),\n activation='sigmoid',\n name='conv23')(\n conv22) # (batch_size, ?, ?, nb_classes)\n\n return conv23", "def TCN(input_dim): \r\n # Number of dilations in order to use for the temporal blocks.\r\n dilations = np.array([1, 2, 4, 8, 16, 32])\r\n\r\n input_dim.insert(0,1)\r\n print(f\"input_dim: {input_dim}\")\r\n input_layer = Input(shape=input_dim)\r\n cropping = 0\r\n assert (sum(dilations) * block_size + 1) == 127, \"Paper specifies receptive field size should be 127\"\r\n \r\n prev_layer, skip_layer, _ = add_temporal_block(input_layer, None, 1, 1, cropping)\r\n \r\n for dilation in dilations:\r\n prev_layer, skip_layer, cropping = add_temporal_block(prev_layer, skip_layer, 2, dilation, cropping)\r\n\r\n output_layer = PReLU(shared_axes=[2, 3])(skip_layer)\r\n output_layer = SpectralNormalization(Conv1D(fixed_filters, kernel_size=1))(output_layer)\r\n output_layer = PReLU(shared_axes=[2, 3])(output_layer)\r\n output_layer = SpectralNormalization(Conv1D(1, kernel_size=1))(output_layer)\r\n\r\n return Model(input_layer, output_layer)", "def prepare_train_coco_data(args):\n image_dir, annotation_file, data_dir = args.train_coco_image_dir, args.train_coco_annotation_file, args.train_coco_data_dir\n batch_size = args.batch_size\n basic_model = args.basic_model\n num_roi = args.num_roi\n\n coco = COCO(annotation_file)\n\n img_ids = list(coco.imgToAnns.keys())\n img_files = []\n img_heights = []\n img_widths = []\n anchor_files = []\n gt_classes = []\n gt_bboxes = []\n\n for img_id in img_ids:\n img_files.append(os.path.join(image_dir, coco.imgs[img_id]['file_name'])) \n img_heights.append(coco.imgs[img_id]['height']) \n img_widths.append(coco.imgs[img_id]['width']) \n anchor_files.append(os.path.join(data_dir, os.path.splitext(coco.imgs[img_id]['file_name'])[0]+'_'+basic_model+'_anchor.npz')) \n\n classes = [] \n bboxes = [] \n for ann in coco.imgToAnns[img_id]: \n classes.append(coco_category_to_class[ann['category_id']]) \n bboxes.append([ann['bbox'][1], ann['bbox'][0], ann['bbox'][3]+1, ann['bbox'][2]+1]) \n\n gt_classes.append(classes) \n gt_bboxes.append(bboxes) \n \n print(\"Building the training dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths, batch_size, anchor_files, gt_classes, gt_bboxes, True, True)\n print(\"Dataset built.\")\n return coco, dataset", "def classify(test, network, rbfknn=0, radio=0, spinvalue=0, convalue=0, num_board=1, verbose=1):\r\n value4 = spinvalue #value4 : K\r\n consensus=convalue #value5 : Min consensus of N\r\n if rbfknn==3:\r\n 
network.rbforknn=0\r\n elif rbfknn==4:\r\n network.rbforknn=1\r\n #print(rbfknn)\r\n #print(network.rbforknn)\r\n #print (network.neurons[0:11])\r\n # Train network\r\n patternID = []\r\n test_detail=[]\r\n total_detail = []\r\n i=1\r\n ID = 0\r\n UNC_c = 0\r\n UNC_i = 0\r\n UNK = 0\r\n incorrect=0\r\n cla_result=[]\r\n classification_result=0\r\n cat_acurracy=[]\r\n j=1\r\n for input in test:\r\n input_comps = [int(x) for x in input]\r\n test_context = input_comps[0]\r\n test_cat = input_comps[1]\r\n test_pattern = input_comps[2:]\r\n test_detail.append([input_comps[0],input_comps[1]])\r\n patternID.append(i)\r\n i+=1\r\n id_, unc, firing_neurons = network.broadcast(input_=test_pattern, new_gcr=test_context)\r\n #print(firing_neurons)\r\n temp=0\r\n temp2=0\r\n temp3=0\r\n temp4=0\r\n temp5=0\r\n cat=[]\r\n for firing_neuron in firing_neurons:\r\n cat.append(firing_neuron.cat)\r\n if len(cat)>=value4:\r\n iter=value4\r\n else :\r\n iter=len(cat)\r\n #print '=========================================='\r\n #print('network.read_cat() : ', cat)\r\n #print('iter : ', iter)\r\n\r\n #########Category Out#########\r\n #Best match\r\n if radio==5:\r\n best_neuron = firing_neurons[-1] if firing_neurons else None\r\n if best_neuron != None:\r\n classification_result = cat[0]\r\n else :\r\n classification_result = None\r\n #print('classification_result : ', classification_result)\r\n\r\n #Dominant\r\n elif radio==6:\r\n if iter>=1:\r\n for i in range(iter):\r\n if cat[i]==1:\r\n temp+=1\r\n elif cat[i]==2:\r\n temp2+=1\r\n elif cat[i]==3:\r\n temp3+=1\r\n elif cat[i]==4:\r\n temp4+=1\r\n # elif cat[i]==5:\r\n # temp5+=1\r\n list=[temp, temp2, temp3, temp4]\r\n value=max(list)\r\n\r\n if value==temp:\r\n classification_result = 1\r\n elif value == temp2:\r\n classification_result = 2\r\n elif value == temp3:\r\n classification_result = 3\r\n elif value == temp4:\r\n classification_result = 4\r\n # elif value==temp5:\r\n # classification_result = 5\r\n else: #iter==0\r\n classification_result = None\r\n #print('classification_result : ', classification_result)\r\n #Unanimity\r\n elif radio==7:\r\n if iter>=1:\r\n for i in range(iter):\r\n if cat[i] == test_cat:\r\n temp += 1\r\n else:\r\n temp -= 1\r\n if temp == (iter):\r\n classification_result = test_cat\r\n else:\r\n classification_result = None\r\n else: #iter==0\r\n classification_result = None\r\n #print('temp : ', temp)\r\n #print('classification_result : ', classification_result)\r\n #Min consensus of N(value5)\r\n elif radio==8:\r\n for i in range(iter):\r\n if cat[i] == 1:\r\n temp += 1\r\n elif cat[i] == 2:\r\n temp2 += 1\r\n elif cat[i] == 3:\r\n temp3 += 1\r\n elif cat[i] == 4:\r\n temp4 += 1\r\n # elif cat[i]==5:\r\n # temp5+=1\r\n list = [temp, temp2, temp3, temp4]\r\n value = max(list)\r\n if value >= consensus:\r\n if value == temp:\r\n classification_result = 1\r\n elif value == temp2:\r\n classification_result = 2\r\n elif value == temp3:\r\n classification_result = 3\r\n elif value == temp4:\r\n classification_result = 4\r\n # elif value==temp5:\r\n # classification_result=5\r\n else:\r\n classification_result=None\r\n #print('classification_result : ', classification_result)\r\n cla_result.append(classification_result)\r\n\r\n #Accuracy\r\n #print('test_cat == classification_result ?', test_cat, classification_result)\r\n l=len(cat)\r\n if l==1 or l==0:\r\n if classification_result == test_cat :\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n elif classification_result == None:\r\n UNK += 1\r\n 
cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat :\r\n incorrect +=1\r\n elif l==2:\r\n if cat[0] == cat[1] :\r\n if classification_result == test_cat:\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n else:\r\n if classification_result == None:\r\n UNK += 1\r\n cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat:\r\n UNC_i += 1\r\n cat_acurracy.append([test_cat, 'UNC_i'])\r\n elif classification_result == test_cat:\r\n UNC_c += 1\r\n cat_acurracy.append([test_cat, 'UNC_c'])\r\n elif l>=3:\r\n if cat[0] == cat[1] and cat[0] == cat[2]:\r\n if classification_result == test_cat:\r\n ID += 1\r\n cat_acurracy.append([test_cat, 'ID'])\r\n else:\r\n if classification_result == None:\r\n UNK += 1\r\n cat_acurracy.append([test_cat, 'UNK'])\r\n elif classification_result != test_cat:\r\n UNC_i += 1\r\n cat_acurracy.append([test_cat, 'UNC_i'])\r\n elif classification_result == test_cat:\r\n UNC_c += 1\r\n cat_acurracy.append([test_cat, 'UNC_c'])\r\n\r\n #sum=ID+UNC_i+UNC_c+UNK\r\n #print(\"ID, UNC_c, UNC_i, UNK\", j, ID, UNC_c, UNC_i, UNK, sum)\r\n #print(l)\r\n j+=1\r\n #table data\r\n #print(cat_acurracy)\r\n detail = []\r\n for firing_neuron in firing_neurons:\r\n detail.append([firing_neuron.cat,firing_neuron.dist, firing_neuron.id_ ])\r\n #print(detail)\r\n patternID = np.reshape(patternID, (1, -1))\r\n test_detail = np.reshape(test_detail, (1, -1))\r\n detail = np.reshape(detail, (1, -1))\r\n if classification_result != None:\r\n temp_detail = np.hstack((patternID, test_detail, detail))\r\n else:\r\n temp_detail = np.hstack((patternID, test_detail))\r\n test_detail= []\r\n patternID = []\r\n total_detail.append(temp_detail)\r\n #print(temp_detail)\r\n\r\n #print (ID/32.16)\r\n #print(UNC_c/32.16)\r\n #print(UNC_i/32.16)\r\n #print(UNK/32.16)\r\n return ID, UNC_c, UNC_i, UNK, total_detail, cla_result, cat_acurracy", "def build_cnn(self):\n model = Sequential()\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Conv2D(24, (1, 3), activation = 'relu', input_shape = (1, grid_size*grid_size+2, 1)))\n model.add(Flatten())\n model.add(Dense(len(ACTIONS), activation = 'linear'))\n model.compile(loss = 'mse', optimizer = Adam(lr = alpha))\n\n return model", "def create_classification_model(include_top=True,\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n\n\n img_input = Input(shape=input_shape)\n # Block 1\n x = Conv2D(16, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(512, activation='relu', name='fc1')(x)\n x = Dense(128, activation='relu', name='fc2')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n 
else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='vgg19')\n\n # # load weights\n # if weights == 'imagenet':\n # if include_top:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH,\n # cache_subdir='models')\n # else:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n # WEIGHTS_PATH_NO_TOP,\n # cache_subdir='models')\n # model.load_weights(weights_path)\n # if K.backend() == 'theano':\n # layer_utils.convert_all_kernels_in_model(model)\n #\n # if K.image_data_format() == 'channels_first':\n # if include_top:\n # maxpool = model.get_layer(name='block5_pool')\n # shape = maxpool.output_shape[1:]\n # dense = model.get_layer(name='fc1')\n # layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n #\n # if K.backend() == 'tensorflow':\n # warnings.warn('You are using the TensorFlow backend, yet you '\n # 'are using the Theano '\n # 'image data format convention '\n # '(`image_data_format=\"channels_first\"`). '\n # 'For best performance, set '\n # '`image_data_format=\"channels_last\"` in '\n # 'your Keras config '\n # 'at ~/.keras/keras.json.')\n return model" ]
[ "0.6311451", "0.61126846", "0.60787344", "0.60380226", "0.5890978", "0.5878846", "0.58740735", "0.5861365", "0.58431727", "0.5805545", "0.5722827", "0.5722399", "0.56968087", "0.5693346", "0.56774884", "0.5656095", "0.5633978", "0.5627525", "0.56193244", "0.5611147", "0.5587589", "0.5586875", "0.5580071", "0.5571081", "0.5566402", "0.5565068", "0.55624676", "0.55618286", "0.5532023", "0.55305505" ]
0.7077625
0
Whether the interval overlaps the given point, range or Interval.
def overlaps(self, begin, end=None):
    if end is not None:
        # An overlap means that some C exists that is inside both ranges:
        #   begin <= C < end
        # and
        #   self.begin <= C < self.end
        # See https://stackoverflow.com/questions/3269434/whats-the-most-efficient-way-to-test-two-integer-ranges-for-overlap/3269471#3269471
        return begin < self.end and end > self.begin
    try:
        return self.overlaps(begin.begin, begin.end)
    except:
        return self.contains_point(begin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2", "def contains_interval(self, other):\n return (\n self.begin <= other.begin and\n self.end >= other.end\n )", "def dates_intervals_are_overlapped(start_1, end_1, start_2, end_2):\n return end_1 >= start_2 and end_2 >= start_1", "def overlap(range1, range2):\n if range1[0] <= range2[1] and range2[0] <= range1[1]:\n return True\n return False", "def overlap(start1, end1, start2, end2):\n return not (end1 < start2 or end2 < start1)", "def overlap(t1start, t1end, t2start, t2end):\n\n return (t1start <= t2start <= t1end) or (t2start <= t1start <= t2end)", "def overlaps(self, region):\n region = as_region(region)\n\n if region.chromosome != self.chromosome:\n return False\n\n if self.end is None or region.start is None or region.start <= self.end:\n if self.start is None or region.end is None or region.end >= self.start:\n return True\n return False", "def check_interval_bounds(begin, end):\n if begin.get_midpoint() >= end.get_midpoint():\n return False\n\n if begin.get_radius() is not None and end.get_radius() is not None:\n if begin.get_midpoint() - begin.get_radius() > \\\n end.get_midpoint() - end.get_radius():\n return False\n\n return True", "def overlaps(self, other):\n return self.start <= other.end and self.end >= other.start", "def overlaps(self, right: GeoSpatialValue) -> ir.BooleanValue:\n return ops.GeoOverlaps(self, right).to_expr()", "def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True", "def overlaps(self, other):\n\n if self.start.equal(other.start) or self.stop.equal(other.stop):\n return True\n elif self.start.before(other.start) and self.stop.after(other.start):\n return True\n elif other.stop.after(self.start) and other.stop.before(self.stop):\n return True\n else:\n return False", "def __contains__(self, other):\n if isinstance(other, (sppasInterval,\n sppasPoint,\n float, int)) is False:\n raise AnnDataTypeError(other,\n \"sppasInterval, sppasPoint, float, int\")\n\n if isinstance(other, sppasInterval):\n return (self.__begin <= other.get_begin() and\n other.get_end() <= self.__end)\n\n return self.__begin <= other <= self.__end", "def overlaps(self, period):\n\n start_datetime = self.start_time.to_python_datetime()\n end_datetime = self.end_time.to_python_datetime()\n\n return period.start_time < end_datetime and period.end_time > start_datetime", "def has_overlap(vevent, start, end):\n event_start = vevent.dtstart.value\n event_end = vevent.dtend.value\n\n assert not is_naive(start), 'start dt is naive'\n assert not is_naive(end), 'end dt is naive'\n assert not is_naive(event_start), 'event_start dt is naive'\n assert not is_naive(event_end), 'event_end dt is naive'\n\n if start <= event_start <= end: # starts today\n return True\n if start <= event_end <= end: # ends today\n return True\n if event_start <= start and end <= event_end: # spans over today\n return True\n return False", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def overlaps(self, other): # -> bool:\n ...", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and 
component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def overlap(a: Pos, b: Pos, exact: bool = False) -> bool:\n if a == b:\n return True\n elif exact:\n return False\n s0, e0 = a\n s1, e1 = b\n if in_interval(s1, s0, e0):\n return True\n if in_interval(e1, s0, e0):\n return True\n if in_interval(s0, s1, e1):\n return True\n if in_interval(e0, s1, e1):\n return True\n return False", "def overlap(p1: Tuple, p2: Tuple) -> bool:\n if (p2[1] - p1[0]) * (p2[0] - p1[1]) <= 0:\n return True\n else:\n return False", "def does_overlap(self, start, stop):\n\n ranges = [list(range(key, self.map[key] + 1)) for key in self.map]\n all_coords = [item for sublist in ranges for item in sublist]\n # removing all_coords implementation until we write some tests\n for i in range(start, stop + 1):\n if i in all_coords:\n return True\n return False", "def iOverlap (a1, a2, b1, b2):\n if b1<=a1<=b2 or b1<=a2<=b2 or a1<=b1<=a2 or a1<=b2<=a2:\n return True\n elif a1>a2 or b1>b2:\n return False\n else:\n return False", "def overlaps(self, that):\n if (not isinstance(that, Annotation)):\n raise ValueError(\"Argument for intersects should be an annotation\")\n\n if (self.bbox.xmin >= that.bbox.xmax or that.bbox.xmin >= self.bbox.xmax):\n return False\n\n # the coordinates are inverted, so y0 is larger than y1\n if (self.bbox.ymin >= that.bbox.ymax or that.bbox.ymin >= self.bbox.ymax):\n return False\n\n return True", "def contains(self, interval):\n first, last = self._intersect(interval)\n return first != last", "def _range_contains(self, a, b):\n\t\treturn b[0] >= a[0] and b[-1] <= a[-1]", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def contains_point(self, p):\n return self.begin <= p < self.end", "def overlaps(self, other):\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True", "def is_in_interval(self, low, high, value):\n return low <= value and value <= high" ]
[ "0.7450774", "0.72448725", "0.72238123", "0.7217159", "0.721403", "0.72099704", "0.71903545", "0.7162582", "0.71042466", "0.704064", "0.7011865", "0.6966663", "0.69630086", "0.6934209", "0.69117546", "0.6827023", "0.68079567", "0.6803514", "0.67905587", "0.67840844", "0.6782557", "0.6750904", "0.67381305", "0.670704", "0.66939425", "0.66802573", "0.66553485", "0.6655106", "0.66481316", "0.6636417" ]
0.7603803
0
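Aside: the overlaps document above relies on the standard half-open interval condition begin < other.end and end > other.begin. A minimal standalone sketch of that check on plain numbers, assuming [begin, end) semantics as in the record (the function name and arguments here are illustrative, not taken from the dataset):

def ranges_overlap_half_open(begin1, end1, begin2, end2):
    # Two half-open ranges [begin1, end1) and [begin2, end2) overlap
    # exactly when some point C satisfies begin1 <= C < end1 and
    # begin2 <= C < end2, which reduces to the comparison below.
    return begin1 < end2 and end1 > begin2

assert ranges_overlap_half_open(0, 5, 4, 10)       # share [4, 5)
assert not ranges_overlap_half_open(0, 5, 5, 10)   # touching endpoints only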
Return the overlap size between two intervals or a point
def overlap_size(self, begin, end=None):
    overlaps = self.overlaps(begin, end)
    if not overlaps:
        return 0
    if end is not None:
        # case end is given
        i0 = max(self.begin, begin)
        i1 = min(self.end, end)
        return i1 - i0
    # assume the type is interval, in other cases, an exception will be thrown
    i0 = max(self.begin, begin.begin)
    i1 = min(self.end, begin.end)
    return i1 - i0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap_len(range1, range2):\n return min(range1[1], range2[1]) - max(range1[0], range2[0])", "def find_overlap_range(x1, lenght1, x2, length2):\n\n\n highest_start_point = max(x1, x2)\n lowest_end_point = min(x1 + lenght1, x2 + length2)\n \n if highest_start_point >= lowest_end_point:\n return None\n \n overlap_length = lowest_end_point - highest_start_point\n \n return (highest_start_point, overlap_length)", "def iOverlapLen (a1, a2, b1, b2):\n if a1<=b1 and b2<=a2: # if b1 and b2 is between a1 and a2\n return float( (a2-a1) - ((b1-a1)+(a2-b2)) )\n elif b1<=a1 and a2<=b2: # if a1 and a2 is between b1 and b2\n return float( (b2-b1) - ((a1-b1)+(b2-a2)) )\n elif (a1>=b1 and a1<=b2) or (a1<=b2 and b2<=a2):\n # # if a1 is between b1 and b2 OR if b1 is between a1 and a2\n return float(b2-a1)\n elif (b1<=a2 and a2<=b2) or (b1>=a1 and b1<=a2):\n # if a2 is between b1 and b2 OR if b2 is between a1 and a2\n return float(a2-b1)\n else:\n return float(0)", "def get_overlap(a, b):\n return max(0, min(a[1], b[1]) - max(a[0], b[0]))", "def percentages_overlapping(self, other: 'Interval') -> Optional['Interval']:\n intersection = Interval.intersection([self, other])\n if intersection is None:\n return None\n if self.length == 0:\n return Interval(0, 1)\n return Interval(\n (intersection.a - self.a) / self.length,\n (intersection.b - self.a) / self.length)", "def poverlap(t1, t2, size1, size2):\n x0 = t1[0]\n y0 = t1[1]\n x1 = t1[0] + size1[0]\n y1 = t1[1] + size1[1]\n\n x2 = t2[0]\n y2 = t2[1]\n x3 = t2[0] + size2[0]\n y3 = t2[1] + size2[1]\n \n ol = max(0, min(x1, x3) - max(x0, x2)) * max(0, min(y1, y3) - max(y0, y2))\n\n return ol / float(2*(size2[0]*size2[1]) - ol)", "def overlap_area(a, b):\n return min(a[2] - b[0], b[2] - a[0]) * min(a[3] - b[1], b[3] - a[1])", "def intersectarea(p1,p2,size):\n x1, y1 = p1\n x2, y2 = p2\n ix1, iy1 = max(x1,x2), max(y1,y2)\n ix2, iy2 = min(x1+size,x2+size), min(y1+size,y2+size)\n iarea = abs(ix2-ix1)*abs(iy2-iy1)\n if iy2 < iy1 or ix2 < ix1: iarea = 0\n return iarea", "def calcOverlap(intervals):\n bp = 0 \n for i in intervals:\n bp += sum([overlapCases(i, j) for j in intervals])\n return(bp)", "def points_to_size(start_point: tuple, end_point: tuple):\n return tuple(b - a + 1 for a, b in zip(start_point, end_point))", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def detect_overlap_1d(first, first_length, second, second_length):\n first_end = first + first_length - 1\n second_end = second + second_length - 1\n return second_end >= first and first_end >= second", "def real_overlap(from_sequence, to_sequence):\n sequence_overlap = difflib.SequenceMatcher(None, from_sequence, to_sequence)\n start = 0\n start_pos_from = -1\n start_pos_to = -1\n size = -1\n while start_pos_to != 0 or not start_pos_from+size == len(from_sequence):\n start_pos_from, start_pos_to, size = sequence_overlap.find_longest_match(start, \\\n len(from_sequence), 0, len(to_sequence))\n if not start_pos_to == 0 or not start_pos_from+size == len(from_sequence):\n start = start_pos_from+1\n\n return size", "def rectangle_overlap(a, b):\n # Decompose the coordinates\n a_x0, a_y0, a_x1, a_y1 = a\n b_x0, b_y0, b_x1, b_y1 = b\n\n if a_x1 < b_x0 or b_x1 < a_x0 or a_y1 < b_y0 or b_y1 < a_y0:\n # No intersection\n return 0, None\n else:\n x0, y0, x1, y1 = get_overlap_rectangle(a, b, relative=True)\n width = x1 - x0\n height = y1 - y0\n return (width * height, (x0, y0, x1, y1))", "def get_bbox_overlap(self, that, epsilon):\n if (not 
isinstance(that, Annotation)):\n raise ValueError(\"Argument for intersects should be an annotation\")\n\n # find the width and height of the overlapping rectangle\n width = min(self.bbox.xmax, that.bbox.xmax) - \\\n max(self.bbox.xmin, that.bbox.xmin)\n height = min(self.bbox.ymax, that.bbox.ymax) - \\\n max(self.bbox.ymin, that.bbox.ymin)\n\n height = abs(that.bbox.ymax - self.bbox.ymin) + epsilon\n\n return (width, height)", "def range_overlap(range1, range2):\n return range(max(range1[0], range2[0]), min(range1[1], range2[1]))", "def bbox_overlap(bbox_a, bbox_b):\n ymin_a, xmin_a, ymax_a, xmax_a = bbox_a\n ymin_b, xmin_b, ymax_b, xmax_b = bbox_b\n\n x_intersection = min(xmax_a, xmax_b) - max(xmin_a, xmin_b) + 1\n y_intersection = min(ymax_a, ymax_b) - max(ymin_a, ymin_b) + 1\n\n if x_intersection <= 0 or y_intersection <= 0:\n return 0\n else:\n return x_intersection * y_intersection", "def overlap_cost(track_a, track_b):\n return 1 - overlap(track_a.bbox, track_b.bbox)", "def rOverlapArea (x1, y1, w1, h1, x2, y2, w2, h2):\n \n if x1<=x2<=(x1+w1) or y1<=y2<=(y1+h1) or x1<=(x2+w2)<=(x1+w1):\n return (x1+w1) - ((x2-x1)+((x1+w1)-(x2+w2)))\n else:\n return False", "def overlap(self, a, b):\n return np.maximum(a, b)", "def ranges_overlap(start1, end1, start2, end2):\n return start1 <= end2 and end1 >= start2", "def iou(bbox1, bbox2):\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union", "def iou(bbox1, bbox2):\n\n bbox1 = [float(x) for x in bbox1]\n bbox2 = [float(x) for x in bbox2]\n\n (x0_1, y0_1, x1_1, y1_1) = bbox1\n (x0_2, y0_2, x1_2, y1_2) = bbox2\n\n # get the overlap rectangle\n overlap_x0 = max(x0_1, x0_2)\n overlap_y0 = max(y0_1, y0_2)\n overlap_x1 = min(x1_1, x1_2)\n overlap_y1 = min(y1_1, y1_2)\n\n # check if there is an overlap\n if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:\n return 0\n\n # if yes, calculate the ratio of the overlap to each ROI size and the unified size\n size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)\n size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)\n size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)\n size_union = size_1 + size_2 - size_intersection\n\n return size_intersection / size_union", "def pred_overlap(t, h):\n a_set = set(get_pred(t))\n b_set = set(get_pred(h))\n return len(a_set&b_set)/float(len(a_set|b_set))", "def span_overlap(a: Tuple[int, int], b: Tuple[int, int]) -> bool:\n return not (a[0] > b[1] or a[1] < b[0])", "def _intersect_interval(self, other):\n interval = Intersection(self.interval, other.interval)\n return interval.inf, interval.sup", "def _calculate_area_overlap(self, wake_velocities, freestream_velocities, turbine):\n count = np.sum(freestream_velocities - wake_velocities <= 0.05)\n return (turbine.grid_point_count - count) / turbine.grid_point_count", "def total_range_size(self) -> int:\n if not len(self):\n return 
0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()", "def boundaries_size(*args):\n return _ida_hexrays.boundaries_size(*args)", "def getOverlap(self):\n return 0.5" ]
[ "0.73398906", "0.71709347", "0.71136653", "0.7091278", "0.69940406", "0.6926135", "0.68776464", "0.66812265", "0.65890884", "0.650149", "0.64867634", "0.6443508", "0.6277583", "0.62052244", "0.6189153", "0.61834484", "0.6166948", "0.6160046", "0.6154346", "0.61486506", "0.613852", "0.6111011", "0.6111011", "0.610442", "0.60978097", "0.6081898", "0.6061143", "0.6054152", "0.6037172", "0.60124224" ]
0.7516056
0
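Aside: the overlap_size document above clips the two ranges against each other and returns min(end) - max(begin) once an overlap is established. The same arithmetic as a standalone sketch on plain numbers, again assuming half-open [begin, end) intervals (names are illustrative):

def overlap_size_half_open(begin1, end1, begin2, end2):
    # Length of the intersection of [begin1, end1) and [begin2, end2),
    # or 0 when the ranges do not overlap at all.
    if not (begin1 < end2 and end1 > begin2):
        return 0
    return min(end1, end2) - max(begin1, begin2)

assert overlap_size_half_open(0, 5, 3, 10) == 2    # intersection is [3, 5)
assert overlap_size_half_open(0, 5, 7, 10) == 0    # disjoint ranges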
Whether the Interval contains p.
def contains_point(self, p):
    return self.begin <= p < self.end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contains(self, p):\n return self.distance(p=p) < self.tolerance", "def contains(self, p):\n p = base.getvector(p)\n if len(p) == 2:\n p = np.r_[p, 1]\n return base.iszero(self.line * p)", "def containsPoint(self, p):\n return self.frameGeometry().contains(p)", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def contains(self, pt):\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)", "def __contains__(self, other):\n if isinstance(other, (sppasInterval,\n sppasPoint,\n float, int)) is False:\n raise AnnDataTypeError(other,\n \"sppasInterval, sppasPoint, float, int\")\n\n if isinstance(other, sppasInterval):\n return (self.__begin <= other.get_begin() and\n other.get_end() <= self.__end)\n\n return self.__begin <= other <= self.__end", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def inside(self, p: PointType, q: PointType) -> bool:\n\n # XXX re-implement with ccw and a list of points instead of a pair\n\n i = min(p.x, q.x) < self.x < max(p.x, q.x)\n j = min(p.y, q.y) < self.y < max(p.y, q.y)\n\n return i and j", "def contains(self, p, radius=0.0):\n # note the sign of radius is negated if the polygon is drawn clockwise\n # https://stackoverflow.com/questions/45957229/matplotlib-path-contains-points-radius-parameter-defined-inconsistently\n # edges are included but the corners are not\n\n if isinstance(p, (list, tuple)) or (isinstance(p, np.ndarray) and p.ndim == 1):\n return self.path.contains_point(tuple(p), radius=radius)\n else:\n return self.path.contains_points(p.T, radius=radius)", "def __contains__(self, point: Point[Scalar]) -> bool:\n return point in self._points_set", "def contains_point(self, point):\n if self.orientation(point) == 0:\n return point >= min(self.begin, self.end) and point <= max(self.begin, self.end)\n\n return False", "def within(p, q, r):\r\n return p <= q <= r or r <= q <= p", "def contains(self, interval):\n first, last = self._intersect(interval)\n return first != last", "def contains(self, point):\n if in_range(point[0], self.xrange) and in_range(point[0], self.yrange) and in_range(point[0], self.zrange):\n return True\n return False", "def __contains__(self, f) :\n if self.__disc is infinity :\n return True\n \n (s, l) = f\n\n (a, _, c) = apply_GL_to_form(self.__p1list[l], s)\n if not c % self.__level == 0 :\n return False\n\n return a + c < self.index()", "def is_inside(self, p) -> bool:\r\n h = self.wedge\r\n inside = False\r\n if lefton(h, p):\r\n while not h.nexthedge is self.wedge:\r\n h = h.nexthedge\r\n if not lefton(h, p):\r\n return False\r\n return True\r\n else:\r\n return False", "def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False", "def _is_p_inside_points_hull(points, p):\n\n from scipy.spatial import ConvexHull\n\n hull = ConvexHull(points)\n new_points = np.append(points, p, axis=0)\n new_hull = ConvexHull(new_points)\n if list(hull.vertices) == list(new_hull.vertices):\n return True\n else:\n return False", "def 
contained(query, intervalset):\n for i in intervalset:\n if query == i:\n continue\n if query[0] <= i[0] and i[1] <= query[1] and i[1]-i[0] < query[1]-query[0]:\n return True\n return False", "def __contains__(self, point, e=1e-10):\n if point == self.p1:\n return True\n v1 = Vector.createFromTwoPoints(self.p1, point)\n v2 = self.getVector()\n return (abs(v1.angle - v2.angle) % (2 * math.pi) < e) and (v1.norm <= v2.norm)", "def contains(self, point : Point):\n return ( self.corner.x <= point.x <= (self.corner.x + self.width)\n and self.corner.y <= point.y <= (self.corner.y + self.height))", "def contains(self, another_interval):\n if another_interval.left_endpoint < self.left_endpoint:\n return False\n if another_interval.left_endpoint == self.left_endpoint:\n if not self.left_closed and another_interval.left_closed:\n return False\n if another_interval.right_endpoint > self.right_endpoint:\n return False\n if another_interval.right_endpoint == self.right_endpoint:\n if not self.right_closed and another_interval.right_closed:\n return False\n return True", "def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n return any(point in u for u in self.list_poly)", "def __contains__(self, point: Point2D) -> bool:\n raise NotImplementedError", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for H in self.Hrep_generator():\n if not H.contains(p):\n return False\n return True", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1" ]
[ "0.7817759", "0.7427082", "0.7307771", "0.71583736", "0.70834273", "0.70834273", "0.7017026", "0.6873289", "0.68610114", "0.68444407", "0.6707772", "0.6675187", "0.66390264", "0.65507036", "0.651224", "0.6506173", "0.64738977", "0.64628136", "0.642916", "0.6428434", "0.64103", "0.64049006", "0.6404088", "0.6400745", "0.6379375", "0.6343825", "0.6333571", "0.6315722", "0.62884504", "0.6281729" ]
0.8188833
0
Whether this equals the null interval.
def is_null(self):
    return self.begin >= self.end
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_null(self):\n return self.value is None", "def is_null(self):\n return self.length2 < pygonal.EPSILON2", "def is_null(self) -> bool:\n return self.allele1 == -1 and self.allele2 == -1", "def is_null(self) -> bool:\n for y in range(0, self.num_of_rows):\n for x in range(0, self.num_of_cols):\n if self._A[y][x] != 0:\n return False\n return True", "def has_null(self) -> bool:\n\n return any([x is NULL for x in self.values])", "def __nonzero__(self):\n return not (self.year is None and\n self.month is None and\n self.day is None)", "def is_null(self):\n return self._internal_handle() == 0", "def is_null(self):\n return self._internal_handle() == 0", "def is_null(self):\n return self._internal_handle() == 0", "def is_null(self):\n return self._internal_handle() == 0", "def is_null(self):\n return self._internal_handle() == 0", "def is_empty(self):\n return all(x is None for x in self._values.values())", "def isEmpty(self):\n return self.start == -1 and self.end == -1", "def isNull(self):\n return self.__column is None", "def isempty(self):\n\n if self.values is None or self.values.empty:\n return True", "def isNull(self):\n for query in self.__queries:\n if not query.isNull():\n return False\n else:\n return True", "def _val_is_null(self, val):\r\n return val is None", "def is_interval(self):\n return len(self.interval_list) > 0", "def is_nilpotent(self):\n if self._is_nilpotent is None:\n lcs = self.lower_central_series()\n terminator = lcs[len(lcs) - 1]\n gens = terminator.generators\n degree = self.degree\n identity = _af_new(list(range(degree)))\n if all(g == identity for g in gens):\n self._is_solvable = True\n self._is_nilpotent = True\n return True\n else:\n self._is_nilpotent = False\n return False\n else:\n return self._is_nilpotent", "def _isEmpty(self, x, y):\n\t\treturn self.getValue(x, y) == None", "def 是否为空(self): # real signature unknown; restored from __doc__\n return self.IsEmpty()", "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def est_nul(self):\n\t\tif self.__valide:\n\t\t\treturn (self.degre() == 0) and (self.valuation().est_nul())\n\t\telse:\n\t\t\treturn False", "def is_empty(self):\n return not bool(self.range)", "def __nonzero__(self):\n return not self.as_point == (0, 0)", "def is_empty(self):\n return len(self.values) == 0", "def is_empty(self):\n return not bool(self.values)", "def __nonzero__(self):\n for e in self:\n if e != 0:\n return True\n return False", "def __nonzero__(self):\n\n return not ipset.ipset_is_empty(self.set)", "def empty(self):\n return not self.values" ]
[ "0.7243578", "0.70753247", "0.7025592", "0.7005975", "0.69593084", "0.673967", "0.67244315", "0.67244315", "0.67244315", "0.67244315", "0.67244315", "0.671538", "0.6705342", "0.6700119", "0.6637087", "0.66299367", "0.66105014", "0.657015", "0.65618104", "0.6559858", "0.64235765", "0.64190763", "0.6409305", "0.63841146", "0.63634235", "0.6362654", "0.63625866", "0.63323873", "0.6320279", "0.6318258" ]
0.774378
0
Less than operator. Parrots __cmp__()
def __lt__(self, other): return self.__cmp__(other) < 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __lt__(self, other):\n return self.lessThan(other)", "def __lt__(self, other):\n return less(self, other)", "def __lt__(self, other):\n return self.__le__(other) and self.__ne__(other)", "def __lt__(self, other):\n return other > self._cmpkey()", "def __lt__(self, other):\n return self.f() < other.f()", "def __lt__(self, other):\n return self._obj_func() < other._obj_func()", "def __lt__(self, other):\n return self <= other and not self >= other", "def less_than(self) -> global___Expression:", "def __lt__(self, other):\n return self <= other and self != other", "def __lt__(self, other):\n return self <= other and self != other", "def __lt__(self, other):\n self.conds.append((self.name, '<', other))\n return self", "def _less_than_op(spec):", "def less(lhs, rhs):\n return _make.less(lhs, rhs)", "def __lt__(self, other: Any) -> ColumnOperators:\n return self.operate(lt, other)", "def __lt__(self, other):\n return self.element() < other.element()", "def __lt__(self, other: t.Any) -> bool:\n return self._op_bool('__lt__', other)", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def __lt__(self, other):\n return self.priority < other.priority", "def __lt__(self, other):\n return self.priority < other.priority", "def __lt__(self, other):\n return self.priority < other.priority", "def _cmp(a, b): # pylint: disable=invalid-name\n return (a > b) - (a < b)", "def __lt__(self, other):\n return self.score < other.score", "def __lt__(self, other):\n return True", "def __lt__(self, other):\n return True", "def __lt__(self, other: 'LTL'):\n lt = self <= other\n neq = self != other\n return lt and neq", "def __lt__(self,other):\r\n\t\treturn self.n < other.n", "def __lt__(self, value):\n self = self.__le__(value)\n return self.__invert__()", "def __lt__(self, other):\n return self._priority < other._priority", "def __lt__(self, rhs):\n return self.balance < rhs.balance", "def __le__(self, rhs):\n \n result = (self == rhs or self < rhs)\n return result" ]
[ "0.82182264", "0.7857503", "0.7801803", "0.7793374", "0.77342093", "0.77223086", "0.77172345", "0.7710354", "0.7697853", "0.7697853", "0.76659536", "0.76443315", "0.7621985", "0.76200825", "0.76200014", "0.75991046", "0.75313133", "0.752628", "0.752628", "0.752628", "0.75240546", "0.75100577", "0.7489349", "0.7489349", "0.7472164", "0.74518096", "0.74490947", "0.7441249", "0.7391395", "0.7360376" ]
0.8009337
1
Executable string representation of this Interval.
def __repr__(self):
    if isinstance(self.begin, Number):
        s_begin = str(self.begin)
        s_end = str(self.end)
    else:
        s_begin = repr(self.begin)
        s_end = repr(self.end)
    if self.data is None:
        return "Interval({0}, {1})".format(s_begin, s_end)
    else:
        return "Interval({0}, {1}, {2})".format(s_begin, s_end, repr(self.data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n s = \"{}, {}\".format(self.left_endpoint, self.right_endpoint)\n if self.left_closed:\n left_bracket = '['\n else:\n left_bracket = '('\n\n if self.right_closed:\n right_bracket = ']'\n else:\n right_bracket = ')'\n interval_string = left_bracket + s + right_bracket\n return 'Interval({})'.format(interval_string)", "def __str__(self):\n iso_time = str(datetime.datetime.fromtimestamp(self.next_time))\n return \"<Job(%s, %ss, %s)>\" % (iso_time, self.interval, self.func)", "def __str__(self):\n return INTERVALS[self.semitone_interval]", "def __str__(self):\n return str((self.instruction_pointer, self.program,))", "def toString(self) -> str:\n raise NotImplementedError", "def __str__(self) -> str:\n return self.__repr__() + \"\\n\" + \"\\n\".join(self.regimes())", "def __str__(self):\n return \"<aospy.Calc instance: \" + ', '.join(\n (self.name, self.proj.name, self.model.name, self.run.name)\n ) + \">\"", "def as_string(self):\n return self.__repr__()", "def to_string(self):\r\n return self.__str__()", "def __repr__(self):\n return \"{}(start_time={!r}, end_time={!r}, events={!r})\" \\\n .format(self.__class__.__name__,\n self._start_time, self._end_time, self._events)", "def __str__ (self):\n cycles = []\n for cycle in self._getcycles():\n cycles.append(' '.join(str(x) for x in cycle).join('()'))\n return ''.join(cycles)", "def __repr__(self):\n s = \"{}, {}\".format(self.left_endpoint, self.right_endpoint)\n if self.left_closed:\n left_bracket = '['\n else:\n left_bracket = '('\n\n if self.right_closed:\n right_bracket = ']'\n else:\n right_bracket = ')'\n interval_string = left_bracket + s + right_bracket\n return 'TreeNode({})'.format(interval_string)", "def __str__(self):\n info = 'cmd=\"%s\", pid=%d, cwd=%s, alive=%s' % ( self._cmd, \\\n self._pid, \\\n repr(self._cwd), \\\n self.alive)\n return '<%s %s>' % (self.__class__.__name__, info)", "def __str__(self) -> str:\n\n return str(tstack([self._domain, self._range]))", "def _create_formatted_string_with_interval(self):\n if isinstance(self.get_subject_term(),StatementTerm) or isinstance(self.get_subject_term(),CompoundTerm):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string_with_interval()\n else:\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n if not self.is_first_order() and self.interval > 0:\n string = string[:-1] + \\\n NALSyntax.StatementSyntax.TermDivider.value + \\\n str(self.interval) + \\\n string[-1]\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def __str__(self) -> str:\n st = \"<Output> \"\n if self.inst_out:\n st += f'instance:{self.inst_out};'\n st += f'''{self.output} -> {self.target or '\"\"'} -> '''\n if self.inst_in:\n st += f\"instance:{self.inst_in};\"\n st += self.input\n\n if self.params and not self.inst_in:\n st += f\" ({self.params})\"\n if self.delay != 0:\n st += f\" after {self.delay} seconds\"\n if self.times != -1:\n st += \" (once only)\" if self.times == 1 else f\" ({self.times!s} times only)\"\n return st", "def __repr__(self):\n modulename = str(type(self).__module__)\n\n ichars = len(str(int(self.max())))\n slen = ichars + casas\n fstr = \"{{:>{}.{}g}}\".format(slen, casas)\n\n if modulename == \"__main__\":\n s = str(type(self).__name__)\n else:\n s = modulename + '.' 
+ str(type(self).__name__)\n\n s += '(['\n s += ', '.join([fstr.format(x) for x in self.elem])\n s += '])'\n\n return s", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self) -> str:\n return '{:s}({!r})'.format(self.__class__.__name__, self.getvalue())", "def __repr__(self):\n return \"\\'{}\\'\".format(self.__str__())", "def __str__(self):\n\t\tif self.code == Const.NEWLINE:\n\t\t\treturn \"[newline]\"\n\t\t\n\t\tname = \"[\" + self.code\n\t\tif self.code in (Const.numericalLiteral, Const.ID, Const.UET):\n\t\t\tname += \"(\" + self.value + \")\"\n\t\tname += \"]\"\n\t\treturn name", "def __repr__(self):\n params = self.get_params()\n param_values = self.get_param_values(params, [], self.param_kwargs)\n\n # Build up task id\n repr_parts = []\n param_objs = dict(params)\n for param_name, param_value in param_values:\n if param_objs[param_name].significant and \\\n param_objs[param_name].visibility == luigi.parameter.ParameterVisibility.PUBLIC:\n repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))\n\n task_str = '{}({})'.format(self.get_task_family(), ', '.join(repr_parts))\n\n return task_str", "def as_string (self) :\n\n if self.is_machinetag() :\n return \"%s:%s=%s\" % (self.namespace(), self.predicate(), self.value())", "def __str__(self):\n schedule = \"\"\n\n schedule_list = self.in_order_traversal()\n for node in schedule_list:\n schedule += str(node)\n if node is not schedule_list[-1]:\n schedule += \" \"\n return schedule", "def toString():", "def __repr__(self) -> str:\n for index, task in enumerate(self.steps):\n self.names.append(f\"{index+1}- {task[0]}\")\n tasks = \"\\n\".join(self.names)\n rpr = f\"\"\"---- Start ----\n{tasks}\n---- End ----\n \"\"\"\n return rpr", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())", "def to_str(self):\n return pformat(self.to_dict())" ]
[ "0.71480864", "0.696534", "0.6899967", "0.66029596", "0.6529993", "0.6518206", "0.64999825", "0.6445101", "0.62968284", "0.62619025", "0.6259937", "0.6250147", "0.6226611", "0.6221005", "0.62134147", "0.62100565", "0.62071943", "0.62010485", "0.62010485", "0.62010485", "0.61948526", "0.61855954", "0.61621326", "0.61505425", "0.61499447", "0.6133892", "0.6133353", "0.60784876", "0.60784876", "0.60784876" ]
0.72492284
0
Split an s3 uri into the bucket and object name
def split_uri(s3_uri):
    if not s3_uri.startswith("s3://"):
        # This is a local path, indicate using None
        raise ValueError(f"failed to parse s3 uri: {s3_uri}")
    bucket, key = s3_uri.split("s3://")[1].split("/", 1)
    return bucket, key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_uri(uri):\n if not uri.startswith(\"s3://\"):\n raise ValueError(\"Expected S3 URI\")\n\n bucket_name, key = uri.replace(\"s3://\", \"\").split(\"/\", 1)\n return bucket_name, key", "def split_s3_path(url):\n\tparsed = urlparse (url)\n\tif not parsed.netloc or not parsed.path:\n\t\traise ValueError (\"bad s3 path {}\".format (url))\n\tbucket_name = parsed.netloc\n\ts3_path = parsed.path\n\t# Remove '/' at beginning of path.\n\tif s3_path.startswith (\"/\"):\n\t\ts3_path = s3_path[1:]\n\treturn bucket_name, s3_path", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def split_s3_path(url):\n parsed = urlparse(url)\n if not parsed.netloc or not parsed.path:\n raise ValueError(\"bad s3 path {}\".format(url))\n bucket_name = parsed.netloc\n s3_path = parsed.path\n # Remove '/' at beginning of path.\n if s3_path.startswith(\"/\"):\n s3_path = s3_path[1:]\n return bucket_name, s3_path", "def parse_s3_uri(URIs):\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys", "def parse_uri(uri: str) -> Tuple[str, str]:\n scheme, bucket, key, _, _, _ = urllib.parse.urlparse(uri)\n if not scheme == 's3':\n raise ValueError(f'Not a S3 URI: {uri}')\n return (bucket, key.lstrip('/'))", "def parse_s3_url(url):\n parsed_url = urlparse(url)\n if parsed_url.scheme != \"s3\":\n raise ValueError(\"S3 URLs must start with 's3://'\")\n\n bucket = parsed_url.netloc.split(\".\")[0]\n key = parsed_url.path.lstrip(\"/\")\n\n return {\"bucket\": bucket, \"key\": key}", "def _extract_bucket_key(s3_uri: str)->tuple:\n s3_regex=\"^s3://([a-z0-9.-]+)/(.*)$\"\n search =re.search(s3_regex, s3_uri)\n if search is None:\n raise Error(\"Invalid s3 uri: {}\".format(s3_uri))\n return search.groups()", "def parse_s3_url(url):\n result = urlparse.urlparse(url)\n return result.netloc, result.path[1:] # strip leading slash", "def split_gcs_uri(gcs_uri):\n m = GCS_REGEX.match(gcs_uri)\n bucket = m.group(1)\n path = \"\"\n if m.group(2):\n path = m.group(2).lstrip(\"/\")\n return bucket, path", "def split_gcs_uri(gcs_uri):\n m = GCS_REGEX.match(gcs_uri)\n bucket = m.group(1)\n path = \"\"\n if m.group(2):\n path = m.group(2).lstrip(\"/\")\n return bucket, path", "def _get_server_bucket_object(uri):\n # First split the uri into the network location and path, and build the\n # server\n url_p = urlparse(uri)\n # check that the uri contains a scheme and a netloc\n if url_p.scheme == '' or url_p.netloc == '':\n raise APIException(\n \"URI supplied to s3aioFileObject is not well-formed: {}\". 
format(uri)\n )\n server = url_p.scheme + \"://\" + url_p.netloc\n split_path = url_p.path.split(\"/\")\n # get the bucket\n try:\n bucket = split_path[1]\n except IndexError as e:\n raise APIException(\n \"URI supplied has no bucket contained within it: {}\".format(uri)\n )\n # get the path\n try:\n path = \"/\".join(split_path[2:])\n except IndexError as e:\n raise APIException(\n \"URI supplied has no path contained within it: {}\".format(uri)\n )\n return server, bucket, path", "def url(cls, bucket, path):\n if path.startswith('/'):\n path = path[1:]\n if bucket.startswith('http://') or bucket.startswith('https://'):\n url = bucket\n else:\n url = cls.S3_BASE + bucket\n if not url.endswith('/'):\n url += '/'\n return url + path", "def get_bucket_and_path_from_uri(path):\n parsed_url = urlparse(path)\n return parsed_url.netloc, parsed_url.path.lstrip('/')", "def test_parse_s3_bucket_key_url(url, expected_bucket, expected_key):\n bucket, key = ff_utils.parse_s3_bucket_and_key_url(url)\n assert expected_bucket == bucket and key == expected_key", "def split_name(name):\n split_name = [x for x in name.split(\"/\") if x != '']\n bucket_name = split_name[0]\n key_path = \"/\".join(split_name[1:])\n return bucket_name, key_path", "def parse_url(url):\n if url.startswith(URL_SCHEME) and len(url) > len(URL_SCHEME):\n bucket_and_path = url.rstrip('/')[len(URL_SCHEME):].split('/', 1)\n if len(bucket_and_path) == 1:\n bucket_and_path.append('')\n return bucket_and_path\n return (None, None)", "def UriStrFor(iterated_uri, obj):\n return '%s://%s/%s' % (iterated_uri.scheme, obj.bucket.name, obj.name)", "def get_bucket_name_from_url(file_url):\n\tparts = urlparse(file_url)\n\tpaths = parts.path.split(\"/\")\n\treturn paths[1]", "def url(self, bucket, path):\n custom_url = bucket.startswith('http://') or bucket.startswith('https://')\n\n if isinstance(path, list):\n # This is a list of key components that need to be quoted\n # and assembled.\n path = self.key_join(path, encode=custom_url)\n if isinstance(path, bytes):\n path = path.decode(\"utf-8\")\n if path.startswith('/'):\n path = path[1:]\n\n if custom_url:\n url = bucket\n\n if not url.endswith('/'):\n url += '/'\n\n return url + path\n else:\n url = self._generate_s3_url(bucket, path)\n\n return url", "def get_from_s3(s3_client, s3_url):\n url = urlparse(s3_url)\n\n # Split the bucket from the key\n bucket_name = urllib2.unquote(url.netloc).decode('utf8')\n key_name = urllib2.unquote(url.path[1:]).decode('utf8')\n\n # We're done parsing; start doing some S3 ops\n bucket = s3_client.get_bucket(bucket_name, validate=False)\n key = bucket.get_key(key_name)\n return key.get_contents_as_string()", "def trim_s3_bucket_from_path(self, fullpath):\n return fullpath.replace(self.bucket_base, '')", "def to_uri(bucket: str, key: str) -> str:\n return f's3://{bucket}/{key}'", "def _get_dest_obj_name(initial_src, obj):\n immed_prefix = \"\"\n if _is_s3(initial_src):\n immed_prefix = _extract_immediate_prefix(_extract_bucket_key(initial_src)[1])\n else:\n if os.path.isdir(os.path.abspath(initial_src)):\n immed_prefix = os.path.basename(os.path.abspath(initial_src))\n else:\n immed_prefix = _extract_immediate_prefix(initial_src)\n \n if immed_prefix == \"\":\n return obj\n else:\n return obj.split(\"{}/\".format(immed_prefix))[-1]", "def s3_url(row):\n return f's3://{row[\"Bucket\"]}/{row[\"Key\"]}'", "def _get_object_name(self, object_url):\n infos = str(object_url).split('/')\n return infos[len(infos) - 1]", "def get_matching_s3_keys(bucket, prefix=\"\", 
suffix=\"\"):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj[\"Key\"]\n\n def download_froms3(myfile, env='prod'):\n # session = boto3.Session(profile_name=PROFILE)\n boto_s3_session = boto3.Session(profile_name=env)\n s3 = boto_s3_session.resource('s3')\n s3client = boto_s3_session.client('s3', region_name='eu-west-2')\n try:\n file_name = unquote(myfile.split('/')[-1])\n oparse = urlparse(myfile, allow_fragments=False)\n print(oparse)\n S3_SRC_BUCKET_NAME = oparse.netloc\n key = oparse.path[1:]\n download_path = '{0}{1}'.format(BASE_PATH, file_name)\n print(f'Downloading from {S3_SRC_BUCKET_NAME} , {key} to {download_path} ')\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(key, download_path)\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(file_name, download_path)\n s3client.download_file(S3_SRC_BUCKET_NAME, key, download_path)\n print('File Downloaded')\n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\", err)\n else:\n # raise\n error = str(err)\n print(error)\n\n return myfile", "def build_bucket_url(bucket_name) -> str:\n return \"https://s3.console.aws.amazon.com/s3/buckets/{0}\".format(bucket_name)", "def url_for(filename):\n return \"{}{}\".format(S3_LOCATION, filename)", "def test_parse_url(self):\n filename = 'demo-file.tar.gz'\n backend = BackendS3(**self.config)\n pb = PathBuilder('123456')\n base_url = backend.get_url()\n id = utils.generate_id(filename)\n parts = backend.id_to_path(id)\n path = '/'.join(parts)\n object_url = base_url + '/' + path + '/'\n original = object_url + filename\n crop_filename = pb.get_auto_crop_filename(id, '100x100', 'fit', 'jpg')\n resize = object_url + crop_filename\n result1 = backend.parse_url(original)\n result2 = backend.parse_url(resize)\n self.assertEquals(id, result1[0])\n self.assertEquals(filename, result1[1])\n self.assertEquals(id, result2[0])\n self.assertEquals(crop_filename, result2[1])" ]
[ "0.84234256", "0.79439884", "0.78763926", "0.78763926", "0.767611", "0.76380134", "0.7330717", "0.71763176", "0.69720215", "0.69384485", "0.69290155", "0.6860623", "0.6848078", "0.674747", "0.65667576", "0.6564563", "0.63594395", "0.635297", "0.6327186", "0.630025", "0.6298736", "0.6292263", "0.62581104", "0.6224674", "0.62240344", "0.6120527", "0.61055", "0.61034465", "0.6070935", "0.5994077" ]
0.83376247
1
Converts a first or second level batch to human readable
def first_or_second_level_to_human_readable(batch):
    job_level_batches = db.get_child_batch_metadata(
        batch[Attributes.BATCH_ID], BatchMetadataType.JOB_LEVEL
    )
    job_responses = [
        job_level_to_human_readable(job_level_batch) for job_level_batch in job_level_batches
    ]
    return {
        "status": batch[Attributes.BATCH_STATUS],
        "numChildBatches": batch[Attributes.NUM_CHILD_BATCHES],
        "numChildBatchesComplete": batch[Attributes.NUM_CHILD_BATCHES_COMPLETE],
        "jobLevels": job_responses,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def input_batch_to_human_readable(batch):\n\n # User should only be querying for parent batches of type \"INPUT\", not frame\n # level batches.\n if batch[Attributes.BATCH_METADATA_TYPE] != BatchMetadataType.INPUT:\n logger.error(\n \"User requested existing batch, but it is of the wrong input type: %s\",\n batch[Attributes.BATCH_ID],\n )\n return None\n\n response = {\n \"batchId\": batch[Attributes.BATCH_ID],\n \"status\": batch[Attributes.BATCH_STATUS],\n # Straight copy of request labeling jobs to acknowledge the request.\n \"inputLabelingJobs\": batch[Attributes.LABELING_JOBS],\n }\n\n stage_attributes = [\n (\"firstLevel\", BatchMetadataType.FIRST_LEVEL),\n (\"secondLevel\", BatchMetadataType.SECOND_LEVEL),\n (\"thirdLevel\", BatchMetadataType.THIRD_LEVEL),\n ]\n\n for field_name, attribute in stage_attributes:\n first_or_second_level_batches = db.get_child_batch_metadata(\n batch[Attributes.BATCH_ID], attribute\n )\n for first_or_second_level_batch in first_or_second_level_batches:\n response[field_name] = first_or_second_level_to_human_readable(\n first_or_second_level_batch\n )\n\n return response", "def job_level_to_human_readable(batch):\n response = {\n \"batchId\": batch[Attributes.BATCH_ID],\n \"batchStatus\": batch[Attributes.BATCH_STATUS],\n \"labelingJobName\": batch[Attributes.LABELING_JOB_NAME],\n \"labelAttributeName\": batch[Attributes.LABEL_ATTRIBUTE_NAME],\n \"labelCategoryS3Uri\": batch[Attributes.LABEL_CATEGORY_CONFIG],\n \"jobInputS3Uri\": batch[Attributes.JOB_INPUT_LOCATION],\n \"jobInputS3Url\": create_presigned_url(batch[Attributes.JOB_INPUT_LOCATION]),\n \"jobOutputS3Uri\": batch[Attributes.JOB_OUTPUT_LOCATION],\n \"jobOutputS3Url\": create_presigned_url(batch[Attributes.JOB_OUTPUT_LOCATION]),\n }\n\n num_frames = batch.get(Attributes.NUM_CHILD_BATCHES)\n num_frames_completed = batch.get(Attributes.NUM_CHILD_BATCHES_COMPLETE)\n if num_frames is not None and num_frames_completed is not None:\n response[\"numFrames\"] = num_frames\n response[\"numFramesCompleted\"] = num_frames_completed\n\n return response", "def batchify_summary(batch):\r\n\r\n if type(batch[0][1]) != torch.LongTensor:\r\n no_elmo, use_char = (True, False) if batch[0][1] == -2 else (False, False)\r\n else:\r\n no_elmo, use_char = True, True\r\n\r\n docs = [ex[0] for ex in batch]\r\n docs_char = [ex[1] for ex in batch]\r\n summaries = [ex[2] for ex in batch]\r\n\r\n # Batch documents\r\n max_doc_length = max([d.size(0) for d in docs])\r\n x1_len = torch.LongTensor(len(docs)).zero_()\r\n x1 = torch.LongTensor(len(docs),\r\n max_doc_length).zero_() if no_elmo else torch.LongTensor(len(docs),\r\n max_doc_length,\r\n 50).zero_()\r\n x1_char = torch.LongTensor(len(docs),\r\n max_doc_length,\r\n docs_char[0].size(1)).zero_() if (no_elmo and use_char) else None\r\n for i, d in enumerate(docs):\r\n x1_len[i] = d.size(0)\r\n x1[i, :d.size(0)].copy_(d)\r\n if not no_elmo:\r\n x1_char[i, :d.size(0), :].copy_(docs_char[i])\r\n\r\n # Batch answers\r\n max_ans_length = max([a.size(0) for a in summaries])\r\n ans_len = torch.LongTensor(len(summaries)).zero_()\r\n ans = torch.LongTensor(len(summaries), max_ans_length).zero_()\r\n for i, a in enumerate(summaries):\r\n ans_len[i] = a.size(0)\r\n ans[i, :a.size(0)].copy_(a)\r\n\r\n ids = [ex[3] for ex in batch]\r\n contexts = [ex[4] for ex in batch]\r\n # FIXME: multiple answers are possible, fix vectorize also.\r\n targets = [ex[5] for ex in batch]\r\n src_vocabs = [ex[6] for ex in batch]\r\n source_maps = []\r\n alignments = []\r\n\r\n # Prepare source 
vocabs, alignment [required for Copy Attention]\r\n for eid, context, target, (token2idx, idx2token) in \\\r\n zip(ids, contexts, targets, src_vocabs):\r\n # Mapping source tokens to indices in the dynamic dict.\r\n src_map = torch.LongTensor([token2idx[w] for w in context])\r\n source_maps.append(src_map)\r\n\r\n # TODO: does skipping the first and last token in answer valid?\r\n mask = torch.LongTensor([token2idx[w] if w in token2idx\r\n else UNK for w in target])\r\n alignments.append(mask)\r\n\r\n return {'doc_rep': x1,\r\n 'doc_char_rep': x1_char,\r\n 'doc_len': x1_len,\r\n 'summ_rep': ans,\r\n 'summ_len': ans_len,\r\n 'ids': ids,\r\n 'documents': contexts,\r\n 'answers': targets,\r\n 'source_vocabs': src_vocabs,\r\n 'src_map': source_maps,\r\n 'alignment': alignments}", "def collate_fn(batch):\n sentence1 = [item[0] for item in batch]\n sentence2 = [item[1] for item in batch]\n label = [item[2] for item in batch]\n label = torch.tensor(label)\n return sentence1, sentence2, label", "def _format_batch(self, *args):\n result = [np.array(item) for item in args]\n if len(result) == 1:\n return result[0]\n return result", "def batch_info():\n return BatchInfo(\"UFG Hackathon\")", "def collator(self, batch):\n\n # Retrieve data from batch\n ids = [item[\"ids\"] for item in batch]\n label = [item[\"label\"] for item in batch]\n\n # Sort the list\n ids, label = map(\n list,\n zip(\n *sorted(\n zip(ids, label), key=lambda _tuple: len(_tuple[0]), reverse=True,\n )\n ),\n )\n\n max_len = len(ids[0])\n\n # Initialize seq len list\n text_lengths = []\n new_ids = []\n for id in ids:\n\n _len = len(id)\n pad_len = max_len - _len\n\n if pad_len < 0:\n id = id[:max_len]\n else:\n id = np.pad(\n id, (0, pad_len), \"constant\", constant_values=self.pad_id\n ).tolist()\n\n new_ids.append(id)\n\n text_lengths.append(_len if _len < max_len else max_len)\n\n label = torch.tensor(label)\n text_lengths = torch.tensor(text_lengths)\n text = np.stack(new_ids)\n text = torch.from_numpy(text)\n\n return {\"label\": label, \"text_lengths\": text_lengths, \"text\": text}", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def batches2string(batches):\n s = [''] * batches[0].shape[0]\n for b in batches:\n s = [''.join(x) for x in zip(s, characters(b))]\n return s", "def pp_batch(self, batch):\n self.separator()\n for item in batch:\n print(item.ljust(27, ' ') + ': {}'.format(batch[item]))\n \n self.separator()", "def dev_collate(batch, level: int = 1, logger_name: str = \"dev_collate\"):\n elem = batch[0]\n elem_type = type(elem)\n l_str = \">\" * level\n batch_str = f\"{batch[:10]}{' ... 
' if len(batch) > 10 else ''}\"\n if isinstance(elem, torch.Tensor):\n try:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of tensors\")\n return torch.stack(batch, 0)\n except TypeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, type {[type(elem).__name__ for elem in batch]} in collate({batch_str})\"\n )\n return\n except RuntimeError as e:\n logging.getLogger(logger_name).critical(\n f\"{l_str} E: {e}, shape {[elem.shape for elem in batch]} in collate({batch_str})\"\n )\n return\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n if elem_type.__name__ in [\"ndarray\", \"memmap\"]:\n logging.getLogger(logger_name).critical(f\"{l_str} collate/stack a list of numpy arrays\")\n return dev_collate([torch.as_tensor(b) for b in batch], level=level, logger_name=logger_name)\n elif elem.shape == (): # scalars\n return batch\n elif isinstance(elem, (float, int, str, bytes)):\n return batch\n elif isinstance(elem, abc.Mapping):\n out = {}\n for key in elem:\n logging.getLogger(logger_name).critical(f'{l_str} collate dict key \"{key}\" out of {len(elem)} keys')\n out[key] = dev_collate([d[key] for d in batch], level=level + 1, logger_name=logger_name)\n return out\n elif isinstance(elem, abc.Sequence):\n it = iter(batch)\n els = list(it)\n try:\n sizes = [len(elem) for elem in els] # may not have `len`\n except TypeError:\n types = [type(elem).__name__ for elem in els]\n logging.getLogger(logger_name).critical(f\"{l_str} E: type {types} in collate({batch_str})\")\n return\n logging.getLogger(logger_name).critical(f\"{l_str} collate list of sizes: {sizes}.\")\n if any(s != sizes[0] for s in sizes):\n logging.getLogger(logger_name).critical(\n f\"{l_str} collate list inconsistent sizes, got size: {sizes}, in collate({batch_str})\"\n )\n transposed = zip(*batch)\n return [dev_collate(samples, level=level + 1, logger_name=logger_name) for samples in transposed]\n logging.getLogger(logger_name).critical(f\"{l_str} E: unsupported type in collate {batch_str}.\")\n return", "def collate_sentences(batch: List[Tuple]):\n # fill this list with all the labels in the batch\n batch_labels = []\n\n # we need to find the maximum length of a sentence in this batch\n max_len = 0\n for i in batch:\n if len(i[0]) > max_len:\n max_len = len(i[0])\n batch_size = len(batch)\n\n # print('batch size',batch_size)\n # initialize a Tensor filled with zeros (aka index of <PAD>)\n batch_sentences = torch.LongTensor(batch_size, max_len).fill_(0)\n\n # fill each row idx in batch_sentences with the corresponding\n # sequence tensor\n #\n # ... batch_sentences[idx, ...] 
= ...\n for idx in range(0, batch_size):\n # print(idx)\n # print(len(batch[idx][0]))\n # print(len(batch_sentences[idx]))\n batch_sentences[idx][0:len(batch[idx][0])] = batch[idx][0]\n print(batch[idx])\n batch_labels.append(batch[idx][1])\n # print(batch_sentences[idx])\n print(type(batch_labels))\n # batch_labels = [torch.LongTensor(x) for x in batch_labels]\n batch_labels = torch.tensor(batch_labels)\n # print(batch_labels)\n return batch_sentences, batch_labels", "def conv_batchify(self, batch):\n batch_roles = []\n batch_context_tokens = []\n batch_response = []\n\n for conv_dict in batch:\n batch_roles.append(0 if conv_dict['role'] == 'Seeker' else 1)\n context_tokens = [utter + [self.conv_bos_id] for utter in conv_dict['context_tokens']]\n context_tokens[-1] = context_tokens[-1][:-1]\n batch_context_tokens.append(\n truncate(merge_utt(context_tokens), max_length=self.context_truncate, truncate_tail=False),\n )\n batch_response.append(\n add_start_end_token_idx(\n truncate(conv_dict['response'], max_length=self.response_truncate - 2),\n start_token_idx=self.start_token_idx,\n end_token_idx=self.end_token_idx\n )\n )\n\n batch_context_tokens = padded_tensor(items=batch_context_tokens,\n pad_idx=self.pad_token_idx,\n max_len=self.context_truncate,\n pad_tail=False)\n batch_response = padded_tensor(batch_response,\n pad_idx=self.pad_token_idx,\n max_len=self.response_truncate,\n pad_tail=True)\n batch_input_ids = torch.cat((batch_context_tokens, batch_response), dim=1)\n batch_roles = torch.tensor(batch_roles)\n\n return (batch_roles,\n batch_input_ids,\n batch_context_tokens,\n batch_response)", "def args_batch_to_text(args_batch: ArgsBatch) -> Text:\n lines = []\n for args in args_batch:\n lines.append('; '.join(str(a) for a in args))\n return '\\n'.join(lines)", "def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def graph_collate(batch):\n elem = batch[0]\n if isinstance(elem, Data):\n batch = Batch.from_data_list(batch)\n return batch, batch.y", "def collate_fn(batch):\n text = [item[0] for item in batch]\n audio = [item[1] for item in batch]\n\n text_lengths = [len(x) for x in text]\n audio_lengths = [len(x) for x in audio]\n\n max_text = max(text_lengths)\n max_audio = max(audio_lengths)\n\n text_batch = np.stack(pad_text(x, max_text) for x in text)\n audio_batch = np.stack(pad_spectrogram(x, max_audio) for x in audio)\n\n return (torch.LongTensor(text_batch),\n torch.FloatTensor(audio_batch).permute(1, 0, 2),\n text_lengths, audio_lengths)", "def _collate_fn(batch):\n # imgs = [b[0] for b in batch]\n # labels = [b[1] for b in batch]\n # imgs = torch.stack(imgs, dim=0)\n # return [imgs, labels]\n imgs = [b[0] for b in batch]\n labels = [b[1] for b in batch]\n imgs = torch.cat(imgs, dim=0)\n labels = [l for sublist in labels for l in sublist]\n return [imgs, labels]", "def batchify(batch):\n\n PAD_ID = batch[0]['<PAD>']\n inputs_list = [ex['input'] for ex in batch]\n max_length_list = []\n for docs in inputs_list:\n max_length = max([len(doc[1]) for doc in docs])\n max_length_list.append(max_length)\n inputs = []\n for index,docs in enumerate(inputs_list):\n bat_size = len(docs)\n tp_vecs = torch.zeros((bat_size,max_length_list[index]),dtype=torch.long)\n tp_vecs += PAD_ID\n for k,doc in enumerate(docs):\n for j,word in enumerate(doc[1]):\n tp_vecs[k,j] = word\n tp_list = 
[doc[0] for doc in docs]\n tp_list = torch.tensor(tp_list,dtype=torch.long)\n inputs.append([tp_list,tp_vecs])\n week_index_list = torch.tensor([ex['target'][0] for ex in batch],dtype=torch.long)\n word_index_list = torch.tensor([ex['target'][1] for ex in batch],dtype=torch.long)\n targets = (week_index_list,word_index_list)\n return inputs,targets", "def imed_collate(batch):\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if torch.is_tensor(batch[0]):\n stacked = torch.stack(batch, 0)\n return stacked\n elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return __numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int_classes):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.abc.Mapping):\n return {key: imed_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.abc.Sequence):\n return [imed_collate(samples) for samples in batch]\n\n return batch", "def format_model_output(self, output, batch_size=1):\r\n return output", "def _collate_fn(batch):\r\n batch = list(zip(*batch))\r\n batch[0] = torch.stack(batch[0])\r\n batch[1] = list(batch[1])\r\n batch[2] = torch.stack(batch[2])\r\n return tuple(batch)", "def __call__(self, batch):\r\n # Right zero-pad all one-hot text sequences to max input length\r\n input_lengths, ids_sorted_decreasing = torch.sort(\r\n torch.LongTensor([len(x[0]) for x in batch]),\r\n dim=0, descending=True)\r\n max_input_len = input_lengths[0]\r\n\r\n text_padded = torch.LongTensor(len(batch), max_input_len)\r\n text_padded.zero_()\r\n for i in range(len(ids_sorted_decreasing)):\r\n text = batch[ids_sorted_decreasing[i]][0]\r\n text_padded[i, :text.size(0)] = text\r\n\r\n # Right zero-pad mel-spec\r\n num_mels = batch[0][1].size(0)\r\n max_target_len = max([x[1].size(1) for x in batch])\r\n if max_target_len % self.n_frames_per_step != 0:\r\n max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step\r\n assert max_target_len % self.n_frames_per_step == 0\r\n\r\n # include mel padded and gate padded\r\n mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)\r\n mel_padded.zero_()\r\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\r\n gate_padded.zero_()\r\n output_lengths = torch.LongTensor(len(batch))\r\n for i in range(len(ids_sorted_decreasing)):\r\n mel = batch[ids_sorted_decreasing[i]][1]\r\n mel_padded[i, :, :mel.size(1)] = mel\r\n gate_padded[i, mel.size(1)-1:] = 1\r\n output_lengths[i] = mel.size(1)\r\n\r\n return text_padded, input_lengths, mel_padded, gate_padded, \\\r\n output_lengths", "def __call__(self, docs_batch: List[str]) -> Tuple[List[List[str]], List[List[int]]]:\n text_batch_list = []\n text_batch = []\n nums_batch_list = []\n nums_batch = []\n count_texts = 0\n text = \"\"\n curr_doc = 0\n for n, doc in enumerate(docs_batch):\n sentences = sent_tokenize(doc)\n for sentence in sentences:\n if 
len(text) + len(sentence) < self.max_chunk_len and n == curr_doc:\n text += f\"{sentence} \"\n else:\n if count_texts < self.batch_size:\n text_batch.append(text.strip())\n if n == curr_doc:\n nums_batch.append(n)\n else:\n nums_batch.append(n - 1)\n count_texts += 1\n else:\n text_batch_list.append(text_batch)\n text_batch = []\n nums_batch_list.append(nums_batch)\n nums_batch = [n]\n count_texts = 0\n curr_doc = n\n text = f\"{sentence} \"\n\n if text:\n text_batch.append(text.strip())\n text_batch_list.append(text_batch)\n nums_batch.append(len(docs_batch) - 1)\n nums_batch_list.append(nums_batch)\n\n return text_batch_list, nums_batch_list", "def batch_format_fn(element):\n return collections.OrderedDict(\n x=tf.reshape(element['pixels'], [-1, 784]),\n y=tf.reshape(element['label'], [-1, 1]))", "def __call__(self, batch: Dict[str, Tensor]) -> Tuple[Tensor, Dict[str, float]]:\n obs, actions, next_obs = get_keys(batch, *self.batch_keys)\n logps = self.model_likelihoods(obs, actions, next_obs)\n loss = -torch.stack(logps)\n info = {f\"loss(models[{i}])\": -l.item() for i, l in enumerate(logps)}\n return loss, info", "def SNLI_collate_func(batch):\n x1_list = []\n x1_length_list = []\n x2_list = []\n x2_length_list = []\n label_list = []\n for datum in batch:\n x1_padded_vec = np.pad(np.array(datum[0]), \n pad_width=((0,MAX_SENTENCE_LENGTH-datum[1])), \n mode=\"constant\", constant_values=0)\n x1_list.append(x1_padded_vec)\n x1_length_list.append(datum[1])\n \n x2_padded_vec = np.pad(np.array(datum[2]), \n pad_width=((0,MAX_SENTENCE_LENGTH-datum[3])), \n mode=\"constant\", constant_values=0)\n x2_list.append(x2_padded_vec)\n x2_length_list.append(datum[3])\n \n label_list.append(datum[4])\n\n return [torch.from_numpy(np.array(x1_list)), torch.LongTensor(x1_length_list),\n torch.from_numpy(np.array(x2_list)), torch.LongTensor(x2_length_list),\n torch.LongTensor(label_list)]", "def __call__(self, batch):\n # Right zero-pad all one-hot text sequences to max input length\n input_lengths, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([len(x[0]) for x in batch]),\n dim=0, descending=True)\n max_input_len = input_lengths[0]\n\n text_padded = torch.LongTensor(len(batch), max_input_len)\n text_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n text = batch[ids_sorted_decreasing[i]][0]\n text_padded[i, :text.size(0)] = text\n\n # Right zero-pad mel-spec\n num_mels = batch[0][1].size(1)\n max_target_len = max([x[1].size(0) for x in batch])\n if max_target_len % self.r != 0:\n max_target_len += self.r - max_target_len % self.r\n assert max_target_len % self.r == 0\n\n # include mel padded and gate padded\n mel_padded = torch.FloatTensor(len(batch), max_target_len, num_mels)\n mel_padded.zero_()\n gate_padded = torch.FloatTensor(len(batch), max_target_len)\n gate_padded.zero_()\n output_lengths = torch.LongTensor(len(batch))\n for i in range(len(ids_sorted_decreasing)):\n mel = batch[ids_sorted_decreasing[i]][1]\n mel_padded[i, :mel.size(0), :] = mel\n gate_padded[i, mel.size(0)-1:] = 1\n output_lengths[i] = mel.size(0)\n\n return text_padded, input_lengths, mel_padded, gate_padded, output_lengths", "def generate_batch(\n batch, vocab: Dict[str, int]\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:\n input_unigrams = [DatasetLSTM.encode_sequence(b[0][0], vocab) for b in batch]\n input_bigrams = [DatasetLSTM.encode_sequence(b[0][1], vocab) for b in batch]\n input_unigrams = torch.tensor(input_unigrams)\n input_bigrams = torch.tensor(input_bigrams)\n labels = 
torch.tensor([b[1] for b in batch])\n return (input_unigrams, input_bigrams), labels" ]
[ "0.7052451", "0.6277633", "0.6207949", "0.59580165", "0.5956875", "0.56889665", "0.5668856", "0.5634892", "0.5634892", "0.5634892", "0.55901664", "0.55360013", "0.549158", "0.5428135", "0.5394302", "0.5389692", "0.53437525", "0.53286153", "0.5324247", "0.5321802", "0.52739024", "0.5265054", "0.52497673", "0.52116036", "0.51920784", "0.51819324", "0.5172946", "0.5136186", "0.51227385", "0.51099396" ]
0.7289912
0
Gets a full URL to a JAFC search page
def get_search_url(query, page=1):
    # type: (str, int) -> str
    return "{}?orderby=default&sound=all&num={}&page={}&keyword={}".format(
        JAFC_SEARCH_URI, SEARCH_MAX_RESULTS, page, query)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_search_url(free_text_search):\n url = baseUrl + \"data/\"\n if not free_text_search:\n url += \"warehouse/\"\n url += \"search?\"\n return url", "def url(self):\n return url_search_posts(self.parameters, url_domain=self.url_domain)", "def search_url(url):\r\n\r\n surl = url + '/search-middle-ns.asp'\r\n driver.get(surl)\r\n\r\n return", "def get_search_url(index=None, query=None, dates=None, page=0):\n # type: (str, str, str, int) -> str\n index = \"\" if index is None else index\n query = \"\" if query is None else query\n dates = \"\" if dates is None else dates\n return LOC_SEARCH_TEMPLATE.format(index, query, dates, SEARCH_MAX_RESULTS, page)", "def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)", "def _get_search_url(self, keywords):\n search_url = urljoin(_BASE_URL, (\"s/field-keywords=%s\" % (keywords)))\n return search_url", "def search_link(self):\n return self._json['coredata'].get('link', [])[2].get('@href')", "def searchUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName", "def get_query_url(self, search_args):\n self._browser.open(\"http://poe.trade/\")\n # There are two forms, the second one is the search form\n # Both forms don't have names so we just know the 2nd one is the right one\n self._browser.form = list(self._browser.forms())[1]\n \n # Populate the forms with the stuff we want\n for form_name in search_args:\n control = self._browser.form.find_control(form_name)\n control.value = search_args[form_name]\n \n # By default we want people are are online and accepting buyouts\n buyout_control = self._browser.form.find_control(name=\"has_buyout\")\n online_control = self._browser.form.find_control(name=\"online\")\n buyout_control.value = [\"1\"]\n online_control.value = [\"x\"]\n \n search_response = self._browser.submit()\n return search_response.geturl()", "def query(url):", "def build_search_url(query):\n google_url = []\n # Build URL to query Google\n google_url.append('https://www.google.com/search?')\n # I'm feeling lucky: go to first result\n google_url.append('btnI=1')\n # Limit results to only this specific website\n google_url.append('&as_sitesearch=docs.aws.amazon.com')\n # Build query\n query = \"aws cloudformation \" + query\n # This line escapes spaces and the like\n query = urllib.quote_plus(query.strip())\n # Attach query to URL\n google_url.append(\"&q=\")\n google_url.append(query)\n return \"\".join(google_url)", "def create_search_url():\n\n search_url = 'http://newsapi.org/v2/everything?'\n\n # A date and optional time for the oldest article allowed. 
This should be in ISO 8601 format.\n oldest_article = get_oldest_article_date()\n \n payload = {\n \"q\":\"solar+energy+utility\",\n \"from\":oldest_article,\n \"sortBy\":\"relevancy\",\n \"pageSize\":100,\n \"apiKey\": os.environ['GOOGLE_NEWS_KEY']\n }\n\n\n return search_url, payload", "def go_search(self, driver, pid):\n return [self.search_url(website, pid)]", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def query_url(query: str) -> str:\n base_url = \"https://www.mta-dialog.de/stellenmarkt.html?tx_jobs_pi1%5Baction%5D=fullTextSearch&\" \\\n \"tx_jobs_pi1[value]=\"\n return base_url + query", "def query_url(target):\n query = \"info:\"+target\n params = urllib.urlencode({\n \"client\": \"navclient-auto\",\n \"ch\": \"6%s\" % checksum(query),\n \"ie\": \"UTF-8\",\n \"oe\": \"UTF-8\",\n \"features\": \"Rank\",\n \"q\": query,\n })\n return \"http://%s/search?%s\" % (HOST, params)", "def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion", "def get_scraper_url(self):\r\n \r\n return self.reformat_scraper_url()", "def get_page(search):\n headers = {\n \"User-Agent\":\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0\",\n }\n url = 'http://google.com/search?h1=en&q=' + search + \"&meta=&gws_rd=ssl\"\n page = requests.get(url, headers=headers)\n return page", "def solr_url(config):\n return _solr_core_url(config) + 'query'", "def response_url():\n current_url = urlparse(cherrypy.url()) # gets current location on the server\n try:\n location = cherrypy.request.json[\"location\"]\n if parse_qs(urlparse(location['href']).query)['from']: # get from query href\n cleaned_url = parse_qs(urlparse(location['href']).query)['from'][0]\n if not cleaned_url.__contains__(\n current_url.netloc): # check net location to avoid cross site script attacks\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n else:\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n except Exception as e:\n # cherrypy.log.error(\"no location provided setting target to /projects\")\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n return cleaned_url", "def get_search_url(\n base_url: str,\n search_type: str,\n query: str,\n page: Optional[int],\n parameters: Optional[Mapping[str, Optional[Union[int, str, float]]]],\n) -> str:\n url = f\"{base_url}/search/{search_type}?q={query}\"\n url = get_url_with_page(url, page, delimiter=\"&page=\")\n if parameters is not None:\n url += \"\".join(f\"&{k}={v}\" for k, v in parameters.items())\n return url", "def url(self):\n\n if not hasattr(self, \"_url\"):\n query = db.Query(\"query_term u\", \"u.value\")\n query.join(\"query_term t\", \"t.doc_id = u.doc_id\")\n query.where(f\"u.path = '{self.URL_PATH}'\")\n query.where(f\"t.path = '{self.TERM_PATH}'\")\n query.where(query.Condition(\"t.int_val\", self.id))\n rows = query.execute(self.loader.cdr_cursor).fetchall()\n self._url = rows[0].value if rows else \"\"\n return self._url", "def fetch(url):\r\n PAGES = {\"http://SEARCH_QUERY_URL?&page=1\" : SEARCH_RESULT_PAGE1,\r\n 
\"http://SEARCH_QUERY_URL?&page=2\" : SEARCH_RESULT_PAGE2} \r\n return PAGES[url]", "def Url(self) -> str:", "def getURLForThing(thing):", "def url(self, **kwargs):\n return self._location.url(**kwargs)", "def _get_url(self, absolute):", "def get_serach_url(self, email):\n return reverse_lazy('giza:search giza', kwargs={'search_type': 'email', 'search_word': email})", "def url(result):\n return result.entities.get(u'urls')" ]
[ "0.72922665", "0.6772499", "0.65420073", "0.6432122", "0.64029217", "0.6359451", "0.6330832", "0.6317165", "0.6235276", "0.6229302", "0.6211998", "0.6206645", "0.6206043", "0.6205089", "0.6143072", "0.61282516", "0.6092116", "0.6081848", "0.6070567", "0.6063114", "0.6032462", "0.60148567", "0.6013644", "0.60051614", "0.5998933", "0.5988903", "0.5941471", "0.5926157", "0.59170365", "0.5914978" ]
0.7016892
1
Gets a full URL to a JAFC player page
def get_player_url(id): return JAFC_M3U8_TEMPLATE.format(id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_embed_url(self):\n if not self.id_video or not self.original_url or not self.xml_response:\n return ''\n return '//view.vzaar.com/{0}/player'.format(self.id_video)", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.dailymotion.com/%s' % self.get_video_id()", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n if self.get_video_id() == -1:\n return self.original_url\n \n return 'http://www.slideshare.net/slideshow/embed_code/%s' % self.get_video_id()", "def get_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://www.livestream.com/%s/video?clipId=%s' % (self.get_username(), self.get_video_id())", "def amp_url(self):\n return self.url.child(\"amp\")", "def get_url(self, page):\n return self.server_url + page", "def get_embed_url(self):\n return self.embed_url", "def full_url(self):\n return self.url + \"?channel_id=\" + self.external_id", "def url(self):\r\n return self.urlparts.geturl()", "def get_embed_url(self):\n if not self._oembed:\n return ''\n \n if not self.original_url:\n return ''\n \n return 'https://w.soundcloud.com/player/?url=%s' % (self.original_url)", "def url(self):\n return self.full()", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def get_download_link(ep: mdl.Episode) -> str:\n embed_url = ep.video_data.get(\"streamtape\")\n if not embed_url:\n return None\n\n try:\n response = requests.get(embed_url, headers=settings.REQUEST_HEADERS)\n soup = BeautifulSoup(response.text, \"html.parser\")\n\n text = [str(script) for script in soup.find_all(\"script\") if \").innerHTML\" in str(script)][\n 0\n ]\n text = \"\".join(text.rstrip(\"</script>\").lstrip(\"<script>\").split())\n text = text.split(\"innerHTML=\")[1].rstrip(\";\")\n text = \"\".join([substr.strip('\"').strip(\"'\") for substr in text.split(\"+\")])\n\n download_link = f\"https:{text}\"\n except Exception as e:\n print(e)\n return None\n\n return download_link", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def extraire(self, url, prefix):\n # Recuperer le code html de la page youtube\n print(url)\n code = urlopen(url).read().decode('utf8').split('\"')\n\n for elmt in code:\n if prefix in elmt:\n return elmt\n \n # Valeur par defaut\n return '/watch?v=jNQXAC9IVRw'", "def url(self):\n url = self.url\n return url", "def get_url(self):\n if not self.get_video_id():\n return ''\n \n return 'http://www.vimeo.com/%s' % self.get_video_id()", "def getFullURL(self):\n return self.FullURL", "def geturl(self):\n return self.__url", "def get_embed_url(self):\n if not self.get_video_id() or not self.get_username():\n return ''\n \n return 'http://cdn.livestream.com/embed/%s?layout=4&amp;clip=%s' % (self.get_username(), self.get_video_id())", "def geturl(self) -> str:\n\n req = request.Request(url=self._url, headers=self._headers)\n with request.urlopen(req) as f:\n return f.read().decode('utf-8', 'ignore')", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def mpd_url(self):\n # type: () -> string_types\n return self._mpd_url", "def embed_url(self):\n\n ref_number = self.ID\n embed_link = \"\".join(('https://embeds.datpiff.com/mixtape/', \n str(ref_number),\n '?trackid=1&platform=desktop'))\n return embed_link", "def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")", "def url_ExoMol():\n url=u\"http://www.exomol.com/db/\"\n return url", "def GetURL(self, 
rel_url):\n return 'http://localhost:%d/%s' % (self.port, rel_url)", "def Url(self) -> str:" ]
[ "0.66149753", "0.6611158", "0.6531794", "0.64999014", "0.6488381", "0.64640135", "0.6438209", "0.6393434", "0.6368918", "0.63414407", "0.6317633", "0.63145584", "0.6299857", "0.6293298", "0.6293298", "0.6273694", "0.6272204", "0.6265445", "0.62509024", "0.6201217", "0.619866", "0.6191729", "0.61893755", "0.61893755", "0.6179095", "0.61769783", "0.6170063", "0.61682874", "0.6142719", "0.61351585" ]
0.71524704
0
Gets a full URL to a JAFC html page
def get_page_url(href):
    # type: (str) -> str
    return "{}{}".format(JAFC_URI, href.lstrip("/"))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Url(self) -> str:", "def web_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"web_url\")", "def get_url(self, page):\n return self.server_url + page", "def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url", "def _get_url(self, absolute):", "def web_url(self) -> str:\n return pulumi.get(self, \"web_url\")", "def url(self):\n return self.full()", "def url():\n ...", "def url (self):\n return Links.createURL('/')", "def url(self):\n url = self.url\n return url", "def getHTML(url): \n return urlopen(url)", "def geturl(self) -> str:\n\n req = request.Request(url=self._url, headers=self._headers)\n with request.urlopen(req) as f:\n return f.read().decode('utf-8', 'ignore')", "def get_scraper_url(self):\r\n \r\n return self.reformat_scraper_url()", "def url(self):\r\n return self.urlparts.geturl()", "def getHtml(url):\n return urlopen(url)", "def getUrl(self): #$NON-NLS-1$\r", "def getUrl(self): #$NON-NLS-1$\r", "def response_url():\n current_url = urlparse(cherrypy.url()) # gets current location on the server\n try:\n location = cherrypy.request.json[\"location\"]\n if parse_qs(urlparse(location['href']).query)['from']: # get from query href\n cleaned_url = parse_qs(urlparse(location['href']).query)['from'][0]\n if not cleaned_url.__contains__(\n current_url.netloc): # check net location to avoid cross site script attacks\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n else:\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n except Exception as e:\n # cherrypy.log.error(\"no location provided setting target to /projects\")\n # No longer need to add projects to root url, so removing \n # cleaned_url = \"https://\" + current_url.netloc + \"/projects\"\n cleaned_url = \"https://\" + current_url.netloc\n return cleaned_url", "def get_url(self) -> str:\n\n return self.__page_url", "def getFullURL(self):\n return self.FullURL", "def getHtml(url):\n log.finer(\" Opening URL: %s\" % url)\n handle = MozURLopener().open(url)\n html = handle.read()\n handle.close()\n return html", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self):\n return app.settings.cherrypy.url()", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")", "def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")" ]
[ "0.6670181", "0.66274405", "0.66116786", "0.66016185", "0.6582116", "0.65696293", "0.65174687", "0.6513991", "0.6511577", "0.6438163", "0.64040387", "0.6384952", "0.63380724", "0.6331847", "0.6324063", "0.63232374", "0.63232374", "0.631195", "0.6308127", "0.6307245", "0.6280083", "0.62662524", "0.62662524", "0.626174", "0.6249467", "0.6249467", "0.6249467", "0.6249467", "0.6249467", "0.6249467" ]
0.6633017
1
Saves a list of search strings
def save(searches):
    # type: (list) -> None
    with Cache(CACHE_URI) as c:
        c.set(SAVED_SEARCH, json.dumps(searches, ensure_ascii=False))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveQuery(self, query):\n items_temp = []\n field = self.txtSearchHistory # Initialise Search History textbox as 'field'\n field.config(state='normal') # Enable 'field' for editing (removing and adding texts)\n index = 1\n\n # Iterate through 'field' to check if query made matches previous searches\n for item in field.get(\"1.0\", 'end').splitlines():\n if item:\n if str(item).lower() == query.lower():\n field.delete(str(index) + '.0',\n str(index) + '.end + 1 char') # Remove text from 'field' if matches with current query\n index += 1\n\n self.txtSearchHistory.insert('1.0', query.capitalize() + \"\\n\") # Insert current query to first line of 'field'\n field.config(state='disabled') # Disable user from changing 'field' text box\n\n # Get updated search history to store in file\n for item in field.get(\"1.0\", 'end').splitlines():\n if item: items_temp.append(item)\n\n # Store queries (past and current) to file\n de.addSearchHist(items_temp)", "def on_save_search(self, event):\r\n\r\n search = self.m_searchfor_textbox.GetValue()\r\n if search == \"\":\r\n errormsg(_(\"There is no search to save!\"))\r\n return\r\n dlg = SaveSearchDialog(self, search, self.m_regex_search_checkbox.GetValue())\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def save(self, *args, **kwargs):\n self._update_search_tokens()\n super().save(*args, **kwargs)", "def search_all(search_tree, word_list):\n\n to_write = \"\"\n for w in word_list:\n comp = pocet_porovnani(search_tree, w.value, print_results=False)\n to_write += w.value + \":\" + str(comp) + \"\\n\"\n\n with open(\"search.txt\", \"w\") as f:\n f.write(to_write)\n print(colored(\" ulozene do suboru search.txt\", color=\"green\"))", "def update_list(*args):\n\n search_term = search_var.get()\n all_anime = load(open(Save_file_dir.joinpath(\"anime_save.p\"), \"rb\"))\n\n all_anime_list = []\n for key, value in all_anime.items():\n all_anime_list.append(key)\n\n libox_all_anime.delete(0, END)\n\n for item in all_anime_list:\n if search_term.lower() in item.lower():\n libox_all_anime.insert(END, item)", "def Save(self, e):\n for i in range(len(self.var_names)):\n name = self.var_names[i]\n entry = str(self.entries[i].GetValue()).strip() # grab user entries\n\n # convert entry into a list\n if (\",\" in entry):\n vals = entry.split(\",\")\n values = [v.strip() for v in vals]\n else:\n values = [entry]\n\n variable = Variable(name, values) # build a Variable object\n\n # update the variable values\n n = self.mainparent.namelist\n self.mainparent.input_file.namelists[n].add_variable(variable)\n\n self.mainparent.statusbar.SetStatusText(\"Namelist saved\", 0)", "def setSearchPaths (cls, searchPathList : StringList):\n\n Logging.trace(\">>: %r\", searchPathList)\n cls._searchPathList = [\".\"] + searchPathList\n Logging.trace(\"<<\")", "def save(self, values):", "def save_list(filename:str, seg_sents:List[List[str]]):\n\twith open(filename, 'w', encoding=\"utf-8\") as f:\n\t\tfor sent in seg_sents:\n\t\t\tsentence = \" \".join(sent)\n\t\t\t# print(sentence)\n\t\t\tf.write(sentence + '\\n')", "def save(self, *args, **kwargs):\n good_list = []\n for word in self.words.lower().split('\\n'):\n word = word.strip()\n if word and word not in good_list:\n good_list.append(word)\n self.words = '\\n'.join(good_list)\n return super(WordSet, self).save(*args, **kwargs)", "def save(self, sentence):\r\n listOfWords = sentence.split(\" \")\r\n self.repo.save(Sentence(listOfWords))", "def handleActionSave(self):\n for w in self.filesList.selectedItems():\n 
self.filesList.saveFile(w.text(2))", "def save(self, words):\n\t\t# TODO: Need to cap the network, expire old words/phrases\n\t\tinitial = None,\n\t\tall_words = itertools.chain(initial, words)\n\t\tconsume(itertools.starmap(self.update, pairwise(all_words)))", "def save(self):\n lang = self.languageCombo.currentText()\n kwSet = self.setSpinBox.value()\n self.__keywords[lang][\"Sets\"][kwSet] = self.keywordsEdit.toPlainText()\n \n for lang, keywords in self.__keywords.items():\n Preferences.setEditorKeywords(lang, keywords[\"Sets\"])", "def modify_search(add=[], remove=[]):\n\n query = request.args.get('q', '').split()\n query = [x.strip() for x in query if x.strip()]\n\n for word in remove:\n if word in query:\n query.remove(word)\n\n for word in add:\n if word and word not in query:\n query.append(word)\n\n return \" \".join(query)", "def search_multiple_words(words):\n # YOUR CODE HERE #\n pass # delete this when you write your code", "def search_and_add(_file, search_string, new_string):\n with open(_file, encoding='utf-8') as f:\n buf = f.readlines()\n new_array = []\n for line in buf:\n new_array.append(line)\n if line == search_string:\n new_array.append(new_string)\n\n with open(_file, 'w') as f:\n for item in new_array:\n f.write(item)", "def _manage_words(words, save_to=None):\n if save_to is None:\n return words\n with open(save_to, 'w+') as file:\n file.write('\\n'.join(words))", "def _update_search_tokens(self):\n # Get all IDs\n self.search_tokens = [self.name] + [x[\"patient\"] for x in self.pedigree if x.get(\"patient\")]\n # Remove -N1-DNA1-WES1 etc.\n self.search_tokens = [\n re.sub(r\"-\\S+\\d+-\\S+\\d+-[^-]+\\d+$\", \"\", x) for x in self.search_tokens\n ]\n # Convert to lower case\n self.search_tokens = [x.lower() for x in self.search_tokens]\n # Strip non-alphanumeric characters\n self.search_tokens = [re.sub(r\"[^a-zA-Z0-9]\", \"\", x) for x in self.search_tokens]", "def save_words_to_database(database_path: str, words_list: list):\n\n db = sqlite3.connect(database_path)\n with db:\n cursor = db.cursor()\n for word in words_list:\n # check is word in DB already\n sql = \"SELECT COUNT(*) FROM {} WHERE word='{}'\".format('words', word)\n cursor.execute(sql)\n count = cursor.fetchone()[0]\n\n if count > 0:\n sql = \"UPDATE {} SET {} = {} + 1 WHERE {} = '{}'\"\\\n .format('words', 'usage_count', 'usage_count', 'word', word)\n else:\n sql = \"INSERT INTO {}({}) VALUES('{}')\".format('words', 'word', word)\n\n # print(sql)\n cursor.execute(sql)\n\n print('Database save complete')\n\n if db is not None:\n db.close()", "def saveEditorsList(self, editors):\n for editor in editors:\n ok = editor.saveFile()\n if ok:\n self.setEditorName(editor, editor.getFileName())", "def save( self, resultOrResults ):\n\n # make a list if only one passed in\n resultOrResults = [ resultOrResults ] if not isinstance( resultOrResults, list ) else resultOrResults\n\n for result in resultOrResults:\n try:\n # self._is_valid(result)\n\n # get a Word object for the result\n word = self.get_word( result )\n # create a mapping object and attach word\n wordMap = self.get_map( result, word )\n # print( \"save: %s\" % wordMap.sentence_index )\n\n if self.write_to_db( word, wordMap ) is True:\n # fire a save complete event\n self._fire_save_notification( wordMap )\n else:\n self._fire_error_saving_notification( wordMap )\n except DataError:\n self._fire_error_saving_notification( wordMap )\n print( 'uh oh' )", "def search_resources(self,searchtext):\n\n self.search.value = searchtext\n 
self.submit.click()", "def save_finding(self, cur_finding_list: List = None,\n term: str = None,\n query_result: dict = None,\n cor_walk: Any = None,\n finding_type: str = None) -> None:\n\n current_finding_ids = [finding[\"corresponding_id\"] for finding in cur_finding_list]\n\n for record_entry in query_result[\"results\"][\"bindings\"]:\n mesh_record_id = self.get_record_value(record_entry, \"record\")\n\n if mesh_record_id in current_finding_ids:\n print(\"record id already in list\")\n else:\n entry = {\n \"base_word\": self.base_word,\n \"queried_term\": term,\n \"corresponding_id\": mesh_record_id,\n \"corresponding_term\": self.get_record_value(record_entry, \"termName\"),\n \"cor_walk\": cor_walk,\n \"finding_type\": finding_type,\n \"context_sentence\": self.context_sentence\n }\n\n best_abstraction_path, all_similarities = self.calculate_best_mesh_abstraction_path(mesh_record_id)\n entry[\"best_abstraction_path\"] = best_abstraction_path\n entry[\"all_abstraction_path_similarities\"] = all_similarities\n\n cur_finding_list.append(entry)\n self.all_findings_list.append(entry)", "def save(self, file_name):\n try:\n open(file_name, 'w').write(\"\\n\".join(self.word_list.keys()))\n except IOError as e:\n print(e)", "def index(self, suggestions: Sequence[str]) -> None:\n for s in suggestions:\n self.data.append(s)", "def saveWords(data):\n\tdata[\"words\"] = data[\"edu\"]\n\tfor e in [\",\", \".\", \"!\", \"--\", \"?\", \";\"]:\n\t\tdata[\"words\"] = data[\"words\"].replace(e,\"\")\n\t\t\n\tdata[\"words\"] = data[\"words\"].split()\n\t\n\treturn data", "def add_search_words(self,\r\n index,\r\n entrytext):\r\n\r\n\r\n for a_temp in DELETECHARACTERS:\r\n entrytext = entrytext.replace(a_temp, BLANK)\r\n\r\n for w in set(entrytext.split()):\r\n\r\n w = w.strip()\r\n if self.word_dict_contains(w):\r\n self.add_word(w,index)\r\n\r\n else:\r\n if w not in SMALLWORDS+[BLANK,EMPTYCHAR]:\r\n\r\n self.initiate_new_word(w,index)", "def save_all(self):\r\n for index in range(self.count()):\r\n self.save(index)", "def writeSearchDataToText(binarySearchData, linearSearchData):\r\n\r\n # Write data from binary search\r\n binarySearchFile = open('binary_search.txt', 'w')\r\n for i in range(len(binarySearchData)):\r\n binarySearchFile.write(str(binarySearchData[i]))\r\n binarySearchFile.write('\\n')\r\n\r\n # Write data from linear search\r\n linearSearchFile = open('linear_search.txt', 'w')\r\n for i in range(len(linearSearchData)):\r\n linearSearchFile.write(str(linearSearchData[i]))\r\n linearSearchFile.write('\\n')" ]
[ "0.63106495", "0.62108016", "0.6156991", "0.58614767", "0.5767715", "0.56644654", "0.5652136", "0.5616301", "0.56007016", "0.5581326", "0.55317277", "0.5507554", "0.54770094", "0.545046", "0.5402758", "0.53920615", "0.5372612", "0.53690314", "0.530982", "0.528692", "0.52482677", "0.5245694", "0.5239843", "0.520363", "0.5198256", "0.5189737", "0.51853466", "0.51526517", "0.51475406", "0.5147042" ]
0.71367985
0
Removes a query from the saved search list
def remove(query): # type: (str) -> bool if not query or not SEARCH_SAVED: return False searches = retrieve() if query in searches: searches.remove(query) save(searches) return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, query):\n self.collection.remove(query)", "def query_remove(self,*q):\n query = self.parameters['q'].difference(q)\n params = join_params(self.parameters,\n {\"q\": query, \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)", "def remove(self, query: dict, limit: Optional[int] = 0) -> None:\n\n matches = self.find(query, limit)\n for match in matches:\n self._db[\"documents\"].remove(match)\n\n self._dump()", "def delete(saved_query):\n saved_query.delete()", "def remove_from_cache(self, query):\n return", "def remove_query(iden):\r\n table = query_queue_table\r\n d = table.delete(table.c.iden == iden)\r\n d.execute()", "def unset(cls, query, unset_query):\n cls.collection.update(query, {\"$unset\": unset_query}, multi=True)", "def removeTitleQuery(self, titleQuery):\n try:\n self.queries[\"ti\"].remove(titleQuery)\n except ValueError:\n raise NotInQueryException", "def _remove_duplicates(self):\n self.search_query = remove_duplicates(self.search_query)", "def removeSavedSearch(self, searchName):\n facade = self._getFacade()\n if facade.noSaveSearchProvidersPresent():\n return DirectResponse.succeed()\n\n # save the search\n facade.removeSavedSearch(searchName)\n return DirectResponse.succeed()", "def delete(self):\n self.solr.delete(q=self.q)", "def unset_queries(self, *args):\n for k in args:\n self._query_dict.pop(k, None)", "def removeAllTitleQueries(self):\n self.queries[\"ti\"] = []", "def __delitem__(self, query_filter):\n subquery_count = len(self.__bound_queries)\n keyerror_count = 0\n saved_items = []\n for index, query in enumerate(self.__bound_queries):\n try:\n saved_items.append(query.get(query_filter, None))\n del query[query_filter]\n except KeyError:\n keyerror_count += 1\n except:\n for q, old_value in itertools.izip(self.__bound_queries[:index],\n saved_items):\n if old_value is not None:\n q[query_filter] = old_value\n raise\n\n if keyerror_count == subquery_count:\n raise KeyError(query_filter)", "def removestopwords(query):\n wordlist = [word for word in query.split() if word not in stopwords.words('english')]\n return \" \".join(wordlist)", "def delete_old_search_queries_from_db():\n retention_days = settings.RTD_ANALYTICS_DEFAULT_RETENTION_DAYS\n days_ago = timezone.now().date() - timezone.timedelta(days=retention_days)\n search_queries_qs = SearchQuery.objects.filter(\n created__date__lt=days_ago,\n )\n\n if search_queries_qs.exists():\n log.info(\n \"Deleting search queries for last 3 months.\",\n total=search_queries_qs.count(),\n )\n search_queries_qs.delete()", "def delete_by_query(self, query, params = {}):\n params['hitsPerPage'] = 1000\n params['attributesToRetrieve'] = ['objectID']\n\n res = self.search(query, params)\n while (res['nbHits'] != 0):\n object_ids = []\n for elt in res['hits']:\n object_ids.append(elt['objectID'])\n task = self.delete_objects(object_ids)\n self.wait_task(task['taskID'])\n res = self.search(query, params)", "def modify_search(add=[], remove=[]):\n\n query = request.args.get('q', '').split()\n query = [x.strip() for x in query if x.strip()]\n\n for word in remove:\n if word in query:\n query.remove(word)\n\n for word in add:\n if word and word not in query:\n query.append(word)\n\n return \" \".join(query)", "def remove(self, id_obj=None, query_data=None):\n if id_obj:\n return self.collection.remove(id_obj, query_data)\n return self.collection.remove(query_data)", "def clear(self):\n self.solr.delete_query(\"%s:%s\"\n % 
(self.index_uuid_field, self.index_uuid))\n self.solr.commit()", "def removeAbstractQuery(self, abstractQuery):\n try:\n self.queries[\"abs\"].remove(abstractQuery)\n except ValueError:\n raise NotInQueryException", "def deleteMatches():\n db = connect()\n c = db.cursor()\n query = (\"DELETE FROM results;\")\n c.execute(query)\n db.commit()\n db.close()", "def remove():", "def _func_delete(self, arr: list, search: str) -> list:\n for a in arr:\n try:\n a.pop(search)\n except Exception:\n _logger.debug('Skipping delete, field not found.')\n return arr", "def removeQuestion(self, search, questionIndex=False):\n if questionIndex == True and\\\n type(search) == int and search < len(self.questions):\n print(\"Initiating removal of search={}\".format(search))\n del self.questions[search]\n print(\"After trying to delete i={}, var questions is: {}\".format(search, self.questions))\n elif questionIndex == False:\n # Search questions for string `search`\n pass\n else:\n raise ValueError(\"Bad input.\")", "def delete_by_query(self, query):\n query = pylastica.query.Query.create(query)\n return self.request('_query', pylastica.request.Request.DELETE, query.query)", "def delete(ctx, saved_search_id):\n r = SavedSearch(ctx.obj['TOKEN'], ctx.obj['DEBUG']).delete(saved_search_id)\n click.echo(json_dumps(r, ctx.obj['PRETTY']))", "def delete_named_query(NamedQueryId=None):\n pass", "def delete(self, query):\n if query.isId():\n # simple\n url = '%s/%s/%i' % (self.uri, query.table(), query._where[0].value)\n else:\n # real query\n url = '%s/%s/filter?%s' % (self.uri, query.table(), query.encode())\n data, resp = self.execute('DELETE', url, decode=True)\n return data", "def remove(self):" ]
[ "0.7211253", "0.7050755", "0.6936614", "0.6839507", "0.67858285", "0.6480618", "0.6479802", "0.64102256", "0.63554883", "0.6253427", "0.6241588", "0.6116487", "0.6018235", "0.60095644", "0.5906871", "0.59045094", "0.5893667", "0.58832836", "0.5882228", "0.58817315", "0.58676887", "0.5853054", "0.5851698", "0.5847204", "0.58461416", "0.5833224", "0.58244747", "0.5820771", "0.58163583", "0.57765704" ]
0.789691
0
Adds a query to the saved search list unless the query is equal to False or the SEARCH_SAVED setting is False
def append(query): # type: (str) -> bool if not query or not SEARCH_SAVED: return False searches = retrieve() if query not in searches: searches.append(query) save(searches)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveQuery(self, query):\n items_temp = []\n field = self.txtSearchHistory # Initialise Search History textbox as 'field'\n field.config(state='normal') # Enable 'field' for editing (removing and adding texts)\n index = 1\n\n # Iterate through 'field' to check if query made matches previous searches\n for item in field.get(\"1.0\", 'end').splitlines():\n if item:\n if str(item).lower() == query.lower():\n field.delete(str(index) + '.0',\n str(index) + '.end + 1 char') # Remove text from 'field' if matches with current query\n index += 1\n\n self.txtSearchHistory.insert('1.0', query.capitalize() + \"\\n\") # Insert current query to first line of 'field'\n field.config(state='disabled') # Disable user from changing 'field' text box\n\n # Get updated search history to store in file\n for item in field.get(\"1.0\", 'end').splitlines():\n if item: items_temp.append(item)\n\n # Store queries (past and current) to file\n de.addSearchHist(items_temp)", "def store_current_search(self):\n search_query = self.request.GET.urlencode()\n self.request.session[settings.SEARCH_COOKIE_NAME] = search_query", "def remove(query):\n # type: (str) -> bool\n if not query or not SEARCH_SAVED:\n return False\n searches = retrieve()\n if query in searches:\n searches.remove(query)\n save(searches)\n return True\n return False", "def _add_better_search_words(self):\n for kw in self.better_search_kw:\n self.search_query += kw", "def append(self, search):\n self._search_history.append(search)", "def getAllSavedSearches(self, query=None, addManageSavedSearch=False):\n facade = self._getFacade()\n if facade.noSaveSearchProvidersPresent():\n return DirectResponse.succeed()\n\n data = Zuul.marshal(facade.getSavedSearchesByUser())\n if addManageSavedSearch:\n manageName = '<span id=\"manage-search-link\">%s</span>' % (_t('Manage Saved Searches...'))\n data.append(dict(id='manage_saved_search', name=manageName))\n return DirectResponse.succeed(data=data)", "def modify_search(add=[], remove=[]):\n\n query = request.args.get('q', '').split()\n query = [x.strip() for x in query if x.strip()]\n\n for word in remove:\n if word in query:\n query.remove(word)\n\n for word in add:\n if word and word not in query:\n query.append(word)\n\n return \" \".join(query)", "def __on_query_edited(self):\n self.__refresh_search_results()", "def after_search(self):\n self.search_number += 1\n\n if not self.store():\n logger.debug('''\n No results to store for keyword: \"{}\" in search engine: {}\n '''.format(\n self.query,\n self.search_engine_name)\n )\n\n if self.progress_queue:\n self.progress_queue.put(1)\n self.cache_results()", "def record_search_query(project_slug, version_slug, query, total_results, time_string):\n if not project_slug or not version_slug or not query:\n log.debug(\n \"Not recording the search query.\",\n project_slug=project_slug,\n version_slug=version_slug,\n query=query,\n total_results=total_results,\n time=time_string,\n )\n return\n\n time = parse(time_string)\n before_10_sec = time - timezone.timedelta(seconds=10)\n partial_query_qs = SearchQuery.objects.filter(\n project__slug=project_slug,\n version__slug=version_slug,\n modified__gte=before_10_sec,\n ).order_by(\"-modified\")\n\n # If a partial query exists, then just update that object.\n # Check max 30 queries, in case there is a flood of queries.\n max_queries = 30\n for partial_query in partial_query_qs[:max_queries]:\n if query.startswith(partial_query.query):\n partial_query.query = query\n partial_query.total_results = total_results\n 
partial_query.save()\n return\n\n version = (\n Version.objects.filter(slug=version_slug, project__slug=project_slug)\n .prefetch_related(\"project\")\n .first()\n )\n if not version:\n log.debug(\n \"Not recording the search query because project does not exist.\",\n project_slug=project_slug,\n version_slug=version_slug,\n )\n return\n\n SearchQuery.objects.create(\n project=version.project,\n version=version,\n query=query,\n total_results=total_results,\n )", "def cli_saved_queries_add(query_name, query_data=None):\n api.saved_queries_add(query_name=query_name, query_data=query_data)\n print \"Create a saved query called: %(query_name)s WHERE %(query_data)s\" % locals()", "def add_query(self, query_string, bool_operator='AND'):\n solr = self._clone()\n solr.q = \"%s %s (%s)\" % (solr.q, bool_operator, query_string)\n return solr", "def pre_search(self, qs):\n return qs", "def _(event):\n input_buffer = event.cli.buffers.previous(event.cli)\n search_buffer = event.cli.buffers[SEARCH_BUFFER]\n\n # Update search state.\n if search_buffer.text:\n get_search_state(event.cli).text = search_buffer.text\n\n # Apply search.\n input_buffer.apply_search(get_search_state(event.cli), include_current_position=True)\n\n # Add query to history of search line.\n search_buffer.append_to_history()\n search_buffer.reset()\n\n # Focus previous document again.\n event.cli.pop_focus()", "def save(self, **kwargs):\n if self.search_terms is None:\n self.search_terms = ''\n super().save(**kwargs)\n return self", "def updateSavedSearch(self, searchName, queryString):\n facade = self._getFacade()\n if facade.noSaveSearchProvidersPresent():\n return DirectResponse.succeed()\n\n # save the search\n facade.updateSavedSearch(searchName, queryString)\n return DirectResponse.succeed()", "def post_search(self, qs):\n return qs", "def on_save_search(self, event):\r\n\r\n search = self.m_searchfor_textbox.GetValue()\r\n if search == \"\":\r\n errormsg(_(\"There is no search to save!\"))\r\n return\r\n dlg = SaveSearchDialog(self, search, self.m_regex_search_checkbox.GetValue())\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def on_searchin_changed(self):\r\n\r\n self.check_searchin()", "def add_music_from_search(request, music_id: int) -> HttpResponse:\n music_item = get_object_or_404(Music, id=music_id)\n\n if music_item in request.user.profile.playlist.all():\n return HttpResponse('Success')\n\n playpos = PlayPosition(\n position=music_item,\n plist=request.user.profile\n )\n\n playpos.add_order()\n playpos.save()\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def append(self, search):\n query_values = {\n \"id\": str(search.id),\n \"term\": search.term,\n \"timestamp\": search.timestamp\n }\n\n self._cursor.execute(f\"\"\"\n INSERT INTO {self._table_name}\n VALUES (:id, :term, :timestamp);\"\"\", query_values)\n\n self._conn.commit()", "def save(self, *args, **kwargs):\n self.where_clause = None\n\n if self.filters is not None:\n queries = []\n\n for key in self.filters:\n category = self.project.categories.get(pk=key)\n queries.append(category.get_query(self.filters[key]))\n\n if len(queries) > 0:\n query = ' OR '.join(queries)\n self.where_clause = query\n else:\n self.where_clause = 'FALSE'\n\n super(FilterMixin, self).save(*args, **kwargs)", "def __generate_search_query(self) -> None:\n if self.query_accuracy < 100:\n if self.title is not None and self.title != '' and self.artist is not None and self.artist != '':\n # Use the title and the artist name to find more information about the song.\n 
query: str = self.title + ' ' + self.artist\n query = re.sub(self.__get_filter_regex(), '', query)\n self.query = query\n # Remove unnecessary information in order to get a simpler query version.\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 100\n return\n if self.query_accuracy < 50:\n # No title nor artist name available, use the filename as search query.\n filename: str = os.path.basename(self.original_path)\n filename = os.path.splitext(filename)[0]\n query: str = filename.lower()\n query = re.sub(self.__get_filter_regex(), '', query)\n query = query.replace('_', ' ')\n query = query.strip()\n self.query = query\n self.minimal_query = re.sub(r'\\([\\s\\S]+\\)', '', query).strip()\n self.query_accuracy = 50", "def addTitleQuery(self, titleQuery):\n if not (titleQuery in self.queries[\"ti\"]):\n self.queries[\"ti\"].append(titleQuery)", "def question_new_search():", "def addAbstractQuery(self, abstractQuery):\n if not (abstractQuery in self.queries[\"abs\"]):\n self.queries[\"abs\"].append(abstractQuery)", "def query_append(self,*q):\n query = self.parameters['q'].union(q)\n params = join_params(self.parameters,\n {\"q\": query, \"limit\": self._limit,\n \"url_domain\": self.url_domain,\n \"proxies\": self.proxies}\n )\n\n return self.__class__(**params)", "def global_search(user, search, qs):\n fks = {'os': 'name',\n 'site': 'label',\n 'supplier': 'name',\n 'type': 'text',\n 'status': 'name'}\n # Filtre les foreign key en fonction du niveau de securite.\n authorized_keys = SearchHostForm.filter_list(user, fks.keys())\n fks = dict([(k, v) for k, v in fks.iteritems()\n if k in authorized_keys])\n\n # Filter local fields\n fields = SearchHostForm.filter_list(user, SearchHostForm.Meta.fields)\n qs = qs.filter(\n (\n # Do search on extra_fields\n Q(additionnalfield__field__fast_search__exact=True)\n & ~Q(additionnalfield__field__data_type__in=('2', '3', '6'))\n & Q(additionnalfield__value__icontains=search)\n ) | (\n # Do search only for local fields\n reduce(ior, (Q(**{\"%s__icontains\" % (key,): search})\n for key in fields if key not in fks.keys()))\n ) | (\n # Do search on filtered foreign keys\n reduce(ior, (Q(**{\"%s__%s__icontains\" % (key, value): search})\n for key, value in fks.iteritems()))\n )\n )\n # Distict is needed because could match 2 fields in the or just above\n return qs.distinct()", "def search(self, query):", "def search_settings(self, search_settings):\n\n self._search_settings = search_settings" ]
[ "0.6368964", "0.60500705", "0.5986336", "0.59545386", "0.5931738", "0.5926475", "0.59156233", "0.5875235", "0.5836838", "0.57738036", "0.5770017", "0.55606484", "0.55499476", "0.5534752", "0.55133146", "0.55129987", "0.54866916", "0.5432528", "0.54266226", "0.54100585", "0.5409602", "0.54092807", "0.5388184", "0.5380216", "0.5349884", "0.5255031", "0.5224982", "0.5209183", "0.5208247", "0.5197902" ]
0.77002674
0
Gets cached or live HTML from the url
def get_html(url): # type: (str) -> BeautifulSoup headers = { "Accept": "text/html", "Accept-encoding": "gzip" } with Cache(CACHE_URI) as c: cached = c.get(url) if cached: add_cache_headers(headers, cached) # always return cached info regardless if cached["fresh"] or url.startswith(JAFC_INFO_URI): return BeautifulSoup(cached["blob"], "html.parser") r = requests.get(url, headers=headers, timeout=SEARCH_TIMEOUT) if 200 == r.status_code: soup = BeautifulSoup(r.content, "html.parser") # pre-cache clean-up for x in soup(["script", "style"]): x.extract() c.set(url, str(soup), r.headers) return soup if 304 == r.status_code: c.touch(url, r.headers) return BeautifulSoup(cached["blob"], "html.parser")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scrape_page(url):\n cached_page = cache.get(url)\n\n if cached_page:\n return html.fromstring(cached_page)\n else:\n page = get(url)\n\n cache.set(url, page.text)\n\n return html.fromstring(page.text)", "def get(self, url=None, script=None, key=None):\n self.base_url = self.base_url or url # set base URL if not set\n html = self.cache.get(key)\n if html:\n if self.debug: print 'load cache', key \n self.setHtml(html, QUrl(self.base_url))\n elif url:\n self.load(QUrl(url))\n elif script:\n self.js(script)\n\n loop = QEventLoop()\n timer = QTimer()\n timer.setSingleShot(True)\n timer.timeout.connect(loop.quit)\n self.loadFinished.connect(loop.quit)\n timer.start(self.timeout * 1000)\n loop.exec_() # delay here until download finished or timeout\n \n if timer.isActive():\n # downloaded successfully\n timer.stop()\n html = self.current_html()\n if key:\n self.cache[key] = html\n self.inject_jquery()\n else:\n # didn't download in time\n print 'Download timeout'\n html = ''\n return html", "def getHtml(url):\n return urlopen(url)", "def get_html(url):\n print('fetching', url)\n try:\n re = requests.get(url, timeout=1, stream=True)\n print('success!')\n # limit file size to 1mb\n html = re.raw.read(1000000+1, decode_content=True)\n if len(html) > 1000000:\n raise ValueError('response too large')\n return html\n except:\n raise TimeoutError('request timed out')", "def get_html(url):\n return urllib.request.urlopen(url)", "def getHTML(url): \n return urlopen(url)", "def getPageContent(url, insertDelay = False, shouldCache = True):\n\n # Try to retrieve this url from the cache\n cache = Cache()\n if shouldCache:\n cachedContent = cache.read(url)\n else:\n cachedContent = None\n\n if cachedContent is not None:\n\n # Take the cached content\n content = cachedContent\n\n else:\n\n try:\n # Load the content from the web\n content = loadFromUrl(url, insertDelay)\n\n # Cache this page\n cache.write(url, content)\n\n except Exception:\n\n print \"CACHE ERROR: \" + str(sys.exc_info()[1])\n content = ''\n\n return content", "def _extract_html(self, url):\n self.response = requests.get(url, timeout=5)\n self.html = BeautifulSoup(self.response.content, \"lxml\") if self.response.ok else None\n # return self.html", "def get_html(url):\n\n r = requests.get(url, headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n })\n html = r.text\n\n return html", "def getHtml(self, url):\n r = requests.get(url)\n html = r.content\n return html", "def retrieve_html(url):\n req = urllib2.Request(url)\n req.add_header('User-Agent', 'Just-Crawling 0.1')\n request = None\n status = 0\n try:\n logger.info(\"Crawling %s\" % url)\n request = urllib2.urlopen(req)\n except urllib2.URLError as e:\n logger.error(\"Exception at url: %s\\n%s\" % (url, e))\n except urllib2.HTTPError as e:\n status = e.code\n except:\n return\n if status == 0:\n status = 200\n\n try:\n data = request.read()\n except:\n return\n\n return str(data)", "def get_html(url):\n req = urllib.request.Request(\n url,\n headers={\n 'User-Agent': 'Python Learning Program',\n 'From': '[email protected]'\n }\n )\n resp = urllib.request.urlopen(req)\n\n if resp.code == 200:\n return resp.read() # returns the html document\n else:\n return None", "def get_page_contents(url, cache_dir, verbose,use_cache=True):\n\t#url=f\"https://www.mcgill.ca/study/2020-2021/courses/search?page={page_num}\"\n\tfname = hashlib.sha1(url.encode('utf-8')).hexdigest() # hash the url so we can 
save the cache\n\tfull_fname = osp.join(cache_dir, fname)\n\tif osp.exists(full_fname) and use_cache: #if the page has already been cached...\n\t\tif(verbose):\n\t\t\tprint(f\"Loading {url} from cache\")\n\t\tcontents = open(full_fname, 'r').read()\n\t\t\n\telse:\n\t\tif(verbose):\n\t\t\tprint(f\"Loading {url} from source\")\n\t\tr = requests.get(url)\n\t\tcontents = r.text\n\t\twith open(full_fname, 'w') as f: # write the cache\n\t\t\tf.write(contents)\n\treturn contents, full_fname # return the full hashed fname so we can use it...", "def get_html_from_url(url):\n request = requests.get(url)\n data = request.text\n return data", "def getHTML(url):\n\n time.sleep(2.00)\n html = urllib2.urlopen(url,timeout=10).read()\n urllib2.urlopen(url).close()\n\n soup = BeautifulSoup(html)\n\n return soup", "def FetchUrlContent(url):\n content = memcache.get(url)\n if content:\n return content\n\n request = urlfetch.fetch(url)\n\n if request.status_code == 200:\n content = request.content\n memcache.add(url, content, 60 * 60)\n return content\n\n raise LookupError('Unable to fetch URL. Response code: ' +\n str(request.status_code))", "def fetch_url(url):\n try:\n soup = bs(urlopen(url).read(), 'html.parser')\n return soup\n except:\n print \"Couldnot download the content from the URL\", url\n return \"\"", "def fetchUrl(self, url):\n self.driver.get(url)\n html = self.driver.page_source\n return html", "def get(self, url):\n\t\ttry:\n\t\t\tassert(type(url)) == str\n\t\t\tself.driver.get(url)\n\t\t\t# sleep(1) # Even tho driver.get is blocking, it returns as soon as DOM loads, without waiting for JS to run and update the DOM with the new elements\n\t\t\t# wait(self.driver, 10).until( EC.visibility_of_element_located() ) # Not sure how to wait here efficiently\n\t\t\tsleep(5) # A little long, but without a conditional variable to tell us when the page is ready us when to go our only choice is to nap\n\t\t\tself.bsource = bs( self.viewSource(), \"lxml\" ) # Update internal BeautifulSoup source with new javascript-encriched code (\"lxml\" is faster that \"html.parser\")\n\t\texcept Exception as e:\n\t\t\tprint(\"[*] Unable to GET page {}\\n{}\".format(url, e))\n\t\t\treturn -1", "def _html(url: str) -> BeautifulSoup:\n with urllib3.PoolManager() as manager:\n res = manager.request(\"GET\", url, headers={\"User-Agent\": ua.chrome})\n if res.status != 200:\n raise Exception(res.status)\n soup = BeautifulSoup(res.data, \"html.parser\")\n return soup", "def simple_get(self, url):\r\n \"\"\"\r\n The simple_get function accepts a single url argument. \r\n It then makes a GET request to that url. \r\n If nothing goes wrong, you end up with the raw HTML content for the page you requested. 
\r\n If there were any problems with your request (like the url is bad or the remote server is down) \r\n then your functon returns None.\r\n \"\"\"\r\n try:\r\n with closing(get(url, stream=True)) as resp:\r\n if self.is_good_response(resp):\r\n return resp.content\r\n else:\r\n return None\r\n except RequestException as e:\r\n self.log_error('Error during requests to {0} : {1}'.format(url, str(e)))\r\n return None", "def get_html_source(url):\n # import urllib\n try:\n sock = urllib.urlopen(url)\n html_source = sock.read()\n sock.close()\n return html_source\n except IOError:\n print \"IOError: Not a valid URL\"", "def fetch(self, url):\r\n fname = os.path.join(self._cachedir, self._formatter(url))\r\n if not os.path.exists(fname):\r\n time.sleep(self._sleep)\r\n html = urllib.urlopen(url).read()\r\n with codecs.open(fname, 'w', 'utf-8') as f:\r\n soup = BeautifulSoup(html)\r\n f.write(unicode(soup))\r\n return fname", "def get_page(url):\n # todo need some error checking\n\n r = requests.get(url)\n\n if r.status_code != 200:\n log_date = datetime.now().strftime(\"%Y-%m-%d %H%M%S\")\n filename = f'{log_date} response.html'\n with open(filename, 'w+') as f:\n f.write(r.text)\n logging.critical('get_page failed with status {}. See file {}.'.format(\n r.status_code,\n filename\n ))\n r.raise_for_status()\n\n return r", "def get_html(url):\n response = requests.get(url)\n response.encoding = 'utf-8'\n return response.text", "def getHtml(url):\n log.finer(\" Opening URL: %s\" % url)\n handle = MozURLopener().open(url)\n html = handle.read()\n handle.close()\n return html", "async def fetch_html(url: str,\n session: aiohttp.ClientSession,\n **kwargs) -> str:\n\n resp = await session.request(method=\"GET\", url=url, **kwargs)\n resp.raise_for_status()\n logger.info(\"Got response [%s] for URL: %s\", resp.status, url)\n html = await resp.text()\n return html", "def get_html(url: str) -> str:\n headers = {\n 'User-Agent': Config.Scraper.user_agent,\n }\n logging.debug('User-Agent: ' + headers['User-Agent'])\n r = requests.get(url.strip(), headers=headers)\n r.encoding = 'utf8'\n print('[Status Code: %s]' % r.status_code)\n if r.status_code != 200:\n raise Exception('Error in get HTML!')\n return r.text", "def load_page(url):\n try:\n response = urllib2.urlopen(url)\n html = response.read()\n\n if response.code == 200:\n body_text = html\n return html\n return \"\"\n except Exception:\n return \"\"", "def __download_web(self):\n page = requests.get(self.url)\n\n if page.status_code == 200:\n return BeautifulSoup(page.content, \"html.parser\")" ]
[ "0.7545125", "0.7428418", "0.7357613", "0.7355026", "0.7341723", "0.7332338", "0.7266856", "0.7048026", "0.7047912", "0.7039737", "0.7023649", "0.69982666", "0.69752645", "0.6962329", "0.69497025", "0.69189936", "0.69020534", "0.68549544", "0.68450296", "0.68338495", "0.6815891", "0.6809432", "0.68084794", "0.6777074", "0.6773717", "0.6769582", "0.6729074", "0.67007095", "0.66878176", "0.668063" ]
0.74664515
1
Deletes an existing Pipeline.
def delete(self, params=None): self.logger.debug('Deleting {resource_type} with parameters:' ' {params}'.format(resource_type=self.type_name, params=params)) self.client.delete_pipeline(**params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list_pipeline_delete_one(self):\n response = self.client.delete_pipeline(self.pipeline_name)\n nose.tools.assert_is_not_none(response)\n\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_false(exsit)", "def cli(ctx, pipeline):\n\n project_root = pu.get_project_root()\n pipelines = pu.read_config()['pipelines']\n\n if pipeline in pipelines:\n path = pipelines[pipeline]['path']\n pipeline_dir = os.path.join(\n project_root,\n path)\n else:\n pu.fail(\"Pipeline '{}' not in this project\".format(pipeline))\n\n if os.path.isdir(pipeline_dir):\n\n shutil.rmtree(pipeline_dir)\n\n popper_config = pu.read_config()\n del popper_config['pipelines'][pipeline]\n\n pu.info(\"Pipeline '{}' removed successfully\".format(pipeline),\n fg=\"blue\")\n\n pu.write_config(popper_config)\n\n else:\n pu.fail(\"Path '{}' is not a folder\".format(pipeline))", "def DeleteDeliveryPipeline(self, pipeline_config, force):\n log.debug('Deleting delivery pipeline: ' + repr(pipeline_config))\n return self._pipeline_service.Delete(\n self.messages\n .ClouddeployProjectsLocationsDeliveryPipelinesDeleteRequest(\n allowMissing=True, name=pipeline_config.name, force=force))", "def delete_delivery_pipeline(\n self,\n ) -> Callable[\n [cloud_deploy.DeleteDeliveryPipelineRequest], operations_pb2.Operation\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"delete_delivery_pipeline\" not in self._stubs:\n self._stubs[\"delete_delivery_pipeline\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/DeleteDeliveryPipeline\",\n request_serializer=cloud_deploy.DeleteDeliveryPipelineRequest.serialize,\n response_deserializer=operations_pb2.Operation.FromString,\n )\n return self._stubs[\"delete_delivery_pipeline\"]", "def delete(\n self, pipeline: Optional['Pipeline'] = None, remove_from_queue: bool = True, delete_dependents: bool = False\n ):\n connection = pipeline if pipeline is not None else self.connection\n\n self._remove_from_registries(pipeline=pipeline, remove_from_queue=remove_from_queue)\n\n if delete_dependents:\n self.delete_dependents(pipeline=pipeline)\n\n connection.delete(self.key, self.dependents_key, self.dependencies_key)", "def test_delete_pipeline_not_exist(self):\n pipeline_name = 'pipeline_name_not_exist'\n try:\n self.client.delete_pipeline(pipeline_name)\n except BceHttpClientError as e:\n if isinstance(e.last_error, BceServerError):\n assert e.last_error.message.startswith('The requested pipeline does not exist')\n else:\n assert True == False, 'not throw bceservererror'", "def test_delete_pipeline_with_name_is_none(self):\n with nose.tools.assert_raises_regexp(ValueError,\n 'arg \"pipeline_name\" should not be None'):\n self.client.delete_pipeline(None)", "def delete_dependents(self, pipeline: Optional['Pipeline'] = None):\n connection = pipeline if pipeline is not None else self.connection\n for dependent_id in self.dependent_ids:\n try:\n job = Job.fetch(dependent_id, connection=self.connection, serializer=self.serializer)\n job.delete(pipeline=pipeline, remove_from_queue=False)\n except NoSuchJobError:\n # It could be that the dependent job was never saved to redis\n pass\n connection.delete(self.dependents_key)", "def test_delete_pipeline_with_name_is_empty(self):\n pipeline_name = ''\n 
with nose.tools.assert_raises_regexp(BceClientError, \n 'pipeline_name can\\'t be empty string'):\n self.client.delete_pipeline(pipeline_name)", "def pipeline_finished(self, pipeline):\n\n # Get the sizes of all output adios files\n self._get_adios_metadata(pipeline)\n\n # Free resources used by the pipeline\n with self.free_cv:\n # Return nodes used by the pipeline\n while not pipeline.nodes_assigned.empty():\n pipe_node = pipeline.nodes_assigned.get()\n self.allocated_nodes.put(pipe_node)\n\n _log.debug(\"finished pipeline {}, free nodes {} -> {}\".format(\n pipeline.id, self.free_nodes, self.free_nodes +\n pipeline.total_nodes))\n self.free_nodes += pipeline.total_nodes\n\n self.free_cv.notify()\n\n # Remove pipeline from list of running pipelines\n with self.pipelines_lock:\n self._running_pipelines.remove(pipeline)\n if self._status is not None:\n self._status.set_state(pipeline.get_state())", "def test_remove_old_pipelines():\n\n former_version_test = \"1.0.0\"\n latest_version_test = \"2.0.0\"\n\n pipeline_for_test = pipeline.pump_pipeline\n\n # Saving the former pipeline\n save_former_name = f\"former_pipeline_v{former_version_test}.pkl\"\n save_former_path = core.TRAINED_MODEL_DIR / save_former_name\n joblib.dump(pipeline_for_test, save_former_path)\n\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n assert save_former_name in trained_model_dir_file_list\n\n # Saving the subject pipeline\n subject = f\"subject_pipeline_v{latest_version_test}.pkl\"\n save_subject_test_path = core.TRAINED_MODEL_DIR / subject\n joblib.dump(pipeline_for_test, save_subject_test_path)\n\n # When\n utils.remove_old_pipelines(\n files_to_keep=[\n subject,\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\",\n ]\n )\n\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n # Then\n assert subject in trained_model_dir_file_list\n assert \"__init__.py\" in trained_model_dir_file_list\n if (\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n in trained_model_dir_file_list\n ):\n assert len(trained_model_dir_file_list) == 3\n else:\n assert len(trained_model_dir_file_list) == 2\n # removing the fake pipeline\n save_subject_test_path.unlink()", "def pipeline(self, pipeline_id):\r\n return pipelines.Pipeline(self, pipeline_id)", "def destroy_dataflow(self, *args, **kwargs):\n raise NotImplementedError", "def test_deletion(self):\n\n # Set the current pipe to the 'orig' data pipe.\n name = 'orig'\n pipes.switch(name)\n\n # Delete the 'orig' data pipe.\n pipes.delete(name)\n\n # Test that the data pipe no longer exists.\n self.assert_(name not in ds)\n\n # Test that the current pipe is None (as the current pipe was deleted).\n self.assertEqual(pipes.cdp_name(), None)", "def unregister_pipeline(pipeline_id):\n request = service_pb2.UnRegisterPipelineRequest()\n request.pipeline_id = pipeline_id\n\n response = _service.request(request, \"unregister_pipeline\")\n\n import google.protobuf.json_format as json_format\n res = json_format.Parse(response, service_pb2.VoidResponse())\n #logger.info(res)\n return None, res.status", "def delete(self):\n logger.info('Delete the port chain: %s' % self.name)\n # Delete port chain\n self.pc_client.delete('port_chain', self.name)\n\n logger.info('Delete the flow classifier.')\n self.pc_client.delete('flow_classifier', self.flow_conf['name'])\n\n # Delete all port pair groups\n logger.info('Delete port pair groups and port pairs.')\n srv_ppgrp_lst = 
self.srv_chain.get_srv_ppgrp_id()\n for grp_idx in range(len(srv_ppgrp_lst)):\n pp_grp_name = 'pp_grp_%s' % grp_idx\n self.pc_client.delete('port_pair_group', pp_grp_name)\n\n # Delete all port pairs\n for grp_idx, pp_grp in enumerate(srv_ppgrp_lst):\n for pp_idx in range(len(pp_grp)):\n pp_name = 'pp_%s_%s' % (grp_idx, pp_idx)\n self.pc_client.delete('port_pair', pp_name)", "def delete(self):\n self.vera.delete_scene(self)", "def test_save_pipeline():\n\n # Given\n try:\n pipeline_for_test = joblib.load(\n core.TRAINED_MODEL_DIR\n / f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n subject_file_name = (\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n except:\n subject_file_name = f\"fake_pipe_line_model_v{_version}.pkl\"\n\n # When\n utils.save_pipeline(pipeline_for_test, subject_file_name)\n\n # Then\n # Get the files in the model save's directory\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n # Check if the pipeline was saved in TRAINED_MODEL_DIR and with the right filename\n assert subject_file_name in trained_model_dir_file_list\n # Check if the __init__.py file is in the TRAINED_MODEL_DIR\n assert \"__init__.py\" in trained_model_dir_file_list\n # Check if the TRAINED_MODEL_DIR folder contains just the new saved pipeline and the __init__.py file\n assert len(trained_model_dir_file_list) == 2\n # remove the fake pipeline\n if subject_file_name == f\"fake_pipe_line_model_v{_version}.pkl\":\n core.TRAINED_MODEL_DIR / subject_file_name.unlink()", "def deleteStep( self ):\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n # Save the stuff we need\n oldSteps = self._steps\n oldSteps.pop( )\n\n # Reinitialize this instance\n self._initialize( oldSteps )", "def cleanup(self, ttl: Optional[int] = None, pipeline: Optional['Pipeline'] = None, remove_from_queue: bool = True):\n if ttl == 0:\n self.delete(pipeline=pipeline, remove_from_queue=remove_from_queue)\n elif not ttl:\n return\n elif ttl > 0:\n connection = pipeline if pipeline is not None else self.connection\n connection.expire(self.key, ttl)\n connection.expire(self.dependents_key, ttl)\n connection.expire(self.dependencies_key, ttl)", "def delete(parameters, session):\n from Modules.Classes.ExperimentalScenario import ExperimentalScenario\n # Received --> [id_exeriment]\n # Retrieve all scenarios associated with target experiment\n exp_sc = session.query(ExperimentalScenario).filter(ExperimentalScenario.experiment_id == parameters[0]).all()\n for item in exp_sc:\n # Retrieve all ExperimentalScenarioPattern association for current experimental scenario\n exp_scenarios_pat = session.query(ExperimentalScenarioPattern).filter(and_(\n ExperimentalScenarioPattern.experimental_scenario_id == item.id,\n ExperimentalScenarioPattern.pattern_type == 2)).all()\n for item2 in exp_scenarios_pat:\n session.delete(item2)\n session.commit()\n session.close()\n msg_rspt = Message(action=2, comment='Register deleted successfully')\n return msg_rspt", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def delete(self):\n self.model.remove_agents(self)", "def delete(self):\n self.parser.add_argument('lp_id',\n help=\"Language pack id\")\n args = self.parser.parse_args()\n 
self.client.languagepacks.delete(lp_id=args.lp_id)", "def delete(self, **kwargs):\n self._cycles.delete(**kwargs)", "def delete_stage(stage):\n folder = stage_folder(stage)\n shutil.rmtree(folder) # delete old\n ensure_path(folder) # create new", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def delete(self):\n if self.is_running:\n raise errors.ChalmersError(\"Can not remove running program (must be stopped)\")\n\n if path.isfile(self.definition_filename):\n os.unlink(self.definition_filename)\n\n if path.isfile(self.state_filename):\n os.unlink(self.state_filename)", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "def delete(self, **kwargs):\n url_str = self.base_url + \"/%s\" % kwargs['transform_id']\n newheaders = self.get_headers()\n resp, body = self.client.json_request('DELETE', url_str,\n headers=newheaders)\n return resp" ]
[ "0.71344304", "0.66652894", "0.6538766", "0.64160043", "0.606167", "0.5928474", "0.586184", "0.577419", "0.5719994", "0.5530857", "0.5435308", "0.51739424", "0.51568216", "0.5153596", "0.51468605", "0.5143934", "0.51435167", "0.50878054", "0.5085399", "0.5078952", "0.50729287", "0.5022553", "0.4983266", "0.49819025", "0.49542344", "0.4933853", "0.4916245", "0.49129227", "0.49001044", "0.48983577" ]
0.73128957
0
Gets the direction to the left of the car, from the perspective of the direction that the car is facing. E.g., if the car is facing UP, this will return LEFT, and if it's facing DOWN, this will return RIGHT
def get_direction_to_left(self, direction): return direction_to_left[direction]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directionLeft(self):\n return self.__directionLeft", "def turn_left(self):\n self.facing_direction -= self.config\n if self.facing_direction < 0:\n self.facing_direction += 8\n self.x, self.y = self.compute_positions()", "def get_left(self):\n return -self.l_motor.get()", "def turn_left(self):\n temp = self.direction[0]\n self.direction[0] = self.direction[1]\n self.direction[1] = -temp", "def turn_left(self):\n\t\tself.direction = (self.direction - 1)%4", "def MoveLeftStep(self):\n if self.facing == 0:\n self.facing = 3\n self.x -= self.stepLeft\n elif self.facing == 1:\n self.facing = 0\n self.y -= self.stepUp\n elif self.facing == 2:\n self.facing = 1\n self.x += self.stepRight\n elif self.facing == 3:\n self.facing = 2\n self.y += self.stepDown", "def left(self):\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == 0:\r\n self.d = direction_tuple[3]\r\n else:\r\n self.d = direction_tuple[index - 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")", "def left_rotation(self, ang_vel):\n vel = self.om_right_max * self.R - ang_vel * self.L\n om_left = (vel - ang_vel * self.L) / self.R -1\n return vel, om_left", "def go_left(self):\n self.change_x = -6\n self.direction = \"L\"", "def left(self):\n if self.head.heading() != RIGHT and self.last_direction != RIGHT:\n self.head.setheading(LEFT)", "def left(self, key):\n return self.side(key, self.forward)", "def turn_left(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(1 + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(1 + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def rotate_left(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.clockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(speed + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.clockwise_rotate(speed + 1 + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(speed)", "def _determine_direction(self, degrees_left: float) -> float:\n if degrees_left >= 0:\n return 1.0\n else:\n return -1.0", "def steerleft(self):\n self.direction = self.direction+self.steering\n if self.direction > 360:\n self.direction = 0+90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)", "def left(self, speed):\n self.controller.front_left_backward(speed)\n self.controller.front_right_forward(speed)\n self.controller.rear_left_forward(speed)\n self.controller.rear_left_backward(speed)", "def driveLeft(self):\n return self.__driveLeft", "def mate_left(self):\n # TODO: query self.local_obj geometry to get center of face?\n return Mate(self, CoordSystem(\n origin=(-self.width / 4, 0, (self.height + self.left_wall_height) / 2),\n xDir=(0,1,0),\n normal=(-sin(radians(self.angle_left)), 0, cos(radians(self.angle_left)))\n ))", "def getLeft(self):\n return self.left", "def rotate_left(self):\n reversed_compass = compass[::-1]\n current = reversed_compass.index(self.heading)\n return replace(self, heading=reversed_compass[(current + 1) % 4])", "def turn_left(self):\n self.direction_mod_offset -= 1\n self.calculate_offset_mapping()\n direction_num = self.direction_mod_offset % len(self.direction_arr)\n client.rotateToYawAsync(direction_num * 90).join()", "def leftTurn(self):\n #print('leftTurn\\r')\n self.linearVector 
= Vector3(x=0.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=1.0)", "def left(self, speed=1):\n self.right_motor.forward(speed)\n self.left_motor.backward(speed)", "def downleft(self):\n return Coord([self.x - 1, self.y + 1])", "def do_left_turn(robot_name):\n global current_direction_index\n\n current_direction_index -= 1\n if current_direction_index < 0:\n current_direction_index = 3\n\n return True, ' > '+robot_name+' turned left.'", "def left(self, angle):\r\n self.dir -= math.radians(angle)", "def move_left(self):\n self.yaw_motor.step_backward()", "def go_left(self):\n self.rect.centerx -= self.__dx", "def left_distance(self):\n return self.x", "def get_left(self):\n return self.left" ]
[ "0.7663516", "0.73489416", "0.71305645", "0.69870096", "0.6913713", "0.68939495", "0.68638486", "0.68636286", "0.6823035", "0.6767186", "0.6711137", "0.6702193", "0.6595132", "0.6579246", "0.6517725", "0.6501973", "0.6487817", "0.6425401", "0.64018047", "0.6377863", "0.63699996", "0.6359768", "0.6340693", "0.6309297", "0.6301435", "0.6298309", "0.6274007", "0.6268099", "0.6261345", "0.62602806" ]
0.76009375
1
Gets the direction to the right of the car, from the perspective of the direction that the car is facing. E.g., if the car is facing UP, this will return RIGHT, and if it's facing DOWN, this will return LEFT
def get_direction_to_right(self, direction): return direction_to_right[direction]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directionRight(self):\n return self.__directionRight", "def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp", "def turn_right(self):\n self.facing_direction += self.config\n if self.facing_direction > 7:\n self.facing_direction -= 8\n self.x, self.y = self.compute_positions()", "def get_right(self):\n return self.r_motor.get()", "def _get_look_right(self):\n view_direction = self.look_to - self.look_from\n right_vec = normalize(cross_product(view_direction, self.look_up))\n return right_vec", "def right(self):\r\n z = len(direction_tuple)\r\n if self.d in direction_tuple:\r\n index = direction_tuple.index(self.d)\r\n if index == (z-1):\r\n self.d = direction_tuple[0]\r\n else:\r\n self.d = direction_tuple[index + 1]\r\n else:\r\n print(\"NO VALID ROBOT POSITION\")", "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "def right_rotation(self, ang_vel):\n vel = self.om_left_max * self.R + ang_vel * self.L\n om_right = (vel + ang_vel * self.L) / self.R \n return vel, om_right", "def move_right(self):\n self.yaw_motor.step_forward()", "def driveRight(self):\n return self.__driveRight", "def MoveRightStep(self):\n if self.facing == 0:\n self.facing = 1\n self.x += self.stepLeft\n elif self.facing == 1:\n self.facing = 2\n self.y += self.stepUp\n elif self.facing == 2:\n self.facing = 3\n self.x -= self.stepRight\n elif self.facing == 3:\n self.facing = 0\n self.y -= self.stepDown", "def right(self):\n return self.r", "def right(self):\n if self.head.heading() != LEFT and self.last_direction != LEFT:\n self.head.setheading(RIGHT)", "def getRight(self):\n return self.right", "def right(self, angle):\r\n self.dir += math.radians(angle)", "def turn_right(self, speed):\n\t\t# You should modify the bias of 4 wheels depending on your hardware.\n\t\tself._front_left_wheel.anticlockwise_rotate(speed + LEFT_FR_BIAS + LEFT_RIGHT_BIAS)\n\t\tself._front_right_wheel.clockwise_rotate(1 + RIGHT_FR_BIAS)\n\t\tself._rear_left_wheel.anticlockwise_rotate(speed + LEFT_RIGHT_BIAS)\n\t\tself._rear_right_wheel.clockwise_rotate(1)", "def get_right(self):\n return self.right", "def turn_right(self):\n turn = self.__heading - Ship.TURN\n if turn < Ship.MIN_HEADING:\n turn += Ship.MAX_HEADING\n self.__heading = turn", "def get_direction(self):\n return self.actual_coordinates[2]", "def right():\n Robot.rotate(\"RIGHT\")", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def right(self, speed):\n self.controller.front_left_forward(speed)\n self.controller.front_right_backward(speed)\n self.controller.rear_left_backward(speed)\n self.controller.rear_right_forward(speed)", "def turn_right(self):\n self.direction_mod_offset += 1\n self.calculate_offset_mapping()\n direction_num = self.direction_mod_offset % len(self.direction_arr)\n client.rotateToYawAsync(direction_num * 90).join()", "def get_direction(self):\n return self.direction", "def getRobotDirection(self):\n return self.direction", "def getRobotDirection(self):\n return self.direction", "def get_right(self):\n return self.__right", "def right_or_left(self):\n self.scan()\n\n max = 0\n side = 'l'\n\n #analyze scan results\n for angle in self.scan_data:\n #RIGHT SIDE\n if angle < self.MIDPOINT:\n if self.scan_data[angle] > max:\n max = self.scan_data[angle]\n side = 'r'\n #LEFT SIDE\n else:\n if self.scan_data[angle] > max:\n max = self.scan_data[angle]\n side = 'l'\n\n return side", "def get_direction(self):\r\n return self.__direction", "def 
decide_turn_direction(self, next_direction):\n\n # if facing backwards, try to initially go forwards.\n if GPIO.input(pins[\"DirectionMotorRight\"]):\n GPIO.output(pins[\"DirectionMotorRight\"], GPIO.LOW)\n GPIO.output(pins[\"DirectionMotorLeft\"], GPIO.HIGH)\n self.facing = not self.facing\n\n if self.facing == \"Right\":\n if next_direction == \"Down\":\n return \"Right\"\n elif next_direction == \"Up\":\n return \"Left\"\n else: # Left\n return \"Left\"\n\n elif self.facing == \"Left\":\n if next_direction == \"Down\":\n return \"Left\"\n elif next_direction == \"Up\":\n return \"Right\"\n else: # Right\n return \"Right\"\n\n elif self.facing == \"Up\":\n if next_direction == \"Right\":\n return \"Right\"\n elif next_direction == \"Left\":\n return \"Left\"\n else: # Down\n return \"Left\"\n\n else: # down\n if next_direction == \"Right\":\n return \"Left\"\n elif next_direction == \"Left\":\n return \"Right\"\n else: # Up\n return \"Right\"" ]
[ "0.7777795", "0.735609", "0.72058654", "0.7097927", "0.70696753", "0.7038165", "0.69736624", "0.69053406", "0.6898099", "0.6856463", "0.6797121", "0.6769212", "0.6765747", "0.675166", "0.67343193", "0.6712764", "0.66965467", "0.6684191", "0.66769063", "0.666835", "0.665002", "0.66159225", "0.6604502", "0.6599639", "0.65954065", "0.65954065", "0.65916616", "0.65819985", "0.65666026", "0.65376025" ]
0.78666174
0
Moves the car in the desired direction. When this method is called, the car should already be certain that it can move to the tile in question. This method also removes the car from the tile it is moving away from and adds it to the tile it is moving onto.
def move_in_direction(self, direction, next_tile, previous_tile): # for ii in range(self.velocity): # Don't run over any cars or blow through traffic lights if self.can_move_to_tile(next_tile): self.turn_to_direction_map[direction]() # Adding the parentheses actually calls the method next_tile.add_car(self) previous_tile.remove_car(self) if self.destination == self.position: self.destination_reached = True else: # if we didn't move, update previous position to be current position # TODO: change `self.go_up()` and other movement methods to not update `self.previous_position` so that we # only do it once here self.previous_position = deepcopy(self.position)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_car(self):\n a = self.h / 50\n self.x += self.speed_x / FPS\n if self.x + 170 * a >= 1100:\n self.dir = -1\n self.speed_x = -self.speed_x\n if self.x - 170 * a <= 50:\n self.dir = 1\n self.speed_x = -self.speed_x", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def move_character(self, direction, game_resolution_function):\n pos = self.tiles.index(2) # Catch player's position\n if direction == self.LEFT and pos % self.row_len != 0 \\\n and self.tiles[pos-1] != 1:\n self.tiles[pos-1] = 2\n elif direction == self.RIGHT and (pos+1) % self.row_len != 0 \\\n and self.tiles[pos+1] != 1:\n self.tiles[pos+1] = 2\n elif direction == self.UP and pos-self.row_len > 0 \\\n and self.tiles[pos-self.row_len] != 1:\n self.tiles[pos-self.row_len] = 2\n elif direction == self.DOWN and pos+self.row_len < len(self.tiles) \\\n and self.tiles[pos+self.row_len] != 1:\n self.tiles[pos+self.row_len] = 2\n else:\n return\n self.tiles[pos] = 0\n self.update_components_count()\n game_resolution_function()", "def _move(self, direction, difference):\n 
future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def move(self) -> None:\n\n if self.move_up:\n self.__moveUpIfPossible()\n if self.move_down:\n self.__moveDownIfPossible()", "def move_car(self, name, movekey):\n for a_car in self.__cars:\n if name == a_car.get_name() \\\n and movekey in a_car.possible_moves():\n empty_cell = a_car.movement_requirements(movekey)\n lst_of_idx = self.cell_list()\n last_cell = lst_of_idx[-1]\n if empty_cell[0] == last_cell:\n a_car.move(movekey)\n return True\n if self.cell_content(empty_cell[0]) is None \\\n and empty_cell[0] in self.cell_list():\n a_car.move(movekey)\n return True\n return False", "def move(self, direction):\n pass", "def car_movement(car, car_list):\n car.move1()\n if accelerate_condition_car(car, car_list):\n problem_car = accelerate_condition_car(car, car_list)\n car.speed = problem_car[1]\n else:\n if random_deceleration():\n if car.speed > 2:\n car.decelerate()\n else:\n car.accelerate()\n car.move2()\n return car", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def move(self, direction):\n # TODO: Update the current room to a connected direction.\n # get route id list\n route_ids = self.current_room.is_connected(direction)\n # check route id one by one\n for route_id in route_ids:\n route = self.current_room.route[int(route_id)]\n room_id = int(route[1])\n # victory\n if room_id == 0:\n self.over = 1\n return\n\n if len(route) == 3:\n # item required\n item_name = route[2]\n if self.inventory.find(item_name):\n # contain this item\n self.current_room = self.rooms[room_id]\n return\n else:\n # do not required item\n self.current_room = self.rooms[room_id]\n return\n # do not have the required item, check the next route", "def 
move(self, distance):\n self._go(distance)", "def move(self, direction):\n # Store the values in the connection dictionary in a list\n self.room_num = self.current_room.connection[direction]\n\n # Check if there is a conditional movement and change the current room\n if len(self.room_num) == 1:\n self.current_room = self.rooms[int(self.room_num[0]) - 1]\n else:\n adventure.check(len(self.room_num))", "def move(self, direction, cycles):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tif direction == \"N\": # unit vector (0, -1)\n\t\t\ty_symbol = -1\n\t\t\tx_symbol = 0\n\t\tif direction == \"S\": # unit vector (0, 1)\n\t\t\ty_symbol = 1\n\t\t\tx_symbol = 0\n\t\tif direction == \"W\": # unit vector (-1, 0)\n\t\t\tx_symbol = -1\n\t\t\ty_symbol = 0\n\t\tif direction == \"E\": # unit vector (1, 0)\n\t\t\tx_symbol = 1\n\t\t\ty_symbol = 0\n\t\ti = 0\n\t\twhile i < int(cycles):\n\t\t\tnext_x = self.x + x_symbol # change x coordinate\n\t\t\tnext_y = self.y + y_symbol # change y coordinate\n\t\t\tnext_x, next_y = self.spherical(next_x, next_y) # get the next tile's coordinate\n\t\t\tif self.can_move(next_x, next_y): # check whether rover can move\n\t\t\t\t#reduce battery\n\t\t\t\tif self.planet.tiles[next_y][next_x].is_shaded():\n\t\t\t\t\tself.battery -= 1\n\t\t\t\tself.x = next_x\n\t\t\t\tself.y = next_y\n\t\t\t\ttile = self.planet.tiles[next_y][next_x]\n\t\t\t\ttile.set_occupant()\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tbreak", "def update(self):\n\n for car in self.cars.get_all_cars():\n old_position = car.position\n nearby_cars = self.cars.get_nearby_cars(position=car.position)\n nearby_cars[car.lane].remove(car)\n car.update(self.speed_limit, nearby_cars)\n # Check if we need to wrap the car (periodic boundary)\n if car.position >= self.track_length:\n car.position -= self.track_length\n elif car.position < 0:\n car.position += self.track_length\n # Inform the car tracker that the car has moved\n self.cars.car_has_moved(car=car, old_position=old_position)\n\n if self.cars.get_num_cars() < self.max_num_cars:\n # self.try_to_spawn_car()\n self.try_to_spawn_car_single_lane(lane=0)", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def move_car(self):\n import interface\n self.reset_position()\n print(\"move car during %f s...\" % self.portion_duration*self.nbr_portions)\n\n X, Y = [], []\n t_debut = time.time()\n while time.time() - t_debut < self.portion_duration*self.nbr_portions:\n current_time = time.time() - t_debut\n # On fait bouger les 4 roues.\n for numero_roue, speed in enumerate(self(current_time)):\n print(numero_roue)\n interface.move_wheel(numero_roue+1, speed)\n\n # Recuperation de la position reele\n (x, y), _ = interface.get_position()\n X.append(x)\n Y.append(y)\n\n interface.move_wheel(\"\", 0) # La voiture s'arette a la fin.\n print(\"\\tterminate\")\n return x, y", "def move_piece(self, direction=None):\n if self.will_collide(direction=direction):\n return\n self.active_piece.move(direction=direction)\n self.display_piece()", "def move(self):\n pass", "def move(self, game_display, maze):\n if isinf(self.lidars[0].get_sense()):\n self.forward(acc=2)\n elif self.lidars[0].get_sense() >= 2 * self.lidars[0].radius // 3:\n self.backward(acc=0.5)\n elif self.lidars[0].get_sense() >= self.lidars[0].radius // 3:\n self.backward()\n else:\n self.backward(acc=2)\n Drone.move(self, game_display=game_display, maze=maze)", "def move(self):\n\n x, y = self.position\n\n if self.in_spawn_area:\n if 0 <= x < MAP_SIZE and 0 <= y < MAP_SIZE:\n self.in_spawn_area = False\n\n preferred_direction = self.get_preferred_direction()\n\n if preferred_direction == (0, 0):\n return\n\n new_tiles = self.calculate_tiles_ahead(preferred_direction)\n\n if self.can_advance(new_tiles, preferred_direction):\n self.position = self.position[0] + preferred_direction[0] * 2, self.position[1] + preferred_direction[1] * 2\n self.update_cache_after_move(preferred_direction, new_tiles)\n self.previous_direction = preferred_direction[:]", "def cancel_move(self):\n self.should_move = False", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def move(self):\n vector = vectors[compass.index(self.heading)]\n x = self.position[0] + vector[0]\n y = self.position[1] + vector[1]\n self._check_move(x, self.plateau[0])\n self._check_move(y, self.plateau[1])\n return replace(self, position=(x, y))", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction, cycles):\n\t\tpass", "def unmakeMove(self, move):", "def move_dolly(self, distance: int, direction: int, time: int = None):\n\n self.__do_action(self.motor.move(direction, distance, time))", "def move(self, direction):\n moved = False\n initial_tiles = 
self.dir_dic[direction]\n offset = OFFSETS[direction]\n if direction == UP or direction == DOWN:\n bound = self.grid_height\n else:\n bound = self.grid_width\n for tile in initial_tiles:\n temp = [self.get_tile(tile[0] + idx*offset[0], tile[1] + idx*offset[1]) \n for idx in range(bound)]\n temp = merge(temp)\n \n for idx in range(bound):\n row = tile[0] + idx*offset[0]\n col = tile[1] + idx*offset[1]\n if self.get_tile(row, col) != temp[idx]:\n moved = True\n self.set_tile(row, col, temp[idx]) \n if moved:\n self.new_tile()", "def move(self):\n raise NotImplementedError" ]
[ "0.6581758", "0.64435995", "0.6281606", "0.62406456", "0.62285095", "0.61850435", "0.6177967", "0.617256", "0.6108114", "0.60884064", "0.60834736", "0.6047164", "0.6045349", "0.60351455", "0.600586", "0.599142", "0.5980788", "0.5972384", "0.5958476", "0.5947111", "0.5936153", "0.59324896", "0.59021217", "0.5893701", "0.5893701", "0.5870961", "0.5852591", "0.58477646", "0.5836485", "0.5809465" ]
0.7081806
0
To be called by the main simulator when the car reaches its destination. It removes the car from the tile objects so that other cars can continue on their way
def destroy(self): self.city_map.get_tile_at_position(self.position).car = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n\n for car in self.cars.get_all_cars():\n old_position = car.position\n nearby_cars = self.cars.get_nearby_cars(position=car.position)\n nearby_cars[car.lane].remove(car)\n car.update(self.speed_limit, nearby_cars)\n # Check if we need to wrap the car (periodic boundary)\n if car.position >= self.track_length:\n car.position -= self.track_length\n elif car.position < 0:\n car.position += self.track_length\n # Inform the car tracker that the car has moved\n self.cars.car_has_moved(car=car, old_position=old_position)\n\n if self.cars.get_num_cars() < self.max_num_cars:\n # self.try_to_spawn_car()\n self.try_to_spawn_car_single_lane(lane=0)", "def drop(self):\n if (pyxel.frame_count % self.vy) == 0:\n mapDel(self, theFallen)\n self.y = (self.y + 1)\n mapAdd(self, theFallen)", "def removeIfDead(self):\n if self.y < 0:\n del projectiles[findPlace(self, projectiles)]", "def test_car_unpark(self):\n\n self.test_space.park(Car(\"DL-2N-007\", \"Space Grey\"))\n\n self.assertEqual(self.test_space.get_vehicle.get_regno, 'DL-2N-007')\n self.assertEqual(self.test_space.get_vehicle.get_colour, 'Space Grey')\n\n self.test_space.unpark()\n\n self.assertEqual(self.test_space.get_vehicle, None)", "def move_cloud(self):\n self.remove()\n self.min_x -= 1\n self.max_x -= 1\n self.update()", "def player_removes_tile(self, x, y):\n activePlayer = self.get_active_player()\n if activePlayer.humanControlled:\n super(RobotGame, self).player_removes_tile(x, y)", "def unmakeMove(self, move):", "def move_car(self):\n a = self.h / 50\n self.x += self.speed_x / FPS\n if self.x + 170 * a >= 1100:\n self.dir = -1\n self.speed_x = -self.speed_x\n if self.x - 170 * a <= 50:\n self.dir = 1\n self.speed_x = -self.speed_x", "def stop(self):\n self.move(None)", "def stop_full(self):\n self.systems[\"driver\"].move(0, 0)", "def process(self, car):\n super(LeftIntersectionMessage, self).process(car)\n car.delete_car_at_intersection(self)", "def recall(self):\n for t in self.placed_tiles:\n row = self.placed_tiles[t][1][0]\n col = self.placed_tiles[t][1][1]\n # remove tiles from board\n self.board.board[row][col].letter = None\n # put tiles back on rack\n self.rack[t] = self.placed_tiles[t][0]", "def remove_obstacle(self, x, y):\n self.BOARD[y][x].traversable = True\n self.board_array[y][x] = 0", "def _unmove(self):\n (start, end) = self.history.pop()\n self._board[start] = self._board[end]\n self._board[end] = 0\n self.winner = None\n self.player_turn = CheckersGame.opposite[self.player_turn]", "def setup(self):\n pygame.init()\n screen = self.screen\n screen.fill((240,240,240)) \n inter_car_gap = 10\n \n for car in self.model.board.cars.values():\n \n # get long and short side of the car\n long_side = int(((self.size*(car.length)) - inter_car_gap))\n short_side = self.size - inter_car_gap \n\n # get car x and y coordinates in order to place car on the board\n position = self.model.get_car_pos(car)\n row, column = position[0], position[1]\n x = ((column)*self.size + (inter_car_gap / 2))\n y = ((row)*self.size + (inter_car_gap / 2))\n\n # place red car on the board\n if car.cid == 'X':\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image,self.red,(0,0,((self.size*(car.length))-inter_car_gap),(self.size - inter_car_gap)))\n \n # place trucks on the board\n elif car.length > 2:\n if car.orientation == \"H\":\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image, self.green, (0, 0, ((self.size*car.length)-inter_car_gap), (self.size - inter_car_gap)))\n 
else:\n image = pygame.Surface((short_side, long_side))\n sprite = pygame.draw.rect(image, self.green, (0, 0 ,(self.size - inter_car_gap), ((self.size*car.length) - inter_car_gap)))\n \n # place cars on the board\n else:\n if car.orientation == \"H\":\n image = pygame.Surface((long_side, short_side))\n sprite = pygame.draw.rect(image,self.blue,(0,0,((self.size*car.length) - inter_car_gap),(self.size - inter_car_gap)))\n else:\n image = pygame.Surface((short_side, long_side))\n sprite = pygame.draw.rect(image,self.blue,(0,0,(self.size - inter_car_gap),((self.size * car.length) - inter_car_gap)))\n \n screen.blit(image,(x,y))\n\n pygame.display.update()", "def minusToHome():\n\tif (not checkMotorsInPosition(-134.76, -34.197)):\n\t\treturn\n\n\tmoveMotor(dktheta, 0)\n\tmoveMotor(dkappa, 0)\n\tsimpleLog(\"Done\")", "def update_cars(self):\n \"\"\" Reconstructs and handles the collision on the fly, to be more efficient \"\"\"\n\n # Run the cars if the sim is not paused.\n if self.paused: return\n\n if self.draw_sensor: self.collision_points = []\n\n # Calculate the delta of fit_timer and generation timer\n self.fit_timer += g.delta\n self.generation_timer -= g.delta\n\n # If the timer is up, then go to the next generation.\n if self.generation_timer <= 0:\n self.reset_gen()\n\n # Determine whether the fitness of each car will be calculated in this frame.\n calc_fit = False\n if self.fit_timer > Car.fit_calc:\n self.fit_timer -= Car.fit_calc\n calc_fit = True\n\n # Reset list\n self.fit_list = []\n\n # Grid size\n size = g.conf[\"col_grid_size\"]\n # start = time()\n\n # Counter for the cars\n counter = 0\n for car in self.cars:\n # We check first if the car is active.\n # If its not, move on to the next car.\n counter += 1\n\n # Calculate the fitness, if it's determined to be calculated this frame.\n if calc_fit:\n # Only calculate if car is active\n if car.active:\n car.fit += distance(car.x, car.y, *car.fit_last_coords)\n car.fit_last_coords = [car.x, car.y]\n self.fit_list.append([counter - 1, car.fit])\n\n if not car.active:\n continue\n\n # Update the car vars\n car.update()\n \n # Get the future polygon, and use it for collision\n f_poly = car.get_future_poly(Car.car_poly)\n\n # First, based on the car's bounding box insert it into the coll_dict.\n x1, y1, x2, y2 = col.poly_bounding_box(f_poly)\n\n # Whether the car is colliding or not.\n collision = False\n\n ## Checks and inserts the bounding box into the grids that it overlaps\n # Gets all the grid it consumes\n for i in range(int(x1 // size), int(x2 // size + 1)):\n for j in range(int(y1 // size), int(y2 // size + 1)):\n # Check the collision first\n # Iterate every collidable object in the coll_dict and in the coll_dict of track manager.\n \n # If a collision has not been confirmed yet, then check it.\n # Track collision\n if not collision:\n for obj in self.trackmanager.coll_dict.get((i, j), []):\n # Check the AABB of the current car and the destination object\n if col.rectrect(x1, y1, x2, y2, *col.correct_bounding_box(obj[0], obj[1], obj[2], obj[3])):\n # AABB collision detected\n if col.linepoly(obj[0], obj[1], obj[2], obj[3], f_poly):\n collision = True\n break\n\n # Move the car if collision is not detected.\n if not collision:\n car.handle_movement(f_poly)\n else:\n # Don't move the car and deactivate.\n car.handle_movement()\n car.active = False\n \n # Here, we can do one more iteration to get all the distances from the sensors.\n for s in range(len(car.res_sensor)//2):\n\n # Gets the bounding box for the current sensor\n # 
This is an array length of 4 [x1, y1, x2, y2]\n sensor = car.res_sensor[s*2] + car.res_sensor[s*2 +1]\n \n # Get the correct bounding box\n x1, y1, x2, y2 = col.correct_bounding_box(*sensor)\n\n # Iterate all the collision objects\n collision = False\n for i in range(int(x1 // size), int(x2 // size + 1)):\n for j in range(int(y1 // size), int(y2 // size + 1)):\n \n # First, iterate over the roads\n for obj in self.trackmanager.coll_dict.get((i, j), []):\n # If a bounding box is found\n if col.rectrect(x1, y1, x2, y2, *col.correct_bounding_box(*obj)):\n # print(\"Sensor {}:\".format(s), *sensor, *obj)\n # Find the line intersection.\n intersection = col.lineline(*sensor, *obj, True)\n if intersection:\n # Add the sensor data to the car object.\n car.sensors[s] = distance(sensor[0], sensor[1], *intersection, True) / Car.sensor_max_dist[s]\n collision = True\n if self.draw_sensor: self.collision_points.append(intersection)\n\n # Draw the dot at the tip of the sensor if collision is not detected.\n if self.draw_sensor and not collision:\n self.collision_points.append([sensor[2], sensor[3]])\n\n if calc_fit:\n max_fit = max(self.fit_list, key=lambda x : x[1])[1]\n\n # Calculate fit based on the normalized value from the distance fit,\n # and the speed of the car.\n fit_final = list(map(\n lambda x : [x[0], (x[1]/max_fit) + self.cars[x[0]].speed/Car.max_speed * 0.5],\n self.fit_list\n ))\n fit_final.sort(key=lambda x: x[1], reverse=True)\n\n # Set the best fit\n self.best_fit = [fit_final[0][0], fit_final[1][0]]\n \n # Change the color according the best.\n for i in range(len(self.cars)):\n self.cars[i].car_color = arcade.color.ALLOY_ORANGE if i in self.best_fit else (arcade.color.BLUE_SAPPHIRE if self.cars[i].active else arcade.color.RED_DEVIL)\n\n g.ui_text += \"Time Left: {}\\nIteration: {}\\n\".format(round(self.generation_timer), self.iteration)\n # print(\"col_time: {}ms\".format(round((time() - start) * 1000, 2)))", "def one_second(self):\n self.road.reset()\n for car, car_ahead in zip(self.car_list, self.car_list[1:]+self.car_list[:1]):\n if car.current_speed > 25 and self.check_for_crash(car, car_ahead):\n self.match_car_speed(car, car_ahead)\n elif self.check_buffer_zone(car, car_ahead):\n self.match_car_speed(car, car_ahead)\n else:\n self.accelerate_or_deccelerate(car)\n car.move()\n self.road.place_car(car)", "def move1(self):\n\n options = self.location.exits.keys()\n self.location.objects.remove(a)\n print('fred is moving..')\n self.location = self.location.exits[random.choice(list(options))]\n self.location.objects.append(a)", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_car(self):\n 
import interface\n self.reset_position()\n print(\"move car during %f s...\" % self.portion_duration*self.nbr_portions)\n\n X, Y = [], []\n t_debut = time.time()\n while time.time() - t_debut < self.portion_duration*self.nbr_portions:\n current_time = time.time() - t_debut\n # On fait bouger les 4 roues.\n for numero_roue, speed in enumerate(self(current_time)):\n print(numero_roue)\n interface.move_wheel(numero_roue+1, speed)\n\n # Recuperation de la position reele\n (x, y), _ = interface.get_position()\n X.append(x)\n Y.append(y)\n\n interface.move_wheel(\"\", 0) # La voiture s'arette a la fin.\n print(\"\\tterminate\")\n return x, y", "def move_back(self):\n\n # slowly drive backwards\n self.velocity = -1 * const.Driving.CAUTIOUS_VELOCITY\n self.angle = const.Driving.NEUTRAL_STEERING_ANGLE\n\n # drive as long there is enough space to the next vehicle or obstacle\n gap = self.formation.calc_gap()\n self.start_driving()\n while self.sensor_manager.rear > gap: continue\n\n self.stop_driving()", "def move_in_direction(self, direction, next_tile, previous_tile):\r\n # for ii in range(self.velocity):\r\n # Don't run over any cars or blow through traffic lights\r\n if self.can_move_to_tile(next_tile):\r\n self.turn_to_direction_map[direction]() # Adding the parentheses actually calls the method\r\n next_tile.add_car(self)\r\n previous_tile.remove_car(self)\r\n if self.destination == self.position:\r\n self.destination_reached = True\r\n else: # if we didn't move, update previous position to be current position\r\n # TODO: change `self.go_up()` and other movement methods to not update `self.previous_position` so that we\r\n # only do it once here\r\n self.previous_position = deepcopy(self.position)", "def update(self):\n self._spots[constants.ROAD_LENGTH - 1].remove_car(0)\n self._spots[0].remove_car(1)\n\n for i in xrange(constants.ROAD_LENGTH - 1):\n self._num_queued += (self._spots[constants.ROAD_LENGTH - i - 2].\n update_spot(self._spots[constants.ROAD_LENGTH - i - 1], 0))\n self._num_queued += self._spots[i + 1].update_spot(\n self._spots[i], 1)\n\n self.create_car(self._distribution)\n self._steps += 1", "def interaction_void(self) -> None:\n self.grid.obj_list.swap_obj(self.moving_character, self.target)", "def removeOldCars(self):\n self.cars = [car for car in self.cars if (self.currentFrame - car.updatedInFrame) < DROP_AFTER_N_FRAMES]\n for i, car in enumerate(self.cars): # update id's\n car.id = i + 1", "def remove_self(self):\n if self.game.rules[\"trapping\"]:\n [neighbor.untrap() for neighbor in self.get_neighbors() if neighbor.trapped and self in neighbor.get_sandwichers() and len(neighbor.get_sandwichers()) == 2]\n self.game.empty_square(self.position)\n self.position = None", "def stop(self):\n self.move(0, 0)", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)" ]
[ "0.6408383", "0.6162225", "0.6145868", "0.6057817", "0.6027329", "0.6027052", "0.59835607", "0.59433895", "0.58956313", "0.58877546", "0.5817011", "0.5816", "0.579692", "0.5777173", "0.5775827", "0.5755283", "0.5739541", "0.57330614", "0.5697933", "0.5685382", "0.5685382", "0.5684213", "0.567712", "0.56711876", "0.5663119", "0.56508553", "0.5649526", "0.564285", "0.56373966", "0.56363386" ]
0.7115272
0
Gets a list of cars between this car and the next intersection the car is approaching. We only look at the cars straight ahead and cutoff the range at the nearest intersection straight ahead, because we don't want to start trying to flock to cars several blocks ahead. And since cars will probably be going in a different direction after the intersection (and this car will turn rather than going straight over 50% of the time), we don't pay any attention to cars past the next intersection.
def get_cars_between_me_and_next_intersection(self): next_intersection_tile = self.path[0] tiles = self.city_map.get_tiles_between_tile_a_and_tile_b(self.city_map.get_tile_at_position(self.position), next_intersection_tile) cars = [] for tile in tiles: if tile.car is not None: cars.append(tile.car) return cars
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def get_cars_at_intersection(self):\n return self.cars_at_intersection", "def getNext(self, carPos):\n if carPos.lane != self:\n print \"car is on other lane\"\n return []\n next = []\n shortestDist = sys.maxint\n for car in self.carsPosition.itervalues():\n if car.isGoalFlag:\n next.append(car)\n continue\n if car.position is None:\n print \"the car has no position\"\n continue\n if car.car.id == carPos.car.id:\n continue\n distance = car.position - carPos.position\n if not car.free and (0 < distance < shortestDist): # only pick the cars in front of current car\n shortestDist = distance\n next.append(car)\n return next", "def possibleMatches(self, newCar: Car):\n selectedCars = []\n for car in self.cars:\n if self.carsAreClose(car, newCar):\n selectedCars.append(car)\n\n return selectedCars", "def doRide(car, ride):\n global MAX_DISTANCE_START, MAX_DISTANCE_FINISH\n (a, b, x, y, s, f) = ride\n lenght_ride = abs(x - a) + abs(y - b)\n # Simple heuristic to make it faster\n if lenght_ride > MAX_DISTANCE_FINISH: # So it doesn't take too long rides\n return None\n if car is None or len(car) == 0: # No car or no rides asigned to the car\n (cx, cy) = INITIAL_POS\n cs = INITIAL_TIME\n else: # Else, look in the list\n last_ride = car[-1]\n (cx, cy) = tuple(rides[last_ride][0:2]) # Position of the car\n # When will the car be at that position\n cs = rides_done[last_ride][2]\n # Distance to the ride's starting intersection\n distance = abs(cx - a) + abs(cy - b)\n if distance > MAX_DISTANCE_START: # Do not take too far away ones\n return None\n when = max(cs + distance, s)\n if when + lenght_ride > f: # The car cant make it\n return None\n\n return when, when + lenght_ride, when == s", "def pred_car_other_lane(self):\n other_lane = self.other_lane()\n\n for i in range(self.road.N):\n if self.road.cells[other_lane][(self.x + i + 1) % (self.road.N-1)] != -1:\n return self.road.cars[self.road.cells[other_lane][(self.x + i + 1) % (self.road.N-1)]]", "def succ_car_other_lane(self):\n other_lane = self.other_lane()\n\n for i in range(self.road.N):\n if self.road.cells[other_lane][(self.x - (i + 1)) % (self.road.N-1)] != -1:\n return self.road.cars[self.road.cells[other_lane][(self.x - (i + 1)) % (self.road.N-1)]]", "def traverse(g, cars):\n while any(car.timeleft > 0 and car.running for car in cars):\n for car in cars:\n if car.running and car.timeleft > 0:\n path = closest(car.position, car.angle, car.timeleft)\n if path is None:\n car.running = False\n else:\n car.follow_path(path)\n return cars", "def accelerate_condition_car(car, car_list):\n check = (\n [x for x in car_list if (\n (car.position) < x.position < ((car.position)+25))])\n check1000 = (\n [x for x in car_list if (\n (car.position) < (x.position+1000) < ((car.position)+25))])\n\n if check:\n problem_car = check[0]\n return problem_car.position, problem_car.speed\n elif check1000:\n problem_car = check1000[0]\n return problem_car.position, problem_car.speed\n else:\n return False", "def _distance_next(self):\n\n self.distance = 10\n\n # Here a set index to 0 if the car is finishing a lap\n # Also reset the farthest\n if self.index > (len(self.x_trajectory) - 6) and self.closed:\n self.index = 0\n self.farthest = -1\n self.laps += 1\n\n for w in range(self.index, self.index + 20):\n\n self.dist_point = math.sqrt((self.x_trajectory[w] - self.x)**2\n + (self.y_trajectory[w] - self.y)**2)\n\n if 
self.dist_point < self.distance:\n self.distance = self.dist_point\n self.index = w\n\n if w >= (len(self.x_trajectory) - 1):\n break\n\n self._calc_side()\n\n self.distance = self.distance * self.sign\n\n return self.distance", "def combineCars(self, cars):\n\n changed = False\n newcars = []\n\n while (len(cars) > 0):\n c = cars[0]\n cars = cars[1:]\n found = False\n\n for car in cars:\n if (c[-1] == car[0]) and (c[0] == car[-1]):\n # Could be done in any order\n self.multiplier = self.multiplier * 2\n newcars.append(c)\n cars.remove(car)\n changed = True\n found = True\n break\n elif (c[-1] == car[0]):\n # end to front\n nc = c + car\n newcars.append(nc)\n cars.remove(car)\n changed = True\n found = True\n break\n elif (c[0] == car[-1]):\n # front to end\n nc = car + c\n newcars.append(nc)\n cars.remove(car)\n changed = True\n found = True\n break\n\n if not found:\n newcars.append(c)\n\n return (newcars, changed)", "def intersect(self, other):\n if isinstance(other, Arc):\n if self.center == other.center:\n if nearly_zero(self.radius - other.radius):\n v = Arc(self.center, self.radius, 0, 0)\n v.angle_range = self.angle_range.intersection(other.angle_range)\n return v\n else:\n return None\n else:\n # find the two points where the circles intersect\n # filter them by the angle ranges of both arcs, must be in both to survive\n # return list of surviving points, or None\n k = 1. / abs(self.center - other.center)\n theta = math.atan2(other.center.y - self.center.y, other.center.x - self.center.x)\n r1 = k * self.radius\n r2 = k * other.radius\n intersections = []\n # u and v are in a coordinate system that has been scaled, rotated, and translated\n # to move the two centers to (0, 0) and (1, 0) to simplify some of the math.\n u = (r1**2 + 1 - r2**2) / 2\n if abs(r1) >= abs(u):\n v = (r1**2 - u**2) ** .5\n # Transform u and v back into the original coordinate system.\n x1 = self.center.x + (u * math.cos(theta) - v * math.sin(theta)) / k\n y1 = self.center.y + (v * math.cos(theta) + u * math.sin(theta)) / k\n p = Point(x1, y1)\n if self.included_angle(p) and other.included_angle(p):\n intersections.append(Point(x1, y1))\n if not nearly_zero(r1 - u):\n x2 = self.center.x + (u * math.cos(theta) + v * math.sin(theta)) / k\n y2 = self.center.y + (-v * math.cos(theta) + u * math.sin(theta)) / k\n p2 = Point(x2, y2)\n if self.included_angle(p2) and other.included_angle(p2):\n intersections.append(p2)\n return intersections or None\n elif isinstance(other, LineSegment):\n c = (self.center - other.p2).square() - self.radius**2\n b = 2 * (other.p1 - other.p2).dot(other.p2 - self.center)\n a = (other.p1 - other.p2).square()\n det = b**2 - 4 * a * c\n if det < 0:\n return None\n elif nearly_zero(det):\n pts = [-b / (2. 
* a)]\n else:\n pts = [(-b + det**0.5) / (2 * a), (-b - det**0.5) / (2 * a)]\n pts = map(other.param_to_point,\n filter(lambda root: 0 <= root <= 1, pts))\n pts = filter(self.included_angle, pts)\n if len(pts) == 0:\n return None\n elif len(pts) == 1:\n return pts[0]\n else:\n return pts\n raise TypeError(other)", "def intersection(self, other):\n a, b = min(self.start, self.finish), max(self.start, self.finish)\n c, d = min(other.start, other.finish), max(other.start, other.finish)\n a1 = normalize(a, 0, TWO_PI)\n a, b = a1, b + a1 - a\n c1 = normalize(c, 0, TWO_PI)\n c, d = c1, d + c1 - c\n e, f = max(a, c), min(b, d)\n if f >= e:\n return AngleRange(e, f)\n else:\n return None # no overlap", "def intersection_distances(self, altitude, start_distance, finish_distance):\n distances = []\n alt_index, alt_ratio = calculate_value_reference(self.distances,\n start_distance)\n start_alt = calculate_value(self.altitudes, alt_index, alt_ratio)\n\n finish_index, finish_ratio = calculate_value_reference(self.distances,\n finish_distance)\n while alt_index < finish_index:\n next_index = alt_index + 1\n next_ratio = 0.0\n next_alt = calculate_value(self.altitudes, next_index, next_ratio)\n\n # Does this section span the given altitude?\n if (next_alt != start_alt) and \\\n (min(start_alt, next_alt) < altitude < max(start_alt, next_alt)):\n delta = altitude - start_alt\n denom = next_alt - start_alt\n ratio = delta / denom\n distance = calculate_value(self.distances, alt_index, ratio)\n distances.append(distance)\n\n alt_index = next_index\n alt_ratio = next_ratio\n start_alt = next_alt\n\n if finish_ratio:\n finish_alt = calculate_value(self.altitudes, finish_index, finish_ratio)\n # Does the last section span the given altitude?\n if (start_alt != finish_alt) and \\\n (min(start_alt, finish_alt) < altitude < max(start_alt, finish_alt)):\n # Calculate the altitude at the end of the section to maximimse accuracy\n next_alt = calculate_value(self.altitudes, finish_index, 1.0)\n delta = altitude - start_alt\n denom = next_alt - start_alt\n ratio = delta / denom\n distance = calculate_value(self.distances, alt_index, ratio)\n distances.append(distance)\n\n return distances", "def intersect(self, other: Wire) -> List[Intersection]:\n intersections = []\n for segment_a, segment_b in product(self.wire_segments, other.wire_segments):\n intersection = segment_a.intersect(segment_b)\n if intersection and intersection.location != self.ORIGIN:\n intersections.append(intersection)\n\n return intersections", "def find_closest_intersections(wire_data):\n\n # Find the intersection of the two lists\n intersections = find_intersections(wire_data)\n\n # For each intersection measure distance from the centre\n dists = [abs(point[0]) + abs(point[1]) for point in intersections]\n\n return min(dists)", "def get_lat_offsets(self):\n\n startlat = self.parameters['startlatitude']\n stoplat = self.parameters['stoplatitude']\n\n #Given the start and stops,\n startidx, startvalue = utils.getnearest(self.latitudes, startlat)\n stopidx, stopvalue = utils.getnearest(self.latitudes, stoplat)\n startidx -= 2\n stopidx += 2\n latslice = np.arange(startidx, stopidx + 1)\n if utils.checkmonotonic(latslice):\n latslice = latslice\n else:\n #TODO: Support pole crossing images\n logger.error('Image is pole crossing, not currently supported.')\n '''\n print \"NOT MONOTONIC\"\n #Handle wraps around the poles\n latslice = np.arange(start_idx, stop_idx + 1)\n nlats = self.startlookup.shape[1]\n greatermask = np.where(latslice >= nlats)\n 
latslice[greatermask] -= nlats\n lessmask = np.where(latslice < 0)\n latslice[lessmask] += self.startlookup.shape[1]\n\n self.latsort = np.argsort(latslice)\n self.latslice = latslice[self.latsort]\n self.latsort = np.argsort(self.latsort)\n '''\n latslice = None\n logger.debug('Start latitude node is {}. Nearest lookup node is {}.'.format(startlat, startidx))\n logger.debug('Stop latitude node is {}. Nearest lookup node is {}.'.format(stoplat, stopidx))\n return latslice", "def find_closest_flight_in_range(self, x, y, max_range=10):\n closest_flight = None\n closest_distance = max_range\n point = pygame.math.Vector2(x, y)\n for flight in self.incoming_flights:\n distance = point.distance_to(flight.get_pos())\n if distance < closest_distance:\n closest_distance = distance\n closest_flight = flight\n return closest_flight", "def _get_obs(self):\n pos1, orn1 = p.getBasePositionAndOrientation(self.car1)\n pos2, orn2 = p.getBasePositionAndOrientation(self.car2)\n theta1 = p.getEulerFromQuaternion(orn1)[2]\n theta2 = p.getEulerFromQuaternion(orn2)[2]\n x1 = pos1[0]\n y1 = pos1[1]\n x2 = pos2[0]\n y2 = pos2[1]\n dis = np.sqrt((x1-x2)**2 + (y1-y2)**2)\n vec_dis1 = np.array([x2-x1, y2-y1])\n vec_dis2 = np.array([x1-x2, y1-y2])\n wall1 = min([abs(self.max_dist_x-x1),abs(-self.max_dist_x-x1),abs(self.max_dist_y-y1),abs(-self.max_dist_y-y1)])\n wall2 = min([abs(self.max_dist_x-x2),abs(-self.max_dist_x-x2),abs(self.max_dist_y-y2),abs(-self.max_dist_y-y2)])\n xp1 = x1 + math.sin(theta1)\n yp1 = y1 - math.cos(theta1)\n vec1 = np.array([xp1 - x1, yp1 - y1])\n vec1_len = np.sqrt((x1-xp1)**2 + (y1-yp1)**2)\n cross1 = np.cross(vec1, vec_dis1)\n dot1 = np.dot(vec1, vec_dis1)\n angle1 = math.asin(cross1/(dis*vec1_len))\n if(dot1<0 and cross1<0):\n angle1 = -(np.pi + angle1)\n if(dot1<0 and cross1>0):\n angle1 = np.pi - angle1\n xp2 = x2 + math.sin(theta2)\n yp2 = y2 - math.cos(theta2)\n vec2 = np.array([xp2 - x2, yp2 - y2])\n vec2_len = np.sqrt((x2-xp2)**2 + (y2-yp2)**2)\n cross2 = np.cross(vec2, vec_dis2)\n dot2 = np.dot(vec2, vec_dis2)\n angle2 = math.asin(cross2/(dis*vec2_len))\n if(dot2<0 and cross2<0):\n angle2 = -(np.pi + angle2)\n if(dot2<0 and cross2>0):\n angle2 = np.pi - angle2 \n return np.array([[wall1,dis,angle1],[wall2,dis,angle2]])", "def lower_central_series(self):\n res = [self]\n current = self\n nxt = self.commutator(self, current)\n while not current.is_subgroup(nxt):\n res.append(nxt)\n current = nxt\n nxt = self.commutator(self, current)\n return res", "def get_closest_lane(self, car: Car) -> AbstractLane:\n pos = car.position\n lanes = self.upstream_lane_list if car.lane.upstream else self.downstream_lane_list\n return min(lanes, key=lambda l: l.distance(pos))", "def nextIntersectors(self, inter):\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n otherFInters = self.getIntersectorList(inter.f.vertices)\n # First intersector\n pInters = self.getIntersectorList(inter.pe.pFace.vertices)\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n pInters),\n None)\n if otherI1 is None:\n # The pFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the pFace\n otherI1 = next(filter(lambda x: x is not None and x.f == inter.pe.pFace,\n otherFInters),\n None)\n if otherI1 is None:\n # polyhedron(inter.f.vertices + inter.pe.pFace.vertices,\n # inter.pe.pFace.edges() + inter.f.edges(),\n # [inter.f, inter.pe.pFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, '\\n\\n', inter.pe.pFace)\n assert all(v 
in self.poly1.vertices for v in inter.f.vertices) or all(v in self.poly2.vertices for v in inter.f.vertices)\n assert self.poly1.facesInVertices() and self.poly2.facesInVertices()\n assert self.poly1.pnFacesInPoly() and self.poly2.pnFacesInPoly()\n assert self.poly1.nonDoubleVertices() and self.poly2.nonDoubleVertices()\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, pInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.f.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, pInters)])\n # # print('\\n', [(i, i.f) for i in filter(lambda x: x is not False, otherFInters)])\n # # print([sum(min(v.dist(v2) for v2 in inter.pe.pFace.vertices) for v in i.f.vertices) for i in filter(lambda x: x is not False, otherFInters)])\n raise ValueError('No intersector found')\n # Second intersector\n nInters = self.getIntersectorList(inter.pe.nFace.vertices)\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.f and x != inter,\n nInters),\n None)\n if otherI2 is None:\n # The nFace does not intersect inter.f a second time,\n # looking for a place where inter.f intersects the nFace\n otherI2 = next(filter(lambda x: x is not None and x.f == inter.pe.nFace,\n otherFInters),\n None)\n if otherI2 is None:\n polyhedron(inter.f.vertices + inter.pe.nFace.vertices,\n inter.pe.nFace.edges() + inter.f.edges(),\n [inter.f, inter.pe.nFace]).plot(True, col=('none', 'k', 'r'))\n # # print(inter.f, inter.pe.pFace)\n raise ValueError('No intersector found')\n inter.adjacents = (otherI1, otherI2)\n return (otherI1, otherI2)", "def stable_limits(self, dist, left_cone, right_cone): \n\n # find where car COR line (rear axle) intersects bottom of cone\n # top of cone not possible with a car\n car_ic_line = -1*(self.wheelbase + self.bump2_front_axle + dist)\n c_l = (car_ic_line - left_cone[0,1])/left_cone[0,3]\n c_r = (car_ic_line - right_cone[0,1])/right_cone[0,3]\n left_int = c_l*left_cone[0,2:] + left_cone[0,:2]\n right_int = c_r*right_cone[0,2:] + right_cone[0,:2]\n\n # convert x values (turning radius) to max angular velocities\n left_w = self.speed/left_int[0]\n right_w = self.speed/right_int[0]\n\n min_stable = np.array([0, self.speed, left_w])\n max_stable = np.array([0, self.speed, right_w])\n\n return min_stable, max_stable", "def _pair_intersection(\n cls,\n availabilities_a: List[\"Availability\"],\n availabilities_b: List[\"Availability\"],\n ) -> List[\"Availability\"]:\n result = []\n\n # yay for O(b*a) time! I am sure there is some fancy trick to make this faster,\n # but we're dealing with less than 100 items in total, sooo.. 
¯\\_(ツ)_/¯\n for a in availabilities_a:\n for b in availabilities_b:\n if a.overlaps(b, True):\n result.append(a.intersect_with(b))\n\n return result", "def find_overlapping_cds_simple(v_start, v_stop, cds_begins, strand):\n # cds_start = cds_begin[0]\n if strand == '+':\n return list(filter(lambda x: x[0] >= v_start and x[0] < v_stop, cds_begins))\n else:\n return list(filter(lambda x: x[0] > v_start and x[0] <= v_stop, cds_begins))", "def closest_intersect_steps(self, other: Wire) -> Tuple[Intersection, int]:\n intersections = self.intersect(other)\n\n # For each intersection, iterate along each wire's path until the intersection is\n # encountered, keeping track of the number of steps taken\n distances = []\n for intersection in intersections:\n total_steps = 0\n for wire in (self, other):\n for segment in wire.wire_segments:\n try:\n total_steps += segment.steps.index(intersection.location)\n break\n except ValueError:\n # The intersection coordinate isn't in our segment\n total_steps += len(segment.steps) - 1\n\n distances.append((intersection, total_steps))\n\n return sorted(distances, key=lambda x: x[1])[0]", "def _get_next_candidates(\n self, query: str, candidate_obj: BkTreeNode, tolerance: int\n ) -> Tuple[list, int, float]:\n dist = self.distance_function(candidate_obj.node_value, query)\n if dist <= tolerance:\n validity = 1\n else:\n validity = 0\n search_range_dist = list(range(dist - tolerance, dist + tolerance + 1))\n candidate_children = candidate_obj.children\n candidates = [\n k\n for k in candidate_children.keys()\n if candidate_children[k] in search_range_dist\n ]\n return candidates, validity, dist", "def car_positions(car_sectors, car_laps):\n car_sector_and_lap = [0] * 6\n # calculate all cars' total positions\n for i in range(6):\n car_sector_and_lap[i] = car_laps[i] * 1000 + car_sectors[i] \n # Sort the cars so that car at index 0 is the first car in the race\n sorted_cars = [i[0] for i in sorted(enumerate(car_sector_and_lap), key=lambda x:x[1])]\n sorted_cars.reverse()\n return sorted_cars", "def gap_to_next_car(self):\n c = self.next_car()\n if c.x > self.x:\n return c.x - c.length_in_cells - self.x\n elif c.x < self.x:\n return (self.road.N - self.x) + (c.x - c.length_in_cells)\n elif c.x == self.x:\n return self.road.N" ]
[ "0.7006788", "0.7006788", "0.7006788", "0.63193274", "0.58075684", "0.56612337", "0.538843", "0.53481513", "0.5330617", "0.5250249", "0.5212691", "0.516299", "0.51339966", "0.5120874", "0.50953263", "0.50810665", "0.50649995", "0.5049101", "0.5031665", "0.49640206", "0.49521583", "0.4949442", "0.49457547", "0.49143934", "0.4901511", "0.4899872", "0.48962933", "0.4883222", "0.4875959", "0.48693225" ]
0.77712464
0
Initializes the SatData object and reads in the .json file
def __init__(self): with open("sat.json", "r") as infile: self._sat = json.load(infile)["data"] #Define the headers for the csv self._headers = ["DBN", "School Name", "Number of Test Takers", "Critical Reading Mean", "Mathematics Mean", "Writing Mean"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, name):\n try:\n with open(DATA_DIR + name + \".json\") as data:\n self.data = json.load(data)\n except IOError:\n print \"Cannot open file for \", name", "def __init__(self, data):\n\t\tassert isinstance(data, str), \"Data location must be provided in type 'str'!\"\n\t\t\n\t\t# load the location provided\n\t\tdata = json.loads(open(data).read())\n\n\t\t# check for correct format\n\t\tassert isinstance(data, list), \"Data must be of type 'list'!\"\n\n\t\tfor element in data:\n\t\t\tassert isinstance(element, dict), \"Each element of data must be of type 'dict'!\"\n\n\t\tself.data = data", "def _load(self):\n if self.file_path.exists():\n with open(self.file_path) as fid:\n self.data = json.load(fid)", "def __init__(self, file):\n self.__config = file\n with open(self.__config) as json_file:\n data = json.load(json_file)\n self.__data = data", "def __init__(self, filename):\n #Opening the file and storing its contents in a list\n with open(filename) as fp:\n self.data = json.load(fp)", "def __init__(self):\n with open('data.json') as data_file:\n self.data = json.load(data_file)\n self.game_over = False", "def load(self):\n if not self.exist:\n self.create()\n\n with open(self.file_path, encoding=Config.ENCODING) as file:\n self.data = json.load(file)", "def __init__(self):\n with open('info.json') as file:\n self.info = json.load(file)\n file.close()\n self.count = 0", "def read(self):\n self.data = {}\n if path.isfile(self.json_file):\n with open(self.json_file) as data_file:\n self.data = json.load(data_file)\n data_file.close()\n if (self.custom_path and self.is_only\n and path.exists(self.custom_path)):\n self.data[\"icons_path\"].append(self.custom_path)\n self.check_paths()\n be_added = (len(self.data[\"icons_path\"]) > 0\n and len(self.data[\"app_path\"]) > 0)\n if be_added:\n self.dont_install = False\n if isinstance(self.data[\"icons\"], list):\n self.data[\"icons\"] = get_iterated_icons(self.data[\"icons\"])\n self.get_app_icons()", "def __init__(self):\n\n this_folder = os.path.dirname(os.path.abspath(__file__))\n file_name = os.path.join(this_folder, \"event-mapping.json\")\n with open(file_name) as f:\n self.event_mapping = json.load(f)", "def init_json(path='', json_data=''):\n if not path or not json:\n raise ValueError\n if path and json_data:\n raise ValueError\n data = {}\n if path:\n with open(path, 'r') as file:\n temp = file.read()\n data = json.loads(temp)\n elif json_data:\n data = json_data\n\n if data['city']:\n json_city = data['city']\n else:\n raise ValueError\n if data['country']:\n json_country = data['country']\n else:\n raise ValueError\n if data['list_of_streets']:\n json_list_of_streets = data['list_of_streets']\n else:\n raise ValueError\n\n if check(json_city, json_country, json_list_of_streets):\n return get_sample_data(json_city, json_country, json_list_of_streets)", "def __init__(self, file_name=None):\n # deserialize\n if file_name:\n if os.path.isfile(file_name):\n self.__dict__ = load_json_object(file_name)\n else:\n raise IOError('The file {0} was not found.'.format(file_name))\n else:\n self.checking_entity = ''\n self.checking_level = '1'\n self.comments = ''\n self.contributors = ''\n self.publish_date = datetime.today().strftime('%Y-%m-%d')\n self.source_text = 'en'\n self.source_text_version = ''\n self.version = ''", "def __init__(self):\n self.data = json.loads(resource_string(__name__, 'data/oz_postcodes.json'))", "def __init__(self):\n with open('config.json', encoding='UTF-8') as json_data_file:\n self.config = 
json.load(json_data_file)\n self._get_credential()\n self.file_tree = [{}] * 100", "def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']", "def init(sFileName, sDescription):\n \n try:\n with open(sFileName) as f:\n my_dict = json.load(f)\n \n \n except:\n \n #assume there was an error, possibly the file does not exist\n my_dict = {'descriptor':sDescription ,'measurements':[]}\n with open (sFileName, 'w') as f:\n json.dump(my_dict,f)\n \n return my_dict", "def set_data_from_json(self, filename):\n with open(filename, 'r') as f:\n self.data = json.load(f, object_pairs_hook=OrderedDict)", "def setUp(self):\n with open('test/0a6a357e.json') as read_file:\n self.tx_json_0a6a357e = json.load(read_file)\n with open('test/bip69-synth.json') as read_file:\n self.bip69_synth = json.load(read_file)", "def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()", "def _load_data(self):\n if self._api_response.status_code == 200:\n self._dataset = self._api_response.json()\n self._fill_day_dicts()", "def load_data_file(self):\n with open(self.files['data'], 'r') as infile:\n data = json.load(infile)\n self.boundary_nodes = data['boundary_nodes']\n self.nodes = {int(k): v for k, v in data['nodes'].items()}\n self.levels = data['levels']\n infile.close()", "def __init__(self, filename):\n self._filename = filename\n fp = open(filename)\n self._contents = json.loads(fp.read())\n for key in self._contents.keys():\n #\n # Some .json keys begin with an @ sign, which represents ???.\n # The caller should not have to know which fields have @ signs\n # and which don't. 
For each key that begins with an @ sign,\n # create a secondary key consisting of the same string without\n # the @ sign, and having the same value.\n if re.search(\"^@\", key):\n secondaryKey = re.sub(\"^@\", \"\", key)\n self._contents[secondaryKey] = self._contents[key]\n self._dataFileName = re.sub(\".json\", \"\", self._filename)\n self._validate()", "def __init__(self, paths):\n assert isinstance(paths, ScatterPath)\n jsondata = ScatterJson(paths.jsondata)\n\n # load data from init file\n with open(paths.init_file, \"r\") as f:\n ## -- check header -- ##\n self.dim = np.fromfile(f, UINT_T, 1)[0]\n self.Ntraj = np.fromfile(f, UINT_T, 1)[0]\n self.inittemp = np.fromfile(f, DOUBLE_T, 1)[0]\n self.mass = np.fromfile(f, DOUBLE_T, self.dim)\n\n assert self.dim == jsondata.dim\n assert self.Ntraj == jsondata.Ntraj\n assert self.inittemp == jsondata.inittemp\n assert tuple(self.mass) == jsondata.mass\n\n ## -- load data -- ##\n self.r0p0 = np.fromfile(f, DOUBLE_T, self.Ntraj * self.dim * 2)\n self.r0p0 = self.r0p0.reshape((self.Ntraj, self.dim * 2))\n self.r0 = self.r0p0[:,:self.dim]\n self.p0 = self.r0p0[:,self.dim:]", "def read_json_file(self, fname):\n return {}", "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def __init__(self, config_file):\n with open(config_file, 'r') as file:\n self.config = json.load(file)\n self.set_config(self.config)", "def read(self,filename):\n with open(str(filename),\"r\") as f:\n data = f.read()\n #check if the loaded file is json\n try:\n datajson = json.loads(data)\n except Exception as e:\n if mer == True:\n merrors.error('could not load '+str(filename)+', add a basic entry to the config like {\"name\":\"Example\"}. Python error: '+str(e))\n quit()\n else:\n print(\"could not load \"+str(filename)+\". Python error: \"+str(e))\n quit()\n self.datajson = datajson\n self.filename = filename\n f.close()", "def __init__(self):\n try:\n with open(os.path.expanduser(\"~/.dkeyrc\"), 'r') as f:\n self.__cfgdata = json.load(f)\n except Exception as e:\n print(\"Error: Unable to load config JSON at ~/.dkeyrc -- %s\" % (e))\n sys.exit(1)", "def __init__(self, json_str: object = None, json_file_path: object = None) -> None:\n self.data = None\n if json_str is None and json_file_path is None:\n # raise Exception(\"Invalid file path or json string. Please provide valid file path for json data or provide json string\")\n print(\"No valid json file has been loaded\")\n if json_str is None:\n with open(json_file_path) as file:\n self.data = json.load(file)\n else:\n self.data = json.loads(json_str)\n # if self.data is not None:", "def __init__(self, path):\n\n # ----- static assignment\n # self.data = {\n # \"ACCT100\":{\"paid\": 60, \"due\": 100}, # balance = 40\n # \"ACCT200\": {\"paid\": 70, \"due\": 60}, # balance = -10\n # \"ACCT300\": {\"paid\": 60, \"due\": 0}, # balance = 0\n # }\n\n\n # open the specified database file for reading and perform loading\n\n with open(path, \"r\") as handle:\n # JSON\n import json\n self.data = json.load(handle)\n\n # #YAML\n # import yaml\n # self.data = yaml.safe_load(handle)\n\n # import xmltodict\n # self.data = xmltodict.parse(handle.read())[\"root\"]\n # print(self.data)" ]
[ "0.7230022", "0.70839703", "0.70451206", "0.7019796", "0.6950166", "0.6915002", "0.69049865", "0.6745017", "0.6663421", "0.64876246", "0.6458275", "0.6453081", "0.6421125", "0.6399048", "0.6386527", "0.63818234", "0.63470894", "0.6332923", "0.6332479", "0.632893", "0.63204163", "0.62947375", "0.62782794", "0.6276597", "0.6269913", "0.6267336", "0.625788", "0.62219924", "0.6221873", "0.62105286" ]
0.73582524
0
This method takes a list of district bureau numbers and saves a CSV file
def save_as_csv(self, DBNs): with open("output.csv", "w") as outfile: # create the headers for i in range(0, 5): outfile.write(self._headers[i] + ",") # delimits header names # moves to next line outfile.write(self._headers[5] + "\n") # populates information for data in self._sat: if data[8] in DBNs: outfile.write(data[8] + ",") if "," in data[9]: outfile.write("\""+data[9]+"\"" + ",") else: outfile.write(data[9] + ",") outfile.write(",".join([data[i] for i in range(10,14)]) + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export(tako_list, filename):\n for tak in tako_list:\n tak = tak[0]\n l1 = [tak.ident, \"a\"]\n for gen in tak.genome.weightchr_a:\n l1.append(gen.ident)\n l1.append(gen.weight)\n l1.append(gen.mut_rate)\n l1.append(gen.dom)\n f = os.path.join(\"Data\", (filename[:-4] + \" gene data.csv\"))\n with open(f, 'a', newline=\"\") as csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(l1)\n if len(tak.genome.weightchr_b) != 0:\n l2 = [tak.ident, \"b\"]\n for gen in tak.genome.weightchr_b:\n l2.append(gen.ident)\n l2.append(gen.weight)\n l2.append(gen.mut_rate)\n l2.append(gen.dom) \n writ.writerow(l2)", "def save_csv(outfile, cities):\n writer = csv.writer(outfile)\n writer.writerow(['Name'])\n for row in cities:\n writer.writerow([row])", "def writeCSV(list, filename):\n with open(filename, \"w\") as file:\n for row in list:\n for i in range(len(row)):\n file.write(str(row[i]))\n if i != len(row) - 1:\n file.write(\",\")\n else:\n file.write(\"\\n\")\n return", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def create_main_csv(data_list):\n path = \"camelot/clean/CWC_National-Register-of-Large-Dams_2019.csv\"\n if not os.path.exists(\"camelot/clean\"):\n os.makedirs(\"camelot/clean\")\n data_concat = pd.concat(data_list)\n data_concat.reset_index(drop=True).to_csv(path, index=True)", "def persist_list_to_csv(liste, nom_fichier):\n with open(nom_fichier, 'w') as f:\n for elem in liste :\n f.write(\"{}\\n\".format(elem))", "def write_csv(list_file, path):\n\n\twith open(path, 'w') as f:\n\t\twriter = csv.writer(f, delimiter=',')\n\t\tfor i in list_file:\n\t\t\twriter.writerow(i)", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def exportFoldFile(vectors, authors, fileName):\n with open(fileName, \"w\") as fFile:\n for idv, vec in enumerate(vectors):\n [fFile.write(str(val)+',') for val in vec]\n fFile.write(authors[idv] + 
'\\n')", "def write_data(city_name: str, source: str, year: int) -> None:\n parse = source.split(',')\n current_pos = 0\n end = 6\n with open(f'{city_name}_{year}.csv', 'w', newline='', encoding='utf-8') as output:\n id_writer = csv.writer(output, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n while end < len(parse):\n id_writer.writerow(parse[current_pos:end])\n current_pos = end\n end = current_pos + 6\n if end >= len(parse):\n end = len(parse)", "def dbtocsv():\n connection = sqlite3.connect(\"sensordata.db\")\n cursor = connection.cursor()\n cursor.execute(\"Select * from sensordata\")\n roadstationdata = cursor.fetchall()\n\n with open('roadstationdata.csv', 'w') as f:\n writer = csv.writer(f)\n writer.writerow(['id','name','value','unit','time'])\n writer.writerows(roadstationdata)", "def generate_csv(self, lista):\r\n\t\ts = ''\r\n\t\tsalida = self.get_rel_path() + \"/\" + \"tree_names.csv\"\r\n\t\tfor i in lista:\r\n\t\t\t#st = i[2].split('/')\r\n\t\t\t#newpath = os.path.join(i[1],st)\r\n\t\t\thash = str(i[0])\r\n\t\t\tname_path = str(i[1] + \"/\" + i[2])\r\n\t\t\t#s = s + str(i[0]) + \";\" + i[1] + \"/\" + i[2] + \"\\n\"\r\n\t\t\tself.copy_file(hash,name_path)\r\n\t\t\ts = s + str(hash + \";\" + name_path + \"\\n\")\r\n\r\n\t\tf = open(salida,\"w\")\r\n\t\tf.write(s)\r\n\t\treturn salida", "def write_data_to_csv(cell_cent_top_lst, u_top_fe_conv_lst, disp_cent_PD_array_lst, u_disp_PD_array_lst, file_path, file_name):\n import csv\n def _write_(abs_file_path, arr):\n with open(abs_file_path, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=';')\n writer.writerows(arr)\n\n\n num_data = len(cell_cent_top_lst)\n for i in range(num_data):\n cel_cnt_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_cel_cnt_top.csv')\n ufe_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_ufe_top.csv')\n dsc_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_dsp_cnt.csv')\n u_dsp_file_name = path.join(file_path, file_name + str(i).zfill(2) + '_u_dsp.csv')\n\n _write_(cel_cnt_file_name, cell_cent_top_lst[i])\n _write_(ufe_file_name, u_top_fe_conv_lst[i])\n _write_(dsc_file_name, disp_cent_PD_array_lst[i])\n _write_(u_dsp_file_name, u_disp_PD_array_lst[i])\n\n return", "def download_postcode_districts(postcode_area: str):\n uk_postcode_districts = f\"https://en.wikipedia.org/wiki/{postcode_area}_postcode_area\"\n postcode_districts = pd.read_html(uk_postcode_districts)[1]\n\n print(f\"Saving {postcode_area} district codes...\")\n output_path = path.join(\"../..\", \"datasets\", \"uk_postcodes\", f\"{postcode_area}.csv\")\n postcode_districts.to_csv(output_path)\n print(f\"Saving {postcode_area} district codes...DONE\")", "def write_output_file(ad_models):\n\n with open('output-data-utf8.csv', 'w', newline='', encoding='UTF-8') as output_file:\n csv_writer = csv.writer(output_file, delimiter=',')\n for ad in ad_models:\n csv_writer.writerow((ad.date.strftime('%Y/%m/%d'), ad.country_code, ad.impression, ad.clicks))", "def save_csv(filename, save_list):\n with open(filename, mode='w') as csv:\n csv.writelines([','.join(item) + '\\n' for item in save_list])", "def write_csv(elongation, file_name):\n e = elongation\n\n with open(file_name, 'w') as f:\n f.write(f\"\"\"\\\nBreak Load, {e.break_load()}\nBreak Strength, {e.break_strength()}\nBreak Elongation, {e.break_elongation()}\nYield Load, {e.yield_load()}\nYield Strength, {e.yield_strength()}\nYield Elongation, {e.yield_elongation()}\nGauge Length, {e.gauge_length}\nSample Width, 
{e.sample_width}\nSample Thickness, {e.sample_thickness}\n\nPoints\n %, N\"\"\")\n for x, y in zip(e.xs, e.ys):\n f.write(f'\\n{x:>8.4f}, {y:>8.4f}')", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def writetoCSV(self, fileName):\n\n with open(fileName, 'w') as writeFile:\n writeFile.write(\"ID,Fx,Fy,Fz\\n\")\n for fstnr in F:\n writeFile.write(str(fstnr.ID))\n for i in fstnr.force:\n writeFile.write(',' + str(i))\n writeFile.write('\\n')", "def writeCSV(path,aList):\n\twith open(path,'wb') as w:\n\t\ta = csv.writer(w, delimiter = ',')\n\t\ta.writerows(aList)\n\tw.close()", "def write_to_csv(box_regions, num_of_cols, num_of_rows, filename=\"out.csv\"):\r\n\r\n cwriter = csv.writer(open(filename, 'w'))\r\n\r\n digit_list = []\r\n for region in box_regions:\r\n # performing Neural Network algorithm to get predicted digits\r\n digits = performRecognition.get_decimal_in_box(region)\r\n\r\n # sort the digits\r\n digits = sort_digits(digits, key=lambda digit: digit[1][0])\r\n\r\n print (digits)\r\n\r\n digit_str = ''\r\n for digit in digits:\r\n digit_str += str(digit[0])\r\n\r\n digit_list.append(digit_str)\r\n\r\n # writes the digits into the csv\r\n for i in range(0, num_of_rows*num_of_cols, num_of_cols):\r\n cwriter.writerow(digit_list[i:i+num_of_cols])", "def write_new_csv(self, pd_edit_series, csv_list):\n logging.debug('write_new_csvs called. 
File saved to:'\n '%sdocs/edited_csv/', self.out_dir)\n\n pd_edit_series.to_csv(f'{self.out_dir}docs/edited_csv/'\n f'edited_location_totals.csv',\n encoding='utf-8')\n\n for country, csv in csv_list.items():\n csv.to_csv(f'{self.out_dir}docs/edited_csv/edited_'\n f'{country}.csv')", "def makeCsv(net, date, opt, path, minlat, maxlat, minlon, maxlon, variables, estaciones):\n\n # data_lon = Dataset('/ServerData/KRAKEN/Reanalisis/a1979/wrfout_c15d_d01_1979-08-15_00:00:00.1979')\n # LON = data_lon.variables['XLONG'][:]\n # LAT = data_lon.variables['XLAT'][:]\n #\n # LON = LON[0][0]\n # LAT = LAT[0]\n #\n # LONsize = len(LON)\n # LATsize = len(LAT)\n #\n # celda = []\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat):int(maxlat),int(minlon):int(maxlon)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], date, opt, path, estaciones)", "def write_csv(self, key_list, word_list):\n # Write out data\n out_data = []\n # Match filtered indexes to words\n for i in key_list.index:\n subset = word_list[word_list['key'] == i]\n # Add to aggregate list\n out_data.append(subset['word'].tolist())\n # Dump list to headerless CSV\n with open(self.output, 'w') as f:\n writer = csv.writer(f)\n writer.writerows(out_data)\n return len(out_data)", "def write_file(file):\n file.to_csv('data_set.csv', encoding='utf-8', index=False)", "def write_to_csv(list_of_rows, file_name):\n with open(file_name, 'w') as f:\n writer = csv.writer(f)\n for row in list_of_rows:\n if None in row:\n continue\n writer.writerow(row)\n \n f.close()", "def save_csv_data(representatives, votes, house, session):\n df = pd.DataFrame({'Representatives': representatives})\n for bill in votes:\n vote_record = []\n for person in representatives:\n if person in bill['vote_yea']:\n vote_record.append(1)\n elif person in bill['vote_nay']:\n vote_record.append(0)\n else:\n vote_record.append(2)\n df[bill['bill']] = vote_record\n df.to_csv(os.path.join(default_path, \"voting\", house+str(session)+'_voting.csv'))", "def save_csv(companies):\n print(\"Saving companies.csv...\")\n\n Path(\"output\").mkdir(parents=True, exist_ok=True)\n file_name = 'output/companies.csv'\n\n with open(file_name, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',')\n i = 0\n while i < 500:\n company = companies[i]\n name = company.text\n url = company.get_attribute('href')\n writer.writerow([name, url])\n i = i + 1\n \n print('companies.csv created')", "def List_to_CSV(OutFname, DataList):\n with open(OutFname, 'w') as myfile:\n wr = csv.writer(myfile, delimiter=',')\n wr.writerows(line for line in DataList)" ]
[ "0.64082557", "0.6216291", "0.61752206", "0.6168384", "0.6143117", "0.6140604", "0.60723305", "0.60699093", "0.60474026", "0.6013382", "0.600645", "0.5995924", "0.5993597", "0.59917456", "0.5989028", "0.5963057", "0.59537816", "0.59295964", "0.59092754", "0.5907891", "0.5906663", "0.5860689", "0.584047", "0.578208", "0.5761388", "0.5743961", "0.5743057", "0.5736228", "0.5734686", "0.572879" ]
0.686182
0
Play a guessing game with a user. The exercise here is to rewrite the exampleGuessingGame() function
def advancedGuessingGame():
    print("\nWelcome to the guessing game!")
    print("A number between _ and _ ?")
    lowerBound = not_number_rejector("Enter Lower Bound: ")
    higher_number = False  # we need to set an upper and lower bound for the game
    while not higher_number:
        upperBound = not_number_rejector("Enter Upper Bound: ")
        if upperBound > lowerBound:
            higher_number = True
        else:
            print("The upperbound is lower than your lowerbound: TRY AGAIN")
    # above code ensures upper > lower, see stubbon_asker in EX1
    print("OK then, guess a number between {} and {} ?".format(lowerBound, upperBound))
    lowerBound = int(lowerBound)  # ensures an integer is given (not a letter)
    upperBound = int(upperBound)
    actualNumber = random.randint(lowerBound, upperBound)
    guessed = False
    while not guessed:
        guessedNumber = not_number_rejector("Make a guess: ")
        print("You guessed {},".format(guessedNumber),)
        if guessedNumber == actualNumber:
            print("HOW DID YOU GET THAT! It was {}".format(actualNumber))
            guessed = True
        elif guessedNumber > upperBound:
            print("This is higher than the upperbound! Try again!")
        elif guessedNumber < lowerBound:
            print("This is lower than the lowerbound! Try again!")
        elif guessedNumber < actualNumber:
            print("{} is too small, try again".format(guessedNumber))
        else:
            print("{} is too big, try again ".format(guessedNumber))
    return "You got it!"  # the tests are looking for the exact string "You got it!". Don't modify that!
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans", "def input_guess(guess):\n global counter\n global secret_number\n \n guess_int = int(guess)\n counter = counter - 1\n \n print \"Guess was\", guess_int\n if guess_int == secret_number:\n print \"Correct!\"\n print \"\"\n new_game(n)\n return\n elif guess_int > secret_number and counter != 0:\n print \"Number of remaining guesses is\", counter\n print \"Lower!\"\n print \"\"\n elif guess_int < secret_number and counter != 0:\n print \"Number of remaining guesses is\", counter\n print \"Higher!\"\n print \"\"\n else:\n print \"You ran out of guesses. The number was\",secret_number\n print \"\"\n new_game(n)", "async def guess(self, ctx):\n server = ctx.message.server.id\n current_streak = 0\n while True:\n if current_streak > 0:\n await self.bot.say('Your current streak is {}'.format(current_streak))\n reply = guessing_game(server, ctx)\n await self.bot.send_file(\n ctx.message.channel,\n 'images/lineup/game_postgame.png',\n content='Guess a hero {} played that game. {}'.format(\n reply[1], reply[2])\n )\n\n def guess_check(m):\n return ctx.message.content\n\n guess = await self.bot.wait_for_message(\n timeout=30.0,\n check=guess_check,\n author=ctx.message.author,\n channel=ctx.message.channel\n )\n answer = reply[0]\n if guess is None:\n fmt = 'Sorry, you took too long. It was {}.\\nGame over. Your score: {}.'\n await self.bot.send_message(\n ctx.message.channel,\n fmt.format(answer, current_streak)\n )\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break\n if guess.content.lower() == answer.lower():\n\n await self.bot.say('Yay! You are right.')\n current_streak += 1\n else:\n await self.bot.say(\n 'Nope. It is actually {}.\\n Game over. Your score: {}'.format(answer, current_streak))\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break", "def input_guess(guess):\n \n print \"\"\n \n guess_int = int(guess)\n print \"Guess was\",guess_int\n \n global remaining_guesses\n remaining_guesses = remaining_guesses - 1 \n print \"Number of remaining guesses is\",remaining_guesses \n \n # main game logic goes here\t\n \n if ((guess_int > secret_number) and (remaining_guesses > 0)):\n print \"Lower!\"\n elif((guess_int < secret_number) and (remaining_guesses > 0)): \n print \"Higher!\"\n elif((guess_int == secret_number) and (remaining_guesses >= 0)):\n print \"Correct!\"\n new_game()\n else:\n print \"You ran out of guesses.The number was\",secret_number\n new_game()", "def game_start():\n # Greets user, prompts for name, and asks if they are ready\n input(\"Hello there, welcome to the number guessing game! \" \n \"What is your name? \\n \\n\") \n print(\"\\nAre you ready to play?\") \n\n # Uses .upper() so that the input is not case sensitive\n game_choice = input(\"Enter 'YES' to start the game, \" \n \"or enter 'NO' to quit. 
\\t\").upper() \n game_loop = True # Intialized as true to make game_loop repeat\n\n while game_loop: # Repeatedly prompts user with different game choices\n\n console_clear(1) # Clear console with 1 second delay\n\n # If user enters \"YES\", the game starts using the game_number function\n # below. If user enters \"NO\", they are prompted to system exit. If \n # \"YES\" or \"NO\" are not entered, the greeting starts over\n if game_choice == \"YES\": \n console_clear(1) \n game_number()\n\n elif game_choice == \"NO\": \n console_clear(1) \n sys.exit()\n\n else: \n game_start()", "def guess_a_number():\n x = check_raw()\n random_number=randint(0,100)\n count_tries = 0\n\n while x != random_number:\n count_tries = count_tries + 1\n if count_tries == 10:\n print ('GAME OVER! You failed too many times!')\n break\n x = evaluate_my_number(x,random_number)\n if x == random_number:\n print ('Your number is correct! You needed {} tries.'.format(count_tries))\n break\n\n new_game = str(input(\"Do you want to play again? If so, say 'yes'! If not, say 'no' \"))\n if new_game == 'yes':\n guess_a_number()\n else:\n print('Goodbye!')\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function", "def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result", "def main():\n word = random_word()\n attempt_left = N_TURNS\n ans = intro(word, attempt_left)\n while attempt_left != 0:\n hangman_figure(attempt_left)\n ans, attempt_left = hangman(word, ans, attempt_left)\n if ans == word: # if players had guess the word correctly\n print('You are correct!')\n print('You win!!')\n print('The word was: ' + word)\n break\n else:\n print('The word looks like: ' + ans)\n if attempt_left == 0: # players failed to guess the word correctly\n hangman_figure(attempt_left)\n print('You are completely hung : (')", "def guess_the_number():\n # get a random number from 1 to 1000\n number = random.randrange(1, 1000)\n\n guess = 0\n gcounter = 0\n # compare guess and selected number\n while guess != number:\n # get user input\n guess = int(input('Guess my number between 1 to 1000: '))\n # compare with number\n if guess > number:\n print('Too high. Try again')\n gcounter += 1\n elif guess < number:\n print('Too low. Try again')\n gcounter += 1\n else:\n # if equal, congratulate the user\n print('Congratulations, you guessed the number!')\n print(f'You used {gcounter} guesses')\n # check the number of guesses and provide feedback\n if gcounter > 10:\n print('You should be able to do better')\n else:\n print('Either you know the secret or you got lucky.')\n # give the option to restart the game or quit.\n response = input((\"Would you like to play it again? \"\n \"('yes' or 'no'): \"))\n # check user response\n if response == 'yes':\n number = random.randrange(1, 100)\n guess = 0\n gcounter = 0\n elif response == 'no':\n print('Bye.')\n break\n else:\n print('Invalid response. 
Quitting...')\n break", "def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)", "def correct_guess(total_guesses):\n \n print(\"\\nCongratulations! You guessed the random number in \" \n + str(total_guesses) + \" guesses!\")\n print(\"You are now returning to the menu...\")\n \n # Clears the console after a 3 second delay, then restarts game\n console_clear(3)\n game_start()", "def play_game():\n # let the user select her levle\n level = raw_input(\"\"\"\n Please select a game difficulty by typing it in!\n Possible choices include easy, medium, and hard.\n \"\"\")\n print \"You've chosen %s!\\n\" %(level)\n print \"You will get %s guesses per problem\\n\" %(number_of_guess)\n\n quiz_and_answer = quiz_and_answer_list[level]\n quiz, answer = quiz_and_answer[0], quiz_and_answer[1]\n\n # iterate through the blanks.\n for index, value in enumerate(answer):\n if index != len(answer) - 1:\n print \"The current paragraph reads as such:\\n\"\n print quiz\n guess = raw_input(\"What should be substituted in for __%s__?\" %(index + 1))\n quiz = guess_until_right(index, value, guess, quiz)\n if index == len(answer) - 1:\n print quiz\n print \"You won!\"\n else:\n print \"Correct!\\n\"", "async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))", "def play_game(self):\n \n# self.display_letter_prompt()\n\n if self.input_letter != None:\n if self.input_letter == self.current_prompt:\n self.correct_response()\n else:\n self.incorrect_response()\n\n self.frames_passed += 1\n\n if self.prompt_vibrated == False:\n self.vibrate_buttons()\n self.prompt_vibrated = True\n\n if self.frames_passed > (self.delay * self.fps * 0.07):\n self.vibrate_buttons()\n self.frames_passed = 0", "async def guess(self, ctx):\r\n\r\n async def play():\r\n try:\r\n await ctx.send('Lets play a game! 
You have to guess a number between 1 and 10.')\r\n guess = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n answer = random.randint(1, 10)\r\n counter = 1\r\n\r\n while int(guess.content) != answer:\r\n counter += 1\r\n if int(guess.content) > answer:\r\n await ctx.send('Your guess is too high! Try again.')\r\n guess = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n else:\r\n await ctx.send('Your guess is too low! Try again.')\r\n guess = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n else:\r\n if counter <= 1:\r\n await ctx.send('Congratulations! You got it on your first attempt!')\r\n else:\r\n await ctx.send('Congratulations! It took you **{}** tries to guess the correct answer.'.format(counter))\r\n await gameover()\r\n except ValueError:\r\n await ctx.send('Please enter a number.')\r\n await play()\r\n\r\n async def gameover():\r\n await ctx.send('Do you want to play again? (Enter: **Yes** / **No**)')\r\n response = await self.viking.wait_for('message', check=lambda message: message.author == ctx.author)\r\n response = response.content.lower()\r\n\r\n if response == 'yes':\r\n await play()\r\n elif response == 'no':\r\n await ctx.send('Thanks for playing!')\r\n else:\r\n await ctx.send('Invalid response.')\r\n await gameover()\r\n\r\n await play()", "def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. '\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()", "def main():\n # init variables\n lower_bound = 1\n higher_bound = 10\n guess = generate_guess(1, 10)\n while True:\n try:\n secret = input(\"What should the computer guess? Enter a number between 1 and 10: \")\n except ValueError:\n print(\"{} isn't a number!\".format(secret))\n while True:\n if int(guess) == int(secret):\n print(\"I guessed {}! Your number was {}! I win!\".format(guess, secret))\n play_again = input(\"Do you want to play again? (Y/n)\")\n if play_again != \"Y\":\n print(\"Thanks for playing!\")\n exit()\n else:\n main()\n elif int(guess) != int(secret):\n high_or_low = input(\"I guessed {}. Was it high or low? 
(H/L)\".format(guess))\n print(\"G: {}, HB: {}, LB: {}\".format(guess, higher_bound, lower_bound))\n if high_or_low == \"H\":\n higher_bound = guess - 1\n guess = generate_guess(lower_bound, higher_bound)\n elif high_or_low == \"L\":\n lower_bound = guess + 1\n guess = generate_guess(lower_bound, higher_bound)\n else:\n print(\"Please try again: \\n\")", "def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()", "def game_number():\n \n total_guesses = 0 # Initializes total number of guesses as 0 when game starts\n rand_number = randint(1,20) # Creates a random number between 1 and 20\n print(\"\\nThe number you shall guess is between 1 and 20.\" \n \" You have 3 guesses.\")\n\n while total_guesses < 3: # Ensures user only recieves 3 attempts\n\n print(\"Enter your guess below.\") # Prompts user to enter guess\n\n # Notifies user which attempt they are on\n if total_guesses == 0:\n print(\"This is your first attempt. \\t\") \n if total_guesses == 1:\n print(\"This is your second attempt. \\t\") \n if total_guesses == 2:\n print(\"This is your final attempt. \\t\") \n \n # Assigns guess to be the input as well as an \n # integer value for guessing the random number\n guess = input() \n guess = int(guess)\n \n total_guesses = total_guesses + 1 # Tracks number of total guesses used\n\n # Helps user confine their guesses based on clues given by the game\n if guess < rand_number:\n print(\"\\nYour guess is below the value of the random number!\")\n if guess > rand_number:\n print(\"\\nYour guess is above the value of the random number!\")\n if guess == rand_number:\n correct_guess(total_guesses)\n if guess != rand_number and total_guesses == 3:\n incorrect_guess(rand_number)", "def play_game(n):\n tries = 0\n magic_number = generate_random(n)\n print(\"Let's play the mimsmind0 game.\")\n # Get and validate user's first guess\n while True:\n try:\n guess = int(input(\"Guess a {}-digit number: \".format(n)))\n tries += 1\n break\n except:\n print(\"That is not a valid number, try again.\") \n while True:\n # Check guess against magic number and give directional guidance if incorrect\n try:\n if magic_number > guess:\n guess = int(input(\"Try again. Guess a higher number: \"))\n tries += 1\n elif magic_number < guess:\n guess = int(input(\"Try again. Guess a lower number: \"))\n tries += 1\n else:\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(tries))\n break\n except:\n print(\"That's not a valid number.\")", "def game_over(user_name, answer, correct_answer):\n print(\"'{0}' is wrong answer ;(. 
Correct answer was '{1}'.\"\n .format(answer, correct_answer))\n print(\"Let's try again, {0}!\".format(user_name))", "def play_hangman(self):\n while self.stage < 6:\n self.display_hangman()\n guess = input(f'{Fore.YELLOW}Choose a letter: {Style.RESET_ALL}').lower().strip() # noqa\n print('\\n')\n if guess.isalpha() and len(guess) == 1:\n if guess not in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.RED}{guess} is not in the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_letters.append(guess)\n elif guess.isalpha() and guess in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.GREEN}{guess} is in the word!{Style.RESET_ALL}') # noqa\n print('\\n')\n self.guessed_letters.append(guess)\n # code for replacing dashes with letters adapted from # noqa\n # https://github.com/kiteco/python-youtube-code/blob/master/build-hangman-in-python/hangman.py\n word_as_list = list(self.progress)\n indices = [i for i, letter in enumerate(self.word) if letter == guess] # noqa\n for index in indices:\n word_as_list[index] = guess\n self.progress = \"\".join(word_as_list)\n if \"-\" not in self.progress:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess == self.word:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess not in self.word and guess in self.guessed_words: # noqa\n print(f'You already guessed {guess}, try again')\n print('\\n')\n\n elif guess.isalpha() and guess not in self.word and guess not in self.guessed_words: # noqa\n print(f'{Fore.RED}{guess} is not the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_words.append(guess)\n print('\\n')\n else:\n print('Invalid input \\n')\n if self.stage >= 6:\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(f'{Fore.RED}Game Over! The word was {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.play_again()", "def input_guess(guess):\n global turn_count\n type(guess)\n turn_count -= 1\n if int(guess) == secret_number:\n print (\"\\n\" + \"You guessed \" + str(guess))\n print (\"THAT'S IT! Dang, you got me! Good game bud.\")\n new_game()\n elif int(guess) > secret_number and turn_count > 0:\n print (\"\\n\" + \"As if, guess lower!\")\n print (\"You guessed \" + str(guess))\n print (\"Number of guesses left: \" + str(turn_count))\n elif (int(guess) < secret_number) and (turn_count > 0):\n print (\"\\n\" + \"Pitiful attempt, guess higher!\")\n print (\"You guessed \" + str(guess))\n print (\"Number of guesses left: \" + str(turn_count))\n elif turn_count == 0 and int(guess) != secret_number:\n print (\"\\n\" + \"You're out of guesses! 
Game over, loser!\")\n print (\"You guessed \" + str(guess))\n print (\"The number was \" + str(secret_number))\n new_game()", "def main():\n print(\"\\tWelcome to 'Guess My Number (Edit)'!\")\n print(\"\\nI'm thinking of a number between 1 and 100.\")\n print(\"Try to guess it in as few attempts as possible.\\n\")\n\n # set the initial values\n the_number = random.randint(1, 100)\n guess = ask_number(\"Take a guess: \", 1, 100)\n tries = 10\n\n # guessing loop\n while guess != the_number:\n tries -= 1\n if tries <= 0:\n break\n if guess > the_number:\n print(\"Lower...\")\n else:\n print(\"Higher...\")\n guess = ask_number(\"Take a guess: \", 1, 100)\n\n if guess == the_number:\n print(\"You guessed it! The number was\", the_number)\n print(\"And it only took you\", tries, \"tries!\\n\")\n\n elif tries <= 0:\n print(\"\\nSorry, you're out of tries. Better luck next time!.\")", "def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message", "def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def new_round(guesses, letters_guessed = letters_guessed):\n\n # print(get_guessed_word(secret_word, letters_guessed) )\n print(\"You have \" + str(guesses) + \" guesses left.\")\n print(\"Available letters: \" + get_available_letters(letters_guessed))\n ans = input(\"Please guess a letter: \")\n if ans.isalpha():\n return ans.lower()\n else:\n return None", "def user_guess():\n return list(input(\"What is your guess?\"))", "def guess_the_number():\n\n print(\"Welcome to no.guessing game . You have 10 trials . 
Good luck\")\n global player\n print(f\"Player{player}'s turn : \")\n\n a = int(input(\"Enter the starting of the range:\\n\"))\n b = int(input(\"Enter the ending of the range:\\n\"))\n from random import randint\n # Generates a random number between the given range\n random_number = randint(a, b)\n global trials\n while trials <= 10:\n\n n = int(input(\"Guess a number:\\n\")) # User's number\n\n if n > random_number:\n print(\"Wrong ! Please enter a lesser number:\")\n\n elif n < random_number:\n print(\"Wrong! Please enter a greater number:\")\n else:\n print(\"Yeah ! you won \")\n print(F\"player{player} won the game in {trials} no. of trials\")\n break\n print(f\"{10-trials} no. of trials left\")\n trials += 1\n if trials>10:\n print(f\"GAME OVER! the number was {random_number}\")\n # creating player 1's and player 2's points in the global scope\n if player == 1:\n global player_point1\n player_point1 = trials\n\n else:\n global player_point2\n player_point2 = trials", "def play_hangman(self) -> None: \n tries=6\n current_word=self.get_word()\n guessed_word = False\n word_hidden_states = [current_word[indx] for indx in sample(range(0, len(current_word)-1), randint(1, len(current_word)-2))]\n word_completion_state = [letter if letter not in word_hidden_states else \"_\" for letter in current_word]\n\n while tries > 0 and not guessed_word: \n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n self.display_state(tries,word_completion_state)\n guessed_char=str(input(\"Guess a Character : \")).upper()\n\n if guessed_char in word_hidden_states :\n print(\"\\nCorrect Guess !!!!!! Updating..........\")\n for indx,_ in enumerate(word_completion_state) : \n if guessed_char == current_word[indx]:\n word_completion_state[indx]=guessed_char\n \n word_hidden_states = [char for char in word_hidden_states if char != guessed_char]\n guessed_word = False if \"_\" in word_completion_state else True\n sleep(5)\n else :\n print(\"\\nIncorrect Guess!!! Updating!!!!!!\")\n sleep(5)\n tries=tries-1\n \n if tries == 0 and not guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-1] + \"\\n\")\n print(f\"No Tries Remaining , YOU LOST !!!!!\")\n print(f\"CORRECT WORD was ------> {current_word}\")\n print(f\"GAME OVER\")\n \n if guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-tries] + \"\\n\")\n print(f\"YOU GUESSED THE WORD CORRECTLY !!!\")\n print(f\"WORD was ------> {current_word}\")\n print(f\"Congratulations You win\")" ]
[ "0.72188973", "0.70376676", "0.6994092", "0.69199556", "0.69090265", "0.68983454", "0.68386334", "0.68023723", "0.6791833", "0.6778669", "0.67778534", "0.6773469", "0.6772827", "0.6761244", "0.67608136", "0.67598337", "0.6736602", "0.6716704", "0.6715845", "0.66994876", "0.6678569", "0.66380435", "0.66105163", "0.65990955", "0.658078", "0.6569671", "0.656471", "0.6519002", "0.6507482", "0.6478028" ]
0.71829236
1
Initialize instance with given response_dict.
def __init__(self, response_dict={}):
    self.id = response_dict.get('id')
    self.name = response_dict.get('name')
    self.image_url = response_dict.get('imageUrl')
    self.subtype = response_dict.get('subtype')
    self.supertype = response_dict.get('supertype')
    self.ability = response_dict.get('ability')
    self.hp = response_dict.get('hp')
    self.retreat_cost = response_dict.get('retreatCost')
    self.number = response_dict.get('number')
    self.artist = response_dict.get('artist')
    self.rarity = response_dict.get('rarity')
    self.series = response_dict.get('series')
    self.set = response_dict.get('set')
    self.set_code = response_dict.get('setCode')
    self.types = response_dict.get('types')
    self.attacks = response_dict.get('attacks')
    self.weaknesses = response_dict.get('weaknesses')
    self.resistances = response_dict.get('resistances')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(self, res):\n self.fromResponseObj(res)", "def __init__(\n self,\n response: dict\n ):\n\n self.__name = read_value(\n \"name\", response, str, True)\n self.__uuid = read_value(\n \"uuid\", response, str, True)\n self.__note = read_value(\n \"note\", response, str, True)\n self.__location = read_value(\n \"location\", response, str, True)\n self.__datacenter_uuid = read_value(\n \"datacenter.uuid\", response, str, True)\n self.__row_uuids = read_value(\n \"rows.uuid\", response, str, False)\n self.__row_count = read_value(\n \"rowCount\", response, int, True)\n self.__rack_count = read_value(\n \"rackCount\", response, int, True)\n self.__host_count = read_value(\n \"hostCount\", response, int, True)", "def __init__(self, response):\n self.response = response\n self.json = response.json()\n self.text = response.text\n try:\n for key in response.json():\n value = response.json()[key]\n setattr(self, key, sanitize(key, value))\n\n except Exception, e:\n # It is possible that json is empty and throws: TypeError: 'NoneType' object is not iterable\n if self._Hoiio.debuglevel > 0:\n print 'Exception: %s' % e\n import traceback\n traceback.print_exc()\n raise HoiioException", "def __init__(self, response):\n self.response = response\n self.object = response['object']\n self.webhook_endpoint_id = response['webhook_endpoint_id']\n self.created_at = response['created_at']\n self.updated_at = response['updated_at']\n self.status = response['status']\n self.url = response['url']\n self.events = response['events']\n self.livemode = response['livemode']\n self.secret = response['secret']", "def __init__(self, status_code, response_obj):\r\n self._status_code = status_code\r\n self._response_error = \"\"\r\n self._original_response = response_obj\r\n \r\n if type(response_obj) is bytes:\r\n response_obj = response_obj.decode(\"utf-8\", errors='ignore')\r\n\r\n if type(response_obj) is str:\r\n try:\r\n self._response = json.loads(response_obj)\r\n except:\r\n self._response = response_obj\r\n elif type(response_obj) is dict:\r\n try:\r\n self._response = json.loads(json.dumps(response_obj))\r\n except:\r\n self._response = response_obj\r\n else:\r\n self._response = response_obj", "def __init__(self, response_class, *args, **kwargs):\n self._response_class = response_class\n self._args = args\n self._kwargs = kwargs", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return 
cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)", "def _from_dict(cls, _dict):\n return cls.from_dict(_dict)" ]
[ "0.73315465", "0.73315465", "0.7124996", "0.70806485", "0.703705", "0.69967353", "0.6830665", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124", "0.68262124" ]
0.747926
0
Return a specific Card given an id.
def find(id): return QueryBuilder(Card).find(id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCard(self,id):\n if not self.cardExists(id):\n return None\n return self.cards[id]", "def retrieve(customer, card_id):\n if isinstance(customer, resources.Customer):\n customer = customer.id\n\n http_client = HttpClient()\n response, __ = http_client.get(routes.url(routes.CARD_RESOURCE, resource_id=card_id, customer_id=customer))\n return resources.Card(**response)", "def card(self, card_id):\r\n return Card(self, card_id)", "def get_card(self, name):\n for card in self.cards:\n if card.name == name:\n return card\n\n return None", "def getCard(id):\n r = requests.get('https://api.scryfall.com/cards/multiverse/' + str(id))\n data = r.json()\n try:\n name = data['name']\n except KeyError:\n name = ''\n try:\n url = data['image_uris']['normal']\n except KeyError:\n url = ''\n return name, url", "async def retrieve(self, profile_id):\n profile = await self.get(self.profile_load.format(profile_id))\n log(\"retrieved card for {}\".format(profile['title']))\n return profile", "def card(self, cardid: int, season: str) -> Card:\n return Card(self, cardid, season)", "def get_card_by_name(self,name):\n try:\n card_id = self._category2id['name'][name].values()\n except KeyError:\n print \"No card by given name! [{}]\".format(name)\n return None\n\n if len(card_id) > 1:\n print \"Multiple cards match name, returning first...\"\n\n return self._id2database[card_id[0]]", "def card(self, card_id_or_shortlink):\r\n return Card(self, card_id_or_shortlink)", "def draw(self, id):\n card = self.deck.pop()\n self.players[id].add_card(card)\n return card", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get_by_id(cls, id):\n e = api.get([key.Key(cls.__name__, id)])\n if e:\n return cls.from_entity(e[0])\n raise ObjectDoesNotExist", "def get_by_id(c_id):\n return cr.get_by_id(c_id)", "def getNextCard(deckId):\n deckOfCards = getCardsForDeck(deckId)\n card = deckOfCards.order_by('?')[0]\n return card", "def get_card (self, card):\n\t\treturn self._card", "def hand(self, id):\n return self.players[id].cards", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get_object(self, id_):\n return self._objects.get(id_, None)", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def find(self, id):\n return self._select_one('''\n select\n *\n from\n {table}\n where\n {primary_key} = %s\n '''.format(table=self.__class__._table,\n primary_key=self.__class__._primary_key), [id])", "def get_by_id(self, id):\n accts = [acct for acct in self.accounts if UUID(acct.uuid) == UUID(id)]\n assert len(accts) <= 1\n if len(accts) == 0:\n raise KeyError('account with id {} unknown'.format(id))\n elif len(accts) > 1:\n log.warning('multiple accounts with same UUID found', uuid=id)\n return accts[0]", "def deleteCard(self,id):\n if not self.cardExists(id):\n print(\"Card \"+id+\" doesn't exist\")\n return\n del self.cards[id]", "def createCard(self,id,name):\n card = Card(id,name)\n self.cards[id] = card\n print('Created Card:'+id)", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "def find(cls, id=None):\n return cls.query.filter_by(id=id).one_or_none()", "def get_card(name, reda):\n connection = pymongo.MongoClient(MONGO_URL)\n db = connection[DB]\n\n dbcard = db.cards.find_one({'name': name, 'redaction': reda})\n return tocard(dbcard) if dbcard is not None else None", "def get_card(self, suit, face):\n for card in self.deck:\n if card.suit == suit and card.value == 
face:\n return card", "def get_customer_by_id(cid: int) -> Optional[Customer]:\n return get_market().get_customer(cid)", "def get_card(self, repo, card_name):\n # This goes through manage.py because, it requires a check that the\n # user actually has repo access.\n card = Card.objects.get(\n repo_base=self.repo_base, repo_name=repo, card_name=card_name)\n if not card.public:\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'read')\n\n card = Card.objects.get(\n repo_base=self.repo_base, repo_name=repo, card_name=card_name)\n\n return card" ]
[ "0.8715357", "0.73507446", "0.72734964", "0.69763416", "0.69386464", "0.6935953", "0.6860235", "0.680616", "0.6607529", "0.6569944", "0.6448182", "0.64085186", "0.6315493", "0.6297896", "0.6294089", "0.62614655", "0.62189525", "0.62170476", "0.6211605", "0.6211605", "0.6190433", "0.6179802", "0.61526245", "0.6130748", "0.6123358", "0.6111804", "0.61089814", "0.6105841", "0.6100202", "0.6093484" ]
0.8268551
1
Returns the GPS week, day, seconds and microseconds since the beginning of the GPS week
def utctoweekseconds(utc, leapseconds=18):
    datetimeformat = "%Y-%m-%d %H:%M:%S"
    epoch = datetime.datetime.strptime("1980-01-06 00:00:00", datetimeformat)
    tdiff = utc - epoch + datetime.timedelta(seconds=leapseconds)
    gpsweek = tdiff.days // 7
    gpsdays = tdiff.days - 7 * gpsweek
    gpsseconds = tdiff.seconds + 86400 * (tdiff.days - 7 * gpsweek)
    return gpsweek, gpsdays, gpsseconds, tdiff.microseconds
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_week_date(self, raw_week: str) -> tuple:\n\n search_result = re.search(r'^(\\d+.\\d+)\\s+-\\s+\\d+.\\d+', raw_week)\n\n if \"from\" in raw_week:\n week = re.sub(r'^\\D+', '', raw_week)\n\n elif search_result:\n week = search_result.group(1)\n else:\n week = \"{}.{}\".format(current_day, current_month)\n\n week_in_date_format_1900 = datetime.datetime.strptime(week, \"%d.%m\")\n currect_week = week_in_date_format_1900.replace(current_year)\n\n return currect_week.isoformat(), currect_week.isocalendar()[1]", "def UTCFromGps(gpsWeek, SOW, leapSecs=18): \n\tsecFract = SOW % 1 \n\tepochTuple = gpsEpoch + (-1, -1, 0) \n\tt0 = time.mktime(epochTuple) - time.timezone #mktime is localtime, correct for UTC \n\ttdiff = (gpsWeek * secsInWeek) + SOW - leapSecs \n\tt = t0 + tdiff \n\t(year, month, day, hh, mm, ss, dayOfWeek, julianDay, daylightsaving) = time.gmtime(t) \n\t#use gmtime since localtime does not allow to switch off daylighsavings correction!!! \n\treturn (year, month, day, hh, mm, ss + secFract)", "def ms_localtime(warnme = True) :\r\n\r\n global _ts_last\r\n \r\n # we just need some standard way to represent local time with ms precision\r\n # ( and by a 32-bit integer, but this can wait until 2036 )\r\n\r\n # ms s m h \r\n # ms_one_day = 1000 * 60 * 60 * 24 \r\n # current_time = math.floor( time.time() * 1000 ) % ms_one_day \r\n\r\n ## ms_current_time = int( math.floor( time.time() * 1000 ) ) \r\n ## return ms_current_time \r\n\r\n # debug ( check the byte order )\r\n # return 1 \r\n \r\n ## seconds_in_a_day = 60 * 60 * 24 # 86400 \r\n ## day_time_ms = int( math.floor( ( time.time() % seconds_in_a_day ) * 1000 ) ) \r\n ## 2^32 = 4294967296\r\n modulo = 1000000 \r\n # modulo = 10 # tests \r\n ms_remainder = int( math.floor( ( time.time() % modulo ) * 1000 ) ) \r\n\r\n if warnme and ( ms_remainder < _ts_last ) : \r\n\r\n raise Eggog( \"internal 32-bit counter passed through zero, please resynchronize ( call .synch() once again )\" ) \r\n\r\n _ts_last = ms_remainder\r\n \r\n return ms_remainder # finish the recording before midnight ( and start after 00:00 ) \r", "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "def from_gps_time(self):\n reason = \"[!] 
GPS timestamps are 10 digits\"\n ts_type = self.ts_types['gpstime']\n try:\n if not len(self.gps) == 10 or not self.gps.isdigit():\n self.in_gpstime = indiv_output = combined_output = False\n pass\n else:\n leapseconds = self.leapseconds\n gps_stamp = self.epoch_1980 + timedelta(seconds=(float(self.gps)))\n tai_convert = gps_stamp + timedelta(seconds=19)\n epoch_convert = (tai_convert - self.epoch_1970).total_seconds()\n check_date = dt.utcfromtimestamp(epoch_convert)\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n gps_out = check_date - timedelta(seconds=variance)\n self.in_gpstime = gps_out.strftime('%Y-%m-%d %H:%M:%S.%f')\n indiv_output = str(\"{} {}\".format(ts_type, self.in_gpstime))\n combined_output = str(\"{}{}\\t\\t\\t{} UTC{}\".format(self.left_color, ts_type, self.in_gpstime, self.right_color))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.in_gpstime = indiv_output = combined_output = False\n return self.in_gpstime, indiv_output, combined_output, reason", "def get_nightly_start_time():\n return 14 # 2PM local Tucson time", "def get_week_frame():\n now = datetime.now()\n\n week_start = now - timedelta(days=now.weekday(),\n hours=now.hour,\n minutes=now.minute,\n seconds=now.second)\n week_end = now + timedelta(days=6 - now.weekday(),\n hours=23 - now.hour,\n minutes=59 - now.minute,\n seconds=59 - now.second)\n\n return week_start, week_end", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def get_datetime_fields():\n wochentag = datetime.datetime.today().weekday()\n now = datetime.datetime.now()\n\n if now.hour > 14:\n ist_schulzeit = 0\n elif now.hour < 8:\n ist_schulzeit = 0\n else:\n ist_schulzeit = 1\n\n return wochentag, ist_schulzeit", "def get_time(self):\n start=''\n end=''\n time=''\n times=self.times\n print(times[self.istep])\n if self.istep > 0:\n start=ncEarth.beginstr % times[self.istep].isoformat()\n\n\n if self.istep < len(times)-2:\n end = ncEarth.endstr % times[self.istep+1].isoformat()\n\n if start is not '' or end is not '':\n time=ncEarth.timestr % {'begin':start,'end':end}\n\n return time", "def timestamp(self):\n return parse_windows_timestamp(self.unpack_qword(0x4))", "def aquarius_timestamp(arg):\n\n meta = readMetadata(arg)\n\n sat_name = meta['Sensor'].lower()\n stime = meta['Start Time'][0:13]\n etime = meta['End Time'][0:13]\n\n return (stime,\n etime,\n sat_name)", "def GetMonotime():\n return float(open(PROC_UPTIME).read().split()[0])", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def getTimeStamps():\n\n # Initialize\n results = dict()\n\n # UT time\n ut = utils.getUT(pointing=True).split()\n results['utday'] = ut[0]\n results['ut'] = float(ut[1])\n\n # year/month/day/second\n utStamp = time.gmtime()\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n results['timeLab'] = ''.join([commands.yearMonthDay(),'_',utHour,utMin,utSec])\n\n # Done\n return results", "def gps_data():\n gpsd = gps(mode=WATCH_ENABLE|WATCH_NEWSTYLE)\n lat = 0\n lon = 
0\n satellites = 0\n tpv = False\n sky = False\n while True:\n nx = gpsd.next()\n if nx['class'] == 'TPV':\n lat = getattr(nx, 'lat', 0)\n lon = getattr(nx, 'lon', 0)\n tpv = True\n elif nx['class'] == 'SKY':\n satellites = len(nx['satellites'])\n sky = True\n if sky and tpv:\n return satellites, lat, lon", "def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))", "def _first_presence_time(self) -> float:\n return self.population.presence_interval().boundaries()[0][0]", "def get_system_date_and_time(self):\n return self.mycam.devicemgmt.GetSystemDateAndTime()", "def get_times(now, then):\n total = then - now\n days = total.days\n weeks = total.days // 7\n bus_days = np.busday_count(now, then)\n return weeks, days, bus_days, total", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def _unit_wk(self):\n return ((self.time_base * 60.0) * 24.0) * 7", "def temperatures():\n\n return station_9281", "def reorganized_timestamp(self):\n return parse_windows_timestamp(self.unpack_qword(0xA8))", "def unit_wk(self):\n return ((self.time_base * 60.0) * 24.0) * 7", "def output(self):\n if self.after_sunrise:\n return \"%02d:%02d:%02dR\" % self.time\n if self.after_sunset:\n return \"%02d:%02d:%02dT\" % self.time\n return \"%02d:%02d:%02d\" % self.time", "def weekly():", "def to_gps_time(self):\n ts_type = self.ts_types['gpstime']\n try:\n leapseconds = self.leapseconds\n check_date = duparser.parse(self.timestamp)\n if hasattr(check_date.tzinfo, '_offset'):\n dt_tz = check_date.tzinfo._offset.total_seconds()\n check_date = duparser.parse(self.timestamp, ignoretz=True)\n else:\n dt_tz = 0\n for entry in leapseconds:\n check = self.date_range(leapseconds.get(entry)[0], leapseconds.get(entry)[1], check_date)\n if check is True:\n variance = entry\n else:\n variance = 0\n leap_correction = check_date + timedelta(seconds=variance)\n epoch_shift = leap_correction - self.epoch_1970\n gps_stamp = (dt.utcfromtimestamp(epoch_shift.total_seconds()) - self.epoch_1980).total_seconds() - 19\n gps_stamp = int(gps_stamp) - int(dt_tz)\n self.out_gpstime = str(gps_stamp)\n ts_output = str(\"{}\\t\\t\\t{}\".format(ts_type, self.out_gpstime))\n except Exception:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n print(str(exc_type) + \" - \" + str(exc_obj) + \" - line \" + str(exc_tb.tb_lineno))\n self.out_gpstime = ts_output = False\n return self.out_gpstime, ts_output", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = 
datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var" ]
[ "0.59134704", "0.59009886", "0.5853655", "0.5782736", "0.5747621", "0.57289743", "0.5699698", "0.56796503", "0.5656576", "0.56065714", "0.55873585", "0.55524087", "0.5505642", "0.5439676", "0.5432224", "0.5425123", "0.5401933", "0.5364168", "0.53482115", "0.5335548", "0.5329257", "0.53035176", "0.5302344", "0.5293522", "0.52855045", "0.52817434", "0.52738357", "0.52562195", "0.5236623", "0.52326626" ]
0.60639554
0
Unregister accessors or rtypes.
def unregister(self, rtypes=None, accessors=None): if rtypes is not None: for rtype in rtypes: del self[rtype] if accessors is not None: for accessor in accessors: for rtype in accessor.__rtypes__: if rtype in self: del self[rtype]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unregister ():\n dsf_prop_export.unregister ()\n dsf_geom_export.unregister ()\n dsf_wm_import.unregister ()\n dsf_pose_import.unregister ()\n dsf_arm_import.unregister ()\n dsf_uvset_import.unregister ()\n dsf_morph_export.unregister ()\n dsf_morph_import.unregister ()\n dsf_geom_import.unregister ()", "def unregister():\n STAC_IO.read_text_method = STAC_IO.default_read_text_method\n STAC_IO.write_text_method = STAC_IO.default_write_text_method", "def unregister(self):\r\n self._unregister()", "def XPLMUnregisterDataAccessor(inDataRef):", "def unregister(self, alias):\n delattr(self, alias)", "def unregister(self, name):\r\n raise NotImplementedError", "def unregister(self, name: str, opset: OpsetVersion) -> None:\n if name not in self._registry:\n return\n self._registry[name].remove_custom(opset)", "def unregister(self) -> None:\n actions_registry.unregister(self)", "def unregister():\n for name in _registered_ops:\n try:\n torch.onnx.unregister_custom_op_symbolic(name, _OPSET_VERSION)\n except AttributeError:\n # The symbolic_registry module was removed in PyTorch 1.13.\n # We are importing it here for backwards compatibility\n # because unregister_custom_op_symbolic is not available before PyTorch 1.12\n from torch.onnx import symbolic_registry\n\n namespace, kind = name.split(\"::\")\n for version in symbolic_helper._onnx_stable_opsets:\n if version >= _OPSET_VERSION and symbolic_registry.is_registered_op(kind, namespace, version):\n del symbolic_registry._registry[(namespace, version)][kind]", "def deregister_refinement(self, name):\n self.refined_types.pop(name, None)\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n self.clearmemo()", "def tear_down_registry(registry):\n for reg_adp in list(registry.registeredAdapters()):\n registry.unregisterAdapter(factory=reg_adp.factory,\n required=reg_adp.required,\n provided=reg_adp.provided,\n name=reg_adp.name)\n for reg_ut in list(registry.registeredUtilities()):\n registry.unregisterUtility(component=reg_ut.component,\n provided=reg_ut.provided,\n name=reg_ut.name)", "def removeType(self, name):\n delattr(self, name)\n try:\n del self._type_names[name]\n except ValueError:\n pass", "def unregister(self):\n idaapi.unregister_action(self.get_name())", "def deregister_specialization(self, t):\n t = self.canon(t)\n self.cython_ctypes.pop(t, None)\n self.cython_cytypes.pop(t, None)\n self.cython_pytypes.pop(t, None)\n self.cython_cimports.pop(t, None)\n self.cython_cyimports.pop(t, None)\n self.cython_pyimports.pop(t, None)\n self.clearmemo()", "def unregister(self, path, type):\n if not type in ('js', 'css'):\n raise ValueError('Only js or css types are supported.')\n if type == 'js':\n if path in self.js_extensions:\n JS_EXTENSIONS.remove(path)\n else:\n if path in self.css_extensions:\n CSS_EXTENSIONS.remove(path)", "def remove_feature_accessors(obj, feats: FeaturesTuple):\n for feat in feats:\n try:\n delattr(obj, feat.get_name())\n\n except AttributeError:\n pass", "def unregister(self):\r\n self.__screen.unregister_asteroid(self)", "def __delattr__(self, name):\n self.unset(name)", "def _unregister(self):\r\n if hasattr(self, '_registered') and self._registered:\r\n self._conn.unregisterInterface(self._iTag, self)\r\n self._registered = False", "def __delattr__(self, name):\n del self[name]", "def on_deregistered(self):\r\n self.unpossessed()\r\n\r\n # Register type information\r\n 
cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Uncache subtypes\r\n for base_cls in cls.__mro__:\r\n instances = subclass_cache[base_cls]\r\n instances.remove(self)\r\n\r\n if not instances:\r\n subclass_cache.pop(base_cls)\r\n\r\n # Uncache the type\r\n instances = type_cache[cls]\r\n instances.remove(self)\r\n\r\n if not instances:\r\n type_cache.pop(cls)\r\n\r\n ReplicableUnregisteredSignal.invoke(target=self)\r\n\r\n super().on_deregistered()", "def del_typecheck(self, name: str):\n try:\n del self.__custom_types[name]\n except KeyError:\n pass", "def __delattr__(cls, name):\n raise TypeError('May not delete attributes on definition class')", "def unregister_resource(resource):\n del _name_to_resources[resource.name]\n del _name_to_resources[resource.name_plural]\n del _class_to_resources[resource.__class__]", "def remove_type(self, name):\n del self.types[name]", "def unregister(self, old):\n raise NotImplementedError", "def unregister(self, old):\n for item in self.items:\n item.unregister(old)", "def unregister(self) -> None:\n for child in self._children:\n child.unregister()\n\n actions_registry.unregister(self)", "def unregisterMetaChanged(self, function):\n self._sig_changed.unsubscribe(function)", "def deregister_class(self, name):\n isbase = name in self.base_types\n if not isbase and name not in self.template_types:\n _raise_type_error(name)\n if isbase:\n self.base_types.remove(name)\n else:\n self.template_types.pop(name, None)\n\n self.cython_ctypes.pop(name, None)\n self.cython_cytypes.pop(name, None)\n self.cython_pytypes.pop(name, None)\n self.from_pytypes.pop(name, None)\n self.cpp_types.pop(name, None)\n self.humannames.pop(name, None)\n self.cython_cimports.pop(name, None)\n self.cython_cyimports.pop(name, None)\n self.cython_pyimports.pop(name, None)\n\n self.cython_c2py_conv.pop(name, None)\n self.cython_py2c_conv.pop(name, None)\n self.cython_classnames.pop(name, None)\n\n self.clearmemo()" ]
[ "0.69912624", "0.6978314", "0.675342", "0.64715624", "0.6418582", "0.63744724", "0.63308245", "0.6324244", "0.62827414", "0.6219619", "0.6187397", "0.6147885", "0.61440146", "0.6115181", "0.6083284", "0.5992793", "0.5981351", "0.59576404", "0.59505045", "0.59396935", "0.5923208", "0.59082854", "0.5876142", "0.58609426", "0.5847481", "0.5792764", "0.5792227", "0.5779344", "0.57718515", "0.57674867" ]
0.83660007
0
Create the Attribute instance and create AttributeValue(s) associated with it
def create(self, validated_data): # Create the Attribute instance attribute = Attribute.objects.create( name=validated_data['name'] ) # Create each AttributeValue instance for item in validated_data.get('values', []): AttributeValue.objects.create( name=item['name'], value=item['value'], attribute=attribute) return attribute
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createAttribute(nid, label, primary, list, x, y):\n attribute = Attribute(nid, label, primary, x, y)\n list.append(attribute)", "def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute", "def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)", "def _create_attribute(self,\n identifier,\n idl_type,\n is_readonly=False,\n extended_attributes=None,\n node=None):\n if isinstance(idl_type, str):\n idl_type = self._create_type(idl_type)\n if isinstance(extended_attributes, dict):\n extended_attributes = self._create_extended_attributes(\n extended_attributes)\n debug_info = self._build_debug_info(node) if node else None\n\n return Attribute.IR(\n identifier,\n idl_type=idl_type,\n is_readonly=is_readonly,\n extended_attributes=extended_attributes,\n component=self._component,\n debug_info=debug_info)", "def test_create_att_object():\n from .scripts.initializedb import create_att_object\n att_object = create_att_object(\"a\", \"b\", \"c\", \"d\", \"c\")\n assert isinstance(att_object, Attribute)", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def update(self, instance, validated_data):\n\n # Update the Attribute instance\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n\n # If there is no supplied values then do nothing with it\n if validated_data.get('values'):\n # Delete any AttributeValue not included in the request\n value_ids = [item.get('id') for item in validated_data['values']]\n for value in instance.values.all():\n if value.id not in value_ids:\n value.delete()\n\n # Create or update AttributeValue instances that are in the request\n for item in validated_data['values']:\n value = AttributeValue(\n id=item.get('id'),\n name=item['name'],\n value=item['value'],\n attribute=instance)\n 
value.save()\n\n return instance", "def _build_attributes(self):\n\n # We might rebuild the program because of snippets but we must\n # keep already bound attributes\n\n dtype = []\n for (name,gtype) in self.all_attributes:\n if name not in self._attributes.keys():\n attribute = Attribute(self, name, gtype)\n else:\n attribute = self._attributes[name]\n\n self._attributes[name] = attribute\n dtype.append(attribute.dtype)", "def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value", "def _create(self, data):\n model = self.model\n data = self._check_odoo_attribute(data)\n binding = model.create(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%s %d created from magento %s',\n self.model._name, binding.id, self.magento_id)\n return binding", "async def create_attribute(\n attribute: AttributeIn, \n session: AsyncSession = Depends(get_session)\n ):\n result = await insert_attribute(session=session, attribute=attribute)\n return result", "def attributes(self, *args):\n kwargs = {}\n if args:\n kwargs[\"attributenames\"] = args\n\n r = self._token_id_request(urljoin(self._url, Client._attribute_resource), **kwargs)\n\n # parse contennt looking for all attributes\n attributes = []\n for line in r.text.splitlines():\n r = re.match(\"(userdetails\\.attribute\\.name)=(.*)\", line)\n if r:\n name = r.groups()[1]\n attributes.append([name, None])\n continue # next line\n\n r = re.match(\"(userdetails\\.attribute\\.value)=(.*)\", line)\n if r:\n value = r.groups()[1]\n # last name parsed is where it has to\n # be stacked\n if attributes[-1][1] == None:\n attributes[-1][1] = value\n if isinstance(attributes[-1][1], list):\n attributes[-1][1].append(value)\n else:\n # cast to list\n attributes[-1].append([attributes[-1][1], value])\n\n return dict([(item[0], item[1]) for item in attributes])", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def make_attributes(kwargs: Dict[str, Any]) -> List:\n\n def _make_attribute(name: str, value: any):\n attribute = {'AttributeName': name}\n if isinstance(value, str):\n attribute['Value'] = {ValueTypes.StringValue.name: value}\n elif isinstance(value, bytes):\n attribute['Value'] = {ValueTypes.BinaryValue.name: value}\n elif isinstance(value, bool):\n attribute['Value'] = {ValueTypes.BooleanValue.name: value}\n elif isinstance(value, int):\n attribute['Value'] = {ValueTypes.NumberValue.name: str(value)}\n # int to str is required by cloud directory\n elif isinstance(value, datetime):\n attribute['Value'] = {ValueTypes.DatetimeValue.name: value}\n else:\n raise ValueError()\n return attribute\n\n return [_make_attribute(name, value) for name, value in kwargs.items()]", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + 
value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def add(\n self,\n key,\n value,\n category=None,\n lockstring=\"\",\n strattr=False,\n accessing_obj=None,\n default_access=True,\n ):\n if accessing_obj and not self.obj.access(\n accessing_obj, self._attrcreate, default=default_access\n ):\n # check create access\n return\n\n if not key:\n return\n\n category = category.strip().lower() if category is not None else None\n keystr = key.strip().lower()\n attr_obj = self._getcache(key, category)\n\n if attr_obj:\n # update an existing attribute object\n attr_obj = attr_obj[0]\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally (this will also notify OOB handlers)\n attr_obj.value = value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(value),\n \"db_strvalue\": value if strattr else None,\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n getattr(self.obj, self._m2m_fieldname).add(new_attr)\n # update cache\n self._setcache(keystr, category, new_attr)", "def save_object(self, data):\n return Attribute(**data)", "def __init__(self, attribute_name=None, attribute_value=None, attribute_price=None): # noqa: E501 # noqa: E501\n\n self._attribute_name = None\n self._attribute_value = None\n self._attribute_price = None\n self.discriminator = None\n\n if attribute_name is not None:\n self.attribute_name = attribute_name\n if attribute_value is not None:\n self.attribute_value = attribute_value\n if attribute_price is not None:\n self.attribute_price = attribute_price", "def _serialize_attr_value(self, value):\n if hasattr(value, 'deconstruct'):\n assert callable(value.deconstruct)\n\n attr_type_path, attr_args, attr_kwargs = value.deconstruct()\n\n value = {\n 'type': attr_type_path,\n 'args': [\n self._deconstruct_attr_value(arg_value)\n for arg_value in attr_args\n ],\n 'kwargs': {\n key: self._deconstruct_attr_value(arg_value)\n for key, arg_value in attr_kwargs\n },\n '_deconstructed': True,\n }\n\n return value", "def define_attribute(self, name, atype, data=None):\n self.attributes.append(name)\n self.attribute_types[name] = atype\n self.attribute_data[name] = data", "def create(self, validated_data):\n\n # Create the Attribute instance\n product = Product(\n name=validated_data['name'],\n product_template=validated_data['product_template']['id'],\n description=validated_data.get('description'),\n active=validated_data.get('active', False)\n )\n if validated_data['min_price'].get('amount'):\n product.min_price = Money(\n amount=validated_data['min_price'].get('amount', Decimal(0.0)),\n currency=validated_data.get('min_price_currency', settings.DEFAULT_CURRENCY),\n )\n product.save()\n\n # Create each ConnectedProductAttribute instance associated with it\n for item in validated_data.get('attributes', []):\n ConnectedProductAttribute.objects.create(\n product=product,\n connection=item['connection'],\n value=item['value']\n )\n # # Fully working, commented for make structure easier\n # # by allowing variants created only at one location\n # # Create each Variant instance associated with it\n # for item in 
validated_data.get('variants', []):\n # variant = ProductVariant(\n # name=item['name'],\n # product=product,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price'].get('amount', Decimal(0.0)),\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # variant.save()\n\n return product", "def _validate_value(\n cls,\n attribute: models.Attribute,\n value_data: dict,\n is_swatch_attr: bool,\n ):\n value = value_data.get(\"name\")\n if value is None:\n raise ValidationError(\n {\n cls.ATTRIBUTE_VALUES_FIELD: ValidationError(\n \"The name field is required.\",\n code=AttributeErrorCode.REQUIRED.value,\n )\n }\n )\n\n if is_swatch_attr:\n cls.validate_swatch_attr_value(value_data)\n else:\n cls.validate_non_swatch_attr_value(value_data)\n\n slug_value = value\n value_data[\"slug\"] = slugify(unidecode(slug_value))\n\n attribute_value = models.AttributeValue(**value_data, attribute=attribute)\n try:\n attribute_value.full_clean()\n except ValidationError as validation_errors:\n for field, err in validation_errors.error_dict.items():\n if field == \"attribute\":\n continue\n errors = []\n for error in err:\n error.code = AttributeErrorCode.INVALID.value\n errors.append(error)\n raise ValidationError({cls.ATTRIBUTE_VALUES_FIELD: errors})", "def build_attributes(\n cls,\n attributes: Dict[str, Any],\n namespace: ConfigNamespace\n ) -> Dict[str, Any]:\n config_path = attributes.get('config_path')\n tokens = {}\n\n def build_config_key(value_def: ValueTypeDefinition, config_key: str) -> str:\n key = value_def.config_key or config_key\n return f\"{config_path}.{key}\" if config_path else key\n\n def build_token(\n name: str,\n value_def: ValueTypeDefinition\n ) -> Tuple[str, property]:\n config_key = build_config_key(value_def, name)\n value_token = ValueToken.from_definition(\n value_def, namespace, config_key)\n getters.register_value_proxy(namespace, value_token, value_def.help)\n tokens[name] = value_token\n return name, build_property(value_token)\n\n def build_attr(name: str, attribute: Any) -> Tuple[str, property]:\n if not isinstance(attribute, ValueTypeDefinition):\n return name, attribute\n return build_token(name, attribute)\n\n attributes = dict(build_attr(*item)\n for item in attributes.items())\n attributes['_tokens'] = tokens\n return attributes", "def _setAttribute(self, attribute, value):\n\n # if multiple values found\n if hasattr(self, attribute):\n\n # make sure attribute is a list\n values = getattr(self, attribute)\n if not isinstance(values, list):\n setattr(self, attribute, [values])\n\n # append value to list\n getattr(self, attribute).append(value)\n\n # single value found\n else:\n setattr(self, attribute, value)", "def __init__(self, events={}, attributes={}):\n self.events = events.copy()\n self.attributes = attributes.copy()\n if not AT.VALUE_STRATEGY in self.attributes:\n self.attributes[AT.VALUE_STRATEGY] = ValueStrategy.PRESET", "def batch_add(self, *args, **kwargs):\n new_attrobjs = []\n strattr = kwargs.get(\"strattr\", False)\n for tup in args:\n if not is_iter(tup) or len(tup) < 2:\n raise RuntimeError(\"batch_add requires iterables as arguments (got %r).\" % tup)\n ntup = len(tup)\n keystr = str(tup[0]).strip().lower()\n new_value = tup[1]\n category = str(tup[2]).strip().lower() if ntup > 2 and tup[2] is not None else None\n lockstring = tup[3] if ntup > 3 else \"\"\n\n attr_objs = self._getcache(keystr, category)\n\n if attr_objs:\n attr_obj 
= attr_objs[0]\n # update an existing attribute object\n attr_obj.db_category = category\n attr_obj.db_lock_storage = lockstring or \"\"\n attr_obj.save(update_fields=[\"db_category\", \"db_lock_storage\"])\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = new_value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally (this will also notify OOB handlers)\n attr_obj.value = new_value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(new_value),\n \"db_strvalue\": new_value if strattr else None,\n \"db_lock_storage\": lockstring or \"\",\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n new_attrobjs.append(new_attr)\n self._setcache(keystr, category, new_attr)\n if new_attrobjs:\n # Add new objects to m2m field all at once\n getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)", "def addattribute(self, uid, field, value):\n\n raise NotImplementedError" ]
[ "0.64116377", "0.6013851", "0.59971005", "0.5967344", "0.5908096", "0.58857423", "0.5882443", "0.58695245", "0.5827147", "0.57940096", "0.5792889", "0.5781367", "0.57775974", "0.5776246", "0.5776246", "0.5776246", "0.5768085", "0.5757633", "0.5725134", "0.5722951", "0.5719026", "0.5667984", "0.5663626", "0.5656581", "0.56430763", "0.5625784", "0.5612913", "0.55979323", "0.5542676", "0.5542415" ]
0.78820187
0
Update the Attribute instance and update/create AttributeValue(s) associated with it and delete unwanted AttributeValue(s).
def update(self, instance, validated_data): # Update the Attribute instance instance.name = validated_data.get('name', instance.name) instance.save() # If there is no supplied values then do nothing with it if validated_data.get('values'): # Delete any AttributeValue not included in the request value_ids = [item.get('id') for item in validated_data['values']] for value in instance.values.all(): if value.id not in value_ids: value.delete() # Create or update AttributeValue instances that are in the request for item in validated_data['values']: value = AttributeValue( id=item.get('id'), name=item['name'], value=item['value'], attribute=instance) value.save() return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, instance, validated_data):\n\n # Update the Attribute instance\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n\n # Update AttributeProduct\n self.update_attributes(instance, validated_data,\n field_name=\"attribute_product\",\n attr_model=AttributeProduct,\n attr_all=instance.attribute_product.all())\n\n # Update AttributeVariant\n self.update_attributes(instance, validated_data,\n field_name=\"attribute_variant\",\n attr_model=AttributeVariant,\n attr_all=instance.attribute_variant.all())\n\n return instance", "def merge_attrs(self):\n for aid in self.attrs:\n new_val = self.attrs[aid]\n if aid in self.attributes:\n if ('value' in self.attributes[aid] and\n self.attributes[aid]['value'] != new_val):\n pass\n # print \"Updating attribute %s[%s] %s -> %s\" % (\n # self.name, aid, self.attributes[aid]['value'], new_val)\n else:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, new_val)\n self.remember_custom_attribute(self.name, aid, new_val)\n self.attributes[aid] = {}\n self.attributes[aid]['nv'] = new_val", "def test_update_data_type_of_attrvalue(self):\n user = User.objects.create(username=\"hoge\")\n\n entity = Entity.objects.create(name=\"entity\", created_user=user)\n entity.attrs.add(\n EntityAttr.objects.create(\n **{\n \"name\": \"attr\",\n \"type\": AttrTypeValue[\"string\"],\n \"created_user\": user,\n \"parent_entity\": entity,\n }\n )\n )\n\n entry = Entry.objects.create(name=\"entry\", schema=entity, created_user=user)\n entry.complement_attrs(user)\n\n attrv = entry.attrs.first().add_value(user, \"hoge\")\n\n # vanish data_type of initial AttributeValue instance\n attrv.data_type = 0\n attrv.save()\n\n # this processing complements data_type parameter of latest AttributeValue\n # as the current type of Attribute instance\n results = entry.get_available_attrs(self._user)\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0][\"last_value\"], \"\")\n self.assertEqual(AttributeValue.objects.get(id=attrv.id).data_type, AttrTypeValue[\"string\"])", "def _save_attrs(self) -> None:\n for attr_id in self.attr_ids:\n orig_label = self.attr_ids[attr_id]\n attr_label = self.attr_labels[attr_id].GetValue()\n attr_value = self.attr_values[attr_id].GetValue()\n if attr_label == '':\n continue\n if orig_label != attr_label and orig_label != '':\n self.element.attr.pop(orig_label)\n self.attr_ids[attr_id] = attr_label\n if attr_label not in self.element.attr \\\n or self.element.attr[attr_label] != attr_value:\n self.element.attr[attr_label] = attr_value", "def update(self, attributes):\n for key in attributes:\n k = key.lower()\n if not isinstance(attributes[key], str) or attributes[key] != '':\n k_ = k.strip(' =:\\t\\n').replace('', '')\n self.attributes.update({k_: attributes[key]})\n elif k in self.attributes:\n del self.attributes[k]", "def update_attribute(self, attr, value):\n old_val = self.attrs[attr]\n if not value_is_equal(old_val, value):\n self.old_attrs[attr] = old_val\n self.attrs[attr] = value\n self.set_changed()\n old_type = type(old_val)\n new_type = type(value)\n if new_type != old_type:\n old_is_null = old_val is sqlapi.NULL\n both_are_strings = isinstance(old_val, basestring) and isinstance(value, basestring)\n if not old_is_null and not both_are_strings:\n old_type = (u\"%s\" % old_type)[1:-1]\n new_type = (u\"%s\" % new_type)[1:-1]\n misc.cdblogv(misc.kLogErr, 0, \"BOMEntry.update_attribute: unexpected data type for attribute '%s': expected '%s', got '%s'\" % 
(attr, old_type, new_type))", "def update(self, **values):\r\n if self.__abstract__:\r\n raise ThunderdomeException('cant update abstract elements')\r\n self.pre_update(**values)\r\n for key in values.keys():\r\n if key not in self._columns:\r\n raise TypeError(\"unrecognized attribute name: '{}'\".format(key))\r\n\r\n for k,v in values.items():\r\n setattr(self, k, v)\r\n\r\n return self.save()", "def test_update_attribute_data(self):\n pass", "def modify_attribute(self, attribute, value):\r\n return self.connection.modify_instance_attribute(self.id, attribute,\r\n value)", "def set_attr_values(self):\n ats = self.attributes # convenient short name\n for aid in ats:\n value = ats[aid]['nv'] if 'nv' in ats[aid] else (\n ats[aid]['value'] if 'value' in ats[aid] else None)\n if value is not None:\n# self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)\n #- self.file.h5save_attribute(self.full_path, aid, value)\n #- self.file.h5commands.append(\"set attribute(%s:%s)-%s\" % (self.full_path,\n #- aid, value))", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def do_update(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n args = arg.split(\" \")\n if len(args) < 3:\n print(\"** attribute name missing **\")\n return\n if len(args) < 4:\n print(\"** value missing **\")\n return\n setattr(obj, args[2], args[3])\n obj.save()", "def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)", "def update_attribute(self, attribute_name, attribute_value, strict=True):\n\n if (attribute_name in self._default_attrs) or (not strict):\n print('Setting attribute \"{}\" to \"{}\"'.format(attribute_name, attribute_value))\n self._attrs[attribute_name] = attribute_value\n else:\n raise(Exception('{} is not a valid attribute.'.format(attribute_name)))", "def _save_attrs(self) -> None:\n for attr_req_id in self.attr_req_ids:\n orig_label = self.attr_req_ids[attr_req_id]\n attr_req_label = self.attr_req_labels[attr_req_id].GetValue()\n attr_req_element = self.attr_requirements[self.element][orig_label]\n if attr_req_label == '':\n continue\n if orig_label != attr_req_label and orig_label != '':\n self.attr_requirements[self.element].pop(orig_label)\n self.attr_req_ids[attr_req_id] = attr_req_label\n if attr_req_label not in self.attr_requirements[self.element] \\\n or self.attr_requirements[self.element][\n attr_req_label] != attr_req_element:\n self.attr_requirements[self.element][\n attr_req_label] = attr_req_element", "def update(self, **kwargs):\n for key, value in kwargs.items():\n key = key.upper()\n if not hasattr(self, key):\n self.logger.info(f'[✗] Ignore unknown attribute \"{key}\"')\n else:\n setattr(self, key, value)\n self.logger.info(f'[✓] Attribute \"{key}\" has been updated to \"{value}\"')\n\n assert self.UI in self._SUPPORT_UI, 'unsupported UI'\n assert self.MODE in self._SUPPORT_MODE, 'unsupported MODE'", "def modify_instance_attribute(self, instance_id, attribute, value):\r\n # Allow a bool to be passed in for value of disableApiTermination\r\n if attribute == 'disableApiTermination':\r\n if isinstance(value, bool):\r\n if value:\r\n value = 'true'\r\n else:\r\n value = 'false'\r\n params = {'InstanceId' : instance_id,\r\n 'Attribute' : attribute,\r\n 'Value' : value}\r\n return self.get_status('ModifyInstanceAttribute', params, verb='POST')", "def update_attributes(self, key, value):\n if key not 
in self.attr:\n raise KeyError(f'{key} is not defined in location attributes.')\n else:\n self.attr[key] = value", "def SetAttributes(self, attr):\r\n \r\n if self._ownsAttr:\r\n del self._attr\r\n \r\n self._attr = attr\r\n self._ownsAttr = False", "def do_update(self, *args):\n if len(args) == 1:\n args = [ele for ele in args[0].split(' ')]\n if args[0] == '':\n print(\"** class name missing **\")\n return\n if args[0] not in self.list_classes:\n print(\"** class doesn't exist **\")\n return\n if len(args) < 2:\n print(\"** instance id missing **\")\n return\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(args) < 4:\n print(\"** value missing **\")\n return\n\n storage.reload()\n dict_objs = storage.all()\n if dict_objs is None or dict_objs == []:\n print(\"** no instance found **\")\n return\n\n key = \"{}.{}\".format(args[0], args[1])\n if key in dict_objs.keys():\n obj = dict_objs[key]\n if args[2] in obj.__class__.__dict__:\n obj.__dict__[args[2]] =\\\n type(obj.__class__.__dict__[args[2]])(args[3])\n else:\n obj.__dict__[args[2]] = args[3]\n storage.save()\n else:\n print(\"** no instance found **\")", "def _update(self, binding, data):\n self._validate_data(data)\n if not data.get('name',False):\n data['name'] = data.get('frontend_label',False) or 'No Label'\n if not data.get('create_variant',False):\n data['create_variant'] = data.get('is_configurable',False)\n binding.write(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%d updated from magento %s', binding.id, self.magento_id)\n return", "def do_update(self, line):\n if line:\n args = shlex.split(line)\n if len(args) < 2:\n print(\"** instance id missing **\")\n return False\n elif len(args) < 3:\n print(\"** attribute name missing **\")\n return False\n elif len(args) == 3:\n print(\"** value missing **\")\n return False\n else:\n obj_name, obj_id, obj_attr, obj_value = args\n obj_repr = \"{}.{}\".format(obj_name, obj_id)\n data = FileStorage()\n data.reload()\n data_loaded = data.all()\n for key, value in data_loaded.items():\n if key == obj_repr:\n obj = eval(obj_name)(**value.to_dict())\n if obj_name in obj.__dict__.keys():\n obj[obj_name] = obj_value\n else:\n setattr(obj, obj_attr, obj_value)\n d = {}\n for s_key, s_value in data_loaded.items():\n d[s_key] = s_value.to_dict()\n with open(data.path(), mode='w', encoding=\"utf-8\") as file:\n file.write(json.dumps(d))\n break\n else:\n print(\"** class doesn't exist **\")\n else:\n print(\"** class name missing **\")", "def attribute_dict(self, attribute_dict):\n self.__attribute_dict.update(attribute_dict)", "def batch_add(self, *args, **kwargs):\n new_attrobjs = []\n strattr = kwargs.get(\"strattr\", False)\n for tup in args:\n if not is_iter(tup) or len(tup) < 2:\n raise RuntimeError(\"batch_add requires iterables as arguments (got %r).\" % tup)\n ntup = len(tup)\n keystr = str(tup[0]).strip().lower()\n new_value = tup[1]\n category = str(tup[2]).strip().lower() if ntup > 2 and tup[2] is not None else None\n lockstring = tup[3] if ntup > 3 else \"\"\n\n attr_objs = self._getcache(keystr, category)\n\n if attr_objs:\n attr_obj = attr_objs[0]\n # update an existing attribute object\n attr_obj.db_category = category\n attr_obj.db_lock_storage = lockstring or \"\"\n attr_obj.save(update_fields=[\"db_category\", \"db_lock_storage\"])\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = new_value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally 
(this will also notify OOB handlers)\n attr_obj.value = new_value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(new_value),\n \"db_strvalue\": new_value if strattr else None,\n \"db_lock_storage\": lockstring or \"\",\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n new_attrobjs.append(new_attr)\n self._setcache(keystr, category, new_attr)\n if new_attrobjs:\n # Add new objects to m2m field all at once\n getattr(self.obj, self._m2m_fieldname).add(*new_attrobjs)", "def update_attributes_with_dict(self, attribute_dict):\n\n for attribute_name in attribute_dict.keys():\n \n self.update_attribute(attribute_name, attribute_dict[attribute_name])", "def set_from_dict(self, attribute_dict):\n\n # Iterate through each attribute / value pair in the dictionary.\n for attr, value in attribute_dict.items():\n\n # Get the value currently in self.attr. Use None if this is not a\n # current attribute of self.\n try:\n old_value = getattr(self, attr)\n except AttributeError:\n old_value = None\n\n # Uncertainty values from the GUI will either be None or Decimals.\n # We want to prevent overwriting \"NaN\" with None.\n if (value is None and is_empty(old_value)):\n continue\n\n # Update self.\n setattr(self, attr, value)\n\n # If no value is provided, set to default.\n if self.value is None:\n self.value = self.default", "def update(self, new_data):\n all_keys = [key for key in self.__dict__]\n keys = [key for key in new_data]\n for key in keys:\n if key in all_keys:\n setattr(self, key, new_data[key])\n else:\n return {\n \"message\": \"Error encountered when setting attributes.\",\n \"help\": \"Ensure all fields you're updating are valid.\"\n }\n self.save()", "def update(self, instance, validated_data):\n instance.key = validated_data.get('key', instance.key)\n instance.value = validated_data.get('value', instance.value)\n instance.category = validated_data.get('category', instance.category)\n instance.car = validated_data.get('car', instance.car)\n\n instance.save()\n return instance", "def update(self, verbose=True):\n params = flask.request.json\n for key, value in params.items():\n\n if value == 'None':\n value = None\n\n if key == 'updated_on':\n value = datetime.now()\n\n if key in self.data_model.keys():\n\n # unfuck javascript-style date strings\n if self.data_model[key] == datetime:\n if isinstance(value, datetime):\n pass\n elif isinstance(value, dict):\n value = datetime.fromtimestamp(value['$date'] / 1000.0)\n else:\n try:\n value = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.000Z')\n except ValueError:\n try:\n value = datetime.strptime(value, '%Y-%m-%d %H:%M:%S')\n except Exception as e:\n raise\n except TypeError as e:\n err_msg = \"Incoming attribute '%s' must be datetime!\"\n self.logger.error(err_msg % key)\n self.logger.error(\"Got value '%s' instead...\" % value)\n raise Exception(err_msg % key)\n\n # set the self.attribute values\n setattr(self, key, value)\n\n self.save(verbose)", "def update(self) -> None:\n self._api.update()\n if self.available:\n self._attr_native_value = self._api.data[self.entity_description.key]\n else:\n self._attr_native_value = None" ]
[ "0.6762108", "0.6644434", "0.6610771", "0.6478294", "0.6431708", "0.64186126", "0.62374663", "0.6134519", "0.6097815", "0.6054426", "0.6005432", "0.59968334", "0.59843415", "0.59825784", "0.59301513", "0.5895596", "0.58702755", "0.5737867", "0.57373", "0.5722033", "0.5675206", "0.5674201", "0.5673847", "0.5660752", "0.5653698", "0.5643954", "0.5642714", "0.5636299", "0.5633459", "0.561623" ]
0.7772718
0
Create the ProductTemplate instance and create associations with attribute(s) which connect to its Products and Variants associated with the ProductTemplate itself
def create(self, validated_data): # Create the Attribute instance # Creates an instance regardless errors happen later template = ProductTemplate.objects.create( name=validated_data['name'] ) # Create each AttributeProduct instance if validated_data.get('attribute_product'): for item in validated_data['attribute_product']: AttributeProduct.objects.create( attribute=Attribute(item['attribute']['id']), product_template=template ) # Create each AttributeProduct instance if validated_data.get('attribute_variant'): for item in validated_data['attribute_variant']: AttributeVariant.objects.create( attribute=Attribute(item['attribute']['id']), product_template=template ) return template
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_product(self):\n product = self.product_obj.create({\n \"default_code\": 'A2330',\n \"product_tmpl_id\":\n self.ref(\"product.product_product_4_product_template\"),\n \"attribute_value_ids\": [(6, 0, [\n self.ref('product.product_attribute_value_1'),\n self.ref('product_lifecycle.product_attribute_value_6'),\n self.ref('product.product_attribute_value_5')])],\n \"replacement_product_ids\": [(\n 6, 0, [self.ref('product_lifecycle.product_product_4e')]\n )]})\n return product", "def create_product_from_parameters(self, cr, uid, vals, context=None):\n if 'tmpl_id' not in vals:\n raise osv.except_osv('Warning', _('Wrong data'))\n \n product_obj = self.pool.get('product.product')\n multi_type_obj = self.pool.get('product.variant.dimension.type')\n multi_option_obj = self.pool.get('product.variant.dimension.option')\n multi_value_obj = self.pool.get('product.variant.dimension.value')\n \n tmpl_id = int(vals['tmpl_id'])\n multi_fields = {}\n for key, val in vals.items():\n if key.startswith('product_tmpl_id_'):\n dim_key = int(key[16:])\n opt_key = val\n # Create new option, if it not exists\n if hasattr(opt_key, 'startswith') and opt_key.startswith('new_'):\n opt_val = opt_key[4:]\n opt_key = multi_option_obj.create(cr, uid, {\n 'name': opt_val,\n 'code': opt_val,\n 'dimension_id': dim_key,\n })\n else:\n opt_key = int(opt_key)\n \n multi_fields[dim_key] = opt_key\n \n template = self.pool.get('product.template').browse(cr, uid, tmpl_id, context)\n if not template:\n raise osv.except_osv('Warning', _('Wrong template'))\n \n product_name = template.name\n parameters_values = ''\n multi_value_ids = []\n if multi_fields:\n # Set options to template, if it not exists in it\n for dim, opt in multi_fields.items():\n tmps = multi_value_obj.search(cr, uid, [('dimension_id', '=', dim), ('option_id', '=', opt), ('product_tmpl_id', '=', tmpl_id)])\n if not tmps:\n multi_value_obj.create(cr, uid, {\n 'product_tmpl_id': tmpl_id,\n 'dimension_id': dim,\n 'option_id': opt,\n })\n \n product = product_obj.multi_search_one(cr, uid, tmpl_id, multi_fields)\n if len(product) == 1:\n p = product_obj.browse(cr, uid, product[0])\n msg = u'Продукт \"%s\" был использован' % p.product_name\n self.log(cr, uid, product[0], msg)\n return product[0]\n elif len(product) > 1:\n raise osv.except_osv('Error', 'Количество таких продуктов > 1')\n \n for key, val in multi_fields.items():\n key = int(key)\n val = int(val)\n multi_value_id = multi_value_obj.search(cr, uid, [('dimension_id', '=', key), ('option_id', '=', val), ('product_tmpl_id', '=', tmpl_id)], context=context)\n if not multi_value_id:\n raise osv.except_osv('Warning', _('Wrong dimension'))\n else:\n multi_value_ids.append(multi_value_id[0])\n \n type = multi_type_obj.browse(cr, uid, key)\n type_desc = type.description or type.name\n type_name = type.name\n option = multi_option_obj.browse(cr, uid, val)\n option_code = option.code or option.name\n option_name = option.name\n \n product_name += ' %s - %s ' % (type_desc, option_code)\n \n product_id = product_obj.create(cr, uid, {\n 'product_tmpl_id': tmpl_id, \n 'product_name': product_name,\n }, context=context)\n if not product_id:\n raise osv.except_osv('Warning', _('Error in creating product'))\n else:\n product_name = self.pool.get('product.product').browse(cr, uid, product_id).product_name\n msg = _(u'Продукт \"%s\" был создан') % product_name\n self.log(cr, uid, product_id, msg)\n \n for val in multi_value_ids:\n query = \"\"\" INSERT INTO product_product_dimension_rel \n (dimension_id, 
product_id) \n VALUES\n (%s, %s)\"\"\" % (val, product_id)\n cr.execute(query)\n \n return product_id", "def create(self, validated_data):\n\n # Create the Attribute instance\n product = Product(\n name=validated_data['name'],\n product_template=validated_data['product_template']['id'],\n description=validated_data.get('description'),\n active=validated_data.get('active', False)\n )\n if validated_data['min_price'].get('amount'):\n product.min_price = Money(\n amount=validated_data['min_price'].get('amount', Decimal(0.0)),\n currency=validated_data.get('min_price_currency', settings.DEFAULT_CURRENCY),\n )\n product.save()\n\n # Create each ConnectedProductAttribute instance associated with it\n for item in validated_data.get('attributes', []):\n ConnectedProductAttribute.objects.create(\n product=product,\n connection=item['connection'],\n value=item['value']\n )\n # # Fully working, commented for make structure easier\n # # by allowing variants created only at one location\n # # Create each Variant instance associated with it\n # for item in validated_data.get('variants', []):\n # variant = ProductVariant(\n # name=item['name'],\n # product=product,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price'].get('amount', Decimal(0.0)),\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # variant.save()\n\n return product", "def test_0010_product_template_import(self):\n with Transaction().start(DB_NAME, USER, context=CONTEXT) as txn:\n # Call method to setup defaults\n self.setup_defaults()\n\n with txn.set_context(\n current_channel=self.channel.id, ps_test=True,\n ):\n self.setup_channels()\n\n self.assertEqual(len(self.ProductTemplate.search([])), 1)\n self.assertEqual(len(self.TemplatePrestashop.search([\n ('channel', '=', self.channel.id)\n ])), 0)\n self.assertEqual(len(self.Product.search([])), 1)\n self.assertEqual(len(self.ProductPrestashop.search([\n ('channel', '=', self.channel.id)\n ])), 0)\n\n product_data = get_objectified_xml('products', 1)\n template = self.ProductTemplate.find_or_create_using_ps_data(\n product_data\n )\n # This should create a template and two variants where one\n # is created by template and other by this combination\n self.assertEqual(len(self.ProductTemplate.search([])), 2)\n self.assertEqual(len(self.TemplatePrestashop.search([\n ('channel', '=', self.channel.id)\n ])), 1)\n self.assertEqual(len(self.Product.search([])), 2)\n self.assertEqual(len(self.ProductPrestashop.search([\n ('channel', '=', self.channel.id)\n ])), 1)\n\n # Product name should be in english and french\n with txn.set_context(language='en_US'):\n template = self.ProductTemplate(template.id)\n self.assertEqual(\n template.name, 'iPod Nano'\n )\n with txn.set_context(language='fr_FR'):\n template = self.ProductTemplate(template.id)\n self.assertEqual(\n template.name, 'iPod Nano French'\n )\n\n # Product description should be in english only\n with txn.set_context(language='en_US'):\n product_desc_en = self.Product(\n template.products[0].id\n ).description\n with txn.set_context(language='fr_FR'):\n product_desc_fr = self.Product(\n template.products[0].id\n ).description\n self.assertEqual(product_desc_en, product_desc_fr)\n\n # Nothing should be created under site_alt\n self.assertEqual(len(self.TemplatePrestashop.search([\n ('channel', '=', self.alt_channel.id)\n ])), 0)\n self.assertEqual(len(self.ProductPrestashop.search([\n ('channel', '=', 
self.alt_channel.id)\n ])), 0)\n\n # Get template using prestashop data\n self.assertEqual(\n template.id,\n self.ProductTemplate.get_template_using_ps_data(\n product_data\n ).id\n )\n\n # Get template using prestashop ID\n self.assertEqual(\n template.id,\n self.ProductTemplate.get_template_using_ps_id(1).id\n )\n\n # Try creating the same product again, it should NOT create a\n # new one and blow with user error due to sql constraint\n self.assertRaises(\n UserError,\n self.ProductTemplate.create_using_ps_data, product_data\n )", "def create(cls, vlist):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n templates = super(Template, cls).create(vlist)\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return templates", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n 
{'create':variants_to_create})\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % (instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n 
common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. \\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])", "def export_direct_in_shopify(self, product_templates):\n shopify_template_id = False\n sequence = 0\n\n shopify_templates = shopify_template_obj = self.env[\"shopify.product.template.ept\"]\n shopify_product_obj = self.env[\"shopify.product.product.ept\"]\n shopify_product_image_obj = self.env[\"shopify.product.image.ept\"]\n\n variants = product_templates.product_variant_ids\n shopify_instance = self.shopify_instance_id\n\n for variant in variants:\n if not variant.default_code:\n continue\n product_template = variant.product_tmpl_id\n shopify_template = shopify_template_obj.search([\n (\"shopify_instance_id\", \"=\", shopify_instance.id),\n (\"product_tmpl_id\", \"=\", product_template.id)])\n\n if not shopify_template:\n shopify_product_template_vals = (\n {\"product_tmpl_id\":product_template.id,\n \"shopify_instance_id\":shopify_instance.id,\n \"shopify_product_category\": product_template.categ_id.id,\n \"name\": product_template.name,\n \"description\": variant.description,\n \"exported_in_shopify\": False,\n \"website_published\":False\n })\n shopify_template = shopify_template_obj.create(shopify_product_template_vals)\n sequence = 1\n shopify_template_id = shopify_template.id\n else:\n if shopify_template_id != shopify_template.id:\n shopify_product_template_vals = (\n {\"product_tmpl_id\": product_template.id,\n \"shopify_instance_id\": shopify_instance.id,\n \"shopify_product_category\": product_template.categ_id.id,\n \"name\": product_template.name,\n \"description\": variant.description\n })\n shopify_template.write(shopify_product_template_vals)\n shopify_template_id = shopify_template.id\n if shopify_template not in shopify_templates:\n shopify_templates += shopify_template\n\n # For adding all odoo images into shopify layer.\n shoify_product_image_list = []\n for odoo_image in product_template.ept_image_ids.filtered(lambda x: not x.product_id):\n shopify_product_image = shopify_product_image_obj.search_read(\n 
[(\"shopify_template_id\", \"=\", shopify_template_id),\n (\"odoo_image_id\", \"=\", odoo_image.id)], [\"id\"])\n if not shopify_product_image:\n shoify_product_image_list.append({\n \"odoo_image_id\": odoo_image.id,\n \"shopify_template_id\": shopify_template_id\n })\n if shoify_product_image_list:\n shopify_product_image_obj.create(shoify_product_image_list)\n\n if shopify_template and shopify_template.shopify_product_ids and \\\n shopify_template.shopify_product_ids[\n 0].sequence:\n sequence += 1\n\n shopify_variant = shopify_product_obj.search([\n (\"shopify_instance_id\", \"=\", self.shopify_instance_id.id),\n (\"product_id\", \"=\", variant.id),\n (\"shopify_template_id\", \"=\", shopify_template_id)])\n shopify_variant_vals = ({\n \"shopify_instance_id\": shopify_instance.id,\n \"product_id\": variant.id,\n \"shopify_template_id\": shopify_template.id,\n \"default_code\": variant.default_code,\n \"name\": variant.name,\n \"sequence\": sequence\n })\n if not shopify_variant:\n shopify_variant = shopify_product_obj.create(shopify_variant_vals)\n else:\n shopify_variant.write(shopify_variant_vals)\n\n # For adding all odoo images into shopify layer.\n odoo_image = variant.ept_image_ids\n if odoo_image:\n shopify_product_image = shopify_product_image_obj.search_read(\n [(\"shopify_template_id\", \"=\", shopify_template_id),\n (\"shopify_variant_id\", \"=\", shopify_variant.id),\n (\"odoo_image_id\", \"=\", odoo_image[0].id)], [\"id\"])\n if not shopify_product_image:\n shopify_product_image_obj.create({\n \"odoo_image_id\": odoo_image[0].id,\n \"shopify_variant_id\": shopify_variant.id,\n \"shopify_template_id\": shopify_template_id,\n })\n return True", "def prepare_product_data(self, woo_template, publish, update_price,\n update_image, basic_detail, common_log_id, model_id):\n template = woo_template.product_tmpl_id\n instance = woo_template.woo_instance_id\n data = {}\n if basic_detail:\n description = ''\n short_description = ''\n if woo_template.woo_description:\n woo_template_id = woo_template.with_context(lang=instance.woo_lang_id.code)\n description = woo_template_id.woo_description\n\n if woo_template.woo_short_description:\n woo_template_id = woo_template.with_context(lang=instance.woo_lang_id.code)\n short_description = woo_template_id.woo_short_description\n\n weight = self.convert_weight_by_uom(template.weight, instance)\n\n data = {\n 'enable_html_description':True, 'enable_html_short_description':True,\n 'type':'simple', 'name':woo_template.name,\n 'description':description, 'weight':str(weight),\n 'short_description':short_description,\n 'taxable':woo_template.taxable and 'true' or 'false',\n 'shipping_required':'true'\n }\n woo_categ_ids = list(map(int,woo_template.woo_categ_ids.mapped(\"woo_categ_id\")))\n if all(woo_categ_ids):\n categ_ids = [{'id': cat_id} for cat_id in woo_categ_ids]\n data.update({'categories':categ_ids})\n\n woo_tag_ids = list(map(int,woo_template.woo_tag_ids.mapped(\"woo_tag_id\")))\n if all(woo_tag_ids):\n tag_ids = [{'id': tag_id} for tag_id in woo_tag_ids]\n data.update({'tags':tag_ids})\n\n attributes, is_variable = self.get_product_attribute(template, instance, common_log_id,\n model_id)\n if is_variable:\n data.update({'type':'variable'})\n\n if template.attribute_line_ids:\n variations = []\n for variant in woo_template.woo_product_ids:\n variation_data = {}\n product_variant = self.get_variant_data(variant, instance, update_image)\n variation_data.update(product_variant)\n if update_price:\n if data.get('type') == 'simple':\n 
data.update(self.get_product_price(instance, variant))\n else:\n variation_data.update(self.get_product_price(instance, variant))\n variations.append(variation_data)\n default_att = variations and variations[0].get('attributes') or []\n data.update({\n 'attributes':attributes, 'default_attributes':default_att,\n 'variations':variations\n })\n if data.get('type') == 'simple':\n data.update({'sku':str(variant.default_code),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n variant = woo_template.woo_product_ids\n data.update(self.get_variant_data(variant, instance, update_image))\n if update_price:\n data.update(self.get_product_price(instance, variant))\n\n if publish == 'publish':\n data.update({'status':'publish'})\n else:\n data.update({'status':'draft'})\n\n if update_image:\n tmpl_images = []\n tmpl_images += self.get_gallery_images(instance, woo_template, template)\n tmpl_images and data.update({\"images\":tmpl_images})\n return data", "def new_object_data(self):\n self.product_fixture = self.F.ProductFactory.create()\n modifiers = (self.datetime, self.resource_name)\n fields = {\n u\"name\": unicode(\"test_%s_%s\" % modifiers),\n u\"description\": unicode(\"test %s %s\" % modifiers),\n u\"product\": unicode(self.get_detail_url(\n \"product\", self.product_fixture.id)),\n u\"status\": unicode(\"draft\"),\n u\"created_by\": None,\n u\"modified_by\": None,\n u\"modified_on\": self.utcnow.strftime(\"%Y-%m-%d %H:%M:%S\"),\n }\n return fields", "def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def createProducts2(self): \n shop = self.context\n \n id = \"category\"\n shop.categories.manage_addProduct[\"easyshop.core\"].addCategory(id, title=\"Category\") \n category = shop.categories.get(id)\n \n wftool = getToolByName(self.context, \"portal_workflow\")\n wftool.doActionFor(category, \"publish\")\n\n for i in range(1, 21):\n title = self.createTitle()\n id = title.lower()\n shop.products.manage_addProduct[\"easyshop.core\"].addProduct(id, title=title)\n product = shop.products.get(id)\n\n img = os.path.join(package_home(globals()), '../../tests/test_2.jpg')\n img = open(img)\n \n product.setImage(img)\n\n category.addReference(product, \"categories_products\") \n wftool.doActionFor(product, \"publish\")\n \n self.context.portal_catalog.manage_catalogRebuild()", "def createProduct(self):\n return _libsbml.Model_createProduct(self)", "def setUpTestData(cls):\n Product_type.objects.create(\n name='New_Product', display_name='New Product.')", "def setUp(self):\n super().setUp()\n list_of_product_types = [\n 'default_product_variant',\n 'multiple_product_variants',\n 'ceo_title'\n ]\n self.new_product = eval(f\"get_new_product_with_\" \\\n f\"{list_of_product_types[randint(0, len(list_of_product_types) - 1)]}()\")\n response = ProcessRequest('products.json').send_request(\n 'POST',\n data=self.new_product,\n expected_return_codes=[201],\n )\n self.product_id = response.response['product']['id']", "def setup(self):\n print(\"INIT DATA\")\n\n self.nutella = 
Product.objects.create(name=\"nutella\", nutriscore=\"e\")", "def create(cls, **kwargs):\n response = cls.get_client().create_product(**kwargs)\n object_details = cls._flatten_object_details(response)\n return cls(**object_details)", "def create_products():\n try:\n # Instantiate the class and separate objects into two lists\n challenge = Challenge()\n # Get all products\n product_base = challenge.get_products(\"product_groups.json\")\n # Divide the products into independent (no parent) and dependent (with parents)\n independent, dependent = challenge.filter_products(product_base)\n if not challenge.save_independent_products(independent):\n Exception(\"Function save_independent_products() couldn't complete\")\n\n if not challenge.save_dependent_products(\n dependent, product_base, len(independent)\n ):\n raise Exception(\"Function save_dependent_products() couldn't complete\")\n\n except Exception as err:\n logging.error(f\"[ERROR] While processing the objects. Traceback: {err}\")\n return False\n else:\n return True", "def post_service_template_create(self, resource_dict):\n pass", "def pre_service_template_create(self, resource_dict):\n pass", "def prepare_template_vals(self, woo_instance, product_response):\n template_info_vals = {\n \"name\":product_response.get(\"name\"),\n \"woo_tmpl_id\":product_response.get(\"id\"),\n \"woo_instance_id\":woo_instance.id,\n \"woo_short_description\":product_response.get(\"short_description\", \"\"),\n \"woo_description\":product_response.get(\"description\", \"\"),\n \"website_published\":True if product_response[\"status\"] == \"publish\" else False,\n \"taxable\":True if product_response[\"tax_status\"] == \"taxable\" else False,\n \"woo_categ_ids\":product_response.get(\"categories\"),\n \"woo_tag_ids\":product_response.get(\"tags\"),\n \"total_variants_in_woo\":len(product_response[\"variations\"]),\n \"woo_product_type\":product_response[\"type\"],\n \"active\":True\n }\n if product_response.get(\"date_created\"):\n template_info_vals.update(\n {\"created_at\":product_response.get(\"date_created\").replace(\"T\", \" \")})\n if product_response.get(\"date_modified\"):\n template_info_vals.update(\n {\"updated_at\":product_response.get(\"date_modified\").replace(\"T\", \" \")})\n return template_info_vals", "def shopify_create_product_data_queue(self, instance, template_ids=''):\n instance.connect_in_shopify()\n only_alphabets = []\n if template_ids:\n # Below one line is used to find only character values from template ids.\n only_alphabets = re.findall(\"[a-zA-Z]+\", template_ids)\n if len(template_ids.split(',')) <= 50:\n # template_ids is a list of all template ids which response did not given by\n # shopify.\n template_ids = list(set(re.findall(re.compile(r\"(\\d+)\"),template_ids)))\n results = shopify.Product().find(ids=','.join(template_ids))\n if results:\n _logger.info('Length of Shopify Products %s import from instance name: %s' % (\n len(results), instance.name))\n template_ids = [template_id.strip() for template_id in template_ids]\n # Below process to identify which id response did not give by Shopify.\n [template_ids.remove(str(result.id)) for result in results if str(result.id) in template_ids]\n else:\n raise Warning(_('Please enter the product template ids 50 or less'))\n else:\n if not instance.shopify_last_date_product_import:\n results = shopify.Product().find(status='active', limit=250)\n if len(results) >= 250:\n results = self.shopify_list_all_products(results)\n #results = self.get_product(results)\n else:\n # 
updated_at_min =datetime.strptime(pytz.utc.localize(instance.shopify_last_date_product_import).astimezone(\n # pytz.timezone(instance.shopify_store_time_zone[12:] or 'UTC')).strftime(\n # '%Y-%m-%d %H:%M:%S'), \"%Y-%m-%d %H:%M:%S\")\n results = shopify.Product().find(status='active',\n updated_at_min=instance.shopify_last_date_product_import,limit=250) # Change by bhavesh jadav 13/12/2019 limit=250\n if len(results) >= 250:\n results=self.shopify_list_all_products(results)\n if results:\n instance.shopify_last_date_product_import = datetime.now()\n without_gift_card_products = []\n for result in results:\n if result.to_dict().get('variants')[0].get('fulfillment_service') != 'gift_card':\n without_gift_card_products.append(result)\n results = without_gift_card_products\n if not results:\n _logger.info(\n 'No Products found to be imported from Shopify.')\n return False\n _logger.info('Total synced products - {}'.format(len(results)))\n count = 0\n one_time_create = True\n product_queue_list = []\n for result in results:\n if one_time_create:\n product_queue_id = self.shopify_create_product_queue(instance)\n product_queue_list.append(product_queue_id.id)\n _logger.info('Shopify Product Queue created. Queue name is {}'.format(\n product_queue_id.name))\n one_time_create = False\n if template_ids or only_alphabets:\n product_queue_id.message_post(body=\"%s products are not imported\" %(','.join(template_ids+only_alphabets)))\n self.shopify_create_product_data_queue_line(result, instance, product_queue_id)\n count = count + 1\n if count == 100:\n count = 0\n one_time_create = True\n return product_queue_list", "def write(cls, templates, values, *args):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n rv = super(Template, cls).write(templates, values, *args)\n\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return rv", "def create(self, vals):\n # TDE FIXME: context brol\n tools.image_resize_images(vals)\n if vals[\"no_piece\"]:\n vals[\"order_\"] = self.return_order(vals[\"no_piece\"])\n template = super(product_piece, self).create(vals)\n return template", "def migrate_from_18_8_0(self, globals_dict):\n\n bv2kw = globals_dict['bv2kw']\n products_Product = rt.models.products.Product\n \n @override(globals_dict)\n def create_products_product(id, name, description, category_id, delivery_unit, vat_class, number_of_events, min_asset, sales_account_id, sales_price):\n # if delivery_unit: delivery_unit = settings.SITE.models.products.DeliveryUnits.get_by_value(delivery_unit)\n # if vat_class: vat_class = settings.SITE.models.vat.VatClasses.get_by_value(vat_class)\n if sales_price is not None: sales_price = Decimal(sales_price)\n kw = dict()\n kw.update(id=id)\n if name is not None: kw.update(bv2kw('name',name))\n if description is not None: kw.update(bv2kw('description',description))\n kw.update(category_id=category_id)\n kw.update(delivery_unit=delivery_unit)\n kw.update(vat_class=vat_class)\n #kw.update(number_of_events=number_of_events)\n #kw.update(min_asset=min_asset)\n kw.update(sales_account_id=sales_account_id)\n kw.update(sales_price=sales_price)\n return products_Product(**kw)\n\n return '18.11.0'", "def create_product(sender, instance, **kwargs):\n if kwargs.get(\"created\"): # True just for first time when obj created\n logger.info(f\"Emails send to user with new product <{instance}>\")", "def prepare_woo_template_vals(self, 
template_data, odoo_template_id, import_for_order,\n woo_instance, common_log_book_id):\n if import_for_order:\n woo_category_ids = self.sync_woo_categ_with_product_v1_v2_v3(woo_instance,\n common_log_book_id,\n template_data[\n \"woo_categ_ids\"],\n woo_instance.sync_images_with_product)\n woo_tag_ids = self.sync_woo_tags_with_product_v1_v2_v3(woo_instance,\n template_data[\"woo_tag_ids\"])\n else:\n woo_category_ids = []\n woo_tag_ids = []\n for woo_category in template_data[\"woo_categ_ids\"]:\n woo_categ = self.env[\"woo.product.categ.ept\"].search(\n [(\"woo_categ_id\", \"=\", woo_category.get(\"id\")),\n ('woo_instance_id', '=', woo_instance.id)], limit=1).id\n woo_categ and woo_category_ids.append(woo_categ)\n for woo_tag in template_data[\"woo_tag_ids\"]:\n product_tag = self.env[\"woo.tags.ept\"].search(\n [(\"woo_tag_id\", \"=\", woo_tag.get(\"id\")),\n ('woo_instance_id', '=', woo_instance.id)], limit=1).id\n product_tag and woo_tag_ids.append(product_tag)\n\n template_data.update({\n \"product_tmpl_id\":odoo_template_id,\n \"exported_in_woo\":True,\n \"woo_categ_ids\":[(6, 0, woo_category_ids)],\n \"woo_tag_ids\":[(6, 0, woo_tag_ids)]\n })\n return template_data", "def create_properties(self, properties):\n self._update_metadata_date(properties)\n self._backend.insert_product_properties(properties)", "def write(self, vals):\n woo_product_product_obj = self.env['woo.product.product.ept']\n if 'active' in vals.keys():\n for woo_template in self:\n woo_template.woo_product_ids and woo_template.woo_product_ids.write(\n {'active':vals.get('active')})\n if vals.get('active'):\n woo_variants = woo_product_product_obj.search(\n [('woo_template_id', '=', woo_template.id),\n ('woo_instance_id', '=', woo_template.woo_instance_id.id),\n ('active', '=', False)])\n woo_variants and woo_variants.write({'active':vals.get('active')})\n res = super(WooProductTemplateEpt, self).write(vals)\n return res" ]
[ "0.68906796", "0.6637699", "0.6543743", "0.64923847", "0.6467413", "0.6255171", "0.61940205", "0.60161066", "0.5978016", "0.5945357", "0.58300525", "0.57808197", "0.5770319", "0.5736123", "0.5729061", "0.57229173", "0.57034343", "0.56682295", "0.5609585", "0.55937654", "0.5555376", "0.5530344", "0.5513479", "0.550801", "0.54909074", "0.541601", "0.5411149", "0.54071754", "0.5398177", "0.5397002" ]
0.7236754
0
Create a product Variant instance and its ConnectedVariantAttribute
def create(self, validated_data):
    variant = ProductVariant(
        name=validated_data['name'],
        product=validated_data['product']['id'],
        active=validated_data.get('active', False)
    )
    if validated_data['price'].get('amount'):
        variant.price = Money(
            amount=validated_data['price'].get('amount', Decimal(0.0)),
            currency=validated_data.get('price_currency', settings.DEFAULT_CURRENCY),
        )
    variant.save()

    # Create each ConnectedVariantAttribute instance associated with it
    for item in validated_data.get('attributes', []):
        ConnectedVariantAttribute.objects.create(
            variant=variant,
            connection=item['connection'],
            value=item['value']
        )

    return variant
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_product(self):\n product = self.product_obj.create({\n \"default_code\": 'A2330',\n \"product_tmpl_id\":\n self.ref(\"product.product_product_4_product_template\"),\n \"attribute_value_ids\": [(6, 0, [\n self.ref('product.product_attribute_value_1'),\n self.ref('product_lifecycle.product_attribute_value_6'),\n self.ref('product.product_attribute_value_5')])],\n \"replacement_product_ids\": [(\n 6, 0, [self.ref('product_lifecycle.product_product_4e')]\n )]})\n return product", "def create(self, validated_data):\n\n # Create the Attribute instance\n product = Product(\n name=validated_data['name'],\n product_template=validated_data['product_template']['id'],\n description=validated_data.get('description'),\n active=validated_data.get('active', False)\n )\n if validated_data['min_price'].get('amount'):\n product.min_price = Money(\n amount=validated_data['min_price'].get('amount', Decimal(0.0)),\n currency=validated_data.get('min_price_currency', settings.DEFAULT_CURRENCY),\n )\n product.save()\n\n # Create each ConnectedProductAttribute instance associated with it\n for item in validated_data.get('attributes', []):\n ConnectedProductAttribute.objects.create(\n product=product,\n connection=item['connection'],\n value=item['value']\n )\n # # Fully working, commented for make structure easier\n # # by allowing variants created only at one location\n # # Create each Variant instance associated with it\n # for item in validated_data.get('variants', []):\n # variant = ProductVariant(\n # name=item['name'],\n # product=product,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price'].get('amount', Decimal(0.0)),\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # variant.save()\n\n return product", "def create_variant (self):\n return self.create_name().create_variant('Variant',\n [self.create_topic()])", "def createProduct(self):\n return _libsbml.Model_createProduct(self)", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 
1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. 
\\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wcapi = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n # var_url = ''\n price = 0.0\n if variant.variant_id:\n info = {'id':variant.variant_id}\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku':variant.default_code, 'weight':str(weight),\n \"manage_stock\":variant.woo_is_manage_stock})\n else:\n attributes = \\\n self.get_product_attribute(template.product_tmpl_id, instance, common_log_id,\n model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0,\n partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price':str(price), 'sale_price':str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku':variant.default_code,\n \"manage_stock\":variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price':str(price), 'sale_price':str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'update':woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % (res.content)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % (template.name))\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\":attributes})\n res = wcapi.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wcapi.post('products/%s/variations/batch' % (data.get('id')),\n {'create':variants_to_create})\n try:\n response = res.json()\n except Exception as e:\n message = \"Json Error : While update products to WooCommerce for instance %s.\" \\\n \" \\n%s\" % 
(instance.name, e)\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id,\n common_log_id,\n False)\n else:\n variant_id = product.get(\"id\")\n sku = product.get(\"sku\")\n variant = template.woo_product_ids.filtered(lambda x:x.default_code == sku)\n if variant:\n variant.write({\"variant_id\":variant_id, \"exported_in_woo\":True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def button_generate_variants(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n variants_obj = self.pool.get('product.product')\n temp_val_list = []\n\n for product_temp in self.browse(cr, uid, ids, context):\n #for temp_type in product_temp.dimension_type_ids:\n # temp_val_list.append([temp_type_value.id for temp_type_value in temp_type.value_ids] + (not temp_type.mandatory_dimension and [None] or []))\n #TODO c'est quoi ça??\n # if last dimension_type has no dimension_value, we ignore it\n # if not temp_val_list[-1]:\n # temp_val_list.pop()\n res = {}\n for value in product_temp.value_ids:\n if res.get(value.dimension_id, False):\n res[value.dimension_id] += [value.id]\n else:\n res[value.dimension_id] = [value.id]\n for dim in res:\n temp_val_list += [res[dim] + (not dim.mandatory_dimension and [None] or [])]\n\n if temp_val_list:\n list_of_variants = self._create_variant_list(cr, uid, ids, temp_val_list, context)\n existing_product_ids = variants_obj.search(cr, uid, [('product_tmpl_id', '=', product_temp.id)])\n existing_product_dim_value = variants_obj.read(cr, uid, existing_product_ids, ['dimension_value_ids'])\n list_of_variants_existing = [x['dimension_value_ids'] for x in existing_product_dim_value]\n for x in list_of_variants_existing:\n x.sort()\n for x in list_of_variants:\n x.sort()\n list_of_variants_to_create = [x for x in list_of_variants if not x in list_of_variants_existing]\n \n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"variant existing : %s, variant to create : %s\" % (len(list_of_variants_existing), len(list_of_variants_to_create)))\n count = 0\n for variant in list_of_variants_to_create:\n count += 1\n \n vals = self.product_product_variants_vals(cr, uid, product_temp, variant, context) \n product_id = variants_obj.create(cr, uid, vals, {'generate_from_template' : True})\n if count%50 == 0:\n cr.commit()\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"product created : %s\" % (count,))\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"product created : %s\" % (count,))\n\n product_ids = self.get_products_from_product_template(cr, uid, ids, context=context)\n\n # FIRST, Generate/Update variant names ('variants' field)\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"Starting to generate/update variant names...\")\n self.pool.get('product.product').build_variants_name(cr, uid, product_ids, context=context)\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"End of the generation/update of variant names.\")\n # SECOND, Generate/Update product codes and properties (we may need variants name for that)\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"Starting to generate/update product codes and properties...\")\n 
self.pool.get('product.product').build_product_code_and_properties(cr, uid, product_ids, context=context)\n LOGGER.notifyChannel('product_variant_multi', netsvc.LOG_INFO, \"End of the generation/update of product codes and properties.\")\n LOGGER.notifyChannel('product_variant_multi_advanced', netsvc.LOG_INFO, \"Starting to generate/update product names...\")\n\n context['variants_values'] = {}\n for product in self.pool.get('product.product').read(cr, uid, product_ids, ['variants'], context=context):\n context['variants_values'][product['id']] = product['variants']\n self.pool.get('product.product').build_product_name(cr, uid, product_ids, context=context)\n LOGGER.notifyChannel('product_variant_multi_advanced', netsvc.LOG_INFO, \"End of generation/update of product names.\")\n\n return product_ids", "def create(cls, **kwargs):\n response = cls.get_client().create_product(**kwargs)\n object_details = cls._flatten_object_details(response)\n return cls(**object_details)", "def createProduct(self):\n return _libsbml.Reaction_createProduct(self)", "def create_item_variant():\n if not request.json:\n abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('item_code', type=int, required=True, location='json', help=\"Item code missing\")\n parser.add_argument('cost_price', type=float, required=True, location='json', help=\"Cost Price missing\")\n parser.add_argument('selling_price', type=float, required=True, location='json', help=\"Selling Price missing\")\n parser.add_argument('quantity', type=int, required=True, location='json', help=\"Quantity missing\")\n parser.add_argument('username', type=str, required=True, location='json', help=\"Username missing\")\n args = parser.parse_args()\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n new_variant = dict(\n cost_price = args['cost_price'],\n selling_price = args['selling_price'],\n quantity = args['quantity']\n )\n try:\n u = models.Items.query.filter_by(item_code=args['item_code']).first()\n if u is None:\n return make_response(jsonify({'error': 'Item does not exists'}), 400)\n v = models.Variants(**new_variant)\n u.variants = v\n db.session.add(u)\n db.session.commit()\n except sqlalchemy.exc.IntegrityError, e:\n return make_response(jsonify({'error': 'Item code already exists.'}), 400)\n return make_response(jsonify({'success': True}))", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])", "def create_variant(name, task, state=\"\"):\n\n return Variant(name=name, task=task, state=state)", "def update(self, instance, validated_data):\n\n # Update the Product instance\n instance.name = validated_data.get('name', instance.name)\n instance.description = validated_data.get('description', instance.description)\n instance.active = validated_data.get('active', instance.active)\n if validated_data.get('min_price'):\n if validated_data['min_price'].get('amount'):\n instance.min_price = Money(\n amount=validated_data['min_price'].get('amount', instance.min_price.amount),\n currency=validated_data.get('min_price_currency', instance.min_price_currency),\n )\n elif validated_data.get('min_price_currency'):\n instance.min_price = Money(\n amount=instance.min_price.amount,\n currency=validated_data['min_price_currency'],\n )\n instance.save()\n\n if validated_data.get('attributes'):\n # ConnectedProductAttribute\n # 1. 
create a list of ids out of passed data\n attributes_ids = [item.get('id') for item in validated_data['attributes']]\n\n # 2. delete any association\n # which is not included in passed data\n for attribute in instance.attributes.all():\n if attribute.id not in attributes_ids:\n attribute.delete()\n\n # 3. create or update all association\n for item in validated_data['attributes']:\n attribute = ConnectedProductAttribute(\n id=item.get('id'),\n product=instance,\n connection=item['connection'],\n value=item['value']\n )\n attribute.save()\n\n # # Fully working, see above at create\n # # ProductVariant\n # # 1. create a list of ids out of passed data\n # variants_ids = [item.get('id') for item in validated_data['variants']]\n\n # # 2. delete any association\n # # which is not included in passed data\n # for variant in instance.variants.all():\n # if variant.id not in variants_ids:\n # variant.delete()\n\n # # 3. create or update all association\n # for item in validated_data['variants']:\n # variant = ProductVariant(\n # id=item.get('id'),\n # name=item['name'],\n # product=instance,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price']['amount'],\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # elif item.get('price_currency'):\n # variant.price = Money(\n # amount=variant.price.amount,\n # currency=item['price_currency'],\n # )\n # variant.save()\n\n return instance", "def add_product( product_id, reaction, model, compartment = \"default\", arguments = DEFAULT_ARGUMENTS):\n if product_id is None and len( reaction.getListOfProducts()) > 0:\n product_ref = reaction.getListOfProducts()[0]\n product_species = product_ref.getSpecies()\n return product_species\n else:\n if product_id is None:\n reaction_id = reaction.getId();\n product_id = reaction_id + \"_product_nr\" + str(len( reaction.getListOfProducts()));\n \n product_prefix = PRODUCT_PREFIX.get( reaction.getName().lower()) \n if product_prefix is None:\n product_prefix = \"\"\n\n reactant = None\n if len( reaction.getListOfReactants()) > 0:\n reactant = model.getSpecies( reaction.getListOfReactants()[0].getSpecies())\n if reactant != None:\n product_name = product_prefix + reactant.getName();\n else:\n product_name = product_prefix + \"Product\";\n add_species( None, model, id = product_id, name = product_name, compartment = compartment, arguments = arguments);\n \n product_ref = reaction.createProduct()\n check( product_ref, 'create product reference', arguments = arguments);\n check( product_ref.setSpecies( product_id), 'assign product species', arguments = arguments);\n check( product_ref.setMetaId( \"metaid_0000\" + product_id), 'set meta ID', arguments = arguments);\n check( product_ref.addCVTerm( add_cvterm( GENERIC_REACTION_SBO_MAPPING[\"product\"])), 'set controlled vocab SBO term for product', arguments = arguments);\n # check( product_ref.addCVTerm(add_cvterm(STANDOFF_ENTITY_TO_SBO_MAPPING[product.type])), 'set controlled vocab SBO term 2 for product')\n return product_id", "def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. 
This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def prepare_simple_product_variant_vals(self, woo_instance, product_response,\n woo_product_template_id):\n template_title = product_response.get(\"name\")\n variant_info = {\n \"name\":template_title, \"default_code\":product_response.get(\"sku\"),\n \"variant_id\":woo_product_template_id, \"woo_instance_id\":woo_instance.id,\n \"exported_in_woo\":True,\n \"product_url\":product_response.get(\"permalink\", \"\"),\n \"woo_is_manage_stock\":product_response[\"manage_stock\"],\n \"updated_at\":product_response.get(\"date_modified\").replace(\"T\", \" \"),\n \"active\":True\n }\n if product_response.get(\"date_created\"):\n variant_info.update(\n {\"created_at\":product_response.get(\"date_created\").replace(\"T\", \" \")})\n if product_response.get(\"date_modified\"):\n variant_info.update(\n {\"updated_at\":product_response.get(\"date_modified\").replace(\"T\", \" \")})\n return variant_info", "def copy(self, cr, uid, id, default=None, context=None):\n ## If no default data is provided, set it to empty dict:\n if not default:\n default = {}\n\n ## Get the product:\n product = self.browse(cr, uid, id, context=context)\n\n ## Set the default code of the default dictionary:\n default[\"default_code\"] = (product.default_code and product.default_code + \" (copy)\") or False\n\n ## Done, return with super method's result:\n return super(ProductVariantModel, self).copy(cr, uid, id, default=default, context=context)", "def _create(self, data):\n model = self.model\n data = self._check_odoo_attribute(data)\n binding = model.create(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%s %d created from magento %s',\n self.model._name, binding.id, self.magento_id)\n return binding", "def create(self, validated_data):\n variants_data = validated_data.pop('variants')\n item = Item.objects.create(**validated_data)\n\n # loop through the variant data and create a variant.\n for variant_data in variants_data:\n properties_data = variant_data.pop('properties')\n variant = Variant.objects.create(item=item, **variant_data)\n variant.last_modified_by = item.last_modified_by\n variant.save()\n\n # loop through the property for a variant and create.\n for property_data in properties_data:\n property = Property.objects.create(variant=variant,\n **property_data)\n property.last_modified_by = item.last_modified_by\n property.save()\n\n return item", "def get_variant_data(self, variant, instance, update_image):\n att = []\n woo_attribute_obj = self.env['woo.product.attribute.ept']\n variation_data = {}\n att_data = {}\n for attribute_value in variant.product_id.product_template_attribute_value_ids:\n if instance.woo_attribute_type == 'select':\n woo_attribute = woo_attribute_obj.search(\n [('name', '=', attribute_value.attribute_id.name),\n ('woo_instance_id', '=', instance.id),\n ('exported_in_woo', '=', True)], limit=1)\n if not woo_attribute:\n woo_attribute = woo_attribute_obj.search(\n [('attribute_id', '=', attribute_value.attribute_id.id),\n ('woo_instance_id', '=', instance.id),\n ('exported_in_woo', '=', True)], limit=1)\n att_data = {\n 'id':woo_attribute and woo_attribute.woo_attribute_id,\n 'option':attribute_value.name\n }\n if instance.woo_attribute_type 
== 'text':\n att_data = {\n 'name':attribute_value.attribute_id.name,\n 'option':attribute_value.name\n }\n att.append(att_data)\n if update_image:\n variation_data.update(self.get_variant_image(instance, variant))\n\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n\n variation_data.update(\n {\n 'attributes':att, 'sku':str(variant.default_code),\n 'weight':str(weight), \"manage_stock\":variant.woo_is_manage_stock\n })\n return variation_data", "def migrate_from_18_8_0(self, globals_dict):\n\n bv2kw = globals_dict['bv2kw']\n products_Product = rt.models.products.Product\n \n @override(globals_dict)\n def create_products_product(id, name, description, category_id, delivery_unit, vat_class, number_of_events, min_asset, sales_account_id, sales_price):\n # if delivery_unit: delivery_unit = settings.SITE.models.products.DeliveryUnits.get_by_value(delivery_unit)\n # if vat_class: vat_class = settings.SITE.models.vat.VatClasses.get_by_value(vat_class)\n if sales_price is not None: sales_price = Decimal(sales_price)\n kw = dict()\n kw.update(id=id)\n if name is not None: kw.update(bv2kw('name',name))\n if description is not None: kw.update(bv2kw('description',description))\n kw.update(category_id=category_id)\n kw.update(delivery_unit=delivery_unit)\n kw.update(vat_class=vat_class)\n #kw.update(number_of_events=number_of_events)\n #kw.update(min_asset=min_asset)\n kw.update(sales_account_id=sales_account_id)\n kw.update(sales_price=sales_price)\n return products_Product(**kw)\n\n return '18.11.0'", "def create_product():\n mongo = MongoClient(Config.MONGO_URI)\n db_operations = mongo.db.product\n data = request.get_json(force=True) or {}\n if 'title' not in data or 'description' not in data or 'params' not in data:\n return bad_request(t['empty_field'])\n new_product = Product()\n if Product.params_is_valid(data):\n new_product.save_to_db(data, db_operations)\n\n response = jsonify(new_product.to_dict())\n response.status_code = 201\n response.headers['Location'] = url_for('api.get_product_by_id', product_id=new_product._id)\n return response\n else:\n return bad_request(t['invalid_value'])", "def createGeneProduct(self):\n return _libsbml.FbcModelPlugin_createGeneProduct(self)", "def create_product(new_product, owner_name):\n if not isinstance(new_product, dict):\n raise ValueError(f\"New product {new_product} is not a dict\")\n new_instance = Products(\n serial_number=new_product['serial_number'],\n type_name=new_product['type_name'],\n owner_name=owner_name,\n product_condition=new_product['product_condition'] == 'true',\n model=new_product['model'],\n producent=new_product['producent'],\n additonal_info=new_product['additional_info'],\n )\n if not get_critical_level(owner_name, [new_product['type_name']]).all():\n new_critical_entry = CriticalLevels(\n business=owner_name,\n type_name=new_product['type_name']\n )\n else:\n new_critical_entry = None\n return new_instance, new_critical_entry", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def create_product_from_parameters(self, cr, uid, vals, context=None):\n if 'tmpl_id' not in vals:\n raise osv.except_osv('Warning', _('Wrong data'))\n \n product_obj = self.pool.get('product.product')\n multi_type_obj = self.pool.get('product.variant.dimension.type')\n multi_option_obj = self.pool.get('product.variant.dimension.option')\n multi_value_obj = self.pool.get('product.variant.dimension.value')\n \n tmpl_id = int(vals['tmpl_id'])\n multi_fields = 
{}\n for key, val in vals.items():\n if key.startswith('product_tmpl_id_'):\n dim_key = int(key[16:])\n opt_key = val\n # Create new option, if it not exists\n if hasattr(opt_key, 'startswith') and opt_key.startswith('new_'):\n opt_val = opt_key[4:]\n opt_key = multi_option_obj.create(cr, uid, {\n 'name': opt_val,\n 'code': opt_val,\n 'dimension_id': dim_key,\n })\n else:\n opt_key = int(opt_key)\n \n multi_fields[dim_key] = opt_key\n \n template = self.pool.get('product.template').browse(cr, uid, tmpl_id, context)\n if not template:\n raise osv.except_osv('Warning', _('Wrong template'))\n \n product_name = template.name\n parameters_values = ''\n multi_value_ids = []\n if multi_fields:\n # Set options to template, if it not exists in it\n for dim, opt in multi_fields.items():\n tmps = multi_value_obj.search(cr, uid, [('dimension_id', '=', dim), ('option_id', '=', opt), ('product_tmpl_id', '=', tmpl_id)])\n if not tmps:\n multi_value_obj.create(cr, uid, {\n 'product_tmpl_id': tmpl_id,\n 'dimension_id': dim,\n 'option_id': opt,\n })\n \n product = product_obj.multi_search_one(cr, uid, tmpl_id, multi_fields)\n if len(product) == 1:\n p = product_obj.browse(cr, uid, product[0])\n msg = u'Продукт \"%s\" был использован' % p.product_name\n self.log(cr, uid, product[0], msg)\n return product[0]\n elif len(product) > 1:\n raise osv.except_osv('Error', 'Количество таких продуктов > 1')\n \n for key, val in multi_fields.items():\n key = int(key)\n val = int(val)\n multi_value_id = multi_value_obj.search(cr, uid, [('dimension_id', '=', key), ('option_id', '=', val), ('product_tmpl_id', '=', tmpl_id)], context=context)\n if not multi_value_id:\n raise osv.except_osv('Warning', _('Wrong dimension'))\n else:\n multi_value_ids.append(multi_value_id[0])\n \n type = multi_type_obj.browse(cr, uid, key)\n type_desc = type.description or type.name\n type_name = type.name\n option = multi_option_obj.browse(cr, uid, val)\n option_code = option.code or option.name\n option_name = option.name\n \n product_name += ' %s - %s ' % (type_desc, option_code)\n \n product_id = product_obj.create(cr, uid, {\n 'product_tmpl_id': tmpl_id, \n 'product_name': product_name,\n }, context=context)\n if not product_id:\n raise osv.except_osv('Warning', _('Error in creating product'))\n else:\n product_name = self.pool.get('product.product').browse(cr, uid, product_id).product_name\n msg = _(u'Продукт \"%s\" был создан') % product_name\n self.log(cr, uid, product_id, msg)\n \n for val in multi_value_ids:\n query = \"\"\" INSERT INTO product_product_dimension_rel \n (dimension_id, product_id) \n VALUES\n (%s, %s)\"\"\" % (val, product_id)\n cr.execute(query)\n \n return product_id", "def create_product(request):\n shop_obj = Shop.objects.get(user=request.user)\n tags = json.loads(request.data[\"tags\"])\n del tags[\"product_count\"]\n data_payload = {\n \"name\": request.data[\"productName\"],\n \"price\": request.data[\"productPrice\"],\n \"description\": request.data[\"description\"],\n \"genre\": tags,\n \"image\": request.data[\"file\"],\n \"shop_slug\": shop_obj.slug,\n \"shop_rel\": shop_obj,\n }\n # Create an instance of the product\n products = Products.objects.create(**data_payload)\n # Save the product into the database\n products.save()\n products_serailzer = ProductSerializer(products)\n return Response(\n data={\"product\": products_serailzer.data}, status=status.HTTP_201_CREATED\n )", "def __init__(self, vendor_id, product_id):\n self.vendor_id = vendor_id\n self.product_id = product_id", "def 
_create_product_and_project(self,\n unit: Squonk2Unit,\n user_name: str,\n session_title: str,\n params: CommonParams) -> Squonk2AgentRv:\n assert unit\n assert user_name\n assert session_title\n assert params\n\n # Create an AS Product.\n name_truncated, _ = self._build_product_name(user_name, session_title)\n msg: str = f'Creating AS Product \"{name_truncated}\" (unit={unit.uuid})...'\n _LOGGER.info(msg)\n\n sq2_rv: Squonk2AgentRv = self._get_or_create_product(name_truncated, unit.uuid)\n if not sq2_rv.success:\n msg = f'Failed to create AS Product ({sq2_rv.msg})'\n _LOGGER.error(msg)\n return Squonk2AgentRv(success=False, msg=msg)\n product_uuid: str = sq2_rv.msg\n msg = f'Got or created AS Product {product_uuid}'\n _LOGGER.info(msg)\n\n # Create a DM Project (using the same name we used for the AS Product)\n msg = f'Continuing by creating DM Project \"{name_truncated}\"...'\n _LOGGER.info(msg)\n\n sq2_rv: Squonk2AgentRv = self._get_or_create_project(name_truncated, product_uuid)\n if not sq2_rv.success:\n msg = f'Failed to create DM Project ({sq2_rv.msg})'\n _LOGGER.error(msg)\n # First delete the AS Product it should have been attached to\n self._delete_as_product(product_uuid)\n # Then leave...\n return Squonk2AgentRv(success=False, msg=msg)\n project_uuid: str = sq2_rv.msg\n msg = f'Got or created DM Project {project_uuid}...'\n _LOGGER.info(msg)\n\n # Add the user as an Editor to the Project\n msg = f'Adding \"{user_name}\" to DM Project {project_uuid} as Editor...'\n _LOGGER.info(msg)\n dm_rv = DmApi.add_project_editor(self.__org_owner_dm_token,\n project_id=project_uuid,\n editor=user_name)\n if not dm_rv.success:\n msg = f'Failed to add \"{user_name}\" to DM Project ({dm_rv.msg})'\n _LOGGER.error(msg)\n _LOGGER.warning('Rolling back DM Project and AS Product creation...')\n # First delete the DM Project amd the corresponding AS Product...\n self._delete_dm_project(project_uuid)\n self._delete_as_product(product_uuid)\n # Then leave...\n return Squonk2AgentRv(success=False, msg=msg)\n\n msg = f'\"{user_name}\" is now an Editor of DM Project {project_uuid}'\n _LOGGER.info(msg)\n\n # If the second call fails - delete the object created in the first\n\n response_msg: Dict[str, Any] = {\"sq2_project_uuid\": project_uuid,\n \"sq2_product_uuid\": product_uuid}\n return Squonk2AgentRv(success=True, msg=response_msg)", "def createGeneProduct(self):\n return _libsbml.ListOfGeneProducts_createGeneProduct(self)" ]
[ "0.70728076", "0.70317554", "0.67715746", "0.6514791", "0.64029783", "0.6349646", "0.62429774", "0.6187608", "0.6105697", "0.609649", "0.60295576", "0.5962163", "0.59564453", "0.59382737", "0.59142923", "0.58707803", "0.5799128", "0.5735306", "0.5721392", "0.5668691", "0.5642781", "0.562719", "0.5617213", "0.5605455", "0.55964696", "0.556561", "0.55131876", "0.546336", "0.5436455", "0.543358" ]
0.80497056
0
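The create document in the row above builds the parent ProductVariant first and then one ConnectedVariantAttribute row per payload item, while the update document in the following row reconciles existing attribute rows against the payload (delete what is missing, then upsert by id). A minimal, framework-free Python sketch of that write-then-reconcile pattern is given below; the dataclass names (Variant, ConnectedAttr) and the in-memory id handling are illustrative stand-ins, not the project's actual Django models or DRF serializers.

from dataclasses import dataclass, field
from decimal import Decimal
from typing import List, Optional


@dataclass
class ConnectedAttr:          # stand-in for ConnectedVariantAttribute
    connection: int
    value: str
    id: Optional[int] = None


@dataclass
class Variant:                # stand-in for ProductVariant
    name: str
    active: bool = False
    price: Optional[Decimal] = None
    attributes: List[ConnectedAttr] = field(default_factory=list)


def create_variant(data: dict) -> Variant:
    # Parent first, then one child row per payload attribute (mirrors the create document).
    variant = Variant(
        name=data["name"],
        active=data.get("active", False),
        price=Decimal(data["price"]["amount"]) if data.get("price", {}).get("amount") else None,
    )
    for n, item in enumerate(data.get("attributes", []), start=1):
        variant.attributes.append(ConnectedAttr(item["connection"], item["value"], id=n))
    return variant


def update_attributes(variant: Variant, payload: List[dict]) -> None:
    # Drop rows missing from the payload, then create or update the rest by id
    # (mirrors steps 1-3 in the update document).
    keep_ids = {item.get("id") for item in payload}
    variant.attributes = [a for a in variant.attributes if a.id in keep_ids]
    by_id = {a.id: a for a in variant.attributes}
    for item in payload:
        attr = by_id.get(item.get("id"))
        if attr is None:
            attr = ConnectedAttr(item["connection"], item["value"], id=item.get("id"))
            variant.attributes.append(attr)
        else:
            attr.connection, attr.value = item["connection"], item["value"]


if __name__ == "__main__":
    v = create_variant({"name": "Blue / L", "price": {"amount": "19.99"},
                        "attributes": [{"connection": 3, "value": "Blue"}]})
    update_attributes(v, [{"id": 1, "connection": 3, "value": "Navy"},   # updated in place
                          {"connection": 5, "value": "Cotton"}])         # newly created
    print(v)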
Update a product Variant instance and its ConnectedVariantAttribute
def update(self, instance, validated_data):
    instance.name = validated_data.get('name', instance.name)
    instance.active = validated_data.get('active', instance.active)
    if validated_data.get('price'):
        if validated_data['price'].get('amount'):
            instance.price = Money(
                amount=validated_data['price']['amount'],
                currency=validated_data.get('price_currency', instance.price_currency),
            )
        elif validated_data.get('price_currency'):
            instance.price = Money(
                amount=instance.price.amount,
                currency=validated_data['price_currency'],
            )
    instance.save()

    if validated_data.get('attributes'):
        # ConnectedVariantAttribute
        # 1. create a list of ids out of passed data
        attributes_ids = [item.get('id') for item in validated_data['attributes']]

        # 2. delete any association
        # which is not included in passed data
        for attribute in instance.attributes.all():
            if attribute.id not in attributes_ids:
                attribute.delete()

        # 3. create or update all association
        for item in validated_data['attributes']:
            attribute = ConnectedVariantAttribute(
                id=item.get('id'),
                variant=instance,
                connection=item['connection'],
                value=item['value']
            )
            attribute.save()

    return instance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self, instance, validated_data):\n\n # Update the Product instance\n instance.name = validated_data.get('name', instance.name)\n instance.description = validated_data.get('description', instance.description)\n instance.active = validated_data.get('active', instance.active)\n if validated_data.get('min_price'):\n if validated_data['min_price'].get('amount'):\n instance.min_price = Money(\n amount=validated_data['min_price'].get('amount', instance.min_price.amount),\n currency=validated_data.get('min_price_currency', instance.min_price_currency),\n )\n elif validated_data.get('min_price_currency'):\n instance.min_price = Money(\n amount=instance.min_price.amount,\n currency=validated_data['min_price_currency'],\n )\n instance.save()\n\n if validated_data.get('attributes'):\n # ConnectedProductAttribute\n # 1. create a list of ids out of passed data\n attributes_ids = [item.get('id') for item in validated_data['attributes']]\n\n # 2. delete any association\n # which is not included in passed data\n for attribute in instance.attributes.all():\n if attribute.id not in attributes_ids:\n attribute.delete()\n\n # 3. create or update all association\n for item in validated_data['attributes']:\n attribute = ConnectedProductAttribute(\n id=item.get('id'),\n product=instance,\n connection=item['connection'],\n value=item['value']\n )\n attribute.save()\n\n # # Fully working, see above at create\n # # ProductVariant\n # # 1. create a list of ids out of passed data\n # variants_ids = [item.get('id') for item in validated_data['variants']]\n\n # # 2. delete any association\n # # which is not included in passed data\n # for variant in instance.variants.all():\n # if variant.id not in variants_ids:\n # variant.delete()\n\n # # 3. create or update all association\n # for item in validated_data['variants']:\n # variant = ProductVariant(\n # id=item.get('id'),\n # name=item['name'],\n # product=instance,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price']['amount'],\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # elif item.get('price_currency'):\n # variant.price = Money(\n # amount=variant.price.amount,\n # currency=item['price_currency'],\n # )\n # variant.save()\n\n return instance", "def update_variant(self, variant_obj):\n LOG.debug(\"Updating variant %s\", variant_obj.get(\"simple_id\"))\n\n new_variant = self.variant_collection.find_one_and_replace(\n {\"_id\": variant_obj[\"_id\"]},\n variant_obj,\n return_document=pymongo.ReturnDocument.AFTER,\n )\n return new_variant", "def update(self, instance, validated_data):\n\n # Update the Attribute instance\n instance.name = validated_data.get('name', instance.name)\n instance.save()\n\n # Update AttributeProduct\n self.update_attributes(instance, validated_data,\n field_name=\"attribute_product\",\n attr_model=AttributeProduct,\n attr_all=instance.attribute_product.all())\n\n # Update AttributeVariant\n self.update_attributes(instance, validated_data,\n field_name=\"attribute_variant\",\n attr_model=AttributeVariant,\n attr_all=instance.attribute_variant.all())\n\n return instance", "def update_variant_by_id(variant_id, variant_updated):\n\n # get variant\n target_variant = Variant.get(\n lambda s: s.id == variant_id and s.deletedAt is None)\n\n # variant exist?\n if target_variant is None:\n return target_variant, \"Variant Not Found !\"\n\n target_variant.name = variant_updated.name\n target_variant.state = 
variant_updated.state\n target_variant.task = variant_updated.task\n target_variant.updatedAt = datetime.datetime.utcnow()\n\n return target_variant, \"\"", "def update_variant(item_code):\n if not request.json:\n abort(400)\n parser = reqparse.RequestParser()\n parser.add_argument('cost_price', type=float, required=False, location='json')\n parser.add_argument('selling_price', type=float, required=False, location='json')\n parser.add_argument('quantity', type=int, required=False, location='json')\n parser.add_argument('username', type=str, required=True, location='json', help=\"Username missing\")\n args = parser.parse_args()\n user_code = get_user_code(args['username'])\n if user_code is None:\n return make_response(jsonify({'error': 'User does not exists'}), 400)\n updated_variant = dict(\n cost_price = args['cost_price'] if 'cost_price' in args.keys() and args['cost_price'] != None else None,\n selling_price = args['selling_price'] if 'selling_price' in args.keys() and args['selling_price'] != None else None,\n quantity = args['quantity'] if 'quantity' in args.keys() and args['quantity'] != None else None\n )\n updated_variant_new = {k: v for k, v in updated_variant.items() if v}\n updated_variant = updated_variant_new\n if updated_variant == {}:\n return make_response(jsonify({'error': 'Invalid entries'}), 400)\n u = models.Items.query.filter_by(item_code=item_code).first()\n if u is None:\n return make_response(jsonify({'error': 'Item does not exists'}), 400)\n v = u.variants\n if v is None:\n return invalid_requeset(message='Variant does not exists')\n for param in updated_variant:\n setattr(u.variants, param, updated_variant[param]) \n db.session.commit()\n return make_response(jsonify({'success': True}))", "def _update(self, binding, data):\n self._validate_data(data)\n if not data.get('name',False):\n data['name'] = data.get('frontend_label',False) or 'No Label'\n if not data.get('create_variant',False):\n data['create_variant'] = data.get('is_configurable',False)\n binding.write(data)\n self._create_attribute_option(binding, data)\n _logger.debug('%d updated from magento %s', binding.id, self.magento_id)\n return", "def update(self, product, qty):\n product_id = str(product)\n if product_id in self.basket:\n self.basket[product_id]['qty'] = qty\n self.save()", "def test_02_product_update(self):\n # Update new product state2 from default draft to sellable\n new_product = self.create_product()\n self.assertEqual(new_product.state2, 'draft')\n new_product.state2 = 'sellable'\n self.assertEqual(new_product.state2, 'sellable')\n\n # Same but to an existing demo product.\n demo_product = self.product_obj.browse(\n self.ref('product_lifecycle.product_product_4g'))\n self.assertEqual(demo_product.state2, 'sellable')\n demo_product.state2 = 'draft'\n self.assertEqual(demo_product.state2, 'draft')\n\n # Update new product invividual field (field defined in product.product\n # model).\n self.assertEqual(new_product.default_code, 'A2330')\n new_product.default_code = 'A2330-1'\n self.assertEqual(new_product.default_code, 'A2330-1')\n\n # Same but to an existing demo product.\n self.assertEqual(demo_product.default_code, 'A2329')\n demo_product.default_code = 'A2329-1'\n self.assertEqual(demo_product.default_code, 'A2329-1')\n\n # Update new product commom characteristic (field defined in\n # product.template) and check that affects the another product\n # variants\n self.assertFalse(new_product.description)\n new_product.description = 'This is a New Product'\n 
self.assertEqual(new_product.description, 'This is a New Product')\n self.assertEqual(demo_product.description, 'This is a New Product')\n demo_product.description = False\n self.assertFalse(demo_product.description)", "def update(self, request, *args, **kwargs):\n obj = self.get_object()\n signals.product_pre_update.send(sender=obj.__class__, product=obj, request=request)\n response = super(ProductViewSet, self).update(request, *args, **kwargs)\n if response.status_code == status.HTTP_200_OK:\n signals.product_post_update.send(sender=obj.__class__, product=self.object, request=request)\n return response", "def update(self, product, qty):\n product_id = str(product)\n if product_id in self.cart:\n self.cart[product_id]['qty'] = qty\n self.save()", "def variant_info(self, variant_info):\n\n self._variant_info = variant_info", "def update_product(admin, product_id):\n return generic_update(Product, product_id, json_body(), admin)", "def update_product(self, product_id, name, archived=False):\n archived = 'y' if archived else 'n'\n return self._make_post_request(self._urls['product'] % product_id,\n data=dict(name=name, archived=archived))", "def test_product_update(self):\n # first performe create\n id = self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"description\": self.product_data[\"description\"],\n \"image_link\": self.product_data[\"image_link\"],\n \"price\": self.product_data[\"price\"]\n }\n self._update_model(\"product\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def update(self, user, product, quantity):\n\n cart_product = CartProduct.update(user, product, quantity)\n CartProductsView.update(cart_product)", "def update(self, instance, validated_data):\n instance.product_name = validated_data.get('product_name', instance.product_name)\n instance.product_mrp = validated_data.get('product_mrp', instance.product_mrp)\n instance.save()\n return instance", "def create(self, validated_data):\n variant = ProductVariant(\n name=validated_data['name'],\n product=validated_data['product']['id'],\n active=validated_data.get('active', False)\n )\n if validated_data['price'].get('amount'):\n variant.price = Money(\n amount=validated_data['price'].get('amount', Decimal(0.0)),\n currency=validated_data.get('price_currency', settings.DEFAULT_CURRENCY),\n )\n variant.save()\n\n # Create each ConnectedVariantAttribute instance associated with it\n for item in validated_data.get('attributes', []):\n ConnectedVariantAttribute.objects.create(\n variant=variant,\n connection=item['connection'],\n value=item['value']\n )\n\n return variant", "def update_product(body): # noqa: E501\n if connexion.request.is_json:\n body = Product.from_dict(connexion.request.get_json()) # noqa: E501\n return 'do some magic!'", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def upsert_variant(self, variant_obj):\n LOG.debug(\"Upserting variant %s\", variant_obj[\"_id\"])\n try:\n result = self.variant_collection.insert_one(variant_obj)\n except DuplicateKeyError as err:\n LOG.warning(\"Variant %s already exists in database - modifying\", variant_obj[\"_id\"])\n result = self.variant_collection.find_one_and_update(\n {\"_id\": variant_obj[\"_id\"]},\n {\"$set\": {\"compounds\": variant_obj.get(\"compounds\", [])}},\n )\n return result", "def update_variations(self, **kwargs):\n pass", "def update_device(self, dev_dict):\n 
# Note(jprabh1x): added bus,slot,function into fields dict as \n # seperate fields.\n no_changes = ('status', 'instance_uuid', 'id', 'extra_info', 'workload')\n map(lambda x: dev_dict.pop(x, None),\n [key for key in no_changes])\n\n # Note(jprabh1x): populating values for bus,slot,function from address in dev_dict.\n if dev_dict.has_key(\"address\"):\n \t\taddress = pci_utils.parse_address(dev_dict[\"address\"])\n \t\tdev_dict.update({'bus':str(address[1]), 'slot':str(address[2]), 'function':str(address[3])})\n for k, v in dev_dict.items():\n if k in self.fields.keys():\n self[k] = v\n else:\n extra_info = self.extra_info\n extra_info.update({k: str(v)})\n self.extra_info = extra_info", "def update(self) -> None:\n self._api.update()\n if self.available:\n self._attr_native_value = self._api.data[self.entity_description.key]\n else:\n self._attr_native_value = None", "def update_product(product_id, name, price, stock, description):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Products\n SET product_name=?, product_price=?, in_stock=?, description=?\n WHERE id_product=?\n \"\"\",\n (name, price, stock, description, product_id,))", "def testUpdate(self):\n response = self.runPut(self.root, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])", "def obj_update(self, bundle, request=None, **kwargs):\n\n # pull the productversions off, you can't edit them from here\n productversions = bundle.data.pop(\"productversions\", [])\n bundle.data[\"productversions\"] = []\n\n updated_bundle = super(ProductResource, self).obj_update(\n bundle=bundle, request=request, **kwargs)\n\n # create the productversions\n for pv in productversions:\n ProductVersion.objects.get_or_create(\n product=updated_bundle.obj, **pv)\n\n return updated_bundle", "def prepare_product_variant_dict(self, instance, template, data, basic_detail, update_price,\n update_image, common_log_id, model_id):\n common_log_line_obj = self.env['common.log.lines.ept']\n wc_api = instance.woo_connect()\n variants_to_create = []\n flag = True\n for variant in template.woo_product_ids:\n price = 0.0\n if variant.variant_id:\n info = {'id': variant.variant_id, 'menu_order': variant.sequence}\n # Below are used to set the color in the metadata field.\n product_template_attribute_value = variant.product_id.product_template_attribute_value_ids.filtered(\n lambda attribute: attribute.display_type == 'color') or False\n if product_template_attribute_value and product_template_attribute_value.product_attribute_value_id.html_color:\n meta_data = []\n meta_data.append({'key': 'markersnpens-color-picker',\n 'value': product_template_attribute_value.product_attribute_value_id.html_color})\n info.update({'meta_data': meta_data})\n\n if basic_detail:\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n info.update({'sku': variant.default_code, 'weight': str(weight),\n \"manage_stock\": variant.woo_is_manage_stock})\n else:\n attributes = self.get_product_attribute(template.product_tmpl_id, instance, common_log_id, model_id)[0]\n info = self.get_variant_data(variant, instance, False)\n\n if update_image:\n info.update(self.get_variant_image(instance, variant))\n\n if update_price:\n price = instance.woo_pricelist_id.get_product_price(variant.product_id, 1.0, partner=False,\n uom_id=variant.product_id.uom_id.id)\n info.update({'regular_price': 
str(price), 'sale_price': str(price)})\n\n if template.woo_tmpl_id != variant.variant_id:\n if variant.variant_id:\n data.get('variations').append(info)\n else:\n variants_to_create.append(info)\n flag = True\n elif template.woo_tmpl_id == variant.variant_id:\n del data['variations']\n if basic_detail:\n data.update({'sku': variant.default_code, \"manage_stock\": variant.woo_is_manage_stock})\n if update_price:\n data.update({'regular_price': str(price), 'sale_price': str(price)})\n flag = True\n\n if data.get('variations'):\n variant_batches = self.prepare_batches(data.get('variations'))\n for woo_variants in variant_batches:\n _logger.info('variations batch processing')\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'update': woo_variants})\n _logger.info('variations batch process completed [status: %s]', res.status_code)\n if res.status_code in [200, 201]:\n del data['variations']\n if res.status_code not in [200, 201]:\n message = \"Update Product Variations\\n%s\" % res.content\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n if variants_to_create:\n \"\"\"Needed to update the attributes of template for adding new variant, while update\n process.\"\"\"\n _logger.info(\"Updating attributes of %s in Woo..\" % template.name)\n if data.get(\"variations\"):\n del data['variations']\n data.update({\"attributes\": attributes})\n res = wc_api.put(\"products/%s\" % (data.get(\"id\")), data)\n\n _logger.info(\"Creating variants in Woo..\")\n res = wc_api.post('products/%s/variations/batch' % (data.get('id')), {'create': variants_to_create})\n try:\n response = res.json()\n except Exception as error:\n message = \"Json Error : While update products to WooCommerce for instance %s. \\n%s\" % (\n instance.name, error)\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n return data, flag\n for product in response.get(\"create\"):\n if product.get(\"error\"):\n message = \"Update Product \\n%s\" % (product.get(\"error\").get('message'))\n common_log_line_obj.woo_product_export_log_line(message, model_id, common_log_id, False)\n else:\n variant_id = product.get(\"id\")\n variant = template.woo_product_ids.filtered(lambda x: x.default_code == product.get(\"sku\"))\n if variant:\n variant.write({\"variant_id\": variant_id, \"exported_in_woo\": True})\n\n self.sync_woo_attribute_term(instance, common_log_id)\n\n return data, flag", "def update(self, request, *args, **kwargs):\n response = super(ProductViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Producto ha sido editado\"", "def product(self, product):\n self._product = product", "def get_variant_data(self, variant, instance, update_image):\n att = []\n woo_attribute_obj = self.env['woo.product.attribute.ept']\n variation_data = {}\n att_data = {}\n for attribute_value in variant.product_id.product_template_attribute_value_ids:\n if instance.woo_attribute_type == 'select':\n woo_attribute = woo_attribute_obj.search(\n [('name', '=', attribute_value.attribute_id.name),\n ('woo_instance_id', '=', instance.id),\n ('exported_in_woo', '=', True)], limit=1)\n if not woo_attribute:\n woo_attribute = woo_attribute_obj.search(\n [('attribute_id', '=', attribute_value.attribute_id.id),\n ('woo_instance_id', '=', instance.id),\n ('exported_in_woo', '=', True)], limit=1)\n att_data = {\n 'id':woo_attribute and woo_attribute.woo_attribute_id,\n 'option':attribute_value.name\n }\n if instance.woo_attribute_type == 'text':\n 
att_data = {\n 'name':attribute_value.attribute_id.name,\n 'option':attribute_value.name\n }\n att.append(att_data)\n if update_image:\n variation_data.update(self.get_variant_image(instance, variant))\n\n weight = self.convert_weight_by_uom(variant.product_id.weight, instance)\n\n variation_data.update(\n {\n 'attributes':att, 'sku':str(variant.default_code),\n 'weight':str(weight), \"manage_stock\":variant.woo_is_manage_stock\n })\n return variation_data" ]
[ "0.701751", "0.64262533", "0.61184746", "0.60352725", "0.5932542", "0.5842131", "0.5804501", "0.57167363", "0.5685463", "0.5658403", "0.5640953", "0.56252694", "0.5607121", "0.55334973", "0.5522642", "0.54955995", "0.54925525", "0.5485186", "0.5482061", "0.5471064", "0.5427064", "0.53933614", "0.53609043", "0.53353024", "0.5331141", "0.5329946", "0.5316325", "0.53060645", "0.5275822", "0.5263462" ]
0.6617582
1
Create the Product instance and its connectedProductAttribute
def create(self, validated_data):
    # Create the Attribute instance
    product = Product(
        name=validated_data['name'],
        product_template=validated_data['product_template']['id'],
        description=validated_data.get('description'),
        active=validated_data.get('active', False)
    )
    if validated_data['min_price'].get('amount'):
        product.min_price = Money(
            amount=validated_data['min_price'].get('amount', Decimal(0.0)),
            currency=validated_data.get('min_price_currency', settings.DEFAULT_CURRENCY),
        )
    product.save()

    # Create each ConnectedProductAttribute instance associated with it
    for item in validated_data.get('attributes', []):
        ConnectedProductAttribute.objects.create(
            product=product,
            connection=item['connection'],
            value=item['value']
        )

    # # Fully working, commented for make structure easier
    # # by allowing variants created only at one location
    # # Create each Variant instance associated with it
    # for item in validated_data.get('variants', []):
    #     variant = ProductVariant(
    #         name=item['name'],
    #         product=product,
    #         active=item.get('active', False)
    #     )
    #     if item.get('price'):
    #         if item['price'].get('amount'):
    #             variant.price = Money(
    #                 amount=item['price'].get('amount', Decimal(0.0)),
    #                 currency=item.get('price_currency', settings.DEFAULT_CURRENCY),
    #             )
    #     variant.save()

    return product
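The create() method above writes to a Product model with a money-valued min_price and a related ConnectedProductAttribute model. The row does not include those model definitions, so the sketch below is only an inferred illustration: field names and types are assumptions taken from the attribute access in create(), and 'ProductTemplate' and 'AttributeProduct' are hypothetical related models.

# Hypothetical Django models inferred from the serializer above; all field
# definitions here are assumptions, not part of the original dataset row.
from django.conf import settings
from django.db import models
from djmoney.models.fields import MoneyField


class Product(models.Model):
    name = models.CharField(max_length=255)
    product_template = models.ForeignKey('ProductTemplate', on_delete=models.CASCADE)
    description = models.TextField(blank=True, null=True)
    active = models.BooleanField(default=False)
    # MoneyField stores amount plus a companion min_price_currency column,
    # which matches the validated_data keys used in create()/update().
    min_price = MoneyField(max_digits=14, decimal_places=2,
                           default_currency=settings.DEFAULT_CURRENCY,
                           null=True, blank=True)


class ConnectedProductAttribute(models.Model):
    # related_name='attributes' matches instance.attributes.all() in update()
    product = models.ForeignKey(Product, related_name='attributes', on_delete=models.CASCADE)
    connection = models.ForeignKey('AttributeProduct', on_delete=models.CASCADE)
    value = models.CharField(max_length=255)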
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_product(self):\n product = self.product_obj.create({\n \"default_code\": 'A2330',\n \"product_tmpl_id\":\n self.ref(\"product.product_product_4_product_template\"),\n \"attribute_value_ids\": [(6, 0, [\n self.ref('product.product_attribute_value_1'),\n self.ref('product_lifecycle.product_attribute_value_6'),\n self.ref('product.product_attribute_value_5')])],\n \"replacement_product_ids\": [(\n 6, 0, [self.ref('product_lifecycle.product_product_4e')]\n )]})\n return product", "def createProduct(self):\n return _libsbml.Model_createProduct(self)", "def test_product_create(self):\n self._create_model(\"product\", self.product_data, [\"name\", \"description\", \"image_link\", \"price\"])", "def create(cls, **kwargs):\n response = cls.get_client().create_product(**kwargs)\n object_details = cls._flatten_object_details(response)\n return cls(**object_details)", "def create_product():\n mongo = MongoClient(Config.MONGO_URI)\n db_operations = mongo.db.product\n data = request.get_json(force=True) or {}\n if 'title' not in data or 'description' not in data or 'params' not in data:\n return bad_request(t['empty_field'])\n new_product = Product()\n if Product.params_is_valid(data):\n new_product.save_to_db(data, db_operations)\n\n response = jsonify(new_product.to_dict())\n response.status_code = 201\n response.headers['Location'] = url_for('api.get_product_by_id', product_id=new_product._id)\n return response\n else:\n return bad_request(t['invalid_value'])", "def createProduct(self):\n return _libsbml.Reaction_createProduct(self)", "def create_product(new_product, owner_name):\n if not isinstance(new_product, dict):\n raise ValueError(f\"New product {new_product} is not a dict\")\n new_instance = Products(\n serial_number=new_product['serial_number'],\n type_name=new_product['type_name'],\n owner_name=owner_name,\n product_condition=new_product['product_condition'] == 'true',\n model=new_product['model'],\n producent=new_product['producent'],\n additonal_info=new_product['additional_info'],\n )\n if not get_critical_level(owner_name, [new_product['type_name']]).all():\n new_critical_entry = CriticalLevels(\n business=owner_name,\n type_name=new_product['type_name']\n )\n else:\n new_critical_entry = None\n return new_instance, new_critical_entry", "def create(self, validated_data):\n variant = ProductVariant(\n name=validated_data['name'],\n product=validated_data['product']['id'],\n active=validated_data.get('active', False)\n )\n if validated_data['price'].get('amount'):\n variant.price = Money(\n amount=validated_data['price'].get('amount', Decimal(0.0)),\n currency=validated_data.get('price_currency', settings.DEFAULT_CURRENCY),\n )\n variant.save()\n\n # Create each ConnectedVariantAttribute instance associated with it\n for item in validated_data.get('attributes', []):\n ConnectedVariantAttribute.objects.create(\n variant=variant,\n connection=item['connection'],\n value=item['value']\n )\n\n return variant", "def __init__(self, product):\n\n self.codes = list(\n Products.objects.all().values_list(\n 'code',\n flat=True\n )\n )\n\n self.product = product\n self._check_product()\n if self.importable:\n self.product_object = self.import_in_db()\n self.categories = self.create_categories()\n self.brands = self.import_brands()\n self.stores = self.import_stores()", "def update(self, instance, validated_data):\n\n # Update the Product instance\n instance.name = validated_data.get('name', instance.name)\n instance.description = validated_data.get('description', 
instance.description)\n instance.active = validated_data.get('active', instance.active)\n if validated_data.get('min_price'):\n if validated_data['min_price'].get('amount'):\n instance.min_price = Money(\n amount=validated_data['min_price'].get('amount', instance.min_price.amount),\n currency=validated_data.get('min_price_currency', instance.min_price_currency),\n )\n elif validated_data.get('min_price_currency'):\n instance.min_price = Money(\n amount=instance.min_price.amount,\n currency=validated_data['min_price_currency'],\n )\n instance.save()\n\n if validated_data.get('attributes'):\n # ConnectedProductAttribute\n # 1. create a list of ids out of passed data\n attributes_ids = [item.get('id') for item in validated_data['attributes']]\n\n # 2. delete any association\n # which is not included in passed data\n for attribute in instance.attributes.all():\n if attribute.id not in attributes_ids:\n attribute.delete()\n\n # 3. create or update all association\n for item in validated_data['attributes']:\n attribute = ConnectedProductAttribute(\n id=item.get('id'),\n product=instance,\n connection=item['connection'],\n value=item['value']\n )\n attribute.save()\n\n # # Fully working, see above at create\n # # ProductVariant\n # # 1. create a list of ids out of passed data\n # variants_ids = [item.get('id') for item in validated_data['variants']]\n\n # # 2. delete any association\n # # which is not included in passed data\n # for variant in instance.variants.all():\n # if variant.id not in variants_ids:\n # variant.delete()\n\n # # 3. create or update all association\n # for item in validated_data['variants']:\n # variant = ProductVariant(\n # id=item.get('id'),\n # name=item['name'],\n # product=instance,\n # active=item.get('active', False)\n # )\n # if item.get('price'):\n # if item['price'].get('amount'):\n # variant.price = Money(\n # amount=item['price']['amount'],\n # currency=item.get('price_currency', settings.DEFAULT_CURRENCY),\n # )\n # elif item.get('price_currency'):\n # variant.price = Money(\n # amount=variant.price.amount,\n # currency=item['price_currency'],\n # )\n # variant.save()\n\n return instance", "def test_01_product_create(self):\n # Create new product with a replacement product\n product = self.create_product()\n\n # Check recently was created product with default 'In Development'\n # value state and that the replacement was assigned. 
This case also\n # check the read test.\n self.assertTrue(product)\n self.assertEqual(product.state2, 'draft')\n self.assertTrue(product.replacement_product_ids)\n self.assertEqual(len(product.replacement_product_ids), 1)\n self.assertEqual(product.replacement_product_ids[0].id,\n self.ref('product_lifecycle.product_product_4e'))", "def product(self, product_id):\r\n return products.Product(self, product_id)", "def product(self, product):\n self._product = product", "def model(self):\n return Product", "def product(self, product):\n\n self._product = product", "def product(self, product):\n\n self._product = product", "def post(self):\n\n args = product_parser.parse_args()\n \n product = db.products.Product()\n product.name = args['name']\n product.description = args['description']\n product.price = args['price']\n product.is_hidden = args['is_hidden']\n \n product.save()\n \n # marshal and transform custom properties\n product_marshaled = marshal(product, product_fields)\n product_marshaled = add_custom_properties(product_marshaled)\n \n return product_marshaled, 201", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def insert(self, product):\n pass", "def __init__(self, vendor_id, product_id):\n self.vendor_id = vendor_id\n self.product_id = product_id", "def create_product(admin):\n data = json_body()\n required = {\"name\": str, \"price\": int, \"tags\": list}\n optional = {\n \"barcode\": str,\n \"active\": bool,\n \"countable\": bool,\n \"revocable\": bool,\n \"imagename\": str,\n }\n\n # Check all required fields\n check_fields_and_types(data, required, optional)\n\n # Check if a product with this name already exists\n if Product.query.filter_by(name=data[\"name\"]).first():\n raise exc.EntryAlreadyExists()\n\n # Check if a product with this barcode already exists\n if \"barcode\" in data:\n if Product.query.filter_by(barcode=data[\"barcode\"]).first():\n raise exc.EntryAlreadyExists()\n\n # Check the product tags\n tags = data[\"tags\"]\n for tag_id in tags:\n if not isinstance(tag_id, int):\n raise exc.WrongType\n tag = Tag.query.filter_by(id=tag_id).first()\n if not tag:\n raise exc.EntryNotFound\n\n del data[\"tags\"]\n\n # Save the price and delete it from the data dictionary\n price = int(data[\"price\"])\n del data[\"price\"]\n\n try:\n product = Product(**data)\n product.created_by = admin.id\n db.session.add(product)\n db.session.flush()\n product.set_price(price=price, admin_id=admin.id)\n for tag_id in tags:\n tag = Tag.query.filter_by(id=tag_id).first()\n product.tags.append(tag)\n\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({\"message\": \"Created Product.\"}), 201", "def setup(self):\n print(\"INIT DATA\")\n\n self.nutella = Product.objects.create(name=\"nutella\", nutriscore=\"e\")", "def create_products():\n try:\n # Instantiate the class and separate objects into two lists\n challenge = Challenge()\n # Get all products\n product_base = challenge.get_products(\"product_groups.json\")\n # Divide the products into independent (no parent) and dependent (with parents)\n independent, dependent = challenge.filter_products(product_base)\n if not challenge.save_independent_products(independent):\n Exception(\"Function save_independent_products() couldn't complete\")\n\n if not challenge.save_dependent_products(\n dependent, product_base, len(independent)\n ):\n raise Exception(\"Function save_dependent_products() couldn't complete\")\n\n except 
Exception as err:\n logging.error(f\"[ERROR] While processing the objects. Traceback: {err}\")\n return False\n else:\n return True", "def add_product( product_id, reaction, model, compartment = \"default\", arguments = DEFAULT_ARGUMENTS):\n if product_id is None and len( reaction.getListOfProducts()) > 0:\n product_ref = reaction.getListOfProducts()[0]\n product_species = product_ref.getSpecies()\n return product_species\n else:\n if product_id is None:\n reaction_id = reaction.getId();\n product_id = reaction_id + \"_product_nr\" + str(len( reaction.getListOfProducts()));\n \n product_prefix = PRODUCT_PREFIX.get( reaction.getName().lower()) \n if product_prefix is None:\n product_prefix = \"\"\n\n reactant = None\n if len( reaction.getListOfReactants()) > 0:\n reactant = model.getSpecies( reaction.getListOfReactants()[0].getSpecies())\n if reactant != None:\n product_name = product_prefix + reactant.getName();\n else:\n product_name = product_prefix + \"Product\";\n add_species( None, model, id = product_id, name = product_name, compartment = compartment, arguments = arguments);\n \n product_ref = reaction.createProduct()\n check( product_ref, 'create product reference', arguments = arguments);\n check( product_ref.setSpecies( product_id), 'assign product species', arguments = arguments);\n check( product_ref.setMetaId( \"metaid_0000\" + product_id), 'set meta ID', arguments = arguments);\n check( product_ref.addCVTerm( add_cvterm( GENERIC_REACTION_SBO_MAPPING[\"product\"])), 'set controlled vocab SBO term for product', arguments = arguments);\n # check( product_ref.addCVTerm(add_cvterm(STANDOFF_ENTITY_TO_SBO_MAPPING[product.type])), 'set controlled vocab SBO term 2 for product')\n return product_id", "def assign_data_product(self, input_resource_id='', data_product_id='', create_stream=False):\n # Verify that both ids are valid\n input_resource_obj = self.clients.resource_registry.read(input_resource_id)\n if not input_resource_obj:\n raise BadRequest(\"Source resource %s does not exist\" % input_resource_id)\n data_product_obj = self.clients.resource_registry.read(data_product_id)\n if not data_product_obj:\n raise BadRequest(\"Data Product resource %s does not exist\" % data_product_id)\n\n #find the data producer resource associated with the source resource that is creating the data product\n producer_ids, _ = self.clients.resource_registry.find_objects(input_resource_id, PRED.hasDataProducer, RT.DataProducer, id_only=True)\n if producer_ids is None:\n raise NotFound(\"No Data Producers associated with source resource ID \" + str(input_resource_id))\n #find the 'head' producer\n self.primary_producer = None\n for producer_id in producer_ids:\n producer_obj = self.clients.resource_registry.read(producer_id)\n if not producer_obj:\n raise NotFound(\"Data Producer %s does not exist\" % producer_id)\n if producer_obj.is_primary:\n self.primary_producer = producer_id\n\n if self.primary_producer is None:\n raise NotFound(\"No primary Data Producer associated with source resource ID \" + str(input_resource_id))\n\n #create data producer resource for this data product\n data_producer_obj = IonObject(RT.DataProducer,name=data_product_obj.name, description=data_product_obj.description)\n data_producer_id, rev = self.clients.resource_registry.create(data_producer_obj)\n\n\n # Associate the Product with the Producer\n self.clients.resource_registry.create_association(data_product_id, PRED.hasDataProducer, data_producer_id)\n # Associate the Producer with the main Producer\n 
self.clients.resource_registry.create_association(data_producer_id, PRED.hasParent, self.primary_producer)\n # Associate the input resource with the child data Producer\n self.clients.resource_registry.create_association(input_resource_id, PRED.hasDataProducer, data_producer_id)\n\n #Create the stream if requested\n log.debug(\"assign_data_product: create_stream %s\" % create_stream)\n if create_stream:\n stream_id = self.clients.pubsub_management.create_stream(name=data_product_obj.name, description=data_product_obj.description)\n log.debug(\"assign_data_product: create stream stream_id %s\" % stream_id)\n # Associate the Stream with the main Data Product\n self.clients.resource_registry.create_association(data_product_id, PRED.hasStream, stream_id)\n\n return", "def add_product(self, product):\n return self._make_post_request(self._urls['products'],\n data=dict(name=product))", "def products(self):\r\n return Products(self)", "def add_product(self, name, energy_points):\n now = datetime.datetime.now()\n date = \"{}-{}-{}\".format(now.year, now.month, now.day)\n Product(productName=name, energyPoints=energy_points, date=date)", "def add_product(self, product: Product):\n log.debug(\"Adding a new product\")\n product_parameters = product.to_db()\n try:\n with DBCursor(self.host) as cursor:\n cursor.execute(\"INSERT INTO items VALUES (?, ?, ?, ?, ?)\", (product_parameters['name'].lower(), product_parameters['units'], product_parameters['last_buy'], product_parameters['cost'], product_parameters['price']))\n except sqlite3.IntegrityError:\n log.critical(\"An integrity error was raised. Maybe a matching name or id.\")\n raise DatabaseIntegrityError(\"There's a matching name or id already stored.\")\n else:\n log.info(f\"{product.__repr__} was added successfully.\")", "def create_product():\n form = ProductForm(request.form)\n if form.validate():\n product = Product()\n product.name = form.name.data\n product.price = form.price.data\n product.quantity = form.quantity.data\n product.description = form.description.data\n product.category = form.category.data\n product.unique_tag = form.unique_tag.data\n db.session.add(product)\n db.session.commit()\n flash(f\"Product {product.name} created!\")\n return redirect(url_for('get_products'))\n\n flash(\"Invalid data\")\n return redirect(url_for('get_products'))" ]
[ "0.7809591", "0.74017674", "0.70743674", "0.6890056", "0.6756093", "0.67468816", "0.6729608", "0.6729589", "0.6467058", "0.6433752", "0.6425072", "0.64164263", "0.6376284", "0.63619643", "0.6321698", "0.6321698", "0.63119274", "0.62768716", "0.6142879", "0.60312605", "0.60166", "0.5988793", "0.5985345", "0.5978205", "0.5963803", "0.59552217", "0.5950516", "0.5947479", "0.5905622", "0.5905125" ]
0.77725405
1
Filters bam file based on mapping, mapping of mate, splicing
def bam_filtering(bam):
    filt_bam = os.path.splitext(bam)[0] + "_filt.bam"
    # Filtering bam, removing unmapped, mate unmapped, spliced, secondary mapping
    cmd = "samtools view -h -F 0x4 -F 0x8 -F 0x100 {0} | awk '{{if ($1 ~ /^@/) {{print}} else if ($6 !~ /N/) {{print}}}}' | samtools view -bh > {1}".format(bam, filt_bam)
    subprocess.check_output(cmd, shell=True)
    logger.debug('DONE: {}'.format(cmd))
    return filt_bam
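The function above shells out to samtools and awk: -F 0x4 drops unmapped reads, -F 0x8 drops reads with an unmapped mate, -F 0x100 drops secondary alignments, and the awk clause keeps only records whose CIGAR (field 6) contains no 'N', i.e. no spliced alignments. A minimal pysam-based sketch of the same filtering logic is shown below; it is an illustrative alternative, not part of the original document, and assumes pysam is installed.

# Sketch: same filter implemented with pysam instead of a samtools/awk pipeline.
import os
import pysam


def bam_filtering_pysam(bam):
    filt_bam = os.path.splitext(bam)[0] + "_filt.bam"
    with pysam.AlignmentFile(bam, "rb") as infile, \
            pysam.AlignmentFile(filt_bam, "wb", template=infile) as outfile:
        for read in infile:
            # Drop unmapped reads, reads with unmapped mates, secondary alignments
            if read.is_unmapped or read.mate_is_unmapped or read.is_secondary:
                continue
            # Drop spliced alignments (CIGAR contains an 'N' operation)
            if read.cigarstring and "N" in read.cigarstring:
                continue
            outfile.write(read)
    return filt_bam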
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_samfile(temp_alignment, filtered_out):\n # Check the quality and status of each aligned fragment.\n # Write the ones with good quality in the final output file.\n # Keep those that do not map unambiguously for the next round.\n\n unaligned = set()\n temp_sam = ps.AlignmentFile(temp_alignment, \"r\")\n outf = ps.AlignmentFile(filtered_out, \"w\", template=temp_sam)\n for r in temp_sam:\n if r.flag in [0, 16] and r.mapping_quality >= 30:\n outf.write(r)\n else:\n unaligned.add(r.query_name)\n\n print(\"{0} reads left to map.\".format(len(unaligned)))\n temp_sam.close()\n outf.close()\n\n return unaligned", "def parse_bam():\n global sample_name, header, segmentID, bam\n sys.stderr.write(time.strftime(\"%c\") + \" Busy with parsing bam file...\\n\")\n bam = pysam.AlignmentFile(NanoSV.opts_bam, 'rb')\n if not bam.has_index():\n sys.exit('The bam has no index file')\n header = bam.header\n if 'HD' in header:\n if not header['HD']['SO'] == 'coordinate':\n sys.exit('The bam file is not coordinate sorted')\n if 'RG' in header:\n if type(header['RG']) is list:\n sample_name = header['RG'][0]['SM']\n else:\n sample_name = header['RG']['SM']\n else:\n sample_name = re.sub('(\\.sorted)?\\.bam$', '', str(NanoSV.opts_bam))\n\n for line in bam:\n if line.query_name in reads:\n read = reads[line.query_name]\n else:\n read = r.Read(line.query_name, line.infer_read_length())\n reads[line.query_name] = read\n\n if line.flag & 4 or line.mapping_quality < NanoSV.opts_min_mapq:\n continue\n segment = s.Segment(segmentID, line.query_name, line.flag, line.reference_name, line.reference_start+1, line.mapping_quality,\n line.query_alignment_length)\n segment.end = line.reference_start + line.reference_length\n if line.has_tag('MD'):\n matches = sum(map(int, re.findall(r\"(\\d+)\", line.get_tag('MD'))))\n segment.pid = format(matches / segment.length, '.3f')\n else:\n segment.pid = format(line.get_cigar_stats()[0][7] / segment.length, '.3f')\n if segment.pid == \"0.000\":\n segment.pid = format(line.get_cigar_stats()[0][0] / segment.length, '.3f')\n if line.flag & 16:\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip = line.cigartuples[-1][1]\n else:\n segment.clip = 0\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip_2 = line.cigartuples[0][1]\n else:\n segment.clip_2 = 0\n else:\n if line.cigartuples[0][0] == 5 or line.cigartuples[0][0] == 4:\n segment.clip = line.cigartuples[0][1]\n else:\n segment.clip = 0\n if line.cigartuples[-1][0] == 5 or line.cigartuples[-1][0] == 4:\n segment.clip_2 = line.cigartuples[-1][1]\n else:\n segment.clip_2 = 0\n if float(segment.pid) < NanoSV.opts_min_pid:\n continue\n read.addSegment(segment)\n segments[segmentID] = segment\n segmentID += 1", "def filter_reads(alignment_file, readdb, read_dirs, quality_threshold=7, recursive=False, trim=False):\n assert alignment_file.endswith(\"bam\"), \"Alignment file must be in BAM format: {}\".format(alignment_file)\n # grab aligned segment\n if trim:\n assert isinstance(trim, int), \"Trim needs to be an integer: {}\".format(trim)\n else:\n trim = np.inf\n n_bases = 0\n n_files = 0\n with closing(pysam.AlignmentFile(alignment_file, 'rb')) as bamfile:\n name_indexed = pysam.IndexedReads(bamfile)\n name_indexed.build()\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n try:\n if trim < n_bases:\n print(\"Filtered {} files for {} bases\".format(n_files, n_bases))\n break\n iterator = name_indexed.find(name)\n for aligned_segment in 
iterator:\n if aligned_segment.is_secondary or aligned_segment.is_unmapped \\\n or aligned_segment.is_supplementary or aligned_segment.has_tag(\"SA\"):\n continue\n # get data and sanity check\n if aligned_segment.query_qualities is not None:\n if np.mean(aligned_segment.query_qualities) < quality_threshold:\n continue\n n_files += 1\n n_bases += aligned_segment.query_length\n yield fast5, aligned_segment\n except KeyError:\n print(\"Found no alignments for {}\".format(fast5))", "def remap(bamfn, threads, bwaref):\n sai1fn = bamfn + \".1.sai\"\n sai2fn = bamfn + \".2.sai\"\n samfn = bamfn + \".sam\"\n refidx = bwaref + \".fai\"\n\n sai1args = ['bwa', 'aln', bwaref, '-q', '5', '-l', '32', '-k', '3', '-t', str(threads), '-o', '1', '-f', sai1fn, '-b1', bamfn]\n sai2args = ['bwa', 'aln', bwaref, '-q', '5', '-l', '32', '-k', '3', '-t', str(threads), '-o', '1', '-f', sai2fn, '-b2', bamfn]\n samargs = ['bwa', 'sampe', '-P', '-f', samfn, bwaref, sai1fn, sai2fn, bamfn, bamfn]\n bamargs = ['samtools', 'view', '-bt', refidx, '-o', bamfn, samfn] \n\n print \"mapping 1st end, cmd: \" + \" \".join(sai1args)\n subprocess.call(sai1args)\n print \"mapping 2nd end, cmd: \" + \" \".join(sai2args)\n subprocess.call(sai2args)\n print \"pairing ends, building .sam, cmd: \" + \" \".join(samargs)\n subprocess.call(samargs)\n print \"sam --> bam, cmd: \" + \" \".join(bamargs)\n subprocess.call(bamargs)\n\n sortbase = bamfn + \".sort\"\n sortfn = sortbase + \".bam\"\n sortargs = ['samtools','sort','-m','10000000000',bamfn,sortbase]\n print \"sorting, cmd: \" + \" \".join(sortargs)\n subprocess.call(sortargs)\n os.rename(sortfn,bamfn)\n\n indexargs = ['samtools','index',bamfn]\n print \"indexing, cmd: \" + \" \".join(indexargs)\n subprocess.call(indexargs)\n\n # cleanup\n os.remove(sai1fn)\n os.remove(sai2fn)\n os.remove(samfn)", "def supportingReadsFilter(spot, args):\n if spot.tags[\"label\"] == \"INS\":\n errId = 1\n errLab = 'insertion'\n elif spot.tags[\"label\"] == \"DEL\":\n errId = 2\n errLab = 'deletion'\n else:#don't worry about other types\n return False\n\n begin, ending = spot.fetchbounds()\n begin -= args.buffer #abs(begin-ending)*.5\n ending += args.buffer #abs(begin-ending)*.5\n #do the hard work\n reads = args.bam.fetch(str(spot.chrom), begin, ending)\n totSizes = []\n coverage = 0\n nReadsErr = 0\n #For tandem\n strandCnt = {True: 0, False: 0}\n \n #count reads and errSizes\n for i in reads:\n mySize = 0\n coverage += 1\n start = i.pos - 1\n cigar = expandCigar(i.cigar)\n curSize = 0\n extraSize = 0\n readHasErr = False\n \n #What if I just intersect any stretches of errors with my boundaries.\n #Then for insertions I'll keep coordinates\n #For deletions I'll user outer bounds?\n for code in cigar: \n if code != 1:\n start += 1\n #must be in region\n if start < begin:\n continue\n if start >= ending:\n break\n \n if code == errId:\n curSize += 1\n if curSize != 0 and code != errId:\n if curSize >= args.minIndelErr:\n readHasErr = True\n mySize += curSize\n elif curSize > 1:#1bp errors will inflate\n extraSize += curSize\n curSize = 0\n \n\n if readHasErr and mySize >= args.minIndelSize:\n nReadsErr += 1\n totSizes.append(mySize + extraSize)\n strandCnt[i.is_reverse] += 1\n \n spot.tags[\"strandCnt\"] = \"%d,%d\" % (strandCnt[False], strandCnt[True])\n if len(totSizes) == 0:\n logging.debug(\"no %s found!? 
%s\" % (errLab, str(spot)))\n return True # true you should filter\n \n if len(totSizes) < max(math.ceil(coverage * args.minIndelPct), args.minErrReads):\n logging.debug(\"not large cnt %s found %s \" % (errLab, str(spot)))\n return True\n \n totSizes.sort()\n totSizes = numpy.array(totSizes)\n mean = totSizes.mean()\n median = numpy.percentile(totSizes, 50)\n firstQ = numpy.percentile(totSizes, 25)\n thirdQ = numpy.percentile(totSizes, 75)\n \n logging.debug(\"PassFilt %s\" % (str(spot))) \n logging.debug(\"cov %d\" % coverage )\n logging.debug(\"size %d %s\" % (len(totSizes), str(totSizes)))\n logging.debug(\"mean %d\" % mean )\n logging.debug(\"median %d\" % median)\n logging.debug(\"firstQ %d\" % firstQ)\n logging.debug(\"thirdQ %d\" % thirdQ)\n \n spot.tags[\"szCount\"] = int(nReadsErr)\n spot.tags[\"szMean\"] = int(mean)\n spot.tags[\"szMedian\"] = int(median)\n spot.tags[\"sz1stQ\"] = int(firstQ)\n spot.tags[\"sz3rdQ\"] = int(thirdQ)\n return False", "def map_reads(SRA):\n\n #1. bowtie to rRNA\n print(\"Bowtie alignement on contaminant RNA...\")\n cmd_bowtie = 'bowtie'+ ' ' + '-a' + ' ' + '-p6' + ' ' + '-S' + ' ' + '--un' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + BOWTIE_DIR+'/rRNA' + ' ' + TMP_DIR+SRA+'_trimmed.fastq' + ' ' + '|' + ' ' + 'samtools view -@ 6 -bS' + ' ' + '>' + TMP_DIR+SRA+'_trimmed_rrna.bam'\n output = subprocess.run(cmd_bowtie, shell=True)\n\n # 2. STAR to ref genome\n print(\"STAR alignement to yeast genome...\")\n cmd_STAR = 'STAR --outSAMtype BAM Unsorted --runThreadN 6 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + TMP_DIR+SRA+'_STAR_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n # 3. 
Samtools keep uniquely mapped reads and sort\n print(\"Samtools to keep uniquely mapped reads and sort...\")\n cmd_samtools1 = 'samtools view -@ 6 -b -q 255 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam' + ' ' + TMP_DIR+SRA+'_STAR_Aligned.out.bam'\n output = subprocess.run(cmd_samtools1, shell=True)\n\n cmd_samtools2 = 'samtools sort -@ 6 -o' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads.bam'\n output = subprocess.run(cmd_samtools2, shell=True)\n\n cmd_samtools3 = 'samtools index' + ' ' + TMP_DIR+SRA+'_yeast_uniqueMapped_reads_sorted.bam'\n output = subprocess.run(cmd_samtools3, shell=True)", "def bamfile():\n data_path = pkg_resources.resource_filename(\"transposonmapper\", \"data_files/files4test/\")\n filename = \"SRR062634.filt_trimmed.sorted.bam\"\n bamfile = os.path.join(data_path, filename)\n \n return bamfile", "def parse_sam(in_file, out_file, read_type , strand):\n out_handle = open(out_file , 'a')\n if strand == 'watson':\n nt = ['C']\n else:\n nt = ['G']\n count = 0\n # print 'Warning, only works for forward mapped reads'\n mismatch = 0\n clip_count_total = 0\n for line in open(in_file, 'r'):\n modulo_line_no = count % 2\n #alternates between 0 and 1\n if line.startswith('@'):\n continue\n split_line = line.rstrip('\\n').split('\\t')\n #skip read pairs with improper flags.\n #TODO: do this filtering in mark_PCR_duplicates or elsewhere with access to pysam.\n if split_line[1] not in ['0', '99', '147']:\n mismatch += 1\n count += 1\n # continue\n char_count = ''\n clip_count = 0\n for char in split_line[5]:\n if not char.isalpha():\n char_count += char\n elif char == 'S':\n clip_count += int(char_count)\n else:\n char_count = ''\n if clip_count > 6:\n clip_count_total += 1\n count += 1\n # continue\n header = split_line[0].split('|')\n #meth_post list can be present for both R1 and R2 the last Samtools tag added should be the RN:Z: tag, look\n #to the right of this tag only\n meth_pos_list = split_line[0][split_line[0].rindex(':Z:'):].split('|')[1:]\n out_line = [header[0]]\n out_line += split_line[1:9]\n seq = list(split_line[9])\n try:\n meth_pos = [int(n) for n in meth_pos_list[-modulo_line_no].split(',')]\n for n in meth_pos:\n if n >= len(seq):\n break\n if seq[n] not in ['T','A']:\n break\n seq[n] = nt[-modulo_line_no]\n except ValueError:\n pass\n out_line += [''.join(seq)]\n out_line += split_line[10:]\n for item in header[1:]:\n if ':' in item and item not in out_line:\n out_line.append(item)\n # out_line += header[3:6]\n out_handle.write('\\t'.join(out_line) + '\\n')\n count += 1\n print('%s mismatches out of %s' % (mismatch, count))\n print('%s reads out of %s soft clipped more than 5' % (clip_count_total, count))", "def bam_and_merge(bam_file, vars_to_group, fq_threshold, min_reads, filter_type, fdict):\n if bam_file is not None:\n print('Processing bam: {}'.format(bam_file))\n vars_to_group, _ = inspect_bam(bam_file, vars_to_group, fq_threshold, min_reads, filter_type)\n return PickleMe(get_merged_records(vars_to_group, fdict), vars_to_group)", "def _read_bam(bam_fn, precursors):\n mode = \"r\" if bam_fn.endswith(\"sam\") else \"rb\"\n handle = pysam.Samfile(bam_fn, mode)\n reads = defaultdict(realign)\n for line in handle:\n chrom = handle.getrname(line.reference_id)\n # print(\"%s %s %s %s\" % (line.query_name, line.reference_start, line.query_sequence, chrom))\n query_name = line.query_name\n if query_name not in reads:\n reads[query_name].sequence = line.query_sequence\n iso = isomir()\n 
iso.align = line\n iso.start = line.reference_start\n iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)\n reads[query_name].set_precursor(chrom, iso)\n\n reads = _clean_hits(reads)\n return reads", "def process_bam(bam, output_dp):\r\n bam_fn = os.path.basename(bam)\r\n coverage_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_coverage.csv'))\r\n reads_fp = os.path.join(output_dp, bam_fn.replace('.bam', '_reads.csv'))\r\n\r\n samfile = pysam.AlignmentFile(bam, \"rb\")\r\n contigs_size = get_ref_lens(samfile)\r\n coverage = coverage_vectors(contigs_size)\r\n\r\n read_output = open(reads_fp, 'w+')\r\n read_output.write('read_length,mapq,start,end,reference')\r\n for l in samfile.fetch():\r\n if l.mapq < 10: continue\r\n if l.rlen < 50: continue\r\n read_output.write('\\n{},{},{},{},{}'.format(l.rlen, l.mapq,\r\n l.reference_start, l.reference_end, samfile.getrname(l.reference_id).split(',')[0]))\r\n coverage[samfile.getrname(l.tid)][\"nb_reads\"] += 1\r\n coverage[samfile.getrname(l.reference_id)][\"positions\"][l.reference_start:l.reference_end] = 1\r\n coverage[samfile.getrname(l.tid)][\"nb_bp\"] += l.rlen\r\n read_output.close()\r\n\r\n coverage_prop = {}\r\n for contig,vector in coverage.items():\r\n if vector['nb_bp'] == 0: # no reads, so output blank file\r\n output = pandas.DataFrame()\r\n output.to_csv(coverage_fp, index=False)\r\n continue\r\n temp = {}\r\n for i in contigs_size:\r\n if contig == i[\"Seq\"]:\r\n temp[\"length\"] = i[\"Length\"]\r\n temp[\"ratio_covered\"] = np.sum(vector[\"positions\"])/float(len(vector[\"positions\"]))\r\n temp[\"number_reads\"] = vector[\"nb_reads\"]\r\n temp[\"number_bp\"] = vector[\"nb_bp\"]\r\n if vector[\"nb_reads\"] > 0 :\r\n coverage_prop[contig] = temp\r\n\r\n output = pandas.DataFrame(coverage_prop).transpose()\r\n output = output.sort_values(['number_bp','ratio_covered'],ascending=[0,0])\r\n output.to_csv(coverage_fp, index=False)\r\n samfile.close()\r\n return coverage_fp, reads_fp", "def inspect_bam(bam_file, variant_dict, threshold, min_reads, filter_type='pagb'):\n sam = pysam.AlignmentFile(bam_file, 'rb') # pylint: disable=no-member\n append_chr = check_for_chr(sam)\n valid_variants = {}\n rejected_variants = []\n for key in variant_dict:\n vargroup = VariantGroup(key, variant_dict[key])\n if len(vargroup.variant_list) > MAX_GROUPED: # TODO: split into smaller groups\n print('Skipping variant group {} ({} of {})\\nSize: {} Chrom: {} Start: {} End: {}'\n ''.format(key, variant_dict.keys().index(key), len(variant_dict.keys()), len(vargroup.variant_list),\n vargroup.chrom, vargroup.pos, vargroup.end))\n rejected_variants.extend(vargroup.variant_list)\n continue\n vargroup.parse_sam(sam, append_chr)\n if filter_type == 'pab': # Probability of A and B\n vargroup.set_filter_fq_pab(threshold)\n if filter_type in ['pagb', 'max_pagb']: # Probability of A given B.\n vargroup.set_filter_fq_pagb(threshold, 'm' in filter_type)\n vargroup.add_filter_min_reads(min_reads)\n\n if vargroup.exists:\n split_vargroups, reject = vargroup.split_and_trim()\n if reject:\n # print('Trimming {} from group {}'.format(len(reject), key))\n rejected_variants.extend(reject)\n for k, s_vargroup in enumerate(split_vargroups):\n s_key = '{}_{}'.format(key, k)\n if not s_vargroup.variant_list:\n pass\n # print('No variant groups from {} passed the threshold of {}'.format(key, threshold))\n else:\n valid_variants[s_key] = s_vargroup.variant_list\n else:\n rejected_variants.extend(vargroup.variant_list)\n\n return 
valid_variants, rejected_variants", "def main (bam, chr, start, end):\n\n\tbamfile = pysam.Samfile(bam, \"rb\")\n\tout_plus = pysam.Samfile(\"out_plus.bam\", \"wb\", template=bamfile)\n\tout_minus = pysam.Samfile(\"out_minus.bam\", \"wb\", template=bamfile)\n\n\tfor alignedread in bamfile.fetch(chr, start, end):\n\t\tif alignedread.pos >= start and alignedread.aend <= end:\n\t\t\tstrand = \"\"\n\t\t\tfor t in alignedread.tags:\n\t\t\t\tif t[0] == \"XS\":\n\t\t\t\t\tstrand = t[1]\n\t\t\t\n\t\t\tif strand == \"+\":\n\t\t\t\tout_plus.write(alignedread)\n\n\t\t\telif strand == \"-\":\n\t\t\t\tout_minus.write(alignedread)", "def map_STAR(args):\n for type in ['joined', 'merged']:\n for strand in ['watson', 'crick']:\n if strand == 'watson':\n n = 1\n else:\n n = 3\n STAR_index_dir = os.path.join(args.output_dir,'STAR_%s_%s'%(type, strand))\n cmd = \"STAR --runThreadN %s --genomeDir %s\"%(args.threads, STAR_index_dir)\n\n if type == 'merged':\n cmd += \" --readFilesIn %s\" % vars(args)['%s_%s' % (strand, type)]\n else:\n #TODO: define custom parameters for PE reads\n cmd += \" --readFilesIn %s \" % vars(args)['%s_%s_r1' % (strand, type)]\n cmd += \" %s\" % vars(args)['%s_%s_r2' % (strand, type)]\n\n cmd += \" --outSAMattributes NM MD AS --outSAMtype SAM\"\n cmd += \" --outFileNamePrefix %s\" % (os.path.join(args.output_dir,'%s_%s'%(strand, type)))\n cmd += \" --outReadsUnmapped Fastx\" #output of unmapped reads for inspection\n cmd += \" --scoreGapATAC -2 --scoreGapNoncan -2\"\n #outFilterScoreMinOverLread : float: sam as outFilterMatchNmin, but normalized to the read length (sum of mates’ lengths for paired-end reads)\n #outFilterMatchNminOverLread: float: same as outFilterScoreMin, but normalized to read length (sum of mates’ lengths for paired-end reads)\n\n # –outFilterMultimapNmax 1 int: maximum number of loci the read is allowed to map to. 
Alignments (all of\n # them) will be output only if the read maps to no more loci than this value.\n cmd += \" --outFilterMismatchNoverLmax 0.95\"\n # TODO: implement --alignEndsType endtoend mapping after joined reads are merged\n cmd += \"--outFilterMatchNminOverLread 0.9 --scoreGap -4 \" \\\n \" --alignEndsType EndToEnd\" \\\n \" --alignSoftClipAtReferenceEnds No\" \\\n \" --outSAMorder PairedKeepInputOrder\" \\\n \" --outFilterMultimapNmax 1\" \\\n \" --scoreInsOpen -1\" \\\n #make sure we have a bam file sorted by name\n if args.extraflags:\n cmd += ' %s' % args.extraflags\n log = \"run STAR for % strand on %s reads\"%(strand, type)\n run_subprocess([cmd],args, log)\n log = \"write final log of STAR to normal log\"\n cmd = \"cat %s \" % os.path.join(args.output_dir, '%s_%s' % (strand, type) + 'Log.final.out')\n run_subprocess([cmd], args, log)\n return args", "def test_filter_mapping_file_from_mapping_f(self):\n actual = filter_mapping_file_from_mapping_f(self.tutorial_mapping_f,[\"PC.354\",\"PC.355\"])\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\n self.assertEqual(actual,expected)", "def test_filter_mapping_file_from_mapping_f(self):\r\n actual = filter_mapping_file_from_mapping_f(\r\n self.tutorial_mapping_f, [\"PC.354\", \"PC.355\"])\r\n expected = \"\"\"#SampleID\tBarcodeSequence\tLinkerPrimerSequence\tTreatment\tDOB\tDescription\r\nPC.354\tAGCACGAGCCTA\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._354\r\nPC.355\tAACTCGTCGATG\tYATGCTGCCTCCCGTAGGAGT\tControl\t20061218\tControl_mouse_I.D._355\"\"\"\r\n self.assertEqual(actual, expected)", "def gen_unaligned_bam(bam_filename, analysis_id, metadata, specimen_dict, work_dir, output_dir, num_processes=4, logger=default_logger ):\n\n read_group_sam = os.path.join(output_dir, 'rg_header.sam')\n\n #get the read groups from the original sample level BAM\n exit_code = os.system(\"samtools view -H %s | grep \\\"@RG\\\" > %s\" %(bam_filename, read_group_sam))\n if exit_code != 0:\n print \"Failure in bam splitting during read group extraction from %s\" % bam_filename\n return 1\n \n\n rg_file = open(read_group_sam, \"r\")\n\n #create the read group fastqs\n try:\n cmd = \"bamtofastq outputperreadgroup=1 gz=1 level=1 inputbuffersize=2097152000 tryoq=1 outputdir=%s T=`mktemp -p %s bamtofastq_XXXXXXXXX` < %s\" %(work_dir, work_dir, bam_filename)\n logger.info(\"Running %s\" % cmd)\n subprocess.check_call(cmd, shell=True)\n except:\n print \"Failure in bam splitting\"\n return 1\n \n\n if header_utils.is_valid_analysis(metadata) or FORCE_RUN:\n pool = multiprocessing.Pool(processes=num_processes)\n results = []\n for line in rg_file:\n rg_dict = header_utils.get_read_group_info(line)\n header = header_utils.create_header(output_dir, metadata, rg_dict, specimen_dict)\n r = pool.apply_async(process_rg, (analysis_id, rg_dict, header, work_dir, output_dir))\n results.append(r)\n\n rg_file.close()\n \n out = []\n for r in results:\n out.append(r.get())\n \n utils.clean_up_dir(output_dir)\n if not all( a[0] for a in out ):\n #one of the read group bamtofastq failed\n return 1\n with open(os.path.join(output_dir, \"results.list\"), \"w\") as out_handle:\n for ok, file_name in out:\n out_handle.write(\"%s\\n\" % (file_name))\n\n else:\n print \"Invalid header/metadata for BAM\" % bam_filename\n return 1\n 
return 0", "def main(argv):\r\n\r\n mapperAbbrs = {'C':'cushaw', 'S':'shrimp', 'B':'bfast', 'W':'bwa-mem', 'N':'novoalign'}\r\n\r\n #Dictionary of commands to use for various mappers - configure your mapper commands here\r\n aligner_dict = {\r\n\t'B,CS,S':[\r\n\t\t'bfast fasta2brg -f DDiFasta -A 0',\r\n\t\t'bfast fasta2brg -f DDiFasta -A 1',\r\n\t\t'bfast index -f DDiFasta -m 1111111111111111111111 -w 14 -i 1 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111110100111110011111111111 -w 14 -i 2 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 10111111011001100011111000111111 -w 14 -i 3 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111100101111000001100011111011 -w 14 -i 4 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111110001111110011111111 -w 14 -i 5 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 11111011010011000011000110011111111 -w 14 -i 6 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1111111111110011101111111 -w 14 -i 7 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111011000011111111001111011111 -w 14 -i 8 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 1110110001011010011100101111101111 -w 14 -i 9 -A 1 -n DDiProcs',\r\n\t\t'bfast index -f DDiFasta -m 111111001000110001011100110001100011111 -w 14 -i 10 -A 1 -n DDiProcs',\r\n\t\t'bfast match -f DDiFasta -A 1 -i 1-10 -k 18 -K 100000 -w 0 -t -n DDiProcs -Q 100000 -l -r DDiFastq1 > DDiBMF',\r\n\t\t'bfast localalign -f DDiFasta -m DDiBMF -A 1 -n DDiProcs -U -q 20 -Q 100000 -t > DDiBAF',\r\n\t\t'rm DDiBMF',\r\n\t\t'bfast postprocess -f DDiFasta -i DDiBAF -o DDiAligned -O 1 -a 3 -z -n DDiProcs -q 20 -Q 100000 -t > DDiSAM',\r\n\t\t'rm DDiBAF'\r\n\t ],\r\n 'C,CS,S':[\r\n 'cushaw3 index DDiFasta -c -p bwtindex',\r\n 'cushaw3 calign -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,S':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -f DDiFastq1 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'C,NT,P':[\r\n 'cushaw3 index DDiFasta -p bwtindex',\r\n 'cushaw3 align -r bwtindex -q DDiFastq1 DDiFastq2 -t DDiProcs -multi 1 CushawOpts -o DDiSAM'\r\n ],\r\n 'S,CS,S':[\r\n 'gmapper-cs -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,S':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts DDiFastq1 DDiFasta > DDiSAM'\r\n ],\r\n 'S,NT,P':[\r\n 'gmapper-ls -N DDiProcs -Q -o 1 --strata --all-contigs ShrimpOpts -1 DDiFastq1 -2 DDiFastq2 DDiFasta > DDiSAM'\r\n ],\r\n\t'W,NT,S':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 > DDiSAM'\r\n ],\r\n\t'W,NT,P':[\r\n 'bwa index DDiFasta',\r\n\t 'bwa mem -t DDiProcs BwaMemOpts DDiFasta DDiFastq1 DDiFastq2 > DDiSAM'\r\n ],\r\n\t'N,NT,S':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM -d DDiNIX -f DDiFastq1 > DDiSAM'\r\n ],\r\n\t'N,NT,P':[\r\n\t 'novoindex DDiNIX DDiFasta',\r\n 'novoalign -r Random -n 100 -o SAM NovoOpts -d DDiNIX -f DDiFastq1 DDiFastq2 > DDiSAM'\r\n ]\r\n }\r\n\r\n #Arguments that are required\r\n required = ['fastqFiles', 'mappingRefSeqFiles', 'outputDir']\r\n\r\n parser = argparse.ArgumentParser(description='Iteratively calls 3rd party mappers and DDiMap executable')\r\n\r\n #Argument options\r\n parser.add_argument('-q', type=str, metavar='file', nargs='+', help='list of fastq files', dest='fastqFiles')\r\n parser.add_argument('-r', type=str, metavar='file', nargs='+', help='list of files to use for reference sequences', 
dest='mappingRefSeqFiles')\r\n parser.add_argument('-j', type=str, metavar='file', nargs='+', help='list of files to use for junctions', dest='junctionRefSeqFiles')\r\n parser.add_argument('-o', type=str, metavar='directory', help='output directory', dest='outputDir')\r\n \r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('-p', '--paired', action='store_true', help='fastq files have paired ends', dest='pairedEnds')\r\n group.add_argument('-s', '--single', action='store_false', help='fastq files have single ends', dest='pairedEnds')\r\n parser.add_argument('-n', type=int, metavar='cpus', help='number of processors to use', dest='nProcs')\r\n parser.add_argument('-c', type=str, metavar='config_file', help='location of config file', dest='configFile')\r\n parser.add_argument('-v', action='store_true', help='turns on verbosity', dest='verbose')\r\n\r\n parser.add_argument('--aligner_order', type=str, metavar='{'+','.join(mapperAbbrs.keys())+'}', help='mapper sequence as a string. ie CSC', dest='alignerOrder')\r\n parser.add_argument('--first_iter', metavar='n', type=int, help='first iteration', dest='firstIter')\r\n parser.add_argument('--max_iters', metavar='n', type=int, help='maximum iterations', dest='maxIters')\r\n parser.add_argument('--read_length', metavar='n', type=int, help='read length', dest='readLength')\r\n parser.add_argument('--read_type', type=str, help='read type', choices=['CS','NT'], dest='readType')\r\n parser.add_argument('--req_frag_conv', help='require frags to converge as well as SNVs', action='store_true', dest='reqFragConv')\r\n parser.add_argument('--no-req_frag_conv', help='does not require frags to converge as well as SNVs', action='store_false', dest='reqFragConv')\r\n\r\n parser.add_argument('--frag_maker_thresh',type=float, metavar='threshold', help='verified frag maker threshold', dest='fragMakerThresh')\r\n parser.add_argument('--frag_thresh', type=float, metavar='threshold', help='unverified frag maker threshold', dest='fragThresh')\r\n parser.add_argument('--min_absolute_cover', type=int, metavar='n', help='minimum absolute cover', dest='minAbsoluteCover')\r\n parser.add_argument('--snv_thresh', type=float, metavar='threshold', help='SNV threshold', dest='SNVthresh')\r\n parser.add_argument('--snv_type2_thresh', type=float, metavar='threshold', help='SNV type 2 threshold', dest='SNVtype2thresh')\r\n parser.add_argument('--snv_type3_thresh', type=float, metavar='threshold', help='SNV type 3 threshold', dest='SNVtype3thresh')\r\n parser.add_argument('--roa_size', type=int, metavar='size', help='Size to use for region of analysis in DDiMAP', dest='roaSize')\r\n\r\n group = parser.add_mutually_exclusive_group()\r\n group.add_argument('--use_DI', action='store_true', help='use reads mapped with deletion and insertion', dest='useDI')\r\n group.add_argument('--no-use_DI', action='store_false', help='do not use reads mapped with deletion and insertion', dest='useDI')\r\n\r\n parser.add_argument('--cushaw_opts', type=str, metavar=\"'options'\", help='cushaw specific options', dest='cushawOpts')\r\n parser.add_argument('--shrimp_opts', type=str, metavar=\"'options'\", help='shrimp specific options', dest='shrimpOpts')\r\n parser.add_argument('--bwamem_opts', type=str, metavar=\"'options'\", help='bwa-mem specific options', dest='bwaMemOpts')\r\n parser.add_argument('--novo_opts', type=str, metavar=\"'options'\", help='novoalign specific options', dest='novoOpts')\r\n\r\n\r\n #Parse args and check for config file\r\n args = 
parser.parse_args()\r\n if args.configFile:\r\n configFile = args.configFile\r\n if not path.isfile(configFile):\r\n print 'config file specified, but not found'\r\n exit(1)\r\n else:\r\n configFile = 'DDiMap.cfg'\r\n\r\n #Read in settings from config file\r\n Settings = read_config(configFile)\r\n\r\n # Loop over each section and replace values with those passed in on command line. \r\n # Also create a local variable that matches the keys in the settings dictionary.\r\n\r\n for section in Settings.keys():\r\n for key in Settings[section].keys():\r\n if getattr(args, key):\r\n Settings[section][key] = getattr(args, key)\r\n exec '%s = Settings[section][key]' % key\r\n if key in required and not Settings[section][key]:\r\n print '%s not specified on command line or in config file. Aborting...' % key\r\n print Settings[section][key]\r\n parser.print_help()\r\n exit(1)\r\n if (type(Settings[section][key]) == list):\r\n Settings[section][key] = ', '.join(Settings[section][key])\r\n\r\n if useDI: # reads with CIGARs containing both I and D are processed\r\n kFlag='-k'\r\n else: # reads with CIGARs containing both I and D are not processed\r\n kFlag=''\r\n\r\n if pairedEnds:\r\n pair_str='P'\r\n else:\r\n pair_str='S'\r\n\r\n # do the work - set up for the iteration\r\n aligners = list(alignerOrder)\r\n iterMin = len(aligners)\r\n iterMax = max(maxIters, iterMin); # always do as many iters as are in alignerOrder string\r\n aligners = aligners + list(repeat(aligners[-1], iterMax - iterMin)) # define the aligner ID sequence to be used over the iterations\r\n\r\n\r\n # Make paths absolute\r\n fastqFiles = [path.abspath(x) for x in fastqFiles]\r\n mappingRefSeqFiles = [path.abspath(x) for x in mappingRefSeqFiles]\r\n junctionRefSeqFiles = [path.abspath(x) for x in junctionRefSeqFiles]\r\n outputDir = path.abspath(outputDir) + '/'\r\n\r\n # Make sure the output directory exists\r\n\r\n if not path.isdir(outputDir):\r\n makedirs(outputDir)\r\n\r\n # Write configuration file in outputDir\r\n write_config(outputDir, Settings)\r\n\r\n # INITIAL VALUES OF LOOP CONTROL PARAMETERS\r\n converged = False\r\n prevFragList = [] # this will be replaced by counts of fragments created for each baseline refernce sequence\r\n prevSNVList = [] # this will be replaced by counts of SNV candidates found for each baseline reference sequence\r\n\r\n thisIter = firstIter\r\n\r\n\r\n for RefSeqFile in fastqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find fastqFile at ' + RefSeqFile\r\n exit(1)\r\n\r\n # Delete old enhanced fast file if present. 
It should never be...\r\n\r\n enhancedFastaFile = outputDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): # see if one is already here - need to zap it\r\n remove(enhancedFastaFile) # remove if present because fastawrite appends to existing files\r\n output_handle = open(enhancedFastaFile, 'a')\r\n\r\n # Add reference sequences to file with _Ref tag\r\n RefSeqs=[]\r\n for RefSeqFile in mappingRefSeqFiles:\r\n\tprint 'ref seq file = ' + RefSeqFile\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n SeqIO.write(formattedRefSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n \r\n # Create junctions if they are needed and then add to ref seq file as mapping targets for chimeric reads\r\n RefSeqs=[]\r\n for RefSeqFile in junctionRefSeqFiles:\r\n if not path.isfile(RefSeqFile):\r\n print 'Unable to find RefSeqFile at ' + RefSeqFile\r\n exit(1)\r\n RefSeqs = RefSeqs + list(SeqIO.parse(RefSeqFile, 'fasta'))\r\n if (RefSeqs):\r\n formattedRefSeqs = add_ref_tag(RefSeqs)\r\n junctionSeqs = make_junctions(formattedRefSeqs,readLength);\r\n SeqIO.write(junctionSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n\r\n # allows restarts\r\n if thisIter > 1: # there is no previous iteration, so start fresh\r\n prevWorkingDir = outputDir + ('Gen%d/' % (thisIter-1))\r\n for i in range(1, thisIter):\r\n prevWorkingDir = '%sGen%d/' % (outputDir, i) \r\n fragFile = prevWorkingDir + 'fasta.fa'\r\n snvFile = prevWorkingDir + 'snv.csv'\r\n ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv)\r\n\r\n\r\n while not converged and thisIter <= iterMax:\r\n \r\n print '======= Iteration %d of %d ========' % (thisIter, iterMax)\r\n\r\n # creates working dir if not present\r\n thisWorkingDir = outputDir + ('Gen%d/' % thisIter)\r\n if path.isdir(thisWorkingDir):\r\n rmtree(thisWorkingDir)\r\n makedirs(thisWorkingDir)\r\n \r\n # Delete old enhanced fast file if present. 
It should never be...\r\n enhancedFastaFile = thisWorkingDir + 'refSeqEnhanced.fa'\r\n if path.isfile(enhancedFastaFile): \r\n remove(enhancedFastaFile) \r\n copyfile(outputDir + 'refSeqEnhanced.fa', enhancedFastaFile)\r\n\r\n output_handle = open(enhancedFastaFile, 'a')\r\n \r\n # Append frags from previous iteration if any (these sequences are tagged as fragments when the file is written by DDiMAP)\r\n if (thisIter > 1):\r\n prevFragFile=prevWorkingDir + '/fasta.fa'\r\n if path.isfile(prevFragFile) and path.getsize(prevFragFile) > 0:\r\n fragSeqs=list(SeqIO.parse(prevFragFile, 'fasta'))\r\n SeqIO.write(fragSeqs, output_handle, 'fasta') # modified MATLAB fastawrite to not put in extra newlines\r\n\r\n output_handle.close() \r\n\r\n # Setup variables for aligner\r\n thisAligner=aligners[thisIter-1]\r\n thisAligned='DDiMAP_%s' % thisAligner\r\n \r\n if path.isfile(thisWorkingDir + 'mapper.log'):\r\n remove(thisWorkingDir + 'mapper.log')\r\n\r\n if not ','.join([thisAligner,readType,pair_str]) in aligner_dict.keys():\r\n print mapperAbbrs[thisAligner] + ' does not support ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n\r\n # execute commands for aligner\r\n\r\n open(thisWorkingDir + 'mapper.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'mapper.log'])\r\n\r\n # set substitutions for aligner commands\r\n commandsubs={'DDiFastq1':fastqFiles[0], \r\n 'DDiProcs':nProcs, \r\n 'DDiFasta':enhancedFastaFile, \r\n 'DDiBMF':thisAligned + '.bmf', \r\n 'DDiBAF':thisAligned + '.baf', \r\n 'DDiSAM':thisAligned + '.sam',\r\n 'DDiNIX':thisAligned + '.nix', \r\n 'DDiAligned':thisAligned, \r\n 'CushawOpts':cushawOpts, \r\n 'ShrimpOpts':shrimpOpts, \r\n 'BwaMemOpts':bwaMemOpts, \r\n 'NovoOpts':novoOpts}\r\n\r\n if (len(fastqFiles) > 1):\r\n commandsubs['DDiFastq2']=fastqFiles[1]\r\n\r\n for command in aligner_dict[','.join([thisAligner,readType,pair_str])]:\r\n cmdlist=re.split('\\s*',command)\r\n #remove empty arguments and subsitute in values from commandsubs \r\n args=filter(None,[str(commandsubs[x]) if x in commandsubs.keys() else x for x in cmdlist])\r\n args=re.split('\\s*',' '.join(args)) \r\n print ' '.join(args) # output actual command\r\n if 'DDiFastq2' in args: #This hasn't been substituted because one wasn't provided\r\n print mapperAbbrs[thisAligner] + ' expects 2 fastq files for use with ' + readType + ' read type with ' + ('paired ends' if pairedEnds else 'non paired ends')\r\n exit(1)\r\n\r\n # Now we need to detect stdout redirection and do it properly using pOpen\r\n if '>' in args: \r\n i = args.index('>')\r\n outfile = args[i+1]\r\n del args[i:i+2]\r\n else:\r\n outfile = None\r\n \r\n log_file = open(thisWorkingDir + 'mapper.log', 'a')\r\n \r\n if (outfile):\r\n with open(thisWorkingDir + outfile, 'w') as output_file:\r\n a=Popen(args, cwd=thisWorkingDir, stdout=output_file, stderr=log_file)\r\n else:\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n\r\n success=a.wait()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** mapper exited with error', success\r\n print 'See ' + thisWorkingDir + 'mapper.log' + ' for more details'\r\n exit(success)\r\n\r\n if verbose:\r\n b.terminate()\r\n # Perform sam to bam conversion for DDiMap\r\n args=['samtools', 'view', '-b', '-S', '-o', thisAligned + '.bam', thisAligned + '.sam']\r\n print ' '.join(args) \r\n\r\n open(thisWorkingDir + 'samtools.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 
'samtools.log'])\r\n log_file = open(thisWorkingDir + 'samtools.log', 'w')\r\n a=Popen(args, cwd=thisWorkingDir, stderr=log_file, stdout=log_file)\r\n success=a.wait()\r\n log_file.close()\r\n if verbose:\r\n b.terminate()\r\n if not success == 0:\r\n print '*** samtools exited with error', success\r\n print 'See ' + thisWorkingDir + 'samtools.log' + ' for more details' \r\n exit(success)\r\n # remove the uncompressed sam file\r\n args=['rm', thisAligned + '.sam'];\r\n a=Popen(args, cwd=thisWorkingDir)\r\n\r\n # now run the DDiMAP code\r\n thisAlignedFile = thisWorkingDir + thisAligned + '.bam'\r\n args = (['DDiMAP', kFlag, '-r', roaSize, '-f', enhancedFastaFile, '-b', \r\n thisAlignedFile, '-c', minAbsoluteCover, '-n', fragThresh, '-a', \r\n fragMakerThresh, '-p', SNVthresh, '-s', SNVtype2thresh, '-l', \r\n SNVtype3thresh, '-o', thisWorkingDir])\r\n args = [str(x) for x in args]\r\n print ' '.join(args)\r\n open(thisWorkingDir + 'DDiMap.log', 'w').close()\r\n if verbose:\r\n b=Popen(['tail', '-F', thisWorkingDir + 'DDiMap.log'])\r\n log_file = open(thisWorkingDir + 'DDiMap.log', 'a')\r\n a = Popen(args, cwd=thisWorkingDir, stdout=log_file, stderr=log_file)\r\n success=a.wait()\r\n if verbose:\r\n b.terminate()\r\n log_file.close()\r\n if not success == 0:\r\n print '*** DDiMap exited with error', success\r\n print 'See ' + thisWorkingDir + 'DDiMap.log' + ' for more details'\r\n exit(success)\r\n \r\n # now check for convergence\r\n \r\n fragFile = thisWorkingDir + 'fasta.fa'\r\n snvFile = thisWorkingDir + 'snv.csv'\r\n \r\n # call to the convergence test matlab function\r\n # result history kept in currFrags/prevFrags and currSNVs/prevSNVs\r\n \r\n if ddimap_convergence_test(fragFile, snvFile, prevFragList, prevSNVList, reqFragConv):\r\n print 'Convergence found. 
Stopping...'\r\n break\r\n\r\n prevWorkingDir = thisWorkingDir; # all done with the previous, this will be the next iteration previous directory\r\n thisIter = thisIter+1\r\n else:\r\n print 'Failed to converge'\r\n\r\n print '%10s %10s %10s' % ('Iteration', 'nFrags', 'nSNVs')\r\n for i, (frags, snvs) in enumerate(zip(prevFragList, prevSNVList)):\r\n print '%10d %10d %10d' % (i+1, sum(frags), sum(snvs))\r\n\r\n # put final results into outputDir\r\n # make renamed copies of the final iteration result files, naming them using\r\n copyfile(thisWorkingDir+'fasta.fa',outputDir+'convergedFrags.fa')\r\n copyfile(thisWorkingDir+'dictionary.csv',outputDir+'convergedDictionary.csv')\r\n copyfile(thisWorkingDir+'snv.csv',outputDir+'convergedSNVs.csv')\r\n copyfile(thisWorkingDir+'coverage.csv',outputDir+'convergedCoverage.csv')\r\n copyfile(thisWorkingDir+'refSeqEnhanced.fa',outputDir+'convergedEnhancedRefSeqs.fa')", "def test_filter_mapping_file(self):\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\\\n ['a','b','c','d','e','f']), (self.map_headers, self.map_data))\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers, ['a']),\n (['SampleID','Description'],['a\\tx'.split('\\t')]))", "def parse_sam(rows):\n row1, row2 = rows\n mseqs = {}\n failed_list = []\n insert_list = []\n rname = row1['rname']\n qname = row1['qname']\n cigar1 = row1['cigar']\n cigar2 = row2['cigar']\n\n # filtering criteria\n reason = None\n if cigar1 == '*':\n reason = 'R1 unmapped'\n if int(row1['mapq']) < read_mapping_cutoff:\n reason = 'R1 low mapq'\n\n if cigar2 == '*':\n reason = 'R2 unmapped'\n if int(row2['mapq']) < read_mapping_cutoff:\n reason = 'R2 low mapq'\n\n genotype1, genotype2 = None, None\n try:\n genotype1 = row1['rname'].split('-')[1][0]\n genotype2 = row2['rname'].split('-')[1][0]\n except:\n reason = 'discordant map'\n pass\n\n if genotype1 != genotype2:\n reason = 'map conflict'\n\n if reason:\n failed_list.append({'qname': qname,\n 'rname1': row1['rname'],\n 'rname2': row2['rname'],\n 'reason': reason})\n else:\n pos1 = int(row1['pos'])-1 # convert 1-index to 0-index\n _, seq1, qual1, inserts = apply_cigar(cigar1, row1['seq'], row1['qual'])\n \n # report insertions relative to sample consensus\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row1['flag']) else 'R',\n 'refname': rname,\n 'pos': pos1+left,\n 'insert': iseq,\n 'qual': iqual})\n \n seq1 = '-'*pos1 + seq1 # pad sequence on left\n qual1 = '!'*pos1 + qual1 # assign lowest quality to gap prefix so it does not override mate\n \n \n # now process the mate\n pos2 = int(row2['pos'])-1 # convert 1-index to 0-index\n _, seq2, qual2, inserts = apply_cigar(cigar2, row2['seq'], row2['qual'])\n for left, (iseq, iqual) in inserts.iteritems():\n insert_list.append({'qname': qname,\n 'fwd_rev': 'F' if is_first_read(row2['flag']) else 'R',\n 'refname': rname,\n 'pos': pos2+left,\n 'insert': iseq,\n 'qual': iqual})\n seq2 = '-'*pos2 + seq2\n qual2 = '!'*pos2 + qual2\n \n # merge reads\n for qcut in sam2aln_q_cutoffs:\n mseq = merge_pairs(seq1, seq2, qual1, qual2, qcut)\n prop_N = mseq.count('N') / float(len(mseq.strip('-')))\n if prop_N > max_prop_N:\n # fail read pair\n failed_list.append({'qname': qname,\n 'reason': 'merge failure'})\n continue\n mseqs[qcut] = mseq\n\n return rname, mseqs, insert_list, failed_list", "def bam_output(args):\n\n for strand in ['watson', 'crick']:\n merged_sam = os.path.join(args.output_dir, 
'%s_mergedAligned.out.sam' % strand)\n joined_sam = os.path.join(args.output_dir, '%s_joinedAligned.out.sam' % strand)\n out_sam = tempfile.NamedTemporaryFile(prefix=strand, suffix='.sam', dir=args.output_dir)\n #rewrite sam file merged and joined for watson and crick\n parse_sam(merged_sam, out_sam.name, 'merged', strand)\n #TODO: determine why joined reads have more soft-clips or single read matches\n parse_sam(joined_sam, out_sam.name, 'joined', strand)\n #convert to sorted and indexed bam\n cmd = 'cat %s %s |samtools view -@ 4 -Shb |sambamba sort -m 4GB --tmpdir %s -t %s -o %s /dev/stdin'%(args.header,\n out_sam.name,args.tmpdir, args.threads,\n os.path.join(args.output_dir,'%s.bam' % strand) )\n log = \"make sorted bam file\"\n run_subprocess([cmd], args, log)\n out_sam.close()\n return args", "def main(argv=None):\n\n if not argv:\n argv = sys.argv\n\n # setup command line parser\n parser = E.ArgumentParser(description=__doc__)\n\n parser.add_argument(\"--version\", action='version', version=\"1.0\")\n\n parser.add_argument(\"-m\", \"--merge-pairs\", dest=\"merge_pairs\",\n action=\"store_true\",\n help=\"merge paired-ended reads and output interval \"\n \"for entire fragment. \")\n\n parser.add_argument(\"--max-insert-size\", dest=\"max_insert_size\", type=int,\n help=\"only merge paired-end reads if they are less than \"\n \"# bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--min-insert-size\", dest=\"min_insert_size\", type=int,\n help=\"only merge paired-end reads if they are at \"\n \"least # bases apart. \"\n \" 0 turns off this filter. \")\n\n parser.add_argument(\"--bed-format\", dest=\"bed_format\", type=str,\n choices=('3', '4', '5', '6'),\n help=\"bed format to output. \")\n\n parser.set_defaults(\n region=None,\n call_peaks=None,\n merge_pairs=None,\n min_insert_size=0,\n max_insert_size=0,\n bed_format='6',\n )\n\n (args, unknown) = E.start(parser, argv=argv, unknowns=True)\n\n if len(unknown) == 0:\n unknown.append(\"-\")\n\n samfile = pysam.AlignmentFile(unknown[0], \"rb\")\n\n args.bed_format = int(args.bed_format)\n\n if args.merge_pairs is not None:\n counter = merge_pairs(samfile,\n args.stdout,\n min_insert_size=args.min_insert_size,\n max_insert_size=args.max_insert_size,\n bed_format=args.bed_format)\n\n E.info(\"category\\tcounts\\n%s\\n\" % counter.asTable())\n\n else:\n # use until_eof. 
Files from stdin have no index\n it = samfile.fetch(until_eof=True)\n\n # more comfortable cigar parsing will\n # come with the next pysam release\n BAM_CMATCH = 0\n BAM_CDEL = 2\n BAM_CREF_SKIP = 3\n take = (BAM_CMATCH, BAM_CDEL, BAM_CREF_SKIP)\n outfile = args.stdout\n\n for read in it:\n if read.is_unmapped:\n continue\n\n t = 0\n for op, l in read.cigar:\n if op in take:\n t += l\n\n if read.is_reverse:\n strand = \"-\"\n else:\n strand = \"+\"\n outfile.write(\"%s\\t%d\\t%d\\t%s\\t%d\\t%c\\n\" %\n (read.reference_name,\n read.pos,\n read.pos + t,\n read.qname,\n read.mapq,\n strand))\n\n E.stop()", "def gen_unaligned_bam(bam_filename, analysis_id, metadata, specimen_dict, work_dir, output_dir, num_processes=4, logger=default_logger ):\n\n read_group_sam = os.path.join(output_dir, 'rg_header.sam')\n\n #get the read groups from the original sample level BAM\n exit_code = os.system(\"samtools view -H %s | grep \\\"@RG\\\" > %s\" %(bam_filename, read_group_sam))\n if exit_code != 0:\n print \"Failure in bam splitting during read group extraction from %s\" % bam_filename\n return 1\n\n\n rg_file = open(read_group_sam, \"r\")\n\n #create the read group fastqs\n try:\n cmd = \"bamtofastq outputperreadgroup=1 gz=1 level=1 inputbuffersize=2097152000 tryoq=1 outputdir=%s T=`mktemp -p %s bamtofastq_XXXXXXXXX` < %s\" %(work_dir, work_dir, bam_filename)\n logger.info(\"Running %s\" % cmd)\n subprocess.check_call(cmd, shell=True)\n except:\n print \"Failure in bam splitting\"\n return 1\n\n\n if header_utils.is_valid_analysis(metadata) or FORCE_RUN:\n pool = multiprocessing.Pool(processes=num_processes)\n results = []\n for line in rg_file:\n rg_dict = header_utils.get_read_group_info(line)\n header = header_utils.create_header(output_dir, metadata, rg_dict, specimen_dict)\n r = pool.apply_async(process_rg, (analysis_id, rg_dict, header, work_dir, output_dir))\n results.append(r)\n\n rg_file.close()\n\n out = []\n for r in results:\n out.append(r.get())\n\n utils.clean_up_dir(output_dir)\n if not all( a[0] for a in out ):\n #one of the read group bamtofastq failed\n return 1\n with open(os.path.join(output_dir, \"results.list\"), \"w\") as out_handle:\n for ok, file_name in out:\n out_handle.write(\"%s\\n\" % (file_name))\n\n else:\n print \"Invalid header/metadata for BAM\" % bam_filename\n return 1\n return 0", "def sam2bamBedpe(sam, mapq=10):\n n = os.path.splitext(sam)[0]\n bam = n + \".bam\"\n bedpeAll = n + \"_all.bedpe\"\n bedpeUni = n + \"_unique.bedpe\"\n #sam to bam, filtering mapq\n samview = \"samtools view -b -F 4 -@ 2 -q {mapq} -o {bam} {sam}\".format(\n mapq=mapq, bam=bam, sam=sam)\n #sort by read name\n samsort = \"samtools sort -n -@ 2 {bam} -T {pre} -o {bam}\".format(\n bam=bam, pre=bam.replace(\".bam\", \"\"))\n rmsam = \"rm %s\" % (sam)\n cmds = [samview, samsort, rmsam]\n callSys(cmds, logger)\n bam2bedpe = \"bamToBed -bedpe -i {bam} > {bedpe}\".format(bam=bam,\n bedpe=bedpeAll)\n logger.info(bam2bedpe)\n stat, output = subprocess.getstatusoutput(bam2bedpe)\n getUniqueBedpe(bedpeAll, bedpeUni)\n cmd = \"gzip %s %s\" % (bedpeAll, bedpeUni)\n callSys([cmd], logger)\n return bedpeAll + \".gz\"", "def run_multimapping(SRA):\n\n if not os.path.exists(\"TMP/ambiguous_reads/\"):\n os.mkdir(\"TMP/ambiguous_reads/\")\n\n cmd_STAR = 'STAR --outSAMtype BAM SortedByCoordinate --runThreadN 8 --winAnchorMultimapNmax 200 --seedSearchStartLmax 15 --genomeDir' + ' ' + STAR_TRANSCRIPTOME_DIR + ' ' + '--readFilesIn' + ' ' + TMP_DIR+SRA+'_rrnaUnmapped.fastq' + ' ' + '--outFileNamePrefix' + ' ' + 
TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_'\n output = subprocess.run(cmd_STAR, shell=True)\n\n \n # Keep only multi-mapping reads:\n cmd_filter = 'python code/sam_STAR_mapq_filtering.py' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_Aligned.sortedByCoord.out.bam' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam' + ' ' + 'all'\n output = subprocess.run(cmd_filter, shell=True)\n\n cmd_samtools2 = 'samtools index' + ' ' + TMP_DIR+'ambiguous_reads/'+SRA+'_STAR_transcriptome_multi_mapped_sorted.bam'\n output = subprocess.run(cmd_samtools2, shell=True)", "def test_filter_mapping_file(self):\r\n self.assertEqual(filter_mapping_file(self.map_data, self.map_headers,\r\n ['a', 'b', 'c', 'd', 'e', 'f']), (self.map_headers, self.map_data))\r\n self.assertEqual(\r\n filter_mapping_file(self.map_data, self.map_headers, ['a']),\r\n (['SampleID', 'Description'], ['a\\tx'.split('\\t')]))", "def bwa_sampe(self, files_in, bam_out):\n self.cmd(\"bwa sampe -n 0 -N 0 -P \\\n {fasta} {sai_in_files} '{input_bam}' '{input_bam}'\\\n | python {readgroup_mover} translate --dictfile {rg_dict} \\\n | {samtools} view -b -S > {bam_out}\"\n .format(\n readgroup_mover=self.cmds[\"readgroup_mover\"],\n fasta=self.files[\"reference_genome\"],\n sai_in_files=\"%s %s\" % (files_in[0], files_in[1][0]),\n input_bam=files_in[1][2],\n rg_dict=files_in[1][1],\n samtools=self.cmds[\"samtools\"],\n bam_out=bam_out),\n on_error=lambda: self.create_error_file(bam_out),\n shell=True)\n if self.remove_intermediate:\n self.rm(sai_in)", "def test_mapping_file_removal(self):\r\n # doesn't change in test\r\n mf_exp, _ = parse_mapping_file_to_dict(self.MF_IN_2)\r\n bt_exp = parse_biom_table(self.BT_OUT_2)\r\n mf_obs, bt_obs, nonshared_samples = \\\r\n sync_biom_and_mf(parse_mapping_file_to_dict(self.MF_IN_2)[0],\r\n parse_biom_table(self.BT_IN_1))\r\n self.assertEqual(mf_exp, mf_obs)\r\n self.assertEqual(bt_exp, bt_obs)\r\n self.assertEqual(nonshared_samples, set(['Sample5', 'Sample6']))", "def main_SS(maf_file, segment_file, vaf_threshold = 1.05, filterSegments = False):\n all_mutations = pd.read_csv(maf_file, low_memory=False, delimiter='\\t')\n all_segments = pd.read_csv(segment_file, low_memory=False, delimiter='\\t')\n\n if not os.path.exists(\"./sample_mutations_withCN\"):\n os.makedirs(\"./sample_mutations_withCN\")\n if not os.path.exists(\"./pyclone_input\"):\n os.makedirs(\"./pyclone_input\")\n\n for i, sample in enumerate(all_mutations.Tumor_Sample_Barcode.unique()):\n print(\"Processing sample {}: {}\".format(i+1, sample))\n\n # Subset the mutations and segments to those belonging to the patient\n sample_mutations = all_mutations[all_mutations['Tumor_Sample_Barcode'] == sample]\n sample_segments = all_segments[all_segments['Tumor_Sample_Barcode'] == sample]\n\n patient_VAF = sample_mutations.loc[:, 'VAF']\n filter_VAF_index = (patient_VAF > vaf_threshold)\n\n # Remove the mutations where the condition is true for ALL segments, i.e. it has to be below\n # 0.05 for all sectors. If it's above 0.05 in any sector, keep the mutations. 
This will keep most\n # of the private mutations.\n num_filtered = filter_VAF_index.loc[filter_VAF_index == False, ]\n print(\"Patient {} has {} mutations with average VAF < {} removed\".format(sample, num_filtered.shape[0], vaf_threshold))\n # Filter out the variants\n sample_mutations = sample_mutations.loc[filter_VAF_index, ]\n # Get the segments dictionary for the patient.\n seg_dict = segments_to_dict(sample_segments)\n\n overlap_seg = pd.DataFrame()\n filtered_seg = pd.DataFrame()\n for _, mut_row in sample_mutations.iterrows():\n # Skip X and Y chromosome\n if (mut_row['Chromosome'] == \"X\" or mut_row['Chromosome'] == \"Y\"):\n continue\n\n # Search for the segment\n buf = search_overlap_singleSample(mut_row, seg_dict)\n # Skip if no overlapping segments\n if (buf.empty):\n continue\n elif filterSegments:\n print(\"--filterSegments specified. Will filter segments of low quality.\")\n if (buf.iloc[0]['numMarker'] < 100) or (buf.iloc[0]['end.pos'] - buf.iloc[0]['start.pos'] < 5000000) or (buf.iloc[0]['CNt'] >= 8):\n if (filtered_seg.empty):\n filtered_seg = buf.iloc[0].to_frame()\n else:\n filtered_seg = pd.concat([filtered_seg, buf.iloc[0]], axis=1)\n else:\n # Get copy number for mutations\n assigned_row = mut_row.copy(deep=True)\n assigned_row['CNt'] = buf.iloc[0]['CNt']\n assigned_row['Major_CN'] = buf.iloc[0]['A']\n assigned_row['Minor_CN'] = buf.iloc[0]['B']\n assigned_row['adjustedCN'] = buf.iloc[0]['adjustedCN']\n # Initialize dataframe for merging.\n if (overlap_seg.empty):\n overlap_seg = assigned_row.to_frame()\n else:\n overlap_seg = pd.concat([overlap_seg, assigned_row], axis=1)\n\n overlap_seg = overlap_seg.transpose()\n overlap_seg.to_csv(\"./sample_mutations_withCN/{}_SNV_withCN.maf\".format(sample),sep=\"\\t\", index=False)\n\n filtered_seg = filtered_seg.transpose()\n print(\"Sample {} has {} segments with marker<100 or smaller than 5 Mb or >= 8 copy number (Canopy guideline)\".format(sample, filtered_seg.shape[0]))\n filtered_seg.to_csv(\"./sample_mutations_withCN/{}_filtered_seg.maf\".format(sample),sep=\"\\t\", index=False)\n\n pyclone_input = overlap_seg.loc[:, ['Hugo_Symbol', 'Chromosome',\n 'Start_position', 'ref_count', 'alt_count', 'VAF', 'Major_CN',\n 'Minor_CN']]\n pyclone_input['mutation_id'] = pyclone_input['Hugo_Symbol'].map(str) + \"_\" + pyclone_input['Chromosome'].map(str) + \":\" + pyclone_input['Start_position'].map(str)\n pyclone_input['normal_cn'] = 2\n towrite = pyclone_input.loc[:, ['mutation_id', 'ref_count', 'alt_count', 'normal_cn', 'Minor_CN', 'Major_CN']]\n towrite.columns = ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn']\n towrite['ref_counts'] = towrite['ref_counts'].map(int)\n towrite['var_counts'] = towrite['var_counts'].map(int)\n towrite.to_csv(\"./pyclone_input/{}_mutations.tsv\".format(sample), sep='\\t', index=False)", "def _filter_subreads(self):\n logging.info(\"Start to filter subreads in fofn.\")\n if op.exists(self.ori_all_reads_fasta) and self.force_redo is not True:\n msg = \"{fa} already exists, skip pls2fasta\".format(fa=self.ori_all_reads_fasta)\n logging.warn(msg)\n else:\n logging.debug(\"{f} does not exist, call pls2fasta\".\n format(f=self.ori_all_reads_fasta))\n filter_summary = op.join(self.filtered_region_dir,\n \"filtered_summary.csv\")\n cmd = \"filter_plsh5.py --debug \" + \\\n \"--filter='MinReadScore=0.80,MinSRL=500,MinRL=100' \" + \\\n \"--trim='True' --outputDir={fr} \".format(\n fr=self.filtered_region_dir) + \\\n \"--outputSummary={sm} \".format(sm=filter_summary) + \\\n 
\"--outputFofn={rgn} \".format(rgn=self.region_fofn) + \\\n \"{in_fofn}\".format(in_fofn=self.input_fofn)\n logging.info(\"CMD: {cmd}\".format(cmd=cmd))\n _o, _c, _m = backticks(cmd)\n if _c != 0:\n raise RuntimeError(\"CMD failed. \" + str(_o) + ' ' + str(_m))\n\n cmd = \"pls2fasta -trimByRegion \" + \\\n \"-regionTable {rgn} \".format(rgn=self.region_fofn) + \\\n \"{fofn} {fa} \".format(fofn=self.input_fofn,\n fa=self.ori_all_reads_fasta)\n logging.info(\"CMD: {cmd}\".format(cmd=cmd))\n _o, _c, _m = backticks(cmd)\n if _c != 0:\n raise RuntimeError(\"CMD failed. \" + str(_o) + ' ' + str(_m))\n logging.info(\"{f} created.\".format(f=self.ori_all_reads_fasta))\n\n logging.debug(\"Copying {ori_f} to {f}.\".format(\n ori_f=self.ori_all_reads_fasta, f=self.all_reads_fasta))\n shutil.copyfile(self.ori_all_reads_fasta, self.all_reads_fasta)" ]
[ "0.6563839", "0.61846894", "0.6106953", "0.6029072", "0.5986886", "0.5983956", "0.5952436", "0.58762664", "0.57932687", "0.57255226", "0.57188755", "0.5712081", "0.5709512", "0.57006574", "0.56707716", "0.567073", "0.5670114", "0.56360954", "0.5635739", "0.56314486", "0.5609367", "0.5608857", "0.56025285", "0.55925983", "0.5592503", "0.5590115", "0.5574906", "0.5569789", "0.5558316", "0.555185" ]
0.68917465
0
Set up the command-line progress bar with max_value
def initializeProgressBar(max_value):
    bar = progressbar.ProgressBar(maxval=max_value,
                                  widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
    return bar
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_progress_range(self, maximum):\r\n\r\n pass", "def __init__(self, max: int) -> None:\n self.progress_bar = None\n self.progress = 0\n self._max = max", "def bar(self, progress):\n if not hasattr(self, \"_limit\") or not self._limit:\n self._limit = self.terminal_size()\n graph_progress = int(progress * self._limit)\n self.stdout.write(\"\\r\", ending=\"\")\n progress_format = \"[%-{}s] %d%%\".format(self._limit)\n self.stdout.write(\n self.style.SUCCESS(\n progress_format\n % (self.progress_symbol * graph_progress, int(progress * 100))\n ),\n ending=\"\",\n )\n self.stdout.flush()", "def setMaximumValue(self, value: int):\n self.ui.progress.setMaximum(value)", "def init_json_progress_bar(self, bar_max):\n self.json_progress_message_bar = self.iface.messageBar().createMessage(\"Exporting json to \" + self.directory)\n self.json_progress = QProgressBar()\n self.json_progress.setMinimum(0)\n self.json_progress.setMaximum(bar_max)\n self.json_progress.setAlignment(Qt.AlignLeft | Qt.AlignCenter)\n self.json_progress_message_bar.layout().addWidget(self.json_progress)\n self.json_progress_message_bar_widget = self.iface.messageBar().pushWidget(self.json_progress_message_bar, self.iface.messageBar().INFO)", "def set_maximum(self, max_value):\n\n self._progress.setMaximum(max_value)", "def prograssBar(val, final):\n end = \"\"\n maxlen = 50\n step = final // maxlen\n\n print(\"\\r[ \" + \"#\" * (val // step) + \" ] \" +\n str(int(val * 100.0 / final)) + \"% \", end=end)", "def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))", "def progress(current, max):\n prog = int(20 * (current / max))\n print(f\"[{''.join('=' for _ in range(prog))}>{''.join(' ' for _ in range(20 - prog - 1))}] ({(current / max) * 100 : 2.2f}%)\", end='\\r')", "def set_progress_value(self, value):\r\n\r\n pass", "def show_progress(show, current, max, text, *args):\n if show:\n progress = round((float(current) / max) * 100.0, 0)\n output = \"\\r\" + text.format(*args) + \" {0}% done. 
\".format(progress) \n sys.stdout.write(output)\n sys.stdout.flush()", "def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step", "def __call__(self, current_size, max_size=None):\n if max_size is not None:\n self.max_size = max_size\n if self.pb is None:\n self.pb = tqdm(total=self.max_size, unit=\"B\", unit_scale=True)\n self.pb.update(current_size)", "def update_amount(self, newAmount=0, suffix=''):\n if newAmount < self.min:\n newAmount = self.min\n if newAmount > self.max:\n newAmount = self.max\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = np.float(self.amount - self.min)\n percentDone = (diffFromMin / np.float(self.span)) * 100.0\n percentDone = np.int(np.round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2 - 18\n numHashes = (percentDone / 100.0) * allFull\n numHashes = np.int(np.round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n if numHashes == 0:\n self.prog_bar = '%s[>%s]' % (self.prefix, ' '*(allFull-1))\n elif numHashes == allFull:\n self.prog_bar = '%s[%s]' % (self.prefix, '='*allFull)\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n else:\n self.prog_bar = '[%s>%s]' % ('='*(numHashes-1), ' '*(allFull-numHashes))\n # figure out where to put the percentage, roughly centered\n percentPlace = int(len(self.prog_bar)/2 - len(str(percentDone)))\n percentString = ' ' + str(percentDone) + '% '\n # slice the percentage into the bar\n self.prog_bar = ''.join([self.prog_bar[0:percentPlace],\n percentString,\n self.prog_bar[percentPlace+len(percentString):]])\n # prefix and suffix\n self.prog_bar = self.prefix + self.prog_bar\n if suffix:\n self.prog_bar += ' %s' % (suffix)\n # time info - elapsed time and estimated remaining time\n if percentDone > 0:\n elapsed_time = time.time() - self.start_time\n self.prog_bar += '%5ds / %5ds' % (int(elapsed_time),\n int(elapsed_time * (100./percentDone-1)))", "def __init__(self: \"InProgress\", progress: int = 0) -> None:\n self.progress = max(0, min(progress, 100))", "def progress_bar(bar_name: str, current_num: int, total_num: int, output_option: int = 2):\r\n if output_option == 2:\r\n print(\r\n '\\r[{:<50}] {}: {}/{}'.format(\r\n '=' * int(current_num / (2 * total_num) * 100), \r\n bar_name, current_num, total_num\r\n ), \r\n end=''\r\n )\r\n if current_num == total_num:\r\n print()", "def update_progress(progress, max_time, starting_time=start_time):\n \n percent = float(progress)/float(max_time)\n int_percent = int(percent*100)\n elapsed_min = (time.mktime(time.gmtime())-starting_time)/60.0\n if percent > 0:\n eta_min = int(round(elapsed_min/percent))\n else:\n eta_min = '?'\n sys.stdout.write( '\\r[{0}{2}] {1}% ({3}) Elapsed:{4}min ETA:{5}min'.format('#'*(int_percent), int_percent,' '*(100-(int_percent)), progress, int(elapsed_min), eta_min))\n sys.stdout.flush()", "def change_max(self, level, value):\n if value < 0:\n raise AttributeError('max value should be greater than zero')\n if level in self.progress_maxes:\n self.progress_maxes[level] = value", "def _dl_progress_bar(self):\n if not self.show_progress:\n return\n\n if self.file_size:\n ratio = float(self.bytes_read) / self.file_size\n else:\n ratio = 1\n percent = int(ratio * 100)\n\n bar_len = 60\n done = int(bar_len * ratio)\n bar = ('=' * done) + (' ' * (bar_len - done))\n\n progress = '{percent: >3}%: [{bar}]'.format(percent=percent, bar=bar)\n backspace = '\\b' * len(progress)\n 
print(backspace + '\\r', end='')\n print(progress, end='')", "def set_progress(self, progress: float):", "def mk_bar(bar_size):\n return progressbar.ProgressBar(maxval=bar_size, \\\n widgets=[progressbar.Bar('=', '[', ']') \\\n , ' ', progressbar.Percentage()])", "def __init__(self, maxval=100, basic=False, dest=sys.stderr):\n\n self.maxval = maxval\n self.dest = dest\n self.val = 0\n self.redraw_at = 0\n self.partial = [' ']\n for code in range(0x258f, 0x2587, -1):\n self.partial.append(chr(code))\n self.gran = len(self.partial) - 1\n self.active = False\n self.basic = basic", "def progress_bar(self, count, total, status):\n\n bar_len = 50\n filled_len = int(round(bar_len * count / float(total)))\n\n file_size_bytes = f\"{count:,}/{total:,} Bytes\"\n transfer_percent = round(100.0 * count / float(total), 2)\n file_bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n prefix = f\"[{self.LOGGER.host}:{self.LOGGER.port}]\"\n sys.stdout.write(f\"{prefix} -> |{file_bar}| {file_size_bytes} | {transfer_percent}% | {status}...\\r\")\n sys.stdout.flush()\n\n if count >= total: print()", "def set_progress(self, step):\n if self._max and step > self._max:\n self._max = step\n elif step < 0:\n step = 0\n\n prev_period = int(self._step / self.redraw_freq)\n curr_period = int(step / self.redraw_freq)\n\n self._step = step\n\n if self._max:\n self._percent = self._step / self._max\n else:\n self._percent = 0.0\n\n if prev_period != curr_period or self._max == step:\n self.display()", "def print_progress_bar (time, max_time, elapsed_time):\n\n BAR_LENGTH = 40\n\n bars = '*' * int (time * BAR_LENGTH / max_time)\n spaces = ' ' * (BAR_LENGTH - len (bars))\n day = time / SECONDS_PER_DAY\n sys.stderr.write (\"\\r%3d%% [%s%s] Day %2d, elapsed time: %s\" % \\\n (percent, bars, spaces, day,\n str (elapsed_time)))", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)", "def change_max_running(self, _):\n current = self.execution_manager.max_running\n self.details.original_widget = IntInputWidget('Maximum jobs running/pending: ', current, self.__change_max_running)", "def __init__(self, end=1.):\n self.current_val = 0.0\n self.max_val = end\n if in_ipynb():\n if have_ipywidgets:\n self.label = HTML()\n self.progress = IntProgress(min=0,max=100,value=1)\n self.progress.bar_style = 'info'\n self.progressHTML = VBox([self.label, self.progress])\n display(self.progressHTML)\n self.redraw = self._redraw_ipywidgets\n self.cleanup = self._cleanup_ipywidgets\n else:\n self.redraw = self._redraw_ipython\n self.cleanup = self._cleanup_ipython\n else:\n self.redraw = self._redraw_console\n self.cleanup = self._cleanup_console", "def make_progress_bar():\n\n if simple_tregex_mode:\n total_files = len(list(to_iterate_over.keys()))\n else:\n total_files = sum(len(x) for x in list(to_iterate_over.values()))\n\n par_args = {'printstatus': kwargs.get('printstatus', True),\n 'root': root, \n 'note': note,\n 'length': total_files,\n 'startnum': kwargs.get('startnum'),\n 'denom': kwargs.get('denominator', 1)}\n\n term = None\n if kwargs.get('paralleling', None) is not None:\n from blessings import Terminal\n term = Terminal()\n par_args['terminal'] = term\n par_args['linenum'] = kwargs.get('paralleling')\n\n if in_notebook:\n par_args['welcome_message'] = 
welcome_message\n\n outn = kwargs.get('outname', '')\n if outn:\n outn = outn + ': '\n\n tstr = '%s%d/%d' % (outn, current_iter, total_files)\n p = animator(None, None, init=True, tot_string=tstr, **par_args)\n tstr = '%s%d/%d' % (outn, current_iter + 1, total_files)\n animator(p, current_iter, tstr, **par_args)\n return p, outn, total_files, par_args" ]
[ "0.7111303", "0.6805075", "0.66656774", "0.6650501", "0.6446343", "0.6415051", "0.6409759", "0.63803643", "0.6345776", "0.63089895", "0.6278483", "0.6257398", "0.6244696", "0.6239689", "0.6172171", "0.61557204", "0.6144944", "0.6059261", "0.6045168", "0.6035028", "0.6018355", "0.6003006", "0.600198", "0.59835076", "0.5983162", "0.5981274", "0.5981274", "0.59692603", "0.59548134", "0.592212" ]
0.7495011
0
Testing the case where we have a single command instance, but execute() is run multiple times with different content models. The expected behavior is to pick the correct content model class each time.
def test_model_class_loaded_on_each_execution(message_body): cmd = Command() mock_repo = MagicMock(spec=Repository) headers = { 'PlastronJobId': 'test', 'PlastronCommand': 'update', 'PlastronArg-model': 'Letter', } message = PlastronCommandMessage(headers=headers, body=message_body) namespace = Command.parse_message(message) assert namespace.model == 'Letter' cmd.execute(mock_repo, namespace) assert cmd.model_class is Letter headers = { 'PlastronJobId': 'test', 'PlastronCommand': 'update', 'PlastronArg-model': 'Item', } message = PlastronCommandMessage(headers=headers, body=message_body) namespace = Command.parse_message(message) assert namespace.model == 'Item' cmd.execute(mock_repo, namespace) assert cmd.model_class is Item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create\")\n self.assertEqual(f.getvalue().strip(), \"** class name missing **\")\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create hello\")\n self.assertEqual(f.getvalue().strip(), \"** class doesn't exist **\")\n for _class in self.clis:\n with patch('sys.stdout', new=StringIO()) as f:\n command = \"create\" + \" \" + _class\n HBNBCommand().onecmd(command)\n _id = f.getvalue().strip()\n key = _class + \".\" + _id\n self.assertTrue(key in storage.all().keys())", "def test_execute_given_no_matching_template_should_keep_default(self):\n # Arrange\n class_under_test = TemplateCommand()\n template = \"MultiMedia\"\n enhance_configuration = EnhancementConfiguration()\n\n expected_foreground = None\n expected_background = None\n\n # Act\n class_under_test.execute(template, enhance_configuration)\n\n # Assert\n self.assertEqual(expected_foreground,\n enhance_configuration.foreground_color)\n self.assertEqual(expected_background,\n enhance_configuration.background_color)", "def test_create(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create\")\n self.assertEqual(f.getvalue().strip(),\n \"** class name missing **\")\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create Manga\")\n self.assertEqual(f.getvalue().strip(),\n \"** class doesn't exist **\")\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create BaseModel\")\n id = f.getvalue().strip()\n self.assertTrue(type(f), str)\n self.assertEqual(len(id), 36)", "def _execute(self, model_obj):", "def test_models_edx_ui_textbook_interaction_selectors_with_valid_statements(\n class_, data\n):\n statement = json.loads(data.draw(custom_builds(class_)).json())\n model = ModelSelector(module=\"ralph.models.edx\").get_first_model(statement)\n assert model is class_", "def test_model_manager_will_return_same_instance_when_instantiated_many_times(self):\n # arrange, act\n # instantiating the model manager class twice\n first_model_manager = ModelManager()\n second_model_manager = ModelManager()\n\n # loading the MLModel objects from configuration\n first_model_manager.load_model(\"tests.mocks.MLModelMock\")\n\n first_model_object = first_model_manager.get_model(qualified_name=\"qualified_name\")\n second_model_object = second_model_manager.get_model(qualified_name=\"qualified_name\")\n\n # assert\n self.assertTrue(str(first_model_manager) == str(second_model_manager))\n self.assertTrue(str(first_model_object) == str(second_model_object))", "def test_multiple_commands_at_same_time(self):", "def test_003(self):\n try:\n exec(\"class Model(Model):\\n\\tpass\")\n\n raise Exception\n except ModelOverwriteError:\n pass", "def test_execute(self):\n with patch('command_executor.subprocess.Popen') as mock_subproc_popen:\n communicate_mock = Mock()\n attrs = {'communicate.return_value': (OUTPUT_RESULT,\n OK_MESSAGE),\n 'returncode': OK_RETURN_CODE}\n communicate_mock.configure_mock(**attrs)\n mock_subproc_popen.return_value = communicate_mock\n actual_result = BaseCommandExecutor(COMMAND).to_execute()\n self.assertIs(actual_result.error, OK_MESSAGE)\n self.assertIs(actual_result.exit_code, OK_RETURN_CODE)\n self.assertMultiLineEqual(actual_result.output,\n EXECUTE_EXPECTED_RESULT)", "def test_count(self):\n\n command = Command()\n modellist = command.get_modellist()\n for model_name, count in modellist:\n # taking model class by it's name\n model = 
ContentType.objects.get(model=model_name).model_class()\n # testing we've counted objects in this model right\n self.assert_count(model, count)", "def test_Base_model(self):\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"create BaseModel\")\n id = f.getvalue().strip()\n self.assertTrue(type(f), str)\n self.assertEqual(len(id), 36)\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"update BaseModel \" + str(id) + \" name Manga\")\n self.assertTrue(type(f), str)\n self.assertEqual(f.getvalue().strip(), \"\")\n\n with patch('sys.stdout', new=StringIO()) as f:\n HBNBCommand().onecmd(\"show BaseModel \" + str(id))\n self.assertTrue(\"name\" in f.getvalue().strip())\n self.assertTrue(\"Manga\" in f.getvalue().strip())", "def _execute_impl(self, commands):\n raise NotImplementedError(\"abstract method\")", "def execute(self, *args, **kwargs):", "def execute(self, *args, **kwargs):", "def _execute_command(\n cls,\n connection_info: misc_utils.ConnectionInfo,\n search_info: misc_utils.SearchInfo,\n command_name: str,\n arguments: List[str],\n ):\n dictionary, ip_address, port = tuple(connection_info)\n is_printing_list, ids, components, search, json = tuple(search_info)\n\n search_filter = cls._get_search_filter(ids, components, search, json)\n if is_printing_list:\n cls._log(cls._list_all_possible_items(dictionary, search_filter, json))\n return\n\n # ======================================================================\n pipeline, api = test_api_utils.initialize_test_api(\n dictionary, server_ip=ip_address, server_port=port\n )\n # ======================================================================\n\n try:\n api.send_command(command_name, arguments)\n except KeyError:\n cls._log(\"'%s' is not a known command\" % (command_name))\n close_matches = CommandSendCommand.get_closest_commands(\n pipeline.dictionaries, command_name\n )\n if close_matches:\n cls._log(\"Similar known commands: {}\".format(close_matches))\n except NotInitializedException:\n temp = CommandSendCommand.get_command_template(\n pipeline.dictionaries, command_name\n )\n cls._log(\n \"'%s' requires %d arguments (%d given)\"\n % (command_name, len(temp.get_args()), len(arguments))\n )\n cls._log(cls.get_command_help_message(pipeline.dictionaries, command_name))\n except CommandArgumentsException as err:\n cls._log(\"Invalid arguments given; %s\" % (str(err)))\n cls._log(cls.get_command_help_message(pipeline.dictionaries, command_name))\n\n # ======================================================================\n pipeline.disconnect()\n api.teardown()\n # ======================================================================", "def test_same_models(self):\n\t\t\n\t\t# TODO: finish\n\t\tpass", "def test_command_integrity(self):\n out = io.StringIO()\n management.call_command('import_data', stdout=out)\n positions = Position.objects.count()\n management.call_command('import_data', stdout=out)\n self.assertEqual(Position.objects.count(), positions)", "def execute(self, *args, **kwargs):\n pass", "def execute(self):", "def execute(self):", "def execute(self):", "def execute(self):", "def test_command(self):\n\n expected = \"PyFunceble has been written by Fun Ilrys.\"\n actual = Command(\"echo '%s'\" % expected).execute()\n\n self.assertEqual(expected + \"\\n\", actual)", "def execute(cls):\n pass", "def test_handleWorldCommand(self):\n world = World(MagicMock())\n p = AvatarProtocol(world)\n p.avatar = MagicMock()\n p.avatar.execute.return_value = {\"hey\": \"ho\"}\n\n FooCls = 
MagicMock()\n p.commands = {\n 'foo': FooCls,\n }\n\n r = yield p.handleWorldCommand(name='foo', args=['foo', 'bar', 'baz'],\n work=None)\n p.avatar.execute.assert_called_once_with(FooCls, 'foo', 'bar', 'baz')\n self.assertEqual(r, {'data': json.dumps({\"hey\":\"ho\"})})", "def execute(self):\n\t\tpass", "def execute():", "def execute(self):\n pass", "def execute(self):\n pass", "def execute(self):\n pass" ]
[ "0.6008626", "0.59421533", "0.5843393", "0.5799433", "0.5684046", "0.5677871", "0.5666402", "0.5653431", "0.5589043", "0.5587832", "0.5562466", "0.5555742", "0.5520141", "0.5520141", "0.5490762", "0.5461224", "0.542619", "0.5383023", "0.53781736", "0.53781736", "0.53781736", "0.53781736", "0.53723973", "0.536629", "0.5363487", "0.5352735", "0.532257", "0.5319855", "0.5319855", "0.5319855" ]
0.70353436
0
Test retrieving documents and encoding them with vectors.
def test_retrieve_and_encode_simple(test_client, test_collection_name): VECTOR_LENGTH = 100 def fake_encode(x): return test_client.generate_vector(VECTOR_LENGTH) # with TempClientWithDocs(test_client, test_collection_name, 100) as client: test_client.insert_documents(test_collection_name, test_client.create_sample_documents(100)) results = test_client.retrieve_and_encode(test_collection_name, models={'country': fake_encode}) assert list(test_client.collection_schema(test_collection_name)['country_vector_'].keys())[0] == 'vector' assert len(results['failed_document_ids']) == 0 assert 'country_vector_' in test_client.collection_schema(test_collection_name) docs = test_client.retrieve_documents(test_collection_name)['documents'] assert len(docs[0]['country_vector_']) == VECTOR_LENGTH
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_document_retrieval(self):", "def test_all_documents(self):", "def test_documents_for(self):\n # Test the default ES version\n self._test_documents_for(_documents_for)\n\n # Test the DB version\n self._test_documents_for(_db_documents_for)", "def test_get_documents_populated(index_with_documents):\n response = index_with_documents().get_documents()\n assert isinstance(response.results, list)\n assert len(response.results) == 20", "def test_loading_document(self):", "def test_client_document_retrieve(self):\n pass", "def vectorize_doc(document):\n # return document vector for tokenized input doc\n return bc.encode([document])[0]", "def simulate_response(self, documents):", "def test_encode(self):\n data = [item.to_dict() for item in self.docs[0:2]]\n actual = self.doc_cls.objects[0:2]\n self.assertEqual(\n json.loads(actual.to_json()), data\n )", "def test_get_vocabulary(self):\n\n for m in self.models:\n vocab = m.vocabulary\n self.assertTrue(isinstance(vocab, turicreate.SArray))\n self.assertEqual(len(vocab), 25)", "def test_encode(self):\n data = [json.loads(item.to_json()) for item in self.docs]\n self.assertEqual(\n json.loads(self.doc_cls.objects.to_json()), data\n )", "def testVectorize(self):\n \n doc = self.train.tfidf\n\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"novel\"]], 0)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"novel\"]], 0)\n self.assertEqual(doc[2][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[2][self.vectorizer.vocabulary_[\"novel\"]], 0)\n\n self.assertEqual(doc[3][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[3][self.vectorizer.vocabulary_[\"novel\"]], 1)\n self.assertEqual(doc[4][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[4][self.vectorizer.vocabulary_[\"novel\"]], 1)\n self.assertEqual(doc[5][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[5][self.vectorizer.vocabulary_[\"novel\"]], 1)\n\n doc = self.test.tfidf\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"treatise\"]], 1)\n self.assertEqual(doc[0][self.vectorizer.vocabulary_[\"novel\"]], 0)\n\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"treatise\"]], 0)\n self.assertEqual(doc[1][self.vectorizer.vocabulary_[\"novel\"]], 1)", "def test_add_many_unicode(self):\n # Some Polish characters (UTF-8)\n chars = (\"\\xc4\\x99\\xc3\\xb3\\xc4\\x85\\xc5\\x9b\\xc5\\x82\"\n \"\\xc4\\x98\\xc3\\x93\\xc4\\x84\\xc5\\x9a\\xc5\\x81\").decode(\"utf-8\")\n\n documents = []\n for char in chars:\n doc = {}\n doc[\"data\"] = char\n doc[\"user_id\"] = get_rand_string()\n doc[\"id\"] = get_rand_string()\n documents.append(doc)\n\n user_ids = [doc[\"user_id\"] for doc in documents]\n ids = [doc[\"id\"] for doc in documents]\n\n self.conn.add(documents)\n self.conn.commit()\n\n results = []\n for doc in documents:\n id = doc[\"id\"]\n res = self.conn.query(\"id:\" + id).results\n if not res:\n self.fail(\"Could not find document (id:%s)\" % id)\n results.append(res[0])\n\n self.assertEquals(len(results), len(chars),\n \"Query didn't return all documents. 
Expected: %d, got: %d\" % (\n len(chars), len(results)))\n\n # Use sets' symmetric difference to check if we have all documents\n # (same way as in TestAddingDocuments.test_add_many)\n\n query_user_ids = [doc[\"user_id\"] for doc in results]\n query_data = [doc[\"data\"] for doc in results]\n query_ids = [doc[\"id\"] for doc in results]\n\n user_ids_symdiff = set(user_ids) ^ set(query_user_ids)\n data_symdiff = set(chars) ^ set(query_data)\n ids_symdiff = set(ids) ^ set(query_ids)\n\n self.assertEqual(user_ids_symdiff, set([]),\n \"User IDs sets differ (difference:%s)\" % (user_ids_symdiff))\n self.assertEqual(data_symdiff, set([]),\n \"Data sets differ (difference:%s)\" % (data_symdiff))\n self.assertEqual(ids_symdiff, set([]),\n \"IDs sets differ (difference:%s)\" % (ids_symdiff))", "def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):\n # Doc2Vec requires LabeledSentence objects as input.\n # Turn the datasets from lists of words to lists of LabeledSentence objects.\n # YOUR CODE HERE\n labeled_train_pos = []\n labeled_train_neg = []\n labeled_test_pos = []\n labeled_test_neg = []\n i = 0\n for line in train_pos:\n labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))\n i += 1\n i = 0\n for line in train_neg:\n labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))\n i += 1\n i = 0\n for line in test_pos:\n labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))\n i += 1\n i = 0\n for line in test_neg:\n labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))\n i += 1\n\n # Initialize model\n model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)\n sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg\n model.build_vocab(sentences)\n\n # Train the model\n # This may take a bit to run \n for i in range(5):\n print \"Training iteration %d\" % (i)\n random.shuffle(sentences)\n model.train(sentences)\n\n # Use the docvecs function to extract the feature vectors for the training and test data\n # YOUR CODE HERE\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n for j in range(len(train_pos)):\n train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])\n for j in range(len(train_neg)):\n train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])\n for j in range(len(test_pos)):\n test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])\n for j in range(len(test_neg)):\n test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def test_get_documents_default(empty_index):\n response = empty_index().get_documents()\n assert isinstance(response.results, list)\n assert response.results == []", "def test_get_document(index_with_documents):\n response = index_with_documents().get_document(\"500682\")\n assert isinstance(response, Document)\n assert hasattr(response, \"title\")\n assert response.title == \"The Highwaymen\"", "def testDocument():\n #Set up a mock document\n d = Document()\n #Set document's sentences\n d[0] = Sentence('This is the first sentence in my email.')\n d[1] = Sentence('This is the second sentence in my email!')\n d[2] = Sentence('Sincerely, last sentence')\n \n #Get document's setences\n print(d[0])\n print(d[1])\n print(d[2])\n #Test getSCount()\n print('self.__sCount should be 3: ', d.getSCount())\n #Set toInfo\n d.setToInfo('[email protected]')\n #Get toInfo\n print('To Info: ', d.getToInfo())\n #Set fromInfo\n 
d.setFromInfo('[email protected]')\n #Get fromInfo\n print('From Info: ', d.getFromInfo())\n #Set Date\n d.setDate(2017, 3, 6)\n #Get Date\n print('(Year, Month, Day): ', d.getDate())\n #Get Fwd\n d.setFwd('[email protected]')\n #Set Fwd\n print('Forward Info: ', d.getFwd())\n #Set Reply\n d.setReply('[email protected]')\n #Get Reply\n print('Reply Info: ', d.getReply())", "def test_document_listing(self, flag_is_active):\n flag_is_active.return_value = True\n\n # Create a topic and product\n t = topic(save=True)\n p = product(save=True)\n\n # Create 3 documents with the topic and product and one without\n for i in range(3):\n doc = revision(is_approved=True, save=True).document\n doc.topics.add(t)\n doc.products.add(p)\n doc = revision(is_approved=True, save=True).document\n\n self.refresh()\n\n # GET the page and verify the content\n url = reverse('products.documents', args=[p.slug, t.slug])\n r = self.client.get(url, follow=True)\n eq_(200, r.status_code)\n doc = pq(r.content)\n eq_(3, len(doc('#document-list li')))", "def build(self,documents):\n\t\tself.vectorKeywordIndex = self.getVectorKeywordIndex(documents)\n\n\t\tself.documentVectors = [self.createVector(document) for document in documents]", "def test_client_document_list(self):\n pass", "def vectorize_doc_list(docList):\n vecList = bc.encode(docList)\n return vecList", "def get_vectors_for_all_docs(docs, vocab):\n docs_vectors = [get_feature_vector(doc, vocab) for doc in docs]\n return np.array(docs_vectors)", "def get_doc_vector(doc_id, model_id):\n queue = get_vec_queue(app.config)\n data = queue.get_by_id((doc_id, model_id))\n if data is not None:\n return jsonify(doc=data.tolist())\n return jsonify(err=f\"{doc_id} not found\"), 404", "def test_official_test_vectors(self):\n for case in TESTS:\n u = self.load(case.key[::-1], swap=True, raw=True)\n self.assertEqual(u.encrypt(case.plain), case.cipher)\n self.assertEqual(u.decrypt(case.cipher), case.plain)", "def vectorize_documents(documents, model):\n document_vectors = []\n count = 0\n for document in documents:\n count += 1\n sentence_vectors = [vectorize_sentence(sentence, model) for sentence in document]\n document_vector = get_aggregate_vector(sentence_vectors)\n document_vectors.append(document_vector)\n return document_vectors", "def test_extract_embeddings():\n docs = [\"some document\"]\n model = BERTopic(embedding_model=\"distilbert-base-nli-stsb-mean-tokens\")\n bertopic_embeddings = model._extract_embeddings(docs)\n\n assert isinstance(bertopic_embeddings, np.ndarray)\n assert bertopic_embeddings.shape == (1, 768)\n\n sentence_embeddings = embedding_model.encode(docs, show_progress_bar=False)\n assert np.array_equal(bertopic_embeddings, sentence_embeddings)", "def create_vectors(\n dataset_path_train: str, dataset_path_test: str,\n vectors_path_train: str, vectors_path_test: str\n) -> int:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n df_train = pd.read_csv(\n f\"/data/{dataset_path_train}\",\n index_col=\"id\",\n dtype={**dtypes, \"target\": int},\n converters={\"tokens\": ast.literal_eval})\n df_train[\"text_preprocessed\"] = df_train[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n df_test = pd.read_csv(\n f\"/data/{dataset_path_test}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df_test[\"text_preprocessed\"] = df_test[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n vectorizer = 
sklearn.feature_extraction.text.CountVectorizer()\n vectors_train = vectorizer.fit_transform(df_train[\"text_preprocessed\"])\n vectors_test = vectorizer.transform(df_test[\"text_preprocessed\"])\n\n with open(f\"/data/{vectors_path_train}\", \"wb\") as f:\n pickle.dump(vectors_train, f)\n with open(f\"/data/{vectors_path_test}\", \"wb\") as f:\n pickle.dump(vectors_test, f)\n\n return 0", "def test_single_document_processing(self):\n print('submitting document...')\n\n for doc in self.DOCS:\n result = self.client.submit_document(doc)\n\n from pprint import pprint\n print(result)\n self.assertTrue(result != \"\")", "def test_basic_av_by_tags_op_all(self):\n doc1 = Document.objects.create_document(\n title=\"doc1\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"koko.pdf\",\n size='1111',\n lang='ENG',\n )\n doc2 = Document.objects.create_document(\n title=\"doc2\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"kuku.pdf\",\n size='1111',\n lang='ENG',\n )\n doc3 = Document.objects.create_document(\n title=\"doc3\",\n user=self.testcase_user,\n page_count=2,\n file_name=\"momo.pdf\",\n size='1111',\n lang='ENG',\n )\n doc1.tags.add(\n \"green\",\n \"blue\",\n tag_kwargs={'user': self.testcase_user}\n )\n doc2.tags.add(\n \"blue\",\n tag_kwargs={'user': self.testcase_user}\n )\n doc3.tags.add(\n \"green\",\n \"blue\",\n \"red\",\n tag_kwargs={'user': self.testcase_user}\n )\n\n base_url = reverse('admin:search')\n args = \"tag=green&tag=blue&tags_op=all\"\n url = f\"{base_url}?{args}\"\n\n ret = self.client.get(url)\n\n self.assertEqual(\n ret.status_code,\n 200\n )\n self.assertEqual(\n len(ret.context['results_docs']),\n 2\n )\n result_ids = set(\n [doc_.id for doc_ in ret.context['results_docs']]\n )\n self.assertEqual(\n result_ids,\n set([doc1.id, doc3.id])\n )", "def iter_documents(self):\n raise NotImplementedError" ]
[ "0.7132346", "0.70555973", "0.64202774", "0.63451856", "0.62944025", "0.6217141", "0.61046475", "0.60995257", "0.60924697", "0.6030219", "0.6022934", "0.5991151", "0.59872127", "0.589388", "0.5838781", "0.58209395", "0.5817124", "0.5815693", "0.5729778", "0.5727924", "0.5702847", "0.57006174", "0.5698733", "0.5680299", "0.56591964", "0.5655798", "0.5636067", "0.5631109", "0.56304234", "0.5629516" ]
0.7413078
0
For a list of positions and a constant recombination rate (in cM/Mb), return a list "results" of the same length as "positions" such that results[i] is the phred-scaled recombination probability between positions[i-1] and positions[i].
def uniform_recombination_map(recombrate, positions): return [0] + [ round(centimorgen_to_phred((positions[i] - positions[i - 1]) * 1e-6 * recombrate)) for i in range(1, len(positions)) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeRecombMapBooker(snplist, rholist, chrom):\n if chrom == \"X\":\n mapsize = 44.7\n elif chrom == \"3R\":\n mapsize = 90.9\n elif chrom == \"3L\":\n mapsize = 89.1\n elif chrom == \"2L\":\n mapsize = 63.2\n elif chrom == \"2R\":\n mapsize = 94.8\n elif chrom == \"2RL\":\n mapsize = 158\n elif chrom == \"3RL\":\n mapsize = 180\n poslist = []\n rhocum = []\n cMlist = []\n cMMblist = []\n for i, pos in enumerate(snplist):\n if i == 0:\n rhoTemp = (rholist[i] * (pos))\n else:\n rhoTemp = (rholist[i] * (pos - snplist[i-1]))\n if i == 0:\n rhocum.append(rhoTemp)\n else:\n rhocum.append(rhocum[-1] + rhoTemp)\n poslist.append(pos)\n for i, j in enumerate(rhocum):\n cMperSNP = (j / rhocum[-1])\n cMlist.append(cMperSNP)\n cMMblist.append(((cMlist[i] - cMlist[i-1])*mapsize) / ((snplist[i] - snplist[i-1])/1E6))\n return(poslist, cMMblist, cMlist)", "def burg(sample_list, coefficient_number):\n\n p = sum(sample ** 2 for sample in sample_list)\n a0 = p / len(sample_list)\n\n b1 = sample_list[:len(sample_list) - 1]\n b2 = sample_list[1:]\n\n aa = [0.0 for i in range(coefficient_number)]\n coefficient_list = [0.0 for i in range(coefficient_number)]\n\n for i in range(coefficient_number):\n\n numerator = 0.0\n denominator = 0.0\n\n for j in range(len(sample_list) - i - 1):\n numerator += b1[j] * b2[j]\n denominator += b1[j] ** 2 + b2[j] **2\n\n coefficient_list[i] = 2.0 * numerator / denominator\n a0 *= 1.0 - coefficient_list[i] ** 2\n\n for j in range(i - 1):\n coefficient_list[j] = aa[j] - coefficient_list[i] * aa[i - j - 1]\n\n if i < coefficient_number + 1:\n\n for j in range(i + 1):\n aa[j] = coefficient_list[j]\n\n for j in range(len(sample_list) - i - 2):\n b1[j] -= aa[i] * b2[j]\n b2[j] = b2[j + 1] - aa[i] * b1[j + 1];\n\n return a0, coefficient_list", "def get_success_probabilities_from_results(results: Sequence[Sequence[Sequence[int]]]) \\\n -> Sequence[float]:\n num_shots = len(results[0])\n n_bits = len(results[0][0]) - 1\n\n probabilities = []\n # loop over all binary strings of length n_bits\n for result, bits in zip(results, all_bitstrings(2 * n_bits)):\n # Input nums are written from (MSB .... LSB) = (a_n, ..., a_1, a_0)\n num_a = bit_array_to_int(bits[:n_bits])\n num_b = bit_array_to_int(bits[n_bits:])\n\n # add the numbers\n ans = num_a + num_b\n ans_bits = int_to_bit_array(ans, n_bits + 1)\n\n # a success occurs if a shot matches the expected ans bit for bit\n probability = 0\n for shot in result:\n if np.array_equal(ans_bits, shot):\n probability += 1. / num_shots\n probabilities.append(probability)\n\n return probabilities", "def find_pcts_multi(P, start_b = [], iter = 10000):\n assert len(P) >= 2\n wins_per_player = [0] * len(P)\n all_hole = reduce(lambda x,y: x+y, P)\n for i in range(iter):\n deck = Deck()\n need = 5 - len(start_b)\n b2 = draw_sure(deck, need, all_hole+start_b)\n s = [evaluator.evaluate(start_b+b2, h) for h in P]\n for i, e in enumerate(s):\n if e == min(s):\n wins_per_player[i] += 1\n return [float(x) / sum(wins_per_player) for x in wins_per_player]", "def metropolis_step_PBC(self, positions):\n \"\"\"with brute-force sampling of new positions.\"\"\"\n\n # r = random.random()*random.choice((-1, 1))\n # r is a random number drawn from the uniform prob. dist. 
in [0,1]\n r = np.zeros(self.num_d)\n for i in range(self.num_d):\n r[i] = np.random.uniform(-1, 1)\n # Pick a random particle\n random_index = np.random.randint(0, high=len(positions))\n new_positions = np.array(positions)\n new_random_position = new_positions[random_index, :]\n # Suggest a new move\n new_positions[random_index, :] = new_random_position + r*self.delta_R\n # Check boundarys, apply PBC if necessary\n pbc = self.periodic_boundary_conditions(new_positions, random_index)\n new_positions[random_index, :] = pbc\n\n test_wavefunction = self.w.wavefunction(new_positions)\n if test_wavefunction**2 <= 1e-14:\n pass\n else:\n acceptance_ratio = self.w.wavefunction_ratio(positions,\n new_positions)\n epsilon = np.random.sample()\n\n if acceptance_ratio > epsilon:\n positions = new_positions\n self.s.distances_update_PBC(positions, random_index)\n # print (self.s.distances)\n self.c += 1.0\n\n else:\n pass\n\n return positions", "def replace_linear_combinations(list_of_3x3_matrices, force_constant_prefactor):\n result = []\n\n for matrix in list_of_3x3_matrices:\n new_matrix = []\n for row in matrix:\n new_row = []\n for entry in row:\n if isinstance(entry, Iterable):\n new_entry = 0\n for value, factor in entry:\n new_entry += value * factor\n new_row.append(new_entry * force_constant_prefactor)\n else:\n new_row.append(entry * force_constant_prefactor)\n new_matrix.append(new_row)\n result.append(new_matrix)\n\n return result", "def train_GPs_on_position(list_of_input_trajectories, list_of_output_trajectories, times_array):\r\n # get list of Xs that line up with inputs and outputs and are limited to them.\r\n # for each input:\r\n # train a GP\r\n # compare actual outputs to predicted using defined function for MSLL\r\n # store MSLL in array size that is the same size as the inputs and the outputs.\r\n # For the last line, if you do it for all inputs, can end up with a square array of inputs to outputs\r\n # Then I need some method of choosing the maximum combination of inputs and outputs. Research this...\r\n\r\n cost_matrix = np.zeros((len(list_of_input_trajectories),len(list_of_output_trajectories)))\r\n for i, input_trajectory_masked in enumerate(list_of_input_trajectories):\r\n input_mask = np.ma.getmask(input_trajectory_masked)\r\n input_trajectory = np.array(input_trajectory_masked[~input_mask].reshape(3,-1))\r\n times_input_mask = input_mask[0,:]\r\n times_input_masked = np.ma.masked_array(times_array, times_input_mask)\r\n input_times = np.array(times_input_masked[~times_input_mask])\r\n\r\n # REFORMAT THE ARRAY TO BE SUITABLE FOR GPy\r\n Y_List = GPy_reformat_3D(input_trajectory) # make sure input_trajectory has shape (3, n_timesteps)\r\n X_List = GPy_reformat_3D(input_times) # times should have shape (n_timesteps)\r\n\r\n\r\n icm = GPy.util.multioutput.ICM(input_dim=1, num_outputs=3, kernel=GPy.kern.RBF(1))\r\n # print(icm)\r\n\r\n gp = GPy.models.GPCoregionalizedRegression(X_List, Y_List, kernel=icm)\r\n gp['.*rbf.var'].constrain_fixed(1.) 
# For this kernel, B.kappa encodes the variance now.\r\n # gp.optimize(messages=True)\r\n gp.optimize(messages=False)\r\n\r\n # FINDING INDIVIDUAL COSTS\r\n for j, output_trajectory_masked in enumerate(list_of_output_trajectories):#\r\n output_mask = np.ma.getmask(output_trajectory_masked)\r\n output_trajectory = np.array(output_trajectory_masked[~output_mask].reshape(3,-1))\r\n times_output_mask = output_mask[0,:]\r\n times_output_masked = np.ma.masked_array(times_array, times_output_mask)\r\n output_times = np.array(times_output_masked[~times_output_mask])\r\n cost_matrix[i,j] = individual_cost_function(gp, output_trajectory, output_times)\r\n\r\n\r\n # ARRAY OF ROW INDICES, ARRRAY OF COLUMN INDICES\r\n # CALL COMBINED COSTS\r\n # INPUT ARRAY[OUTPUT ARRAY NO MASK] = OUTPUT ARRAY[OUTPUT ARRAY NO MASK]\r\n\r\n return", "def brute_multiply(numbers):\n result = []\n for a in range(len(numbers)):\n product = 1\n for index, b in enumerate(numbers):\n if index != a:\n product *= b\n result.append(product)\n return result", "def correct_pvalues_for_multiple_testing(pvalues, correction_type = \"Benjamini-Hochberg\"):\n pvalues = array(pvalues)\n n = int(pvalues.shape[0])\n new_pvalues = empty(n)\n if correction_type == \"Bonferroni\":\n new_pvalues = n * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n new_pvalues[i] = (n-rank) * pvalue\n elif correction_type == \"Benjamini-Hochberg\":\n values = [ (pvalue, i) for i, pvalue in enumerate(pvalues) ]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = n - i\n pvalue, index = vals\n new_values.append((n/rank) * pvalue)\n for i in range(0, int(n)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n new_pvalues[index] = new_values[i]\n return new_pvalues", "def MAP(cpts, obs, terms):\r\n\r\n # a list to store the computed probabilities\r\n all_sums = []\r\n # initialize all terms to false\r\n for value in range(len(terms)):\r\n terms[value] = [terms[value], '0']\r\n search_array = terms + obs\r\n # if all terms are being watched, just call MPE\r\n if len(search_array) == len(cpts):\r\n return MPE(cpts, obs)\r\n # we need to know what terms we aren't interested in so we start with \r\n # or terms and observations and note the variables that appear in CPT but\r\n # not in those\r\n dont_count = []\r\n for var in cpts:\r\n if [var[0], '0'] not in search_array and [var[0], '1'] not in search_array:\r\n dont_count.append(var[0])\r\n terms.append([var[0],'1'])\r\n # sort the terms to ensure correct ordering\r\n terms.sort()\r\n # creates a list of all possible bit strings\r\n # just an easy way to create all possible truth assignments\r\n seq = [\"\".join(seq) for seq in itertools.product(\"01\", repeat=len(terms))]\r\n # loop through all possible truth assignments\r\n for j in range(len(seq)):\r\n # we initialize at probability = 100%\r\n chance = 1\r\n # assign the truth values\r\n for k in range(len(seq[j])):\r\n terms[k][1] = seq[j][k]\r\n # this computes the probability using the chaining rule\r\n for i in range(len(terms)):\r\n new_terms = terms[:-i-1] + obs\r\n new_terms.sort()\r\n chance *= probability(cpts,terms[-i-1], new_terms)\r\n # add the probabilities to our list\r\n all_sums.append(chance)\r\n combine = []\r\n # note all variables which weren't in obs or Vs\r\n for i in 
dont_count:\r\n combine.append(terms.index([i,'1']))\r\n # this will store the final probabilities\r\n final_array = [0] * len(seq)\r\n # another complicated looking loop, it just serves to combine probabilities\r\n # for example, if we have a CPT with x_1, x_2, x_3, x_4 and we observe \r\n # x_1 to be true and have Vs = [x_3, x_4] then we need to combine the \r\n # probabilities that are the same except for x_2 = true vs false\r\n for loc in combine:\r\n for sequence in range(len(seq)):\r\n for alt_sequence in range(sequence+1,len(seq)):\r\n if (seq[sequence][:loc] + seq[sequence][loc+1:]) == (seq[alt_sequence][:loc] + seq[alt_sequence][loc+1:]):\r\n final_array[sequence] = all_sums[sequence] + all_sums[alt_sequence]\r\n\r\n # get the truth assignment for the highest probability\r\n location = seq[final_array.index(max(final_array))]\r\n truth_assignment = []\r\n # place the truth assignment in a more readable fashion\r\n for value in range(len(terms)):\r\n if terms[value] in search_array:\r\n if location[value] == '0':\r\n truth_assignment.append(terms[value][0]+ ' = False')\r\n else:\r\n truth_assignment.append(terms[value][0]+ ' = True')\r\n return (truth_assignment)", "def get_system_pairs_prob(self, lst, results, gold_lst):\n system_pairs = []\n\n counters['getsp gold lst'] = len(gold_lst)\n \n if isinstance(results, np.ndarray):\n cur = None\n curmax = -1\n for i, item in enumerate(lst):\n if cur and (item['sent'] != cur['sent'] or\n item['exp'] != cur['exp']):\n cur.update({'confidence': curmax})\n system_pairs.append(cur)\n curmax = -1\n cur = None\n if not cur:\n curmax = results[i][1]\n cur = item\n if results[i][1] > curmax:\n curmax = results[i][1]\n cur = item\n if cur:\n cur.update({'confidence': curmax})\n system_pairs.append(cur)\n\n c = 0\n s_p_new = []\n for it in gold_lst:\n if len(system_pairs) > c:\n if (it['sent'] == system_pairs[c]['sent'] and\n it['exp'] == system_pairs[c]['exp']):\n s_p_new.append(system_pairs[c])\n c += 1\n else:\n it['confidence'] = 0\n s_p_new.append(it)\n if DEBUG: print \"skip\", it\n\n system_pairs = s_p_new\n\n cur = False\n for item in system_pairs:\n if cur and (cur['sent'] == item['sent'] and\n cur['exp'] == item['exp']):\n print \"MUL: \", cur, '\\n', item\n print \"MULTIPLE EXP IN EXP_PAIRS\"\n raise\n cur = item\n\n return system_pairs\n \n\n for i, item in enumerate(lst):\n if results[i]:\n system_pairs.append(item)\n return system_pairs", "def r_combinations(n,r):\n return r_permutations(n,r) / math.factorial(r)", "def estimateProbabilities(X, C):\n p = []\n q = []\n r = []\n n = len(X)\n d = 9 # Dimension of the elements\n c = 2 # Number of classes\n for i in range(d):\n p_i = []\n q_i = []\n r_i = []\n for j in range(c):\n p_tmp = []\n q_tmp = []\n r_tmp = []\n elements = C[j] # Elements on the training set for class w_j\n nj = len(C[j])\n for k in elements:\n # p_i_j\n res = X[k][i]*(X[k][i] + 1)*0.5\n p_tmp.append(res)\n # q_i_j\n res = 1 - X[k][i]*X[k][i]\n q_tmp.append(res)\n # r_i_j\n res = X[k][i]*(X[k][i] - 1)*0.5\n r_tmp.append(res)\n p_i.append(sum(p_tmp) / float(nj))\n q_i.append(sum(q_tmp) / float(nj))\n r_i.append(sum(r_tmp) / float(nj))\n p.append(p_i)\n q.append(q_i)\n r.append(r_i)\n return (p, q, r)", "def algorithm_1_2(p, c, x):\n\n q = np.array(c, dtype=np.float64)\n\n for k in range(1, p + 1):\n for j in range(0, p - k + 1):\n q[j] = (1 - x) * q[j] + x * q[j + 1]\n return q[0]", "def correct_pvalues_for_multiple_testing(pvalues, correction_type=\"Benjamini-Hochberg\"):\r\n from numpy import array, empty\r\n 
pvalues = array(pvalues)\r\n n = float(pvalues.shape[0])\r\n new_pvalues = empty(n)\r\n if correction_type == \"Bonferroni\":\r\n new_pvalues = n * pvalues\r\n elif correction_type == \"Bonferroni-Holm\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n for rank, vals in enumerate(values):\r\n pvalue, i = vals\r\n new_pvalues[i] = (n - rank) * pvalue\r\n elif correction_type == \"Benjamini-Hochberg\":\r\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\r\n values.sort()\r\n values.reverse()\r\n new_values = []\r\n for i, vals in enumerate(values):\r\n rank = n - i\r\n pvalue, index = vals\r\n new_values.append((n / rank) * pvalue)\r\n for i in range(0, int(n) - 1):\r\n if new_values[i] < new_values[i + 1]:\r\n new_values[i + 1] = new_values[i]\r\n for i, vals in enumerate(values):\r\n pvalue, index = vals\r\n new_pvalues[index] = new_values[i]\r\n return new_pvalues", "def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. 
\r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n return(round(AUC,3))", "def evolve(p: Population, c: Int, s: Int, r: Int) -> List(Float):\n payoffs = []\n for i in range(c):\n p2 = p.match_up(r)\n pp = p2.payoffs()\n p3 = p2.regenerate(s)\n payoffs = payoffs + [relative_average(pp, r)]\n p = p3\n\n return payoffs", "def compute_map(scores, num_instances):\r\n pr = []\r\n for i, score in enumerate(scores):\r\n score = score[np.argsort(-score[:, 1])] # sort by confidence score\r\n FP = 0\r\n TP = 0\r\n pr_ = []\r\n for prediction in score:\r\n if prediction[0]:\r\n TP += 1\r\n else:\r\n FP += 1\r\n pr_.append([TP/(TP+FP), (TP/num_instances)])\r\n pr.append(pr_)\r\n pr = np.array(pr)\r\n\r\n pinterps = [] # lists of interpolated precisions for every confidence level\r\n ranks = np.linspace(0, 1, 11)\r\n idxs_interpolations = [] # list of indexes of the interpolated precisions, just to plot the recall\r\n for pr_ in pr:\r\n pinterp = []\r\n idxs = []\r\n last_idx = -1\r\n for rank in ranks:\r\n idx = (np.abs(pr_[:, 1] - rank)).argmin() # find the closest recall to the rank\r\n\r\n if rank > pr_[idx, 1]: # this makes sure we are taking the closest recall at the right of the rank\r\n if idx+1 < pr_[:, 0].shape[0]:\r\n idx += 1\r\n interpolated_precision = np.max(pr_[idx:, 0]) # find the max precision within the interval\r\n if idx == last_idx: # just some checks for when the recall doesn't exist\r\n pinterp[-1] = 0\r\n idxs[-1] = 0\r\n pinterp.append(0)\r\n idxs.append(0)\r\n else:\r\n 
pinterp.append(interpolated_precision)\r\n idxs.append(idx)\r\n last_idx = idx\r\n pinterps.append(pinterp)\r\n idxs_interpolations.append(idxs)\r\n APs = np.array(pinterps).mean(axis=1) # the AP is the average of the interpolated precisions\r\n mAP = APs.mean() # mAP is the mean of all the APs\r\n\r\n return pr, pinterps, idxs_interpolations, mAP, APs", "def compute_power(pvals, SNPs):\n\tnsnps = len(pvals)\n\tall_snps = np.arange(0, nsnps)\n\tpos = SNPs\n\tnegs = list(set(all_snps) - set(SNPs))\n\n\tpvals_rank = rank_array(pvals)\n\n\trocr = np.zeros((nsnps, 2))\n\tfor i in all_snps:\n\t\tv = pvals_rank[0:i] # test positives\n\t\tz = list(set(all_snps) - set(v)) # test negatives\n\n\t\tTP = len(set(v) & set(pos))\n\t\tFP = len(set(v) & set(negs))\n\t\tTN = len(set(z) & set(negs))\n\t\tFN = len(set(z) & set(pos))\n\n\t\tTPR = 1.0*TP/(TP+FN); FPR = 1.0*FP/(FP+TN); #FDR = 1.0*FP/(FP+TP)\n\n\t\trocr[i, :] = [FPR, TPR]\n\n\treturn rocr", "def get_scaling_results(path_pattern, ranks):\n configs, results = [], []\n for r in ranks:\n result_dir = path_pattern % r\n configs.append(load_config(result_dir))\n results.append(load_result(result_dir).assign(ranks=r))\n samples = np.array([get_num_samples(c,r) for (c,r) in zip(configs, ranks)]) \n times = np.array([compute_mean_time(r) for r in results])\n throughputs = samples / times\n ideal = ranks * throughputs[0]/4 # Change to the GPU/node\n eff = throughputs / ideal\n return pd.DataFrame(dict(ranks=ranks, samples=samples,\n times=times, throughputs=throughputs,\n ideal=ideal, eff=eff))", "def FindCouplings():\n l1v = np.linspace(l1min, l1max, num=48)\n l2v = np.logspace(np.log10(l2min), np.log10(l2max), num=48)\n l3v = np.linspace(l3min, l3max, num=48)\n gxv = np.linspace(gxmin, gxmax, num=48)\n p = multiprocessing.Pool()\n f = open(file_name, 'w+')\n line = '|l1--l2--l3--gx--minima--mass1--mass2--stable|'\n f.write(line+'\\n')\n f.write('-'*90+'\\n')\n f.close()\n for l1 in l1v:\n for l2 in l2v:\n start_time_loop = time.time()\n params = cartesian((l1, -l2, l3v, gxv))\n print params.shape\n p.map(CheckCouplings, params)\n print(\"--- Loop has taken: %s seconds ---\" % (time.time() - start_time_loop))", "def beta_iter(b,px,py,pyx_c,pm_size,restarts,iterations):\n candidates = []\n for r in range(restarts):\n\t # initialize distribution for bottleneck variable\n\t pm = np.random.rand(pm_size)+1\n\t pm /= pm.sum()\n\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t pym_c /= pym_c.sum(axis=0)\n\t # iterate the BA algorithm\n\t for i in range(iterations):\n\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,b)\n\t\t pm = p_m(pmx_c,px)\n\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t break\n\t\t pmx_c_old = pmx_c\n\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t# among the restarts, select the result that gives the minimum\n\t# value for the functional we're actually minimizing (eq 29 in\n\t# Tishby et al 2000).\n selected_candidate = min(candidates, key=lambda c: c['functional'])\n i_p = selected_candidate['past_info']\n i_f = selected_candidate['future_info']\n return [i_p,i_f,b]", "def multiple_testing_correction(pvalues, correction_type=\"FDR\"):\n from numpy import array, empty\n pvalues = array(pvalues)\n sample_size = 
pvalues.shape[0]\n qvalues = empty(sample_size)\n if correction_type == \"Bonferroni\":\n # Bonferroni correction\n qvalues = sample_size * pvalues\n elif correction_type == \"Bonferroni-Holm\":\n # Bonferroni-Holm correction\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n for rank, vals in enumerate(values):\n pvalue, i = vals\n qvalues[i] = (sample_size-rank) * pvalue\n elif correction_type == \"FDR\":\n # Benjamini-Hochberg, AKA - FDR test\n values = [(pvalue, i) for i, pvalue in enumerate(pvalues)]\n values.sort()\n values.reverse()\n new_values = []\n for i, vals in enumerate(values):\n rank = sample_size - i\n pvalue, index = vals\n new_values.append((sample_size/rank) * pvalue)\n for i in range(0, int(sample_size)-1):\n if new_values[i] < new_values[i+1]:\n new_values[i+1] = new_values[i]\n for i, vals in enumerate(values):\n pvalue, index = vals\n qvalues[index] = new_values[i]\n return qvalues", "def calculate_pos_rate(positive_pairs, negative_pairs, gap, extension, matrix, rate, normalize, type):\n\n positive_scores = []\n negative_scores = []\n\n for positive, negative in zip(positive_pairs, negative_pairs):\n\n # calculates the smith-waterman alignment scores\n positive_scores.append(generate_alignment(positive, gap, extension, matrix, normalize)[2])\n negative_scores.append(generate_alignment(negative, gap, extension, matrix, normalize)[2])\n\n return type(positive_scores, negative_scores, rate)", "def oracle_expectation_by_recalls(recalls, keys, probs, values):\n expectations = list()\n for recall in recalls:\n idxs = np.ceil(keys*recall).astype(np.int)\n e = sum(p*values[k][i] for i,k,p in zip(idxs,keys,probs))\n expectations.append(e)\n return np.array(expectations)", "def find_all_cps(xs, cp_prob=1./250, plot=False):\r\n prior_params = mu0, kappa0, alpha0, beta0 = np.mean(xs), 1., 1.01, 1.\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = map(lambda f: np.array([f]), prior_params)\r\n\r\n T = len(xs)\r\n R, M, V = np.zeros((T, T)), np.zeros((T, T)), np.zeros((T, T))\r\n R[0, 0] = 1\r\n M[0, 0] = mu0\r\n V[0, 0] = xs.var()\r\n\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n for t, x in enumerate(xs[1:], start=1):\r\n pred_prob = np.array([nct(x, m, v, d) for m, v, d in zip(mu_pred, sigma2_pred, dof_pred)])\r\n\r\n R[:t + 1, t] = compute_rt(R[:t, t - 1], pred_prob, cp_prob)\r\n\r\n post_params = mu_t, kappa_t, alpha_t, beta_t = update_params(x, prior_params, post_params)\r\n mu_pred, sigma2_pred, dof_pred = compute_t_params(mu_t, kappa_t, alpha_t, beta_t)\r\n\r\n M[:t + 1, t] = mu_pred\r\n V[:t + 1, t] = compute_t_var(sigma2_pred, dof_pred)\r\n\r\n if plot:\r\n mu_hat = np.sum(M*R, axis=0)\r\n var_hat = np.sum(V*R, axis=0)\r\n plot_results(xs, mu_hat, var_hat)\r\n\r\n return R, M, V", "def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) 
/ 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] += 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg", "def theoritical_spectrum(peptide_sequence): \n linear_kmers = []\n cyclic_kmers = []\n for i in range(len(peptide_sequence)):\n for j in range(i,len(peptide_sequence)):\n linear_kmers.append(peptide_sequence[i:j+1])\n for i in range(2,len(peptide_sequence)):\n for j in range(i-1):\n cyclic_kmers.append(peptide_sequence[i:len(peptide_sequence)]+peptide_sequence[0:j+1])\n kmers = linear_kmers+cyclic_kmers \n return sorted(list(map(get_molecular_weight,kmers)))", "def brute_force_rod_cutting(rod_length, length_list, price_list):\n max_profit = 0\n max_profit_cut_list = []\n\n if rod_length == 0:\n return 0, []\n\n for i in range(len(length_list)):\n remaining_rod_len = rod_length - length_list[i]\n if remaining_rod_len >= 0:\n current_profit, current_cut_list = brute_force_rod_cutting(\n remaining_rod_len, length_list, price_list)\n current_profit += price_list[i]\n current_cut_list.append(i)\n\n if (current_profit > max_profit):\n max_profit = current_profit\n max_profit_cut_list = current_cut_list[:]\n return max_profit, max_profit_cut_list", "def computeProbs(psDf, add_masked_seqs=True, filterOut=False, max_cdr3_length=30, allow_stop_codons=False, allow_X=False):\n \n out = []\n for rowi, row in psDf.iterrows():\n \"\"\"If iterrows is slow there are potentially ways to speed this up using psDf.apply()\"\"\"\n vals = {}\n vals['ind'] = rowi\n \n if filterOut:\n fo = filterOutRow(row,\n max_cdr3_length=max_cdr3_length,\n allow_stop_codons=allow_stop_codons,\n allow_X=allow_X)\n if fo:\n \"\"\"vals will be missing keys, which will be assigned Nan in outDf\"\"\"\n continue\n \n aprob_nucseq, aprob_protseq = samplerProb(row, 'a')\n va_rep_prob, ja_rep_prob = rearrangementProb(row, 'a')\n \n vals['a_protseq_prob' ] = aprob_protseq * va_rep_prob * ja_rep_prob\n vals['cdr3a_protseq_prob'] = aprob_protseq\n vals['va_rep_prob' ] = va_rep_prob\n vals['ja_rep_prob' ] = ja_rep_prob\n vals['a_nucseq_prob' ] = aprob_nucseq * va_rep_prob * ja_rep_prob\n \n bprob_nucseq, bprob_protseq = samplerProb(row, 'b')\n vb_rep_prob, jb_rep_prob = 
rearrangementProb(row, 'b')\n \n vals['b_protseq_prob' ] = bprob_protseq * vb_rep_prob * jb_rep_prob\n vals['cdr3b_protseq_prob'] = bprob_protseq\n vals['vb_rep_prob' ] = vb_rep_prob\n vals['jb_rep_prob' ] = jb_rep_prob\n vals['b_nucseq_prob' ] = bprob_nucseq * vb_rep_prob * jb_rep_prob\n \n if add_masked_seqs:\n cdr3a_protseq_masked, ita, cdr3a_new_nucseq = getMaskedSeqs(row, 'a')\n cdr3b_protseq_masked, itb, cdr3b_new_nucseq = getMaskedSeqs(row, 'b')\n\n vals[ 'cdr3a_protseq_masked'] = cdr3a_protseq_masked\n vals[ 'a_indels'] = ita\n vals[ 'cdr3a_new_nucseq' ] = cdr3a_new_nucseq\n vals[ 'cdr3b_protseq_masked'] = cdr3b_protseq_masked\n vals[ 'b_indels'] = itb\n vals[ 'cdr3b_new_nucseq' ] = cdr3b_new_nucseq\n out.append(vals)\n \n outDf = pd.DataFrame(out).set_index('ind')\n assert outDf.shape[0] == psDf.shape[0]\n return outDf" ]
[ "0.5755035", "0.56898737", "0.5400906", "0.53897715", "0.53442574", "0.5335959", "0.53210104", "0.5311045", "0.53048027", "0.5294955", "0.528744", "0.5275976", "0.5264708", "0.5260833", "0.52288777", "0.52222407", "0.5210756", "0.5198016", "0.5188784", "0.51821774", "0.5164539", "0.515431", "0.51099324", "0.51064193", "0.5097972", "0.5096863", "0.5089971", "0.50873816", "0.5078071", "0.50600713" ]
0.67105913
0
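Note: a minimal usage sketch for the uniform_recombination_map document in the row above. The centimorgen_to_phred helper is never defined in this row, so the conversion used here (probability = cM/100, phred = -10*log10(p)) is an assumption made only for this illustration; the spacing-to-cM arithmetic mirrors the row's own (positions[i] - positions[i - 1]) * 1e-6 * recombrate expression.

    import math

    def centimorgen_to_phred(cm):
        # Hypothetical helper (not part of the dataset row): read d centimorgans as a
        # recombination probability of d/100 and phred-scale it with -10*log10(p).
        return -10.0 * math.log10(cm / 100.0)

    def uniform_recombination_map(recombrate, positions):
        # Same shape as the row's document: a leading 0 for the first position, then one
        # phred-scaled value per gap between consecutive positions.
        return [0] + [
            round(centimorgen_to_phred((positions[i] - positions[i - 1]) * 1e-6 * recombrate))
            for i in range(1, len(positions))
        ]

    # 1 cM/Mb with SNPs 10 kb apart -> 0.01 cM per gap -> p = 1e-4 -> phred 40
    print(uniform_recombination_map(1.0, [10000, 20000, 30000]))  # [0, 40, 40]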
rest_framework can't deal with ManyToMany relations that have a through table. In xos, most of the through tables we have use defaults or blank fields, so there's no reason why we shouldn't be able to save these objects. So, let's strip out these m2m relations, and deal with them ourselves.
def save_object(self, obj, **kwargs): obj._complex_m2m_data={}; if getattr(obj, '_m2m_data', None): for relatedObject in obj._meta.get_all_related_many_to_many_objects(): if (relatedObject.field.rel.through._meta.auto_created): # These are non-trough ManyToMany relations and # can be updated just fine continue fieldName = relatedObject.get_accessor_name() if fieldName in obj._m2m_data.keys(): obj._complex_m2m_data[fieldName] = (relatedObject, obj._m2m_data[fieldName]) del obj._m2m_data[fieldName] serializers.ModelSerializer.save_object(self, obj, **kwargs); for (accessor, stuff) in obj._complex_m2m_data.items(): (relatedObject, data) = stuff through = relatedObject.field.rel.through local_fieldName = relatedObject.field.m2m_reverse_field_name() remote_fieldName = relatedObject.field.m2m_field_name() # get the current set of existing relations existing = through.objects.filter(**{local_fieldName: obj}); data_ids = [item.id for item in data] existing_ids = [getattr(item,remote_fieldName).id for item in existing] #print "data_ids", data_ids #print "existing_ids", existing_ids # remove relations that are in 'existing' but not in 'data' for item in list(existing): if (getattr(item,remote_fieldName).id not in data_ids): print "delete", getattr(item,remote_fieldName) item.delete() #(purge=True) # add relations that are in 'data' but not in 'existing' for item in data: if (item.id not in existing_ids): #print "add", item newModel = through(**{local_fieldName: obj, remote_fieldName: item}) newModel.save()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_m2m_fields(self, fieldname, mapping):\n _typemsg = \"Mapping must be a dictionary, with keys valued on the primary keys of instances in the queryset, \" \\\n \"valued on a list of primary keys of instances in the related object queryset\"\n if not isinstance(mapping, dict):\n raise TypeError(_typemsg)\n\n for _, value in mapping.items():\n if not isinstance(value, list):\n raise TypeError(_typemsg)\n\n field = self.model._meta.get_field(fieldname)\n if not isinstance(field, models.ManyToManyField):\n raise TypeError('Field must be a many-to-many type')\n\n field_instance = getattr(self.model, fieldname, None)\n if not field_instance:\n raise ValueError('Field not found')\n\n ThroughModel = field_instance.through\n through_model_fields = ThroughModel._meta.get_fields()\n\n # align which field goes with which through model field\n\n mapping_key_fieldname = ''\n mapping_value_fieldname = ''\n\n for f in through_model_fields:\n if isinstance(f, models.ForeignKey) and f.target_field.model == self.model:\n mapping_key_fieldname = f.attname\n\n elif isinstance(f, models.ForeignKey):\n mapping_value_fieldname = f.attname\n\n\n # delete existing m2m relationships for the provided keys\n key_ids = [i for i in mapping.keys()]\n ThroughModel.objects.filter(**{\n mapping_key_fieldname + '__in': key_ids\n }).delete()\n\n ls = []\n for key, values in mapping.items():\n for value in values:\n ls.append(ThroughModel(**{\n mapping_key_fieldname: key,\n mapping_value_fieldname: value\n }))\n\n ThroughModel.objects.bulk_create(ls)", "def test_many_to_many_through_self(self):\n through_field = Person._meta.get_field(\"parents\")\n through = through_field.remote_field.through\n\n metadata = MetaData(schema=\"unique\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[through].__table__.schema, \"unique\")", "def _save_reverse_relations(self, related_objects, instance):\n for field, related_field, data, kwargs in related_objects:\n # inject the PK from the instance\n if isinstance(field, serializers.ListSerializer):\n for obj in data:\n obj[related_field.name] = instance\n elif isinstance(field, serializers.ModelSerializer):\n data[related_field.name] = instance\n else:\n raise Exception(\"unexpected serializer type\")\n\n # reinject validated_data\n field._validated_data = data\n field.save(**kwargs)", "def create_many_related_manager(superclass, rel):\n class ManyRelatedManager(superclass):\n def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,\n source_field_name=None, target_field_name=None, reverse=False,\n through=None, prefetch_cache_name=None):\n super(ManyRelatedManager, self).__init__()\n self.model = model\n self.query_field_name = query_field_name\n self.core_filters = {'%s__pk' % query_field_name: instance._get_pk_val()}\n self.instance = instance\n self.symmetrical = symmetrical\n self.source_field_name = source_field_name\n self.target_field_name = target_field_name\n self.reverse = reverse\n self.through = through\n self.prefetch_cache_name = prefetch_cache_name\n self._fk_val = self._get_fk_val(instance, source_field_name)\n if self._fk_val is None:\n raise ValueError('\"%r\" needs to have a value for field \"%s\" before '\n 'this many-to-many relationship can be used.' 
%\n (instance, source_field_name))\n # Even if this relation is not to pk, we require still pk value.\n # The wish is that the instance has been already saved to DB,\n # although having a pk value isn't a guarantee of that.\n if instance.pk is None:\n raise ValueError(\"%r instance needs to have a primary key value before \"\n \"a many-to-many relationship can be used.\" %\n instance.__class__.__name__)\n\n def _get_fk_val(self, obj, field_name):\n \"\"\"\n Returns the correct value for this relationship's foreign key. This\n might be something else than pk value when to_field is used.\n \"\"\"\n if not self.through:\n # Make custom m2m fields with no through model defined usable.\n return obj.pk\n fk = self.through._meta.get_field(field_name)\n if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname:\n attname = fk.rel.get_related_field().get_attname()\n return fk.get_prep_lookup('exact', getattr(obj, attname))\n else:\n return obj.pk\n\n def get_query_set(self):\n try:\n return self.instance._prefetched_objects_cache[self.prefetch_cache_name]\n except (AttributeError, KeyError):\n db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)\n return super(ManyRelatedManager, self).get_query_set().using(db)._next_is_sticky().filter(**self.core_filters)\n\n def get_prefetch_query_set(self, instances):\n instance = instances[0]\n from django.db import connections\n db = self._db or router.db_for_read(instance.__class__, instance=instance)\n query = {'%s__pk__in' % self.query_field_name:\n set(obj._get_pk_val() for obj in instances)}\n qs = super(ManyRelatedManager, self).get_query_set().using(db)._next_is_sticky().filter(**query)\n\n # M2M: need to annotate the query in order to get the primary model\n # that the secondary model was actually related to. 
We know that\n # there will already be a join on the join table, so we can just add\n # the select.\n\n # For non-autocreated 'through' models, can't assume we are\n # dealing with PK values.\n fk = self.through._meta.get_field(self.source_field_name)\n source_col = fk.column\n join_table = self.through._meta.db_table\n connection = connections[db]\n qn = connection.ops.quote_name\n qs = qs.extra(select={'_prefetch_related_val':\n '%s.%s' % (qn(join_table), qn(source_col))})\n select_attname = fk.rel.get_related_field().get_attname()\n return (qs,\n attrgetter('_prefetch_related_val'),\n attrgetter(select_attname),\n False,\n self.prefetch_cache_name)\n\n # If the ManyToMany relation has an intermediary model,\n # the add and remove methods do not exist.\n if rel.through._meta.auto_created:\n def add(self, *objs):\n self._add_items(self.source_field_name, self.target_field_name, *objs)\n\n # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table\n if self.symmetrical:\n self._add_items(self.target_field_name, self.source_field_name, *objs)\n add.alters_data = True\n\n def remove(self, *objs):\n self._remove_items(self.source_field_name, self.target_field_name, *objs)\n\n # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table\n if self.symmetrical:\n self._remove_items(self.target_field_name, self.source_field_name, *objs)\n remove.alters_data = True\n\n def clear(self):\n self._clear_items(self.source_field_name)\n\n # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table\n if self.symmetrical:\n self._clear_items(self.target_field_name)\n clear.alters_data = True\n\n def create(self, **kwargs):\n # This check needs to be done here, since we can't later remove this\n # from the method lookup table, as we do with add and remove.\n if not self.through._meta.auto_created:\n opts = self.through._meta\n raise AttributeError(\"Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead.\" % (opts.app_label, opts.object_name))\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)\n self.add(new_obj)\n return new_obj\n create.alters_data = True\n\n def get_or_create(self, **kwargs):\n db = router.db_for_write(self.instance.__class__, instance=self.instance)\n obj, created = \\\n super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)\n # We only need to add() if created because if we got an object back\n # from get() then the relationship already exists.\n if created:\n self.add(obj)\n return obj, created\n get_or_create.alters_data = True\n\n def _add_items(self, source_field_name, target_field_name, *objs):\n # source_field_name: the PK fieldname in join table for the source object\n # target_field_name: the PK fieldname in join table for the target object\n # *objs - objects to add. 
Either object instances, or primary keys of object instances.\n\n # If there aren't any objects, there is nothing to do.\n from django.db.models import Model\n if objs:\n new_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n if not router.allow_relation(obj, self.instance):\n raise ValueError('Cannot add \"%r\": instance is on database \"%s\", value is on database \"%s\"' %\n (obj, self.instance._state.db, obj._state.db))\n fk_val = self._get_fk_val(obj, target_field_name)\n if fk_val is None:\n raise ValueError('Cannot add \"%r\": the value for field \"%s\" is None' %\n (obj, target_field_name))\n new_ids.add(self._get_fk_val(obj, target_field_name))\n elif isinstance(obj, Model):\n raise TypeError(\"'%s' instance expected, got %r\" % (self.model._meta.object_name, obj))\n else:\n new_ids.add(obj)\n db = router.db_for_write(self.through, instance=self.instance)\n vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)\n vals = vals.filter(**{\n source_field_name: self._fk_val,\n '%s__in' % target_field_name: new_ids,\n })\n new_ids = new_ids - set(vals)\n\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are inserting the\n # duplicate data row for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action='pre_add',\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=new_ids, using=db)\n # Add the ones that aren't there already\n self.through._default_manager.using(db).bulk_create([\n self.through(**{\n '%s_id' % source_field_name: self._fk_val,\n '%s_id' % target_field_name: obj_id,\n })\n for obj_id in new_ids\n ])\n\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are inserting the\n # duplicate data row for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action='post_add',\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=new_ids, using=db)\n\n def _remove_items(self, source_field_name, target_field_name, *objs):\n # source_field_name: the PK colname in join table for the source object\n # target_field_name: the PK colname in join table for the target object\n # *objs - objects to remove\n\n # If there aren't any objects, there is nothing to do.\n if objs:\n # Check that all the objects are of the right type\n old_ids = set()\n for obj in objs:\n if isinstance(obj, self.model):\n old_ids.add(self._get_fk_val(obj, target_field_name))\n else:\n old_ids.add(obj)\n # Work out what DB we're operating on\n db = router.db_for_write(self.through, instance=self.instance)\n # Send a signal to the other end if need be.\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are deleting the\n # duplicate data row for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action=\"pre_remove\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=old_ids, using=db)\n # Remove the specified objects from the join table\n self.through._default_manager.using(db).filter(**{\n source_field_name: self._fk_val,\n '%s__in' % target_field_name: old_ids\n }).delete()\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are deleting the\n # duplicate data row for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action=\"post_remove\",\n instance=self.instance, reverse=self.reverse,\n 
model=self.model, pk_set=old_ids, using=db)\n\n def _clear_items(self, source_field_name):\n db = router.db_for_write(self.through, instance=self.instance)\n # source_field_name: the PK colname in join table for the source object\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are clearing the\n # duplicate data rows for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action=\"pre_clear\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=None, using=db)\n self.through._default_manager.using(db).filter(**{\n source_field_name: self._fk_val\n }).delete()\n if self.reverse or source_field_name == self.source_field_name:\n # Don't send the signal when we are clearing the\n # duplicate data rows for symmetrical reverse entries.\n signals.m2m_changed.send(sender=self.through, action=\"post_clear\",\n instance=self.instance, reverse=self.reverse,\n model=self.model, pk_set=None, using=db)\n\n return ManyRelatedManager", "def test_many_to_many_with_intermediate(self):\n artist = Artist.objects.create(name=\"Great singer\")\n group = Group.objects.create(name=\"Cool band\")\n\n # can't use group.members.add() with intermediate model\n membership = Membership.objects.create(\n artist=artist,\n group=group,\n invite_reason=\"Need a new drummer\"\n )\n\n # group members visible now\n self.assertEqual(group.members.count(), 1)\n\n # soft-delete intermediate instance\n # so link should be invisible\n membership.delete()\n self.assertEqual(Membership.objects.deleted_only().count(), 1)\n\n self.assertEqual(group.members.count(), 0)\n self.assertEqual(artist.group_set.count(), 0)", "def handle_m2m_field(self, obj, field):\n if field.rel.through._meta.auto_created:\n # self._start_relational_field(field)\n if self.use_natural_keys and hasattr(field.rel.to, 'natural_key'):\n # If the objects in the m2m have a natural key, use it\n def handle_m2m(value):\n natural = value.natural_key()\n nat_key = NATURAL_KEY_JOINER.join(natural)\n field_id = \"%s.%s\" % (obj.pk, nat_key)\n self._start_relational_field(field, field_id=field_id, keytype=\"natural\")\n self.xml.characters(smart_text(nat_key))\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")\n # Iterable natural keys are rolled out as subelements\n # self.xml.startElement(\"object\", {})\n # for key_value in natural:\n # self.xml.startElement(\"natural\", {})\n # self.xml.characters(smart_text(key_value))\n # self.xml.endElement(\"natural\")\n # self.xml.endElement(\"object\")\n else:\n def handle_m2m(value):\n field_id = \"%s.%s\" % (obj.pk, value._get_pk_val())\n self._start_relational_field(field, field_id)\n self.xml.characters(smart_text(value._get_pk_val()))\n self.xml.endElement(\"source\")\n self.indent(3)\n self.xml.endElement(\"trans-unit\")\n # self.xml.addQuickElement(\"object\", attrs={\n # 'pk' : smart_text(value._get_pk_val())\n # })\n for relobj in getattr(obj, field.name).iterator():\n handle_m2m(relobj)", "def add_m2m_factories(self) -> None:\n opts = self.model._meta\n for rel in get_model_relations(self.model):\n if not rel.many_to_many:\n continue\n if self.model == rel.field.model:\n # The ManyToManyField is declared on model.\n related_model = rel.field.related_model\n descriptor_name = rel.field.name\n declaration_name = rel.field.name\n elif self.model == rel.field.related_model:\n # The ManyToManyField is declared on the related_model;\n # working on a 'reverse' m2m relation\n related_model = 
rel.field.model\n descriptor_name = rel.get_accessor_name()\n declaration_name = rel.name\n else:\n # Rel is an inherited relation as neither end of the relation\n # points to self.model.\n # One relation points to the inherited parent model, the other\n # to the actual related model. If rel.field.model is the parent,\n # the related_model is rel.field.related_model and vice versa.\n if rel.field.model in opts.parents:\n # self.model inherited the actual ManyToManyField.\n # Use the inherited ManyToManyField's name for descriptor\n # and declaration.\n related_model = rel.field.related_model\n descriptor_name = rel.field.name\n declaration_name = rel.field.name\n elif rel.field.related_model in opts.parents:\n # self.model inherited the reverse ManyToManyRelation\n related_model = rel.field.model\n descriptor_name = rel.get_accessor_name()\n declaration_name = rel.name\n else:\n raise TypeError(\n \"Unknown relation: {!s}\".format(rel.get_path_info())\n )\n factory_name = self._get_factory_name_for_model(related_model)\n if not hasattr(self.factory, declaration_name):\n m2m_factory = M2MFactory(\n factory=factory_name,\n descriptor_name=descriptor_name,\n related_model=related_model\n )\n setattr(self.factory, declaration_name, m2m_factory)", "def test_many_to_many_through_self_aliased(self):\n through_field = Person._meta.get_field(\"parents\")\n through = through_field.remote_field.through\n\n metadata = MetaData(schema=\"unique\")\n sa_models = construct_models(metadata)\n aliased(sa_models[through])", "def test_insert_many_to_many():\n\n model1 = get_fake_model({\"name\": models.TextField(primary_key=True)})\n\n model2 = get_fake_model(\n {\n \"name\": models.TextField(primary_key=True),\n \"model1s\": models.ManyToManyField(model1),\n }\n )\n\n row2 = model2.objects.on_conflict(\n [\"name\"], ConflictAction.UPDATE\n ).insert_and_get(name=\"swen\")\n\n row1 = model1.objects.create(name=\"booh\")\n\n row2.model1s.add(row1)\n row2.save()", "def _save_direct_relations(self, kwargs):\n for field_name, field in self.fields.items():\n if field.read_only:\n continue\n if isinstance(self._validated_data, dict) and self._validated_data.get(field.source) is None:\n continue\n if not isinstance(field, serializers.BaseSerializer):\n continue\n if hasattr(self, 'Meta') and hasattr(self.Meta, 'model'):\n # ModelSerializer (or similar) so we need to exclude reverse relations\n try:\n _, direct = self._get_related_field(field)\n except FieldDoesNotExist:\n continue\n if not direct:\n continue\n\n # reinject validated_data\n field._validated_data = self._validated_data[field_name]\n self._validated_data[field_name] = field.save(**kwargs.pop(field_name, {}))", "def relationships(self):", "def call( # type: ignore[override]\n self,\n instance: Model,\n step: builder.BuildStep,\n context: declarations.PostGenerationContext\n ) -> None:\n related_manager = getattr(instance, self.descriptor_name)\n # Get the right field names from the intermediary m2m table.\n source_field = related_manager.through._meta.get_field(\n related_manager.source_field_name\n )\n if isinstance(instance, source_field.related_model):\n # The source_field points to the instance's model.\n source = related_manager.source_field_name\n target = related_manager.target_field_name\n else:\n source = related_manager.target_field_name\n target = related_manager.source_field_name\n\n # Add the relation.\n for related_object in super().call(instance, step, context):\n related_manager.through.objects.create(\n **{source: instance, target: 
related_object}\n )", "def _make_reverse_relations_valid(self, data):\n for field_name, (field, related_field) in self._get_reverse_fields().items():\n if data.get(field.source) is None:\n continue\n if isinstance(field, serializers.ListSerializer):\n field = field.child\n if isinstance(field, serializers.ModelSerializer):\n # find the serializer field matching the reverse model relation\n for sub_field in field.fields.values():\n if sub_field.source == related_field.name:\n sub_field.required = False\n # found the matching field, move on\n break", "def one_to_many(table, backref):\n return relationship(table, back_populates=backref, lazy=\"dynamic\", viewonly=True)", "def _extract_reverse_relations(self, kwargs):\n # Remove related fields from validated data for future manipulations\n related_objects = []\n for field_name, (field, related_field) in self._get_reverse_fields().items():\n if self._validated_data.get(field.source) is None:\n continue\n serializer = field\n if isinstance(serializer, serializers.ListSerializer):\n serializer = serializer.child\n if isinstance(serializer, serializers.ModelSerializer):\n related_objects.append((\n field,\n related_field,\n self._validated_data.pop(field.source),\n kwargs.pop(field_name, {}),\n ))\n return related_objects", "def _filter_related_m2m(self, rel):\n field = rel.field\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(rel.parent_model, rel.model, field):\n return rel", "def _object_update(self, obj, items):\n # many to many fields are saved after the main object\n m2ms = {}\n for key, value in items.iteritems():\n try:\n field = obj._meta.get_field(key)\n if isinstance(field, ManyToManyField):\n m2ms[key] = value\n else:\n setattr(obj, key, value)\n\n except FieldDoesNotExist:\n raise InvalidParameter(key)\n\n try:\n obj.full_clean()\n obj.save()\n except ValidationError as e:\n raise InvalidParameter(e.message_dict, override=True)\n\n for key, values in m2ms.iteritems():\n manager = getattr(obj, key)\n manager.clear()\n manager.add(*values)", "def test_many_to_many_mapping_cache_with_clear(self):\n len(Driver.objects.get(id=self.driver.id).cars.all())\n self.driver.cars.clear()\n reset_queries()\n\n # Cache for both models should be invalidated as clear is an m2m change\n self.assertEqual(len(Driver.objects.get(id=self.driver.id).cars.all()), 0)\n self.assertEqual(len(connection.queries), 2)", "def _filter_m2m(self, field):\n if isinstance(field, models.ManyToManyField):\n if self._join_allowed(field.model, field.rel.to, field):\n return field", "def test_many_to_many_prefetch_related(self):\n artist = Artist.objects.create(name=\"Great singer\")\n group = Group.objects.create(name=\"Cool band\")\n\n membership = Membership.objects.create(\n artist=artist,\n group=group,\n invite_reason=\"Need a new drummer\"\n )\n\n membership.delete()\n\n query = Group.objects.filter(id=group.id).prefetch_related(\"members\")\n self.assertEqual(\n query[0].members.count(),\n 0\n )", "def many_to_many(name, fromtable, totable):\r\n lfromtable = fromtable.lower()\r\n ltotable = totable.lower()\r\n table = db.Table(name,\r\n Column(ltotable + '_id', Integer, ForeignKey(ltotable + '.id')),\r\n Column(lfromtable + '_id', Integer, ForeignKey(lfromtable + '.id'))\r\n )\r\n\r\n return relationship(totable, secondary=table,\r\n backref=backref(name + '_' + lfromtable + 's', lazy='dynamic'))", "def convert_orgs_to_through_model(apps, schema_editor):\n OrgPlotInvolvement = apps.get_model('dominion', 'OrgPlotInvolvement')\n OldThroughModel 
= apps.get_model('dominion', 'Plot_orgs')\n to_create_list = []\n for old in OldThroughModel.objects.all():\n to_create_list.append(OrgPlotInvolvement(plot=old.plot, org=old.organization))\n OrgPlotInvolvement.objects.bulk_create(to_create_list)", "def is_m2m_set(self, int_model, model1, model2):\n for m2m in model1._meta.many_to_many:\n if m2m.rel.to == model2 and m2m.rel.through == int_model:\n return True\n for m2m in model2._meta.many_to_many:\n if m2m.rel.to == model1 and m2m.rel.through == int_model:\n return True\n return False", "def remove(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Remove called on non many to many model\")\n\n query = RemoveQuery(cls, obj1, obj2)\n yield query.execute()\n\n if obj2 in getattr(obj1, obj2._meta.name):\n getattr(obj1, obj2._meta.name).remove(obj2)\n\n if obj1 in getattr(obj2, obj1._meta.name):\n getattr(obj2, obj1._meta.name).remove(obj1)", "def add(cls, obj1, obj2):\n if not cls._meta.many_to_many:\n raise Exception(\"ERROR: Add called on non many to many model\")\n\n query = AddQuery(cls, obj1, obj2)\n yield query.execute()\n\n if not getattr(obj1, obj2._meta.name):\n setattr(obj1, obj2._meta.name, [obj2])\n else:\n getattr(obj1, obj2._meta.name).append(obj2)\n\n if not getattr(obj2, obj1._meta.name):\n setattr(obj2, obj1._meta.name, [obj1])\n else:\n getattr(obj2, obj1._meta.name).append(obj1)", "def test__is_many_not_a_list(self):\n is_many = BaseResource._is_many(dict())\n self.assertFalse(is_many)", "def save_related(self, request, form, formsets, change):\n pass", "def _relation_check(self):\n seen = set()\n for entity in self.get_entities():\n for field in entity.fields.itervalues():\n if field.is_relation():\n seen.add(field.remote_name)\n missing = seen - set(self.entities.keys())\n if missing:\n raise exceptions.SchemaError(\n 'undefined entities referenced in relations: %s' % (\n ', '.join(missing)))", "def test_remove_relation_types(self):\n pass", "def __duplicate_m2m_fields(self, duplicate):\n fields = set()\n\n for f in self._meta.many_to_many:\n if any(\n [\n f.name in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.name not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n for f in self._meta.related_objects:\n if f.many_to_many:\n if any(\n [\n f.get_accessor_name() in self._clone_m2m_fields,\n self._clone_excluded_m2m_fields\n and f.get_accessor_name()\n not in self._clone_excluded_m2m_fields,\n ]\n ):\n fields.add(f)\n\n # Clone many to many fields\n for field in fields:\n if hasattr(field, \"field\"):\n # ManyToManyRel\n field_name = field.field.m2m_reverse_field_name()\n through = field.through\n source = getattr(self, field.get_accessor_name())\n destination = getattr(duplicate, field.get_accessor_name())\n else:\n through = field.remote_field.through\n field_name = field.m2m_field_name()\n source = getattr(self, field.attname)\n destination = getattr(duplicate, field.attname)\n if all(\n [\n through,\n not through._meta.auto_created,\n ]\n ):\n objs = through.objects.filter(**{field_name: self.pk})\n for item in objs:\n if hasattr(through, \"make_clone\"):\n try:\n item.make_clone(attrs={field_name: duplicate})\n except IntegrityError:\n item.make_clone(\n attrs={field_name: duplicate}, sub_clone=True\n )\n else:\n item.pk = None\n setattr(item, field_name, duplicate)\n item.save()\n else:\n destination.set(source.all())\n\n return duplicate" ]
[ "0.65324277", "0.64302415", "0.607475", "0.60060024", "0.5994023", "0.58564836", "0.5841459", "0.5675029", "0.5641935", "0.56222016", "0.5618012", "0.5562107", "0.55059177", "0.54370886", "0.54183936", "0.5415212", "0.5393096", "0.5389939", "0.5386621", "0.53754026", "0.53401756", "0.5240939", "0.5195052", "0.51716506", "0.5160053", "0.5159902", "0.51595205", "0.5137145", "0.5120733", "0.5090738" ]
0.7114395
0
Set additional append str to stock symbol when forming stock URL. Set to self.cur_quotes_stock_portion_additional_url. Mainly to set the '.SI' for Singapore stocks.
def set_stock_sym_append_str(self, append_str): self.com_data_stock_portion_additional_url = append_str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_stock_sym_append_str(self, append_str):\n self.cur_quotes_stock_portion_additional_url = append_str", "def form_cur_quotes_stock_url_str(self):\n self.cur_quotes_stock_portion_url = ''\n for n in self.target_stocks:\n self.cur_quotes_stock_portion_url = self.cur_quotes_stock_portion_url + n +\\\n self.cur_quotes_stock_portion_additional_url + ','\n \n self.cur_quotes_stock_portion_url =self.cur_quotes_stock_portion_url[:-1]", "def form_com_data_stock_url_str(self):\n self.com_data_stock_portion_url = ''\n for n in self.target_stocks:\n self.com_data_stock_portion_url = self.com_data_stock_portion_url +'%22' + n +\\\n self.com_data_stock_portion_additional_url + '%22%2C'\n \n self.com_data_stock_portion_url = self.com_data_stock_portion_url[:-3]", "def form_cur_quotes_property_url_str(self):\n start_str = '&f='\n self.cur_quotes_property_portion_url = start_str + self.cur_quotes_property_str", "def form_url_str(self):\n self.form_com_data_stock_url_str()\n \n self.com_data_full_url = self.com_data_start_url + self.com_data_stock_portion_url +\\\n self.com_data_end_url", "def form_url_str(self, type = 'cur_quotes'):\n if type == 'cur_quotes':\n self.form_cur_quotes_stock_url_str()\n \n # form the property. 2 methods enabled.\n if self.enable_form_properties_fr_exceltable:\n self.form_cur_quotes_property_url_str_fr_excel()\n else:\n self.form_cur_quotes_property_url_str()\n \n self.cur_quotes_full_url = self.cur_quotes_start_url + self.cur_quotes_stock_portion_url +\\\n self.cur_quotes_property_portion_url + self.cur_quotes_end_url", "def add_ticker(self):\r\n ticker = self.addEntry.get().upper()\r\n self.get_quote(ticker)", "def add_spy_info(self):\n spy = normalize_data(retrieve_stock_info('SPY'))\n self.data = self.data.join(spy, how='inner')", "def form_cur_quotes_property_url_str_fr_excel(self):\n from xls_table_extract_module import XlsExtractor\n self.xls_property_data = XlsExtractor(fname = self.properties_excel_table, sheetname= 'Sheet1',\n param_start_key = 'stock_property//', param_end_key = 'stock_property_end//',\n header_key = '', col_len = 2)\n\n self.xls_property_data.open_excel_and_process_block_data()\n\n ## form the header\n self.cur_quotes_parm_headers = [n.encode().strip() for n in self.xls_property_data.data_label_list]\n\n ## form the url str\n start_str = '&f='\n target_properties = ''.join([n[0].encode().strip() for n in self.xls_property_data.data_value_list])\n self.cur_quotes_property_portion_url = start_str + target_properties", "def _add_chart_symbol(self, sym: str):\n return \"=\" + json.dumps({\"symbol\": sym})", "def addStock(self,Stock):\n self.DoS[Stock.get_Symbol()]=Stock", "def _constructUrlForMarketCap(self, company_symbol):\n return self.__MARKET_CAP_BASE_URL + company_symbol", "def add_stock(self, symbol, quantity, unit_price):\n # TODO write SQL statement to grab unit_price\n stock_price_total = quantity * unit_price # TODO write SQL statement\n # TODO deduct stock quantity from market ??\n self.portfolios.append((symbol, quantity, unit_price))\n self.value += stock_price_total", "def append(self, symbol):\n if symbol != '\"':\n self.value += symbol\n return False\n else:\n return True", "def _get_url(self, url, series):\n d = {\n 'apikey' : self.api_key,\n 'language' : self.language\n }\n if isinstance(series, basestring):\n d['seriesname'] = quote(series.encode('utf-8'))\n else:\n d['seriesname'] = series\n url = url % d \n return self.url_base+url", "def url_changed(self, url):\r\n self.url_combo.add_text(self.url_to_text(url))", "def 
ticker(self, symbol, **kwargs):\n pass", "def update_stock(option, stock):\n lowered_opt = option.lower()\n if lowered_opt == 'f':\n stock[\"five\"]+=1\n elif lowered_opt == 'o':\n stock[\"one\"] += 1\n elif lowered_opt == 'q':\n stock[\"quarter\"] += 1\n elif lowered_opt == 'd':\n stock[\"dime\"] += 1\n else:\n stock[\"nickel\"] +=1", "def produce_link_yahoo(self, value_search:str) -> str:\n for replace, replaced in [(' ', '+'), \n ('$', '%24')]:\n value_search = value_search.replace(replace, replaced)\n return ''.join(['https://search.yahoo.com/search;',\n '_ylt=A2KLfSAI0V5h7z0AOIxXNyoA;',\n '_ylc=X1MDMjc2NjY3OQRfcgMyBGZyA',\n '3lmcC10BGZyMgNzYi10b3AEZ3ByaWQ',\n 'DcFkyVEFrN3VTZy5adHlqMGZuUmhKQ',\n 'QRuX3JzbHQDMARuX3N1Z2cDMTAEb3J',\n 'pZ2luA3NlYXJjaC55YWhvby5jb20Ec',\n 'G9zAzIEcHFzdHIDBHBxc3RybAMwBHF',\n 'zdHJsAzIwBHF1ZXJ5A2ElMjRhcCUyM',\n 'HJvY2t5JTIwbmV0JTIwd29ydGgEdF9',\n 'zdG1wAzE2MzM2MDM4NjQ-',\n f'?p={value_search}&fr2=sb-top&fr=yfp-t'])", "def add_stock_info(day, stock_price):\n global stock_info\n // your code here", "def add_stock(self, identifier, sub, expression, initial_condition):\n\n # todo: consider the case where different flows work over different subscripts\n # todo: properly handle subscripts here\n # todo: build a test case to test the above\n # todo: force the sub parameter to be a list\n # todo: handle docstrings\n initial_condition = initial_condition.replace('\\n','').replace('\\t','') # Todo:pull out\n if sub:\n if isinstance(sub, basestring): sub = [sub] # Todo: rework\n directory, size = get_array_info(sub, self.dictofsubs)\n if re.search(';',initial_condition): # format arrays for numpy\n initial_condition = 'np.'+ np.array(np.mat(initial_condition.strip(';'))).__repr__()\n\n # todo: I don't like the fact that the array is applied even when it isnt needed\n initial_condition += '*np.ones((%s))'%(','.join(map(str,size)))\n\n funcstr = templates['stock'].substitute(identifier=identifier,\n expression=expression,\n initial_condition=initial_condition)\n\n if sub: # this is super bad coding practice, should change it.\n funcstr += '%s.dimension_dir = '%identifier+directory.__repr__()+'\\n'\n\n self.body.append(funcstr)\n self.stocklist.append(identifier)", "def add_string_to_image_url(url, addition):\n filename, ext = splitext(url)\n return ''.join([filename, '-', addition, ext])", "def save(self, *args, **kwargs):\n self.stock_value = self.purchase_price * self.stock_level\n super().save(*args, **kwargs)", "def __init__(self):\n # Param\n ## self.target_stocks use mainly for a few stocks.\n ## it also use when setting the 45 or 50 stocks at a time to url\n self.target_stocks = ['S58.SI','S68.SI'] ##special character need to be converted\n self.full_stocklist_to_retrieve = [] #full range fo stocks\n \n # for difffernt retrieval, based on the dict available to select the file type\n # currently have \"watcher\", \"all\" where watcher is the selected stocks to watch.\n self.stock_retrieval_type = 'watcher' \n\n ## current data .csv file url formation\n #header to match the sequence of the formed url\n self.cur_quotes_parm_headers = ['NAME', 'SYMBOL', 'LATEST_PRICE', 'OPEN', 'CLOSE','VOL',\n 'YEAR_HIGH','YEAR_LOW'] #label to be use when downloading.\n \n # URL forming for price details\n self.cur_quotes_start_url = \"http://download.finance.yahoo.com/d/quotes.csv?s=\"\n self.cur_quotes_stock_portion_url = ''\n self.cur_quotes_stock_portion_additional_url = '.SI'# for adding additonal str to the stock url.\n self.cur_quotes_property_portion_url = ''\n 
self.cur_quotes_property_str = 'nsl1opvkj' #default list of properties to copy.\n self.cur_quotes_end_url = \"&e=.csv\"\n self.cur_quotes_full_url = ''\n\n # Properties from excel\n self.enable_form_properties_fr_exceltable = 1\n self.properties_excel_table = r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\Individual_stock_query_property.xls'\n\n # Output storage\n self.cur_quotes_csvfile = r'c:\\data\\temp\\stock_data.csv'\n self.cur_quotes_df = object()\n\n ## !!!\n self.cur_quotes_url_list = [] # store of all the url list being query. For debug.\n\n # for debug/printing\n self.store_individual_set_df = []\n self.__print_url = 0 # for printing the url string\n\n # input file path\n # dict based on the file for different type of retrieval\n self.retrieval_type_input_file_dict = {\n \"all\" : r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\stocklist.csv',\n \"watcher\": r'c:\\data\\google_stock_screener.csv'\n }", "def svn_info_t_URL_set(svn_info_t_self, char_URL): # real signature unknown; restored from __doc__\n pass", "def default_save(self,suffix=EMPTYCHAR,extra=EMPTYCHAR):\r\n\r\n pass", "def update_stock_info(self, entry, item_name, item_url, item_stock, item_cost):\n self.items_list.delete(entry)\n self.items_list.insert(\n \"\", \"end\", values=(item_name, item_url, item_stock, item_cost)\n )", "def add_symbol_attribute(self, symbol_attribute):\n self.symbol_attributes.append(symbol_attribute)", "def add_quote(item):\n if type(item) == str:\n return \"\\'\" + item + \"\\'\"\n else:\n return item", "def base_quoted(self):\n\n return urllib.parse.quote(self.base)" ]
[ "0.83196074", "0.7468265", "0.69796675", "0.61658657", "0.61611176", "0.5607167", "0.52325624", "0.5215801", "0.5201767", "0.5153672", "0.4878753", "0.48634982", "0.4798464", "0.47756186", "0.47064564", "0.46863243", "0.46857637", "0.46786737", "0.467215", "0.46650028", "0.4661894", "0.46566454", "0.464783", "0.46333182", "0.46186143", "0.4613182", "0.46121472", "0.4591565", "0.45845062", "0.4580512" ]
0.8144998
1
Form the stock portion list for the cur quotes URL.
def form_com_data_stock_url_str(self): self.com_data_stock_portion_url = '' for n in self.target_stocks: self.com_data_stock_portion_url = self.com_data_stock_portion_url +'%22' + n +\ self.com_data_stock_portion_additional_url + '%22%2C' self.com_data_stock_portion_url = self.com_data_stock_portion_url[:-3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_cur_quotes_stock_url_str(self):\n self.cur_quotes_stock_portion_url = ''\n for n in self.target_stocks:\n self.cur_quotes_stock_portion_url = self.cur_quotes_stock_portion_url + n +\\\n self.cur_quotes_stock_portion_additional_url + ','\n \n self.cur_quotes_stock_portion_url =self.cur_quotes_stock_portion_url[:-1]", "def get_cur_quotes(self):\n self.form_url_str()\n if self.__print_url: print self.cur_quotes_full_url\n self.downloading_csv(self.cur_quotes_full_url)\n self.cur_quotes_create_dataframe()", "def form_url_str(self):\n self.form_com_data_stock_url_str()\n \n self.com_data_full_url = self.com_data_start_url + self.com_data_stock_portion_url +\\\n self.com_data_end_url", "def get_cur_quotes_fr_list(self):\n\n ## full list with special characters take care\n full_list = self.replace_special_characters_in_list(self.full_stocklist_to_retrieve)\n chunk_of_list = self.break_list_to_sub_list(self.full_stocklist_to_retrieve)\n self.temp_full_data_df = None\n for n in chunk_of_list:\n # print the progress\n sys.stdout.write('.')\n\n # set the small chunk of list\n self.set_target_stocks_list(n)\n self.get_cur_quotes()\n \n ## need take care of cases where the return is empty -- will return Missing symbols list\n if not len(self.cur_quotes_df.columns) < len(self.cur_quotes_parm_headers):\n self.store_individual_set_df.append(self.cur_quotes_df)\n if self.temp_full_data_df is None:\n self.temp_full_data_df = self.cur_quotes_df\n else:\n self.temp_full_data_df = self.temp_full_data_df.append(self.cur_quotes_df)\n\n ## Remove the % symbol fr self.temp_full_data_df columns\n self.rm_percent_symbol_fr_cols()\n\n print 'Done\\n'", "def form_cur_quotes_property_url_str(self):\n start_str = '&f='\n self.cur_quotes_property_portion_url = start_str + self.cur_quotes_property_str", "def set_stock_sym_append_str(self, append_str):\n self.cur_quotes_stock_portion_additional_url = append_str", "def stocks_history(request):\n\n symbol = request.args.get('symbol')\n\n if symbol is None:\n return jsonify([])\n\n client = bigquery.Client()\n qry = client.query(\"\"\"\n SELECT \n date,\n adj_close,\n symbol,\n sma_20,\n std_20,\n sma_50,\n sma_200,\n bb_perc_20\n FROM `ticker-224822.ticker_test_120718.analytics_view`\n where \n symbol = '{symbol}'\n and extract(year from date) >= 2010\n \"\"\".format(symbol=symbol))\n\n results = qry.result()\n results = [dict(row.items()) for row in results]\n resp = custom_jsonify(results)\n resp.headers.add('Access-Control-Allow-Origin', '*')\n resp.headers.add('Access-Control-Allow-Methods', 'GET')\n return resp", "def stock_url(stock_symbol, day=None, month=None, year=None):\r\n\r\n page = \"http://ichart.finance.yahoo.com/table.csv?\"\r\n page = ''.join([page, 's=', stock_symbol])\r\n now = datetime.datetime.now()\r\n if day == None:\r\n day = now.day\r\n if month == None:\r\n month = now.month\r\n if year == None:\r\n year = now.year\r\n page = ''.join([page, '&amp;d=', str(month)])\r\n page = ''.join([page, '&amp;e=', str(day)])\r\n page = ''.join([page, '&amp;f=', str(year)])\r\n page = ''.join([page, '&amp;g=d'])\r\n # Set the start date to Jan 1 1960 and the file will pick up data for as far\r\n # back as possible\r\n page = ''.join([page, '&amp;a=1'])\r\n page = ''.join([page, '&amp;b=1'])\r\n page = ''.join([page, '&amp;c=1960'])\r\n page = ''.join([page, '&amp;ignore=.csv'])\r\n # print(page)\r\n return(page)", "def form_url_str(self, type = 'cur_quotes'):\n if type == 'cur_quotes':\n self.form_cur_quotes_stock_url_str()\n \n # form the property. 
2 methods enabled.\n if self.enable_form_properties_fr_exceltable:\n self.form_cur_quotes_property_url_str_fr_excel()\n else:\n self.form_cur_quotes_property_url_str()\n \n self.cur_quotes_full_url = self.cur_quotes_start_url + self.cur_quotes_stock_portion_url +\\\n self.cur_quotes_property_portion_url + self.cur_quotes_end_url", "def prices(symbol):\n to = date.today().strftime(\"%Y%m%d\")\n c = db.cursor()\n c.execute(\"SELECT DATE_ADD(max(date), INTERVAL 1 DAY) FROM quote where symbol = %s\",\n (symbol))\n (_from, ) = c.fetchone()\n if _from == date.today():\n print \"Skipping %s\" % symbol\n return\n print \"Downloading %s\" % symbol\n if _from is None: \n _from = start_date\n else:\n _from = _from.strftime(\"%Y%m%d\")\n prices = stockquote.get_historical_prices(symbol, _from, to)\n headers = prices[0]\n try:\n close = get_idx(headers, 'Close')\n date_ = get_idx(headers, 'Date')\n open = get_idx(headers, 'Open')\n high = get_idx(headers, 'High')\n low = get_idx(headers, 'Low')\n quotes = prices[1:]\n for l in quotes:\n #print \"%s %s\" % (l[date_], l[close])\n try:\n insert(symbol, l[date_], l[close], l[high], l[low], l[open])\n except Exception, e:\n print \"Could not insert %s:%s\" % (symbol, e)\n print \"Inserted %s new quotes for %s\" % (len(quotes), symbol)\n except Exception, e:\n print \"Could not download %s\" % symbol\n print e", "async def _get_stock_data(self, stocks: list):\n\t\tapi_url = 'https://sandbox.tradier.com/v1/markets/quotes'\n\t\tstocks = ','.join(stocks)\n\t\tif not stocks:\n\t\t\treturn []\n\t\ttoken = await self.bot.get_shared_api_tokens('stocks')\n\t\ttoken = token.get('key', None)\n\t\tif not token:\n\t\t\traise ValueError(\n\t\t\t\t'You need to set an API key!\\n'\n\t\t\t\t'Follow this guide for instructions on how to get one:\\n'\n\t\t\t\t'<https://github.com/Flame442/FlameCogs/blob/master/stocks/setup.md>'\n\t\t\t)\n\t\tparams = {'symbols': stocks}\n\t\theaders = {'Authorization': f'Bearer {token}', 'Accept': 'application/json'}\n\t\tasync with aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(api_url, params=params, headers=headers) as r:\n\t\t\t\ttry:\n\t\t\t\t\tr = await r.json()\n\t\t\t\texcept aiohttp.client_exceptions.ContentTypeError:\n\t\t\t\t\t#This might happen when being rate limited, but IDK for sure...\n\t\t\t\t\traise ValueError('Could not get stock data. 
The API key entered is most likely not valid.')\n\t\tr = r['quotes']\n\t\tif 'quote' not in r:\n\t\t\treturn []\n\t\tr = r['quote']\n\t\tif not isinstance(r, list):\n\t\t\tr = [r]\n\t\tstock = {\n\t\t\tx['symbol']: {\n\t\t\t\t'price': max(1, int(x['last'] * 100)),\n\t\t\t\t#New API does not give this info.\n\t\t\t\t'total_count': None, #int(x['marketCap'] / x['last']) if x['marketCap'] else None\n\t\t\t} for x in r if 'last' in x and x['last'] is not None\n\t\t}\n\t\treturn stock", "def form_cur_quotes_property_url_str_fr_excel(self):\n from xls_table_extract_module import XlsExtractor\n self.xls_property_data = XlsExtractor(fname = self.properties_excel_table, sheetname= 'Sheet1',\n param_start_key = 'stock_property//', param_end_key = 'stock_property_end//',\n header_key = '', col_len = 2)\n\n self.xls_property_data.open_excel_and_process_block_data()\n\n ## form the header\n self.cur_quotes_parm_headers = [n.encode().strip() for n in self.xls_property_data.data_label_list]\n\n ## form the url str\n start_str = '&f='\n target_properties = ''.join([n[0].encode().strip() for n in self.xls_property_data.data_value_list])\n self.cur_quotes_property_portion_url = start_str + target_properties", "def reqData(self):\r\n #self.reqGlobalCancel()\r\n #self.add_historical(\"Stock('TSLA', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('IBM', 'SMART', 'USD')\")\r\n #self.add_historical(\"Stock('MSFT', 'SMART', 'USD')\")\r\n self.add_historical(\"Stock('FB', 'SMART', 'USD')\")", "def get_quote(self, ticker):\r\n key = 'GLC0GTVKR51SY1V'\r\n quote_url = 'https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol=' + ticker.upper() + '&apikey=' + key\r\n key_metrics_url = 'https://www.alphavantage.co/query?function=OVERVIEW&symbol=' + ticker.upper() + '&apikey=' + key\r\n\r\n quote_response = requests.get(quote_url)\r\n string = quote_response.json()\r\n\r\n key_metrics_response= requests.get(key_metrics_url)\r\n metrics_str = key_metrics_response.json()\r\n color_tag = None\r\n\r\n if quote_response and 'Global Quote' in string:\r\n\r\n current_price = round(float(string['Global Quote']['05. price']), 2)\r\n change = round(float(string['Global Quote']['09. change']), 2)\r\n change_pct = string['Global Quote']['10. change percent'][:5] + \"%\"\r\n previous_price = round(float(string['Global Quote']['08. previous close']), 2)\r\n\r\n yearly_high = metrics_str['52WeekHigh']\r\n mark_cap = round(int(metrics_str['MarketCapitalization'])/10E8, 2)\r\n mark_cap_str = str(mark_cap) + \"B\"\r\n\r\n if ticker not in self.holdings:\r\n self.holdings[ticker] = current_price\r\n tuples = [ticker, current_price, change, change_pct, yearly_high, mark_cap_str]\r\n\r\n if current_price > previous_price:\r\n color_tag = 'green'\r\n else:\r\n color_tag = 'red'\r\n self.treeview.insert(parent='', index='end', values=tuples, tags=(color_tag,))\r\n return current_price\r\n else:\r\n return None", "def _get_quotes(symbols):\n # have to format symbols list to from (\"SYM1\", \"SYM2\", .... 
,\"SYMN\")\n symbols = \"(\" + \",\".join(['\\\"' + s.upper() + '\"' for s in symbols]) + \")\"\n query = 'SELECT * FROM yahoo.finance.quote WHERE symbol in {0}'.format(symbols)\n payload = {\n \"q\": query, 'format':'json', \"env\":'store://datatables.org/alltableswithkeys'\n }\n try:\n resp = requests.get('http://query.yahooapis.com/v1/public/yql?', params=payload)\n resp.raise_for_status()\n except requests.exceptions.RequestException as e:\n print(e)\n return\n return json.loads(resp.text)[\"query\"][\"results\"][\"quote\"]", "async def stock(self, ctx, ticker: str):\n symbols = await self.bot.aiojson(\"https://api.robinhood.com/quotes/\"\\\n f\"?symbols={ticker.upper()}\")\n if not symbols:\n await ctx.send(\"Stock not found. This stock is probably not tradeable on robinhood.\")\n return\n symbols_result = symbols[\"results\"][0]\n instrument = await self.bot.aiojson(symbols_result[\"instrument\"])\n fundamentals = await self.bot.aiojson(\n f\"https://api.robinhood.com/fundamentals/{ticker.upper()}/\")\n\n current_price = (symbols_result[\"last_trade_price\"] if\n \"last_extended_hours_trade_price\" in symbols_result\n else symbols_result[\"last_extended_hours_trade_price\"])\n diff = Decimal(Decimal(current_price) -\n Decimal(symbols_result[\"previous_close\"]))\n percentage = str(100 * diff / Decimal(current_price))[:6]\n\n if not percentage.startswith(\"-\"):\n percentage = \"+\" + percentage\n\n current_price_string = self.format_currency(current_price)\n diff_string = self.format_currency(diff)\n bid_price_string = self.format_currency(Decimal(symbols_result[\"bid_price\"]))\n ask_price_string = self.format_currency(Decimal(symbols_result[\"ask_price\"]))\n tradeable_string = (\n \":white_check_mark:\" if instrument[\"tradeable\"] else \":x:\")\n\n update_timestamp = parser.parse(symbols_result[\"updated_at\"])\n\n symbol = symbols_result[\"symbol\"]\n change_color = await self.get_stock_change_color(symbol)\n\n embed = discord.Embed(title=f\"{symbol}'s stocks info\",\n color=change_color,\n timestamp=update_timestamp)\n\n embed.add_field(name=\"Name\", value=instrument[\"name\"])\n embed.add_field(name=\"Current Price\", value=current_price_string)\n embed.add_field(name=\"Change from yesterday\", value=f\"{diff_string} ({percentage}%)\")\n embed.add_field(name=\"Bid size\", value=f\"{symbols_result['bid_size']} ({bid_price_string})\")\n embed.add_field(name=\"Ask size\", value=f\"{symbols_result['ask_size']} ({ask_price_string})\")\n embed.add_field(name=\"Current Volume\", value=fundamentals[\"volume\"])\n embed.add_field(name=\"Average Volume\", value=fundamentals[\"average_volume\"])\n embed.add_field(name=\"Tradeable on Robinhood\", value=tradeable_string)\n embed.add_field(name=\"Country\", value=f\":flag_{instrument['country'].lower()}:\")\n\n await ctx.send(embed=embed)", "def __init__(self):\n # Param\n ## self.target_stocks use mainly for a few stocks.\n ## it also use when setting the 45 or 50 stocks at a time to url\n self.target_stocks = ['S58.SI','S68.SI'] ##special character need to be converted\n self.full_stocklist_to_retrieve = [] #full range fo stocks\n \n # for difffernt retrieval, based on the dict available to select the file type\n # currently have \"watcher\", \"all\" where watcher is the selected stocks to watch.\n self.stock_retrieval_type = 'watcher' \n\n ## current data .csv file url formation\n #header to match the sequence of the formed url\n self.cur_quotes_parm_headers = ['NAME', 'SYMBOL', 'LATEST_PRICE', 'OPEN', 'CLOSE','VOL',\n 
'YEAR_HIGH','YEAR_LOW'] #label to be use when downloading.\n \n # URL forming for price details\n self.cur_quotes_start_url = \"http://download.finance.yahoo.com/d/quotes.csv?s=\"\n self.cur_quotes_stock_portion_url = ''\n self.cur_quotes_stock_portion_additional_url = '.SI'# for adding additonal str to the stock url.\n self.cur_quotes_property_portion_url = ''\n self.cur_quotes_property_str = 'nsl1opvkj' #default list of properties to copy.\n self.cur_quotes_end_url = \"&e=.csv\"\n self.cur_quotes_full_url = ''\n\n # Properties from excel\n self.enable_form_properties_fr_exceltable = 1\n self.properties_excel_table = r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\Individual_stock_query_property.xls'\n\n # Output storage\n self.cur_quotes_csvfile = r'c:\\data\\temp\\stock_data.csv'\n self.cur_quotes_df = object()\n\n ## !!!\n self.cur_quotes_url_list = [] # store of all the url list being query. For debug.\n\n # for debug/printing\n self.store_individual_set_df = []\n self.__print_url = 0 # for printing the url string\n\n # input file path\n # dict based on the file for different type of retrieval\n self.retrieval_type_input_file_dict = {\n \"all\" : r'C:\\pythonuserfiles\\yahoo_finance_data_extract\\stocklist.csv',\n \"watcher\": r'c:\\data\\google_stock_screener.csv'\n }", "def get_all_stocks():\n url = r\"https://brapi.ga/api/quote/list\"\n response = requests.get(url)\n return [stock[\"stock\"] for stock in response.json()[\"stocks\"]]", "async def stocks(self, ctx):\n\t\tpass", "def quandl_stocks(symbol, start_date=(2000, 1, 1), end_date=None):\n\n query_list = ['WIKI' + '/' + symbol + '.' + str(k) for k in range(1, 13)]\n\n start_date = datetime.date(*start_date)\n\n if end_date:\n end_date = datetime.date(*end_date)\n else:\n end_date = datetime.date.today()\n\n return quandl.get(query_list,\n returns='pandas',\n start_date=start_date,\n end_date=end_date,\n collapse='daily',\n order='asc'\n )", "def stockButtonClicked(self):\n # Clear text edit box and get the stock symbol from combobox.\n self.central.text3.clear()\n stocksymbol = self.central.combobox.currentText()\n\n URL = 'https://finance.yahoo.com/quote/{0}/profile?p={0}'.format(stocksymbol)\n\n # Safely get the web page using the above URL.\n try:\n r = requests.get(URL)\n except:\n logging.error(\"Failed to get the web page: \" + URL)\n self.central.text3.setText(\"Failed to get the web page: \" + URL)\n return\n\n # Safely turn the response from requests into soup.\n try:\n html = r.text.encode('utf-8')\n soup = bs4.BeautifulSoup(html, 'lxml')\n except:\n logging.error(\"Failed on the soup\")\n self.central.text3.setText(\"Failed on the soup\")\n return\n\n # Safely extract data from the table.\n try:\n table = soup.find_all(\"table\")\n rows = table[0].find_all('tr')\n data = []\n for row in rows:\n cols = row.find_all('td')\n cols = [str.text.strip() for str in cols]\n data.append([str for str in cols if str])\n\n textdisplay = ''\n\n for x in data:\n for y in x:\n print(y)\n textdisplay += y\n textdisplay += '\\n'\n if y.isdigit():\n textdisplay += '\\n'\n self.central.text3.setText(textdisplay)\n\n except:\n logging.error(\"Failed to extract data from the table\")\n self.central.text3.setText(\"Failed to extract data from the table\")\n return\n\n self.updateGraph(symbol=stocksymbol)", "def getSongsURL(self, songIDList, br):\n for i, songID in enumerate(songIDList):\n if not isinstance(songID, str):\n songIDList[i] = str(songID)\n self.apiLog.info(\"songIDList : %s\" % repr(songIDList))\n \n if isinstance(br, 
int):\n br = NEAPI_BRS[br][0]\n currAPIVersion = self.config['apiVersion']\n currAPIURL = URL_NEAPIS[sys._getframe().f_code.co_name]\n currAPIURL = currAPIURL[min(currAPIVersion, len(currAPIURL) - 1)]\n\n currDict = {\n 'ids' : repr(songIDList).replace(\" \", \"\").replace(\"'\", \"\").replace(\"\\\"\", \"\"),\n 'br' : dict(NEAPI_BRS)[br]\n }\n\n currC, currR = self._mySubmit(currAPIURL, currDict)\n self.apiLog.info(\"%s Json Loads Begin\", sys._getframe().f_code.co_name)\n currR = json.loads(currR)\n self.apiLog.info(\"%s Json Loads End\", sys._getframe().f_code.co_name)\n self.updateCookie(currC)\n self.checkCode(currR['code'])\n\n sortedData = range(len(songIDList))\n self.apiLog.info(\"songIDList : %s\" % repr(songIDList))\n for song in currR['data']:\n sortedData[songIDList.index(str(song['id']))] = song\n\n currR['data'] = sortedData\n return currR, currAPIURL[2]", "def set_stock_sym_append_str(self, append_str):\n self.com_data_stock_portion_additional_url = append_str", "def quotes_list(request: HttpRequest) -> HttpResponse: # noqa: 201\n quotes = Quote.objects.all()\n return render(\n request,\n \"quotes/quotes_list.html\",\n {\"quotes\": quotes},\n )", "def get_stocks():\n print(\"fetching remote...\")\n code_dataframes = pd.read_html(\n 'http://kind.krx.co.kr/corpgeneral/corpList.do?method=download&searchType=13', header=0)[0]\n # 우리가 필요한 것은 회사명과 종목코드이기 때문에 필요없는 column들은 제외해준다.\n print(\"parsing and filtering data...\")\n code_dataframes.종목코드 = code_dataframes.종목코드.map('{:06d}'.format)\n # 한글로된 컬럼명을 영어로 바꿔준다.\n code_dataframes = code_dataframes[['회사명', '종목코드']]\n code_dataframes = code_dataframes.rename(\n columns={'회사명': 'name', '종목코드': 'code'})\n codes = code_dataframes['code']\n names = code_dataframes['name']\n stocks = []\n for i in range(len(names)):\n stocks.append({\n 'name': names[i],\n 'code': codes[i]\n })\n return stocks", "def old_start_to_scrape_stocks():\n # the way it works is: 20 stocks are displayed per page, and the r= paramater in the url tells where to start listing with the stocks\n res = req.get(stocks_url.format('1'), headers={'user-agent': ua.random})\n soup = bs(res.content, 'lxml')\n # get last page number to get pages that need to be iterated through\n last_page_num = int(soup.findAll('a', {'class': 'screener-pages'})[-1].text)\n # the last page should be the (last page number - 1) * 20 + 1\n last_r = (last_page_num - 1) * 20 + 1 + 1 # add another one for range to work\n for p in range(21, last_r, 20):\n pass", "def set_full_stocklist_to_retrieve(self, list_of_stocks):\n self.full_stocklist_to_retrieve = list_of_stocks", "def get_url(ticker: str, period: int) -> str:\n periods = {\n 1: 'period1=1577836800&period2=1585699200', # First Quarter 2020\n 2: 'period1=1585699200&period2=1593561600', # Second Quarter 2020\n 3: 'period1=1593561600&period2=1601510400', # Third Quarter 2020\n 4: 'period1=1601510400&period2=1609459200', # Fourth Quarter 2020\n 5: 'period1=1609459200&period2=1617235200' # First Quarter 2021\n }\n return (f\"https://finance.yahoo.com/quote/%5E{ticker}/history?{periods[period]}&interval=1d\"\n '&filter=history&frequency=1d&includeAdjustedClose=true')", "def stock_1_query(self):\n return f\"\"\"\n SELECT Date, '{self.stock_1}'\n FROM closing_prices;\"\"\"", "def stock_3_query(self):\n return f\"\"\"\n SELECT '{self.stock_3}'\n FROM closing_prices;\"\"\"" ]
[ "0.78473824", "0.66010034", "0.6469332", "0.6243975", "0.59980655", "0.5917665", "0.5836436", "0.58335614", "0.5813822", "0.58052963", "0.5770366", "0.5768073", "0.5726045", "0.5700043", "0.5691203", "0.56370264", "0.56281304", "0.5598005", "0.55803543", "0.55655724", "0.5563228", "0.55541235", "0.5528523", "0.5463383", "0.54525954", "0.54522055", "0.54405594", "0.5429061", "0.5381754", "0.53593844" ]
0.68197006
1
Download the JSON file from self.com_data_full_url. The saved file defaults to self.saved_json_file.
def download_json(self): cache.clear() url = URL(self.com_data_full_url) f = open(self.saved_json_file, 'wb') # save as test.gif f.write(url.download(timeout = 50)) #increse the time out time for this f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_json(self):\n # make the path dir if it doesn't exist\n if not self.path.is_dir():\n self.path.mkdir(parents=True)\n\n # open a file, send a request for the json and write to the file\n with self.file.open('w') as json_file:\n try:\n json_data = json.dumps(requests.get(self.endpoint).json())\n json_file.write(json_data)\n except json.JSONDecodeError as error:\n print(\"Error fetching json: \", error)", "def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH", "def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def download_json(self):\n # create directories for threads and images if they don't exist\n if not self.path.is_dir():\n self.path.mkdir(parents=True)\n if not self.images_path.is_dir():\n self.images_path.mkdir(parents=True)\n\n # open file, send request and write data to a file\n with self.file.open('w') as json_file:\n try:\n json_data = json.dumps(requests.get(self.endpoint).json())\n json_file.write(json_data)\n except json.JSONDecodeError as error:\n print(\"Error fetching json: \", error)", "def save(self, file: io.BufferedWriter):\n if self.downloaded:\n json.dump(self.data, file, indent=2)", "def get_com_data(self):\n self.form_url_str()\n if self.__print_url: print self.com_data_full_url\n self.download_json()\n self.get_datalist_fr_json()", "def save_data(url, file):\n with open(file, 'w') as f:\n json.dump(get_json_data(url), f)", "def download_doc_as_json(self):\n if self.webdriver.name == \"chrome\": # this will check browser name\n print(\"Download has been disabled for the Chrome browser \\n\")\n else:\n select_export_doc_as_jason_sitem = self.locator_finder_by_xpath(self.select_export_doc_as_jason_id)\n select_export_doc_as_jason_sitem.click()\n time.sleep(1)\n select_export_doc_confirm_btn_sitem = self.locator_finder_by_id(self.select_export_doc_confirm_btn_id)\n select_export_doc_confirm_btn_sitem.click()\n time.sleep(2)\n # self.clear_download_bar()", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def 
fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def save_data_file(self):\n with open(self.files['data'], 'w') as outfile:\n outfile.write(self.to_json())\n outfile.close()", "def saveData(self):\n file_location = self.json_File_Location.replace(\".json\", \"_Update.json\")\n json_file = open(file_location, \"w+\")\n json_file.write(json.dumps(self.data, indent=4, separators=(', ', ' : ')))\n json_file.close()", "def download_file(self, filename=None):\n raw_data = self._service.download_object(self._datasets_id, filename)\n\n with open(filename, 'wb') as f:\n f.write(raw_data)\n\n return filename", "def save_data(self):\n # Command to get the download data\n pass", "def download(self):\n\n with open(self.dataset_path) as dataset_file:\n dataset = json.load(dataset_file)\n\n path = \"\".join([POST_HIT_PATH, dataset[\"dataset\"][\"data_path\"]])\n if not os.path.exists(path):\n os.makedirs(path)\n\n protocole = dataset[\"dataset\"][\"protocole\"]\n\n download_links = []\n\n for resource in dataset[\"dataset\"][\"resources\"]:\n file_path = \"\".join([path, resource[\"filename\"]])\n\n #Check if the the download link has not been used before (One download link for all)\n if resource[\"download_link\"] not in download_links:\n \n print(\"DOWNLOADING : {}\".format(resource[\"filename\"]))\n f = urllib.request.urlopen(resource[\"download_link\"])\n data = f.read()\n with open(file_path, \"wb\") as donwload_file:\n donwload_file.write(data)\n\n download_links.append(resource[\"download_link\"])\n\n \n #Extract all files from the tar archives if necessary\n if tarfile.is_tarfile(file_path):\n tf = tarfile.open(file_path)\n tf.exractall()", "def download_and_update():\n with tempfile.TemporaryDirectory(dir=TEMP_DOWNLOAD_DIR) as temp_dir:\n reistijden_jsonfile = os.path.join(temp_dir, 'reistijdenAmsterdam.geojson')\n\n r = requests.get(REISTIJDEN_TARGET_URL)\n with open(reistijden_jsonfile, 'w') as f:\n f.write(r.text)\n\n _parse_and_store_geojson(reistijden_jsonfile)", "def download_file(self, filename: str, save_dir: str) -> None:\n raise NotImplementedError()", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def save_json(self, data, json_path=None):\n if json_path is None:\n json_path = self.json_path\n with open(json_path, encoding='utf-8', mode='w') as f:\n json.dump(data, f, indent=4, ensure_ascii=False)\n\n print(json_path)", "def save_data(self, filename):\n with open(settings.DIR_PATH + '/' + filename, 'w', encoding='utf-8') as f:\n json.dump(self.data, f, indent=4)", "def SaveJSON(self, filename):\n data = {\n 'files': self._files,\n 'ebuilds': self._ebuilds,\n }\n json.dump(data, open(filename, 'w'))", "def _get_json(self, url, file=None):\n r = requests.get(url)\n # If status is not OK, 
raise error.\n if not r.ok:\n r.raise_for_status()\n # Otherwise load JSON.\n data = json.loads(r.text)\n # Optionally save JSON to disk.\n if file is not None:\n with open(file, 'w') as f:\n json.dump(data, f)\n return data", "def get_latest_data():\n try:\n print '\\nRequesting new data.....\\n'\n response = get(\"https://api.myjson.com/bins/2csub\")\n if response.status_code is 200:\n print '\\nSuccess (200) in downloading data\\n'\n current_json = response.json()\n set_backup_data(current_json)\n else: \n current_json = get_backup_data()\n except ConnectionError:\n current_json = get_backup_data()\n return current_json", "def fetch(self,url=URL):\n\t\tlog.info('downloading latest PHE case data')\n#\t\tself.data=lookup_json(url)\n\t\tself.fetch_csv() #JSON discontinued; switched back to CSV\n\t\tself.edition=self.latest_samples\n\t\tlog.info(f'Last samples from {self.edition}')", "def download_country_data(\n url=default_url,\n filename=default_data_file,\n force=False\n):\n if not os.path.isfile(filename) or force:\n text = requests.get(url).text\n with open(filename, 'w') as fp:\n fp.write(text)", "def download(self):\n #the link has some meta data in it that we need to get a hold of so we cant use metaData.getLink()\n data = None\n\n for link in self.metaData.jsonObj['links']:\n if link.get('rel') == \"content\":\n data = link\n\n assert data is not None\n\n response = self._adapter.getRequest(data['href'], self._baseHeader)\n return {\"filename\": data['title'], \"mime\": data['type'], \"binary\": response['Body'] }", "def save_json():\r\n with open(os.path.join(cwd, 'data.json'), 'w') as f:\r\n json.dump(data, f)" ]
[ "0.76825815", "0.6899236", "0.6899236", "0.6703963", "0.6665916", "0.65556526", "0.6494851", "0.63685983", "0.6284138", "0.6250484", "0.6167223", "0.61638594", "0.6151558", "0.6141404", "0.61134374", "0.6105554", "0.6081908", "0.6049193", "0.5993746", "0.599304", "0.59168667", "0.58459616", "0.5812633", "0.57891345", "0.57884234", "0.5772025", "0.5771923", "0.573212", "0.5706042", "0.5686925" ]
0.80634254
0
Return the binned (sum of all frames) data
def getBinnedData(self): return self._array.sum(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bin_binarise(self):\n pass", "def _binarization(self):\n for feat in self.cat_feats:\n lbl = preprocessing.LabelBinarizer()\n lbl.fit(self.dataframe[feat].values)\n val = lbl.transform(self.dataframe[feat].values)\n self.dataframe_d_copy = self.dataframe_d_copy.drop(feat,axis=1)\n \n for j in range(val.shape[1]):\n new_col_name = feat + f'__bin_{j}'\n self.dataframe_d_copy[new_col_name] = val[:,j] \n self.binary_encoders[feat] = lbl\n joblib.dump(self.binary_encoders, f\"{self.output_path}/_binary_encoder.pkl\")\n return self.dataframe_d_copy", "def binn_fft(self):\n bin_res = []\n for fft_bin in BINS:\n bin_res.append(self.bin_spec_y(fft_bin[0], fft_bin[1]))\n return bin_res", "def grouped_bins(self):\n # Load the vector version #\n df = self.grouped_vectors\n # Empty data frame to contain result #\n result = pandas.DataFrame()\n # Iterate #\n for i, row in df.iterrows():\n # Compute a data frame containing the recreated bins #\n current = binner(row[self.sum_col], self.sum_col, self.bin_width)\n # Keep the current values of the group columns as an index #\n col_values = [row[col] for col in self.group_cols]\n current = current.assign(**dict(zip(self.group_cols, col_values)))\n current = current.set_index(self.group_cols)\n # Append #\n result = result.append(current)\n # Return #\n return result", "def bins (self):\n return self._bins", "def bins (self):\n return self._bins", "def bin_data(data, num_bins):\n\tslices = np.linspace(0, 100, num_bins+1, True).astype(np.int)\n\tcounts = np.diff(slices)\n\n\tmean = np.add.reduceat(data, slices[:-1]) / counts\n\treturn mean", "def binning ( self , axis , name = '' ) :\n assert isinstance ( axis , ROOT.TAxis ),\\\n 'Invalid axis %s/%s' % ( axis , type ( axis ) )\n\n ## uniform binning?\n if not axis.IsVariableBinSize() : \n return ROOT.RooFit.Binning ( axis.GetNbins() , axis.GetXmin() , axis.GetXmax() )\n ##\n xbins = axis.GetXbins().GetArray()\n rb = ROOT.RooBinning ( axis.GetNbins() , xbins , name )\n ##\n self.aux_keep.append ( rb )\n ##\n return ROOT.RooFit.Binning ( rb )", "def bins(self):\n return self._bins", "def bin_data(bins, data2bin, bindata, mode='mean', nbinned=False):\n assert mode in ['mean', 'median', 'std', 'max', 'min'], \"mode not recognized: {}\".format(mode)\n digitized = np.digitize(bindata, bins)\n binned = np.zeros(len(bins)) * np.nan\n if nbinned: \n numbinned = np.zeros(len(bins))\n\n if mode == 'mean':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmean(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'median':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmedian(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'std':\n for i, _ in enumerate(bins):\n binned[i] = np.nanstd(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'max':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmax(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n elif mode == 'min':\n for i, _ in enumerate(bins):\n binned[i] = np.nanmin(data2bin[np.logical_and(np.isfinite(bindata), digitized == i+1)])\n if 
nbinned:\n numbinned[i] = np.count_nonzero(np.logical_and(np.isfinite(data2bin), digitized == i+1))\n else:\n raise ValueError('mode must be mean, median, std, max, or min')\n \n if nbinned:\n return np.array(binned), np.array(numbinned)\n else:\n return np.array(binned)", "def _calcBins(self, contribs, parValues, fraction, minReq):\n # single set of R for this calculation\n bins = np.zeros(self.binCount)\n binObs = np.zeros(self.binCount)\n for bi in range(self.binCount):\n val, obs = self._calcBin(\n self._binMask(bi, parValues),\n fraction, minReq)\n bins[bi] = val\n binObs[bi] = obs\n cdf = self._calcCDF(bins)\n return bins, binObs, cdf", "def bin_by_resel(data_table , binsize = 6, weighted = True, verbose = True):\n exptimes_ = []\n wvlns_, fluxs_, fluxErrs_, fluxErr_lowers_, gross_s_, gcount_s_ = [], [], [], [], [], []\n \n print(f\"function `bin_by_resel` is Binning by {binsize}\")\n for i in range(len(data_table)):\n exptimes_.append(data_table[i]['EXPTIME'])\n wvln_, flux_, fluxErr_,fluxErr_lower_, gross_, gcount_ = data_table[i][\n 'WAVELENGTH', 'FLUX', 'ERROR', 'ERROR_LOWER', 'GROSS', 'GCOUNTS']\n if weighted == True:\n weightsarr_ = np.nan_to_num(gcount_/gross_, nan = 1E-30) # Exposure time can be calculated by gross counts divided by gross counts/second\n # Dividing this way results in NaNs which are messy. replace nans with a value << exptime\n # This way, weight is ~0 unless all values in a chunk are NaN\n wvln_ = downsample_1d(myarr = wvln_, weightsarr = weightsarr_, factor = binsize)\n flux_ = downsample_1d(myarr = flux_, weightsarr = weightsarr_, factor = binsize)\n fluxErr_ = downsample_1d(myarr = fluxErr_, weightsarr = weightsarr_, factor = binsize, in_quad = True) # Errors are summed/averaged in quadrature\n fluxErr_lower_ = downsample_1d(myarr = fluxErr_lower_, weightsarr = weightsarr_, factor = binsize, in_quad = True)\n gross_ = downsample_1d(myarr = gross_, weightsarr = weightsarr_, factor = binsize)\n gcount_ = downsample_1d(myarr = gcount_, weightsarr = weightsarr_, factor = binsize)\n\n elif weighted == False:\n weightsarr_ = -1\n \n wvln_ = downsample_1d(myarr = wvln_, weighted = False, factor = binsize)\n flux_ = downsample_1d(myarr = flux_, weighted = False, factor = binsize)\n fluxErr_ = downsample_1d(myarr = fluxErr_, weighted = False, factor = binsize, in_quad = True)\n fluxErr_lower_ = downsample_1d(myarr = fluxErr_lower_, weighted = False, factor = binsize, in_quad = True)\n gross_ = downsample_1d(myarr = gross_, weighted = False, factor = binsize)\n gcount_ = downsample_1d(myarr = gcount_, weighted = False, factor = binsize)\n \n wvlns_.append(wvln_)\n fluxs_.append(flux_)\n fluxErrs_.append(fluxErr_)\n fluxErr_lowers_.append(fluxErr_lower_)\n gross_s_.append(gross_)\n gcount_s_.append(gcount_)\n \n return Table([exptimes_,wvlns_, fluxs_, fluxErrs_, fluxErr_lowers_, gross_s_, gcount_s_], names=['EXPTIME', 'WAVELENGTH', 'FLUX', 'ERROR', 'ERROR_LOWER', 'GROSS', 'GCOUNTS'])", "def xbins (self):\n return self._xbins", "def test_bins(self):\n\n for filename in ['%s/population_padang_1.asc' % TESTDATA,\n '%s/test_grid.asc' % TESTDATA]:\n\n R = read_layer(filename)\n rmin, rmax = R.get_extrema()\n\n for N in [2, 3, 5, 7, 10, 16]:\n linear_intervals = R.get_bins(N=N, quantiles=False)\n\n assert linear_intervals[0] == rmin\n assert linear_intervals[-1] == rmax\n\n d = (rmax - rmin) / N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], rmin + i * d)\n\n quantiles = R.get_bins(N=N, quantiles=True)\n A = R.get_data(nan=True).flat[:]\n\n mask = 
numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask)\n l2 = len(A)\n\n if filename == '%s/test_grid.asc' % TESTDATA:\n # Check that NaN's were removed\n assert l1 == 35\n assert l2 == 30\n\n # Assert that there are no NaN's\n assert not numpy.alltrue(numpy.isnan(A))\n\n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements / N\n\n # Count elements in each bin and check\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n\n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no\n # more than 1\n assert abs(count - refcount) <= 1\n assert abs(count - average_elements_per_bin) <= 3\n else:\n # The last bin is allowed vary by more\n pass\n\n i0 = i1", "def findBins(): \n\n df = pd.read_csv('significantData.csv')\n df = df.sort_values('RecordingTimestamp')\n df.to_csv('significantData.csv', index=False)\n read_in = pd.read_csv('significantData.csv')\n count = 0\n this = []\n return_bins = {}\n word = (read_in['AOI[Neutral_Left]Hit_0']).tolist()\n \n if word[0] == '1':\n return_bins.update({'start_value': 1})\n else: \n return_bins.update({'start_value': 0})\n for v, w in zip(word[:-1], word[1:]):\n if v == w and v != '': \n print v\n count = count + 1\n else: \n total = count\n this.append(count)\n my_list = sorted(list(set(this)))\n return_bins.update({'my_list': my_list})\n return return_bins", "def bin_stats(x,y,xbins,stat='average'):\n nbins=len(xbins)\n if stat=='average' or stat=='mean': func=mean\n elif stat=='median': func=median\n elif stat=='rms' or stat=='std' : func=std\n elif stat=='std_robust' or stat=='rms_robust': func=std_robust\n elif stat=='mean_robust': func=mean_robust\n elif stat=='median_robust': func=median_robust\n elif stat=='sum': func=sum\n results=[]\n for i in range(nbins):\n if i<nbins-1:\n good=(greater_equal(x,xbins[i])\n *less(x,xbins[i+1]))\n else: good=(greater_equal(x,xbins[-1]))\n if sum(good)>1.: results.append(func(compress(good,y)))\n else:\n results.append(0.)\n print('Bin starting at xbins[%i] has %i points' % (i,sum(good)))\n return array(results)", "def bin_the_data(neuron_spikes, first, last, bin_size):\n neuron_activity = []\n timebins = range(first, int(last) + int(last) % bin_size, bin_size)\n for spike in neuron_spikes:\n activity = []\n spike_time = spike[0]\n i = 0\n for bin_size in timebins:\n k = 0\n while spike_time < bin_size:\n i += 1\n if i >= np.size(spike):\n break\n spike_time = spike[i]\n k += 1\n activity.append(k)\n neuron_activity.append(activity)\n return neuron_activity, timebins", "def bin(serie, bins):\n return serie.apply(lambda x: _bin(bins, x))", "def bin_statistics(data,bin_against,bin_edges,data_signal=[]):\n\n assert isinstance(data, pd.DataFrame), 'data must be of type pd.DataFram' \n try: bin_against = np.asarray(bin_against) \n except: 'bin_against must be of type np.ndarray'\n try: bin_edges = np.asarray(bin_edges)\n except: 'bin_edges must be of type np.ndarray' \n\n # Determine variables to analyze\n if len(data_signal)==0: # if not specified, bin all variables\n data_signal=data.columns.values\n else:\n assert isinstance(data_signal, list), 'must be of type list'\n\n # Pre-allocate list variables\n bin_stat_list = []\n bin_std_list = []\n\n # loop through data_signal and get binned means\n for signal_name in data_signal:\n # Bin data\n bin_stat = binned_statistic(bin_against,data[signal_name],\n statistic='mean',bins=bin_edges)\n # Calculate std of bins\n std = 
[]\n stdev = pd.DataFrame(data[signal_name])\n stdev.set_index(bin_stat.binnumber,inplace=True)\n for i in range(1,len(bin_stat.bin_edges)):\n try:\n temp = stdev.loc[i].std(ddof=0)\n std.append(temp[0])\n except:\n std.append(np.nan)\n bin_stat_list.append(bin_stat.statistic)\n bin_std_list.append(std)\n \n # Convert to DataFrames\n bin_mean = pd.DataFrame(np.transpose(bin_stat_list),columns=data_signal)\n bin_std = pd.DataFrame(np.transpose(bin_std_list),columns=data_signal)\n\n # Check for nans \n if bin_mean.isna().any().any():\n print('Warning: some bins may be empty!')\n\n return bin_mean, bin_std", "def binStats(self, nodes, columnNames, binSize=100, method=max, resetStats=False):\n\n # init summary properties\n for node in nodes:\n print(node)\n if resetStats or (not hasattr(node, 'binnedStats')):\n node.binnedStats = {}\n for col in columnNames:\n node.binnedStats[col] = []\n\n binStart = 0\n binEndBefore = binStart + binSize\n numberOfItems = len(nodes[0].stats[columnNames[0]])\n\n while binEndBefore <= numberOfItems:\n for col in columnNames:\n for node in nodes:\n colData = node.stats[col]\n data = colData[binStart: binEndBefore]\n agg = list(map(method, (data,)))[0]\n node.binnedStats[col].append(agg)\n\n binStart = binEndBefore\n binEndBefore += binSize", "def combine_bins(self, f):\n\n f = int(f)\n if self._counts is None:\n key = 'cps'\n else:\n key = 'counts'\n data = getattr(self, key)\n if len(self) % f == 0:\n padded_counts = np.copy(data)\n else:\n pad_len = f - len(self) % f\n pad_counts = unumpy.uarray(np.zeros(pad_len), np.zeros(pad_len))\n padded_counts = np.concatenate((data, pad_counts))\n padded_counts.resize(int(len(padded_counts) / f), f)\n combined_counts = np.sum(padded_counts, axis=1)\n if self.is_calibrated:\n combined_bin_edges = self.bin_edges_kev[::f]\n if combined_bin_edges[-1] != self.bin_edges_kev[-1]:\n combined_bin_edges = np.append(\n combined_bin_edges, self.bin_edges_kev[-1])\n else:\n combined_bin_edges = None\n\n kwargs = {key: combined_counts,\n 'bin_edges_kev': combined_bin_edges,\n 'input_file_object': self._infileobject,\n 'livetime': self.livetime}\n obj = Spectrum(**kwargs)\n return obj", "def binning(S, bands):\n B = np.zeros((S.shape[0], len(bands)), dtype=S.dtype)\n for i, b in enumerate(bands):\n B[:, i] = np.mean(S[:, b[0] : b[1]], axis=1)\n\n return B", "def security(self, bin_size): \n return resample(self.data, bin_size)[:-1]", "def bin_av(datadir,filename):\n data = xr.open_dataset(os.path.join(datadir,filename))\n keys_tot = [i for i in data.keys() if i!='DEPTH']\n datanew_tot = {i:[] for i in keys_tot}\n #print(datanew_tot)\n depth_max = int(data.DEPTH.max()) # as integer\n #print(depth_max)\n for i in range(1,depth_max):\n for key in keys_tot:\n datanew_tot[key].append(float(data[key][(data.DEPTH < i+0.5) & \\\n (data.DEPTH >= i-0.5)].mean().values))\n #print(datanew_tot)\n datanew = data.copy()\n datanew=datanew.rename({'scan':'scan_old'})\n #print(datanew)\n datanew['DEPTH'] = ('scan', np.arange(1,depth_max))\n for key in keys_tot:\n datanew[key] = ('scan', datanew_tot[key])\n #print(datanew)\n print('Bin averaging finished')\n print('overwriting data')\n os.rename(os.path.join(datadir,filename),os.path.join(datadir,filename+'.bak'))\n datanew.to_netcdf(os.path.join(datadir,filename))", "def get_bins(data) :\n\tbins=np.unique(data)\n\treturn np.append(bins[~np.isnan(bins)], max(bins) +1)", "def binned_fft(self):\n self.fft()\n self.fft_bins_y = self.binn_fft()\n self.fft_bins_y = np.asarray(self.fft_bins_y) * UPDATE_FACTOR 
+ self.last_fft_bins_y *(1 - UPDATE_FACTOR)\n self.last_fft_bins_y = self.fft_bins_y", "def bin_input(neu, bin_size, overlap = 0):\n win_d = bin_size - overlap\n n_bins = int(((neu.shape[0]-bin_size)/win_d)+1)\n FRmat = np.empty([n_bins, neu.shape[1]])\n for i in range(n_bins):\n FRmat[i,:] = np.sum(neu[i*win_d:i*win_d+bin_size,:], axis = 0)\n \n return FRmat", "def createBins():\n theBins = []\n startFreq = 60\n for a in range(32):\n endFreq = int(startFreq*1.12+12)\n theRange = (startFreq, endFreq)\n startFreq = endFreq\n theBins.append(theRange)\n return(theBins)", "def felix_binning(xs, ys, delta=1):\n \n #bins = np.arange(start, end, delta)\n #occurance = np.zeros(start, end, delta)\n BIN_STEP = delta\n BIN_START = xs.min()\n BIN_STOP = xs.max()\n\n indices = xs.argsort()\n datax = xs[indices]\n datay = ys[indices]\n\n print(\"In total we have: \", len(datax), ' data points.')\n #do the binning of the data\n bins = np.arange(BIN_START, BIN_STOP, BIN_STEP)\n print(\"Binning starts: \", BIN_START, ' with step: ', BIN_STEP, ' ENDS: ', BIN_STOP)\n\n bin_i = np.digitize(datax, bins)\n bin_a = np.zeros(len(bins)+1)\n bin_occ = np.zeros(len(bins)+1)\n\n for i in range(datay.size):\n bin_a[bin_i[i]] += datay[i]\n bin_occ[bin_i[i]] += 1\n\n binsx, data_binned = [], []\n for i in range(bin_occ.size-1):\n if bin_occ[i] > 0:\n binsx.append(bins[i]-BIN_STEP/2)\n data_binned.append(bin_a[i]/bin_occ[i])\n\n #non_zero_i = bin_occ > 0\n #binsx = bins[non_zero_i] - BIN_STEP/2\n #data_binned = bin_a[non_zero_i]/bin_occ[non_zero_i]\n\n return binsx, data_binned", "def test_bins(self):\n\n \n for filename in ['data/population_padang_1.asc', \n 'data/test_grid.asc']: \n \n R = read_coverage(filename)\n \n min, max = R.get_extrema() #use_numeric=True)\n \n for N in [2,3,5,7,10,16]:\n linear_intervals = R.get_bins(N=N, quantiles=False) \n \n assert linear_intervals[0] == min\n assert linear_intervals[-1] == max \n \n d = (max-min)/N\n for i in range(N):\n assert numpy.allclose(linear_intervals[i], min + i*d) \n \n \n quantiles = R.get_bins(N=N, quantiles=True)\n\n A = R.get_data(nan=True).flat[:] \n \n mask = numpy.logical_not(numpy.isnan(A)) # Omit NaN's\n l1 = len(A)\n A = A.compress(mask) \n l2 = len(A)\n \n if filename == 'data/test_grid.asc':\n # Check that NaN's were removed\n \n assert l1 == 35\n assert l2 == 30\n \n \n # Assert that there are no NaN's \n assert not numpy.alltrue(numpy.isnan(A))\n \n number_of_elements = len(A)\n average_elements_per_bin = number_of_elements/N\n \n # Count elements in each bin and check\n\n i0 = quantiles[0]\n for i1 in quantiles[1:]:\n count = numpy.sum((i0 < A) & (A < i1))\n if i0 == quantiles[0]:\n refcount = count\n \n \n if i1 < quantiles[-1]:\n # Number of elements in each bin must vary by no more than 1\n assert abs(count - refcount) <= 1 \n assert abs(count - average_elements_per_bin) <= 3\n \n \n else:\n # The last bin is allowed vary by more\n pass\n \n i0 = i1" ]
[ "0.66195166", "0.6542668", "0.6503437", "0.62473", "0.62198627", "0.62198627", "0.6209935", "0.61888206", "0.61871463", "0.6124896", "0.6068046", "0.60338694", "0.60149175", "0.59968144", "0.59640676", "0.5954214", "0.59471065", "0.5930796", "0.59244406", "0.5913006", "0.5883551", "0.586979", "0.5853279", "0.58198845", "0.5779617", "0.57638", "0.5747608", "0.5729738", "0.57216245", "0.5716586" ]
0.7058887
0
Return the virtual chip size
def getVirtualChipSize(self):
    return self._vChipSize
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChipSize(self):\n return self._chipSize", "def get_dev_size(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetDevSize', self.handle)", "def __get_size(self):\n\t\treturn 4*self.version + 17", "def get_size(self):\n\t\treturn call_sdk_function('PrlSrvCfgHddPart_GetSize', self.handle)", "def get_disk_size(self):\n\t\treturn call_sdk_function('PrlVmDevHd_GetDiskSize', self.handle)", "def get_size_on_disk(self):\n\t\treturn call_sdk_function('PrlVmDevHd_GetSizeOnDisk', self.handle)", "def vm_size(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_size\")", "def get_size(self):", "def getSize(self) -> int:\n ...", "def getSize(self) -> int:\n ...", "def termsize(self):\n try:\n with open(\"/dev/tty\") as tty:\n cmd = ['stty', 'size']\n lines, cols = Uprocess().check_output(cmd, stdin=tty).split()\n return (int(lines), int(cols))\n except (OSError, IOError):\n pass\n return (24, 80)", "def getVoxelSize(self):\n\t\treturn self.voxelsize", "def vm_size(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"vm_size\")", "def effectivedb_size(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n retout = out.get(get_key(zonekeys.EFFDB_SIZE, self._SW_VER), None)\n if retout is not None:\n return int(retout)\n return None", "def vsize(self):\n if hasattr(self, \"_vsize\"):\n return self._vsize\n else:\n return None", "def _size(self):\n return self._logicalSize", "def get_ram_size(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetRamSize', self.handle)", "def get_size(self):\n ...", "def dev_size(device):\n device_path = \"/sys/block/\"\n num_sectors = open(device_path + device + \"/size\").read().rstrip(\"\\n\")\n sector_size = (\n open(device_path + device + \"/queue/hw_sector_size\")\n .read()\n .rstrip(\"\\n\")\n )\n return int(num_sectors) * int(sector_size)", "def get_video_ram_size(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetVideoRamSize', self.handle)", "def size(self):\n return uint16_packer.unpack(self[0:2])[0]", "def get_size(self):\n raise NotImplementedError", "def get_size(self):\n return self.__size", "def get_part_size(self): # -> int:\n ...", "def getSize(self) -> long:\n ...", "def getSize(self):\n\n return self.size", "def getSize(self):\r\n return self.size", "def getSize(self):\n return 1", "def get_size(self):\n return self.size", "def get_size(self):\n return self.size" ]
[ "0.7537076", "0.7193751", "0.71853673", "0.7046478", "0.69768065", "0.69764334", "0.68312764", "0.68164086", "0.6792047", "0.6792047", "0.6791824", "0.6762953", "0.6718725", "0.6702376", "0.6696075", "0.66841346", "0.6680521", "0.66476923", "0.6647065", "0.6641362", "0.66255057", "0.66010267", "0.6593816", "0.65935504", "0.65850806", "0.6559696", "0.65593684", "0.653471", "0.6525136", "0.6525136" ]
0.8628087
0
Return the comments in the data file. If n is not provided, all the comments are returned as a list of string values. If n is provided, the n'th comment is returned.
def getComment(self, n = None):
    if n is None:
        return self._comments
    else:
        return self._comments[n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_comments():\n comments = list(get_comments(TEST_SUBREDDIT, TEST_START_DATE, TEST_END_DATE, TEST_MAX))\n\n # prints the dictionary of variables for each comment\n for x in comments:\n print(x.d_)", "def _get_comments(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n return filter(lambda x: len(x) == 40, os.listdir(self.paths['comments']))", "def get_comments(self, isbn, n):\n result = []\n self.cursor.execute(\"\"\"SELECT * FROM comment WHERE ISBN=%s ORDER BY avg_usefulness DESC LIMIT %s\"\"\",\n (str(isbn), n))\n for comment in self.cursor.fetchall():\n result.append(comment)\n return result", "def _readComments(self): \n self.NSCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readSpecialComments()\n self.NNCOML = nappy.utils.text_parser.readItemFromLine(self.file.readline(), int)\n self._readNormalComments()", "def get_list_of_comments(path):\n\n # opens comments file\n try:\n return [\n re.sub(\" +\", \" \", comment.strip().rstrip())\n for comment in list(open(path, \"r\"))\n ]\n except Exception as e:\n print(\"Error loading comments file: \", e)\n sys.exit(1)", "def _readNormalComments(self):\n self.NCOM = self._readLines(self.NNCOML)\n return self.NCOM", "def get_comments(self):\n raise NotImplementedError", "def skip_comments(filepointer):\n\tcomments = []\n\tdata = '#'\n\ttry:\n\t\tpos = filepointer.tell()\n\texcept:\n\t\tprint(\"Could not read file.\")\n\t\treturn None\t\n\t\n\twhile data[0] == '#':\n\t\tdata = filepointer.readline()\n\t\tif not data:\n\t\t\traise Exception(\"Unexpected end of file while reading comments.\")\n\n\t\tif data[0] == '#':\n\t\t\tcomments.append(data)\n\t\t\tpos = filepointer.tell()\n\t\telse:\n\t\t\tfilepointer.seek(pos)\n\treturn comments", "def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = {}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict", "def getAllComments(self):\r\n return [(ind, comment) for ind, comment in enumerate(self.comments)]", "def get_comment(self, index):\r\n\r\n # Get request to get all the comments for all exercises\r\n comments = requests.get(API.url_comment, headers = self.headers).json()\r\n # Parse the response\r\n for my_comment in comments:\r\n if my_comment['id'] == index:\r\n print(my_comment['comment'])", "def comment_for_run (ins, exp, runnum) :\n return dict_of_recs_for_run(ins, exp, runnum)['comment']", "def comments(self) -> list:\n return self._node[\"app_data\"][\"ui_data\"].get(\"comments\", [])", "def test_store_comments(parallel, read_basic):\n text = \"\"\"\n# header 
comment\na b c\n# comment 2\n# comment 3\n1 2 3\n4 5 6\n\"\"\"\n table = read_basic(text, parallel=parallel, check_meta=True)\n assert_equal(table.meta[\"comments\"], [\"header comment\", \"comment 2\", \"comment 3\"])", "def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments", "def get_comments(qint,conn):\n\n comms = ('SELECT DISTINCT ip.value '\n 'FROM interaction i, interactionprop ip, cvterm cvt '\n 'WHERE i.interaction_id = ip.interaction_id AND ip.type_id = cvt.cvterm_id '\n 'AND cvt.is_obsolete=0 AND cvt.name != \\'comments on source\\' '\n 'AND cvt.name != \\'internalnotes\\' AND i.uniquename = %s')\n comnts = connect(comms, qint, conn)\n return(comnts)", "def get_comments(self):\n\t\treturn self._client.get_comments(self)", "def getComment(self, ind):\r\n if ind >= 0 and ind < len(self.comments):\r\n return self.comments[ind]\r\n return None", "def process_comment(self, data):\r\n if not self.is_suppress:\r\n return [data]", "def test_get_specific_comment_info():\n a, b, c, d = get_specific_comment_info('g99c7c0')\n print('time created:', a, 'type:', type(a))\n print('permalink:', b, 'type:', type(b))\n print('karma score:', c, 'type:', type(c))\n print('submission id:', d, 'type:', type(d))", "def comments(number):\n if g.browse_mode == \"normal\":\n item = g.model.songs[int(number) - 1]\n fetch_comments(item)\n\n else:\n g.content = generate_songlist_display()\n g.message = \"Comments only available for video items\"", "def get_comments(self, key):\n if key not in self._config:\n raise ValueError(\"%s not in self.config\"%key)\n return self._config[key][\"comments\"]", "def _parse_comment(i, doc):\n\n if doc[i].strip() != \"/**\":\n raise ParseFailure(i, \"Expected beginning of block comment\")\n\n e = i + 1\n while e < len(doc) and doc[e].strip() != \"*/\":\n e += 1\n\n return e + 1, [x.rstrip() for x in doc[i + 1: e]]", "def _skeleton_to_nml_comments(self):\n\n nml_comments = []\n for nodes in self.nodes:\n comment_nodes = nodes[nodes['comment'].notnull()]\n for _, row in comment_nodes.iterrows():\n nml_comment = wknml.Comment(\n node=row['id'].values[0],\n content=row['comment'].values[0]\n )\n nml_comments.append(nml_comment)\n\n return nml_comments", "def parse_comment(self, node):\n\n data = []\n\n if node is not None:\n comment_id_pattern = re.compile('comment-(\\d+)')\n for comment_node in node.find_all('div', class_='comment'):\n item = {}\n item['is_deletable'] = False\n item['is_editable'] = False\n \n comment_id_result = comment_id_pattern.search(comment_node.get('id'))\n if comment_id_result:\n item['id'] = int(comment_id_result.group(1))\n \n comment_body_node = comment_node.find('div', class_='comment-body')\n if comment_body_node is not None:\n item['content'] = ''\n for p in comment_body_node.find_all(recursive=False):\n if 'class' in p.attrs and 'author' in p['class']:\n item['author'] = p.get_text()\n item['profile_url'] = self.get_link(p.get('href'))\n author_id = self._parse_user_id_from_url(item['profile_url'])\n if self.userId == author_id:\n item['is_deletable'] = True\n item['is_editable'] = True\n elif 'class' in p.attrs and 'age' in p['class']:\n item['date'] = p.abbr['title']\n item['date_ago'] = timeago.format(self._parse_datetime(item['date']), datetime.now(TIMEZONE))\n elif 'class' in p.attrs and 'edit' in p['class']:\n continue\n elif p.name == 'form':\n continue\n else:\n 
item['content'] += str(p)\n\n data.append(item)\n\n return data", "def comments(self, limit=100, all=False):\n source, edge = self.id, \"comments\"\n return lazygen(Comment, source, edge,\n limit=limit, get_all=all)", "def _get_review_comments_body(\n self, pull_request_number: int) -> List[Tuple[str, str]]:\n review_comments = get_pull_request_review_comments(\n self._repo_name, pull_request_number, self._auth)\n if not review_comments:\n return []\n review_comments_msg = []\n for comment in review_comments:\n review_comments_msg.append((comment['path'], comment['body']))\n return review_comments_msg", "def _dump_comment(comment: List[str]) -> List[str]:\n return [\"/**\"] + comment + [\"*/\"]", "def comments(self):\n comments_url = self.data['comments_url']\n return json.load(urllib2.urlopen(comments_url))", "def all_user_comments(username):\n return commentslist" ]
[ "0.6559512", "0.6508631", "0.62756485", "0.61539084", "0.614451", "0.6051848", "0.6036511", "0.59882265", "0.597855", "0.5907632", "0.5848902", "0.58425957", "0.5839838", "0.5800919", "0.5784357", "0.5766973", "0.5750142", "0.5720209", "0.5689544", "0.56683725", "0.5658421", "0.5623971", "0.5602438", "0.5590741", "0.5574753", "0.557155", "0.55658233", "0.55518126", "0.5522019", "0.55007595" ]
0.73399085
0
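Illustrative sketch only: the DataFile wrapper below is hypothetical (the class that owns getComment is not shown in this record); it exists purely to demonstrate how the documented accessor behaves with and without n.

# Hypothetical wrapper class used only to exercise the documented getComment behaviour.
class DataFile:
    def __init__(self, comments):
        # _comments would normally be filled while parsing the data file's header.
        self._comments = list(comments)

    def getComment(self, n=None):
        # Same logic as the record's document: all comments, or just the n'th one.
        if n is None:
            return self._comments
        else:
            return self._comments[n]

reader = DataFile(["# created 2021-06-01", "# sensor: A3"])
print(reader.getComment())   # ['# created 2021-06-01', '# sensor: A3']
print(reader.getComment(1))  # '# sensor: A3'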
Run git and process its output. Print any errors. On success, print a formatted version of the repo's authors. Return the total number of authors printed.
def get_authors():
    # git log --encoding=utf-8 --full-history --reverse
    # --format=format:%at;%an;%ae
    gitcmd = (
        'git',
        'log',
        '--encoding=utf-8',
        '--full-history',
        '--reverse',
        '--format=format:%at;%an;%ae'
    )
    git = subprocess.Popen(
        gitcmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    gitout, giterr = git.communicate()
    # Check for errors.
    if giterr:
        print('\nGit error:\n {}'.format(giterr.decode('utf-8')))
        return 0
    elif gitout:
        # String mode was fast enough, just use its lines.
        authorcnt = parse_authors(gitout.splitlines())
        return authorcnt
    print('\nGit error:\n No output from the git command.')
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(*args):\n\n # Default options\n repos = []\n user_map = NullUserMap()\n plotout = None\n printout = True\n gitlab = None\n\n if \"--help\" in args:\n print(main.__doc__)\n return 0\n\n # Parse command-line \n it = iter(args)\n for a in it:\n if a == \"--users\":\n user_map = FileUserMap(next(it))\n elif a == \"--pdf\":\n plotout = next(it)\n elif a == \"--noprint\":\n printout = False\n elif a == \"--gitlab\":\n gitlab = next(it), next(it)\n else:\n repos.append(a)\n \n # Setup backend\n if gitlab is None:\n coretype = GitCLIBackend\n coreargs = repos\n else:\n coretype = GitlabBackend\n coreargs = gitlab\n\n # Dictionary for storing the data to be presented\n commits = {}\n \n # Find the bound for searching -- the beginning of the week, one year ago\n today = datetime.now().replace(hour=0, minute=0,second=0,microsecond=0)\n year_ago = today.replace(year = today.year - 1)\n _, __, dow = year_ago.isocalendar()\n year_ago-= timedelta(days=(dow-1))\n \n # Processes the git logs and stores some intermediate results in the three\n # dictionaries instantiated above\n for email, date, stats in coretype(*coreargs, since=year_ago.strftime(\"%Y-%m-%d\")):\n \n # Trim date of commit to midnight of that day\n date = date.replace(hour=0,minute=0,second=0,microsecond=0)\n user = user_map.map(email)\n \n if not user in commits:\n commits[user] = {}\n if not date in commits[user]:\n commits[user][date] = 0\n \n commits[user][date] += 1\n \n # Print plaintext report\n if printout:\n \n for user, cal in commits.items():\n \n print(\"Annual summary for %s\" % (user))\n \n for date, count in sorted(cal.items(), key=lambda x: x[0]):\n strdate = date.strftime(\"%x\")\n print(\" %s: %2d commits\" % (strdate, count))\n \n print(\"\")\n\n # Draw plots\n if plotout is not None:\n\n with PdfPages(plotout) as pdf:\n \n labels = []\n offsets = {}\n \n cdict = ((205.,247.,237.), (15.,191.,148.))\n \n cdict = {\n 'red': (\n (0.0, cdict[0][0]/255, cdict[0][0]/255),\n (1.0, cdict[1][0]/255, cdict[1][0]/255)\n ),\n 'green':(\n (0.0, cdict[0][1]/255, cdict[0][1]/255),\n (1.0, cdict[1][1]/255, cdict[1][1]/255)\n ),\n 'blue': (\n (0.0, cdict[0][2]/255, cdict[0][2]/255),\n (1.0, cdict[1][2]/255, cdict[1][2]/255)\n )\n }\n \n plt.register_cmap(name='Sea', data=cdict)\n colormap = plt.get_cmap('Sea')\n \n min_yr, min_week, _ = year_ago.isocalendar()\n max_yr, max_week, _ = today.isocalendar()\n \n week_counts = {yr: weeks_in_year(yr) for yr in range(min_yr, max_yr+1)}\n \n # Generate labels for each week -- \n # Add year to the label of the first week of each year as well as \n # the very first week in the history\n lastmon = None\n for yr, weeks in sorted(week_counts.items(), key=lambda x: x[0]):\n cur = datetime(year=yr, month=1, day=4) # jan 4 is always in week 1 of the iso year\n for i in range(weeks):\n mon = cur.strftime(\"%b\")\n if mon != lastmon:\n labels.append(cur.strftime(\"%b\"))\n else:\n labels.append(\"\")\n offsets[(yr, i+1)] = len(offsets)\n cur += timedelta(days=7)\n lastmon = mon\n \n for user in commits:\n \n fig = plt.figure(figsize=(7.5, 1.65))\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[8, 1]) \n ax, cax = plt.subplot(gs[0]), plt.subplot(gs[1])\n \n maxcommits = ceil(max(commits[user].values()) * 1.5)\n \n for date, count in commits[user].items():\n yr, wk, dow = date.isocalendar()\n offset = offsets[(yr, wk)]\n \n ax.add_patch(\n patches.Rectangle(\n (offset+0.05, dow - 1 + 0.05),\n 0.95, 0.95,\n linewidth=0,\n facecolor=colormap(1. 
* (count - 1) / (maxcommits) )\n )\n )\n \n ax.set_title(\"Commit summary for %s\" % user, y=1.28)\n \n ax.xaxis.tick_top()\n ax.set_xticks([x for x in np.arange(len(offsets)) if labels[int(x)] != \"\"])\n ax.set_xticks(np.arange(len(offsets)), minor=True)\n \n ax.set_xticklabels([x for x in labels if x != \"\"])\n ax.set_xlim(offsets[(min_yr, min_week)], offsets[(max_yr, max_week)]+1)\n\n ax.set_ylim(0, 7)\n ax.set_yticks(np.arange(7))\n ax.set_yticklabels([\"S \",\"M \",\"T \",\"W \",\"R \",\"F \",\"S \"])\n ax.invert_yaxis()\n \n if maxcommits <= 10:\n top = maxcommits\n step = 1.\n else:\n top = (maxcommits - 1) + 11 - ((maxcommits - 1) % 11)\n step = top/11.\n\n colorticks = np.arange(0., top+(step/2), step) / (top)\n colorlabels = [\"%d\" % (x*top) for x in colorticks]\n \n cbar = colorbar.ColorbarBase(\n cax, cmap=colormap,\n orientation='horizontal'\n )\n cbar.set_ticks(colorticks)\n cbar.set_ticklabels(colorlabels)\n cax.set_xlim(colorticks[0], colorticks[-1])\n \n for label in ax.get_xticklabels():\n label.set_horizontalalignment('left')\n \n for label in ax.get_yticklabels():\n label.set_horizontalalignment('center')\n label.set_verticalalignment('top')\n \n for item in (\n [ax.xaxis.label, ax.yaxis.label] +\n ax.get_xticklabels() + ax.get_yticklabels() +\n cax.get_xticklabels()\n ):\n item.set_fontsize(7)\n \n ax.title.set_fontsize(10)\n fig.subplots_adjust(top=0.7, bottom=0.15)\n \n pdf.savefig(fig)\n\n return 0", "def display_repos_and_commits(github_id):\r\n\r\n repo_list = get_repos(github_id)\r\n\r\n for repo in repo_list:\r\n commits_count = get_commits(github_id, repo)\r\n print('Repo: {} Number of commits: {}'.format(repo, commits_count))", "def merge(self):\n commits = self._github_api.get_pr_commits(self.number)\n\n def format_commit_author(commit):\n author = commit['commit']['author']\n name = author['name']\n email = author['email']\n return f'{name} <{email}>'\n commit_authors = [format_commit_author(commit) for commit in commits]\n co_authored_by_re = re.compile(\n r'^Co-authored-by:\\s*(.*)', re.MULTILINE)\n\n def extract_co_authors(commit):\n message = commit['commit']['message']\n return co_authored_by_re.findall(message)\n commit_co_authors = []\n for commit in commits:\n commit_co_authors.extend(extract_co_authors(commit))\n\n all_commit_authors = commit_authors + commit_co_authors\n distinct_authors = sorted(set(all_commit_authors),\n key=lambda x: commit_authors.count(x),\n reverse=True)\n\n for i, author in enumerate(distinct_authors):\n print(\"Author {}: {}\".format(i + 1, author))\n\n if len(distinct_authors) > 1:\n primary_author, distinct_other_authors = get_primary_author(\n self.cmd, distinct_authors)\n else:\n # If there is only one author, do not prompt for a lead author\n primary_author = distinct_authors.pop()\n distinct_other_authors = []\n\n commit_title = f'{self.title} (#{self.number})'\n commit_message_chunks = []\n if self.body is not None:\n # Remove comments (i.e. 
<-- comment -->) from the PR description.\n body = re.sub(r\"<!--.*?-->\", \"\", self.body, flags=re.DOTALL)\n # avoid github user name references by inserting a space after @\n body = re.sub(r\"@(\\w+)\", \"@ \\\\1\", body)\n commit_message_chunks.append(body)\n\n committer_name = run_cmd(\"git config --get user.name\").strip()\n committer_email = run_cmd(\"git config --get user.email\").strip()\n\n authors = (\"Authored-by:\" if len(distinct_other_authors) == 0\n else \"Lead-authored-by:\")\n authors += \" %s\" % primary_author\n if len(distinct_authors) > 0:\n authors += \"\\n\" + \"\\n\".join([\"Co-authored-by: %s\" % a\n for a in distinct_other_authors])\n authors += \"\\n\" + \"Signed-off-by: %s <%s>\" % (committer_name,\n committer_email)\n commit_message_chunks.append(authors)\n\n commit_message = \"\\n\\n\".join(commit_message_chunks)\n\n # Normalize line ends and collapse extraneous newlines. We allow two\n # consecutive newlines for paragraph breaks but not more.\n commit_message = \"\\n\".join(commit_message.splitlines())\n commit_message = re.sub(\"\\n{2,}\", \"\\n\\n\", commit_message)\n\n if DEBUG:\n print(\"*** Commit title ***\")\n print(commit_title)\n print()\n print(\"*** Commit message ***\")\n print(commit_message)\n\n if DEBUG:\n merge_hash = None\n else:\n result = self._github_api.merge_pr(self.number,\n commit_title,\n commit_message)\n if not result['merged']:\n message = result['message']\n self.cmd.fail(f'Failed to merge pull request: {message}')\n merge_hash = result['sha']\n\n print(\"Pull request #%s merged!\" % self.number)\n print(\"Merge hash: %s\" % merge_hash)", "def get_git_commiter_count(path):\n process = subprocess.Popen(['git', 'shortlog', '-sn'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n committers = stdout.decode(\"ISO-8859-1\")\n return len(committers.split('\\n'))", "def main(argd):\n if argd['DIR']:\n try:\n os.chdir(argd['DIR'])\n except FileNotFoundError:\n print('\\nDirectory not found: {}'.format(argd['DIR']))\n return 1\n\n authorcnt = get_authors()\n return 0 if authorcnt else 1", "def get_authors(git_url, from_sha, to_sha):\n matches = re.match(\"(?P<git_server>.*):(?P<git_repo>.*)\", git_url)\n if matches is None:\n return (1, f\"could not understand the git url {git_url} for authors detection\")\n git_server = matches.group(\"git_server\")\n git_repo = matches.group(\"git_repo\")\n if git_server is None:\n return (\n 1,\n f\"could not understand the git server in {git_url} for authors detection\",\n )\n if git_repo is None:\n return (\n 1,\n f\"could not understand the git repo in {git_url} for authors detection\",\n )\n\n if \"git.yelpcorp.com\" in git_server:\n ssh_command = (\n f\"ssh {git_server} authors-of-changeset {git_repo} {from_sha} {to_sha}\"\n )\n return _run(command=ssh_command, timeout=5.0)\n else:\n # TODO: PAASTA-16927: support getting authors for services on GHE\n return 1, f\"Fetching authors not supported for {git_server}\"", "def parse_authors():\n import subprocess\n try:\n output = subprocess.check_output(['git', 'shortlog', '-s'],\n universal_newlines=True)\n except Exception as ex:\n print('ex = {!r}'.format(ex))\n return []\n else:\n striped_lines = (l.strip() for l in output.split('\\n'))\n freq_authors = [line.split(None, 1) for line in striped_lines if line]\n freq_authors = sorted((int(f), a) for f, a in freq_authors)[::-1]\n # keep authors with uppercase letters\n authors = [a for f, a in freq_authors if a.lower() != a]\n return authors", "def git_status(c):\n c.run(\"git 
submodule foreach git status\")", "def git(*args):\n cmd = [\"git\"] + list(args)\n try:\n return subprocess.check_output(cmd).decode(\"utf8\").strip()\n except subprocess.CalledProcessError as err:\n print(err)\n sys.exit(err.returncode)", "def main():\n# logging.basicConfig(level=logging.DEBUG)\n try:\n user = sys.ARGV[1]\n except:\n user = 'hmm01i'\n repos = getRepos(user)\n print(\"%i Personal Repos\" % len(repos))\n logging.debug(repos)\n #print(\"Repo,[size, last update]\")\n #for k in repos.keys():\n # print(str(k),repos[k])", "def main():\n parser = argparse.ArgumentParser('compute git hashes')\n parser.add_argument('-d', '--debug', action='store_true')\n parser.add_argument('-k', '--keep-dot-git', action='store_true')\n parser.add_argument('path', nargs='+')\n args = parser.parse_args()\n args.depth = -1 # for debug print\n status = 0\n for path in args.path:\n try:\n try:\n mode, gitclass, size = classify(path)\n except ValueError:\n print('%s: unhashable!' % path)\n status = 1\n continue\n hasher = generic_hash(path, mode, size, args)\n result = hasher.hexdigest()\n if args.debug:\n print('%s %s %s\\t%s' % (strmode(mode), gitclass, result,\n path))\n else:\n print('%s: %s hash = %s' % (path, gitclass, result))\n except OSError as err:\n print(str(err))\n status = 1\n sys.exit(status)", "def __gitStatistics(self):\n self.vcs.gitStatistics(self.project.getProjectPath())", "def get_churn_per_commit(dateshas, excludestr):\n\tprint \"sha;date;churn\" # CSV header line\n\ttotal = 0\n\tfor date, sha in dateshas:\n\t\tcommit = None\n\t\tif excludestr:\n\t\t\t# Example command with filtering:\n\t\t\t# git show abcde -w -C --name-status --format=format: \n\t\t\t#\t\tOutputs all the changed files with just their filenames, \n\t\t\t#\t\tas paths from the repository root. -w flag ignores \n\t\t\t#\t\twhitespace differences, -C flag detects move moves and \n\t\t\t#\t\trenames and ignores those.\n\t\t\t# cut -f2,3:\n\t\t\t#\t\tCuts out the filename (column 2) and the rename \n\t\t\t#\t\tdestination (column 3, if exists). This is done to not \n\t\t\t#\t\thave the M/A/D/R modification indicator from the \n\t\t\t#\t\t--name-status output.\n\t\t\t# grep -v '^Documentation/':\n\t\t\t#\t\tFilters out all the files which are in the specified \n\t\t\t#\t\tfolders.\n\t\t\t# xargs -L 500 git show abcde -w -C --shortstat -- dummy\n\t\t\t#\t\txargs carries all the files that grep outputs over to git \n\t\t\t#\t\tshow, which formats the\tresult into a line of the form \n\t\t\t#\t\t'X files changed, Y insertions(+), Z deletions(-)'.\n\t\t\t#\t\tUsing xargs because OS X has a wonky and unpredictable \n\t\t\t#\t\targument list length limit,\tso this should makes the \n\t\t\t#\t\tscript more portable. 
'dummy' is specified to ensure an \n\t\t\t#\t\tempty set from grep does not lead to 'git show' showing \n\t\t\t#\t\teverything.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--name-status', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcut = subprocess.Popen(['cut', '-f2,3'], stdin=show.stdout, \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tgrep = subprocess.Popen(['grep', '-v', excludestr], \n\t\t\t\t\t\tstdin=cut.stdout, stdout=subprocess.PIPE)\n\t\t\txargs = subprocess.Popen(['xargs', '-L', '500', 'git', 'show', \n\t\t\t\t\t\tsha, '-w', '-C', '--shortstat', \n\t\t\t\t\t\t'--format=format:', '--', 'dummy'], \n\t\t\t\t\t\tstdin=grep.stdout, stdout=subprocess.PIPE)\n\t\t\tcommit = xargs.stdout.readlines()\n\t\telse:\n\t\t\t# If there is no excludestr, we can simply ask for the shortstat \n\t\t\t# information.\n\t\t\tshow = subprocess.Popen(['git', 'show', sha, '-w', '-C', \n\t\t\t\t\t\t'--shortstat', '--format=format:'], \n\t\t\t\t\t\tstdout=subprocess.PIPE)\n\t\t\tcommit = show.stdout.readlines()\n\n\t\t# Remove leading/trailing newlines\n\t\tcommit = [x[:-1] for x in commit if x != '\\n']\n\n\t\t# Because of the xargs approach, there might be multiple result \n\t\t# lines. Iterate over all of them and sum the churn. That is, if there \n\t\t# are actually results left after directory filtering\n\t\tchurn = 0\n\t\tfor line in commit:\n\t\t\tif len(line) > 0:\n\t\t\t\ttry:\n\t\t\t\t\tadded = int(line.split()[3])\n\t\t\t\texcept:\n\t\t\t\t\tadded = 0\n\t\t\tchurn += added\n\t\tif churn > 0:\n\t\t\ttotal += churn\n\t\t\tprint \"%s;%s;%d\" % (sha[:8],str(date), churn)\n\n\treturn total", "def get_author_and_comment_count_per_sub(config: Config, sanitized_authors: dict):\n\n start = time.time()\n raw_result = get_raw_author_and_comment_count_per_sub(config, sanitized_authors)\n end = time.time()\n print(f\"get_raw_author_and_comment_count_per_sub took {end - start} seconds\")\n\n start = time.time()\n sanitized_result = sanitize_author_and_comment_count_per_sub(raw_result)\n end = time.time()\n print(f\"sanitize_author_and_comment_count_per_sub took {end - start} seconds\")\n\n return sanitized_result", "def parse_git_log(cls, repo_path, commit=None, pkgs=False, verbosity=-1):\n cmd = shlex.split(cls._git_cmd)\n # custom git log format, see the \"PRETTY FORMATS\" section of the git\n # log man page for details\n format_lines = [\n '# BEGIN COMMIT',\n '%h', # abbreviated commit hash\n '%cd', # commit date\n '%an <%ae>', # Author Name <[email protected]>\n '%cn <%ce>', # Committer Name <[email protected]>\n '%B', # commit message\n '# END MESSAGE BODY',\n ]\n format_str = '%n'.join(format_lines)\n cmd.append(f'--pretty=tformat:{format_str}')\n\n if commit:\n if '..' 
in commit:\n cmd.append(commit)\n else:\n cmd.append(f'{commit}..origin/HEAD')\n else:\n cmd.append('origin/HEAD')\n\n git_log = subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=repo_path)\n line = git_log.stdout.readline().decode().strip()\n if git_log.poll():\n error = git_log.stderr.read().decode().strip()\n logger.warning('skipping git checks: %s', error)\n return\n\n count = 1\n with base.ProgressManager(verbosity=verbosity) as progress:\n while line:\n hash = git_log.stdout.readline().decode().strip()\n commit_date = git_log.stdout.readline().decode().strip()\n author = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n committer = git_log.stdout.readline().decode('utf-8', 'replace').strip()\n\n message = []\n while True:\n line = git_log.stdout.readline().decode('utf-8', 'replace').strip('\\n')\n if line == '# END MESSAGE BODY':\n # drop trailing newline if it exists\n if not message[-1]:\n message.pop()\n break\n message.append(line)\n\n # update progress output\n progress(f'{hash} commit #{count}, {commit_date}')\n count += 1\n\n commit = GitCommit(hash, commit_date, author, committer, message)\n if not pkgs:\n yield commit\n\n # file changes\n while True:\n line = git_log.stdout.readline().decode()\n if line == '# BEGIN COMMIT\\n' or not line:\n break\n if pkgs:\n parsed = cls._parse_file_line(line.strip())\n if parsed is not None:\n atom, status = parsed\n yield GitPkgChange(atom, status, commit)", "def calc_CI(commits, author):\n\t# delete contents\n\topen('modifications.csv', 'w').close()\n\topen('introductions.csv', 'w').close()\n\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 50 == 0:\n\t\t\tprint commit, '.. ..', count + 1, ' / ', len(commits)\n\n\t\t# c2f does seems to result in a tie error, so c2b and b2f is used instead\t\t\n\t\t#getting the blobs\n\t\tquery = (\"for x in $(echo \" + commit + \" | ~/lookup/getValues c2b |\" +\n\t\t\t# splitting on the semicolon and discarding the newlines\n\t\t\t\" awk -v RS='[;\\\\n]' 1 |\" +\n\t\t\t# discarding the commit's hash (it appears before the blobs' hashes)\n\t\t\t\" tail -n+2); do\" +\n\t\t\t\t# for each blob, we look up it's filename\n\t\t\t\t\" echo $x | ~/lookup/getValues b2f;\" + \n\t\t\t\" done |\" +\n\t\t\t# we discard the first field of the results (blobs' hash)\n\t\t\t\" cut -d ';' -f2 |\" +\n\t\t\t# we check whether one of the modified files is a CI configuration file\n\t\t\t\" egrep '\" + \"|\".join(ci_files) + \"'\")\n\t\tresult = bash(query)\n\t\tif result:\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\t\t\t\t\n\t\t\tif check_if_introduction(commit, result):\n\t\t\t\tf = open(\"introductions.csv\", \"a\")\n\t\t\t\tprint 'introduction'\n\t\t\telse:\n\t\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\t\tprint 'modification'\n\t\t\tf.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def git(*args):\n\n return subprocess.check_output(('git',) + args).decode()", "def log(self, current_path):\n p = Popen(\n [\"git\", \"log\", \"--pretty=format:%H%n%an%n%ar%n%s\", \"-10\"],\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = my_output.decode(\"utf-8\").splitlines()\n i = 0\n PREVIOUS_COMMIT_OFFSET = 4\n while i < len(line_array):\n if 
i + PREVIOUS_COMMIT_OFFSET < len(line_array):\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": line_array[i + PREVIOUS_COMMIT_OFFSET],\n }\n )\n else:\n result.append(\n {\n \"commit\": line_array[i],\n \"author\": line_array[i + 1],\n \"date\": line_array[i + 2],\n \"commit_msg\": line_array[i + 3],\n \"pre_commit\": \"\",\n }\n )\n i += PREVIOUS_COMMIT_OFFSET\n return {\"code\": p.returncode, \"commits\": result}\n else:\n return {\"code\": p.returncode, \"message\": my_error.decode(\"utf-8\")}", "def get_commits(git_path):\n\n proc = subprocess.Popen(\n [\"git\", \"--git-dir=%s\" % git_path, \"log\", \"--full-history\",\n \"--format=NEW COMMIT%n%ct%n%aN%n%aE\", \"--numstat\"],\n stdout=subprocess.PIPE)\n line_stack = []\n\n def peek_line():\n if not line_stack:\n line_stack.append(proc.stdout.readline())\n return line_stack[-1]\n\n def pop_line():\n if line_stack:\n return line_stack.pop()\n return proc.stdout.readline()\n\n def push_line(line):\n line_stack.append(line)\n\n def read_commit():\n while peek_line() and not peek_line().strip():\n pop_line()\n if not peek_line(): return None\n assert peek_line().strip() == \"NEW COMMIT\"\n pop_line()\n\n date = int(pop_line())\n name = pop_line().strip()\n email = pop_line().strip()\n author = sanitize_author(name, email)\n\n if peek_line().strip() == \"NEW COMMIT\":\n return date, author, 0, 0, 0\n\n pop_line()\n insertion_count = 0\n deletion_count = 0\n file_count = 0\n while peek_line().strip() and peek_line().strip() != \"NEW COMMIT\":\n insertions, deletions, path = pop_line().strip().split(None, 2)\n if insertions == \"-\": insertions = 0\n if deletions == \"-\": deletions = 0\n insertion_count += int(insertions)\n deletion_count += int(deletions)\n file_count += 1\n\n return date, author, insertion_count, deletion_count, file_count\n\n while True:\n commit = read_commit()\n if commit is None:\n break\n yield commit", "def calc_CI_diff(commits, author):\n\t# delete contents\n\topen('modifications.csv', 'w').close()\n\topen('introductions.csv', 'w').close()\n\n\tfor count, commit in enumerate(commits):\n\t\t#status update\n\t\tif (count + 1) % 50 == 0:\n\t\t\tprint commit, '.. 
..', count + 1, ' / ', len(commits)\n\n\t\t# cmputeDiff2.perl seems to produce junk to the stdout occasionally\n\t\tdiff = bash(\"echo \" + commit + \" | ssh da4 ~/lookup/cmputeDiff2.perl\")\n\n\t\t# if a CI configuration file is in the diff\n\t\tif re.search(\"|\".join(ci_files), diff):\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\ttime = search(commit, 'commit')[2]\n\n\t\t\tfor blob in diff.split():\n\t\t\t\t# looking for the CI config blob and checking if parent blob exists\n\t\t\t\tif re.search(\"|\".join(ci_files), blob):\n\t\t\t\t\t# if we have both an introduction and a modification\n\t\t\t\t\t# in the same commit, we count it as an introduction\n\t\t\t\t\tif blob.endswith(';'):\n\t\t\t\t\t# if we don't have the parent blob, after the last semicolon,\n\t\t\t\t\t# it is an introduction\n\t\t\t\t\t\tf = open(\"introductions.csv\", \"a\")\n\t\t\t\t\t\tprint 'introduction'\n\t\t\t\t\telse:\n\t\t\t\t\t\tf = open(\"modifications.csv\", \"a\")\n\t\t\t\t\t\tprint 'modification'\n\t\t\t\t\tbreak\n\t\t\tf.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote: -->', commit", "def calc_CI_introductions(commits, author):\n\n\t# using a dictionary that has the commits' hashes as keys,\n\t# so as to not search multiple times for the same commit\n\tCI_checked = {}\n\n\t# delete contents\n\topen('introductions.csv', 'w').close()\n\t\n\t# for every commit, we look up whether the author included a CI file,\n\t# that did not exist in the parent commit\n\tfor count, commit in enumerate(commits):\n\t\t# status update\n\t\tif (count + 1) % 50 == 0:\n\t\t\tprint count + 1, ' / ', len(commits)\n\t\n\t\ttree_hash, parent_commit_hash, time = search(commit, 'commit')\n\t\tif tree_hash not in CI_checked:\n\t\t\tCI_checked[tree_hash] = ci_lookup(tree_hash)\n\t\t\n\t\t# controlling for the case of multiple parent commits\n\t\tall_parent_CI = False\n\t\tfor parent in parent_commit_hash.split(':'):\n\t\t\t# controlling for the case of no parent commits\n\t\t\tif parent == '':\n\t\t\t\tbreak\n\t\t\n\t\t\tparent_tree_hash = search(parent, 'commit')[0]\n\t\t\n\t\t\tif parent_tree_hash not in CI_checked:\n\t\t\t\tparent_CI = ci_lookup(parent_tree_hash)\n\t\t\t\tCI_checked[parent_tree_hash] = parent_CI\n\t\t\telse:\n\t\t\t\tparent_CI = CI_checked[parent_tree_hash]\n\t\t\t\n\t\t\t# checking all the parent commits for the usage of CI\n\t\t\tall_parent_CI = all_parent_CI or parent_CI\n\t\t\n\t\t# if the tree has a CI file, while the parent tree does not, increase the CI score\n\t\tif CI_checked[tree_hash] and not all_parent_CI:\n\t\t\t\n\t\t\tout = bash('echo ' + commit + ' | ~/lookup/getValues c2P')\n\t\t\tmain_proj = out.strip().split(';')[1]\n\t\t\tf = open(\"introductions.csv\", \"a\")\n\t\t\tf.write(author + ', ' + 'CI' + ', ' + str(time) + ', ' + main_proj + '\\n')\n\t\t\tf.close()\n\t\t\tprint 'wrote'\n\tprint (current_time()-start_time)/len(commits), 'seconds per commit'", "def contributors(lancet, output):\n sorting = pygit2.GIT_SORT_TIME | pygit2.GIT_SORT_REVERSE\n commits = lancet.repo.walk(lancet.repo.head.target, sorting)\n contributors = ((c.author.name, c.author.email) for c in commits)\n contributors = OrderedDict(contributors)\n\n template_content = content_from_path(\n lancet.config.get(\"packaging\", \"contributors_template\")\n )\n template = Template(template_content)\n output.write(template.render(contributors=contributors).encode(\"utf-8\"))", "def run(self, repo_url):\n\n 
# only supports git.door43.org\n print('* Checking the repository URL...', end=' ')\n if 'git.door43.org' not in repo_url:\n self.errors.append('Only git.door43.org repositories are supported.')\n print('')\n return False\n\n # get gogs user and repository name\n pos = repo_url.find('https://git.door43.org/')\n if pos != 0:\n self.errors.append('Invalid repository URL: {0}'.format(repo_url))\n print('')\n return False\n\n parts = filter(bool, repo_url[23:].split('/'))\n if len(parts) != 2:\n self.errors.append('Not able to determine user and project: {0}'.format(repo_url))\n print('')\n return False\n\n gogs_user = parts[0]\n repo_name = parts[1]\n print('finished.')\n\n # get most recent commit\n print('* Getting the most recent commit...', end=' ')\n commits_html = get_url(join_url_parts(repo_url, 'commits', 'master'))\n\n # parse the dom\n commits_dom = BeautifulSoup(commits_html, 'html.parser')\n commit_row = commits_dom.body.find('table', {'id': 'commits-table'}).find('tbody').find('tr')\n if not commit_row:\n self.errors.append('Commit data was not found for {0}'.format(repo_url))\n\n # commit values: 0=author, 1=sha_and_message, 2=date\n commit_values = commit_row.find_all('td')\n sha_a_tag = commit_values[1].find('a')\n short_sha = sha_a_tag.text\n print('finished.')\n\n # check the meta data\n\n # if not tS, check the usfm directory (1 file per book)\n\n # if tS, check the chapter directories (1 directory per chapter, 1 file per chunk)\n\n # check live.door43.org\n live_url = join_url_parts('https://live.door43.org/u', gogs_user, repo_name, short_sha)\n\n # first, check if the page exists\n print('* Verifying that the output file exists...', end=' ')\n try:\n get_url(live_url)\n except HTTPError as err:\n self.errors.append('Not able to open {0}, {1}'.format(live_url, str(err)))\n print('')\n return False\n print('finished.')\n\n # next, validate the HTML\n print('* Validating the generated HTML...', end=' ')\n validator_url = 'https://validator.nu/?out=json&charset=UTF-8&parser=html5&doc={0}'.format(\n urllib.quote(live_url))\n friendly_url = 'https://validator.nu/?charset=UTF-8&parser=html5&doc={0}'.format(\n urllib.quote(live_url))\n validator_results = json.loads(get_url(validator_url))\n\n html_warnings = [m for m in validator_results['messages'] if m['type'] == 'info' and m['subType'] == 'warning']\n if html_warnings:\n for html_warning in html_warnings:\n self.warnings.append('HTML Validation Warning: {0}'.format(html_warning['message']))\n self.warnings.append('For details check {0}'.format(friendly_url))\n\n html_errors = [m for m in validator_results['messages'] if m['type'] == 'error']\n if html_errors:\n for html_error in html_errors:\n self.errors.append('HTML Validation Error: {0}'.format(html_error['message']))\n self.errors.append('For details check {0}'.format(friendly_url))\n print('')\n return False\n print('finished.')\n\n return True", "def status():\n if not check_for_wit():\n raise NoWitError(f'No .wit folder exists in {os.getcwd()}')\n if not os.path.exists(refs_path):\n print('No files have been committed yet')\n return False\n print(f'Current commit ID: {get_current_commit_id()}')\n print('Changes to be committed:')\n print('-' * 20)\n for num, file in enumerate(get_files_to_be_committed()):\n print(f'{num + 1}: {file}')\n print('\\n')\n print('Changes not staged for commit')\n print('-' * 20)\n for num, file in enumerate(get_files_not_staged()):\n print(f'{num + 1}: {file}')\n for file in deleted_files:\n print(f'{file} - deleted from main folder')\n 
print('\\n')\n print('Untracked files')\n print('-' * 20)\n for num, file in enumerate(get_untracked_files()):\n print(f'{num + 1}: {file}')", "def query_git():\n return subprocess.run(\n shlex.split('git status --porcelain=2 --branch'),\n stdout=subprocess.PIPE,\n stderr=subprocess.DEVNULL)", "def check_previous_contributions(repo, author):\n cmds = [github_cli, 'search', 'prs', '--author', author, '--repo', repo, '--json', 'number,state']\n\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n returncode = p.returncode\n print(err)\n print(returncode)\n ntries = 1\n if returncode:\n while returncode and ntries < 10:\n ntries += 1\n time.sleep(10)\n with subprocess.Popen(cmds, stdout=subprocess.PIPE) as p:\n result, err = p.communicate()\n returncode = p.returncode\n print(\"New returncode : \", returncode)\n\n\n return json.loads(result)", "def _git_check_output_lines(\n cmd: List[str], cwd: Path, exit_on_error: bool = True\n) -> List[str]:\n logger.debug(\"[%s]$ %s\", cwd, \" \".join(cmd))\n try:\n return check_output(\n [\"git\"] + cmd,\n cwd=str(cwd),\n encoding=\"utf-8\",\n stderr=PIPE,\n env={\"LC_ALL\": \"C\"},\n ).splitlines()\n except CalledProcessError as exc_info:\n if not exit_on_error:\n raise\n if exc_info.returncode != 128:\n sys.stderr.write(exc_info.stderr)\n raise\n\n # Bad revision or another Git failure. Follow Black's example and return the\n # error status 123.\n for error_line in exc_info.stderr.splitlines():\n logger.error(error_line)\n sys.exit(123)", "def run(organization, top_n, username, pat):\n print()\n try:\n raw_repos = get_repos(organization, username=username, pat=pat)\n except Exception as ex:\n click.echo('Error collecting repos')\n sys.exit(1)\n\n repos = []\n\n with Halo(text='Retrieving repos...', spinner='dots'):\n for raw_repo in raw_repos:\n repos.append(Repo(raw_repo))\n\n if len(repos) == 0:\n print('No public repos were found')\n sys.exit(0)\n\n with Halo(text='Retrieving pull requests...', spinner='dots'):\n try:\n with ThreadPoolExecutor(max_workers=5) as executor:\n future_to_repo = {executor.submit(get_prs, repo.pr_url, username, pat): repo for repo in repos}\n for future in as_completed(future_to_repo):\n repo = future_to_repo[future]\n\n repo.pr_count = future.result()\n except Exception as exc:\n print('%r generated an exception: %s' % (repo.name, exc))\n sys.exit(1)\n\n top_star = sorted(repos, key=lambda repo: repo.stars, reverse=True)[:top_n]\n top_fork = sorted(repos, key=lambda repo: repo.forks, reverse=True)[:top_n]\n top_prs = sorted(repos, key=lambda repo: repo.pr_count, reverse=True)[:top_n]\n top_contrib = sorted(repos, key=lambda repo: repo.contrib, reverse=True)[:top_n]\n\n print_stars(top_star, top_n)\n print_forks(top_fork, top_n)\n print_prs(top_prs, top_n)\n print_contrib(top_contrib, top_n)", "def git(*args):\n return subprocess.check_output([\"git\"] +\n list(args)).decode(\"utf-8\").strip().split(\"\\n\")", "def one_line_git_summary(path):\n return _run_command(path, 'git show --oneline -s')" ]
[ "0.64920014", "0.627672", "0.62206215", "0.61437243", "0.6047011", "0.6009975", "0.59818316", "0.5933847", "0.58700925", "0.58233637", "0.5719015", "0.559531", "0.5551623", "0.554489", "0.55376357", "0.55274796", "0.5512924", "0.55097973", "0.54649127", "0.5457391", "0.53778857", "0.5374456", "0.53651583", "0.5360568", "0.5359236", "0.5355471", "0.53530085", "0.5351227", "0.534309", "0.53306854" ]
0.7638564
0
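The record's get_authors delegates the formatting to a parse_authors helper that is not included in the document. The sketch below is an assumption about what such a helper could look like — one 'timestamp;name;email' line per commit (matching the --format string above), de-duplicated by email and printed in first-seen order — not the original implementation.

import time

def parse_authors(lines):
    # Hypothetical stand-in for the helper referenced by get_authors().
    # Each line has the form b'<unix timestamp>;<name>;<email>' (see the git --format above).
    seen = {}
    for raw in lines:
        line = raw.decode('utf-8') if isinstance(raw, bytes) else raw
        stamp, name, email = line.split(';', 2)
        # Keep only the first (oldest) entry per email; the log is emitted in reverse order.
        if email not in seen:
            seen[email] = (name, float(stamp))
    for email, (name, stamp) in seen.items():
        first = time.strftime('%Y-%m-%d', time.gmtime(stamp))
        print('{} <{}> (first commit: {})'.format(name, email, first))
    return len(seen)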
Parse a string timestamp into a Time.
def parse_time(s):
    return time.gmtime(float(s))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))", "def parse_timestamp(timestamp):\n if not timestamp or timestamp == '0000-00-00T00:00:00Z':\n return struct_time((0, 0, 0, 0, 0, 0, 0, 0, 0))\n return strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')", "def from_str(cls, timestamp_str):\n units = timestamp_str.split(\":\")\n seconds_ms = units[-1].split(\".\")\n hours = int(units[0])\n minutes = int(units[1])\n seconds = int(seconds_ms[0])\n milliseconds = int(seconds_ms[1])\n return cls(hours, minutes, seconds, milliseconds)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)", "def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()", "def parse_timestamp(str_timestamp):\n try:\n dt = parse(str_timestamp)\n except Exception as e:\n api.abort(422, \"date from the request cannot be parsed: {}\".format(e))\n return dt", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def time_from_string(time):\n _type = type(time)\n try:\n if _type == datetime.time:\n return time\n elif _type == datetime.datetime:\n return datetime.datetime.time(time)\n else:\n try:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%I:%M %p'))\n except ValueError:\n return datetime.datetime.time(datetime.datetime.strptime(time, '%H:%M:%S'))\n except ValueError:\n return time\n except TypeError:\n return time", "def parse_time_str(self, time_str):\n try:\n return datetime.strptime(self.force_hour_two_digits(time_str), TIME_FORMAT).time()\n except ValueError:\n return None", "def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n ([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result", "def get_datetime_from_timestamp(timestamp: str) -> datetime.datetime:\n return datetime.datetime.strptime(timestamp, DateFormat)", "def parse_timestamp(ts):\n m = re.match('(?P<Y>\\d\\d\\d\\d)(?P<M>\\d\\d)?(?P<D>\\d\\d)?' +\n '(?P<HM>\\d\\d\\d\\d)?(?P<S>\\d\\d)?(?P<MS>\\.\\d+)?' 
+\n '(?P<Z>[\\+\\-]\\d\\d\\d\\d)?(?P<P>\\^\\d+)?', ts)\n if m:\n year = int(m.group('Y'))\n month = int((m.group('M') or 0))\n day = int((m.group('D') or 0))\n if m.group('HM'):\n hour = int(m.group('HM')[0:2])\n minute = int(m.group('HM')[2:])\n else:\n hour = minute = 0\n seconds = int((m.group('S') or 0))\n if m.group('MS'):\n millis = int(m.group('MS')[1:]) * 100000\n else:\n millis = 0\n # This raises ValueError on bad input\n return datetime.datetime(year, month, day, hour,\n minute, seconds, millis,\n tzinfo=datetime.timezone.utc)\n else:\n raise ValueError('invalid format (%s) for timestamp' % ts)", "def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)", "def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue", "def parse_timestamp(ts_str):\n dt = dateutil.parser.parse(ts_str)\n return (time.mktime(dt.timetuple()) + dt.microsecond/1000000.0)", "def parse_timestamp_str(timestamp_str):\n\n # timezone conversion isn't handled by strptime... datetime is supposed to be expressed in\n # the computer's localtime. Split out the timezone and adjust datetime manually if\n # necessary.\n (tm_str, tm_zone) = timestamp_str.rsplit(\"-\", 1)\n\n # use datetime rather than time to keep the fractional seconds\n dt = datetime.datetime.strptime(tm_str, \"%Y%m%d-%H%M%S-%f\")\n\n if tm_zone not in time.tzname:\n # timezone mismatch.\n if tm_zone in ['GMT', 'UTC']:\n # computer is not UTC, but string is. Subtract the local offset to get dt from UTC\n # into the local timezone.\n dt = dt + datetime.timedelta(seconds=time.localtime(dt.timestamp()).tm_gmtoff)\n elif 'UTC' in time.tzname or 'GMT' in time.tzname:\n if tm_zone in _TZ_HOUR_OFFSETS:\n # computer is in UTC, but string isn't. 
For the timezones we recognize, add in\n # their offset to move dt into UTC\n dt = dt - datetime.timedelta(hours=_TZ_HOUR_OFFSETS[tm_zone])\n else:\n raise Error(\"unknown GMT offset for timezone %s\" % tm_zone)\n else:\n raise Error(\n \"unable to convert between two non-GMT timezones: %s and %s\" %\n (tm_zone, time.tzname[0])\n )\n\n return dt.timestamp()", "def parse_timestamp(node):\n\n if len(node) != 3:\n raise ValueError(\"Invalid timestamp object.\")\n\n return Timestamp(node[0], node[1], node[2])", "def timestamp_from_string(self, timestamp):\n formats = (self.TIMESTAMP_FORMAT,\n self.DEFAULT_TIMESTAMP_FORMAT,\n self.OLD_DEFAULT_TIMESTAMP_FORMAT)\n for format_string in formats:\n try:\n history_time = time.strptime(timestamp, format_string)\n except ValueError:\n pass\n else:\n return int(time.mktime(history_time))\n self.debug('The timestamp \"%s\" does not match any of the formats %s' % (timestamp, formats))", "def str2time(s):\n return datetime.strptime(str(s), '%Y%m%d%H%M%S')", "def get_time_from_string(text):\n field = text.split(':')\n hr = int(field[0])\n mn = int(field[1])\n field = field[2].split('.')\n sec = int(field[0])\n usec = int(field[1])\n return datetime.time(hr, mn, sec, usec)", "def time_string2dt(time_string: str)-> datetime:\n return parse(time_string, fuzzy=True)", "def parse_timestamp(ts):\n return DateTimeField()._to_python(ts)", "def parse(str):\n if len(str) != 16:\n raise ValueError(\"Invalid time length %d\" % len(str))\n if (str[-1]) == 'R':\n return parse_relative_time(str)\n return parse_absolute_time(str)", "def parse_time(dt: str) -> datetime:\n return datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%SZ\")", "def parse_time(value: str) -> datetime:\n\n try:\n return datetime.strptime(value, \"%Y-%m-%dT%H:%M:%SZ\")\n except ValueError:\n return datetime.min", "def str_to_time(timestamp,tz=None):\n # HINT: Use the code from the previous exercise and update the timezone\n # Use localize if timezone is a string; otherwise replace the timezone if not None\n try:\n import dateutil.parser \n import pytz \n #result = ''\n thedate = dateutil.parser.parse(timestamp) ### new\n \n if thedate.tzinfo != None:\n thedate = dateutil.parser.parse(timestamp)\n elif type(tz) == str:\n e = pytz.timezone(tz)\n thedate = e.localize(thedate)\n elif tz is not None:\n thedate = thedate.replace(tzinfo=tz)\n \n return thedate\n except:\n return None", "def decode_timestamp(self, string):\n\n if isinstance(string, str):\n return datetime.strptime(string, self.timestamp_format)\n else:\n return string", "def convert_timestamp(ts):\n format = '%Y-%m-%d %H:%M:%S'\n return datetime.strptime(ts, format)", "def parse(timestring):\n for parser in _PARSERS:\n match = parser['pattern'].match(timestring)\n if match:\n groups = match.groups()\n ints = tuple(map(int, groups))\n time = parser['factory'](ints)\n return time\n\n raise TimeError('Unsupported time format {}'.format(timestring))" ]
[ "0.75230086", "0.7408081", "0.73725647", "0.73398465", "0.73247117", "0.72148603", "0.71765757", "0.7144907", "0.7143915", "0.7115504", "0.71063656", "0.70861506", "0.708549", "0.70443153", "0.70397437", "0.7023746", "0.6990175", "0.69791627", "0.6971797", "0.6964172", "0.6946025", "0.69141376", "0.6820886", "0.68127775", "0.67829365", "0.6725844", "0.67246294", "0.67202497", "0.671133", "0.670273" ]
0.7556363
0
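A minimal sketch of the pattern shared by the record above: the positive passage and all of its negatives parse a textual timestamp into a timezone-aware datetime and raise ValueError on malformed input. The sketch below is illustrative only — it assumes an ISO-8601-like input string and a hypothetical helper name, not the compact `YYYYMMDDHHMM...` format the positive document actually matches.

```python
from datetime import datetime, timezone

def parse_utc_timestamp(ts: str) -> datetime:
    # Try the fractional-seconds form first, then the plain form.
    for fmt in ("%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"):
        try:
            return datetime.strptime(ts, fmt).replace(tzinfo=timezone.utc)
        except ValueError:
            continue
    # Mirror the positive document: reject anything that does not match.
    raise ValueError("invalid format (%s) for timestamp" % ts)
```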
Preprocesses a single utterance wav/text pair: this writes the mel-scale spectrogram to disk and returns a tuple to write to the train.txt file
def _process_utterance(lf0_dir, mgc_dir, bap_dir, cmp_dir, linear_dir, basename, wav_path, text, hparams): if hparams.trim_silence: tar_wavfile = wav_path[:-4] + "_trim.wav" print("raw wav path:%s" % wav_path) wav_raw, fs = sf.read(wav_path) wav_trim = audio.trim_silence(wav_raw, hparams) sf.write(tar_wavfile, wav_trim, fs) wav_path = tar_wavfile nFFTHalf, alpha, bap_dim = audio.get_config(hparams.sample_rate) mcsize = hparams.num_mgc - 1 filename = basename #os.path.basename(wav_path).split(".")[0] print('extract feats for %s' % wav_path) # extract f0,sp,ap os.system("analysis %s %s/%s.f0 %s/%s.sp %s/%s.bapd" % (wav_path, lf0_dir, filename, mgc_dir, filename, bap_dir, filename)) # get float64??? # interpolate f0 f0 = np.fromfile("%s/%s.f0" % (lf0_dir, filename),dtype=np.float64) continuous_f0 = interp1d(f0, kind="slinear") continuous_f0.tofile("%s/%s.f0c" % (lf0_dir, filename)) # convert f0 to lf0 os.system("x2x +da %s/%s.f0c > %s/%s.f0a" % (lf0_dir, filename, lf0_dir, filename)) os.system("x2x +af %s/%s.f0a | sopr -magic 0.0 -LN -MAGIC -1.0E+10 > %s/%s.lf0" % ( lf0_dir, filename, lf0_dir, filename)) # convert sp to mgc os.system("x2x +df %s/%s.sp | sopr -R -m 32768.0 | " "mcep -a %f -m %d -l %d -e 1.0E-8 -j 0 -f 0.0 -q 3 " "> %s/%s.mgc" % (mgc_dir, filename, alpha, mcsize, nFFTHalf, mgc_dir, filename)) # convert ap to bap os.system("x2x +df %s/%s.bapd > %s/%s.bap" % (bap_dir, filename, bap_dir, filename)) # merge mgc,lf0 and bap to cmp os.system("merge +f -s 0 -l 1 -L %d %s/%s.mgc < %s/%s.lf0 > %s/%s.ml" % ((mcsize+1), mgc_dir, filename, lf0_dir, filename, cmp_dir, filename)) os.system("merge +f -s 0 -l %d -L %d %s/%s.ml < %s/%s.bap > %s/%s.cmp" % (bap_dim, (mcsize+2), cmp_dir, filename, bap_dir, filename, cmp_dir, filename)) #if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: # return None #Compute the linear scale spectrogram from the wav wav = audio.load_wav(wav_path, hparams.sample_rate) linear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32) linear_frames = linear_spectrogram.shape[1] #sanity check #assert linear_frames == mel_frames lf0 = np.fromfile("%s/%s.lf0" % (lf0_dir, filename), dtype=np.float32) mgc = np.fromfile("%s/%s.mgc" % (mgc_dir, filename), dtype=np.float32) bap = np.fromfile("%s/%s.bap" % (bap_dir, filename), dtype=np.float32) cmp = np.fromfile("%s/%s.cmp" % (cmp_dir, filename), dtype=np.float32) cmp_dim = mcsize + 1 + 1 + bap_dim cmp_frames = cmp.shape[0] / cmp_dim #print(f0[:100]) #print(continuous_f0[:100]) print(lf0.shape) print(continuous_f0.shape) print(mgc.shape) print(bap.shape) print(cmp_frames) print(continuous_f0.dtype) print(mgc.dtype) print(bap.dtype) assert (mgc.shape[0]/(mcsize+1)) == (continuous_f0.shape[0]/1) == (bap.shape[0]/bap_dim) == cmp_frames assert cmp_dim == hparams.num_mels #assert len(out) >= cmp_frames * audio.get_hop_size(hparams) #time resolution adjustement #ensure length of raw audio is multiple of hop size so that we can use #transposed convolution to upsample #out = out[:mel_frames * audio.get_hop_size(hparams)] #assert len(out) % audio.get_hop_size(hparams) == 0 #time_steps = len(out) # Write the spectrogram and audio to disk #audio_filename = 'audio-{}.npy'.format(index) cmp_mat = cmp.reshape(-1, cmp_dim) cmp_filename = 'cmp-{}.npy'.format(basename) linear_filename = 'linear-{}.npy'.format(basename) #np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False) np.save(os.path.join(cmp_dir, cmp_filename), cmp_mat, allow_pickle=False) 
np.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False) # Return a tuple describing this training example return (cmp_filename, linear_filename, cmp_frames, text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_utterance(pml_dir, wav_dir, index, wav_path, pml_path, hparams):\n try:\n # Load the audio as numpy array\n wav = audio.load_wav(wav_path)\n except FileNotFoundError: # catch missing wav exception\n print('file {} present in csv metadata is not present in wav folder. skipping!'.format(\n wav_path))\n return None\n\n # rescale wav\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n\n # Assert all audio is in [-1, 1]\n if (wav > 1.).any() or (wav < -1.).any():\n raise RuntimeError('wav has invalid value: {}'.format(wav_path))\n\n # Mu-law quantize\n if is_mulaw_quantize(hparams.input_type):\n # [0, quantize_channels)\n out = mulaw_quantize(wav, hparams.quantize_channels)\n\n constant_values = mulaw_quantize(0, hparams.quantize_channels)\n out_dtype = np.int16\n\n elif is_mulaw(hparams.input_type):\n # [-1, 1]\n out = mulaw(wav, hparams.quantize_channels)\n constant_values = mulaw(0., hparams.quantize_channels)\n out_dtype = np.float32\n\n else:\n # [-1, 1]\n out = wav\n constant_values = 0.\n out_dtype = np.float32\n\n # Get the PML features from the cmp file\n pml_cmp = np.fromfile(pml_path, dtype=np.float32)\n pml_features = pml_cmp.reshape((-1, hparams.pml_dimension))\n pml_frames = pml_features.shape[0]\n\n if pml_frames > hparams.max_pml_frames and hparams.clip_pmls_length:\n return None\n\n # Find parameters\n n_fft = (hparams.num_freq - 1) * 2\n\n if hparams.use_lws:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l, r = audio.pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Zero pad audio signal\n out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)\n else:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l_pad, r_pad = audio.librosa_pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)\n out = np.pad(out, (l_pad, r_pad), mode='constant', constant_values=constant_values)\n\n # print(len(out), pml_frames, audio.get_hop_size(hparams), pml_frames * audio.get_hop_size(hparams))\n assert len(out) >= pml_frames * audio.get_hop_size(hparams)\n\n # time resolution adjustment\n # ensure length of raw audio is multiple of hop size so that we can use\n # transposed convolution to upsample\n out = out[:pml_frames * audio.get_hop_size(hparams)]\n assert len(out) % audio.get_hop_size(hparams) == 0\n time_steps = len(out)\n\n # Write the spectrogram and audio to disk\n audio_filename = os.path.join(wav_dir, 'audio-{}.npy'.format(index))\n pml_filename = os.path.join(pml_dir, 'pml-{}.npy'.format(index))\n np.save(audio_filename, out.astype(out_dtype), allow_pickle=False)\n np.save(pml_filename, pml_features, allow_pickle=False)\n\n # global condition features\n if hparams.gin_channels > 0:\n raise RuntimeError('When activating global conditions, please set your speaker_id rules in line 129 of '\n 'datasets/wavenet_preprocessor.py to use them during training')\n else:\n speaker_id = '<no_g>'\n\n # Return a tuple describing this training example\n return audio_filename, pml_path, pml_filename, speaker_id, time_steps, pml_frames", "def tts(model, text):\n\tif USE_CUDA:\n\t\tmodel = model.cuda()\n\t\n\t# NOTE: dropout in the decoder should be activated for generalization!\n\t# model.decoder.eval()\n\tmodel.encoder.eval()\n\tmodel.postnet.eval()\n\n\tsequence = np.array(text_to_sequence(text))\n\tsequence = Variable(torch.from_numpy(sequence)).unsqueeze(0)\n\tif USE_CUDA:\n\t\tsequence = 
sequence.cuda()\n\n\t# Greedy decoding\n\tmel_outputs, linear_outputs, gate_outputs, alignments = model(sequence)\n\n\tlinear_output = linear_outputs[0].cpu().data.numpy()\n\tspectrogram = audio._denormalize(linear_output)\n\talignment = alignments[0].cpu().data.numpy()\n\n\t# Predicted audio signal\n\twaveform = audio.inv_spectrogram(linear_output.T)\n\n\treturn waveform, alignment, spectrogram", "def write_megam_file(train_toks, encoding, stream, bernoulli: bool = ..., explicit: bool = ...):\n ...", "def inference(self):\n embeddings = self.process_speaker(speaker_speech_path=self.main_configs.SPEAKER_SPEECH_PATH)\n with open(self.main_configs.INPUT_TEXTS_PATH, \"r\") as file:\n texts = file.readlines()\n specs = self.synthesize_spectrograms(texts=texts, embeddings=embeddings)\n specs = specs[0]\n wav = self.generate_waveform(specs)\n return wav", "def remix(self):\n self.original = audio.LocalAudioFile(self.infile)\n #for i, segment in enumerate(self.original.analysis.segments):\n # segment.encode(\"seg_%s.mp3\" % i)\n print \"\\n\\n\\n\"\n loudnesses = [x.timbre[0] for i, x in enumerate(self.original.analysis.segments)]\n brightnesses = [x.timbre[1] for i, x in enumerate(self.original.analysis.segments)]\n flatnesses = [x.timbre[2] for i, x in enumerate(self.original.analysis.segments)]\n attacks = [x.timbre[3] for i, x in enumerate(self.original.analysis.segments)]\n timbre5 = [x.timbre[4] for i, x in enumerate(self.original.analysis.segments)]\n timbre6 = [x.timbre[5] for i, x in enumerate(self.original.analysis.segments)]\n timbre7 = [x.timbre[6] for i, x in enumerate(self.original.analysis.segments)]\n timbre8 = [x.timbre[7] for i, x in enumerate(self.original.analysis.segments)]\n timbre9 = [x.timbre[8] for i, x in enumerate(self.original.analysis.segments)]\n timbre10 = [x.timbre[9] for i, x in enumerate(self.original.analysis.segments)]\n timbre11 = [x.timbre[10] for i, x in enumerate(self.original.analysis.segments)]\n timbre12 = [x.timbre[11] for i, x in enumerate(self.original.analysis.segments)]\n\n print \"AVERAGES\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (avg(loudnesses),avg(brightnesses),avg(flatnesses),avg(attacks),avg(timbre5),avg(timbre6),avg(timbre7),avg(timbre8),avg(timbre9),avg(timbre10),avg(timbre11),avg(timbre12))\n print\n print \"STDVS\"\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % ('loud','bright','flat','attack','t5','t6','t7','t8','t9','t10','t11','t12')\n print \"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\" % (stddev(loudnesses),stddev(brightnesses),stddev(flatnesses),stddev(attacks),stddev(timbre5),stddev(timbre6),stddev(timbre7),stddev(timbre8),stddev(timbre9),stddev(timbre10),stddev(timbre11),stddev(timbre12))\n\n\n print \"\\tLoud\\tBright\\tFlat\\tAttack\\ttim5\\ttim6\\ttim7\\ttim8\\ttim9\\ttim10\\ttim11\\ttim12\"\n for segment in self.original.analysis.segments:\n if are_kicks(segment): print \"Kick\",\n elif are_snares(segment): print \"Snar\",\n elif are_hats(segment): print \"Hats\",\n else: print \"else\",\n print \"\\t%s\\t%s\\t%s\\t%s\\t%s\" % (segment.timbre[0], segment.timbre[1], segment.timbre[2], segment.timbre[3], segment.timbre[4])\n\n kicks = self.original.analysis.segments.that(are_kicks)\n #if kicks: kicks.encode('kicks.mp3')\n snares = self.original.analysis.segments.that(are_snares)\n #if snares: 
snares.encode('snares.mp3')\n hats = self.original.analysis.segments.that(are_hats)\n #if hats: hats.encode('hats.mp3')\n\n # Time to replace\n hat_sample = audio.AudioData(self.sample_path + self.template['hats'], sampleRate=44100, numChannels=2, verbose=False)\n kick_sample = audio.AudioData(self.sample_path + self.template['kick'], sampleRate=44100, numChannels=2, verbose=False)\n snare_sample = audio.AudioData(self.sample_path + self.template['snare'], sampleRate=44100, numChannels=2, verbose=False)\n \n empty = audio.AudioData(ndarray=numpy.zeros(((self.original.sampleRate * self.original.analysis.duration), 2), dtype=numpy.int16), numChannels=2, sampleRate=44100)\n\n last = 0\n for segment in kicks:\n if last + len(kick_sample.data) > segment.start:\n print \"Adding kick at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(kick_sample.data)] += kick_sample.data\n last = segment.start\n\n last = 0\n for segment in snares:\n if last + len(snare_sample.data) > segment.start:\n print \"Adding snare at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(snare_sample.data)] += snare_sample.data \n last = segment.start\n for segment in hats:\n if last + len(hat_sample.data) > segment.start:\n print \"Adding hat at %s\" % segment.start\n empty.data[self.original.sampleRate*segment.start:self.original.sampleRate*segment.start + len(hat_sample.data)] += hat_sample.data\n last = segment.start\n\n audio.mix(empty, self.original, 0.5).encode('mixed.mp3')", "def load_and_process_audio(self):\n output_vector = None\n doa = None\n if self.model == \"gcc_cnn\":\n output_vector, doa = self.format_gcc_cnn()\n elif self.model == \"gcc_dsp\":\n output_vector, doa = self.format_gcc_dsp()\n elif self.model == \"raw_cnn\":\n output_vector, doa = self.format_raw_audio_cnn()\n elif self.model == \"raw_resnet\":\n output_vector, doa = self.format_raw_audio_cnn()\n else:\n print(\"Error -> No file found\")\n\n return output_vector, doa", "def processText(self, text: str, filename: str) :\n execution_time = 0.\n\n directory = os.path.join(self.execution_time_dir, AUDIO_DIR, self.getTTS().getName())\n make_dir(directory)\n time_for_generating_audio_fpath = os.path.join(directory, filename + \".txt\")\n \n audio_fpath = self.getTTS().getAudioPath(\n text=text, audio_dir=self.audio_dir, filename=filename)\n \n if self.recompute or not os.path.exists(audio_fpath):\n # print(audio_fpath)\n start_time = time.time()\n self.getTTS().generateAudio(text=text, audio_fpath=audio_fpath)\n save_execution_time(fpath=time_for_generating_audio_fpath, execution_time=time.time() - start_time)\n \n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_generating_audio_fpath) \n \n transcription_dir = os.path.join(self.transcription_dir, self.getTTS().getName())\n \n transcriptions = {}\n for asr in self.asrs :\n directory = os.path.join(\n self.execution_time_dir, TRANSCRIPTION_DIR, self.getTTS().getName(), asr.getName())\n make_dir(directory)\n time_for_recognizing_audio_fpath = os.path.join(\n directory, filename + \".txt\")\n\n if self.recompute :\n start_time = time.time()\n # TODO: \n # change recognize audio -> input audio instead of fpath\n # audio = asr.loadAudio(audio_fpath=audio_fpath)\n # transcription = asr.recognizeAudio(audio=audio)\n # asr.saveTranscription(transcription_fpath, transcription)\n transcription = 
asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.setTranscription(transcription)\n asr.saveTranscription(transcription_dir=transcription_dir, filename=filename)\n save_execution_time(fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n \n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n num_retry = 0\n while transcription == \"\" and num_retry < self.max_num_retry :\n start_time = time.time()\n asr.recognizeAudio(audio_fpath=audio_fpath)\n asr.saveTranscription(\n transcription_dir=transcription_dir, filename=filename)\n save_execution_time(\n fpath=time_for_recognizing_audio_fpath, execution_time=time.time() - start_time)\n transcription = asr.loadTranscription(\n transcription_dir=transcription_dir, filename=filename)\n\n if asr.getName() == \"wit\" :\n random_number = float(random.randint(9, 47))/10.\n time.sleep(random_number)\n\n num_retry += 1\n\n transcriptions[asr.getName()] = preprocess_text(transcription)\n\n ## add execution time for generating audio\n execution_time += get_execution_time(\n fpath=time_for_recognizing_audio_fpath) \n \n\n cases = self.caseDeterminer(text, transcriptions)\n # if sum(cases.values()) == 0 :\n # print(text)\n # print(transcriptions[\"wav2vec2\"])\n # print(cases)\n # print()\n \n for asr_name, case in cases.items() :\n self.saveCase(self.case_dir, self.getTTS().getName(), asr_name, filename, str(case))\n\n # print(f\"Execution time: {execution_time}\")\n return cases, execution_time", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def convert_to_wav(txt_file, sph_path, target_dir):\n wav_dir = os.path.join(target_dir, 'wav/')\n txt_dir = os.path.join(target_dir, 'txt/')\n os.makedirs(wav_dir, exist_ok=True)\n os.makedirs(txt_dir, exist_ok=True)\n path_to_data = os.path.dirname(txt_file)\n\n def process(x):\n file_path = x[\"audio_file\"]\n text = x[\"transcription\"]\n start_time = x[\"start_time\"]\n duration = x[\"end_time\"] - start_time\n file_name = os.path.splitext(os.path.basename(file_path))[0]\n file_name = str(start_time) + \"_\" + str(duration) + file_name\n text = text.strip().upper()\n with open(os.path.join(txt_dir, file_name + '.txt'), 'w') as f:\n f.write(text)\n cmd = \"sox -v 0.6 -t wav {} -r {} -b 16 -c 1 -t wav {} trim {} {}\".format(\n os.path.join(path_to_data, file_path),\n args.sample_rate,\n os.path.join(wav_dir, file_name + \".wav\"),\n start_time,\n duration)\n subprocess.call([cmd], shell=True)\n print('Converting wav to wav for {}.'.format(txt_file))\n # generate processed data\n data = read_transcription_file(txt_file, sph_path)\n with ThreadPool(10) as pool:\n pool.map(process, data)", "def concatenate(self, use_trim=False, top_db=None):\n spkr_num = len(self.raw_data_dir)\n print(\"No.speakers : %d\" % spkr_num)\n f_info = open(os.path.join(self.out_data_dir, 'info.pkl'), 'wb') # f to write down information of processed data\n info = []\n f_setting = open(os.path.join(self.out_data_dir, 'setting.txt'), 'wt') # f to write down settings of processing pipe\n\n # Concatenating......\n print(\"Concatenation begins...\")\n if use_trim:\n print(\"Removing nonspeech samples is activated.\")\n for i, folder in enumerate(self.raw_data_dir):\n spkr_id = self.get_spkr_name(folder_name=folder)\n print(\"%d: %sth speaker processing...\" % (i, spkr_id))\n utter_all = []\n for root, _, utter_names in os.walk(folder):\n if len(utter_names) > 0:\n utter_names = list(filter(lambda x: x.endswith(self.file_suffix), utter_names))\n for utter_name in 
utter_names:\n utter_path = os.path.join(root, utter_name) # absolute path of each utterance\n utter = self.load_audio(utter_path)\n utter_all.append(utter)\n utter_all = np.concatenate(utter_all, axis=0)\n\n if use_trim:\n utter_trim = []\n intervals = librosa.effects.split(utter_all, top_db=top_db)\n for interval in intervals:\n utter_trim.append(utter_all[interval[0]: interval[1]])\n utter_trim = np.concatenate(utter_trim, axis=0)\n sf.write(os.path.join(self.out_data_dir, self.name_ptn(spkr_id=spkr_id)), utter_trim, self.sr)\n info.append((os.path.join(self.out_data_dir, self.name_ptn(spkr_id=spkr_id)), utter_trim.shape[0], i))\n else:\n sf.write(os.path.join(self.out_data_dir, self.name_ptn(spkr_id=spkr_id)), utter_all, self.sr)\n info.append((os.path.join(self.out_data_dir, self.name_ptn(spkr_id=spkr_id)), utter_all.shape[0], i))\n # Concatenating succeed\n\n print(\"Writing necessary files...\")\n pickle.dump(info, f_info)\n f_info.close()\n\n info = sorted(info, key=lambda x: x[1])\n print(\"Speaker number: {}\".format(spkr_num), file=f_setting)\n print(\"Minimum length: {}\\t{}\".format(info[0][0], info[0][1]), file=f_setting)\n print(\"Maximum length: {}\\t{}\".format(info[-1][0], info[-1][1]), file=f_setting)\n print(\"sr: {}\".format(self.sr), file=f_setting)\n print(\"Use trim: {}\".format(use_trim), file=f_setting)\n print(\"top_db: {}\".format(top_db), file=f_setting)\n f_setting.close()", "def get_large_audio_transcription(path):\n # open the audio file using pydub\n r = sr.Recognizer()\n sound = AudioSegment.from_mp3(path)\n sound.export(\"tmp.wav\", format=\"wav\")\n sound = AudioSegment.from_wav('tmp.wav')\n # split audio sound where silence is 700 miliseconds or more and get chunks\n chunks = split_on_silence(sound,\n # experiment with this value for your target audio file\n min_silence_len = 500,\n # adjust this per requirement\n silence_thresh = sound.dBFS-14,\n # keep the silence for 1 second, adjustable as well\n keep_silence=500,\n )\n folder_name = \"audio-chunks\"\n # create a directory to store the audio chunks\n if not os.path.isdir(folder_name):\n os.mkdir(folder_name)\n whole_text = \"\"\n\n chapter=(str(path.split('/')[-1])).split('_')[3]\n # if chapter == '01':\n # target=2\n # else:\n # target=1\n target=2\n # process each chunk\n for i, audio_chunk in enumerate(chunks, start=1):\n # export audio chunk and save it in\n # the `folder_name` directory.\n if i==1:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened,language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. \"\n #print(chunk_filename, \":\", text)\n whole_text += text\n # return the text for all chunks detected\n else:\n chunk_filename = os.path.join(folder_name, f\"chunk{i}.wav\")\n audio_chunk.export(chunk_filename, format=\"wav\")\n # recognize the chunk\n with sr.AudioFile(chunk_filename) as source:\n audio_listened = r.record(source)\n # try converting it to text\n try:\n text = r.recognize_google(audio_listened, language=\"en-US\")\n\n except sr.UnknownValueError as e:\n print(\"Error:\", str(e))\n else:\n #text = f\"{text.capitalize()}. 
\"\n # print(chunk_filename, \":\", text)\n if chapter == '01':\n whole_text += ' ' +text\n if str(text).isalnum():\n if str(text).split(' ')[0]==' ':\n whole_text += text\n else: whole_text += ' '+text\n # return the text for all chunks detected\n\n if i==target:\n break\n if os.path.isfile('tmp.wav') :os.remove('tmp.wav')\n subprocess.run([\"rm\", \"-rf\", folder_name])\n return whole_text", "def __init__(self, origin_dir, dest_dir, val_percentage=0.2, test_percentage=0.3):\r\n self.origin_dir = origin_dir\r\n self.dest_dir = dest_dir\r\n self.val_percentage = val_percentage\r\n self.test_percentage = test_percentage\r\n\r\n self.all_wavs = [] # all wav info list\r\n self.data_index = {\"train\": [], \"valid\": [], \"test\": []}\r\n\r\n # Detail information for an audio\r\n # utt_id: audio hash id, noise_volume: , age: the age of speaker,\r\n # keyword_id: keyword int id, 你好小顺(0), 小顺小顺(1)\r\n # noise_type: 电视剧/动漫/游戏/音乐/直播/说话声/无噪声\r\n # speaker_id: speaker id\r\n # record_speed: fast,normal, slow\r\n # record_equipment: record equipment\r\n # gender: gender of speaker\r\n self.wav_desc = {\r\n \"utt_id\": \"\",\r\n \"noise_volume\": \"00db\",\r\n \"age\": \"00\",\r\n \"keyword_id\": 0,\r\n \"noise_type\": \"TV\",\r\n \"speaker_id\": \"\",\r\n \"distance\": \"\",\r\n \"record_speed\": \"\",\r\n \"record_equipment\": \"\",\r\n \"gender\": \"\"}\r\n\r\n self.keywords_dict = {\"你好小顺\": 0, \"小顺小顺\": 1}\r\n\r\n if not os.path.exists(self.dest_dir):\r\n os.mkdir(os.path.join(self.dest_dir))\r\n os.mkdir(os.path.join(self.dest_dir, \"resources\"))\r\n os.mkdir(os.path.join(self.dest_dir, \"audios\"))", "def tonify(self, tone_generator=None, verbose=False):\n if tone_generator is None:\n tone_generator = ToneGenerator('tonifyoutput.wav')\n tone_generator.file.setnchannels(len(self.sheets))\n # Find the max length (in seconds) of the data sheets\n max_length = 0.0\n for sheet in self.sheets:\n if len(sheet) > max_length:\n max_length = len(sheet)\n nframes = int(max_length * tone_generator.sample_rate)\n tone_generator.file.setnframes(nframes)\n\n tone_strs = []\n for d in self.sheets:\n if verbose:\n print \"File:\", d.data.name\n print \"Frequencies:\", self.freqs[self.sheets.index(d)]\n values = []\n tone_generator.setfreqs(self.freqs[self.sheets.index(d)])\n for i in range(0, len(d.times)):\n duration = d.durations[i]\n calls = d.calls[i]\n if verbose:\n print \"\\ttone: (%d, %d, %d) for %f seconds\" % (calls[0], calls[1],\n calls[2], duration)\n tone = tone_generator.get_tone((calls[0], calls[1], calls[2]), duration)\n values.append(str(tone))\n try:\n delta = float((d.times[i + 1] - d.times[i]).seconds)\n if float(delta) - duration < 0.0:\n silence_duration = 0.0\n else:\n silence_duration = float(delta) - duration\n except IndexError:\n break\n if verbose:\n print \"\\tsilence for\", silence_duration,\"seconds\"\n silence = tone_generator.get_silence(silence_duration)\n values.append(str(silence))\n if len(d) < max_length:\n end_silence = tone_generator.get_silence(max_length - len(d))\n values.append(str(end_silence))\n value_str = ''.join(values)\n tone_strs.append(value_str)\n \n if verbose:\n print \"Writing to file... 
(may take several minutes)\"\n combined = interleave_binarystr(tone_strs)\n tone_generator.file.writeframes(combined)\n if verbose:\n print \"Finished writing.\"\n tone_generator.close()", "def _text_write_preprocess(self):\n self.check()\n\n max_name_len = np.max([len(name) for name in self.name])\n fieldtypes = [\"U\" + str(max_name_len), \"f8\", \"f8\"]\n comp_names = self._get_lon_lat_component_names()\n frame_obj = self._get_frame_obj()\n frame_desc_str = _get_frame_desc_str(frame_obj)\n\n component_fieldnames = []\n for comp_name in comp_names:\n # This will add e.g. ra_J2000 and dec_J2000 for FK5\n component_fieldnames.append(comp_name + \"_\" + frame_desc_str)\n fieldnames = [\"source_id\"] + component_fieldnames\n stokes_names = [\"I\", \"Q\", \"U\", \"V\"]\n fieldshapes = [()] * 3\n\n if self.stokes_error is not None:\n stokes_error_names = [(f\"{k}_error\") for k in [\"I\", \"Q\", \"U\", \"V\"]]\n\n n_stokes = 0\n stokes_keep = []\n for si, total in enumerate(np.nansum(self.stokes.to(\"Jy\"), axis=(1, 2))):\n if total > 0:\n fieldnames.append(stokes_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n if self.stokes_error is not None:\n fieldnames.append(stokes_error_names[si])\n fieldshapes.append((self.Nfreqs,))\n fieldtypes.append(\"f8\")\n n_stokes += 1\n stokes_keep.append(total > 0)\n\n assert n_stokes >= 1, \"No components with nonzero flux.\"\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n fieldnames.append(\"subband_frequency\")\n else:\n fieldnames.append(\"frequency\")\n fieldtypes.append(\"f8\")\n fieldshapes.extend([(self.Nfreqs,)])\n elif self.reference_frequency is not None:\n fieldnames.extend([(\"reference_frequency\")])\n fieldtypes.extend([\"f8\"])\n fieldshapes.extend([()] * n_stokes + [()])\n if self.spectral_index is not None:\n fieldnames.append(\"spectral_index\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_rise_lst\"):\n fieldnames.append(\"rise_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n if hasattr(self, \"_set_lst\"):\n fieldnames.append(\"set_lst\")\n fieldtypes.append(\"f8\")\n fieldshapes.append(())\n\n dt = np.dtype(list(zip(fieldnames, fieldtypes, fieldshapes)))\n\n arr = np.empty(self.Ncomponents, dtype=dt)\n arr[\"source_id\"] = self.name\n\n for comp_ind, comp in enumerate(comp_names):\n arr[component_fieldnames[comp_ind]] = getattr(self.skycoord, comp).deg\n\n for ii in range(4):\n if stokes_keep[ii]:\n arr[stokes_names[ii]] = self.stokes[ii].T.to(\"Jy\").value\n if self.stokes_error is not None:\n arr[stokes_error_names[ii]] = self.stokes_error[ii].T.to(\"Jy\").value\n\n if self.freq_array is not None:\n if self.spectral_type == \"subband\":\n arr[\"subband_frequency\"] = self.freq_array.to(\"Hz\").value\n else:\n arr[\"frequency\"] = self.freq_array.to(\"Hz\").value\n elif self.reference_frequency is not None:\n arr[\"reference_frequency\"] = self.reference_frequency.to(\"Hz\").value\n if self.spectral_index is not None:\n arr[\"spectral_index\"] = self.spectral_index\n\n if hasattr(self, \"_rise_lst\"):\n arr[\"rise_lst\"] = self._rise_lst\n if hasattr(self, \"_set_lst\"):\n arr[\"set_lst\"] = self._set_lst\n\n return arr", "def test_process_mono_file(self):\n test_path = pathlib.Path(__file__).parent.absolute() / 'data/mono.wav'\n self.default_kwargs['input_file'] = test_path\n self.default_kwargs['output_file'] = pathlib.Path(self.temp_file.name)\n self.encoder = FileEncoder(**self.default_kwargs)\n self.encoder.process()", "def 
calculate_mixture_features(args):\n workspace = args.workspace\n speech_dir = args.speech_dir\n noise_dir = args.noise_dir\n data_type = args.data_type\n fs = cfg.sample_rate\n dir_name = args.dir_name\n\n fid_clean = open(speech_dir, 'r')\n lines_clean = fid_clean.readlines()\n fid_clean.close()\n\n fid_reverb = open(noise_dir, 'r')\n lines_reverb = fid_reverb.readlines()\n fid_reverb.close()\n\n for files_clean, files_reverb in zip(lines_clean, lines_reverb):\n\n files_clean = files_clean.strip('\\n')\n files_reverb = files_reverb.strip('\\n')\n\n fid = open(files_clean,'r')\n wavLines_clean = fid.readlines()\n fid.close()\n fid = open(files_reverb,'r')\n wavLines_reverb = fid.readlines()\n fid.close()\n\n cnt = 0 \n\n for wavs_clean, wavs_reverb in zip(wavLines_clean, wavLines_reverb):\n \n t1 = time.time()\n # cnt = 0\n\n wav_name_clean, wav_path_clean = wavs_clean.split()\n wav_name_reverb, wav_path_reverb = wavs_reverb.split()\n \n # Read clean speech audio. \n (speech_audio, _) = read_audio(wav_path_clean, target_fs=fs)\n \n # Read reverb speech audio. \n (noise_audio, _) = read_audio(wav_path_reverb, target_fs=fs)\n \n # Cut reverb speech to the same length as clean speech. \n if len(noise_audio) > len(speech_audio):\n noise_audio = noise_audio[0: len(speech_audio)]\n \n # Extract spectrogram. \n mixed_complx_x = calc_sp(noise_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, dir_name, \"%s.p\" % wav_name_reverb)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, wav_name_reverb]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. 
\n if cnt % 100 == 0:\n print(cnt)\n # print(mixed_complx_x)\n # print(speech_x)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def main_pkl(f_name, out_fname):\n # Read in the file\n fid = open(f_name, 'r')\n out_arr = fid.read().split('\\n')\n process_data = []\n\n # Loop over all the data\n for ele in out_arr:\n twit_split = ele.split('||')\n\n # Check if the data has the correct format (3 ||)\n if len(twit_split) != 4:\n logging.info('Twitter sample: {}'.format(ele))\n continue\n assert (len(twit_split[-1]) == 0)\n # Convert timestamp and add to process_data\n time_stamp = convert_timestamp(twit_split[-2])\n if time_stamp:\n process_data.append({'handle': twit_split[0], 'text': twit_split[1], 'time': time_stamp})\n else:\n logging.debug('Time Stamp Not Detected: {}'.format(ele))\n\n save_pickle({'dat': process_data}, out_fname)\n logging.info('Length of raw data: {} process data: {} pickle name:{}'.format(\n len(out_arr), len(process_data), out_fname))", "def main():\n # transcribe_audio()\n summarize()", "def spectre_tsv(f):\n \n skip = 0\n while True:\n try: \n wav, flux = np.loadtxt(f, skiprows = skip, unpack = True)\n \n except ValueError:\n # Si les première lignes ont un en-tête\n skip += 1\n \n else:\n break\n \n return wav,flux", "def create_wav_file(self, ):\n\n f_out = open(self.wav_file, 'w')\n u_utt2spk = open(self.utt2spk, 'w')\n for file in glob.glob(self.wav_folder+'/*.wav'):\n base = os.path.basename(file).split('.')[0]\n # write to scp file\n f_out.write(base + '\\t' + file + '\\n')\n u_utt2spk.write(base + '\\t' + 'tts' + '\\n')", "def process_recognition(result, filename, output_directory, lexical):\n if result.reason == speechsdk.ResultReason.RecognizedSpeech:\n if lexical:\n text = f\"{format(result.text)}\\t{json.loads(result.json)['NBest'][0]['Lexical']}\"\n else:\n text = f\"{format(result.text)}\"\n logging.info(f\"[INFO] - Recognition successful: {filename} -> {result.text}\")\n elif result.reason == speechsdk.ResultReason.NoMatch:\n logging.warning(filename + \"\\t\" + f\"No speech could be recognized: {result.no_match_details}\")\n text = \"\"\n elif result.reason == speechsdk.ResultReason.Canceled:\n cancellation_details = result.cancellation_details\n logging.error(filename+\"\\t\"+ f\"Speech Recognition canceled: {cancellation_details.reason}\")\n if cancellation_details.reason == speechsdk.CancellationReason.Error:\n logging.error(f\"Error details: {cancellation_details.error_details}\")\n text = \"\"\n return text", "def main():\n # Load and prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # 
Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def get_speech(self, phrase):\n src = os.path.join(constants.CONFIG_PATH, self.voice)\n text = phrase\n\n def preprocess(syllables):\n temp = []\n for syllable in syllables:\n for p in self.punctuation:\n syllable = syllable.replace(p, \"\")\n if syllable.isdigit():\n syllable = atc.num2chinese(syllable)\n new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)\n for e in new_sounds:\n temp.append(e)\n else:\n temp.append(syllable)\n return temp\n \n if not os.path.exists(src):\n logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))\n return None\n logger.debug(\"{} 合成中...\".format(self.SLUG))\n delay = 0\n increment = 355 # milliseconds\n pause = 500 # pause for punctuation\n syllables = lazy_pinyin(text, style=pypinyin.TONE3)\n syllables = preprocess(syllables)\n \n # initialize to be complete silence, each character takes up ~500ms\n result = AudioSegment.silent(duration=500*len(text))\n for syllable in syllables:\n path = os.path.join(src, syllable+\".wav\")\n sound_file = Path(path)\n # insert 500 ms silence for punctuation marks\n if syllable in self.punctuation:\n short_silence = AudioSegment.silent(duration=pause)\n result = result.overlay(short_silence, position=delay)\n delay += increment\n continue\n # skip sound file that doesn't exist\n if not sound_file.is_file():\n continue\n segment = AudioSegment.from_wav(path)\n result = result.overlay(segment, position=delay)\n delay += increment\n\n tmpfile = ''\n with tempfile.NamedTemporaryFile() as f:\n tmpfile = f.name\n result.export(tmpfile, format=\"wav\")\n logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))\n return tmpfile", "def text_to_mp3(client: texttospeech.TextToSpeechClient,\n voice: texttospeech.VoiceSelectionParams,\n audio_config: texttospeech.AudioConfig,\n text: str,\n output_file_path: Path) -> None:\n lines = text.splitlines()\n\n logger.info(f'Synthesising {len(lines)} lines ...')\n\n output_file_log = output_file_path.parent / (output_file_path.stem + '_log.json')\n\n with output_file_path.open(mode='wb') as output_file:\n for (i, text_chunk) in enumerate(lines):\n # skip empty lines\n if len(text_chunk) > 0:\n input_text = texttospeech.SynthesisInput(text=text_chunk)\n try:\n logger.info(f'Synthesising speech for chunk `{i}`, size: `{len(text_chunk)}`')\n response = client.synthesize_speech(input=input_text, voice=voice, audio_config=audio_config)\n except Exception as e:\n # If a line could not be synthesised properly, return it along with the error message\n # It is possible that textract could not extract the text properly.\n logger.error(f'Speech synthesising failed! 
Chunk text: `{input_text}`\\nError: {e}\\n')\n _error_log = {\n 'chunk_number': i,\n 'chunk_length': len(text_chunk),\n 'chunk_text': str(text_chunk),\n 'Error message': traceback.format_exc()\n }\n with open(f'{output_file_log}', 'w') as log_out:\n json.dump(_error_log, log_out)\n continue\n output_file.write(response.audio_content)\n logger.info(f'Audio content written to `{output_file_path}`!')\n\n logger.info(f'Output saved to `{output_file_path}`')\n logger.info(f'logs at `{output_file_log}`')", "def __process_element(data):\n print('processing {}'.format(data))\n x_i = data[0]\n y_i = data[1]\n\n file_name = FeatureExtractor.get_file_name(x_i, feature_name)\n try:\n # try to load if file already exist\n np.load(out_path / file_name, allow_pickle=True)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n audio_src, _ = librosa.load(str(source_path / x_i), sr=SR_HPSS)\n # Normalize audio signal\n audio_src = librosa.util.normalize(audio_src)\n # first HPSS\n D_harmonic, D_percussive = ono_hpss(audio_src, N_FFT_HPSS_1, N_HOP_HPSS_1)\n # second HPSS\n D2_harmonic, D2_percussive = ono_hpss(D_percussive, N_FFT_HPSS_2, N_HOP_HPSS_2)\n\n # compute melgram\n mel_harmonic = log_melgram(D2_harmonic, SR_HPSS, N_FFT_HPSS_2, N_HOP_HPSS_2, N_MELS_HPSS)\n mel_percussive = log_melgram(D2_percussive, SR_HPSS, N_FFT_HPSS_2, N_HOP_HPSS_2, N_MELS_HPSS)\n # concat\n mel_total = np.vstack((mel_harmonic, mel_percussive))\n\n # this is kind-of standard\n FeatureExtractor.save_feature(mel_total, feature_name, out_path, x_i, y_i, new_labels)", "def main(args):\n print('loading {}'.format(args.stem_path))\n y, fs = librosa.load(args.stem_path, sr=44100)\n notes = mono_anal(y, fs)\n jam = output_to_jams(y, fs, notes, args)\n jam_path = args.stem_path.split('.')[0]+'.jams'\n jam.save(jam_path)\n print('jams file generated')\n return 0", "def preprocess_sound(data, sample_rate):\n # Convert to mono.\n\n if len(data.shape) > 1:\n data = np.mean(data, axis=1)\n # Resample to the rate assumed by VGGish.\n if sample_rate != params.SAMPLE_RATE:\n data = resampy.resample(data, sample_rate, params.SAMPLE_RATE)\n\n # Compute log mel spectrogram features.\n log_mel = mel_features.log_mel_spectrogram(\n data,\n audio_sample_rate=params.SAMPLE_RATE,\n log_offset=params.LOG_OFFSET,\n window_length_secs=params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=params.NUM_MEL_BINS,\n lower_edge_hertz=params.MEL_MIN_HZ,\n upper_edge_hertz=params.MEL_MAX_HZ)\n\n # Frame features into examples.\n features_sample_rate = 1.0 / params.STFT_HOP_LENGTH_SECONDS\n example_window_length = int(round(\n params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))\n example_hop_length = int(round(\n params.EXAMPLE_HOP_SECONDS * features_sample_rate))\n log_mel_examples = mel_features.frame(\n log_mel,\n window_length=example_window_length,\n hop_length=example_hop_length)\n return log_mel_examples", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def process_meter():\n\n if not os.path.isdir(out_dir):\n os.mkdir(out_dir)\n\n allRhymeInfo = {}\n\n for filename in os.listdir(in_dir):\n if not filename.endswith(\".txt\"):\n 
continue\n\n fileRhymeInfo = {\n \"scheme\": [],\n \"endWords\": [],\n \"tidied\": [],\n }\n\n with open(in_dir + filename, \"r\", encoding=\"utf-8\") as f:\n txt = f.read()\n\n # Assume standard line break structure.\n # Split on \"\\n\\n\"\n segs = txt.split(\"\\n\\n\")\n\n # Print to a new file\n with open(out_dir + filename, \"w\", encoding=\"utf-8\") as f:\n for seg in segs:\n scheme, rhymingWords, tidiedLines = findRhymes(seg)\n\n # add info to file's dictionary to be returned\n fileRhymeInfo[\"segs\"].append(seg)\n fileRhymeInfo[\"scheme\"].append(scheme)\n fileRhymeInfo[\"endWords\"].append(rhymingWords)\n fileRhymeInfo[\"tidied\"].append(tidiedLines)\n\n # write info out to file\n for i, ln in enumerate(tidiedLines):\n # First line in seg is header; no rhymes\n if i == 0:\n print(ln, file=f)\n continue\n\n # Later lines: print original line, plus a rhyme tag\n w = rhymingWords[i-1]\n s = scheme[i-1]\n\n print(f\"{ln:45s} [{w:^12s}]: ({s})\", file=f)\n print(file=f)\n\n allRhymeInfo[filename] = fileRhymeInfo\n return allRhymeInfo", "def pre_process(self):\n t1_start = perf_counter()\n wav_arr_raw = np.array(self.raw_data['spectrum_0'].attrs['wavelengths'])\n self.wavelengths = wav_arr_raw\n self.back_spectra_arr = np.array(self.raw_data['spectrum_0'].attrs['background'])\n\n corr_data = []\n times_proc = []\n\n # extract reference point for 0 seconds\n time_ref = str(self.raw_data['spectrum_0'].attrs['creation_timestamp'])\n\n # spectrometer adds 'b' and quotation marks to timestamps that must be removed\n # some spectra are taken on X.000000s which does not have a .%f component - use try and except\n try:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time_ref = datetime.strptime((time_ref.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n\n print('Measurement was started at {}, \\n normalising times and applying a background correction \\n'.format(time_ref))\n\n # applies background correction\n for counter, spectra in enumerate(self.raw_data.keys()):\n corr_data.append(self.raw_data[spectra]-self.back_spectra_arr)\n time = str(self.raw_data[spectra].attrs['creation_timestamp'])\n try:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S.%f\")\n except ValueError:\n time = datetime.strptime((time.replace('b','')).replace('\\'',''),\"%Y-%m-%dT%H:%M:%S\")\n deltatime = time - time_ref\n times_proc.append(deltatime.total_seconds())\n\n self.times = np.array(times_proc)\n print('Measurement contains {} spectra with {} wavelengths \\n'.format(len(self.times),len(self.wavelengths)))\n\n # data is stored as a pd Dataframe with elapsed times as indices and wavelengths as columns\n pre_proc_data = pd.DataFrame(corr_data, index = self.times, columns = self.wavelengths)\n\n # data may be disordered in time when iterated through\n # sort the data by elapsed time\n self.pre_proc_data = pre_proc_data.sort_index(axis=0)\n self.times = np.sort(self.times)\n\n t1_stop = perf_counter()\n print(\"Elapsed time for pre-processing:\", t1_stop-t1_start)\n\n return self.pre_proc_data" ]
[ "0.616037", "0.5901794", "0.58221334", "0.5771103", "0.5722", "0.57093835", "0.570432", "0.5699671", "0.5650356", "0.5633378", "0.56267637", "0.5594447", "0.55906874", "0.5556254", "0.5471796", "0.54641896", "0.5443846", "0.5442978", "0.5431923", "0.53901076", "0.5388333", "0.53690207", "0.5340682", "0.53231627", "0.53191346", "0.53118163", "0.529449", "0.52919084", "0.52892315", "0.5276879" ]
0.62324125
0
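The positive document in the record above chains external SPTK/WORLD command-line tools with an in-repo `audio` module. As a rough, much-reduced sketch of the same preprocessing shape (load a wav, compute a spectrogram, save it as `.npy`, return a metadata tuple for a `train.txt` row) using only public librosa/numpy APIs — the sample rate, FFT/hop sizes, mel count, and filenames here are illustrative assumptions, not the record's actual hparams:

```python
import os
import numpy as np
import librosa

def process_utterance(out_dir, basename, wav_path, text,
                      sample_rate=22050, n_fft=1024, hop_length=256, n_mels=80):
    # Load audio and compute a log-mel spectrogram with shape (frames, n_mels).
    wav, _ = librosa.load(wav_path, sr=sample_rate)
    mel = librosa.feature.melspectrogram(y=wav, sr=sample_rate, n_fft=n_fft,
                                         hop_length=hop_length, n_mels=n_mels)
    log_mel = librosa.power_to_db(mel).T.astype(np.float32)

    # Persist the features and return one row to be written into train.txt.
    mel_filename = "mel-{}.npy".format(basename)
    np.save(os.path.join(out_dir, mel_filename), log_mel, allow_pickle=False)
    return (mel_filename, log_mel.shape[0], text)
```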
Prototype of a function computing the LPT displacement. Returns output TensorFlow and Mesh TensorFlow tensors
def lpt_prototype(mesh, nc=FLAGS.nc, bs=FLAGS.box_size, batch_size=FLAGS.batch_size, a0=FLAGS.a0, a=FLAGS.af, nsteps=FLAGS.nsteps): stages = np.linspace(a0, a, nsteps, endpoint=True) klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0] plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1] ipklin = iuspline(klin, plin) # Define the named dimensions # Parameters of the small scales decomposition n_block_x = FLAGS.nx n_block_y = FLAGS.ny n_block_z = 1 halo_size = FLAGS.hsize if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z): new_size = int(0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z)) print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size)) halo_size = new_size # Parameters of the large scales decomposition downsampling_factor = FLAGS.dsample lnc = nc // 2**downsampling_factor # fx_dim = mtf.Dimension("nx", nc) fy_dim = mtf.Dimension("ny", nc) fz_dim = mtf.Dimension("nz", nc) tfx_dim = mtf.Dimension("tx", nc) tfy_dim = mtf.Dimension("ty", nc) tfz_dim = mtf.Dimension("tz", nc) # Dimensions of the low resolution grid x_dim = mtf.Dimension("nx_lr", lnc) y_dim = mtf.Dimension("ny_lr", lnc) z_dim = mtf.Dimension("nz_lr", lnc) tx_dim = mtf.Dimension("tx_lr", lnc) ty_dim = mtf.Dimension("ty_lr", lnc) tz_dim = mtf.Dimension("tz_lr", lnc) nx_dim = mtf.Dimension('nx_block', n_block_x) ny_dim = mtf.Dimension('ny_block', n_block_y) nz_dim = mtf.Dimension('nz_block', n_block_z) sx_dim = mtf.Dimension('sx_block', nc // n_block_x) sy_dim = mtf.Dimension('sy_block', nc // n_block_y) sz_dim = mtf.Dimension('sz_block', nc // n_block_z) k_dims = [tx_dim, ty_dim, tz_dim] batch_dim = mtf.Dimension("batch", batch_size) pk_dim = mtf.Dimension("npk", len(plin)) pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim]) # Compute necessary Fourier kernels kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False) kx = mtf.import_tf_tensor(mesh, kvec[0].squeeze().astype('float32'), shape=[tfx_dim]) ky = mtf.import_tf_tensor(mesh, kvec[1].squeeze().astype('float32'), shape=[tfy_dim]) kz = mtf.import_tf_tensor(mesh, kvec[2].squeeze().astype('float32'), shape=[tfz_dim]) kv = [ky, kz, kx] # kvec for low resolution grid kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False) kx_lr = mtf.import_tf_tensor(mesh, kvec_lr[0].squeeze().astype('float32') / 2**downsampling_factor, shape=[tx_dim]) ky_lr = mtf.import_tf_tensor(mesh, kvec_lr[1].squeeze().astype('float32') / 2**downsampling_factor, shape=[ty_dim]) kz_lr = mtf.import_tf_tensor(mesh, kvec_lr[2].squeeze().astype('float32') / 2**downsampling_factor, shape=[tz_dim]) kv_lr = [ky_lr, kz_lr, kx_lr] # kvec for high resolution blocks padded_sx_dim = mtf.Dimension('padded_sx_block', nc // n_block_x + 2 * halo_size) padded_sy_dim = mtf.Dimension('padded_sy_block', nc // n_block_y + 2 * halo_size) padded_sz_dim = mtf.Dimension('padded_sz_block', nc // n_block_z + 2 * halo_size) kvec_hr = flowpm.kernels.fftk([ nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size, nc // n_block_z + 2 * halo_size ], symmetric=False) kx_hr = mtf.import_tf_tensor(mesh, kvec_hr[0].squeeze().astype('float32'), shape=[padded_sx_dim]) ky_hr = mtf.import_tf_tensor(mesh, kvec_hr[1].squeeze().astype('float32'), shape=[padded_sy_dim]) kz_hr = mtf.import_tf_tensor(mesh, kvec_hr[2].squeeze().astype('float32'), shape=[padded_sz_dim]) kv_hr = [ky_hr, kz_hr, kx_hr] shape = [batch_dim, fx_dim, fy_dim, fz_dim] lr_shape = [batch_dim, x_dim, y_dim, z_dim] hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, 
sy_dim, sz_dim] part_shape = [batch_dim, fx_dim, fy_dim, fz_dim] # Begin simulation initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv) # Reshaping array into high resolution mesh field = mtf.slicewise(lambda x: tf.expand_dims( tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc], output_dtype=tf.float32, output_shape=hr_shape, name='my_reshape', splittable_dims=lr_shape[:-1] + hr_shape[1:4] + part_shape[1:3]) for block_size_dim in hr_shape[-3:]: field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name) for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]): field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size) field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)]) high = field low = mesh_utils.downsample(field, downsampling_factor, antialias=True) low = mtf.reshape(low, low.shape[:-1]) high = mtf.reshape(high, high.shape[:-1]) for block_size_dim in hr_shape[-3:]: low = mtf.slice(low, halo_size // 2**downsampling_factor, block_size_dim.size // 2**downsampling_factor, block_size_dim.name) # Hack usisng custom reshape because mesh is pretty dumb low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low], output_dtype=tf.float32, output_shape=lr_shape, name='my_dumb_reshape', splittable_dims=lr_shape[:-1] + hr_shape[:4]) state = mtfpm.lpt_init( low, high, 0.1, kv_lr, kv_hr, halo_size, hr_shape, lr_shape, part_shape[1:], downsampling_factor=downsampling_factor, antialias=True, ) # Here we can run our nbody final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor) # paint the field final_field = mtf.zeros(mesh, shape=hr_shape) for block_size_dim in hr_shape[-3:]: final_field = mtf.pad(final_field, [halo_size, halo_size], block_size_dim.name) final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size) # Halo exchange for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]): final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim, halo_size) # Remove borders for block_size_dim in hr_shape[-3:]: final_field = mtf.slice(final_field, halo_size, block_size_dim.size, block_size_dim.name) #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim]) # Hack usisng custom reshape because mesh is pretty dumb final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field], output_dtype=tf.float32, output_shape=[batch_dim, fx_dim, fy_dim, fz_dim], name='my_dumb_reshape', splittable_dims=part_shape[:-1] + hr_shape[:4]) return initc, final_field ##
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(input_legs, tensor_list, leg_list, ent_list, cutoff):\n # find tensor common to all input legs\n input_inds = [set([leg_list.index(legs) for legs in leg_list\n if legs.__contains__(qs)]) for qs in input_legs]\n ind = list(set.intersection(*input_inds))[0]\n svd_ten = tensor_list[ind] # tensor to decompose\n svd_legs = leg_list[ind] # all tensor legs\n start_shape = svd_ten.shape # index dimensions\n # permute tensor with leg and index data\n top_ind = [svd_legs.index(qs) for qs in input_legs]\n top_ind.sort() # sort input indicies for consistency\n others = list(svd_legs)\n [others.remove(qs) for qs in input_legs]\n bot_ind = [svd_legs.index(legs) for legs in others]\n top_legs = [svd_legs[i] for i in top_ind]\n bot_legs = [svd_legs[i] for i in bot_ind]\n # Search for unused leg number\n emptys = [elements.any() == 0 for elements in ent_list[1:]]\n if any(emptys):\n new_leg = emptys.index(True)+1\n else:\n new_leg = max(list(itertools.chain.from_iterable(leg_list)))+1\n ##### permute legs of svd_ten and update svd_inds as we go along\n top_js = list(top_ind)\n top_js.sort(reverse = True)\n svd_ind = list(range(len(start_shape))) # number all tensor legs\n for element in top_js:\n for i in list(range(svd_ind.index(element),0,-1)):\n svd_ind[i], svd_ind[i-1] = svd_ind[i-1], svd_ind[i]\n svd_ten = svd_ten.swapaxes(i,i-1)\n # outgoing leg dimensions\n top_dim = np.prod([start_shape[top_ind[i]] for i in range(len(top_ind))])\n bot_dim = np.prod([start_shape[bot_ind[i]] for i in range(len(bot_ind))])\n # used to shape post SVD U and V\n top_shape = [start_shape[shapes] for shapes in top_ind]\n bot_shape = [start_shape[shapes] for shapes in bot_ind]\n # singular value decomposition\n F = svd_ten.reshape(top_dim, bot_dim)\n # print(top_dim, bot_dim, input_legs)\n # pass statement fixing top_dim = 0 ??\n if top_dim == 0 or bot_dim ==0:\n print('This should not happen!!!')\n pass\n if top_dim * bot_dim < 1024:\n U, S, V = np.linalg.svd(F)\n U, S, V = trun(U,S,V,cutoff)\n else:\n if (top_dim==bot_dim):\n # expand both matrux dimensions by 1\n print('dimensionality equality')\n w = np.eye(top_dim,top_dim+1)\n m = np.tensordot(np.tensordot(w,F,[[0],[0]]),w,[[1],[0]])\n U, S, V = ssl.svds(m, top_dim)\n # undo isometry on the decomposed tensors\n U = np.tensordot(w,U,[[1],[0]])\n V = np.tensordot(w,V,[[1],[1]])\n else:\n #expand smaller dimension, be it the top (U) or bottom (V)\n m_dim = min(top_dim, bot_dim) # smallest dimension\n w = np.eye(m_dim,m_dim+1) # dimension increasing isometry\n if m_dim == top_dim:\n U, S, V = ssl.svds(np.tensordot(w,F,[[0],[0]]), m_dim) # sparse SVD\n U = np.tensordot(w,U,[[1],[0]])\n else:\n U, S, V = ssl.svds(np.tensordot(w,F,[[0],[1]]).transpose()\n , m_dim) # sparse SVD\n print(V.shape, w.shape)\n V = np.tensordot(w,V,[[1],[1]])\n # print('SVD done. 
New virtual leg # ' + str(new_leg))\n U,S,V = trun(U,S,V,cutoff)\n leg_list[ind] = top_legs + [new_leg]\n tensor_list[ind] = U.reshape(tuple(top_shape + [len(S)]))\n leg_list.append([new_leg] + bot_legs)\n tensor_list.append(np.dot(np.diag(S),V).reshape(tuple([len(S)] + bot_shape)))\n if len(ent_list) == new_leg: # append Schmidt values at new_leg index\n ent_list.append(S)\n elif (ent_list[new_leg] == [0])[0]:\n ent_list[new_leg] = S\n return new_leg", "def fc_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 1200, activation=tf.nn.tanh)\n d2 = tf.layers.dense(d1, 1200, activation=tf.nn.tanh)\n d3 = tf.layers.dense(d2, 1200, activation=tf.nn.tanh)\n d4 = tf.layers.dense(d3, np.prod(output_shape))\n return tf.reshape(d4, shape=[-1] + output_shape)", "def train(self, total_time):\n\n start_time = time.time()\n time_so_far = 0\n\n while time_so_far < total_time:\n\n with tf.GradientTape(persistent=True) as tape:\n\n # DATA LOSS\n data_idxs_batch = np.random.choice(self.pdf_dataset.shape[0], self.batch_size)\n data_batch = self.pdf_dataset[data_idxs_batch, :]\n data_batch = tf.Variable(data_batch, dtype = tf.float32)\n data_xyts = data_batch[:, :3]\n data_target = data_batch[:, 3:4]\n data_p_out = self.net_p(data_xyts)\n\n data_loss = self.data_weight*self.MSE(data_p_out, data_target)\n\n\n # PDE LOSS\n idxs_batch = np.random.choice(self.PDE_dataset.shape[0], self.batch_size)\n pde_batch = self.PDE_dataset[idxs_batch, :]\n pde_batch = tf.Variable(pde_batch, dtype = tf.float32)\n residual = FP_2D(self.net_p, self.net_D, self.net_U, pde_batch, tape)\n\n target = tf.zeros(residual.shape, dtype = tf.float32)\n pde_loss = self.pde_weight*self.MSE(residual, target)\n\n\n # BC LOSS\n idxs_batch = np.random.choice(self.BC_dataset.shape[0], self.batch_size)\n BC_batch = self.BC_dataset[idxs_batch, :]\n BC_batch = tf.Variable(BC_batch, dtype = tf.float32)\n p_out = self.net_p(BC_batch)\n target = tf.fill(p_out[:, 0:1].shape, np.float32(0))\n BC_loss = self.BC_weight*self.MSE(p_out, target)\n\n\n # NORMALIZING LOSS\n segment_area = (self.xlims[1] - self.xlims[0])*(self.ylims[1] - self.ylims[0])/(self.dims**2)\n xyts = get_random_norm_slice(self.xlims, self.ylims, self.tlims, self.dims)\n xyts = tf.Variable(xyts, dtype = tf.float32)\n p_out = self.net_p(xyts)\n pdf_integral = segment_area*tf.math.reduce_sum(p_out)\n norm_loss = self.norm_weight*(pdf_integral - 1.)**2\n\n\n total_loss = data_loss + pde_loss + BC_loss + norm_loss # note the weightings are applied before this\n\n\n trainables = self.net_p.trainable_variables + self.net_D.trainable_variables + self.net_U.trainable_variables\n\n grads = tape.gradient(total_loss, trainables)\n\n del tape\n\n self.optimizer.apply_gradients(zip(grads, trainables))\n\n\n time_so_far_prev = time_so_far\n time_so_far = (time.time() - start_time)/3600.\n\n if self.iterations % 10 == 0:\n\n self.total_losses.append(total_loss.numpy())\n self.data_losses.append(data_loss.numpy())\n self.pde_losses.append(pde_loss.numpy())\n self.BC_losses.append(BC_loss.numpy())\n self.norm_losses.append(norm_loss)\n\n tf.print('It: %d, Total loss: %.3e, Data loss: %.3e, PDE loss: %.3e, BC loss: %.3e, norm_loss: %.3e, Time: %.2fh' \\\n % (self.iterations, total_loss, data_loss, pde_loss, BC_loss, norm_loss, time_so_far))\n sys.stdout.flush()\n\n\n self.iterations += 1", "def _eunn_loop(state, capacity, diag_vec_list, off_vec_list, diag, fft):\n i = 0\n def layer_tunable(x, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec 
= off_vec_list.read(i)\n\n diag = tf.multiply(x, diag_vec)\n off = tf.multiply(x, off_vec)\n\n def even_input(off, size):\n\n def even_s(off, size):\n off = tf.reshape(off, [-1, size//2, 2])\n off = tf.reshape(tf.reverse(off, [2]), [-1, size])\n return off\n\n def odd_s(off, size):\n off, helper = tf.split(off, [size-1, 1], 1)\n size -= 1\n off = even_s(off, size)\n off = tf.concat([off, helper], 1)\n return off\n\n off = tf.cond(tf.equal(tf.mod(size, 2), 0), lambda: even_s(off, size), lambda: odd_s(off, size))\n return off\n\n def odd_input(off, size):\n helper, off = tf.split(off, [1, size-1], 1)\n size -= 1\n off = even_input(off, size)\n off = tf.concat([helper, off], 1)\n return off\n\n size = int(off.get_shape()[1])\n off = tf.cond(tf.equal(tf.mod(i, 2), 0), lambda: even_input(off, size), lambda: odd_input(off, size))\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n def layer_fft(state, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n diag = tf.multiply(state, diag_vec)\n off = tf.multiply(state, off_vec)\n\n hidden_size = int(off.get_shape()[1])\n # size = 2**i\n dist = capacity - i\n normal_size = (hidden_size // (2**dist)) * (2**(dist-1))\n normal_size *= 2\n extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist-1)))\n hidden_size -= normal_size\n\n def modify(off_normal, dist, normal_size):\n off_normal = tf.reshape(tf.reverse(tf.reshape(off_normal, [-1, normal_size//(2**dist), 2, (2**(dist-1))]), [2]), [-1, normal_size])\n return off_normal\n\n def do_nothing(off_normal):\n return off_normal\n\n off_normal, off_extra = tf.split(off, [normal_size, hidden_size], 1)\n off_normal = tf.cond(tf.equal(normal_size, 0), lambda: do_nothing(off_normal), lambda: modify(off_normal, dist, normal_size))\n helper1, helper2 = tf.split(off_extra, [hidden_size-extra_size, extra_size], 1)\n off_extra = tf.concat([helper2, helper1], 1)\n off = tf.concat([off_normal, off_extra], 1)\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n if fft:\n layer_function = layer_fft\n else:\n layer_function = layer_tunable\n output, _ = tf.while_loop(lambda state, i: tf.less(i, capacity), layer_function, [state, i])\n\n if not diag is None:\n output = tf.multiply(output, diag)\n\n\n return output", "def main():\n # lr_decay = 0.5\n # decay_every = 100\n args = get_arguments()\n \n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n \n tf.set_random_seed(args.random_seed)\n \n coord = tf.train.Coordinator()\n \n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_list,\n input_size,\n args.random_scale,\n args.random_mirror,\n args.ignore_label,\n IMG_MEAN,\n coord)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n # config.allow_soft_placement = True\n # config.intra_op_parallelism_threads = 1\n sess = tf.Session(config = config)\n net = unext(image_batch, is_train = True, reuse = False, n_out = NUM_CLASSES)\n \n # Predictions: ignoring all predictions with labels greater or equal than n_classes\n raw_output = net.outputs\n raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])\n label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]\n raw_gt = tf.reshape(label_proc, [-1,])\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)\n gt = tf.cast(tf.gather(raw_gt, indices), dtype = tf.int32)\n prediction = tf.gather(raw_prediction, indices)\n \n main_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = prediction, labels = gt)\n\n t_vars = tf.trainable_variables()\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in t_vars if 'kernel' in v.name]\n #reduced_loss = 0.5 * tf.reduce_mean(main_loss) + generalised_dice_loss(prediction, gt) + tf.add_n(l2_losses)\n reduced_loss = tf.reduce_mean(main_loss) + tf.add_n(l2_losses)\n\n # Processed predictions: for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, dimension = 3)\n pred = tf.expand_dims(raw_output_up, dim = 3)\n \n # Image summary.\n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n loss_summary = tf.summary.scalar('TotalLoss', reduced_loss)\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n\n # Using Poly learning rate policy \n base_lr = tf.constant(args.learning_rate)\n step_ph = tf.placeholder(dtype=tf.float32, shape=())\n learning_rate = tf.train.exponential_decay(base_lr, step_ph, args.num_steps, args.power)\n\n lr_summary = tf.summary.scalar('LearningRate', learning_rate)\n #train_op = tf.train.MomentumOptimizer(learning_rate, args.momentum).minimize(reduced_loss, var_list = t_vars)\n train_op = tf.train.AdamOptimizer(learning_rate).minimize(reduced_loss, var_list = t_vars)\n init = tf.global_variables_initializer()\n sess.run(init)\n \n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver(var_list = tf.global_variables(), max_to_keep = 10)\n\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n if ckpt and ckpt.model_checkpoint_path:\n #restore_vars = list([t for t in tf.global_variables() if not 'uconv1' in t.name])\n loader = tf.train.Saver(var_list = tf.global_variables())\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n load_step = 0\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord = coord, sess = sess)\n\n # Iterate over training steps.\n save_summary_every = 10\n for step in range(args.num_steps):\n start_time = time.time()\n \n feed_dict = {step_ph: step}\n if not step % 
args.save_pred_every == 0:\n loss_value, _, l_summary, lr_summ = sess.run([reduced_loss, train_op, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n elif step % args.save_pred_every == 0:\n loss_value, _, summary, l_summary, lr_summ = sess.run([reduced_loss, train_op, total_summary, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n save(saver, sess, args.snapshot_dir, step)\n summary_writer.add_summary(summary, step)\n\n if step % save_summary_every == 0:\n \n summary_writer.add_summary(l_summary, step)\n summary_writer.add_summary(lr_summ, step)\n \n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n \n coord.request_stop()\n coord.join(threads)", "def deepnn(self, x, ind):\n '''\n print(x.shape)\n indices = ind\n self.ind = ind\n depth = 18\n oh = tf.one_hot(indices, depth)\n self.oh = oh\n\n s = tf.matmul(oh, self.scale)\n self.s = s\n \n '''\n \n \n #h_layer1 = tf.layers.dense(inputs=x, units=1, name='h0', activation=tf.nn.relu)\n #h_layer0 = tf.layers.dense(inputs=x, units=100, name='h0', activation=tf.nn.relu)\n #h_layer0_drop = tf.nn.dropout(h_layer0, self.keep_prob, name='h0_drop')\n #h_layer1 = tf.layers.dense(inputs = h_layer0, units = 1, name = 'h1', activation = tf.nn.relu)\n \n stop1 = int(self._xdims/3)\n x1 = x[:,:stop1]\n x2 = x[:,stop1:2*stop1]\n x3 = x[:,2*stop1:3*stop1]\n #x4 = x[:,3*stop1:4*stop1]\n #x5 = x[:,4*stop1:]\n d1 = tf.layers.dense(inputs=x1, units=100, name='h0_1', activation=tf.nn.relu)\n d1_layer0_drop = tf.nn.dropout(d1, self.keep_prob, name='d1_drop')\n d2 = tf.layers.dense(inputs=x2, units=100, name='h0_2', activation=tf.nn.relu)\n d2_layer0_drop = tf.nn.dropout(d2, self.keep_prob, name='d2_drop')\n d3 = tf.layers.dense(inputs=x3, units=100, name='h0_3', activation=tf.nn.relu)\n d3_layer0_drop = tf.nn.dropout(d3, self.keep_prob, name='d3_drop')\n #d4 = tf.layers.dense(inputs=x2, units=100, name='h0_4', activation=tf.nn.relu)\n #d4_layer0_drop = tf.nn.dropout(d4, self.keep_prob, name='d2_drop')\n #d5 = tf.layers.dense(inputs=x3, units=100, name='h0_5', activation=tf.nn.relu)\n #d5_layer0_drop = tf.nn.dropout(d5, self.keep_prob, name='d3_drop')\n \n layer_2 = tf.concat([d1,d2,d3],1)#,d4,d5], 1)\n h_layer1 = tf.layers.dense(inputs=layer_2, units=1, name='output', activation=tf.nn.relu)\n \n '''\n out = tf.multiply(s,h_layer1)\n self.out = out\n self.h0 = h_layer1\n return out\n '''\n return h_layer1", "def deconv_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)\n d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)\n d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 64])\n d3 = tf.layers.conv2d_transpose(\n inputs=d2_reshaped,\n filters=64,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d4 = tf.layers.conv2d_transpose(\n inputs=d3,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d5 = tf.layers.conv2d_transpose(\n inputs=d4,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n d6 = tf.layers.conv2d_transpose(\n inputs=d5,\n filters=output_shape[2],\n kernel_size=4,\n strides=2,\n padding=\"same\",\n )\n return tf.reshape(d6, [-1] + output_shape)", "def _GetHostTrainLoop(\n self, strategy: tf.distribute.TPUStrategy\n ) -> Callable[..., Any]:\n replicas_per_host = strategy.extended.num_replicas_per_host\n\n def Split(batch, 
replicas_per_host, axis=0):\n \"\"\"Splits a NestedMap into replicas_per_host pieces.\"\"\"\n def _SplitFn(t):\n return tf.sparse.split if isinstance(t, tf.SparseTensor) else tf.split\n\n split = batch.Transform(lambda t: _SplitFn(t)(t, replicas_per_host, axis))\n return [\n nest.map_structure_up_to(batch, lambda t: t[i], split) # pylint: disable=cell-var-from-loop\n for i in range(replicas_per_host)\n ]\n\n def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n \"\"\"Fetch and shard one batch per attached device.\"\"\"\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )\n\n def _Step(batch: py_utils.NestedMap):\n \"\"\"A single forward/backward step.\n\n Processes the given input batch and updates the distributed metrics\n accumulator. We use FProp (instead of FPropDefaultTheta) and\n _BPropForVariables (instead of BProp) in order to permit the tf.distribute\n library to handle threading values across devices.\n\n Args:\n batch: NestedMap of input batch data.\n \"\"\"\n with tf.name_scope('tpu_train'):\n with py_utils.GradientTape(persistent=True):\n batch.Update(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Dequeue(batch)\n )\n metrics_dict, _ = self.task.FPropDefaultTheta(batch)\n # py_utils.ComputeGradientsSimple() needs to access the tape, so BProp\n # needs to be within the GradientTape context.\n self.task.BProp()\n\n self._metrics_dict_structure = metrics_dict\n self._metrics_mgr.AccumulateStepMetrics(metrics_dict)\n\n @tf.function\n def _TpuFunction():\n \"\"\"Runs several training steps and returns a flattened metrics list.\"\"\"\n self._metrics_mgr.ResetState()\n\n for _ in tf.range(self._steps_per_loop):\n batch = _GetShardedBatch()\n # Note: running the enqueue in strategy.run() could potentially cause\n # deadlock and cause the job to hang. 
Here we run it outside.\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.Enqueue(batch)\n strategy.run(_Step, args=(batch,))\n\n return self._metrics_mgr.FinalizeMetricsWithStructure(\n self._metrics_dict_structure\n )\n\n # Trace the train function so it can create the optimizer slot vars and save\n # them at step 0.\n return _TpuFunction.get_concrete_function()", "def test_decoder(latent_tensor, output_shape, is_training=False):\n del is_training\n output = tf.layers.dense(latent_tensor, np.prod(output_shape), name=\"d1\")\n return tf.reshape(output, shape=[-1] + output_shape)", "def TEM_Train(X_feature,Y_action,Y_startend,LR,istrain,config): \n net=tf.layers.conv1d(inputs=X_feature,filters=512,kernel_size=3,strides=1,padding='same')\n # net=tf.layers.batch_normalization(net,training=istrain)\n net=tf.nn.relu(net)\n net=tf.layers.conv1d(inputs=net,filters=512,kernel_size=3,strides=1,padding='same')\n # net=tf.layers.batch_normalization(net,training=istrain)\n net=tf.nn.relu(net)\n net=0.1*tf.layers.conv1d(inputs=net,filters=2,kernel_size=1,strides=1,padding='same')\n net=tf.nn.sigmoid(net)\n\n anchors_action = net[:,:,0]\n # print(\"anchors_action: \", anchors_action)\n anchors_startend = net[:,:,1]\n \n loss=TEM_loss(anchors_action,anchors_startend,Y_action,Y_startend,config)\n\n TEM_trainable_variables=tf.trainable_variables()\n l2 = 0.001 * sum(tf.nn.l2_loss(tf_var) for tf_var in TEM_trainable_variables)\n cost = loss[\"loss_action\"]+loss[\"loss_startend\"]+l2\n loss['l2'] = l2\n loss['cost'] = cost\n # optimizer=tf.train.AdamOptimizer(learning_rate=LR).minimize(cost,var_list=TEM_trainable_variables)\n opt = tf.train.AdamOptimizer(learning_rate=LR)\n grads = opt.compute_gradients(cost, var_list=TEM_trainable_variables)\n gs = []\n for i, (g, v) in enumerate(grads):\n \tif g is not None:\n \t\tgrads[i] = (tf.clip_by_norm(g, 15), v)\n \t\tgs.append(g)\n optimizer = opt.apply_gradients(grads)\n return optimizer,loss,TEM_trainable_variables", "def backward_D(self):\n base_function._unfreeze(self.net_D)\n #print(self.input_P2.shape, self.img_gen.shape)\n self.loss_dis_img_gen = self.backward_D_basic(self.net_D, self.input_P2, self.img_gen)", "def diff_effector2(state, th0, alpha, beta, beta_p, p, d):\n dt_state = np.zeros_like(state)\n #print(len(state))\n if alpha == 1:\n for j in range(len(state)):\n if j == 0:\n dt_state[j] = p*beta*th0+2*beta_p*state[-1]-(beta_p+d[\"d_eff\"])*state[j]\n else:\n dt_state[j] = beta_p*state[j-1]- (beta_p+d[\"d_eff\"])*state[j] \n \n else: \n for j in range(len(state)):\n if j == 0:\n dt_state[j] = p*beta*th0 - (beta+d[\"d_prec\"])*state[j] \n elif j < (alpha-1):\n dt_state[j] = beta*state[j-1]-(beta+d[\"d_prec\"])*state[j] \n elif j == (alpha-1):\n # the problem with the 4 and 2 is that since differentiation takes 1 day it should divide twice giving 4 cells\n # however, if it has arrived in the final states if should double every half day\n dt_state[j] = beta*state[j-1]+2*beta_p*state[-1] - (d[\"d_eff\"]+beta_p)*state[j] \n\n else:\n assert j >= alpha\n dt_state[j] = beta_p*state[j-1]- (beta_p+d[\"d_eff\"])*state[j] \n \n return dt_state", "def process_parameters(task, addr_space, model, export_path, alpha):\n\n global recovered_c_structs\n global recovered_python_objects\n global false_positives\n global hyperparameters\n\n all_layers = []\n shape = OrderedDict()\n name_to_weights = {}\n tot_num_elements = 0\n tensor_offsets = {}\n\n all_layers = bfs(model)\n \n for path, layer in all_layers:\n layer_dict = layer.in_dict.dereference().val\n layer_name = 
layer.ob_type.dereference().name\n recovered_python_objects += 1\n\n print\n print path, layer.ob_type.dereference().name\n\n if \"Dropout\" in layer_name:\n shape[path] = layer_dict['p'] # dropout rate\n recovered_python_objects += 1\n hyperparameters += 1\n print \"Dropout Rate:\", shape[path]\n\n elif \"ReLU\" in layer_name:\n shape[path] = None\n\n elif layer_dict['_parameters'].ma_used == 0 and layer_dict['_buffers'].ma_used == 0:\n shape[path] = None\n print \"No Weights\"\n continue\n \n if layer_dict['_parameters'].ma_used > 0:\n tensor_dict = layer_dict['_parameters'].val\n for key in tensor_dict:\n if tensor_dict[key] == None:\n continue\n tensor = tensor_dict[key].tensor.dereference()\n uid = path + \".\" + key\n print \"Path:\", uid\n print \"Num Elements:\", tensor.num_elements\n print \"Shape:\", tensor.shape\n recovered_python_objects += 1\n recovered_c_structs += 2\n shape[uid] = tensor.shape\n final_addr = tensor.storage.buf\n name_to_weights[uid] = extract_data(addr_space, tensor.num_elements, final_addr)\n tensor_offsets[uid] = int(tensor.obj_offset)\n tot_num_elements += tensor.num_elements\n\n if layer_dict['_buffers'].ma_used > 0:\n tensor_dict = layer_dict['_buffers'].val\n for key in tensor_dict:\n if tensor_dict[key] == None:\n continue\n tensor = tensor_dict[key].tensor.dereference()\n uid = path + \".\" + key\n print \"Path:\", uid\n print \"Num Elements:\", tensor.num_elements\n print \"Shape:\", tensor.shape\n recovered_python_objects += 1\n recovered_c_structs += 2\n shape[uid] = tensor.shape\n final_addr = tensor.storage.dereference().buf\n if key != \"num_batches_tracked\":\n name_to_weights[uid] = extract_data(addr_space, tensor.num_elements, final_addr)\n else:\n found_object = obj.Object(\"int\",\n offset=final_addr,\n vm=addr_space)\n name_to_weights[uid] = [int(found_object)]\n print name_to_weights[uid]\n tensor_offsets[uid] = int(tensor.obj_offset)\n tot_num_elements += tensor.num_elements\n\n export_weights(task, name_to_weights, tot_num_elements, export_path, alpha, str(task.pid))\n export_offsets(task, tensor_offsets, export_path, alpha)\n\n print \"\\nMODEL SUMMARY\"\n for key in shape:\n print key\n print shape[key]\n print\n\n print \"\\nEVAL TABLE SUMMARY\"\n print \"Layers:\", len(all_layers)\n print \"Tensors:\", len(name_to_weights)\n print \"Weights:\", tot_num_elements\n print \"Hyper Parameters:\", hyperparameters\n print \"Precision:\", len(name_to_weights), \"/\", len(name_to_weights) + false_positives, \"=\", float(len(name_to_weights)) / float(len(name_to_weights) + false_positives)\n print \"Python Objects:\", recovered_python_objects\n print \"C Structs:\", recovered_c_structs", "def _partition_D(model):\n\n D1_indices = [] # A list of the indices for the unknown nodal displacements\n D2_indices = [] # A list of the indices for the known nodal displacements\n D2 = [] # A list of the values of the known nodal displacements (D != None)\n\n # Create the auxiliary table\n for node in model.Nodes.values():\n \n # Unknown displacement DX\n if node.support_DX==False and node.EnforcedDX == None:\n D1_indices.append(node.ID*6 + 0)\n # Known displacement DX\n elif node.EnforcedDX != None:\n D2_indices.append(node.ID*6 + 0)\n D2.append(node.EnforcedDX)\n # Support at DX\n else:\n D2_indices.append(node.ID*6 + 0)\n D2.append(0.0)\n\n # Unknown displacement DY\n if node.support_DY == False and node.EnforcedDY == None:\n D1_indices.append(node.ID*6 + 1)\n # Known displacement DY\n elif node.EnforcedDY != None:\n D2_indices.append(node.ID*6 + 1)\n 
D2.append(node.EnforcedDY)\n # Support at DY\n else:\n D2_indices.append(node.ID*6 + 1)\n D2.append(0.0)\n\n # Unknown displacement DZ\n if node.support_DZ == False and node.EnforcedDZ == None:\n D1_indices.append(node.ID*6 + 2)\n # Known displacement DZ\n elif node.EnforcedDZ != None:\n D2_indices.append(node.ID*6 + 2)\n D2.append(node.EnforcedDZ)\n # Support at DZ\n else:\n D2_indices.append(node.ID*6 + 2)\n D2.append(0.0)\n\n # Unknown displacement RX\n if node.support_RX == False and node.EnforcedRX == None:\n D1_indices.append(node.ID*6 + 3)\n # Known displacement RX\n elif node.EnforcedRX != None:\n D2_indices.append(node.ID*6 + 3)\n D2.append(node.EnforcedRX)\n # Support at RX\n else:\n D2_indices.append(node.ID*6 + 3)\n D2.append(0.0)\n\n # Unknown displacement RY\n if node.support_RY == False and node.EnforcedRY == None:\n D1_indices.append(node.ID*6 + 4)\n # Known displacement RY\n elif node.EnforcedRY != None:\n D2_indices.append(node.ID*6 + 4)\n D2.append(node.EnforcedRY)\n # Support at RY\n else:\n D2_indices.append(node.ID*6 + 4)\n D2.append(0.0)\n\n # Unknown displacement RZ\n if node.support_RZ == False and node.EnforcedRZ == None:\n D1_indices.append(node.ID*6 + 5)\n # Known displacement RZ\n elif node.EnforcedRZ != None:\n D2_indices.append(node.ID*6 + 5)\n D2.append(node.EnforcedRZ)\n # Support at RZ\n else:\n D2_indices.append(node.ID*6 + 5)\n D2.append(0.0)\n \n # Legacy code on the next line. I will leave it here until the line that follows has been proven over time.\n # D2 = atleast_2d(D2)\n \n # Convert D2 from a list to a matrix\n D2 = array(D2, ndmin=2).T\n\n # Return the indices and the known displacements\n return D1_indices, D2_indices, D2", "def generative_model(self):\n activation = tf.nn.relu\n if self.linear:\n activation = None\n\n # p(x | z, s)\n if self.batch is not None:\n h = tf.concat([self.z, self.batch], 1)\n else:\n h = self.z\n \n #h = dense(h, self.n_hidden,\n # activation=tf.nn.relu, bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n h = dense(h, self.n_hidden,\n activation=activation, bn=True, keep_prob=None, phase=self.training_phase)\n \n for layer in range(2, self.n_layers + 1):\n if self.batch is not None:\n h = tf.concat([h, self.batch], 1)\n h = dense(h, self.n_hidden, activation=activation, \\\n bn=True, keep_prob=self.dropout_rate, phase=self.training_phase)\n\n if self.batch is not None:\n h = tf.concat([h, self.batch], 1) \n \n #mean gamma\n self.px_scale = dense(h, self.n_input, activation=tf.nn.softmax, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n \n #dispersion\n if self.dispersion == \"gene-cell\":\n self.px_r = dense(h, self.n_input, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)\n elif self.dispersion == \"gene\":\n self.px_r = tf.Variable(tf.random_normal([self.n_input]), name=\"r\")\n else:\n if self.dispersion is False:\n self.px_r = tf.ones([self.n_input])\n else:\n if self.batch_ind is None:\n raise ValueError(\"batch dispersion with no batch info\")\n else:\n self.px_r = tf.Variable(tf.random_normal([self.num_batches, self.n_input]), name=\"r\")\n\n \n #mean poisson\n self.px_rate = self.px_scale \n if self.scalings:\n self.px_rate = self.px_scale * tf.exp(self.library)\n\n #dropout\n if self.zi:\n self.px_dropout = dense(h, self.n_input, activation=None, \\\n bn=False, keep_prob=None, phase=self.training_phase)", "def deconvolution_fusion_para(x, y):\n input_memory_type = x.get(\"addr_type\") \\\n if \"addr_type\" in x else 0\n output_memory_type = 
y.get(\"addr_type\") \\\n if \"addr_type\" in y else 0\n valid_shape = x.get(\"valid_shape\") \\\n if \"valid_shape\" in x else ()\n slice_offset = x.get(\"slice_offset\") \\\n if \"slice_offset\" in x else ()\n output_offset = y.get(\"slice_offset\") \\\n if \"slice_offset\" in y else ()\n l1_fusion_type = x.get(\"L1_fusion_type\") \\\n if \"L1_fusion_type\" in x else -1\n fmap_l1_addr_flag = x.get(\"L1_addr_flag\", False)\n fmap_l1_valid_size = x.get(\"L1_valid_size\", 0)\n\n\n\n l1_fusion_enable_flag = get_L1_info(\"L1_fusion_enabled\")\n\n if input_memory_type not in (0, 1, 2):\n args_dict = {\n \"errCode\": \"E65008\",\n \"input_memory_type_range\": \"(0, 1, 2)\",\n \"input_memory_type\": str(input_memory_type)\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if output_memory_type not in (0, 1, 2):\n args_dict = {\n \"errCode\": \"E65009\",\n \"output_memory_type_range\": \"(0, 1, 2)\",\n \"output_memory_type\": str(output_memory_type)\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if valid_shape and not slice_offset:\n reason = \"valid shape exists, slice shape cannot be []\"\n args_dict = {\n \"errCode\": \"E60108\",\n \"reason\": reason\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n if valid_shape and not output_offset:\n reason = \"valid shape exists, output offset cannot be []\"\n args_dict = {\n \"errCode\": \"E60108\",\n \"reason\": reason\n }\n raise RuntimeError(args_dict,\n err_man.get_error_message(args_dict))\n\n valid_shape = shape_to_list(valid_shape)\n slice_offset = shape_to_list(slice_offset)\n output_offset = shape_to_list(output_offset)\n\n if not l1_fusion_enable_flag:\n input_memory_type = 0\n output_memory_type = 0\n valid_shape = []\n slice_offset = []\n output_offset = []\n l1_fusion_type = -1\n fmap_l1_addr_flag = False\n fmap_l1_valid_size = 0\n\n fusion_para = {\"input_memory_type\": input_memory_type,\n \"output_memory_type\": output_memory_type,\n \"valid_shape\": valid_shape,\n \"slice_offset\": slice_offset,\n \"output_offset\": output_offset,\n \"l1_fusion_type\": l1_fusion_type,\n \"fmap_l1_addr_flag\": fmap_l1_addr_flag,\n \"fmap_l1_valid_size\": fmap_l1_valid_size}\n\n return fusion_para", "def genfb(h, n, u, v, f, dt, dx, dy, du,dv,dn, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # generalized forward backward feedback timestep\n \n beta = np.float32(beta)\n eps = np.float32(eps)\n gamma = np.float32(gamma)\n mu = np.float32(mu)\n \n \n dn_m1,dn_m2,dn_m0 = dn # unpack\n dndt_x(h, n, u, v, dx, dy, dn_m0)\n \n# test_out = dn_m0.copy()\n# dndt(h, n, u, v, dx, dy, test_out)\n \n# test_dif = dn_m0-test_out\n# if np.max(np.abs(test_dif[1:-1,1:-1] )) >1E-5 :\n# test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dn diff 2\")\n# print (test_dif[:,5])\n \n #dn_m0[:]=test_out \n\n # must do the following before the u and v !\n n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n #del dn_m2\n du_m0,du_m1,du_m2,du_p1 = du # unpack\n dudt_x(h, n1, f, u, v, dx, dy, du_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv # unpack \n dvdt_x(h, n1, f, u, v, dx, dy, dv_p1, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn,nu=nu,mu=mu)\n \n# test_out = du_p1.copy()\n# dudt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = du_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# 
test_dif[1:-1,5][np.abs(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"du diff\")\n# print (test_dif[:,5])\n \n# #du_p1[:] = test_out\n\n# test_out = dv_p1.copy()\n# dvdt(h, n1, f, u, v, dx, dy, test_out)\n \n# test_dif = dv_p1-test_out\n# if np.max(np.abs(test_dif[1:-1,5] )) >1E-5 :\n# test_dif[1:-1,5][np.max(test_dif[1:-1,5] ) <1E-5]=0.0\n# print (\"dv diff\")\n# print (test_dif[:,5])\n \n #dv_p1[:] = test_out\n \n u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # del du_m2\n v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # del dv_m2\n\n\n \n \n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n# n[:,:], u[:,:], v[:,:], = n1, u1, v1\n return n1, u1, v1, du,dv,dn", "def __call__(self, x_1d, is_training, reuse=False, nfilt=32):\n with tf.variable_scope(self.name):\n x = tf.reshape(x_1d, [-1, self.input_dim, self.input_dim, self.channels])\n\n e1 = unet_conv(x, nfilt*1, 'e1', reuse, is_training)\n e2 = unet_conv(e1, nfilt*2, 'e2', reuse, is_training)\n e3 = unet_conv(e2, nfilt*4, 'e3', reuse, is_training)\n e4 = unet_conv(e3, nfilt*8, 'e4', reuse, is_training)\n e5 = unet_conv(e4, nfilt*8, 'e5', reuse, is_training)\n e6 = unet_conv(e5, nfilt*8, 'e6', reuse, is_training, s=1)\n e7 = unet_conv(e6, nfilt*8, 'e7', reuse, is_training, s=1)\n e8 = unet_conv(e7, nfilt*8, 'e8', reuse, is_training, s=1)\n\n d1 = unet_conv_t(e8, e7, nfilt*8, 'd1', reuse, is_training, s=1)\n d2 = unet_conv_t(d1, e6, nfilt*8, 'd2', reuse, is_training, s=1)\n d3 = unet_conv_t(d2, e5, nfilt*8, 'd3', reuse, is_training, s=1)\n d4 = unet_conv_t(d3, e4, nfilt*8, 'd4', reuse, is_training)\n d5 = unet_conv_t(d4, e3, nfilt*4, 'd5', reuse, is_training)\n d6 = unet_conv_t(d5, e2, nfilt*2, 'd6', reuse, is_training)\n d7 = unet_conv_t(d6, e1, nfilt*1, 'd7', reuse, is_training)\n out = unet_conv_t(\n d7, None, self.channels, 'out', reuse, is_training,\n activation=tf.nn.tanh, use_batch_norm=False, use_dropout=False)\n\n out_1d = tf.reshape(out, (-1, self.output_dim*self.output_dim*self.channels))\n\n tensors = [\n x, e1, e2, e3, e4, e5, e6, e7, e8, d1, d2, d3, d4, d5, d6, d7, out, out_1d]\n\n for tensor in tensors:\n print(tensor)\n\n return out_1d", "def distill_dataset(tfrecord_folder,\n batch_size,\n beam_size,\n model,\n model_ckpt,\n vocab,\n dataset_type,\n strategy,\n distillation_save_path):\n \n prepare_batch_wmt = lambda b: prepare_batch_for_lm_wmt(action_refinement=1, batch=b)\n prepare_permutation = prepare_permutation_without_pt\n\n # create a distillation pipeline\n dataset = wmt_dataset(tfrecord_folder, batch_size, shuffle=False)\n prepare_batch = prepare_batch_wmt\n \n dataset = strategy.experimental_distribute_dataset(dataset)\n \n def dummy_loss_function(b):\n inputs = prepare_permutation(b, dataset_type, vocab.size())\n inputs_clone = [tf.identity(x) for x in inputs]\n _ = model(inputs_clone)\n loss, _ = model.loss(inputs, training=True)\n\n @tf.function(input_signature=[dataset.element_spec])\n def wrapped_dummy_loss_function(b):\n # distribute the model across many gpus using a strategy\n # do this by wrapping the loss function using data parallelism\n strategy.run(dummy_loss_function, args=(b,))\n \n def decode_function(b):\n # perform beam search using the current model and also\n # get the log probability of sequence\n if dataset_type in ['wmt', 'django']:\n maxit = 150 \n elif dataset_type in ['gigaword']:\n maxit = 40\n inputs = prepare_batch(b)\n cap, logp = 
beam_search(\n inputs, model, dataset_type, beam_size=beam_size, max_iterations=maxit)\n cap = tf.strings.reduce_join(\n vocab.ids_to_words(cap), axis=2, separator=' ')\n src = tf.strings.reduce_join(\n vocab.ids_to_words(inputs[VALUES]), axis=1, separator=' ')\n return src, cap, logp\n\n @tf.function(input_signature=[dataset.element_spec]) \n def wrapped_decode_function(b):\n # distribute the model across many gpus using a strategy\n # do this by wrapping the loss function\n return strategy.run(decode_function, args=(b,))\n\n # run the model for a single forward pass\n # and load en existing checkpoint into the trained model\n for batch in dataset:\n wrapped_dummy_loss_function(batch)\n break\n \n print(\"----------Done initializing the weights of the model-----------\") \n model.load_weights(model_ckpt)\n print(\"----------Done loading the weights of the model-----------\") \n \n # loop through the entire dataset once (one epoch)\n b_idx = 0\n \n f1 = open(os.path.join(distillation_save_path, \"src_distillation.BPE.txt\"), \"w\")\n f2 = open(os.path.join(distillation_save_path, \"tgt_distillation.BPE.txt\"), \"w\")\n \n # eliminate all elements in the array whose \n # batch dimension is zero\n def eliminate_empty(arr):\n result = []\n for x in arr:\n if x.shape[0] != 0:\n result.append(x)\n return result\n \n def parse_output(s):\n return s.decode(\"utf-8\").replace(\n \"<pad>\", \"\").replace(\"<start>\", \"\").replace(\n \"<end>\", \"\").replace(\" \", \" \").strip()\n \n for batch in dataset:\n print(\"Batch index\", b_idx)\n b_idx += 1\n\n # process the dataset batch dictionary into the standard\n # model input format; perform beam search\n src, cap, log_p = wrapped_decode_function(batch)\n if strategy.num_replicas_in_sync == 1:\n src = src.numpy()\n cap = cap.numpy()\n else:\n # when evaluating on multi gpus, the data might be distributed\n # in a way such that some gpus receive empty inputs, \n # i.e. 
the batch dimension is zero\n src = tf.concat(eliminate_empty(src.values), axis=0).numpy()\n cap = tf.concat(eliminate_empty(cap.values), axis=0).numpy()\n log_p = tf.concat(eliminate_empty(log_p.values), axis=0)\n\n # format the model predictions into a string\n for i in range(cap.shape[0]):\n if dataset_type in ['wmt', 'django', 'gigaword']:\n model_sentence = parse_output(cap[i, 0])\n print(\"{}: [p = {}] {}\".format(i, \n np.exp(log_p[i, 0].numpy()),\n model_sentence))\n print(parse_output(src[i]), file=f1)\n print(model_sentence, file=f2)\n \n f1.flush()\n f2.flush()\n \n f1.close()\n f2.close()", "def get_unet():\n inputs = Input((img_rows, img_cols, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),\n padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),\n padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),\n padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),\n padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss,\n metrics=[dice_coef])\n\n return model", "def forward_pass(self):\n # Have to use one_hot labels since sparse softmax doesn't allow\n # second derivatives.\n one_hot_train_labels = tf.one_hot(self.data.train_labels, self.way)\n train_embeddings_ = self.embedding_fn(\n self.data.train_images,\n depth_multiplier=self.depth_multiplier,\n reuse=tf.AUTO_REUSE)\n train_embeddings = train_embeddings_['embeddings']\n embedding_vars_dict = train_embeddings_['params']\n\n with tf.variable_scope('linear_classifier', reuse=tf.AUTO_REUSE):\n embedding_depth = train_embeddings.shape.as_list()[-1]\n fc_weights = weight_variable([embedding_depth, MAX_WAY])\n fc_bias = bias_variable([MAX_WAY])\n\n embedding_vars_keys = []\n embedding_vars = []\n embedding_vars_copy_ops = []\n for name, var in 
embedding_vars_dict.iteritems():\n embedding_vars_keys.append(name)\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n shape = var.shape.as_list()\n var_copy = tf.Variable(\n tf.zeros(shape), collections=[tf.GraphKeys.LOCAL_VARIABLES])\n var_copy_op = tf.assign(var_copy, var)\n embedding_vars_copy_ops.append(var_copy_op)\n embedding_vars.append(var_copy)\n else:\n embedding_vars.append(var)\n\n fc_vars_copy_ops = []\n if not self.is_training:\n with tf.variable_scope('weight_copy'):\n # fc_weights copy\n fc_weights_copy = tf.Variable(\n tf.zeros(fc_weights.shape.as_list()),\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_weights_copy_op = tf.assign(fc_weights_copy, fc_weights)\n fc_vars_copy_ops.append(fc_weights_copy_op)\n\n # fc_bias copy\n fc_bias_copy = tf.Variable(\n tf.zeros(fc_bias.shape.as_list()),\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n fc_bias_copy_op = tf.assign(fc_bias_copy, fc_bias)\n fc_vars_copy_ops.append(fc_bias_copy_op)\n\n fc_weights = fc_weights_copy\n fc_bias = fc_bias_copy\n\n fc_vars = [fc_weights, fc_bias]\n num_embedding_vars = len(embedding_vars)\n num_fc_vars = len(fc_vars)\n\n def _cond(step, *args):\n del args\n num_steps = self.num_update_steps\n if not self.is_training:\n num_steps += self.additional_test_update_steps\n return step < num_steps\n\n def _body(step, *args):\n \"\"\"The inner update loop body.\"\"\"\n updated_embedding_vars = args[0:num_embedding_vars]\n updated_fc_vars = args[num_embedding_vars:num_embedding_vars +\n num_fc_vars]\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n\n if self.proto_maml_fc_layer_on_support_set:\n # Set fc layer weights with prototypical equivalent values.\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n train_logits = tf.matmul(train_embeddings,\n pmaml_fc_weights) + pmaml_fc_bias\n else:\n updated_fc_weights, updated_fc_bias = updated_fc_vars\n train_logits = tf.matmul(train_embeddings,\n updated_fc_weights) + updated_fc_bias\n\n train_logits = train_logits[:, 0:self.way]\n loss = tf.losses.softmax_cross_entropy(one_hot_train_labels, train_logits)\n\n if self.debug_log:\n print_op = tf.print(['step: ', step, updated_fc_bias[0], 'loss:', loss])\n else:\n print_op = tf.no_op()\n\n embedding_grads = tf.gradients(loss, updated_embedding_vars)\n # Only computes fc grad when it's not created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = tf.gradients(loss, updated_fc_vars)\n\n if self.first_order:\n\n def _stop_grads(grads):\n return [tf.stop_gradient(dv) for dv in grads]\n\n embedding_grads = _stop_grads(embedding_grads)\n if not self.proto_maml_fc_layer_on_support_set:\n fc_grads = _stop_grads(fc_grads)\n\n # Apply gradients\n def _apply_grads(variables, grads):\n \"\"\"Applies gradients using SGD on a list of variables.\"\"\"\n v_new = []\n for (v, dv) in zip(variables, grads):\n if (not self.train_batch_norm and\n ('offset' in v.name or 'scale' in v.name)):\n v_new.append(v)\n else:\n v_new.append(v - self.alpha * dv)\n return v_new\n\n with tf.control_dependencies([print_op]):\n updated_embedding_vars = _apply_grads(updated_embedding_vars,\n embedding_grads)\n # Only apply fc grad when it's not 
created from prototypes.\n if not self.proto_maml_fc_layer_on_support_set:\n updated_fc_vars = _apply_grads(updated_fc_vars, fc_grads)\n step = step + 1\n return tuple([step] + list(updated_embedding_vars) +\n list(updated_fc_vars))\n\n # MAML meta updates using query set examples from an episode.\n if self.zero_fc_layer:\n # To account for variable class sizes, we initialize the output\n # weights to zero. See if truncated normal initialization will help.\n zero_weights_op = tf.assign(fc_weights, tf.zeros_like(fc_weights))\n zero_bias_op = tf.assign(fc_bias, tf.zeros_like(fc_bias))\n fc_vars_init_ops = [zero_weights_op, zero_bias_op]\n else:\n fc_vars_init_ops = fc_vars_copy_ops\n\n if self.proto_maml_fc_layer_init:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(\n prototypes, zero_pad_to_max_way=True)\n pmaml_fc_bias = self.proto_maml_fc_bias(\n prototypes, zero_pad_to_max_way=True)\n fc_vars = [pmaml_fc_weights, pmaml_fc_bias]\n\n with tf.control_dependencies(fc_vars_init_ops + embedding_vars_copy_ops):\n # We will first compute gradients using the initial weights\n # Don't want to restore it during eval.\n step = tf.Variable(\n 0,\n trainable=False,\n name='inner_step_counter',\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n loop_vars = [step] + embedding_vars + fc_vars\n step_and_all_updated_vars = tf.while_loop(\n _cond, _body, loop_vars, swap_memory=True)\n step = step_and_all_updated_vars[0]\n all_updated_vars = step_and_all_updated_vars[1:]\n updated_embedding_vars = all_updated_vars[0:num_embedding_vars]\n updated_fc_weights, updated_fc_bias = all_updated_vars[\n num_embedding_vars:num_embedding_vars + num_fc_vars]\n\n # Forward pass the training images with the updated weights in order to\n # compute the means and variances, to use for the query's batch norm.\n support_set_moments = None\n if not self.transductive_batch_norm:\n support_set_moments = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['moments']\n\n test_embeddings = self.embedding_fn(\n self.data.test_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n moments=support_set_moments, # Use support set stats for batch norm.\n depth_multiplier=self.depth_multiplier,\n reuse=True,\n backprop_through_moments=self.backprop_through_moments)['embeddings']\n\n if not self.proto_maml_fc_layer_on_query_set:\n self.test_logits = (tf.matmul(test_embeddings, updated_fc_weights) +\n updated_fc_bias)[:, 0:self.way]\n else:\n train_embeddings = self.embedding_fn(\n self.data.train_images,\n params=collections.OrderedDict(\n zip(embedding_vars_keys, updated_embedding_vars)),\n depth_multiplier=self.depth_multiplier,\n reuse=True)['embeddings']\n prototypes = self.proto_maml_prototypes(train_embeddings)\n pmaml_fc_weights = self.proto_maml_fc_weights(prototypes)\n pmaml_fc_bias = self.proto_maml_fc_bias(prototypes)\n self.test_logits = (\n tf.matmul(test_embeddings, pmaml_fc_weights) + pmaml_fc_bias)", "def get_batch(batch_data, config):\n N = len(batch_data['obs_traj_rel'])\n P = config.P\n OF = config.flow_size\n T_in = config.obs_len\n T_pred = config.pred_len\n\n returned_inputs = 
[]\n traj_obs_gt = np.zeros([N, T_in, P], dtype='float32')\n traj_pred_gt = np.zeros([N, T_pred, P], dtype='float32')\n # --- xy input\n for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'],\n batch_data['pred_traj_rel'])):\n for j, xy in enumerate(obs_data):\n traj_obs_gt[i, j, :] = xy\n for j, xy in enumerate(pred_data):\n traj_pred_gt[i, j, :] = xy\n returned_inputs.append(traj_obs_gt)\n # ------------------------------------------------------\n # Social component (through optical flow)\n if config.add_social:\n obs_flow = np.zeros((N, T_in, OF),dtype ='float32')\n # each batch\n for i, flow_seq in enumerate(batch_data['obs_optical_flow']):\n for j , flow_step in enumerate(flow_seq):\n obs_flow[i,j,:] = flow_step\n returned_inputs.append(obs_flow)\n # -----------------------------------------------------------\n # Person pose input\n if config.add_kp:\n obs_kp = np.zeros((N, T_in, KP, 2), dtype='float32')\n # each bacth\n for i, obs_kp_rel in enumerate(batch_data['obs_kp_rel']):\n for j, obs_kp_step in enumerate(obs_kp_rel):\n obs_kp[i, j, :, :] = obs_kp_step\n return returned_inputs,traj_pred_gt", "def backward(Data):\n\n # data\n graph = tf.Graph()\n X_train = Data[0]\n Y_train = Data[1]\n X_val = Data[2]\n Y_val = Data[3]\n maxvalue = Data[4]\n\n if X_train.shape[0] != Y_train.shape[0]:\n raise Exception(\"The quantity of Input X and Compare Y_ are not same!\")\n\n Loss = []\n Loss_val = []\n\n with graph.as_default():\n print(\"This is the process of all the Dose!\")\n print(\"There are %d data in training.\" % X_train.shape[0])\n print(\"There are %d data in cross validation.\" % X_val.shape[0])\n print(\"Features of X: %d\" % X_train.shape[1])\n print(\"Learning rate is: %f\" % learning_rate)\n # Init all the parameters\n global_step = tf.Variable(0, trainable=False)\n\n # multi threads\n # queue = tf.FIFOQueue(capacity=64, dtypes=[tf.float32, tf.float32], shapes=[[7], []])\n # enqueue_op = queue.enqueue_many([X, Y_])\n # X\n\n STEPS = int(Epoch * X_train.shape[0] / BATCH_SIZE) + 1\n epoch = 0\n\n with tf.name_scope('inputs'):\n x = tf.placeholder(tf.float32, [None, Forward.INPUT_NODE], name='x_Input')\n y_ = tf.placeholder(tf.float32, [None, Forward.OUTPUT_NODE], name='y_Exact')\n y = Forward.forward(x, REGULARIZER, maxvalue, Is_model_high=False)\n\n # test\n # maxValue = tf.constant(maxValue, dtype=tf.float32)\n # minValue = tf.constant(minValue, dtype=tf.float32)\n # y = y * (maxValue - minValue) + minValue\n\n # lost function\n with tf.name_scope('loss'):\n loss_mse = tf.reduce_mean(tf.square(y - y_))\n loss = loss_mse + tf.add_n(tf.get_collection(\"losses\"))\n tf.summary.scalar('loss', loss)\n\n # Todo\n # LM algorithm\n\n # learning_rate = tf.train.exponential_decay(\n # LEARNING_RATE_BASE,\n # global_step,\n # X.shape[0] / BATCH_SIZE,\n # LEARNING_RATE_DECAY,\n # staircase=True\n # )\n\n with tf.name_scope('train'):\n # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step)\n # train_step = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, global_step)\n train_step = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.99).minimize(loss, global_step)\n\n # EMA algorithm\n ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)\n ema_op = ema.apply(tf.trainable_variables())\n with tf.control_dependencies([train_step, ema_op]):\n train_op = tf.no_op(name='train')\n\n # ready for storing the model\n saver = tf.train.Saver()\n\n with tf.Session() as sess:\n init_op = 
tf.global_variables_initializer()\n sess.run(init_op)\n\n # Get the check point\n ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH_TEST)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n\n # begin multi threads\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess, coord)\n print(\"Begin the multi threads!\")\n\n # Graph\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter(\"./logs/\", sess.graph)\n\n # Training\n for i in range(STEPS):\n start = (i * BATCH_SIZE) % int(X_train.shape[0])\n end = start + BATCH_SIZE\n # if finish all the data\n if end >= X_train.shape[0]:\n end = X_train.shape[0]\n\n _, loss_value, step = sess.run([train_op, loss, global_step],\n feed_dict={x: X_train[start:end], y_: Y_train[start:end]})\n\n if i % 4000 == 0:\n print(\"Steps are: %d , loss is: %g.\" % (step, loss_value))\n rs = sess.run(merged, feed_dict={x: X_train[start:end], y_: Y_train[start:end]})\n writer.add_summary(rs, i)\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_TEST, MODEL_NAME), global_step)\n\n # a round\n if end == X_train.shape[0]:\n # get the results\n epoch += 1\n\n loss_total = sess.run(loss, feed_dict={x: X_train, y_: Y_train})\n loss_val = sess.run(loss, feed_dict={x: X_val, y_: Y_val})\n\n Loss.append(loss_total)\n Loss_val.append(loss_val)\n print(\"After %d epoch(s), steps: %d, loss total: %g, loss validation: %g.\\n\" % (epoch, step, loss_total, loss_val))\n saver.save(sess, os.path.join(MODEL_SAVE_PATH_TEST, MODEL_NAME), global_step)\n\n # close the multi threads\n coord.request_stop()\n coord.join(threads)\n print(\"Close the multi threads!\")\n\n return Loss", "def call(self, reshaped_input):\n \"\"\"\n In Keras, there are two way to do matrix multiplication (dot product)\n 1) K.dot : AxB -> when A has batchsize and B doesn't, use K.dot\n 2) tf.matmul: AxB -> when A and B both have batchsize, use tf.matmul\n \n Error example: Use tf.matmul when A has batchsize (3 dim) and B doesn't (2 dim)\n ValueError: Shape must be rank 2 but is rank 3 for 'net_vlad_1/MatMul' (op: 'MatMul') with input shapes: [?,21,64], [64,3]\n \n tf.matmul might still work when the dim of A is (?,64), but this is too confusing.\n Just follow the above rules.\n \"\"\"\n \n ''' Computation of N_v in Equation 3 of the paper '''\n activation = K.dot(reshaped_input, self.cluster_weights)\n \n activation += self.cluster_biases\n \n activation = tf.nn.softmax(activation)\n\n activation = tf.reshape(activation,\n [-1, self.max_samples, self.cluster_size])\n\n activation = tf.transpose(activation,perm=[0,2,1])\n \n reshaped_input = tf.reshape(reshaped_input,[-1,\n self.max_samples, self.feature_size])\n\n vlad = tf.matmul(activation,reshaped_input)\n vlad = tf.transpose(vlad,perm=[0,2,1])\n vlad = tf.nn.l2_normalize(vlad,1)\n vlad = tf.reshape(vlad,[-1, self.cluster_size*self.feature_size])\n Nv = tf.nn.l2_normalize(vlad,1)\n \n # Equation 3 in the paper\n # \\hat{y} = W_N N_v\n vlad = K.dot(Nv, self.Wn)\n\n return vlad", "def get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,\n num_tcn_grid_times, cov_grid, input_dim,method, gp_params, lab_vitals_only, pad_before): ##,med_cov_grid \n\n n_mc_smps, M = gp_params.n_mc_smps, gp_params.M\n grid_max = tf.shape(X)[1]\n Z = tf.zeros([0,grid_max,input_dim])\n \n N = tf.shape(T)[0] #number of observations\n \n #setup tf while loop (have to use this bc loop size is variable)\n def cond(i,Z):\n return i<N\n \n def body(i,Z):\n Yi = 
tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1]) #MM: tf.reshape(x, [-1]) flattens tensor x (e.g. [2,3,1] to [6]), slice cuts out all Y data of one patient\n Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])\n ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])\n ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])\n Xi = tf.reshape(tf.slice(X,[i,0],[1,num_tcn_grid_times[i]]),[-1])\n X_len = num_tcn_grid_times[i]\n \n GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti,method=method, gp_params=gp_params)\n pad_len = grid_max-X_len #pad by this much\n #padding direction:\n if pad_before:\n print('Padding GP_draws before observed data..')\n padded_GP_draws = tf.concat([tf.zeros((n_mc_smps,pad_len,M)), GP_draws],1) \n else:\n padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1) \n\n if lab_vitals_only:\n Z = tf.concat([Z,padded_GP_draws],0) #without covs\n else: #with covs\n medcovs = tf.slice(cov_grid,[i,0,0],[1,-1,-1])\n tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])\n padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)\n Z = tf.concat([Z,padded_GPdraws_medcovs],0) #with covs\n \n return i+1,Z \n \n i = tf.constant(0)\n #with tf.control_dependencies([tf.Print(tf.shape(ind_kf), [tf.shape(ind_kf), tf.shape(ind_kt), num_obs_values], 'ind_kf & ind_kt & num_obs_values')]):\n i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],\n shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])\n\n return Z", "def depth_rendering_gpu(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.floor(x_shifted).astype(int)\n x_high = x_low + 1\n\n y_low = np.floor(y_shifted).astype(int)\n y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n interp_pts_3 = 
np.stack((x_high_clip, y_low_clip))\n interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n ref_view = torch.tensor(ref_view, dtype=torch.float32).cuda()\n res_1 = torch_tensor_sample(ref_view, interp_pts_1, desired_shape)\n res_2 = torch_tensor_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_tensor_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_tensor_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape).cuda(), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape).cuda(), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape).cuda(), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape).cuda(), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def BuildTrainGraph(self):\n # Replace this with an actual training op\n self.train_step_ = None\n\n # Replace this with an actual loss function\n self.train_loss_ = None\n\n #### YOUR CODE HERE ####\n # See hints in instructions!\n\n # Define approximate loss function.\n # Note: self.softmax_ns (i.e. k=200) is already defined; use that as the\n # number of samples.\n # Loss computation (sampled, for training)\n #print(self.W_out_.get_shape())\n #print(self.b_out_.get_shape())\n #print(self.outputs_.get_shape())\n #print(tf.reshape(self.outputs_, [-1,self.H]).get_shape())\n #print(tf.reshape(self.outputs_, [self.batch_size_*self.max_time_,self.H]).get_shape())\n #print(self.x_.get_shape())\n #print(tf.reshape(self.x_, [-1, self.W_out_.get_shape()[-1]]).get_shape())\n #print(self.target_y_.get_shape())\n #print(tf.reshape(self.target_y_, [self.batch_size_*self.max_time_,]).get_shape())\n \n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=tf.reshape(self.target_y_, \n #[self.batch_size_*self.max_time_,1]),\n #inputs=tf.reshape(self.outputs_, \n #[self.batch_size_*self.max_time_,self.H]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #partition_strategy=\"div\" ???\n \n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=self.target_y_,\n #inputs=tf.reshape(self.outputs_, [-1,self.W_out_.get_shape()[0]]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=self.W_out_, biases=self.b_out_, \n #labels=self.target_y_,\n #inputs=tf.reshape(self.x_, [-1, self.W_out_.get_shape()[-1]]), \n #num_sampled=self.softmax_ns, num_classes=self.V, \n #name=\"per_example_sampled_softmax_loss\")\n #per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n #labels=tf.expand_dims(self.target_y_, 1), inputs=self.x_, \n #num_sampled=self.softmax_ns, num_classes=self.V, 
\n #name=\"per_example_sampled_softmax_loss\")\n with tf.name_scope(\"training_loss_function\"):\n per_example_train_loss_ = tf.nn.sampled_softmax_loss(weights=tf.transpose(self.W_out_), biases=self.b_out_, \n labels=tf.reshape(self.target_y_, \n [self.batch_size_*self.max_time_,1]),\n inputs=tf.reshape(self.outputs_, \n [self.batch_size_*self.max_time_,self.H]), \n num_sampled=self.softmax_ns, num_classes=self.V, \n name=\"per_example_sampled_softmax_loss\")\n #partition_strategy=\"div\" ???\n self.train_loss_ = tf.reduce_mean(per_example_train_loss_, name=\"sampled_softmax_loss\")\n \n #optimizer_ = tf.train.AdamOptimizer()\n #gradient clipping: tf.clip_by_global_norm\n\n\n\n # Define optimizer and training op\n #tvars = tf.trainable_variables()\n #grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), self.max_grad_norm)\n \n #optimizer_ = tf.train.AdamOptimizer(learning_rate=self.learning_rate_)\n #gradients, v = zip(*optimizer_.compute_gradients(self.train_loss_))\n #gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm_)\n #self.train_step_ = optimizer_.apply_gradients(zip(gradients, v))\n \n #self.train_step_ = optimizer_.apply_gradients(zip(grads, tvars))\n #gradient clipping: tf.clip_by_global_norm, self.max_grad_norm\n #self.train_step_ = optimizer_.minimize(self.train_loss_)\n with tf.name_scope(\"optimizer_and_training_op\"):\n optimizer_ = tf.train.AdamOptimizer(learning_rate=self.learning_rate_)\n gradients, v = zip(*optimizer_.compute_gradients(self.train_loss_))\n gradients, _ = tf.clip_by_global_norm(gradients, self.max_grad_norm_)\n self.train_step_ = optimizer_.apply_gradients(zip(gradients, v))\n\n\n\n #### END(YOUR CODE) ####", "def forward(self, z_t_1, h_x, phi_table, t, temp=0):\n \n# sparsemax.device = z_t_1.device\n \n z_category, z_category_sparse = self.gen_z_t_dist_now(z_t_1, h_x)\n \n# if t > self.t_thres:\n# \n# if self.use_gumbel_softmax:\n# # print(t, 'inference here')\n# # device = z_category.device\n# \n# averaged_z_t = 0\n# \n# log_prob = Variable(torch.log(z_category))\n# \n# for k in range(self.sampling_times): \n# curr_z_t = F.gumbel_softmax(log_prob, tau = 0.05)\n# \n# # curr_z_t = sparsemax(log_prob)\n# \n# \n# averaged_z_t += curr_z_t\n# \n# del curr_z_t\n# \n# # averaged_z_t = averaged_z_t.to(device)\n# \n# z_t = averaged_z_t/self.sampling_times\n# \n# # print('diff::', torch.norm(z_t - z_category))\n# # \n# # print()\n# else:\n# z_t = z_category\n# \n# else:\n z_t = z_category\n \n if len(z_t.shape) == 2:\n phi_z = torch.mm(z_t, torch.t(phi_table))\n else:\n \n phi_table_full = (torch.t(phi_table)).view(1, phi_table.shape[1], phi_table.shape[0])\n \n phi_table_full = phi_table_full.repeat(phi_table.shape[1], 1, 1)\n \n phi_z = torch.bmm(z_t, phi_table_full)\n# mu = self.h_to_mu(h_combined)\n# logvar = self.h_to_logvar(h_combined)\n# std = torch.exp(0.5 * logvar) \n# epsilon = torch.randn(z_t_1.size(), device=z_t_1.device) # sampling z by re-parameterization\n# z_t = epsilon * std + mu # [batch_sz x z_sz]\n return z_t, z_category, phi_z, z_category_sparse", "def __init__(self, sess, max_iter=50001, optim='adagrad', learning_rate=1e-2,\n d_per_iter=1, g_per_iter=2, d_update=True, g_update=True,\n real_n=1000, real_dim=2, fake_n=1000, z_dim=3, g_out_dim=2,\n g_layers_depth=5, g_layers_width=None, g_activations=None,\n d_out_dim=1, d_layers_depth=5, d_layers_width=5,\n d_activations=None, d_batch_size=25, x_lims=None, y_lims=None,\n grid_gran=21, grid_n=None, dataset='gaussian', expt='test_low_alpha'):\n self.sess = sess\n 
self.max_iter = max_iter\n self.optim = optim\n self.learning_rate = learning_rate\n\n self.d_per_iter = d_per_iter \n self.g_per_iter = g_per_iter\n self.d_update = d_update\n self.g_update = not d_update \n\n self.real_n = real_n \n self.real_dim = real_dim \n self.fake_n = fake_n\n\n self.z_dim = z_dim\n self.g_out_dim = g_out_dim\n self.g_layers_depth = g_layers_depth\n self.g_layers_width = [[5]] * (g_layers_depth - 1) + [[g_out_dim]]\n self.g_activations = [tf.nn.tanh, tf.nn.elu]\n\n self.d_out_dim = d_out_dim\n self.d_layers_depth = d_layers_depth\n self.d_layers_width = d_layers_width\n self.d_activations = [tf.nn.tanh, tf.nn.relu]\n self.d_batch_size = d_batch_size\n\n self.x_lims = [-6., 2.]\n self.y_lims = [-2., 6.]\n self.grid_gran = grid_gran\n self.grid_n = grid_gran ** 2\n self.grid, self.x_grid, self.y_grid = self.make_grid()\n\n self.dataset = dataset \n self.real_points = load_2d_data(dataset, real_n, real_dim)\n\n self.expt = expt\n\n self.build_model()", "def step(self, time, inputs, state, name = None):\n with ops.name_scope(name, 'PGDecoderStep', (time, inputs, state)):\n cell_outputs, cell_state = self._cell(inputs, state)\n # the first cell state contains attention, which is context\n attention = cell_state[0].attention\n att_cell_state = cell_state[0].cell_state\n alignments = cell_state[0].alignments\n\n with tf.variable_scope('calculate_pgen'):\n p_gen = _linear([attention, inputs, att_cell_state], 1, True)\n p_gen = tf.sigmoid(p_gen)\n\n if self._output_layer is not None:\n cell_outputs = self._output_layer(cell_outputs)\n\n vocab_dist = tf.nn.softmax(cell_outputs) * p_gen\n\n # z = tf.reduce_sum(alignments,axis=1)\n # z = tf.reduce_sum(tf.cast(tf.less_equal(alignments, 0),tf.int32))\n alignments = alignments * (1 - p_gen)\n\n # x = tf.reduce_sum(tf.cast(tf.less_equal((1-p_gen), 0),tf.int32))\n # y = tf.reduce_sum(tf.cast(tf.less_equal(alignments[3], 0),tf.int32))\n\n # this is only for debug\n # alignments2 = tf.Print(alignments2,[tf.shape(inputs),x,y,alignments[2][9:12]],message=\"zeros in vocab dist and alignments\")\n\n # since we have OOV words, we need expand the vocab dist\n vocab_size = tf.shape(vocab_dist)[-1]\n extended_vsize = vocab_size + self.source_oov_words\n batch_size = tf.shape(vocab_dist)[0]\n extra_zeros = tf.zeros((batch_size, self.source_oov_words))\n # batch * extend vocab size\n vocab_dists_extended = tf.concat(\n axis = -1, values = [vocab_dist, extra_zeros]\n )\n # vocab_dists_extended = tf.Print(vocab_dists_extended,[tf.shape(vocab_dists_extended),self.source_oov_words],message='vocab_dists_extended size')\n\n batch_nums = tf.range(0, limit = batch_size) # shape (batch_size)\n batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n attn_len = tf.shape(self.source_extend_tokens)[\n 1\n ] # number of states we attend over\n batch_nums = tf.tile(\n batch_nums, [1, attn_len]\n ) # shape (batch_size, attn_len)\n indices = tf.stack(\n (batch_nums, self.source_extend_tokens), axis = 2\n ) # shape (batch_size, enc_t, 2)\n shape = [batch_size, extended_vsize]\n attn_dists_projected = tf.scatter_nd(indices, alignments, shape)\n\n final_dists = attn_dists_projected + vocab_dists_extended\n # final_dists = tf.Print(final_dists,[tf.reduce_sum(tf.cast(tf.less_equal(final_dists[0],0),tf.int32))],message='final dist')\n # note: sample_ids will contains OOV words\n sample_ids = self._helper.sample(\n time = time, outputs = final_dists, state = cell_state\n )\n\n (finished, next_inputs, next_state) = self._helper.next_inputs(\n time = 
time,\n outputs = cell_outputs,\n state = cell_state,\n sample_ids = sample_ids,\n )\n\n outputs = tf.contrib.seq2seq.BasicDecoderOutput(\n final_dists, sample_ids\n )\n return (outputs, next_state, next_inputs, finished)" ]
[ "0.57856417", "0.5665937", "0.55605024", "0.55246353", "0.5397714", "0.5371809", "0.5283407", "0.52216864", "0.51886463", "0.5177959", "0.5160277", "0.5157784", "0.51423675", "0.51132643", "0.51029354", "0.51028657", "0.5083043", "0.5074328", "0.50735605", "0.50595444", "0.5043701", "0.50255084", "0.5020398", "0.50144184", "0.50101197", "0.4998113", "0.4995197", "0.49941882", "0.49680164", "0.4965816" ]
0.58466405
0
Signal for volume > 20 SMA
def signal_volume(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def signal_rsi(self):\n pass", "def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)", "def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5,\n weight=0.5, plot=False):\n\n if weight >= 1:\n weight = 0.99\n if weight <= 0:\n weight = 0.01\n\n # Step 1: feature extraction\n signal = audioBasicIO.stereo_to_mono(signal)\n st_feats, _ = stf.feature_extraction(signal, sampling_rate,\n st_win * sampling_rate,\n st_step * sampling_rate)\n\n # Step 2: train binary svm classifier of low vs high energy frames\n # keep only the energy short-term sequence (2nd feature)\n st_energy = st_feats[1, :]\n en = np.sort(st_energy)\n # number of 10% of the total short-term windows\n st_windows_fraction = int(len(en) / 10)\n\n # compute \"lower\" 10% energy threshold\n low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15\n\n # compute \"higher\" 10% energy threshold\n high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15\n\n # get all features that correspond to low energy\n low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]\n\n # get all features that correspond to high energy\n high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]\n\n # form the binary classification task and ...\n features = [low_energy.T, high_energy.T]\n # normalize and train the respective svm probabilistic model\n\n # (ONSET vs SILENCE)\n features_norm, mean, std = at.normalize_features(features)\n svm = at.train_svm(features_norm, 1.0)\n\n # Step 3: compute onset probability based on the trained svm\n prob_on_set = []\n for index in range(st_feats.shape[1]):\n # for each frame\n cur_fv = (st_feats[:, index] - mean) / std\n # get svm probability (that it belongs to the ONSET class)\n prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])\n prob_on_set = np.array(prob_on_set)\n\n # smooth probability:\n prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)\n\n # Step 4A: detect onset frame indices:\n prog_on_set_sort = np.sort(prob_on_set)\n\n # find probability Threshold as a weighted average\n # of top 10% and lower 10% of the values\n nt = int(prog_on_set_sort.shape[0] / 10)\n threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +\n weight * np.mean(prog_on_set_sort[-nt::]))\n\n max_indices = np.where(prob_on_set > threshold)[0]\n # get the indices of the frames that satisfy the thresholding\n index = 0\n seg_limits = []\n time_clusters = []\n\n # Step 4B: group frame indices to onset segments\n while index < len(max_indices):\n # for each of the detected onset indices\n cur_cluster = [max_indices[index]]\n if index == len(max_indices)-1:\n break\n while max_indices[index+1] - cur_cluster[-1] <= 2:\n cur_cluster.append(max_indices[index+1])\n index += 1\n if index == len(max_indices)-1:\n break\n index += 1\n time_clusters.append(cur_cluster)\n seg_limits.append([cur_cluster[0] * st_step,\n cur_cluster[-1] * st_step])\n\n # Step 5: Post process: remove very small segments:\n min_duration = 0.2\n seg_limits_2 = []\n for s_lim in seg_limits:\n if s_lim[1] - s_lim[0] > min_duration:\n seg_limits_2.append(s_lim)\n seg_limits = seg_limits_2\n\n if plot:\n time_x = np.arange(0, signal.shape[0] / float(sampling_rate), 1.0 /\n sampling_rate)\n\n plt.subplot(2, 1, 1)\n plt.plot(time_x, signal)\n for s_lim in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.subplot(2, 1, 2)\n plt.plot(np.arange(0, prob_on_set.shape[0] * st_step, st_step), \n prob_on_set)\n plt.title('Signal')\n for s_lim 
in seg_limits:\n plt.axvline(x=s_lim[0], color='red')\n plt.axvline(x=s_lim[1], color='red')\n plt.title('svm Probability')\n plt.show()\n\n return seg_limits", "def slotVolume(self, a0):\n self.sampleGroup.action('volume', value=a0)", "def signal_spectral(signal, FS):\n # check inputs\n if signal is None or signal == []:\n print(\"Signal is empty.\")\n\n # ensure numpy\n signal = np.array(signal)\n # f, spectrum = st.welch_spectrum(signal, sampling_rate=FS)\n spectrum = np.fft.fft(signal, FS)[:len(signal)//2]\n f = np.fft.fftfreq(len(signal))[:len(signal)//2]\n\n cum_ff = np.cumsum(spectrum)\n spect_diff = np.diff(spectrum)\n #energy, _ = st.signal_energy(spectrum, f)[:]\n\n args, names = [], []\n\n if dict['spectral_maxpeaks']['use'] == 'yes':\n # spectral_maxpeaks\n try:\n spectral_maxpeaks = np.sum([1 for nd in range(len(spect_diff[:-1])) if (spect_diff[nd+1]<0 and spect_diff[nd]>0)])\n except:\n spectral_maxpeaks = None\n args += [spectral_maxpeaks]\n names += ['spectral_maxpeaks']\n\n # if dict['spect_var']['use'] == 'yes':\n # # spect_variation\n # try:\n # spect_var = np.convolve(energy)\n # spect_var /= np.max(np.abs(spect_var))\n # except:\n # spect_var = None\n # args += [spect_var]\n # names += ['spect_var']\n\n if dict['curve_distance']['use'] == 'yes':\n # curve_distance\n try:\n curve_distance = np.sum(np.linspace(0, cum_ff[-1], len(cum_ff)) - cum_ff)\n except:\n curve_distance = None\n args += [curve_distance]\n names += ['curve_distance']\n\n if dict['spectral_roll_off']['use'] == 'yes':\n # spectral_roll_off\n try:\n spectral_roll_off = spectral_roll(f, spectrum, cum_ff, 0.95)[0]\n except:\n spectral_roll_off = None\n args += [spectral_roll_off]\n names += ['spectral_roll_off']\n\n if dict['spectral_roll_on']['use'] == 'yes':\n # spectral_roll_on\n try:\n spectral_roll_on = spectral_roll(f, spectrum, cum_ff, 0.05)[0]\n except:\n spectral_roll_on = None\n args += [spectral_roll_on]\n names += ['spectral_roll_on']\n\n if dict['spectral_dec']['use'] == 'yes':\n # spectral_decrease\n try:\n spectral_dec = (1/np.sum(spectrum)) * np.sum((spectrum[:] - spectrum[1])/np.linspace(1, len(spectrum), len(spectrum),1))\n except:\n spectral_dec = None\n args += [spectral_dec]\n names += ['spectral_dec']\n\n if dict['spectral_slope']['use'] == 'yes':\n # spectral_slope\n sum_f = np.sum(f)\n len_f = len(f)\n try:\n spectral_slope = (len_f * np.dot(f, spectrum) - sum_f * np.sum(spectrum)) / (len_f * np.dot(f, f) - sum_f ** 2)\n except:\n spectral_slope = None\n args += [spectral_slope]\n names += ['spectral_slope']\n\n sum_spectrum = np.sum(spectrum)\n norm_spectrum = spectrum / sum_spectrum\n # spectral_centroid\n try:\n spectral_centroid = np.dot(f, norm_spectrum)\n except:\n spectral_centroid = None\n\n # spectral_spread\n try:\n spectral_spread = np.dot(((f - spectral_centroid) ** 2), norm_spectrum)\n except:\n spectral_spread = None\n\n if dict['spectral_spread']['use'] == 'yes':\n args += [spectral_spread]\n names += ['spectral_spread']\n\n if dict['spectral_kurtosis']['use'] == 'yes':\n # spectral_kurtosis\n try:\n spectral_kurtosis = np.sum(((f - spectral_centroid) ** 4) * norm_spectrum) / (spectral_spread**2)\n except:\n spectral_kurtosis = None\n args += [spectral_kurtosis]\n names += ['spectral_kurtosis']\n\n if dict['spectral_skewness']['use'] == 'yes':\n # spectral_skewness\n try:\n spectral_skewness = np.sum(((f - spectral_centroid) ** 3) * norm_spectrum) / (spectral_spread ** (3 / 2))\n except:\n spectral_skewness = None\n args += [spectral_skewness]\n names += 
['spectral_skewness']\n\n if dict['max_frequency']['use'] == 'yes':\n # max_frequency\n try:\n max_frequency = f[np.where(cum_ff > cum_ff[-1]*0.95)[0][0]]\n except:\n max_frequency = None\n args += [max_frequency]\n names += ['max_frequency']\n\n if dict['fundamental_frequency']['use'] == 'yes':\n # fundamental_frequency\n try:\n fundamental_frequency = f[np.where(cum_ff > cum_ff[-1]*0.5)[0][0]]\n except:\n fundamental_frequency = None\n args += [fundamental_frequency]\n names += ['fundamental_frequency']\n\n # if dict['max_power_spectrum']['use'] == 'yes':\n # # max_power_spectrum\n # try:\n # max_power_spectrum = np.max(spectrum)\n # except:\n # max_power_spectrum = None\n # args += max_power_spectrum\n # names += 'max_power_spectrum'\n\n # if dict['mean_power_spectrum']['use'] == 'yes':\n # # mean_power_spectrum\n # try:\n # mean_power_spectrum = np.mean(spectrum)\n # except:\n # mean_power_spectrum = None\n # args += mean_power_spectrum\n # names += 'mean_power_spectrum'\n #\n # if dict['spectral_skewness']['use'] == 'yes':\n # try:\n # spectral_skewness = np.mean(spectrum)\n # except:\n # spectral_skewness = None\n # args += spectral_skewness\n # names += 'spectral_skewness'\n #\n # if dict['spectral_kurtosis']['use'] == 'yes':\n # try:\n # spectral_kurtosis = np.mean(spectrum)\n # except:\n # spectral_kurtosis = None\n # args += spectral_kurtosis\n # names += 'spectral_kurtosis'\n\n # if dict['spectral_hist_']['use'] == 'yes':\n # # histogram\n # try:\n # _hist = list(np.histogram(spectrum, bins=int(np.sqrt(len(spectrum))), density=True)[0])\n # except:\n # if len(signal) > 1:\n # _hist = [None] * int(np.sqrt(len(signal)))\n # else:\n # _hist = [None]\n # args += [i for i in _hist]\n # names += ['spectral_hist_' + str(i) for i in range(len(_hist))]\n\n #return utils.ReturnTuple(tuple(args), tuple(names))\n return args, names", "def adc(self, signal):", "def toa_incoming_shortwave_flux(srad0, srad0u):\n return srad0 - srad0u", "def SimpleMovingAverage(self, timeperiod = 14): \r\n return ta.SMA(self.data.close,timeperiod)", "def sma(self) -> float:\n return self._sma", "def f_volume(self,event):\n \n def delta(event):\n if event.num == 5 or event.delta < 0: #indique un sens de rotation de l'event \"tourner molette\"\n return -1 \n return 1 \n self.count += delta(event)\n if self.count < 0: #sinon on descend dans les negatifs si on tourne trop la molette\n self.count =0 #--> pas tres utile ni pratique pour remonter\n \n elif self.count > 100: #de meme si on a trop tourner dans l'autre sens.\n self.count =100 \n \n self.control_volume.set(self.count) #On attribut la nouvelle valeur à \"control_volume\"", "def spike_amplitude(V, t_spike):\n # handle no spike found\n if t_spike is None:\n return None\n Vmax = V[t_spike]\n Vmin = np.min(V[t_spike+1:t_spike+500])\n\n return Vmax - Vmin", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = 
pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def total_volume(self):", "def rt60_sabine(S, V, a, m, c):\n\n return (24 * np.log(10) / c) * V / (a * S + 4 * m * V)", "def arima_sma(prices, signal, name):\n\n sma_window = signal['params']['sma_window']\n sma_close = talib.SMA(prices['close'], sma_window).to_numpy()[:, None]\n signal['data'] = arima(sma_close, signal['params']['arima_window'], name)", "def analytic(self):\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n a_signal =\\\r\n ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape,\r\n dtype='D'), sampling_rate=sampling_rate)\r\n if self.freqs.ndim == 0:\r\n w = self.wavelet(self.freqs, self.sd,\r\n sampling_rate=sampling_rate, ns=5,\r\n normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[...] = (np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n else:\r\n for i, (f, sd) in enumerate(zip(self.freqs, self.sd)):\r\n w = self.wavelet(f, sd, sampling_rate=sampling_rate,\r\n ns=5, normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[i, ...] 
= (\r\n np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n\r\n return a_signal", "def sma(self, sma: float):\n\n self._sma = sma", "def volume(self):\n vol = self.daily['Volume']\n sma = vol.rolling(20).mean()\n std = vol.rolling(20).std()\n upper = sma + std\n lower = sma - std\n\n if vol[-1] > upper[-1]:\n self.debug += '\\nVolume > 1 STD above sma: buys + 1 and sells + 1'\n self.sells += 1\n self.buys += 1\n else:\n self.debug += '\\nVolume in normal levels'", "def f_sin(k):\n return k * k * k * pk(k, suppression)", "def spectral(w, s=1.0):\n n_in, n_out = w.size()\n n = max(n_out, n_in)\n gain = s / math.sqrt(n)\n return w.normal_(0, 1).mul_(gain)", "def note(freq):\n data = np.sin(2.0 * np.pi * freq * t) * amp\n return data", "def noise(self, freq: int, /) -> None:", "def volume(mid, vols):\n bt = mid.ticks_per_beat\n trk = MidiTrack()\n trk.name = \"Volume variation\"\n trk.append(Message(\"control_change\",\n control=7,\n time=0,\n value=vols[0]))\n\n for i, vol in enumerate(vols):\n trk.append(Message(\"control_change\",\n control=7,\n time=bt,\n value=vol))\n\n mid.tracks.append(trk)\n return mid", "def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. 
Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = 
ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! 
Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n 
plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado", "def normalize_signal(signal):\n gain = 1.0 / (np.max(np.abs(signal)) + 1e-9)\n return signal * gain", "def bsm_vega(S0, K, T, r, sigma):\n \n from math import log, sqrt\n from scipy import stats\n \n S0 = float(S0)\n d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T / (sigma * sqrt(T))\n vega = S0 * stats.normcdf(d1, 0.0, 1.0) * sqrt(T)\n return vega\n \n# Implied volatility function\n\ndef bsm_call_imp_vol(S0, K, T, r, C0, sigma_est, it = 100):\n \"\"\" \n Implied volatility of European call option in BSM model\n \n Parameters\n ==========\n S0 : Float\n Initial stock/index level\n K : Float\n Strike Price\n T : Float\n Maturity Date (in year fractions)\n r : Float\n Constant risk-free short rate\n sigma_est : Float\n Estimate of impl. 
volatility\n it : integer\n Number of iterations\n \n Returns\n =======\n sigma_est : Float\n Numerically estimated implied volatility\n \"\"\"\n for i in range(it):\n sigma_est -= ((bsm_call_value(S0, K, T, r, sigma_est) - C0)\n / bsm_vega(S0, K, T, r, sigma_est))\n return sigma_est", "def filter_audio(audio):\n\n # Calculate voice energy for every 123 ms block\n apower = lr.amplitude_to_db(np.abs(lr.stft(audio, n_fft=2048)), ref=np.max)\n\n # Summarize energy of every rate, normalize\n apsums = np.sum(apower, axis=0) ** 2\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Smooth the graph for saving short spaces and pauses, remove sharpness\n apsums = np.convolve(apsums, np.ones((9,)), 'same')\n # Normalize again\n apsums -= np.min(apsums)\n apsums /= np.max(apsums)\n\n # Set noise limit to 35% over voice\n apsums = np.array(apsums > 0.35, dtype=bool)\n\n # Extend the blocks every on 125ms\n # before separated samples (2048 at block)\n apsums = np.repeat(apsums, np.ceil(len(audio) / len(apsums)))[:len(audio)]\n\n return audio[apsums]", "def increase_volume(current_sound_samples,change_amount):\n for sample in current_sound_samples:\n sample*=change_amount", "def signal(x):\r\n if x >= 0.0:\r\n return 1.0\r\n return -1.0", "def volume_oscillator(period1,period2):\n return period1/period2" ]
[ "0.6216598", "0.61762625", "0.5933593", "0.585632", "0.58003706", "0.57687545", "0.5738506", "0.5722117", "0.5672636", "0.5607543", "0.5602781", "0.559767", "0.5591002", "0.5564034", "0.55251837", "0.5514501", "0.54904765", "0.54731554", "0.5459457", "0.5459255", "0.5444476", "0.5442944", "0.5441151", "0.5432282", "0.54295295", "0.5409217", "0.5391245", "0.53693897", "0.53503245", "0.5346955" ]
0.6485503
0
Signal for RSI > 60
def signal_rsi(self):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slot_timer(self, _sender, _data):\r\n if self.connected:\r\n if time.time() - self._time_last_received > 60:\r\n self.debug(\"### did not receive anything for a long time, disconnecting.\")\r\n self.force_reconnect()\r\n self.connected = False\r\n if time.time() - self._time_last_subscribed > 1800:\r\n # sometimes after running for a few hours it\r\n # will lose some of the subscriptons for no\r\n # obvious reason. I've seen it losing the trades\r\n # and the lag channel channel already, and maybe\r\n # even others. Simply subscribing again completely\r\n # fixes this condition. For this reason we renew\r\n # all channel subscriptions once every hour.\r\n self.debug(\"### refreshing channel subscriptions\")\r\n self.channel_subscribe(False)", "def subscribe(receiver, updateInterval=10):", "def IR_sensor(self):\n self.serial.reset_input_buffer() # clear buffer\n self.send(b\"kk\\n\")\n # next line depends on read timeout\n result = self.serial.read(1)\n if result == b'':\n print(\"no IR data returned\")\n return 2 # if 2 returned do it again\n else:\n result = int.from_bytes(result, \"big\")\n return result", "def alarm(self, interval, call):", "def signal(self):\n pass", "def subscribe(receiver, updateInterval=None):", "def on_timer(self):\n self.read_serial_data()\n # self.update_monitor()", "def signal_oi(self):\n pass", "def slot_keepalive_timer(self, _sender, _data):\r\n if self.connected:\r\n #self.debug(\"### sending keepalive\")\r\n self._try_send_raw(\"2::\")", "def rtsOn():\n pass", "def signal_volume(self):\n pass", "def juguemos(self):\r\n msje = {\"status\": \"queremos_jugar\",\r\n \"data\": {\"nada\": None}}\r\n sleep(0.1)\r\n self.server_signal_2.emit(msje)", "def on_r_joy_x(self):\r\n self.log()", "def ScanSignal(self, service, antenna, scan_count):\n raise NotImplementedError", "def signal_on (self, intervals = 20):\n while self.bat == False:\n signals = stereo('left_sig.wav', 'right_sig.wav')\n print (f\"{pd.Timestamp.now()} playing signal\")\n self.df_signal.loc[pd.Timestamp.now().strftime('%d-%m-%Y-%H:%M:%S')] = 'play'\n df = pd.concat([self.df_signal, self.df_feeder, self.df_pump], axis=1, sort = True)\n df.to_csv(f\"{pd.Timestamp.now().strftime('%Y-%m-%d')}.csv\")\n # print (df)\n signals.run() # play signals from both feeders\n flag_t = 0\n t_end = time.time()+intervals\n while time.time() < t_end:\n self.read_rfid()\n self.decide()\n if self.bat == True and flag_t == 0:\n self.which_pump()\n flag_t += 1\n if self.bat == True:\n break", "def openCircuit(srv):", "def realtime(self):", "def signal_qual(self):\n sig = None\n while sig is None:\n sig = self.acquire_signal_quality()\n time.sleep(0.5)\n return sig", "def on_R1(self):\r\n self.log()", "def signal_handler(*args):\n if station:\n station.shutdown()", "async def send_ir(self):\n fan_speed = self.fan_mode\n # tweak for some ELECTRA_AC devices\n if HVAC_FAN_MAX_HIGH in self._fan_list and HVAC_FAN_AUTO_MAX in self._fan_list:\n if self.fan_mode == FAN_HIGH:\n fan_speed = HVAC_FAN_MAX\n if self.fan_mode == HVAC_FAN_MAX:\n fan_speed = HVAC_FAN_AUTO\n\n\n # Set the swing mode - default off\n self._swingv = STATE_OFF if self._fix_swingv is None else self._fix_swingv\n self._swingh = STATE_OFF if self._fix_swingh is None else self._fix_swingh\n\n if SWING_BOTH in self._swing_list or SWING_VERTICAL in self._swing_list:\n if self._swing_mode == SWING_BOTH or self._swing_mode == SWING_VERTICAL:\n self._swingv = STATE_AUTO\n\n if SWING_BOTH in self._swing_list or SWING_HORIZONTAL in self._swing_list:\n if 
self._swing_mode == SWING_BOTH or self._swing_mode == SWING_HORIZONTAL:\n self._swingh = STATE_AUTO\n\n _dt = dt_util.now()\n _min = _dt.hour * 60 + _dt.minute\n\n # Populate the payload\n payload_data = {\n \"StateMode\": self._state_mode,\n \"Vendor\": self._vendor,\n \"Model\": self._model,\n \"Power\": self.power_mode,\n \"Mode\": self._last_on_mode if self._keep_mode else self._hvac_mode,\n \"Celsius\": self._celsius,\n \"Temp\": self._target_temp,\n \"FanSpeed\": fan_speed,\n \"SwingV\": self._swingv,\n \"SwingH\": self._swingh,\n \"Quiet\": self._quiet,\n \"Turbo\": self._turbo,\n \"Econo\": self._econo,\n \"Light\": self._light,\n \"Filter\": self._filter,\n \"Clean\": self._clean,\n \"Beep\": self._beep,\n \"Sleep\": self._sleep,\n \"Clock\": int(_min),\n \"Weekday\": int(_dt.weekday()),\n }\n self._state_mode = DEFAULT_STATE_MODE\n for key in self._toggle_list:\n setattr(self, '_' + key.lower(), 'off')\n\n payload = (json.dumps(payload_data))\n \n # Publish mqtt message\n if float(self._mqtt_delay) != float(DEFAULT_MQTT_DELAY):\n await asyncio.sleep(float(self._mqtt_delay))\n \n await mqtt.async_publish(self.hass, self.topic, payload)\n\n # Update HA UI and State\n self.async_schedule_update_ha_state()", "def request_heartbeat(serialport, gid, uid):\n pack = bytearray(16)\n pack[0] = 0x02\n pack[1] = gid\n pack[2] = uid\n pack[3] = 0x00\n\n for i in range(5):\n serialport.write(pack)\n line = serialport.read(16)\n if line[3] == 1:\n return True\n\n return False", "def __set_signal_refresh(self):\n self._signal_refresh = True", "def tick(self):\n self.connect()", "def signal(self, args):\n pass", "async def on_bits_donated(self, msg: Message, bits: int):\n global timer_seconds\n global timer_instance\n if timer_instance > 0: # if a pomo timer is running\n await channel.send_message(f'{bits} donated while a Pomodoro is running! 
- increasing the timer by {int(bits * bits_exchangerate)} seconds!')\n timer_seconds += int(bits * bits_exchangerate)", "def spin(self):\n rate = rospy.Rate(5) # hz\n while not rospy.is_shutdown():\n if self.parameters.charging:\n self.dynamic_reconfigure_server.update_configuration({\"charging_percentage\": min(100, self.battery.percentage + self.charging_increment)})\n else:\n self.dynamic_reconfigure_server.update_configuration({\"charging_percentage\": max(0, self.battery.percentage - self.charging_increment)})\n self.battery.header.stamp = rospy.get_rostime() # get_rostime() returns the time in rospy.Time structure\n self.battery_publisher.publish(self.battery)\n rate.sleep()", "def talker(self):\n wind_pub = rospy.Publisher('wind_sensor/wind_vector', Vector3Stamped, queue_size=10)\n rpy_pub = rospy.Publisher('wind_sensor/roll_pitch_yaw', Vector3Stamped, queue_size=10)\n temp_pub = rospy.Publisher('wind_sensor/temperature', Float64, queue_size=10)\n battery_pub = rospy.Publisher('wind_sensor/battery_voltage', Float64, queue_size=10)\n rospy.init_node('wind_sensor_node', anonymous=True, log_level=rospy.get_param(\"log_level\", rospy.INFO))\n rate = rospy.Rate(8) # refresh rate in hz\n rospy.sleep(5)\n while not rospy.is_shutdown():\n self.wnd.update()\n wind_vector = self.wnd.get_wind_vector()\n vec_msg = Vector3Stamped()\n vec_msg.header.stamp = rospy.Time.now()\n vec_msg.vector.x = -wind_vector[0]\n vec_msg.vector.y = -wind_vector[1]\n vec_msg.vector.z = 0\n\n rpy_vector = self.wnd.get_rpy()\n rpy_msg = Vector3Stamped()\n rpy_msg.header.stamp = rospy.Time.now()\n rpy_msg.vector.x = -rpy_vector[0]\n rpy_msg.vector.y = rpy_vector[1]\n rpy_msg.vector.z = -rpy_vector[2]\n\n battery_msg = Float64()\n battery_msg = self.wnd.get_battery_charge()\n\n temp_msg = Float64()\n temp_msg = self.wnd.get_temp()\n temp_msg -= 273.15 # convert to celsius from kelvin\n stdoutdata = sp.getoutput(\"hcitool con\")\n if \"DC:73:74:12:94:80\" not in stdoutdata.split():\n rospy.logerr(\"Connection Failed, Reconnecting!\")\n self.wnd.close()\n self.wnd = WindSensor()\n rospy.sleep(5)\n wind_pub.publish(vec_msg)\n rpy_pub.publish(rpy_msg)\n battery_pub.publish(battery_msg)\n temp_pub.publish(temp_msg)\n rate.sleep()", "def adc(self, signal):", "def on_tick(self, time):\n pass" ]
[ "0.61080366", "0.6046958", "0.5819819", "0.57507235", "0.5733319", "0.563774", "0.56190205", "0.5609547", "0.55858874", "0.5540874", "0.5450107", "0.5448421", "0.5423741", "0.5396912", "0.53902006", "0.5377324", "0.5354077", "0.53379124", "0.53039145", "0.53007734", "0.52944636", "0.52901685", "0.5284343", "0.52823836", "0.52702695", "0.5261448", "0.525921", "0.5244827", "0.52403045", "0.5239982" ]
0.63109714
0
Get one news from mongoDB
def get_one_news(self):  # pylint: disable=no-self-use
    return operations.get_one_news()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obj_get(self, request=None, **kwargs):\n return Document(self.get_collection(request).find_one({\n \"_id\": ObjectId(kwargs.get(\"pk\"))\n }))", "def get(self):\n return GlobalNews.retrieve()", "def search_news(self, key=None):\r\n\r\n # Connect tp MongoDB database to search param\r\n try:\r\n conn = MongoClient()\r\n except:\r\n print(\"Could not connect to MongoDB\") # database\r\n\r\n # Connect to database\r\n db = conn.NewsScrapy\r\n\r\n # Created or Switched to collection names: newscollections\r\n collection = db.newscollections\r\n regx = re.compile(\"key\", re.IGNORECASE)\r\n\r\n matches = collection.find_one({\"text\": regx})\r\n if not matches:\r\n titles = collection.find_one({\"title\": regx})\r\n print (titles)\r\n\r\n print (matches)", "def one(self):\n try:\n return self[0]\n except IndexError:\n raise self.document.DoesNotExist", "def find_one():\n fmter.tpl._straightline(\"one document\", 100)\n result = users.find_one({})\n print(type(result))\n ppt(result)\n \n fmter.tpl._straightline(\"none result\", 100)\n result = users.find_one({\"_id\": 100})\n print(type(result))\n ppt(result)", "def getFirstDocument(address=\"\", database=\"\", collection=\"\"):\n\n document = []\n client = connectMongo(address, database, collection)\n\n document.append(client.find_one())\n\n return document", "def find_one(self, collection, query):\n obj = getattr(self.db, collection)\n result = obj.find_one(query)\n return result", "def get(cls, db, id):\n doc = cls.collection(db).find_one(filter={ '_id': ObjectId(id) })\n return Todo(**doc)", "def first(self, **kwargs):\n return self.find(**kwargs).first()", "async def get_news(q: str = None):\n\treturn aggregate_news(q)", "def get_single_data(document_id):\n data = collection.find_one({'_id': ObjectId(document_id)})\n return data", "def get_one(collection: Collection, query: Dict[str, Any]):\n data = collection.find_one(query)\n if data is None:\n raise CannotFindItemInDatabase(query, data, collection.name)\n return data", "def get_article(self, slug):\n\t\tarticle = Blog.objects.get(slug=slug)\n\t\treturn article", "def scrape():\n\n news, table = scrape_mars.scrape()\n \n # Query nasa_news collection and find latest article\n nasa_news = mongo.db.nasa_news\n latest_news = news[0]\n nasa_news.update({}, latest_news, upsert=True)\n\n # Query featured_img collection and find latest full-size photo\n \n\n # Query table collection and find latest full-size photo\n table = mongo.db.mars_facts\n #table.update({}, table, upsert=True)\n\n return redirect(\"/\", code=302)", "def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article of slug {slug} nonexistent')\n else:\n return article", "def fetch(cls, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise exceptions.NotFound(f'Article with slug {slug} nonexistent')\n else:\n return article", "def get(self, id, model_type=None):\n # If collection is not specified, use the collection when this client is\n if not model_type:\n collection = self._collection\n else:\n collection = self._db[model_type]\n\n print 'mongo.get(): id={}'.format(id)\n if id:\n obj = collection.find_one({'_id': ObjectId(id)})\n if not obj:\n raise DbProviderError(\"DB record for {} is not found\".format(id))\n obj['_id'] = str(obj['_id'])\n else:\n obj = {}\n return obj", "def get_article(db:Session, article_id:int):\n return db.query(ArticleModel).filter(ArticleModel.id==article_id).first()", "def 
intilise_database():\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n mydb=myclient['subreddit']\n maintable = mydb[\"posts2\"]\n return maintable", "def get_from_db(date, source, connection, logger):\n check_date(date, logger)\n result = execute_news(date, connection, source, logger)\n if len(result) == 0:\n raise SystemExit(f\"Sorry, there are no articles for {date}!\")\n else:\n return result", "def get(self, query_data=None, id_obj=None):\n if id_obj:\n return self.collection.find_one({'_id': id_obj})\n return self.collection.find_one(query_data)", "async def get_one(self, where):\n\n pass", "def news(id):\n\tarticle = data.get_article(id)\n\tif not article:\n\t\treturn \"article not found\", 404\n\treturn render_template(\"article.html\", article=article)", "def get_blog():\n conn = pymongo.Connection(\"localhost\",27017)\n db = conn[\"paperDB\"]\n infoDB = db.infoDB\n record = infoDB.find_one()\n\n del record['_id']\n del record['comment'] # reserve space\n\n # Since there's only one blog per page, use namedtuple\n blog = namedtuple('Blog', record.keys())(*record.values())\n print \"Blog loaded.\"\n return blog", "def get(self):\n\n doc_id = request.args.get(\"id\")\n\n try:\n doc = get_doc(doc_id)\n\n except Exception as e:\n print(e)\n return {\n \"error\": 1,\n \"describe\": \"找不到你要的博客\"\n }\n\n return jsonify({\n \"errno\": 0,\n \"describe\": \"ok\",\n \"doc\": doc\n })", "def get_one(self, index, *args, **kw):\n person = M.People.query.get(index=index)\n log.debug('person {}'.format(person))\n if(person):\n kw['_id'] = person._id\n return super(PeopleAPIController, self).get_one(*args, **kw)", "def get_news(self, keyword, since=None, to=None, page=None):\n payload = {}\n url = \"https://newsapi.org/v2/everything\"\n payload['q'] = keyword\n if since is not None:\n try:\n start_dt = dateutil.parser.parse(since)\n if to is not None:\n to_dt = dateutil.parser.parse(to)\n else:\n to_dt = datetime.datetime.now()\n except ValueError:\n raise IOError('since parameter can not be converted to datetime')\n payload['from'] = start_dt.isoformat()\n payload['to'] = to_dt.isoformat()\n payload['language'] = 'en'\n payload['pageSize'] = 20\n payload['sortBy'] = 'popularity'\n payload['excludeDomains'] = 'startribune.com'\n if page is not None and type(page) == int and page > 0:\n payload['page'] = page\n r = requests.get(url, auth=self.auth, params=payload)\n return r.content", "def get_task(task_id):\n return db.task.find_one({'_id': ObjectId(task_id)})", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def get(self, *args):\n return self.docs.get(*args)" ]
[ "0.6565657", "0.6507733", "0.64140624", "0.628049", "0.62576056", "0.6170488", "0.6133731", "0.6109496", "0.60684144", "0.605034", "0.6038439", "0.60371333", "0.5964813", "0.5918189", "0.5864173", "0.5860944", "0.58509344", "0.5848587", "0.5828884", "0.5821581", "0.580858", "0.5784866", "0.57842517", "0.5781701", "0.57595396", "0.57530487", "0.57418406", "0.5719366", "0.5703656", "0.5702645" ]
0.6957589
0