Dataset columns:
  query            string   lengths 9 to 9.05k
  document         string   lengths 10 to 222k
  metadata         dict
  negatives        list     30 items per row
  negative_scores  list     30 items per row
  document_score   string   lengths 4 to 10
  document_rank    string   2 distinct values
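Each row below pairs a natural-language query with a positive code document, a list of 30 negative code snippets, their scores, and the positive document's score and rank; the metadata marks the triplet objective over (query, document, negatives). The following is a minimal sketch of reading one such row with the Hugging Face datasets library; the dataset name and split are hypothetical placeholders.

```python
# Minimal sketch of reading one row. The dataset name and split are
# hypothetical placeholders -- substitute the actual repository id.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")  # hypothetical name/split

row = ds[0]
print(row["query"])                 # natural-language query (e.g. a short task description)
print(row["document"])              # positive code snippet paired with the query
print(len(row["negatives"]))        # 30 negative code snippets per row
print(len(row["negative_scores"]))  # 30 scores, aligned with the negatives list
print(row["document_score"])        # score of the positive document (stored as a string)
print(row["document_rank"])         # rank of the positive document
```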
Get news summaries for user
def get_news_summaries_for_user(self, user_id, page_num): # pylint: disable=no-self-use print 'get summaries for user is called! with %s and %s' % (user_id, page_num) return operations.get_news_summaries_for_user(user_id, page_num)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_news_summaries_for_user(user_id, page_num):\n LOGGER.debug('get_news_summaries_for_user is called with %s and %s', user_id, str(page_num))\n return operations.get_news_summaries_for_user(user_id, page_num)", "def getNewsFeed(self, userId: int) -> List[int]:\n news = []\n for posts in self.user_post[userId]:\n news.append(posts)\n for users in self.user_followed[userId]:\n news += self.user_post[users]\n news.sort(key=lambda x: -x[1])\n res = []\n for i, j in news:\n if i not in set(res):\n res.append(i)\n return res[:10]", "def getNewsFeed(self, userId: int):\n if userId not in self.followList:\n self.followList[userId] = [userId]\n res = []\n\n for user in self.followList[userId]:\n if self.tweetTimeLine.get(user, [user]):\n res += self.tweetTimeLine.get(user, [])\n res.sort()\n res = res[:10]\n # print(res)\n return [i[1] for i in res]", "def getNewsFeed(self, userId: int) -> List[int]:\n if userId not in self.users:\n self._create_user(userId)\n all_users = list(self.users[userId])\n # num_each_posted = [len(self.users_tweet[user_id]) for user_id in all_users]\n heap = []\n for user_id in all_users:\n for item in self.users_tweet[user_id]:\n heappush(heap, item)\n ret = [x[1] for x in nlargest(10, heap)]\n return ret", "def getNewsFeed(self, userId: int):\n tweets = heapq.merge(*(self.tweets[u] for u in self.followees[userId] | {userId}))\n return [t for _, t in itertools.islice(tweets, 10)]", "def getNewsFeed(self, userId: int) -> 'List[int]':\n news = [v for v in self.posts[userId]]\n for id in self.users[userId]:\n news += self.posts[id]\n return [id for _, id in sorted(news, reverse=True)[:10]]", "def getNewsFeed(self, userId):\n tws = []\n # 先获取用户自己发的推特\n if self.twitter_pool[userId]:\n tws = self.twitter_pool[userId]\n # 再看关注的人发的推特\n if self.user_pool[userId].user_id:\n follows_user = self.user_pool[userId].follows\n if follows_user:\n for user_id in follows_user:\n tws.extend(self.twitter_pool[user_id])\n\n # 按时间排序取前 10 条\n if tws:\n tws = sorted(tws, key=lambda x: x[1], reverse=True)[:10]\n return tws", "def getNewsFeed(self, userId):\n users = list(self.follows.get(userId, set()) | set([userId]))\n pointers = [len(self.tweets.get(u, [])) - 1 for u in users]\n h = []\n heapq.heapify(h)\n for i in xrange(len(users)):\n if pointers[i] >= 0:\n heapq.heappush(h, self.tweets[users[i]][pointers[i]] + [i])\n pointers[i] -= 1\n feed = []\n while h and len(feed) < 10:\n time, tweet, i = heapq.heappop(h)\n feed.append(tweet)\n if pointers[i] >= 0:\n heapq.heappush(h, self.tweets[users[i]][pointers[i]] + [i])\n pointers[i] -= 1\n return feed", "def getNewsFeed(self, userId):\r\n tweets = []\r\n \r\n tweets += self.tweets_by_user[userId]\r\n for other in self.follows[userId]:\r\n tweets += self.tweets_by_user[other]\r\n \r\n last_10_tweets = sorted(tweets)[-10:]\r\n return [tweetId for _, tweetId in last_10_tweets][::-1]", "def getNewsFeed(self, userId):\n users = [userId]\n if userId in self.follow_map:\n users.extend(self.follow_map[userId])\n \n heap = MinHeap(10)\n for uid in set(users):\n for tweet in self.user_tweets.get(uid, []):\n heap.push(tweet)\n return [e[0] for e in heap.items()]", "def getNewsFeed(self, userId: int) -> List[int]:\n users = {userId, *self.follow_map[userId]}\n return self.merge([self.posts[user] for user in users if len(self.posts[user]) > 0], 10)", "def get_news(request):\n return get_all_posts(request, PostType.NEWS)", "def getNewsFeed(self, userId):\n tweets = self.tweets\n star = self.followstar.get(userId, set()) | set([userId])\n tw = []\n for 
people in star:\n if people in tweets:\n tw.append((tweets[people][-1][0], tweets[people][-1][1], people, len(tweets[people])-1))\n heapq.heapify(tw)\n \n ans = []\n while len(ans) < 10 and len(tw) != 0:\n u = heapq.heappop(tw)\n ans.append(u[1])\n if u[3] > 0:\n heapq.heappush(tw, (tweets[u[2]][u[3]-1][0], tweets[u[2]][u[3]-1][1], u[2], u[3]-1))\n return ans", "def getNewsFeed(self, userId: int) -> List[int]:\n if userId not in self.users.keys():\n return []\n mine = self.users[userId].tweets[-10:]\n combine = []\n\n for followee in self.users[userId].followees.keys():\n others = self.users[followee].tweets[-10:]\n i, j = 0, 0\n while (i + j < 10 and (len(mine) + len(others) > 0)):\n if len(mine) and len(others):\n if self.tweetTime[mine[-1]] > self.tweetTime[others[-1]]:\n combine.append(mine.pop())\n i += 1\n else:\n combine.append(others.pop())\n j += 1\n elif len(mine) == 0:\n combine.append(others.pop())\n j += 1\n elif len(others) == 0:\n combine.append(mine.pop())\n i += 1\n\n mine = combine[:10]\n combine = []\n mine.reverse()\n mine.reverse()\n\n return mine", "def query_newsfeed(user, **kwargs):\n page = kwargs.get(\"page\", 0)\n max_items = kwargs.get(\"max_items\", 5)\n if page and max_items:\n start_item = (page-1)*max_items\n end_item = page*max_items\n else:\n start_item = \"\"\n end_item = \"\"\n notification_query = \"\"\"\n SELECT a.* \n FROM notifications_notification a \n WHERE ( ( NOT EXISTS (\n SELECT 1 \n FROM notifications_notification b\n WHERE b.target_object_id = a.target_object_id \n AND b.timestamp > a.timestamp\n AND b.recipient_id=%(user_id)d\n ) ) AND a.recipient_id=%(user_id)d )\n GROUP BY a.target_object_id\n ORDER BY a.timestamp DESC\n \"\"\"\n if start_item >= 0 and end_item :\n notification_query += \"LIMIT %(start_item)d,%(end_item)s\"\n \n notification_query = notification_query % {\"user_id\" : user.id, \n \"start_item\" : start_item, \n \"end_item\" : end_item,\n }\n notification_list = Notification.objects.raw(notification_query)\n return notification_list", "def getCurrentUserNewsfeed():\n if not g.user:\n return redirect(url_for('login'))\n return getUserNewsfeed(g.user)", "def getNewsFeed(self, userId: 'int') -> 'List[int]':\n self.followees[userId].add(userId)\n feeds = heapq.merge(*[iter(self.tweets[idx]) for idx in self.followees[userId]])\n return [idx for _, idx in itertools.islice(feeds, 10)]", "async def get_news(q: str = None):\n\treturn aggregate_news(q)", "def getNewsFeed(self, userId: int) -> List[int]:\n # Time Complexity: O(num_user)\n data = []\n ret = []\n\n if userId in self.tweets:\n data.append((self.tweets[userId][-1], userId, len(self.tweets[userId]) - 1))\n\n for follow in self.followees.get(userId, []):\n if follow in self.tweets:\n data.append((self.tweets[follow][-1], follow, len(self.tweets[follow]) - 1))\n\n heapq.heapify(data)\n\n for _ in range(10):\n if not data:\n break\n\n (_, tweet_id), user_id, user_ind = heapq.heappop(data)\n ret.append(tweet_id)\n\n if user_ind > 0:\n heapq.heappush(data, (self.tweets[user_id][user_ind - 1], user_id, user_ind - 1))\n\n return ret", "def top_news():\n data = get_top_news()\n return jsonify(data)", "def show_news_list():\r\n\tnews_list = Page.objects.filter(tags='news').order_by('-created')\r\n\treturn {'news_list': news_list}", "def all_news(request):\n\n all_news = News.objects.all().order_by(\"-date_added\")\n context = {\n 'news': all_news,\n 'show_without_bag': True\n }\n return render(request, 'news/news.html', context)", "def news_feed(request):\n\n all_friends = 
get_all_friends(request)\n news_feed = get_news_feed(request)\n user_profile = get_users_profile(request.user.id)\n\n context = {\n 'news_feed': news_feed,\n 'user_profile': user_profile,\n 'status_form': StatusForm,\n }\n\n return render(request, 'status/news_feed.html', context)", "def get_news(self):\n if self.api_key_entry.get() == \"\":\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = (now-datetime.timedelta(days=14))\n #today = now.strftime()\n query = \"\"\n for cat in self.sorted_categories():\n query += f\"{cat},\"\n search = api.get_top_headlines(q=query,\n sources=\"bbc-news,the-verge\",\n language=\"en\")\n news = \"\"\n for article in search[\"articles\"]:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)", "async def news(self):\n url = f\"https://newsapi.org/v2/top-headlines?country=nz&apiKey={self.bot.news_api_key}\"\n async with ClientSession() as session:\n async with session.get(url) as response:\n r = await response.json()\n firstArticle = r[\"articles\"][0]\n nSource = firstArticle[\"source\"][\"name\"]\n nTitle = firstArticle[\"title\"]\n nTimestamp = firstArticle[\"publishedAt\"]\n embed = discord.Embed(\n title=f\"News Title: {nTitle}\", description=f\"News Source: {nSource}\"\n )\n embed.add_field(name=\"News Content\", value=firstArticle[\"description\"])\n embed.set_image(url=firstArticle[\"urlToImage\"])\n embed.set_footer(text=f\"News Timestamp: {nTimestamp}\")\n\n channel = self.bot.get_channel(self.bot.main_channel_id)\n await channel.send(embed=embed)", "def get_news(keywords, news='all'):\n if news is 'all':\n return news_client.get_everything(q=keywords)\n elif news is 'top':\n return news_client.get_top_headlines(q=keywords)\n else:\n raise AttributeError(\"Optional argument news expected 'top' or 'all'\")", "def get(self):\n return GlobalNews.retrieve()", "def fetch_news(n):\n\n # This is the list we will use the pass back the news information.\n data = []\n\n # Get news stories from the MEN RSS feed.\n response = feedparser.parse('https://www.manchestereveningnews.co.uk/?service=rss')\n\n # Loop through the news items, and the pull out the data we need.\n for news in response.entries[:n]:\n data.append({\n 'headline': news.title,\n 'content': news.description,\n })\n\n return data", "def news(self):\n\n # Get articles with search term, if available, from each News API source\n news_api_articles = pd.DataFrame()\n\n q = urllib.parse.quote(\" OR \".join(self.search_terms), safe='')\n\n response = requests.get(\"https://newsapi.org/v2/everything?q=\" + q + \"&from=\" + datetime.now().strftime(\n \"%Y-%m-%d\") + \"&sortBy=popularity&pageSize=100&apiKey=\" + self.__news_api_key)\n\n if response.status_code == 200:\n data = json.loads(response.text)\n\n source_articles = []\n\n for article in data['articles']:\n source_articles.append([article['title'],\n article['description'],\n article['url'],\n article['publishedAt']])\n\n source_articles = pd.DataFrame(source_articles, columns=['title', 'description', 'url', 'publishedAt'])\n news_api_articles = pd.concat([news_api_articles, source_articles])\n\n news_api_articles = news_api_articles.reset_index(drop='True')\n\n news_api_articles['publishedAt'] = news_api_articles['publishedAt'].apply(pd.to_datetime)\n\n news_api_articles = news_api_articles.fillna(' ')\n\n term_in_title = news_api_articles['title'].apply(self.any_term)\n\n news_api_articles = 
news_api_articles[term_in_title]\n\n if (len(news_api_articles) > 10):\n news_api_articles = news_api_articles[0:10]\n\n else:\n print(\"News API failed to return any items\")\n\n # Create shortened links using bitly if access token is provided\n if self.__bitly_access_token != '':\n\n bitly_urls = []\n\n for index, article in news_api_articles.iterrows():\n url = article['url']\n bitly_response = requests.get(\"https://api-ssl.bitly.com/v3/shorten\",\n params={'longUrl': url, 'access_token': self.__bitly_access_token})\n\n if bitly_response.status_code == 200:\n data = json.loads(bitly_response.text)\n bitly_urls.append(data['data']['url'])\n\n news_api_articles['url'] = bitly_urls\n\n # Store final list to TwitterBot object\n self.list = news_api_articles\n\n return", "def view_status(request, pk):\n\n status = Status.objects.get(pk=pk)\n user_profile = get_users_profile(request.user.id)\n\n context = {\n 'news_feed': [status],\n 'user_profile': user_profile,\n }\n\n return render(request, 'status/view_status.html', context)" ]
[ "0.8280784", "0.6733929", "0.6724858", "0.66080564", "0.6577181", "0.65227485", "0.6511483", "0.64573556", "0.645486", "0.6422935", "0.64102906", "0.63754475", "0.633218", "0.6303468", "0.627628", "0.6270833", "0.6261432", "0.6200072", "0.61192656", "0.6112744", "0.6112427", "0.6061837", "0.6060309", "0.59929824", "0.58657044", "0.5788125", "0.5783984", "0.5782728", "0.57728815", "0.5767946" ]
0.8474866
0
log news click for user
def log_news_click_for_user(self, user_id, news_id): # pylint: disable=no-self-use print 'log news click for user is called with %s and %s' % (user_id, news_id) return operations.log_news_click_for_user(user_id, news_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_news_click_for_user(user_id, news_id):\n LOGGER.debug('log_news_click_for_user is called with %s and %s', user_id, news_id)\n return operations.log_news_click_for_user(user_id, news_id)", "def click(cls, user, link):\r\n pass", "def click(cls, user, link):\n pass", "def log_successful_login(sender, request, user, **kwargs):\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info(u\"Login success - user.id: {0}\".format(user.id))\r\n else:\r\n AUDIT_LOG.info(u\"Login success - {0} ({1})\".format(user.username, user.email))", "def log_successful_logout(sender, request, user, **kwargs):\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info(u\"Logout - user.id: {0}\".format(request.user.id))\r\n else:\r\n AUDIT_LOG.info(u\"Logout - {0}\".format(request.user))", "def log_login(sender, request, user, **kwargs):\n stracks.user(user).log(\"? has logged in\", action=stracks.login())", "def log_user_logged_in(sender, request, user, **kwargs):\n\n ip = get_client_ip(request)\n date = datetime.now()\n log.info('Login user: %s , IP: %s , Date: %s', user, ip, str(date))", "def log_page_view(self, page, userid):\n\t\tself.log_page_view.logger_.log('{} {}'.format(page, userid))", "def log(self,):\n if self.request.user.is_anonymous():\n self.fail()\n else:\n self.success()\n if conf.LOGIN_GUARD_FREQUENCY_ALERT_ON:\n self.alert()", "def action(self, user, channel, msg):\n user = user.split('!', 1)[0]\n self.logger.log(\"* %s %s\" % (user, msg))", "def on_a(self):\r\n self.log()", "def action(self, user, channel, msg):\n # i.e. /me <something>\n user = user.split('!', 1)[0]\n self.logger.log(\"* %s %s\" % (user, msg))", "def on_login(self, username):", "def on_login(self, username):", "def get_user_notifications(self, login):", "def _onLog(self, client:mqtt.Client, userdata:Any, level:int, buf:str) -> None:\n\t\tself.lowLevelLogging and self.messageHandler and self.messageHandler.logging(self, mqtt.LOGGING_LEVEL[level], f'MQTT: {buf}')", "def log_event(event):\r\n tracker.send(event)", "def login_actions(sender, request, user, **kwargs):\n # Changer l'expiration de la session\n timeout = ConfigurationForm.get_option_for(user, 'session_timeout')\n request.session.set_expiry(timedelta(seconds=timeout))\n logins = user.profile.get_data('logins', 0)\n user.profile.set_data('logins', logins + 1, save=True)\n messages.success(request, _(\"Hello, {name}!\").format(name=capfirst(user)))\n # Actions publiques et enregistrements\n record.send(None, actor=user, action='user.login')", "def user_logged_in(self, sender, request, user, **kwargs):", "def history_log(self, user, action=CHANGE, message=''):\n LogEntry.objects.log_action(\n user_id=user.pk,\n content_type_id=ContentType.objects.get_for_model(self).pk,\n object_id=self.pk,\n object_repr=force_text(self),\n action_flag=action,\n change_message=message\n )", "def create_log_entry_when_user_logs_in(sender, request, user, **kwargs):\n create_user_log(\n request=request,\n user=user,\n type=_account_const.AUTHENTICATION,\n action=_account_const.LOGIN\n )", "def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)", "def log(self, message):", "def user(self, uid):", "def track_activity(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None: return\n entry = Action()\n entry.user_id = g.user.id\n entry.path = request.path\n entry.verb = request.method\n db.session.add(entry)\n db.session.commit()\n\n return func(*args, **kwargs)\n return f", "def log(self, *args, **kwargs):\n self.game_view.log(*args, **kwargs)", 
"def plain(self, *args):\n self.mylog.log(logging.INFO + 1, *args)", "def event_logged(self, event):\n prefix = termcolor.colored(\">>>\", \"yellow\")\n logging.info(\"%s %s\" % (prefix, event.colored_str()))\n if event.to:\n subject = \"%s: %s\" % (self.name, event.phase)\n self.send_message(event.to, subject, event_email(event, parser=self.parser))", "def user_history(self):\n self.query_1 = \"SELECT * FROM orders WHERE user_id=%s\"\n self.input_1 = (self.user_id,) \n self.event = \"user_history\"\n self.message = \"Order history fetched successfully.\"\n self.error = \"Unable to fetch order history.\"", "def userLog(self, logStr):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'userLog', 'value': logStr}\n self._sendMessageToWeb(cmd)\n else:\n print(\"UserLog: \" + logStr)" ]
[ "0.7703432", "0.65589684", "0.64338", "0.6382208", "0.62468106", "0.6230457", "0.60436696", "0.5973681", "0.59569526", "0.5947858", "0.58712655", "0.5857679", "0.5771925", "0.5771925", "0.5735387", "0.5714753", "0.5699279", "0.56922024", "0.5637997", "0.5601566", "0.5588085", "0.55850345", "0.5584022", "0.5582001", "0.5568965", "0.5556623", "0.55459654", "0.5481684", "0.54417986", "0.5438927" ]
0.7882628
0
recursively print 1 char from myString global Int > None
def print_a_char(i): if i == len(myString): # base case: end of string, just return print("debuggin base case ... now i = " + str(i)) return else: print("debuggin recursion ... now i = " + str(i)) # recursive case: # print char at current index # increment index, # call itself recursively print( myString[i] ) i = i + 1 print_a_char(i) # end of function print_a_char()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strprint(self, mystr):\n if self.is_verbose is True:\n print(mystr)\n else:\n pass\n return", "def _print_with_depth(self, string, depth):\n print(\"{0}{1}\".format(\" \" * depth, string))", "def simple(self, string):\n\n temp = self\n i = 0\n while temp != 0:\n if string[i] < temp.ch:\n temp = temp.left\n elif string[i] > temp.ch:\n temp = temp.right\n else:\n i = i + 1\n if i == len(string):\n return temp.flag\n temp = temp.center\n\n return 0", "def getFisrtCharThatAppearsOnce(myString):\n myString = \"\".join(myString.lower().split())\n charDict = {key:[0, 0] for key in string.ascii_lowercase}\n for pos, char in enumerate(myString):\n charDict[char][0] += 1\n charDict[char][1] = pos\n charDict = {key:values for key, values in charDict.items() if values[0] == 1}\n sortedCharDict = sorted(charDict.items(), key=operator.itemgetter(1))\n strOut = sortedCharDict[0][0] if sortedCharDict else False\n return strOut", "def print_(self, s: str) -> None:", "def pf_one_node(self, depth):\n if self.op == NodeOperation.CHAR:\n line = 'char: {}'.format(self.char)\n else:\n line = 'op: {}'.format(self.op)\n out = '-' * depth + line\n return out", "def wc(substring):\n n = 0\n try:\n while True:\n n += (yield)\n except GeneratorExit:\n print(substring, n, flush=True)", "def _return_char(num_image):\n\n # check image (segment) against the full dictionary of\n # characters and return first match\n for digit, digit_image in DIGITDICT_FULL.items():\n if _np.array_equal(digit_image, num_image):\n return digit\n\n # if no match found then return None\n return None", "def printStr(str):\n if str_chk.match(str): return str\n return repr(str)", "def sentence_printer(sentence):\r\n if len(sentence) == 0:\r\n return 2;\r\n for letter in sentence:\r\n sys.stdout.write(letter)", "def writechar(self, char: int, /) -> None:", "def slowprint(string):\n print(string)", "def first_not_repeating_character(string):\n counter = Counter(string)\n for key, value in counter.items():\n if value <= 1:\n return key\n break\n return '_'", "def getFirstChar(self):\n if self.i1 is None:\n self.firstChar = None\n else:\n chrNum = int(self.i1 // 10)\n if chrNum < 26:\n # should result in something like A4 for 4, B6 for 16\n self.firstChar = chr(ASCII_LETTER_A + chrNum) + str(self.i1 % 10)\n else:\n runLog.warning(\n \"invalid location. 
ring {0} is too many rings!\".format(self.i1),\n self,\n )", "def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str", "def first_recurring_char(s: str) -> str:\n h = {} # using dictionary as hash\n for ch in s:\n if ch in h:\n return ch\n\n h[ch] = 0\n return None", "def character(m) -> str:\n return m[0]", "def print_string(self, text: str) -> Optional[int]:\n return self._write_bytes(text.encode(\"DDRSCII\"))", "def str_recursive(node):\n\n if node == None:\n return \"\"\n else:\n return str(node.item) + \" \" + LinkedList.str_recursive(node.next)", "def main():\n\tprint 'Introduce string: '\n\ts = raw_input()\n\treturn if_unique_chars_one(s)", "def _children_str_at_line(self, line: int) -> str:\n if self.upper_printer is None and self.lower_printer is None:\n return \"\"\n upper_total_rows = self.upper_printer._total_rows() if self.upper_printer else 1\n lower_total_rows = self.lower_printer._total_rows() if self.lower_printer else 1\n if 0 <= line < upper_total_rows:\n return (\n self.upper_printer._str_at_line(line) if self.upper_printer else \"...\"\n )\n elif upper_total_rows < line < upper_total_rows + lower_total_rows + 1:\n return (\n self.lower_printer._str_at_line(line - upper_total_rows - 1)\n if self.lower_printer\n else \"...\"\n )\n return \"\"", "def test_string_insertion(a_string, a_character):\n for position in range(0, len(a_string)+1):\n print a_string[:position] + a_character + a_string[position:]", "def _silent_get(x, i):\n if i < len(x):\n return x[i]\n else:\n return ''", "def fn(i, s=\"\", n=0):\n if i == len(word): return ans.append(s + (str(n) if n else \"\"))\n fn(i+1, s, n+1)\n fn(i+1, s + (str(n) if n else \"\") + word[i], 0)", "def printout(string):\n print(string)", "def print_output(char_list):\n if char_list == []:\n print(\"NULL\")\n else:\n print(*char_list, sep=\"\")", "def x_ian(x, word):\n if x == \"\":\n return True\n else:\n letWordLoc = word.find(x[0])\n if (letWordLoc != -1):\n return x_ian(x[1:], word[(letWordLoc + 1):])\n else:\n return False", "def get_char_to_display(in_value):\n\treturn '#' if in_value else ' '", "def digits_only(self, mystring):\r\n result = \"\"\r\n for ch in mystring:\r\n if ch.isdigit() or ch == '-':\r\n result += ch\r\n return result", "def none_string(line):\n out_line = None if line.lower() == \"none\" or len(line) == 0 else line\n return out_line" ]
[ "0.55965906", "0.5496015", "0.5463261", "0.54267794", "0.5329273", "0.5281513", "0.52523005", "0.5160701", "0.5143802", "0.5138744", "0.5135421", "0.5116006", "0.5073823", "0.5069713", "0.504991", "0.5034474", "0.50065297", "0.50034696", "0.49649817", "0.49551603", "0.4927775", "0.4924847", "0.49227768", "0.49179882", "0.4905351", "0.4901337", "0.4895858", "0.4890105", "0.48729938", "0.48649427" ]
0.7064136
0
Sets the web handler from browserHandler.browserHandler.WebHandler
def connect_browser_handler(self, wh): self.web_handler = wh
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_handler(self, handler):\n self._handler = handler", "def setup_handlers(web_app):\n\n mlw_handlers = [\n ('/mlw/load_workspace', MLW_load_workspace_handler),\n ('/mlw/save_workspace', MLW_save_workspace_handler),\n ('/mlw/install_requirements', MLW_install_requirements_handler),\n ('/mlw/notify_still_alive', MLW_notify_still_alive_handler)\n ]\n\n # add the baseurl to our paths\n base_url = web_app.settings['base_url']\n mlw_handlers = [\n (ujoin(base_url, x[0]), x[1])\n for x in mlw_handlers\n ]\n print(\"base_url: {}\".format(base_url))\n print(mlw_handlers)\n\n web_app.add_handlers('.*', mlw_handlers)", "def set_handler(self, handler):\n self.next_handler = handler", "def website(self, website):\n\n self._website = website", "def website(self, website):\n\n self._website = website", "def setDataRequestHandler(self, handler):\n self.dataRequestHandler = handler", "def register_handler(self, handler):\r\n self.handler = handler", "def web_shell(self, web_shell):\n\n self._web_shell = web_shell", "def setServerEventHandler(self, handler):\n self.serverEventHandler = handler", "def set_type(self, handler_type):\n try:\n self.handler = self.HANDLER_TYPES[handler_type].__func__\n except KeyError:\n handler_names = ', '.join(['\"%s\"' % t for t in self.HANDLER_TYPES.keys()])\n raise ValueError(u'Unsupported handler_type %s, options are %s.' %\n (handler_type, handler_names))", "def _init_browser(self):\n # Initialize the browser\n br = mechanize.Browser()\n # Ignore the robots.txt\n br.set_handle_robots(False)\n return br", "def handler(self, handler):\n if self.local_vars_configuration.client_side_validation and handler is None: # noqa: E501\n raise ValueError(\"Invalid value for `handler`, must not be `None`\") # noqa: E501\n\n self._handler = handler", "def add_handler(self, handler):\n pass", "def set_event_handler(self, handler):\r\n if self._handler:\r\n handler.pre_listeners = self._handler.pre_listeners\r\n handler.post_listeners = self._handler.post_listeners\r\n self._handler = handler\r\n self._handler.c = self\r\n self._handleFn = handler._handle1", "def __init__(self, parent, html_file, js_server_call_fn):\n super(BrowserWidget, self).__init__()\n\n self.parent = parent\n self.view = WebViewEx(self)\n self.view.setPage(WebPage()) #ensure we can see javascript errros\n self.connection = ServerConnection(js_server_call_fn)\n self.setMaximumHeight(100000)\n \n #seems we need absolute paths in the html file for QtWebView to work !?\n self.view.setUrl(QtCore.QUrl.fromLocalFile(html_file))\n \n #make the connection back to the server...\n self.frame = self.view.page().mainFrame()\n self.frame.addToJavaScriptWindowObject('server_connection', self.connection)\n \n #self._sizeHint = QtCore.QSize(600,800)\n \n #adjust the size policy\n self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)", "def webAdd( self, web ):\n web.add( self )", "def set_up(self, web_driver):\n self.driver = web_driver\n self.wait = WebDriverWait(self.driver, 60)\n\n self.google_page = GoogleSearchPage(self.driver, locator, conf, message)\n self.flipkart_page = FlipkartPage(self.driver, locator, message)", "def __init__(self, handler):\n self.__handler = handler", "def get_web_browser(self, settings=None):\n browser = SeleniumBrowser(self.get_wsgi_application())\n if settings is not None:\n settings(browser)\n self._browsers.append(browser)\n return browser", "def getHandler(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def web(self):\n if not self.__web:\n 
self.__web = Web(self)\n return self.__web", "def __init__(self, newbrowser=None):\n # Initialize Cookies\n CHandler = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\n self.newbrowser = urllib2.build_opener(CHandler)\n self.newbrowser.addheaders = [\n ('User-agent', 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:19.0) Gecko/20100101 Firefox/19.0')]\n urllib2.install_opener(self.newbrowser)\n self.error_dict={} # to be returned by get_html if any thing goes wrong", "def webAdd( self, web ):\n web.addNamed( self )", "def web_id(self, web_id):\n\n self._web_id = web_id", "def item_web_url(self, item_web_url):\n\n self._item_web_url = item_web_url", "def set_handle(self, handle): # -> None:\n ...", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def _set_url(self): \n self.url = self.geturl()", "def browser(self):\n return", "def fast_web_view(self, fast_web_view):\n\n self._fast_web_view = fast_web_view" ]
[ "0.6446334", "0.57812834", "0.57771283", "0.5610197", "0.5610197", "0.5527198", "0.54383373", "0.54195005", "0.5405691", "0.5367285", "0.5364556", "0.53571427", "0.53558046", "0.53449744", "0.532374", "0.53024596", "0.52624846", "0.52460796", "0.52252847", "0.5213725", "0.5194164", "0.51621103", "0.51588345", "0.51504767", "0.5145471", "0.513239", "0.5094447", "0.5093124", "0.50854814", "0.50792295" ]
0.7992587
0
Opens float chat if not available Closes float chat if already opened
def toggle_chat(self): if not self.float_chat_on: self.float_chat_toplevel = tk.Toplevel(self) # toplevel for float chat self.float_chat_toplevel.title('Float chat') self.float_chat_toplevel.protocol('WM_DELETE_WINDOW', self.on_float_chat_close) self.float_chat_toplevel.attributes('-topmost', True) # setting float chat top most # create and align chat box chat_box = ChatBox.ChatBox(self.float_chat_toplevel) chat_box.pack(expand=tk.YES, fill=tk.Y) chat_box.align_window() self.float_chat_toplevel.resizable(False, True) self.web_handler.listen_chat(chat_box.update_callback) self.buttons['float_chat'].config(text='Stop float chat') # change button text self.float_chat_on = True # change float chat on status else: self.on_float_chat_close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_float_chat_close(self):\n self.web_handler.stop_listening() # stops web handler from listening\n self.float_chat_toplevel.destroy() # close float chat\n self.buttons['float_chat'].config(text='Start float chat') # change button text\n self.float_chat_on = False # change float chat on status", "def on_chat_open(self, request, trigger_context):\n raise NotImplementedError", "def view_contact_chat(self):\n if self._user.chats == {}:\n print(\"No chats to be viewed yet\")\n self.homepage()\n \n print('-=' * 30)\n chats = self._user.list_chats()\n user_choice = self._int_input_in_range(\"Pick whose contact chat to be viewed: \"\n ,range_ = (1, len(chats)))\n if not user_choice:\n return self.homepage()\n \n chat, contact = chats[user_choice - 1]\n chat_content = chat.get_content(self._user)\n print('-=' * 12 + \" Chat Window \" + '-=' * 12)\n if chat_content != []:\n for line in chat_content:\n print(line.rstrip()) \n else:\n print('This chat is empty, send your first msg now')\n \n user_choice = self._int_input_in_range(' (1) Send new msg \\n (2) Back to homepage \\n Your choice: '\n , range_ = (1,2))\n if user_choice == 1:\n print('HINT: send (0) to exist the chat window')\n return self._send_msg(contact)\n else:\n return self.homepage()", "async def set_chat(self, args):\n value = args if isinstance(args, bool) else args.lower() in ('yes', 'true', '1')\n if self.chat == value:\n return\n self.chat = value\n if self.chat_message is not None:\n await self.delete_message(self.chat_message)\n await self.set_trigger('chat_init', None)\n await self.set_trigger('chat', None)\n tag = 'chat' if self.chat else 'chat_init'\n self.chat_message = await self.send_tag(tag, emoji.TRIGGERS[tag], 'Chat enabled' if self.chat else 'Chat muted')\n if not self.chat:\n await self.shell_terminate_all(self.shell_chat)", "async def flood(event, pl):\r\n global chats\r\n\r\n if (\r\n 'action' in event.object.message and\r\n event.object.message.action.type == 'chat_invite_user'\r\n ):\r\n chats += 1\r\n print(f'\\033[35m[*]\\033[0m New chat | Total chats: {chats}')\r\n\r\n while True:\r\n try:\r\n await api.messages.send(\r\n peer_id=event.object.message.peer_id,\r\n message=config.FLOOD_MSG,\r\n keyboard=keyboard.menu,\r\n random_id=0\r\n )\r\n\r\n await asyncio.sleep(0.1)\r\n\r\n except vk_dev.VkErr as err:\r\n error_code = int(re.findall(r\"\\[.+\\]\", err.text)[0][5:-1])\r\n if error_code == 7:\r\n \r\n chats -= 1\r\n print(\r\n \"\\033[31m[*]\\033[0m Bot kick from chat | \"\r\n f\"Total chats: {chats}\"\r\n )\r\n return\r\n\r\n elif error_code == 9:\r\n await asyncio.sleep(5)\r\n else:\r\n print(err)", "async def legsessionopen(self, ctx):\n\n new_value = await self.toggle_dm_setting(ctx.author.id, \"leg_session_open\")\n\n if new_value:\n message = f\":white_check_mark: You will now receive DMs when you \" \\\n f\"are a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and a new Legislative Session is opened.\"\n else:\n message = f\":white_check_mark: You will no longer receive DMs when you are \" \\\n f\"a {self.bot.mk.LEGISLATURE_LEGISLATOR_NAME} \" \\\n f\"and a new Legislative Session is opened.\"\n\n await ctx.send(message)", "async def on_chat_message(self, chat_message):\n pass", "def start(bot, update, session, chat, user):\n if chat.is_maintenance:\n call_tg_func(update.message.chat, 'send_message', ['Hello there'],\n {'reply_markup': admin_keyboard})\n else:\n call_tg_func(update.message.chat, 'send_message', [help_text],\n {'reply_markup': main_keyboard, 'parse_mode': 'HTML'})", "def 
on_chat(self, event, text):\n return None", "def start(update, context):\n chats = load_chats()\n chats.append( str( update.message.chat_id ) )\n save_channels(chats)\n update.message.reply_text('Chat registered!')", "def _do_start(self, chat_id, user_id, args, update):\n \n self.tclient.send_message('Hallo! Ich bin ein Bot, um dir zu helfen, dir deine Nasensprüche zu merken!', chat_id)", "def chat(self) -> \"api.Chat\":\n raise NotImplementedError", "def switchToChat(self):\n \n self.lastView = self.currentView\n self.currentView = 1\n self.stacked.setCurrentIndex(1)\n self.show()", "def ext_fb_trigger(self):\n if os.path.isfile(self.feedback_file):\n self.toggle_feedback()\n os.remove(self.feedback_file)", "def chat():\n kwargs = {\"title\": u\"chat channel\", \"entries\": log.getLogEntries()}\n return render_template(\"chat.html\", **kwargs)", "def chat():\n # Duck asks if user wants to have a chat\n print(\"\\nAnyway...\")\n time.sleep(1)\n print(\"\\nLet's have a little chat now.\")\n time.sleep(1)\n print(\"\\nSo you want to have a chat, right? (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n # White Duck talks for a bit\n if answer in yes:\n print(\"\\nGreat. I'm a bit of a chatterbox sometimes.\")\n time.sleep(2)\n print(\"\\nBut sometimes I just go quiet.\")\n time.sleep(3)\n print(\"\\nOne time I went quiet for a whole year.\")\n time.sleep(3)\n print(\"\\nAfter that I just couldn't stop talking!\")\n time.sleep(3)\n print(\"\\n...\")\n time.sleep(3)\n print(\"\\nBut enough of that for now. Let's talk about you!\")\n time.sleep(2)\n # The chat will continue if user wishes so\n print(\"\\nIf you want? (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n if answer in yes:\n print(\"\\nGood stuff!\")\n time.sleep(1)\n print(\"\\nSo...\")\n time.sleep(1)\n chat_continues()\n # Otherwise he proposes they play a game\n elif answer in no:\n print(\"\\nOh ok then. Let's play a little game instead.\")\n time.sleep(1)\n game_intro()\n # Chat replay if invalid answer is given\n else:\n print(\"\\nWrong answer!\")\n chat()\n # Duck gets a bit cross if user doesn't want to chat\n elif answer in no:\n print(\"\\nWhat do you mean, you don't want to have a chat?\")\n time.sleep(1)\n # User is given another chance to change their mind\n print(\"\\nChange your mind time! C'mon, have a chat! (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n if answer in yes:\n print(\"\\nThat's the spirit!\")\n time.sleep(1)\n chat_continues()\n elif answer in no:\n print(\"\\nQUACK!\")\n time.sleep(2)\n print(\"\\nHmmm... Let's play a little game instead.\")\n time.sleep(2)\n game_intro()\n else:\n print(\"\\nBut that's not what I asked! Try again.\")\n time.sleep(1)\n chat()\n else:\n print(\"\\nWrong answer. Back to the beginning.\")\n time.sleep(1)\n chat()", "def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")", "def on_chat_close(self, request, trigger_context):\n raise NotImplementedError", "def on_chat_start(self, request, trigger_context):\n raise NotImplementedError", "def start(self, bot, update):\n print(update.message[\"chat\"])\n start_text = \"Eu sou o bot da IEEE Computer Society UnB \" \\\n \"e gerencio os repositórios da instituição. \" \\\n \"Digite /help para saber mais sobre meus comandos.\"\n bot.send_message(chat_id=update.message.chat_id, text=start_text)\n\n start_text = \"Agora vamos lá. 
Em que posso ajudá-lo?\"\n bot.send_message(chat_id=update.message.chat_id, text=start_text)\n return", "def joingroup_command(update,context):\n update.message.reply_text('Want to chat with other CTF players or ask questions to admins? Use the following channel:\\r\\nhttps://t.me/joinchat/CYsj-xwzlFqIbQPPeo04bw')", "def start(update: Update, context: CallbackContext) -> None:\n if update.effective_chat:\n context.bot.send_message(chat_id=update.effective_chat.id,\n text=\"I'm a bot, please talk to me!\")", "def is_open(self) -> bool:\n pass", "def open_close(self, is_open, name='', t=None):\n if t is None:\n t = time.localtime()\n self.clear()\n self.logo()\n self.set_cursor(4,1)\n if is_open:\n self.send('auf %s' % string.rjust(name, 13)[:13])\n else:\n self.send('geschlossen')\n self.set_cursor(4,2)\n self.send(time.strftime('seit %d.%m. %H:%M', t))", "def open(self):\n self.state = True\n self.mainwindow.sendMessage('a')\n print(\"opening \" + self.name)", "def on_chat_message(self, message):\n if message['target'] == '':\n self.service.chat_all(message['text'], self.name)\n else:\n targets = list(filter(lambda p: p.name == message['target'], self.service.protocols))\n print(targets)\n if len(targets) == 1:\n target = targets[0]\n target.send_chat(message['text'], self.name, target.name, whisper=True)\n if self.name != target.name:\n self.send_chat(message['text'], self.name, target.name, whisper=True)\n else:\n log.warn(\"Trying to chat player {name}, but this player is not found!\",\n name=message['target'])", "def OnB(self, event):\n bp = event.GetEventObject()\n friend = bp.parameterVal\n app1 = wx.PySimpleApp(0)\n wx.InitAllImageHandlers()\n frame_1 = ChatScreen(self.user, self.passw,friend, None, -1, \"\")\n TextThread()\n app1.SetTopWindow(frame_1)\n frame_1.Show()\n app1.MainLoop()", "def is_open(self):\n return (not self.interface.is_open)", "def open(self, irc, msg, args):\n status = urlopen(\"http://portal.shack:8088/status\").read()\n status = json.loads(status)\n \n if status['status'] == 'open':\n irc.reply(\"shack is open.\", prefixNick=False)\n elif status['status'] == 'closed':\n irc.reply(\"shack is closed.\", prefixNick=False)\n else:\n irc.reply(random.choice(self.dunno), prefixNick=False)", "def is_open(self):\n return self.name == \"open\"" ]
[ "0.7377202", "0.64777136", "0.6359903", "0.6113372", "0.58211136", "0.57748777", "0.5760266", "0.5738265", "0.57245386", "0.5675755", "0.5588814", "0.55749667", "0.5561621", "0.55025995", "0.5502298", "0.54930097", "0.5478247", "0.54635215", "0.54177946", "0.5400971", "0.53965104", "0.53934044", "0.5390844", "0.53879607", "0.53788674", "0.5374724", "0.5370756", "0.5348264", "0.5341109", "0.53399575" ]
0.76807743
0
Called when float chat is to be closed
def on_float_chat_close(self): self.web_handler.stop_listening() # stops web handler from listening self.float_chat_toplevel.destroy() # close float chat self.buttons['float_chat'].config(text='Start float chat') # change button text self.float_chat_on = False # change float chat on status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_chat_close(self, request, trigger_context):\n raise NotImplementedError", "def CloseForum(self, event):\n pass", "def handle_close(self, msg):\n self.log.debug(\"handle_close[%s](%s)\", self.comm_id, msg)\n if self._close_callback:\n self._close_callback(msg)", "def leave(self):\n print('%r: leaving', self)\n self.telepathy_text_chan.Close()", "def endMessage(self):", "def disconnect(self):\r\n\t\tself.ui.chatlist.clear()\r\n\t\t#self.top.quit()\r\n\t\tself.chat.disconnect(self.my_uri)\r\n\t\r\n\t\tprint('Disconnected')", "def onClose (self):\n \n pass", "def close(self):\n\n self.en_time = time.strftime('%H:%M %A %d %B')\n self.is_active = False", "def handle_close(self):\n self.active = False\n self.close()", "def on_closing(event=None):\r\n my_msg.set(\"{quit}\")\r\n send()", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def received_CLOSING(self):\n\n\t\tself.player_frame.notify_rival_closing()\n\t\tself.player_frame.master.go_to_previous_screen(False)", "def on_connection_closed(self):", "def handleClose(self):\n logging.info(\"%s %s\", self.address, \"closed\")\n self.logbook.clients_disconnected_count += 1", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(event=None):\n my_msg.set(\"{quit}\")\n send()", "def on_closing(self, *args):\n pass", "def on_stop(self):\n # we are going to close all the open positions when the bot stops\n self.close_open_positions()\n self.candles.stop()", "def __window_close(self):\n pass", "def on_chat_end(self, request, trigger_context):\n raise NotImplementedError", "def on_close(self):\n # remove callback so it doesn't get called in the future\n self._notifier.remove_callback(self.process_notification)", "def on_close(self):\n logging.log(logging.INFO, \"Closing [%s]\" % self.__module__)", "def on_close(self):\n self.subscrib.unsubscribe(self.channel)\n self.thread.stop()", "def on_before_close(self):\n pass", "def handle_closing_connection(self, msg):\n if self.presentation.get_presentation_content():\n self.presentation.reset()\n self.layout.reset_presentation()", "def close(self):\n self.state = False\n self.mainwindow.sendMessage('a')\n print(\"closing \" + self.name)" ]
[ "0.78410995", "0.6853194", "0.68365043", "0.67821735", "0.6760789", "0.67238986", "0.67188543", "0.67052376", "0.66676116", "0.6519735", "0.6502221", "0.6502221", "0.6502221", "0.6484066", "0.6447902", "0.64328486", "0.64257556", "0.64257556", "0.64257556", "0.64257556", "0.6419924", "0.64047825", "0.6387346", "0.63716197", "0.63286805", "0.6319528", "0.6315179", "0.62735605", "0.6273308", "0.6271806" ]
0.8624791
0
Fetch the list of favourited projects for the currently logged in user
def get(self): # fetch parameter get_parser = reqparse.RequestParser(bundle_errors=True) get_parser.add_argument("user_id", required=True, help="User ID required to fetch favourite projects") args = get_parser.parse_args(strict=True) # get user_id user_id = args["user_id"] # set up return json data ret = { "_id": "", "user_id": "", "favourite_projects": [] } # convert user_id (string) into ObjectId try: user_id = ObjectId(user_id) except: return {"message": "invalid user id"}, 400 # fetch the favourites list of the user if 'user_id' in args.keys(): # check if user is in the database user = self.users.find_one({"_id": user_id}) if user is None: return {"message": "user not found"}, 404 else: # check if user has any favourites user_favourites = self.favourites.find_one({"user_id": user_id}) if user_favourites is None: return {"message": "user does not have any favourites"}, 400 else: # add return _id and user_id data ret["_id"] = str(user_favourites["_id"]) ret["user_id"] = str(user_favourites["user_id"]) # update project details if needed update_project_details = [] for project in user_favourites["favourite_projects"]: project_id = str(project["_id"]) project_id = ObjectId(project_id) doc = self.projects.find_one({"_id": project_id}) if doc: update_project_details.append(deepcopy(doc)) # ret details # fetch the username for each user id ret_members = [] for member_id in doc["members"]: mem = self.users.find_one({"_id": member_id}) mem_dict = {"_id": str(member_id), "username": mem["username"]} ret_members.append(mem_dict) leader = self.users.find_one({"_id": doc["leader"]}) ret_leader = {"_id": str(doc["leader"]), "username": leader["username"]} # json format for each project ret_project = { "project_id": str(doc["_id"]), "title": doc["title"], "leader": ret_leader, "max_people": doc["max_people"], "cur_people": doc["cur_people"], "members": ret_members, "description": doc["description"], "course": doc["course"], "technologies": doc["technologies"], "languages": doc["languages"], "tags": doc["tags"] } ret["favourite_projects"].append(ret_project) # update the favourites list for this user and send back the updated details new_favourites = {"favourite_projects": update_project_details} self.favourites.update({"user_id": user_id}, {"$set": new_favourites}, upsert=False) return ret, 200 else: return {"message": "user id required to fetch the favourites list"}, 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_returns_projects_favourited_by_user_if_favourited_by_me_is_true(self):\n # Arrange\n # Make all projects to be accessible for user\n self.test_project_2.private = False\n self.test_project_2.save()\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n self.test_user.favorites = [self.test_project_1]\n self.test_user.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"favoritedByMe\": \"true\"},\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Projects.objects.filter(username = username).order_by('-id')", "def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})", "def open_projects_user(user):\n return Project.objects.prefetch_related('task_set').filter(user=user, open=True)", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def get_for(user):\n return Project.objects.filter(\n user_group__members=user\n ).distinct()", "def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)", "def get_queryset(self):\n queryset = Project.objects.filter(contributor__user=self.request.user.pk)\n return queryset", "def fetch_project(search_info):\n search = search_collection.find_one({\"_id\": SEARCH_ID})\n user = user_collection.find_one({\"_id\": search_info[\"USER_ID\"]})\n 
user_bookmarks = user[\"bookmarks\"]\n user_contributions = user[\"contributions\"]\n user_outgoing = user[\"outgoing\"]\n try:\n project_id_list = search[search_info[\"search_query\"]]\n except KeyError:\n project_id_list = None\n except AttributeError:\n project_id_list = None\n if project_id_list != None:\n projects_list = list()\n for id in project_id_list:\n project = project_collection.find_one({\"_id\": id})\n if project == None:\n continue\n if user_bookmarks == None:\n project[\"bookmark\"] = False\n else:\n project[\"bookmark\"] = True if id in user_bookmarks else False\n if user_outgoing == None:\n project[\"contribution\"] = False\n\n else:\n project[\"contribution\"] = True if id in user_outgoing else False\n projects_list.append(project)\n return projects_list\n else:\n return []", "def get_projects_user_can_view(user):\n if hasattr(user, 'worker'):\n # Workers need to be able to view all data\n projects = Project.objects.all()\n else:\n projects = get_objects_for_user(\n user,\n 'view_project_data',\n klass=Project)\n sites = get_objects_for_user(user, 'view_site_data', klass=Site)\n site_projects = Project.objects\\\n .filter(id__in=[i.project_id for i in sites])\\\n .exclude(id__in=[p.id for p in projects])\n\n return projects | site_projects", "def projects():\n \n if 'username' in session:\n current_user = mongo.db.user.find_one({'username': session['username']}) \n projects = mongo.db.projects.find().sort('date',pymongo.DESCENDING)\n return render_template('pages/projects.html', title='Projects', projects=projects, current_user=current_user)\n \n flash('Please login to view user projects.', 'warning')\n return redirect(url_for('login'))", "def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)", "def home(request):\n projects_newest = Project.approved_projects().all().select_related(\"screenshot\").order_by('-id')[:10]\n projects_newest = [project for project in projects_newest]\n return render(request, 'home.html', {\n \"form\": AuthenticationForm(),\n 'projects_popular': projects_newest,\n 'projects_newest': projects_newest\n })", "def get_projects_of_user(self, user_id):\n res = self.conn.cursor().execute(\"\"\"SELECT * FROM projects p JOIN users_projects up \n ON p.id = up.project_id \n WHERE owner=? 
OR up.user_id=?\n GROUP BY p.id\n ORDER BY last_update DESC\"\"\", (user_id, user_id,))\n return res.fetchall()", "def get_projects():\n return Project.query.all()", "def _getFavorites(self):\n url = self._genFavoritesUrlByUser(self._username)\n doc = html.document_fromstring(requests.get(url).text)\n out = dict()\n pages = get_pages(doc)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = str(f.attrib['href']).split('/')[-2]\n # topic_id =\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n for p in range(2, pages):\n url = 'http://habrahabr.ru/users/{0}/favorites/page{1}/'.format(self._username, p)\n # if show_progress:\n # print('parsing page{0}... url={1}'.format(p, url))\n doc = html.document_fromstring(requests.get(url).text)\n favs = doc.xpath(\"//div[@class='user_favorites']//a[@class='post_title']\")\n for f in favs:\n # out[f.text] = f.attrib['href'][-7:-1]\n out[f.text] = str(f.attrib['href']).split('/')[-2]\n return out", "def get_accessible_projects(user):\n query = Q(deprecated_files=False)\n\n query &= get_public_projects_query()\n\n if user.is_authenticated:\n query |= get_restricted_projects_query(user)\n\n if user.is_credentialed:\n query |= get_credentialed_projects_query(user)\n\n query |= get_projects_accessible_through_events(user)\n\n return PublishedProject.objects.filter(query).distinct()", "def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Favorites.objects.filter(user=user, is_used=True)\n\n return queryset", "def post(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to ad to their favourite projects\")\n get_parser.add_argument(\"project_id\", required=True, help=\"Project ID required to add to the favourite projects\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id and project_id\n user_id = args[\"user_id\"]\n project_id = args[\"project_id\"]\n\n # convert parameter ids into objectids\n try:\n user_id = ObjectId(user_id)\n project_id = ObjectId(project_id)\n except:\n return {\"message\": \"invalid user id or project id\"}, 400\n\n # add project to the user's favourites\n if ('user_id' or 'project_id') not in args.keys():\n return {\"message\": \"both user and project id are required\"}, 400\n else:\n # check if user is valid\n user = self.users.find_one({\"_id\": user_id})\n project = self.projects.find_one({\"_id\": project_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n elif project is None:\n return {\"message\": \"project not found\"}, 404\n else:\n # add project to favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n # insert a new doc into favourites collection\n favourites_list = []\n favourites_list.append(deepcopy(project)) \n self.favourites.insert({\n \"user_id\": user_id,\n \"favourite_projects\": favourites_list\n })\n else:\n new_favourite_list = user_favourites[\"favourite_projects\"]\n\n # check if this project is already in the user's favourites\n for proj in new_favourite_list:\n if proj[\"_id\"] == project_id:\n return {\"message\": \"project is already in the favourites list\"}, 400\n\n new_favourite_list.append(deepcopy(project))\n updated_list = {\"favourite_projects\": new_favourite_list}\n\n self.favourites.update({\"user_id\": user_id}, {\"$set\": updated_list}, upsert=False)\n \n return {\"status\": \"project has been added to 
favourites successfully\"}, 200", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites", "def get_featured_projects(self):\n featured_projs = FeaturedProject.objects.order_by('id')[0:3]\n highlighted = []\n activities = personalize_activities_dict(self.request.user)\n try:\n for featured in featured_projs:\n try:\n activity = activities[featured.project.id_label]\n if featured.description:\n activity['commentary'] = featured.description\n highlighted.append(activity)\n except KeyError:\n pass\n return highlighted\n except (ValueError, TypeError):\n return []", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]" ]
[ "0.7222293", "0.6640633", "0.662446", "0.65211076", "0.64192235", "0.6397125", "0.6359353", "0.62913567", "0.6281064", "0.6238408", "0.62245435", "0.62229395", "0.62035525", "0.6191851", "0.61816716", "0.61535966", "0.6113703", "0.61077994", "0.6061517", "0.60476655", "0.60471356", "0.604354", "0.6037239", "0.60312223", "0.5997083", "0.5985646", "0.5984023", "0.5981372", "0.5964486", "0.5962718" ]
0.7558755
0
Add the selected project to the user's favourites list
def post(self): # fetch parameter get_parser = reqparse.RequestParser(bundle_errors=True) get_parser.add_argument("user_id", required=True, help="User ID required to ad to their favourite projects") get_parser.add_argument("project_id", required=True, help="Project ID required to add to the favourite projects") args = get_parser.parse_args(strict=True) # get user_id and project_id user_id = args["user_id"] project_id = args["project_id"] # convert parameter ids into objectids try: user_id = ObjectId(user_id) project_id = ObjectId(project_id) except: return {"message": "invalid user id or project id"}, 400 # add project to the user's favourites if ('user_id' or 'project_id') not in args.keys(): return {"message": "both user and project id are required"}, 400 else: # check if user is valid user = self.users.find_one({"_id": user_id}) project = self.projects.find_one({"_id": project_id}) if user is None: return {"message": "user not found"}, 404 elif project is None: return {"message": "project not found"}, 404 else: # add project to favourites user_favourites = self.favourites.find_one({"user_id": user_id}) if user_favourites is None: # insert a new doc into favourites collection favourites_list = [] favourites_list.append(deepcopy(project)) self.favourites.insert({ "user_id": user_id, "favourite_projects": favourites_list }) else: new_favourite_list = user_favourites["favourite_projects"] # check if this project is already in the user's favourites for proj in new_favourite_list: if proj["_id"] == project_id: return {"message": "project is already in the favourites list"}, 400 new_favourite_list.append(deepcopy(project)) updated_list = {"favourite_projects": new_favourite_list} self.favourites.update({"user_id": user_id}, {"$set": updated_list}, upsert=False) return {"status": "project has been added to favourites successfully"}, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def favourite(self, favourite):\n\n self._favourite = favourite", "def post(self, project_id):\n project_model = ProjectDBModel.query.get(project_id)\n if not project_model:\n ns.abort(404, status=PROJECT_NOT_FOUND_ERROR)\n try:\n data = request.get_json()\n users = FavoritesProjectDBModel.add_project_to_favorites_of_user_id(\n data['user_id'], project_id)\n response_object = {\n \"project_id\": project_id,\n \"users_id\": users,\n }\n return response_object, 201\n except KeyError:\n ns.abort(404, status=MISSING_VALUES_ERROR)", "def set_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.add(company)\n return Response({'favorite': True})", "def favorite(self):\n url = \"https://api.imgur.com/3/album/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method=\"POST\")", "def test_returns_projects_favourited_by_user_if_favourited_by_me_is_true(self):\n # Arrange\n # Make all projects to be accessible for user\n self.test_project_2.private = False\n self.test_project_2.save()\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n self.test_user.favorites = [self.test_project_1]\n self.test_user.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"favoritedByMe\": \"true\"},\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def cmd_account_favorites(client, args):\n account_favorites = client.get_account_favorites(args.username)\n data = [item.__dict__ for item in account_favorites]\n generate_output({'account_favorites': data}, args.output_file)", "def addToFavorites(self, shortName, absPath):\n logger.debug(\"Func: addToFavorites\")\n\n # old Name userFavoritesAdd\n bookmarksData = self.loadFavorites()\n bookmarksData.append([shortName, absPath])\n self._dumpJson(bookmarksData, self._pathsDict[\"bookmarksFile\"])\n return bookmarksData", "def put(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to acccess the user's favourite projects\")\n get_parser.add_argument(\"project_id\", required=True, help=\"Project ID required to remove a project\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id and project_id\n user_id = args[\"user_id\"]\n project_id = args[\"project_id\"]\n\n # convert parameter ids into objectids\n try:\n user_id = ObjectId(user_id)\n project_id = ObjectId(project_id)\n except:\n return {\"message\": \"invalid user id or project id\"}, 400\n\n # add project to the user's favourites \n if ('user_id' or 'project_id') not in args.keys():\n return {\"message\": \"both user and project id are required\"}, 400\n else:\n # check if user is valid\n user = self.users.find_one({\"_id\": user_id})\n project = self.projects.find_one({\"_id\": project_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n elif project is None:\n return {\"message\": \"project not found\"}, 404\n else:\n # remove project from the user's favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n return {\"message\": \"user does not have any favourite projects\"}, 400\n else:\n new_favourite_list = 
user_favourites[\"favourite_projects\"]\n\n # try to remove the project if it is in the favourites\n try:\n new_favourite_list.remove(project)\n except:\n return {\"message\": \"the project is not in the favourites list\"}, 400\n\n if new_favourite_list is None:\n new_favourite_list = []\n\n updated_list = {\"favourite_projects\": new_favourite_list}\n self.favourites.update({\"user_id\": user_id}, {\"$set\": updated_list}, upsert=False)\n \n return {\"status\": \"project has been removed from favourites successfully\"}, 200", "def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)", "def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')", "def favorites(self):\n path = self._get_path('favorites')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return self._clean_return(response)", "def get_favourites(self, username):\n self.cur.execute(\"SELECT video_ID FROM favourites WHERE username = \\\"{}\\\"\".format(username))\n favourites = []\n for ID in self.cur.fetchall():\n favourites.append(ID[0])\n return favourites", "def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))", "def auto_fav(q, count=5, result_type=\"recent\"):\n\n result = search_tweets(q, count, result_type)\n\n for tweet in result[\"statuses\"]:\n try:\n # don't favorite your own tweets\n if tweet[\"user\"][\"screen_name\"] == TWITTER_HANDLE:\n continue\n\n result = t.favorites.create(_id=tweet[\"id\"])\n print(\"favorited: %s\" % (result[\"text\"].encode(\"utf-8\")))\n\n # when you have already favorited a tweet, this error is thrown\n except TwitterHTTPError as e:\n print(\"error: %s\" % (str(e)))", "def get(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to fetch favourite projects\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id\n user_id = args[\"user_id\"]\n\n # set up return json data\n ret = {\n \"_id\": \"\",\n \"user_id\": \"\",\n \"favourite_projects\": []\n }\n\n # convert user_id (string) into ObjectId\n try:\n user_id = ObjectId(user_id)\n except:\n return {\"message\": \"invalid user id\"}, 400\n\n # fetch the favourites list of the user\n if 'user_id' in args.keys():\n # check if user is in the database\n user = self.users.find_one({\"_id\": user_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n else:\n # check if user has any favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n return {\"message\": \"user does not have any favourites\"}, 400 \n else:\n # add return _id and user_id data\n ret[\"_id\"] = str(user_favourites[\"_id\"])\n ret[\"user_id\"] = str(user_favourites[\"user_id\"])\n\n # update project details if needed\n 
update_project_details = []\n for project in user_favourites[\"favourite_projects\"]:\n project_id = str(project[\"_id\"])\n project_id = ObjectId(project_id)\n\n doc = self.projects.find_one({\"_id\": project_id})\n if doc:\n update_project_details.append(deepcopy(doc))\n\n # ret details\n # fetch the username for each user id\n ret_members = []\n for member_id in doc[\"members\"]:\n mem = self.users.find_one({\"_id\": member_id})\n mem_dict = {\"_id\": str(member_id), \"username\": mem[\"username\"]}\n ret_members.append(mem_dict)\n\n leader = self.users.find_one({\"_id\": doc[\"leader\"]})\n ret_leader = {\"_id\": str(doc[\"leader\"]), \"username\": leader[\"username\"]}\n\n # json format for each project\n ret_project = {\n \"project_id\": str(doc[\"_id\"]),\n \"title\": doc[\"title\"],\n \"leader\": ret_leader,\n \"max_people\": doc[\"max_people\"],\n \"cur_people\": doc[\"cur_people\"],\n \"members\": ret_members,\n \"description\": doc[\"description\"],\n \"course\": doc[\"course\"],\n \"technologies\": doc[\"technologies\"],\n \"languages\": doc[\"languages\"],\n \"tags\": doc[\"tags\"]\n }\n ret[\"favourite_projects\"].append(ret_project)\n \n # update the favourites list for this user and send back the updated details\n new_favourites = {\"favourite_projects\": update_project_details}\n self.favourites.update({\"user_id\": user_id}, {\"$set\": new_favourites}, upsert=False)\n \n return ret, 200 \n else:\n return {\"message\": \"user id required to fetch the favourites list\"}, 400", "def favorites(self):\n if not self._user_favorites_loaded:\n self._user_favorites = self._getFavorites()\n self._user_favorites_loaded = True\n return deepcopy(self._user_favorites)", "def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")", "async def create(self, favorite: Favorite) -> Favorite:", "def favorite(self, item):\n self._createAction(item, \"archive\")", "def mark_favorite(request, object_id):\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))", "def favorite(self):\n url = \"https://api.imgur.com/3/image/{0}/favorite\".format(self.id)\n return self._imgur._send_request(url, needs_auth=True, method='POST')", 
"def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()", "def favorites(request):\n cur_user = request.user # Gets the current logged-in user\n fav_products = Favorite.objects.all() # Gets all \"Favorite\" model objects\n\n # Gets the favorites of the current user\n fav_prod_filtered = fav_products.filter(users_id=cur_user).order_by('-id')\n\n # Adds pagination for up to 6 products per page\n paginator = Paginator(fav_prod_filtered, 6)\n page = request.GET.get('page')\n\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n\n context = {\n 'favorites': products,\n 'paginate': True,\n }\n return render(request, 'favorites/favorites.html', context)", "def see_favorits(request):\n user_name = request.user\n print(user_name)\n # product = UserFavorite.objects.filter(user_name=user_name)\n list_favorits = UserFavorite.objects.all().filter(user_name=user_name)\n favorits_query = list_favorits\n favorits_list = []\n for favorite in favorits_query:\n favorits_list.append(Product.objects.get(pk=favorite.product.id))\n print(favorits_list)\n context = {\n # 'product' : product,\n 'user_name' : user_name,\n 'product' : favorits_list\n }\n\n\n return render(request,\"favorits.html\",context)", "def post_favorite(request, pk=None):\n post = Post.objects.get(pk=pk).original_or_self()\n if post.favorites.filter(pk=request.user.pk).exists():\n post.favorites.remove(request.user)\n else:\n post.favorites.add(request.user)\n post.save()\n\n referer = request.META['HTTP_REFERER']\n if referer:\n return redirect(referer)\n else:\n return redirect('posts:post', pk=post.pk)", "def add_to_fav(show_id, name):\n db = get_db()\n db.execute(\n 'INSERT INTO shows_users (show_id, user_id)'\n ' VALUES (?, ?)',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully added to your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)", "def cmd_account_gallery_favorites(client, args):\n gallery_favorites = client.get_gallery_favorites(args.username)\n data = [item.__dict__ for item in gallery_favorites]\n generate_output({'gallery_favorites': data}, args.output_file)", "def get_favorites(request):\n companies = request.user.profile.companies.all()\n context = {'user_id': request.user.id}\n serializer = CompanySerializers(companies, context=context)\n return Response(serializer.data)", "def cmd_album_favorite(client, args):\n favorite_album = client.album_favorite(args.album_id)\n generate_output({'favorite_album': favorite_album})", "def get_favorites(self):\n url = \"https://api.imgur.com/3/account/{0}/favorites\".format(self.name)\n resp = self._imgur._send_request(url, needs_auth=True)\n return [_get_album_or_image(thing, self) for thing in resp]" ]
[ "0.65495306", "0.65230185", "0.64037746", "0.6359352", "0.63081443", "0.620794", "0.6202838", "0.61783636", "0.6137469", "0.60920554", "0.60460544", "0.5985217", "0.5984209", "0.59489393", "0.5923535", "0.58728683", "0.58598346", "0.58549684", "0.58362305", "0.5825522", "0.5808739", "0.5784779", "0.57822514", "0.57256734", "0.5712459", "0.57029223", "0.5694877", "0.56697154", "0.5663519", "0.566179" ]
0.67139345
0
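An illustrative client-side sketch of exercising the favourites endpoint shown in the entry above; the /favourites route, the localhost host, and the example ObjectId strings are assumptions, since the route registration is not included in the entry.

import requests

# Assumed route and host; the resource class above only defines the handler, not the URL mapping.
resp = requests.post(
    "http://localhost:5000/favourites",
    json={
        "user_id": "64d1f0c2ab12cd34ef567890",     # hypothetical 24-character ObjectId strings
        "project_id": "64d1f0c2ab12cd34ef567891",
    },
)
# On success the handler above returns {"status": "project has been added to favourites successfully"}, 200
print(resp.status_code, resp.json())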
Donations to Wikimedia come in discrete units due to the radio boxes with suggested donation amounts. The vast majority of impressions do not lead to a donation. This object encapsulates a multinomial distribution that is parameterized in a slightly unusual way. You specify the positive values, a distribution over the positive values, and the donation rate.
def __init__(self, p_donate, pos_amounts=[1.0,], pos_amounts_distribution=[1.0,]): self.p_donate = p_donate self.pos_amounts = pos_amounts self.pos_amounts_distribution = pos_amounts_distribution zero = np.array([0,]) p_donate = np.array([p_donate],) pos_amounts = np.array(pos_amounts) pos_amounts_distribution = np.array(pos_amounts_distribution) self.values = np.concatenate([zero,pos_amounts]) self.distribution = np.concatenate([1 - p_donate, p_donate * pos_amounts_distribution])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_discrete_distribution():\n rng = utils.RandomState(0)\n distribution = dist.DiscreteDistribution(rng)\n with pytest.raises(NotImplementedError):\n distribution.sample([])\n with pytest.raises(NotImplementedError):\n distribution.log_probability([], None)\n with pytest.raises(NotImplementedError):\n distribution.support([])", "def multinomial(self, size=None, n=1, pvals=[0.5, 0.5], ndim=None,\r\n dtype='int64'):\r\n return self.gen(multinomial, size, n, pvals, ndim=ndim, dtype=dtype)", "def change_p_donate(self, p_donate):\n return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)", "def _build_multinomial_weights(self) -> None:\n weights_obs = ramp_up_weights(\n len(self.obs), self.tpe.full_weight_num, self.tpe.equal_weight\n )\n counts_obs = numpy.bincount(\n self.obs, minlength=len(self.choices), weights=weights_obs\n )\n counts_obs = counts_obs + self.tpe.prior_weight\n self.weights = counts_obs / counts_obs.sum()", "def initializeDistribution(self):\n if self.nPoints is None:\n self.xArray = np.arange(self.lowerBound,self.upperBound+1)\n else:\n self.xArray = np.linspace(self.lowerBound,self.upperBound,self.nPoints)\n\n # Here the actual calculation of discrete distribution parameters is performed\n self.pdfArray = 1.0/self.xArray.size * np.ones(self.xArray.size)\n paramsDict={}\n paramsDict['outcome'] = self.xArray\n paramsDict['state'] = self.pdfArray\n\n self.categoricalDist = Categorical()\n self.categoricalDist.initializeFromDict(paramsDict)\n initialPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(initialPerm)", "def set_uniform_probabilities(self, sentence_aligned_corpus):\n ...", "def get_discrete_distribution():\n random_int = random.randint(1, 4)\n if (random_int == 1) | (random_int == 2):\n return 0\n if random_int == 3:\n return 3\n if random_int == 4:\n return 5\n raise ValueError(\"Unable to generate discrete distribution with \", random_int)", "def define_pdf(self, values: torch.Tensor, weights: torch.Tensor, inds: torch.Tensor) -> Distribution:\n\n raise NotImplementedError()", "def get_delta_distribution(non_zero_value):\n support = np.arange(0, MAX_MARK_VALUE + 1)\n probs = [0] * len(support)\n probs[non_zero_value] = 1.0\n return ProbabilityDistribution(support, probs)", "def multinomial_class(\n distribution_or_probs: Union[tfd.Distribution, jnp.DeviceArray]\n) -> jnp.DeviceArray:\n if isinstance(distribution_or_probs, tfd.Distribution):\n return jnp.argmax(distribution_or_probs.logits_parameter(), axis=1)\n return jnp.argmax(distribution_or_probs, axis=1)", "def __init__(self, num_values, probability):\n self.num_values = num_values\n self.probability = probability\n self.register_bounds = GeometricDistribution._compute_register_bounds(num_values, probability)\n self._register_probs = GeometricDistribution._compute_register_probs(num_values, probability)", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def probability_from_internal(internal_values, constr):\n return internal_values / internal_values.sum()", "def lift(self, lift):\n p_donate = self.p_donate + self.p_donate * lift\n return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.multinomial((20,20), 1, [0.1]*10))\r\n\r\n made = m.make()\r\n 
made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def __new__(\n cls,\n mu: Union[float, np.ndarray] = 0.0,\n sigma: Union[float, np.ndarray] = 1.0,\n seed: Optional[int] = 0,\n ):\n\n if sigma == 0:\n # If sigma is zero, return a DiscreteDistribution with a single atom\n return DiscreteDistribution([1.0], [np.exp(mu)], seed=seed)\n\n return super().__new__(cls)", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def initializeDistribution(self):\n self.convertToDistrDict['Jacobi'] = self.convertJacobiToBeta\n self.convertToQuadDict ['Jacobi'] = self.convertBetaToJacobi\n self.measureNormDict ['Jacobi'] = self.stdProbabilityNorm\n #this \"if\" section can only be called if distribution not generated using readMoreXML\n if (not self.upperBoundUsed) and (not self.lowerBoundUsed):\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,self.low)\n else:\n if self.lowerBoundUsed == False:\n a = 0.0\n else:\n a = self.lowerBound\n if self.upperBoundUsed == False:\n b = sys.float_info.max\n else:\n b = self.upperBound\n self._distribution = distribution1D.BasicBetaDistribution(self.alpha,self.beta,self.high-self.low,a,b,self.low)\n self.preferredPolynomials = 'Jacobi'\n self.compatibleQuadrature.append('Jacobi')\n self.compatibleQuadrature.append('ClenshawCurtis')", "def sample(self, probabilities):\n return self.sample_bernoulli(probabilities)", "def multinomial_nll(true_counts, logits):\n counts_per_example = tf.reduce_sum(true_counts, axis=-1)\n dist = tfp.distributions.Multinomial(total_count=counts_per_example,\n logits=logits)\n return (-tf.reduce_sum(dist.log_prob(true_counts)) / \n tf.cast(tf.shape(true_counts)[0], dtype=tf.float32))", "def multinomial(random_state, size=None, n=1, pvals=[0.5, 0.5],\r\n ndim=None, dtype='int64'):\r\n n = tensor.as_tensor_variable(n)\r\n pvals = tensor.as_tensor_variable(pvals)\r\n # until ellipsis is implemented (argh)\r\n tmp = pvals.T[0].T\r\n ndim, size, bcast = _infer_ndim_bcast(ndim, size, n, tmp)\r\n bcast = bcast + (pvals.type.broadcastable[-1],)\r\n op = RandomFunction(multinomial_helper,\r\n tensor.TensorType(dtype=dtype, broadcastable=bcast),\r\n ndim_added=1)\r\n return op(random_state, size, n, pvals)", "def valid_donation(donation):\n if donation < 0:\n raise ValueError(\"Donation has to be more than 0.\")\n if donation < 0.1:\n raise ValueError(\"Donation has to be more than 10 cents\")\n return donation", "def __mul__(self, other):\n\n\t\tassert set(self.keys()) == set(other.keys())\n\t\tdistribution, total = {}, 0.0\n\n\t\tfor key in self.keys():\n\t\t\tx, y = self.probability(key), other.probability(key)\n\t\t\tdistribution[key] = (x + eps) * (y + eps)\n\t\t\ttotal += distribution[key]\n\n\t\tfor key in self.keys():\n\t\t\tdistribution[key] /= total\n\n\t\t\tif distribution[key] <= eps / total:\n\t\t\t\tdistribution[key] = 0.0\n\t\t\telif distribution[key] >= 1 - eps / total:\n\t\t\t\tdistribution[key] = 1.0\n\n\t\treturn DiscreteDistribution(distribution)", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n 
self._distribution = distribution1D.BasicBernoulliDistribution(self.p)\n else:\n self.raiseAnError(IOError,'Truncated Bernoulli not yet implemented')", "def random_distribution(m):\n urls = []\n for i in range(m):\n p = 'https://en.wikipedia.org/wiki/Special:Random'\n urls.append(p)\n return Counter(distribution(urls))", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def distribution(self, token):\n if token not in self._dict:\n token = 'UNKNOWN_TOKEN' # yes, yes, bad coupling I know...\n if self.smooth:\n smoothing_dict = self.good_turing_mapping\n return Distribution(self._dict[token], smoothing_dict,\n self.count_counts[token])\n else:\n if self._dict[token]:\n return Distribution(self._dict[token])\n else:\n # no information -> use unigram\n return self.unigram_distribution", "def __init__(\n self, dist: rv_discrete, *args: Any, seed: int = 0, **kwds: Any\n ) -> None:\n\n rv_discrete_frozen.__init__(self, dist, *args, **kwds)\n Distribution.__init__(self, seed=seed)", "def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))", "def initializeDistribution(self):\n if self.lowerBoundUsed == False and self.upperBoundUsed == False:\n self._distribution = distribution1D.BasicBinomialDistribution(self.n,self.p)\n else:\n self.raiseAnError(IOError,'Truncated Binomial not yet implemented')" ]
[ "0.5947847", "0.5744744", "0.5591083", "0.55679595", "0.5545277", "0.5531983", "0.55294925", "0.55198205", "0.54576534", "0.544243", "0.5440499", "0.54399353", "0.5424996", "0.54166985", "0.5410182", "0.5375491", "0.53508025", "0.5302159", "0.5289846", "0.5281966", "0.52680683", "0.52679795", "0.5262408", "0.52619696", "0.52297956", "0.52141756", "0.5210093", "0.5202725", "0.5194954", "0.5179155" ]
0.6726229
0
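An illustrative sketch of using the DonationProb class from the entry above (and its lift method shown in the entries that follow); the 0.2% donation rate, the $3/$10 suggested amounts, the 70/30 split, and the 100,000-impression sample size are made-up numbers for demonstration only.

import numpy as np

# A multinomial over {0, 3, 10}: most impressions give 0, donors split 70/30 between $3 and $10.
prob = DonationProb(0.002, pos_amounts=[3.0, 10.0], pos_amounts_distribution=[0.7, 0.3])

# Expected revenue per impression is the mean of the distribution over donation values.
expected_per_impression = np.dot(prob.values, prob.distribution)   # 0.0102 with these numbers

# Simulate donations across 100,000 banner impressions.
rng = np.random.default_rng(0)
donations = rng.choice(prob.values, p=prob.distribution, size=100_000)

# A 5% relative lift in donation rate; the split over positive amounts is unchanged.
lifted = prob.lift(0.05)
print(expected_per_impression, donations.sum(), lifted.p_donate)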
Returns a DonationProb object whose donation rate was increased by lift percent
def lift(self, lift): p_donate = self.p_donate + self.p_donate * lift return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_p_donate(self, p_donate):\n return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)", "def get_opinion_percent(self):\n return (self.get_percent()+100)/2", "def pct(self):\n\t\treturn self.bottle.pct()", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def get_percentage(self):\n return self.PotTax_percentage", "def get_response_probability(self, ind):\n return self.rp_t[ind]", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def expected_outstanding_repayment(prediction_data, probability_of_default):\n return prediction_data.assign(probability_of_default=probability_of_default).assign(\n expected_repayment=lambda df: df.outstanding_balance\n * (1 - df.probability_of_default)\n )", "def percent_raised(self):\n total_cost = self.total_cost()\n if total_cost:\n return round(self.total_raised() * 100 / total_cost, 2)\n else:\n return 0", "def get_payoff(model, attack_policy, defense_policy):\n ave_discount_reward = get_payoff_mixed(model, [attack_policy], [defense_policy], [1.0], [1.0])\t\n return ave_discount_reward", "def get_probability(self, reaction):\n return self.__getitem__(reaction)", "def penalty(self):\n return 0", "def getAvgDonation(self):\n try:\n return self.getTotDonation()/self.getNumDonations()\n except ZeroDivisionError:\n return 0.0", "def penalty(self):\n assert len(self.weights) == len(self.means), \"Dimensions!\"\n out = np.exp(self.data['riskfree'] * self.data['maturity'])\n for weight, mean in zip(self.weights, self.means):\n out -= weight * np.exp(mean * self.data['maturity'])\n return (out**2).mean()**.5", "def get_percent_interest(self):\n return self.__percentage_interest", "def get_percent(self):\n return self.percent", "def discounted_reward(self, discount):\n\n tl = len(self)\n return (1 - discount) * np.sum(discount ** np.arange(tl) * self.rewards)", "def getProbationPeriod(self,probationPercent, pointCount):\n return min(\n math.floor(probationPercent * pointCount),\n probationPercent * 5000)", "def get_raw_probability(self):\n\t\tproba = RunOrder.BASE_SUCCESS_PROBABILITY\n\t\tproba += (self.additional_percents + self.hidden_percents) * 10\n\t\treturn proba", "def discount(self, period):\n\t\treturn 1.0/compound(period)", "def success_chance(dc,modifier=0,adv=False,disadv=False):\r\n if adv:\r\n return 1-((dc-modifier-1)/20)**2\r\n elif disadv:\r\n return (1-(dc-modifier-1)/20)**2\r\n return 1-(dc-modifier-1)/20", "def get_response_probability(self, ind):\n pass", "def relative_rate(self) -> \"double\":\n return _beamforming_swig.doaesprit_sptr_relative_rate(self)", "def get_cost(org, target, amount):\n rep, _ = target.Dominion.reputations.get_or_create(organization=org)\n base = 200\n if amount > 0:\n base -= rep.respect + rep.affection\n else:\n base += rep.respect + rep.affection\n if base < 0:\n base = 0\n return base * abs(amount)", "def relative_rate(self):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_relative_rate(self)", "def perc_greedy(population, percentage=80):\n \n\n #initialization\n res_arr = [2] * 10\n total_knights = 80\n\n medians = get_medians(population, percentage);\n\n while(total_knights > 0):\n \n # find 
\"easiest\" to acheive\n ind = medians.index(min(medians))\n\n # calculate the number of knights to assign to that castle\n assign = min(total_knights, medians[ind]-res_arr[ind] + 1)\n\n # make assignment\n res_arr[ind] += assign\n total_knights -= assign\n\n # mark that castle as \"done\"\n medians[ind] = 100\n \n # get the score of result inst against input population\n res_inst = CBInstance(res_arr)\n res_score = grade_inst(res_inst, population)\n \n return res_inst", "def fidelity_promo(percent: float) -> Promotion:\n return lambda order: (\n order.total() * percent / 100 if order.customer.fidelity >= 1000 else 0\n )", "def _calc_lift(self):\n for key, val in self.posteriors.items():\n if key == self.control_bucket_name:\n continue\n lift_over_control = np.divide(val.get_posterior_sample(),\n self.posteriors[\n self.control_bucket_name]\n .get_posterior_sample()) - 1\n if key not in self.lift.keys():\n self.lift[key] = {}\n self.lift[key][self.control_bucket_name] = lift_over_control\n else:\n self.lift[key][self.control_bucket_name] = lift_over_control\n if self.debug:\n percent_positive_lift = sum(i > 0 for i in\n lift_over_control) / \\\n len(lift_over_control)\n print('percent positive lift for {0} over {1} = {2:.2%}'\n .format(key, self.control_bucket_name,\n percent_positive_lift))\n\n if self.compare_variants:\n comparisons = list(range(0, len(self.variant_bucket_names)))\n combs = combinations(comparisons, 2)\n for combination in combs:\n denom = self.posteriors[\n self.variant_bucket_names[combination[0]]]\n num = self.posteriors[\n self.variant_bucket_names[combination[1]]]\n lift = np.divide(num.get_posterior_sample(),\n denom.get_posterior_sample()) - 1\n if num.get_variant_name() not in self.lift.keys():\n self.lift[num.get_variant_name()] = {}\n self.lift[num.get_variant_name()][\n denom.get_variant_name()] = lift\n else:\n self.lift[num.get_variant_name()][\n denom.get_variant_name()] = lift\n if self.debug:\n percent_positive_lift = sum(i > 0 for i in lift) \\\n / len(lift)\n print('percent positive lift for {0} over {1} = {2:.2%}'\n .format(num.get_variant_name(),\n denom.get_variant_name(),\n percent_positive_lift))", "def do_damage(self) -> float:\n res = 0.05 + self.experience / 100\n self.experience = self.experience + 1\n return res", "def breakdown_prob(self):\n if self.age <= 1:\n return self._breakdown_ratio + (self.breakdowns*self._broken_before)\n else:\n return ((self.age*self._breakdown_ratio) +\n (self.breakdowns*self._broken_before))" ]
[ "0.6890115", "0.59565574", "0.58431065", "0.5806238", "0.5804415", "0.5795312", "0.57341367", "0.5695293", "0.5571195", "0.55701756", "0.55421734", "0.553602", "0.55187565", "0.55174226", "0.55142564", "0.54630965", "0.5456505", "0.5456421", "0.54472786", "0.5411732", "0.5405623", "0.5405124", "0.538922", "0.5381024", "0.5359843", "0.5356911", "0.5350831", "0.534571", "0.5341261", "0.53328323" ]
0.7925812
0
Returns a DonationProb object whose donation rate was changed to p_donate, but the positive values and the distribution over positive values are unchanged
def change_p_donate(self, p_donate): return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lift(self, lift):\n p_donate = self.p_donate + self.p_donate * lift\n return DonationProb(p_donate, self.pos_amounts, self.pos_amounts_distribution)", "def __init__(self, p_donate, pos_amounts=[1.0,], pos_amounts_distribution=[1.0,]):\n self.p_donate = p_donate\n self.pos_amounts = pos_amounts\n self.pos_amounts_distribution = pos_amounts_distribution\n zero = np.array([0,])\n p_donate = np.array([p_donate],)\n pos_amounts = np.array(pos_amounts)\n pos_amounts_distribution = np.array(pos_amounts_distribution)\n self.values = np.concatenate([zero,pos_amounts])\n self.distribution = np.concatenate([1 - p_donate, p_donate * pos_amounts_distribution])", "def expected_outstanding_repayment(prediction_data, probability_of_default):\n return prediction_data.assign(probability_of_default=probability_of_default).assign(\n expected_repayment=lambda df: df.outstanding_balance\n * (1 - df.probability_of_default)\n )", "def valid_donation(donation):\n if donation < 0:\n raise ValueError(\"Donation has to be more than 0.\")\n if donation < 0.1:\n raise ValueError(\"Donation has to be more than 10 cents\")\n return donation", "def p(self) -> Probability:\n ...", "def dropout(X, p=0.):\n if p > 0:\n retain_prob = 1 - p\n X *= t_rng.binomial(X.shape, p=retain_prob, dtype=theano.config.floatX)\n X /= retain_prob\n return X", "def predator_change(self, prey, predators):\n\n # Calculate the rate of population change\n return self.predator_growth_rate * predators * prey", "def get_delta_distribution(non_zero_value):\n support = np.arange(0, MAX_MARK_VALUE + 1)\n probs = [0] * len(support)\n probs[non_zero_value] = 1.0\n return ProbabilityDistribution(support, probs)", "def make_change_dp(amount, denominations):", "def reconstructed_probability(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n pred = self.predict(x)\n recon_dist = Normal(pred['recon_mu'], pred['recon_sigma'])\n x = x.unsqueeze(0)\n p = recon_dist.log_prob(x).exp().mean(dim=0).mean(dim=-1) # vector of shape [batch_size]\n return p", "def valid_donation(donation):\n donation = float(donation)\n if donation < 0:\n raise ValueError(\"Donation has to be more than 0.\")\n if donation < 0.1:\n raise ValueError(\"Donation has to be more than 10 cents\")\n return donation", "def decay_proportion(L=102.4, p1=database['K+'], p=75, target_rate=53957518.001):\r\n tau = p1.tau*1e-3/c\r\n if target_rate == None:\r\n return np.exp(-(((L*p1.mass)/(p*c))/tau))\r\n return target_rate*np.exp(-(((L*p1.mass)/(p*c))/tau))", "def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))", "def dwindle(self, rate):\n\n if self.generation % rate == 0:\n self.mutation_prob /= 2", "def mutate(self, probability, rate):\n for i in range(self.number_of_transitions):\n shape = np.shape(self.weights[i])\n size = self.weights[i].size\n weights = self.weights[i].flatten()\n for j in range(len(weights)):\n if np.random.uniform(0, 1) < probability:\n weights[j] = weights[j] + rate * np.random.normal(0, 1 / np.sqrt(shape[0]))\n self.weights[i] = weights.reshape(shape)\n for j in range(len(self.biases[i])):\n if np.random.uniform(0, 1) < probability:\n self.biases[i][j] = self.biases[i][j] + rate * np.random.normal(0, 1)", "def confirmProbability(self, totalDice, bidCount):\n result = self.choose(totalDice, bidCount) * P**bidCount * (1 - P)**(totalDice-bidCount)\n return result", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda 
x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def prob_update(self):\n pass", "def breakdown_prob(self):\n if self.age <= 1:\n return self._breakdown_ratio + (self.breakdowns*self._broken_before)\n else:\n return ((self.age*self._breakdown_ratio) +\n (self.breakdowns*self._broken_before))", "def p_donate_ci(self, a=5, alpha =1, beta=1):\n ones = self.counts[1:]\n zeros = self.counts[0]\n dist = beta_dist(ones + alpha, zeros + beta, 10000)\n lower_bound = np.percentile(dist, a / 2.0)\n upper_bound = np.percentile(dist, 100 - a / 2.0)\n mean = np.mean(dist)\n return (lower_bound, self.p_donate, upper_bound)", "def prob_distr(self, x):\n return 1.0/x", "def probability(p):\n return p > random.uniform(0.0, 1.0)", "def proba_from_log_odds(self, log_odds):\n return (1/(1 + math.exp(log_odds)))", "def gPenalty(d):\n return -1/(d+0.2)**2 if d > -0.1 else 0", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def calculate_probability(self):\n 
return 0", "def prey_change(self, prey, predators):\n\n # Calculate the rate of population change\n return self.prey_growth_rate * prey * (1 - prey) - self.prey_death_rate * prey * predators", "def prob_choice(p):\n \n return np.random.random_sample() < p", "def get_ratio_guarantee_advance(self):\n return (\n self.ratio_guarantee_advance *\n self.get_period_guarantee_advance *\n self.ratio2_guarantee_advance\n )", "def rssiToPdr(cls, rssi, modulation = None):\n\n if SimSettings.SimSettings().individualModulations == 0:\n\n rssiPdrTable = {\n -97: 0.0000, # this value is not from experiment\n -96: 0.1494,\n -95: 0.2340,\n -94: 0.4071,\n # <-- 50% PDR is here, at RSSI=-93.6\n -93: 0.6359,\n -92: 0.6866,\n -91: 0.7476,\n -90: 0.8603,\n -89: 0.8702,\n -88: 0.9324,\n -87: 0.9427,\n -86: 0.9562,\n -85: 0.9611,\n -84: 0.9739,\n -83: 0.9745,\n -82: 0.9844,\n -81: 0.9854,\n -80: 0.9903,\n -79: 1.0000, # this value is not from experiment\n }\n\n minRssi = min(rssiPdrTable.keys())\n maxRssi = max(rssiPdrTable.keys())\n\n if rssi < minRssi:\n pdr = 0.0\n elif rssi > maxRssi:\n pdr = 1.0\n else:\n floorRssi = int(math.floor(rssi))\n pdrLow = rssiPdrTable[floorRssi]\n pdrHigh = rssiPdrTable[floorRssi+1]\n # linear interpolation\n pdr = (pdrHigh - pdrLow) * (rssi - float(floorRssi)) + pdrLow\n\n assert pdr >= 0.0\n assert pdr <= 1.0\n\n return pdr\n\n elif SimSettings.SimSettings().individualModulations == 1:\n # print(\"come here in other pdr computation\")\n assert modulation is not None\n # get the noise floor\n noise = Modulation.Modulation().receiverNoise\n # print 'noise = {0}'.format(noise)\n # print 'noise dbm = {0}'.format(_mWTodBm(noise))\n # print 'signal dbm = {0}'.format(rssi)\n # get the signal in milliWatt\n signal = _dBmTomW(rssi)\n # print 'signal = {0}'.format(signal)\n\n if SimSettings.SimSettings().measuredData == 1:\n return Modulation.Modulation().predictPRR(modulation, rssi)\n # if 0.85 >= Modulation.Modulation().predictPRR(modulation, rssi) >= 0.7:\n # return 0.75\n # elif 1.0 >= Modulation.Modulation().predictPRR(modulation, rssi) > 0.85:\n # return 0.71\n # else:\n # return 0.0\n # pass\n else:\n if signal < noise:\n # RSSI has not to be below noise level. If this happens, return very low SINR (-10.0dB)\n return 0.0\n\n # SNR\n snr = signal / noise\n # print('in rssitopdr: {0} mW'.format(snr))\n # print('in rssitopdr: {0} dbm'.format(_mWTodBm(snr)))\n\n return Propagation.Propagation()._computePdrFromSINR(_mWTodBm(snr), modulation=modulation, chunkSize=SimSettings.SimSettings().packetSize)\n\n # # BER\n # ber = Modulation.Modulation().getBER(snr, modulation, SimSettings.SimSettings().packetSize)\n #\n # # pdr = round(math.pow((1 - ber), (SimSettings.SimSettings().packetSize * 8)), 3)\n # pdr = Modulation.Modulation()._toPDR(ber, packetSize=SimSettings.SimSettings().packetSize)\n #\n # assert pdr >= 0.0\n # assert pdr <= 1.0\n #\n # return pdr" ]
[ "0.7298788", "0.62568116", "0.6176293", "0.57929885", "0.56549275", "0.556", "0.55039704", "0.5481789", "0.5442987", "0.54265326", "0.54142517", "0.5376003", "0.53016835", "0.52867156", "0.528147", "0.524017", "0.5222751", "0.5217876", "0.52153885", "0.5213066", "0.51969784", "0.5187894", "0.5181577", "0.5169976", "0.5162145", "0.5132655", "0.5126864", "0.51036346", "0.50800514", "0.50765985" ]
0.8732032
0
ShiftTradeSettings - a model defined in Swagger
def __init__(self): self.swagger_types = { 'enabled': 'bool', 'auto_review': 'bool', 'allow_direct_trades': 'bool', 'min_hours_in_future': 'int', 'unequal_paid': 'str', 'one_sided': 'str', 'weekly_min_paid_violations': 'str', 'weekly_max_paid_violations': 'str', 'requires_matching_queues': 'bool', 'requires_matching_languages': 'bool', 'requires_matching_skills': 'bool', 'requires_matching_planning_groups': 'bool', 'activity_category_rules': 'list[ShiftTradeActivityRule]' } self.attribute_map = { 'enabled': 'enabled', 'auto_review': 'autoReview', 'allow_direct_trades': 'allowDirectTrades', 'min_hours_in_future': 'minHoursInFuture', 'unequal_paid': 'unequalPaid', 'one_sided': 'oneSided', 'weekly_min_paid_violations': 'weeklyMinPaidViolations', 'weekly_max_paid_violations': 'weeklyMaxPaidViolations', 'requires_matching_queues': 'requiresMatchingQueues', 'requires_matching_languages': 'requiresMatchingLanguages', 'requires_matching_skills': 'requiresMatchingSkills', 'requires_matching_planning_groups': 'requiresMatchingPlanningGroups', 'activity_category_rules': 'activityCategoryRules' } self._enabled = None self._auto_review = None self._allow_direct_trades = None self._min_hours_in_future = None self._unequal_paid = None self._one_sided = None self._weekly_min_paid_violations = None self._weekly_max_paid_violations = None self._requires_matching_queues = None self._requires_matching_languages = None self._requires_matching_skills = None self._requires_matching_planning_groups = None self._activity_category_rules = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def settings(self) -> SchemaActionModel:\n from whyqd.models import SchemaActionModel\n\n action_settings = {\n \"name\": self.name,\n \"title\": self.title,\n \"description\": self.description,\n \"structure\": self.structure,\n }\n if self.modifiers:\n action_settings[\"modifiers\"] = self.modifiers\n return SchemaActionModel(**action_settings)", "async def get_trace_settings(\n self,\n model_name: str = ...,\n headers: dict[str, t.Any] = ...,\n as_json: t.Literal[False] = ...,\n ) -> service_pb2.TraceSettingResponse:", "async def update_trace_settings(\n self,\n model_name: str = ...,\n settings: dict[str, t.Any] = ...,\n headers: dict[str, t.Any] = ...,\n as_json: t.Literal[False] = ...,\n ) -> service_pb2.TraceSettingResponse:\n ...", "def settings(self):\r\n return SettingResource(self)", "def save(self):\n self.client._perform_empty(\n \"PUT\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id),\n body = self.settings)", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_pessoa': 'int',\n 'id_cartao': 'int',\n 'id_bandeira': 'int',\n 'id_tipo_cartao': 'int',\n 'numero_cartao': 'str',\n 'nome_plastico': 'str',\n 'cvv2': 'str',\n 'data_geracao': 'str',\n 'data_validade': 'str',\n 'cpf': 'str',\n 'tipo_portador': 'str',\n 'trilha1': 'str',\n 'trilha2': 'str',\n 'trilha_cvv1': 'str',\n 'trilha_cvv2': 'str',\n 'flag_virtual': 'int',\n 'nome_bandeira': 'str',\n 'flag_titular': 'int',\n 'sequencial_cartao': 'int',\n 'id_status': 'int',\n 'descricao_status_cartao': 'str',\n 'data_status': 'str',\n 'id_estagio': 'int',\n 'descricao_estagio': 'str',\n 'data_estagio': 'str',\n 'numero_bin': 'str',\n 'id_produto': 'int',\n 'descricao_produto': 'str',\n 'id_status_conta': 'int',\n 'descricao_status_conta': 'int',\n 'data_embossing': 'str',\n 'codigo_desbloqueio': 'str',\n 'nome_pessoa': 'str',\n 'tipo_pessoa': 'str',\n 'data_nascimento': 'str',\n 'id_endereco': 'int',\n 'id_tipo_endereco': 'int',\n 'descricao_tipo_endereco': 'str',\n 'cep': 'str',\n 'logradouro': 'str',\n 'numero_endereco': 'str',\n 'complemento_endereco': 'str',\n 'bairro': 'str',\n 'cidade': 'str',\n 'uf': 'str',\n 'pais': 'str',\n 'senha_criptografada': 'str',\n 'icvv': 'str',\n 'id_status_impressao': 'int'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_pessoa': 'idPessoa',\n 'id_cartao': 'idCartao',\n 'id_bandeira': 'idBandeira',\n 'id_tipo_cartao': 'idTipoCartao',\n 'numero_cartao': 'numeroCartao',\n 'nome_plastico': 'nomePlastico',\n 'cvv2': 'cvv2',\n 'data_geracao': 'dataGeracao',\n 'data_validade': 'dataValidade',\n 'cpf': 'cpf',\n 'tipo_portador': 'tipoPortador',\n 'trilha1': 'trilha1',\n 'trilha2': 'trilha2',\n 'trilha_cvv1': 'trilhaCVV1',\n 'trilha_cvv2': 'trilhaCVV2',\n 'flag_virtual': 'flagVirtual',\n 'nome_bandeira': 'nomeBandeira',\n 'flag_titular': 'flagTitular',\n 'sequencial_cartao': 'sequencialCartao',\n 'id_status': 'idStatus',\n 'descricao_status_cartao': 'descricaoStatusCartao',\n 'data_status': 'dataStatus',\n 'id_estagio': 'idEstagio',\n 'descricao_estagio': 'descricaoEstagio',\n 'data_estagio': 'dataEstagio',\n 'numero_bin': 'numeroBin',\n 'id_produto': 'idProduto',\n 'descricao_produto': 'descricaoProduto',\n 'id_status_conta': 'idStatusConta',\n 'descricao_status_conta': 'descricaoStatusConta',\n 'data_embossing': 'dataEmbossing',\n 'codigo_desbloqueio': 'codigoDesbloqueio',\n 'nome_pessoa': 'nomePessoa',\n 'tipo_pessoa': 'tipoPessoa',\n 'data_nascimento': 'dataNascimento',\n 'id_endereco': 'idEndereco',\n 'id_tipo_endereco': 
'idTipoEndereco',\n 'descricao_tipo_endereco': 'descricaoTipoEndereco',\n 'cep': 'cep',\n 'logradouro': 'logradouro',\n 'numero_endereco': 'numeroEndereco',\n 'complemento_endereco': 'complementoEndereco',\n 'bairro': 'bairro',\n 'cidade': 'cidade',\n 'uf': 'uf',\n 'pais': 'pais',\n 'senha_criptografada': 'senhaCriptografada',\n 'icvv': 'icvv',\n 'id_status_impressao': 'idStatusImpressao'\n }\n\n self._id_conta = None\n self._id_pessoa = None\n self._id_cartao = None\n self._id_bandeira = None\n self._id_tipo_cartao = None\n self._numero_cartao = None\n self._nome_plastico = None\n self._cvv2 = None\n self._data_geracao = None\n self._data_validade = None\n self._cpf = None\n self._tipo_portador = None\n self._trilha1 = None\n self._trilha2 = None\n self._trilha_cvv1 = None\n self._trilha_cvv2 = None\n self._flag_virtual = None\n self._nome_bandeira = None\n self._flag_titular = None\n self._sequencial_cartao = None\n self._id_status = None\n self._descricao_status_cartao = None\n self._data_status = None\n self._id_estagio = None\n self._descricao_estagio = None\n self._data_estagio = None\n self._numero_bin = None\n self._id_produto = None\n self._descricao_produto = None\n self._id_status_conta = None\n self._descricao_status_conta = None\n self._data_embossing = None\n self._codigo_desbloqueio = None\n self._nome_pessoa = None\n self._tipo_pessoa = None\n self._data_nascimento = None\n self._id_endereco = None\n self._id_tipo_endereco = None\n self._descricao_tipo_endereco = None\n self._cep = None\n self._logradouro = None\n self._numero_endereco = None\n self._complemento_endereco = None\n self._bairro = None\n self._cidade = None\n self._uf = None\n self._pais = None\n self._senha_criptografada = None\n self._icvv = None\n self._id_status_impressao = None", "def settings(self):\n from hubspot3.settings import SettingsClient\n\n return SettingsClient(**self.auth, **self.options)", "def create_settings():\n\n settings = {}\n\n settings['induction'] = {'type': 'DT'}\n\n settings['selection'] = {'type': 'Base',\n 'its': 1,\n 'param': 1}\n\n settings['prediction'] = {'type': 'MI',\n 'its': 0.1,\n 'param': 0.95}\n\n settings['queries'] = {}\n\n settings['metadata'] = {}\n\n settings['model_data'] = {}\n\n return settings", "def api_settings(settings):\n settings.REST_FRAMEWORK['PAGINATE_BY_PARAM'] = 'limit'\n settings.REST_FRAMEWORK['PAGINATE_PARAM'] = 'offset'\n settings.REST_FRAMEWORK['SEARCH_PARAM'] = 'search'\n settings.REST_FRAMEWORK['SEARCHTYPE_PARAM'] = 'searchtype'\n settings.REST_FRAMEWORK['MAX_PAGINATE_BY'] = 500\n settings.REST_FRAMEWORK['PAGINATE_BY'] = 500\n return settings", "async def economyset_showsettings(self, ctx: commands.Context):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n conf = self.config\r\n else:\r\n conf = self.config.guild(guild)\r\n await ctx.send(\r\n box(\r\n _(\r\n \"----Economy Settings---\\n\"\r\n \"Minimum slot bid: {slot_min}\\n\"\r\n \"Maximum slot bid: {slot_max}\\n\"\r\n \"Slot cooldown: {slot_time}\\n\"\r\n \"Payday amount: {payday_amount}\\n\"\r\n \"Payday cooldown: {payday_time}\\n\"\r\n \"Amount given at account registration: {register_amount}\\n\"\r\n \"Maximum allowed balance: {maximum_bal}\"\r\n ).format(\r\n slot_min=humanize_number(await conf.SLOT_MIN()),\r\n slot_max=humanize_number(await conf.SLOT_MAX()),\r\n slot_time=humanize_number(await conf.SLOT_TIME()),\r\n payday_time=humanize_number(await conf.PAYDAY_TIME()),\r\n payday_amount=humanize_number(await conf.PAYDAY_CREDITS()),\r\n register_amount=humanize_number(await 
bank.get_default_balance(guild)),\r\n maximum_bal=humanize_number(await bank.get_max_balance(guild)),\r\n )\r\n )\r\n )", "def CreateOrUpdateAWSSettings(self, body):\n # [POST] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/cloud-connect-aws/CreateOrUpdateAWSSettings\n FULL_URL = self.base_url+'/cloud-connect-aws/entities/settings/v1'\n HEADERS = self.headers\n BODY = body\n result = self.Result()\n try:\n response = requests.request(\"POST\", FULL_URL, json=BODY, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n \n return returned", "def test_valid_settings() -> None:\n SwaggerTesterSettings()", "def test_user_settings_serialization(self):\n\n # Construct a json representation of a UserSettings model\n user_settings_model_json = {}\n user_settings_model_json['language'] = 'testString'\n user_settings_model_json['notification_language'] = 'testString'\n user_settings_model_json['allowed_ip_addresses'] = '32.96.110.50,172.16.254.1'\n user_settings_model_json['self_manage'] = True\n\n # Construct a model instance of UserSettings by calling from_dict on the json representation\n user_settings_model = UserSettings.from_dict(user_settings_model_json)\n assert user_settings_model != False\n\n # Construct a model instance of UserSettings by calling from_dict on the json representation\n user_settings_model_dict = UserSettings.from_dict(user_settings_model_json).__dict__\n user_settings_model2 = UserSettings(**user_settings_model_dict)\n\n # Verify the model instances are equivalent\n assert user_settings_model == user_settings_model2\n\n # Convert model instance back to dict and verify no loss of data\n user_settings_model_json2 = user_settings_model.to_dict()\n assert user_settings_model_json2 == user_settings_model_json", "def __init__(self):\n self.swagger_types = {\n 'app_id': 'int',\n 'app_sw_rev': 'str',\n 'avg_hops': 'float',\n 'avg_latency': 'int',\n 'charge': 'int',\n 'estimated_latency_to_mote': 'int',\n 'hw_model': 'int',\n 'hw_rev': 'int',\n 'id': 'int',\n 'join_sys_time': 'datetime',\n 'last_voltage': 'int',\n 'lost_packet_count': 'int',\n 'mac_address': 'str',\n 'max_current': 'int',\n 'max_num_links': 'int',\n 'max_num_neighbors': 'int',\n 'need_neighbor': 'bool',\n 'num_good_neighbors': 'int',\n 'num_joins': 'int',\n 'num_links': 'int',\n 'num_neighbors': 'int',\n 'num_parents': 'int',\n 'power_cost_rx_link': 'int',\n 'power_cost_tx_link': 'int',\n 'reliability': 'float',\n 'rx_packet_count': 'int',\n 'stack_sw_rev': 'str',\n 'state': 'str',\n 'state_reason': 'str',\n 'state_sys_time': 'datetime',\n 'used_current': 'int'\n }\n\n self.attribute_map = {\n 'app_id': 'appId',\n 'app_sw_rev': 'appSwRev',\n 'avg_hops': 'avgHops',\n 'avg_latency': 'avgLatency',\n 'charge': 'charge',\n 'estimated_latency_to_mote': 'estimatedLatencyToMote',\n 'hw_model': 'hwModel',\n 'hw_rev': 'hwRev',\n 'id': 'id',\n 'join_sys_time': 'joinSysTime',\n 'last_voltage': 'lastVoltage',\n 'lost_packet_count': 'lostPacketCount',\n 'mac_address': 'macAddress',\n 'max_current': 'maxCurrent',\n 'max_num_links': 'maxNumLinks',\n 'max_num_neighbors': 'maxNumNeighbors',\n 'need_neighbor': 'needNeighbor',\n 'num_good_neighbors': 'numGoodNeighbors',\n 'num_joins': 'numJoins',\n 'num_links': 'numLinks',\n 'num_neighbors': 'numNeighbors',\n 'num_parents': 'numParents',\n 'power_cost_rx_link': 'powerCostRxLink',\n 'power_cost_tx_link': 'powerCostTxLink',\n 'reliability': 
'reliability',\n 'rx_packet_count': 'rxPacketCount',\n 'stack_sw_rev': 'stackSwRev',\n 'state': 'state',\n 'state_reason': 'stateReason',\n 'state_sys_time': 'stateSysTime',\n 'used_current': 'usedCurrent'\n }\n\n self._app_id = None\n self._app_sw_rev = None\n self._avg_hops = None\n self._avg_latency = None\n self._charge = None\n self._estimated_latency_to_mote = None\n self._hw_model = None\n self._hw_rev = None\n self._id = None\n self._join_sys_time = None\n self._last_voltage = None\n self._lost_packet_count = None\n self._mac_address = None\n self._max_current = None\n self._max_num_links = None\n self._max_num_neighbors = None\n self._need_neighbor = None\n self._num_good_neighbors = None\n self._num_joins = None\n self._num_links = None\n self._num_neighbors = None\n self._num_parents = None\n self._power_cost_rx_link = None\n self._power_cost_tx_link = None\n self._reliability = None\n self._rx_packet_count = None\n self._stack_sw_rev = None\n self._state = None\n self._state_reason = None\n self._state_sys_time = None\n self._used_current = None", "def setPineAPSettings(self, settings):\n self.request('setPineAPSettings', {'settings': json.dumps(settings)})", "def set_sws_params(self) -> None:\n typ_params: Dict = params.TypicalParam().an_sws\n self.set_params(typ_params)", "def set_sws_params(self) -> None:\n typ_params: Dict = params.TypicalParam().san_sws\n self.set_params(typ_params)", "def __init__(self):\n self.swagger_types = {\n 'max_occupancy_percent_for_deferred_work': 'int',\n 'default_shrinkage_percent': 'float',\n 'shrinkage_overrides': 'ShrinkageOverrides',\n 'planning_period': 'ValueWrapperPlanningPeriodSettings',\n 'start_day_of_weekend': 'str'\n }\n\n self.attribute_map = {\n 'max_occupancy_percent_for_deferred_work': 'maxOccupancyPercentForDeferredWork',\n 'default_shrinkage_percent': 'defaultShrinkagePercent',\n 'shrinkage_overrides': 'shrinkageOverrides',\n 'planning_period': 'planningPeriod',\n 'start_day_of_weekend': 'startDayOfWeekend'\n }\n\n self._max_occupancy_percent_for_deferred_work = None\n self._default_shrinkage_percent = None\n self._shrinkage_overrides = None\n self._planning_period = None\n self._start_day_of_weekend = None", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def _model_structure(self):\n self.model_structure = {\n 'title': str,\n 'description': str,\n 'tags': [str],\n 'references': [str],\n 'categories': [int],\n 'authors': [dict],\n 'defined_type': str,\n 'funding': str,\n 'license': str\n }", "def __init__(self, settings):\n \n # storing otmbs settings\n self.settings = settings", 
"def settings() -> Settings:\n return Settings()", "def __init__(self, planned: DutyTimes=None, actual: DutyTimes=None, shift_type: str=None):\n self.openapi_types = {\n 'planned': DutyTimes,\n 'actual': DutyTimes,\n 'shift_type': str\n }\n\n self.attribute_map = {\n 'planned': 'planned',\n 'actual': 'actual',\n 'shift_type': 'shiftType'\n }\n\n self._planned = planned\n self._actual = actual\n self._shift_type = shift_type", "def __init__(self):\n self.swagger_types = {\n 'id': 'str',\n 'domain': 'str',\n 'custom_domain': 'str',\n 'customer_email': 'str',\n 'customer_name': 'str',\n 'company': 'str',\n 'date_created': 'datetime',\n 'date_validity': 'datetime',\n 'status': 'str',\n 'account_id': 'str',\n 'cluster_id': 'str',\n 'task_id': 'str',\n 'version': 'str',\n 'is_latest': 'bool',\n 'product_id': 'str',\n 'variation_id': 'str'\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'domain': 'domain',\n 'custom_domain': 'custom_domain',\n 'customer_email': 'customer_email',\n 'customer_name': 'customer_name',\n 'company': 'company',\n 'date_created': 'date_created',\n 'date_validity': 'date_validity',\n 'status': 'status',\n 'account_id': 'account_id',\n 'cluster_id': 'cluster_id',\n 'task_id': 'task_id',\n 'version': 'version',\n 'is_latest': 'is_latest',\n 'product_id': 'product_id',\n 'variation_id': 'variation_id'\n }\n\n self._id = None\n self._domain = None\n self._custom_domain = None\n self._customer_email = None\n self._customer_name = None\n self._company = None\n self._date_created = None\n self._date_validity = None\n self._status = None\n self._account_id = None\n self._cluster_id = None\n self._task_id = None\n self._version = None\n self._is_latest = None\n self._product_id = None\n self._variation_id = None", "def settings(self):\r\n return settings.Settings(self)", "def get_settings(self):\n settings = self.client._perform_json(\n \"GET\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id))\n\n return DSSAPIServiceSettings(self.client, self.project_key, self.service_id, settings)", "def SetStowMode(self):\n handler = self.get_command_object(\"SetStowMode\")\n handler()", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def get_settings(self):\n return DSSWorkspaceSettings(self, self.client._perform_json(\"GET\", \"/workspaces/%s\" % self.workspace_key))", "def settings(self):\n return {}" ]
[ "0.56864434", "0.5336318", "0.5249357", "0.51228476", "0.4982549", "0.48665702", "0.48596522", "0.47971547", "0.4786752", "0.4769292", "0.47213194", "0.47105515", "0.47027385", "0.469973", "0.46692365", "0.46668038", "0.46638718", "0.46509078", "0.46421686", "0.46204582", "0.45979273", "0.45932063", "0.45916674", "0.45686927", "0.45658785", "0.45658088", "0.45496422", "0.45384297", "0.45121792", "0.4510481" ]
0.5606454
1
Gets the auto_review of this ShiftTradeSettings. Whether automatic shift trade review is enabled according to the rules defined for this management unit
def auto_review(self):
    return self._auto_review
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auto_review(self, auto_review):\n \n self._auto_review = auto_review", "def review(self):\n return self._review", "def auto(self):\n return self._auto", "def auto_mode(self):\n return self._auto_mode", "def review(self) -> object:\n return self._review", "def auto_devops_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_devops_enabled\")", "def auto_devops_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_devops_enabled\")", "def auto_devops_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auto_devops_enabled\")", "def auto_patching_settings(self) -> Optional['outputs.AutoPatchingSettingsResponse']:\n return pulumi.get(self, \"auto_patching_settings\")", "def is_auto_apply(self):\n\t\treturn bool(call_sdk_function('PrlVmDevNet_IsAutoApply', self.handle))", "def is_reviewed(self, obj) -> bool: # pylint:disable=R0201\n return obj.profile.is_reviewed", "def get_review_status(self):\n if not hasattr(self, 'credential_review'):\n status = 'Awaiting review'\n elif self.credential_review.status <= 20:\n status = 'Awaiting review'\n elif self.credential_review.status == 30:\n status = 'Awaiting a response from reference'\n elif self.credential_review.status >= 40:\n status = 'Awaiting final approval'\n\n return status", "def manual_approvals(self) -> typing.Optional[bool]:\n return self._values.get(\"manual_approvals\")", "def auto_renew(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def auto_renew(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def auto_assign(self) -> Optional[pulumi.Input[Union[str, 'BfdEnabled']]]:\n return pulumi.get(self, \"auto_assign\")", "def AutoNegotiation(self):\n\t\treturn self._get_attribute('autoNegotiation')", "def is_tools_auto_update_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsToolsAutoUpdateEnabled', self.handle))", "def manual_check(self):\n return self.__manual_check", "def enabled(self):\n return self._get('enabled')", "def enable_renewal(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_renewal\")", "def auto_renew(self) -> Optional[bool]:\n return pulumi.get(self, \"auto_renew\")", "def auto_renew(self) -> Optional[bool]:\n return pulumi.get(self, \"auto_renew\")", "def adjustable(self) -> bool:\n return pulumi.get(self, \"adjustable\")", "def __str__(self):\n if self.recommend:\n review = 'recommended by {}: {}'.format(self.reviewer, self.comments)\n else:\n review = 'not recommended by {}: {}'.format(self.reviewer, self.comments)\n\n return review", "def auto_renew(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_renew\")", "def __get_current_auto_os_patch_state_for_packagekit(self):\n self.composite_logger.log_debug(\"Fetching current automatic OS patch state in packagekit service. 
This includes checks on whether the service is installed, current auto patch enable state and whether it is set to enable on reboot\")\n self.__init_auto_update_for_packagekit()\n is_service_installed, enable_on_reboot_value, download_updates_value, apply_updates_value = self.__get_current_auto_os_updates_setting_on_machine()\n\n apply_updates = self.__get_extension_standard_value_for_apply_updates(apply_updates_value)\n\n if apply_updates == self.apply_updates_enabled or enable_on_reboot_value:\n return Constants.AutomaticOSPatchStates.ENABLED\n # OS patch state is considered to be disabled: a) if it was successfully disabled or b) if the service is not installed\n elif not is_service_installed or (apply_updates == self.apply_updates_disabled and not enable_on_reboot_value):\n return Constants.AutomaticOSPatchStates.DISABLED\n else:\n return Constants.AutomaticOSPatchStates.UNKNOWN", "def auto_cohort(self):\r\n if not self.is_cohorted:\r\n return False\r\n\r\n return bool(self.cohort_config.get(\r\n \"auto_cohort\", False))", "def auto_approve_purchase_order(self):\n return self._auto_approve_purchase_order", "def auto_backup_settings(self) -> Optional['outputs.AutoBackupSettingsResponse']:\n return pulumi.get(self, \"auto_backup_settings\")" ]
[ "0.62337404", "0.5877003", "0.58669287", "0.5605534", "0.55631214", "0.5485725", "0.5485725", "0.54195344", "0.53595024", "0.53475046", "0.5258285", "0.52451265", "0.520811", "0.5148728", "0.5148728", "0.5133972", "0.51075655", "0.5106319", "0.5101208", "0.5100062", "0.5099781", "0.50945413", "0.50945413", "0.5090786", "0.507481", "0.50746727", "0.5044495", "0.50303507", "0.5011028", "0.50061715" ]
0.7241317
0
Sets the auto_review of this ShiftTradeSettings. Whether automatic shift trade review is enabled according to the rules defined for this management unit
def auto_review(self, auto_review):
    self._auto_review = auto_review
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setAuto(self, auto):\n # type: (bool)->None\n\n self._validator.validate_one(\n 'auto', VALID_OPTS['auto'], auto)\n self._ifAttributes['auto'] = auto", "def auto_review(self):\n return self._auto_review", "def auto(self, auto):\n self._auto = auto", "def set_reviewing(self, revert=None):\n self.connection.set_reviewing(self.mturk_id, revert=revert)\n self.update()", "def review(self, review):\n self._review = review", "def review(self, review: object):\n\n self._review = review", "def setautoupdate(self, auto_update=1):\n # (net_bn* net, int auto_update)\n cnetica.SetNetAutoUpdate_bn.argtypes = [c_void_p, c_int]\n cnetica.SetNetAutoUpdate_bn.restype = None\n cnetica.SetNetAutoUpdate_bn(self.net, auto_update)", "def set_auto_apply(self, bAutoApply):\n\t\tcall_sdk_function('PrlVmDevNet_SetAutoApply', self.handle, bAutoApply)", "def auto_renew(self, auto_renew):\n\n self._auto_renew = auto_renew", "def auto_mode(self, enabled):\n self.set_auto_mode(enabled)", "def set_auto_start(self, nVmAutoStart):\n\t\tcall_sdk_function('PrlVmCfg_SetAutoStart', self.handle, nVmAutoStart)", "def with_manual_check_always(self):\n self.__manual_check = constants.ALWAYS\n return self", "def set_auto_pilot_mode(self):\n self._kernel.set_auto_pilot_mode()", "def mode_auto(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Bot Auto Mode Set\")\n self.patrol()", "def auto(self):\n return self._auto", "def auto_record(self, auto_record):\n\n self._auto_record = auto_record", "def recommended(self, recommended):\n\n self._recommended = recommended", "def set_auto_start_delay(self, nVmAutoStartDelay):\n\t\tcall_sdk_function('PrlVmCfg_SetAutoStartDelay', self.handle, nVmAutoStartDelay)", "def startUpdateReviewPosTool(self, revItem = None):\n \n self.currentRevItem = revItem\n self.iface.mapCanvas().setMapTool(self._updateReviewPos)\n self._rcltool.setEnabled(True)", "def setAutomaticMode(self, enabling: bool) -> None:\n ...", "def stock_status_changed_auto(self, stock_status_changed_auto):\n if stock_status_changed_auto is None:\n raise ValueError(\"Invalid value for `stock_status_changed_auto`, must not be `None`\")\n\n self._stock_status_changed_auto = stock_status_changed_auto", "def allow_automatic(self, allow_automatic):\n\n self._allow_automatic = allow_automatic", "def reset_reviews(self):\n # FIXME: this state does not make sense\n self.review_date_set = False\n self.review_comment_set = False", "def auto_approve_purchase_order(self, auto_approve_purchase_order):\n\n self._auto_approve_purchase_order = auto_approve_purchase_order", "def set_autoselection_mode(self, auto_mode):\n if auto_mode in [\n SI5324.AUTOMODE_Manual,\n SI5324.AUTOMODE_Auto_Revertive,\n SI5324.AUTOMODE_Auto_Non_Revertive]:\n self._set_register_field(SI5324._FIELD_Autoselection, auto_mode)\n else:\n raise I2CException(\n \"Incorrect Auto Selection mode specified ({}).\".format(auto_mode)\n + \" Choose from AUTOMODE_Manual, AUTOMODE_Auto_Non_Revertive,\"\n + \" or AUTOMODE_Auto_Revertive.\")", "def autoselect(self, autoselect):\n # type: (bool) -> None\n\n if autoselect is not None:\n if not isinstance(autoselect, bool):\n raise TypeError(\"Invalid type for `autoselect`, type has to be `bool`\")\n\n self._autoselect = autoselect", "def auto_confirmation_enabled(self, auto_confirmation_enabled):\n\n self._auto_confirmation_enabled = auto_confirmation_enabled", "def updateAuto(self):\r\n if self.varAutoParse.get():\r\n self.optionProfile.config(state=tk.NORMAL)\r\n else:\r\n 
self.optionProfile.config(state=tk.DISABLED)", "def review(self, performance_rating):\n self.correct = performance_rating >= 0.6\n now = datetime.datetime.now()\n if self.date_last_reviewed is None:\n self.date_last_reviewed = now\n percent_overdue = self.percent_overdue\n self.difficulty += percent_overdue / 17 * (8 - 9 * performance_rating)\n self.difficulty = max(0, min(self.difficulty, 1)) # clamp difficulty to [0, 1]\n difficulty_weight = 3 - 1.7 * self.difficulty\n if self.correct:\n self.days_between = 1 + (difficulty_weight - 1) * percent_overdue\n else:\n self.days_between = max(1, 1 / (difficulty_weight ** 2))\n self.date_last_reviewed = now", "def set_adjust_mem_auto(self, bAdjustMemAuto):\n\t\tcall_sdk_function('PrlDispCfg_SetAdjustMemAuto', self.handle, bAdjustMemAuto)" ]
[ "0.629535", "0.62901366", "0.6205562", "0.59484947", "0.586132", "0.5606449", "0.55544823", "0.5457604", "0.5437223", "0.5340885", "0.5207839", "0.51955974", "0.51841444", "0.5166985", "0.51591945", "0.5131413", "0.50916165", "0.50884414", "0.50818217", "0.5068563", "0.5065162", "0.5062116", "0.50612193", "0.5028402", "0.49903408", "0.49851593", "0.49600616", "0.49263936", "0.49118373", "0.4908316" ]
0.81262857
0
Gets the allow_direct_trades of this ShiftTradeSettings. Whether direct shift trades between agents are allowed
def allow_direct_trades(self):
    return self._allow_direct_trades
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allow_direct_trades(self, allow_direct_trades):\n \n self._allow_direct_trades = allow_direct_trades", "def allow_gateway_transit(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def trace_allow(self):\n return self._trace_allow", "def get_direct(self):\n return self._direct", "def can_turn_without_moving(self):\n return self.turn", "def direct(self) -> Optional[bool]:\n return self._direct", "def my_trades(self, **params):\n return self._get('option/myTrades', signed=True, params=params, version=None)", "def allow_purchase_order(self):\n return self._allow_purchase_order", "def isAllowDelay(self):\n return self.__allowDelay", "def isSiteSyndicationAllowed(self):\n return self.enabled", "def get_is_portal_enabled(self):\n return self.is_portal_enabled", "def drag_drop_enabled(self):\n ret_val = self._drag_drop_enabled()\n return ret_val", "def allow_forwarded_traffic(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def tethering_disabled(self):\n return self._tethering_disabled", "def points_allowed(self):\n return self._points_allowed", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def trades(self) -> list[TradeOffer]:\n return self._connection.trades", "def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def allow_forwarded_traffic(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_forwarded_traffic\")", "def is_export_policy_inherited(self):\n return self._is_export_policy_inherited", "def my_trades(self, **params):\n return self._get('myTrades', signed=True, params=params)", "def multi_share_enabled(self) -> bool:\n return pulumi.get(self, \"multi_share_enabled\")" ]
[ "0.747426", "0.53346044", "0.5100105", "0.5100105", "0.5003391", "0.49880004", "0.49498343", "0.49094033", "0.48874655", "0.48220852", "0.47878334", "0.47765717", "0.47406754", "0.47323847", "0.47250593", "0.4720025", "0.4684115", "0.46295232", "0.46295232", "0.46295232", "0.46295232", "0.46295232", "0.46295232", "0.46295232", "0.4619926", "0.46193212", "0.46193212", "0.460978", "0.4594461", "0.45841104" ]
0.79995286
0
Sets the allow_direct_trades of this ShiftTradeSettings. Whether direct shift trades between agents are allowed
def allow_direct_trades(self, allow_direct_trades):
    self._allow_direct_trades = allow_direct_trades
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allow_direct_trades(self):\n return self._allow_direct_trades", "def set_direct(self, direct):\n self._direct = direct", "def setDirect(self, direct):\n self._direct = direct", "def allow_purchase_order(self, allow_purchase_order):\n\n self._allow_purchase_order = allow_purchase_order", "def set_enabled(self, bEnabled):\n\t\tcall_sdk_function('PrlShare_SetEnabled', self.handle, bEnabled)", "def allow_gateway_transit(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def allows_recurring_payments(self, allows_recurring_payments):\n\n self._allows_recurring_payments = allows_recurring_payments", "def allow_manual(self, allow_manual):\n\n self._allow_manual = allow_manual", "def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def allow_gateway_transit(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_gateway_transit\")", "def allow_origins(self, allow_origins):\n\n self._allow_origins = allow_origins", "def manual_steering(self, turn_d_s):\n self._estimated_turn_rate_d_s = turn_d_s", "def set_direct_change(self, direct_change):\n self._direct_change = direct_change", "def _set_direct_mode(self, enable_direct_mode=True):\n if self.is_crippled_fs() and not enable_direct_mode:\n # TODO: ?? DIRECT - should we call git annex upgrade?\n raise CommandNotAvailableError(\n cmd=\"git-annex indirect\",\n msg=\"Can't switch to indirect mode on that filesystem.\")\n\n self.call_annex(['direct' if enable_direct_mode else 'indirect']),\n self.config.reload()\n\n # For paranoid we will just re-request\n self._direct_mode = None\n assert(self.is_direct_mode() == enable_direct_mode)\n\n # All further workarounds were stripped - no direct mode is supported", "def set_drag_drop_enabled(self, enable):\n self._set_drag_drop_enabled(enable)", "def fee_xtra_shares(self, fee_xtra_shares):\n\n self._fee_xtra_shares = fee_xtra_shares", "def allowed(self, allowed):\n if allowed is None:\n raise ValueError(\"Invalid value for `allowed`, must not be `None`\") # noqa: E501\n\n self._allowed = allowed", "def set_object_transfer_only_permissions(self, agent):\n\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Transfer)", "def give_permissions(self):\n self._activate()\n self.configure(state=\"enabled\")", "def my_trades(self, **params):\n return self._get('option/myTrades', signed=True, params=params, version=None)", "def proxy_enabled(self, proxy_enabled):\n\n self._proxy_enabled = proxy_enabled", "def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles", "def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles", "def set_object_mod_transfer_permissions(self, agent):\n\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 0, PermissionsMask.Copy)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Modify)\n self.update_object_permissions(agent, PermissionsTarget.NextOwner, 1, PermissionsMask.Transfer)", "def allow_automatic(self, allow_automatic):\n\n self._allow_automatic = allow_automatic", "def enable(self):\n options = self.get_direction_options()\n self.direction = options[0]\n self.state['enabled'] = True\n 
self.sound_manager.play_loop('std')", "def set_legal_moves(self, legal_moves):\n\n self._legal_moves = legal_moves", "def _setEnabled(self, indexlist):\n for index in self._items.keys():\n self.enable(index, index in indexlist)", "def set_is_portal_enabled(self, is_portal_enabled):\n self.is_portal_enabled = is_portal_enabled", "def respect_terminiation_periods_enabled(self, respect_terminiation_periods_enabled):\n\n self._respect_terminiation_periods_enabled = respect_terminiation_periods_enabled" ]
[ "0.7494934", "0.54587704", "0.51661927", "0.4795799", "0.47381973", "0.4686422", "0.4665296", "0.46351576", "0.46070793", "0.46070793", "0.45323443", "0.44950825", "0.4449625", "0.44402674", "0.43935964", "0.4376929", "0.4340441", "0.43230873", "0.43227234", "0.4282742", "0.4269001", "0.42532754", "0.42532754", "0.42529762", "0.424924", "0.42368305", "0.42180082", "0.42169836", "0.42097747", "0.41978747" ]
0.8692463
0
Gets the min_hours_in_future of this ShiftTradeSettings. The minimum number of hours in the future that shift trades are allowed
def min_hours_in_future(self):
    return self._min_hours_in_future
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_hours_in_future(self, min_hours_in_future):\n \n self._min_hours_in_future = min_hours_in_future", "def min_myproxy_hours(self):\n return int(self.__get_option('min_myproxy_valid_hours'))", "def min_voms_proxy_hours(self):\n return int(self.__get_option('min_voms_proxy_valid_hours'))", "def get_power_consumption_prev_hour(self):\n return self.power_consumption_prev_hour", "def interval_hours(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"interval_hours\")", "def get_min_hour():\n hour = _get_first_or_last_successful_hour(is_last=False)\n\n # `hour` is None when we haven't run build_crash_stats at all.\n # Therefore, there's no crash stats data.\n #\n # On the UI, the date-time picker choose a point of time. Therefore,\n # if we choose, say, 3pm, this means we want the crash stats until 2:59pm.\n # Therefore, we need to increment by 1.\n return (hour or 0) + 1", "def getTimeLeftMin(self):\n return self.getTimeLeftSec() / 60.0;", "def ingestion_wait_time_in_hours(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"ingestion_wait_time_in_hours\")", "def MinLeaseTime(self):\n if self.force_auto_sync:\n self.get('MinLeaseTime')\n return self._MinLeaseTime", "def MinLifetime(self):\r\n\t\treturn self._get_attribute('minLifetime')", "def hours(self):\n return self.config['hours']", "def min_retire_time(self):\n return self._min_retire_time", "def getWeeklyPlayTimeLeft(self):\n _, w = self.__stats.playLimits\n return w[0] - self._getWeeklyPlayHours()", "def reminder_minutes_before_start(self):\n if \"reminderMinutesBeforeStart\" in self._prop_dict:\n return self._prop_dict[\"reminderMinutesBeforeStart\"]\n else:\n return None", "def hours(self):\n return int(self.minutes / 60)", "def chi_min(self):\n chis = [tr.chi_min for tr in self._trc]\n return min(chis) if chis else None", "def remaintime_min(self):\n return self._get_time_info([\"Remain_Time_M\", \"remainTimeMinute\"])", "def initialtime_min(self):\n return self._get_time_info([\"Initial_Time_M\", \"initialTimeMinute\"])", "def get_min_tim(self):\n return self.get_shortest_mode().tim", "def locked_temp_min(self) -> int:\r\n # TODO: Force this to return an int.\r\n if self.temperature_scale == \"C\":\r\n return self.locked_temp_min_c\r\n elif self.temperature_scale == \"F\":\r\n return self.locked_temp_min_f\r\n else:\r\n return self._locked_temp_min\r\n\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"locked_temp_min\"))", "def getMinValue(self):\n return self.MIN_VALUE", "def min_time(self) -> str:\n return self._min_time", "def get_lowht(self):\n return self._lowht", "def get_image_cleaner_interval_hours(self) -> Union[int, None]:\n interval_hours = self._get_image_cleaner_interval_hours(enable_validation=True)\n\n return interval_hours", "def getHoursOffset(self):\n return _libsbml.Date_getHoursOffset(self)", "def get_tmin(self):\n tmin = min(sorted(self.srcData.keys()))\n return tmin", "def _get_min_tx_interval(self):\n return self.__min_tx_interval", "def min_week_days(self) -> int:\n return self._data['week_data']['min_days']", "def minTime(OPTIONS):\n time_options = [OPTIONS[i][-1] for i in range(len(OPTIONS))]\n time_options.sort()\n min_time = time_options[0]\n for i in range(len(OPTIONS)):\n if OPTIONS[i][-1] == min_time:\n LEAST_TIME_OPTION = OPTIONS[i]\n else:\n continue\n return LEAST_TIME_OPTION", "def minimum(self):\n return self.properties.get('minimum')" ]
[ "0.7420574", "0.6219645", "0.62086207", "0.58073217", "0.55174065", "0.5414188", "0.53939056", "0.53677446", "0.529913", "0.5277726", "0.52073365", "0.5196492", "0.51657456", "0.51279366", "0.51175094", "0.50999427", "0.50849766", "0.5078288", "0.50627273", "0.5060968", "0.5030726", "0.5029283", "0.50290406", "0.50287324", "0.5027843", "0.50209016", "0.5017835", "0.5013032", "0.5002278", "0.49681437" ]
0.8458117
0
Sets the min_hours_in_future of this ShiftTradeSettings. The minimum number of hours in the future shift trades are allowed
def min_hours_in_future(self, min_hours_in_future): self._min_hours_in_future = min_hours_in_future
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def min_hours_in_future(self):\n return self._min_hours_in_future", "def min_time(self, min_time: str):\n\n self._min_time = min_time", "def min_voms_proxy_hours(self):\n return int(self.__get_option('min_voms_proxy_valid_hours'))", "def min_myproxy_hours(self):\n return int(self.__get_option('min_myproxy_valid_hours'))", "async def update_trade_minimums(self):\n\n trade_base_btc_pair = '{}-BTC'.format(config['trade_base'])\n\n if config['trade_base'] != 'BTC':\n trade_base_rate = self.base_rates[trade_base_btc_pair]\n else:\n trade_base_rate = 1.0\n\n base_mult = await self.get_pair_base_mult(config['trade_base'], trade_base_btc_pair)\n self.min_trade_size = trade_base_rate * config['trade_min_size_btc'] * base_mult\n self.min_safe_trade_size = self.min_trade_size * (1.0 + config['trade_min_safe_percent'])", "def min_players(self, min_players):\n\n self._min_players = min_players", "def xp_per_min(self, xp_per_min):\n\n self._xp_per_min = xp_per_min", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def min_value(self, min_value):\n\n self._min_value = min_value", "def set_min_time(self, time):\n self.widget().setMinimumTime(time)", "def min(self, min):\n\n self._min = min", "def min(self, min):\n\n self._min = min", "def set_minimum(self, min_value):\n\n self._progress.setMinimum(min_value)", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def minimum_instances(self, minimum_instances):\n if (self.local_vars_configuration.client_side_validation and\n minimum_instances is not None and minimum_instances > 2147483647): # noqa: E501\n raise ValueError(\"Invalid value for `minimum_instances`, must be a value less than or equal to `2147483647`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n minimum_instances is not None and minimum_instances < -2147483648): # noqa: E501\n raise ValueError(\"Invalid value for `minimum_instances`, must be a value greater than or equal to `-2147483648`\") # noqa: E501\n\n self._minimum_instances = minimum_instances", "def val_future_end_time(value):\n today = timezone.now() + timezone.timedelta(minutes=settings.MIN_INTERVIEW_DURATION)\n if value < today:\n raise ValidationError(f'Datetime should be atleast {settings.MIN_INTERVIEW_DURATION} min after current Date and time')", "def _set_min_tx_interval(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"min-tx-interval\", rest_name=\"min-tx-interval\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"min_tx_interval must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"min-tx-interval\", rest_name=\"min-tx-interval\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='uint32', is_config=True)\"\"\",\n })\n\n self.__min_tx_interval = t\n if hasattr(self, '_set'):\n self._set()", "def current_low_priority(self, current_low_priority):\n if self.local_vars_configuration.client_side_validation and current_low_priority is None: # noqa: E501\n raise ValueError(\"Invalid value for `current_low_priority`, must not be `None`\") # noqa: E501\n\n self._current_low_priority = current_low_priority", "def buy_min_amount(self, buy_min_amount):\n\n self._buy_min_amount = buy_min_amount", "def _set_minimum(self, time):\n if time > self._maximum:\n self._maximum = time\n self._minimum = time", "def set_minVal(self, val):\n self.minVal = val", "def set_locked_temp_min(self, value: int = 0):\r\n if self._temperature_scale == \"F\":\r\n self._locked_temp_min = celsius_to_kelvin(\r\n fahrenheit_to_celsius(value)\r\n )\r\n elif self._temperature_scale == \"C\":\r\n self._locked_temp_min = celsius_to_kelvin(value)\r\n else:\r\n self._locked_temp_min = value\r\n\r\n self._logger.info(log_message_formatter(\r\n \"set\", f\"{self}\", \"locked_temp_min\", value))", "def set_lowht(self, lowht):\n self._lowht = lowht", "def set_min_participants(self, min_part):\n self.min_participants = min_part", "def minimum_temperature(self, minimum_temperature):\n\n self._minimum_temperature = minimum_temperature", "def min_wait_between_retries(self, min_wait_between_retries: ConfigNodePropertyInteger):\n\n self._min_wait_between_retries = min_wait_between_retries" ]
[ "0.7488132", "0.5778957", "0.55238605", "0.5499553", "0.5439674", "0.53993577", "0.5261042", "0.5257334", "0.5257334", "0.52563965", "0.5238442", "0.5238442", "0.5238442", "0.5131546", "0.512785", "0.512785", "0.5121968", "0.509475", "0.5084545", "0.5068271", "0.50552446", "0.5043918", "0.49995527", "0.4952149", "0.4923012", "0.4906466", "0.4870477", "0.48437038", "0.48208362", "0.4800357" ]
0.8711309
0
Gets the unequal_paid of this ShiftTradeSettings. How to handle shift trades which involve unequal paid times
def unequal_paid(self):
    return self._unequal_paid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unequal_paid(self, unequal_paid):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if unequal_paid.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for unequal_paid -> \" + unequal_paid)\n self._unequal_paid = \"outdated_sdk_version\"\n else:\n self._unequal_paid = unequal_paid", "def opposite(self, round_up=False):\n off = 0 if not round_up else len(self) % 2\n return self.options[(self.idx + ((len(self) // 2) + off)) % len(self)]", "def get_unpaid_items(self, acc: Account) -> list:\n unpaid_items = []\n for item, bal in self.get_item_balances(acc):\n assert isinstance(item, AccountEntry)\n priority = item.type.payback_priority if item.type is not None else 0\n if self.type == INVOICE_DEFAULT:\n if bal > Decimal(0):\n unpaid_items.append((priority, item, bal))\n elif self.type == INVOICE_CREDIT_NOTE:\n if bal < Decimal(0):\n unpaid_items.append((priority, item, bal))\n else:\n raise Exception(\"jacc.models.Invoice.get_unpaid_items() unimplemented for invoice type {}\".format(self.type))\n return [i[1:] for i in sorted(unpaid_items, key=lambda x: x[0])]", "def get_unpaid_invoices(self):\n unpaid = []\n\n # cycle through all (active) projects\n for project in self.project_list:\n # cycle through the invoices of the project\n for invoice in project.get_invoice_list():\n # append it, if it has no paid_date set (None)\n if invoice.get_paid_date() is None and invoice.get_date() is not None:\n unpaid.append(invoice)\n\n # sort the invoices by due date\n unpaid = sorted(unpaid, key=lambda x: x.get_due_date())\n\n return unpaid", "def paid(self):\n return self.get('paid')", "def get_payoffs(self):\n return self.game.get_payoffs()", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def get_payoffs(self):\n raise NotImplementedError", "def unpaid_interest(self, unpaid_interest):\n\n self._unpaid_interest = unpaid_interest", "def unpaid_interest(self, unpaid_interest):\n\n self._unpaid_interest = unpaid_interest", "def unmigrated(self):\n return self.exclude(\n Q(diff_hash__isnull=False) &\n (Q(parent_diff_hash__isnull=False) | Q(parent_diff64='')))", "def get_missing(self, other_draft):\n missing = {}\n this_picks = self.order_dict\n other_picks = other_draft.order_dict\n\n\n for k, v in this_picks.items():\n if k not in other_picks:\n missing[k] = v\n\n return sorted(missing.items(), key=lambda e: e[1])", "def decays(self):\n return self._base.decays", "def steam_power_depreciation(self):\n return self._steam_power_depreciation", "def _get_ldp_sync_hold_down(self):\n return self.__ldp_sync_hold_down", "def get_debt(self):\n sum_import = self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n ).aggregate(Sum(\"amount\"))\n return sum_import.get(\"amount__sum\", None)", "def getNotAfter(self):\n\n return self.get_POW().getNotAfter()", "def __ne__(self, other):\n if not isinstance(other, EnrollmentSettingsV2):\n return True\n\n return self.to_dict() != other.to_dict()", "def payoff(self, underlying: Union[float, np.array]) -> Union[float, np.array]:\n return np.maximum(0, underlying - self._strike) if self._is_call \\\n else np.maximum(0, 
self._strike - underlying)", "def ne(self, y):\n difference = self - y\n difference = type(difference).stack([difference, -difference])\n return difference._ltz().sum(0)", "def get_unpaid_invoices(self, with_vat=True):\n\n return self.call(method='getUnpaidInvoices', args=[with_vat])", "def get_possible_tw(self):\n ev = self.ev\n f = np.array([np.abs(a - b) for a in ev for b in ev if not np.isclose(a, b)])\n return f[~(np.triu(np.abs(f[:, None] - f) <= settings.EQ_COMPARE_TOL, 1)).any(0)]", "def amount_already_paid_in_period(self):\n assert self.type == \"N\", _(\"Subscription must be normal to use this method\")\n period_start, period_end = self.get_current_period()\n price_per_day = (\n self.get_price_for_full_period() / (period_end - period_start).days\n )\n days_already_used = (date.today() - period_start).days\n amount = int(price_per_day * days_already_used)\n if amount > self.get_price_for_full_period():\n amount = self.get_price_for_full_period()\n if amount < 0:\n amount = 0\n return amount", "def __ne__(self, other):\n if not isinstance(other, FlashSwapCurrencyPair):\n return True\n\n return self.to_dict() != other.to_dict()", "def get_payment(self):\n return self._payment_per_hour * self._hours_worked", "def test_overpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(580), A(0), A(0))], D(580))\n diff = A(500) - A(580)\n self.assert_balances(\n bank=A(580, 0, 0),\n invoiced=A(500),\n paid=A(-580),\n partial=A(580).net_amount,\n tax=A(580).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def get_payoffs(self, own_move, opponent_move):\n if own_move and opponent_move:\n return [self.game_matrix[1], self.game_matrix[1]]\n elif not own_move and opponent_move:\n return [self.game_matrix[0], self.game_matrix[3]]\n elif own_move and not opponent_move:\n return [self.game_matrix[3], self.game_matrix[0]]\n else:\n return [self.game_matrix[2], self.game_matrix[2]]", "def get_bprop_not_equal(self):\n\n def bprop(x, y, out, dout):\n return zeros_like(x), zeros_like(y)\n return bprop", "def __ne__(self, other):\n if not isinstance(other, DeliveryReportDeliveredSecondsByResolution):\n return True\n\n return self.to_dict() != other.to_dict()", "def get_pnl_trades(self):\n\n if self._pnl_trades is None:\n tsc = TimeSeriesCalcs()\n self._pnl_trades = tsc.calculate_individual_trade_gains(self._signal, self._pnl)\n\n return self._pnl_trades" ]
[ "0.6139955", "0.53387636", "0.49372396", "0.49044767", "0.48526916", "0.47926265", "0.47427806", "0.4700392", "0.46879274", "0.46879274", "0.46811366", "0.46606246", "0.4617068", "0.46066138", "0.45941156", "0.45581588", "0.45421192", "0.45329323", "0.45126745", "0.4495702", "0.44829884", "0.4471612", "0.4469212", "0.4468711", "0.44600508", "0.445019", "0.44365728", "0.44242278", "0.43792972", "0.43696523" ]
0.73930126
0
Sets the unequal_paid of this ShiftTradeSettings. How to handle shift trades which involve unequal paid times
def unequal_paid(self, unequal_paid):
    allowed_values = ["Allow", "Disallow", "AdminReview"]
    if unequal_paid.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for unequal_paid -> " + unequal_paid)
        self._unequal_paid = "outdated_sdk_version"
    else:
        self._unequal_paid = unequal_paid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unequal_paid(self):\n return self._unequal_paid", "def unpaid_interest(self, unpaid_interest):\n\n self._unpaid_interest = unpaid_interest", "def unpaid_interest(self, unpaid_interest):\n\n self._unpaid_interest = unpaid_interest", "def unapproved(self, unapproved):\n\n self._unapproved = unapproved", "def set_dues(net_id, paid):\n if paid not in(0, 1):\n raise AttributeError(\"Paid must be either 0 for false or 1 for true\")\n connection = get_connection()\n cursor = connection.cursor()\n sql_string = \"UPDATE Member SET dues_paid=\"+str(paid)+\" WHERE netID='\"+net_id+\"'\"\n cursor.execute(sql_string)\n connection.commit()", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def transaction_amount_paid(self, transaction_amount_paid):\n\n self._transaction_amount_paid = transaction_amount_paid", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def _positize_repay(self):\n for e in self._repay.entries:\n e.amount = abs(e.amount)", "def under_payout(self, under_payout):\n\n self._under_payout = under_payout", "def paid(self, paid):\n\n self._paid = paid", "def setImpossiblePenultimates(self, impossible_penultimates):\n return self._set(impossiblePenultimates=impossible_penultimates)", "def test_overpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(580), A(0), A(0))], D(580))\n diff = A(500) - A(580)\n self.assert_balances(\n bank=A(580, 0, 0),\n invoiced=A(500),\n paid=A(-580),\n partial=A(580).net_amount,\n tax=A(580).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def setNoCheckout(self) -> None:\n ...", "def reset(self):\n for key in self.portfolio.keys():\n self.portfolio[key] = {'holdings': 0}\n self.buys[key] = 0\n self.portfolio['balance'] = 2500000.0", "def pay_fee(self, fee):\n self.wallet -= fee", "def reset_players(self):\n self.dealer.reset()\n for player in self.players:\n player.reset()\n if player.bank <= 500:\n player.set_bank(1000)", "def resetPlayerBetAmount(self, players):\n\t\tfor x in players:\n\t\t\tx.betAmount = []", "def __ne__(self, other: 'UserSettings') -> bool:\n return not self == other", "def value_not_in(self, value_not_in):\n\n self._value_not_in = value_not_in", "def set_share(self, total_people):\n self.paid = self._get_paid()\n self.share = round(self.paid/Decimal(total_people), 2)", "def overdue_periods(self, overdue_periods):\n\n self._overdue_periods = overdue_periods", "def isolation_policy_num_not(self, isolation_policy_num_not):\n\n self._isolation_policy_num_not = isolation_policy_num_not", "def lose(self) -> None:\n self._actual_money -= self._bet", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper", "def __ne__(self, other):\n if not isinstance(other, EnrollmentSettingsV2):\n return True\n\n return self.to_dict() != other.to_dict()", "def value_not(self, value_not):\n\n self._value_not = value_not", "def 
test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def _decrease_money(self, amount):\n if 1 in self.money:\n self.money[1] -= amount", "def reset_parameters(self): \n self.deposit_intent = 0\n self.contribution_intent = 0\n self.sponsor_intent = 0\n self.teo_exchange_intent = 0\n self.euro_exchange_intent = 0\n self.withdraw_intent = 0\n \n self.hour_wallet = self.monthly_hours\n self.staged_euro = 0\n self.staged_teo = 0\n self.contributed_hours = 0\n self.exchanged_euros = 0\n self.exchanged_teos = 0\n self.withdrawn_euros = 0" ]
[ "0.66380477", "0.5880214", "0.5880214", "0.51383543", "0.500558", "0.49487096", "0.48211223", "0.47970998", "0.47960222", "0.47937986", "0.47918844", "0.47356665", "0.47185376", "0.4710121", "0.46704188", "0.4662278", "0.46506834", "0.46480542", "0.46471024", "0.46138376", "0.45692575", "0.455226", "0.45483717", "0.4544388", "0.45361128", "0.4532241", "0.45321447", "0.45220736", "0.45129302", "0.45112428" ]
0.7029199
0
Gets the one_sided of this ShiftTradeSettings. How to handle one-sided shift trades
def one_sided(self):
    return self._one_sided
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def side(self):\n return self._side.copy()", "def dwarsdoorsnede(self):\n return self._dwarsdoorsnede.get_waarde()", "def get_wl(self, strict=True):\n ax, wl, ds, ss = self.get_response()\n if (ax is True) and (strict is True):\n wl = None\n return wl", "def __returnCurrentSettingLocal__(self):\n return self.dmdParams", "def side(self) -> Union[MqexsSide, str]:\n return self.__side", "def one_sided(self, one_sided):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if one_sided.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for one_sided -> \" + one_sided)\n self._one_sided = \"outdated_sdk_version\"\n else:\n self._one_sided = one_sided", "def wireframe_only(self):\n return self._wireframe_only", "def getCashierShift(self):\n return self._CashierShift", "def half_bit(self):\n return self._half_bit", "def get_side_after(self, side: Side) -> Side:\n t_side, flipped = side, False\n if self.transpose:\n t_side = t_side + 1 if side % 2 == 0 else t_side - 1\n if self.fliplr:\n if side in [Side.LEFT, Side.RIGHT]:\n t_side += 2\n else:\n flipped = True\n if self.flipud:\n if side in [Side.TOP, Side.BOTTOM]:\n t_side += 2\n else:\n flipped = True\n return t_side, flipped", "def getToWinding(self):\n return self._ToWinding", "def side_wall(self):\n return self.container['side_wall']", "def get1DSpline( self ):\n return self._splines[self._layout.dims_order[-1]]", "def get_setting(self, id):\n return __settings__.getSetting(id)", "def _share_short_axis(self, share, side, level):\n if share is None or self._panel_side: # not None\n return\n s = side[0]\n axis = 'x' if s in 'lr' else 'y'\n caxs = getattr(self, '_' + s + 'panels')\n paxs = getattr(share, '_' + s + 'panels')\n caxs = [pax for pax in caxs if not pax._panel_filled]\n paxs = [pax for pax in paxs if not pax._panel_filled]\n for cax, pax in zip(caxs, paxs): # may be uneven\n getattr(cax, '_share' + axis + '_setup')(pax, level)", "def is_skew_symmetric(self):\n return self._info['skew_symmetric']", "def sitewide_key_pair(self):\n setting = ConfigurationSetting.sitewide(\n self._db, Configuration.KEY_PAIR\n )\n return Configuration.key_pair(setting)", "def get_fan_tripped(self):\n return self.__fan_trip", "def isOnWhichSymmetryLine(self):\n\n if self.ring is None:\n return None\n else:\n if not self.ring % 2:\n # only odd numbered rings can cut lines of symmetry\n return None\n\n r = self.ring\n if self.pos == 1 and self.ring == 1:\n symmetryLine = BOUNDARY_CENTER\n elif self.pos == (r - 1) * 6 - ((r + 1) // 2 - 2):\n # edge 1: 1/3 symmetry line (bottom horizontal side in 1/3 core view, theta = 0)\n symmetryLine = BOUNDARY_0_DEGREES\n elif self.pos == (r + 1) // 2:\n # edge 2: 1/6 symmetry line (bisects 1/3 core view, theta = pi/3)\n symmetryLine = BOUNDARY_60_DEGREES\n elif self.pos == 1 + ((r + 1) // 2 - 1) * 3:\n # edge 3: 1/3 symmetry line (left oblique side in 1/3 core view, theta = 2*pi/3)\n symmetryLine = BOUNDARY_120_DEGREES\n else:\n symmetryLine = None\n\n return symmetryLine", "def sides(self):\n\n return Finger.side_choices", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def _get_set_dscp(self):\n return self.__set_dscp", "def in_side(self, side):\n if side == \"U\":\n return self.z() == 1\n if side == \"D\":\n return self.z() == -1\n if side == \"F\":\n return self.y() == 
-1\n if side == \"B\":\n return self.y() == 1\n if side == \"R\":\n return self.x() == 1\n if side == \"L\":\n return self.x() == -1", "def IsLeftSnappable(self):\r\n \r\n return self.HasFlag(self.optionLeftSnapped)", "def get_dds(self):\n return self._dds", "def _get_potential_left_way(self, lanelet):\n if lanelet.adj_left:\n if lanelet.adj_left_same_direction:\n potential_left_way = self.right_ways.get(lanelet.adj_left)\n else:\n potential_left_way = self.left_ways.get(lanelet.adj_left)\n if potential_left_way:\n adj_left = self.lanelet_network.find_lanelet_by_id(lanelet.adj_left)\n vertices = (\n adj_left.right_vertices\n if lanelet.adj_left_same_direction\n else adj_left.left_vertices[::-1]\n )\n if _vertices_are_equal(lanelet.left_vertices, vertices):\n return potential_left_way\n\n return None" ]
[ "0.5588246", "0.4999655", "0.49308547", "0.49244335", "0.49237114", "0.49048492", "0.4892378", "0.4798444", "0.4798198", "0.47741646", "0.4773227", "0.47707933", "0.4730155", "0.46796873", "0.4664128", "0.46336004", "0.46076268", "0.45964298", "0.4591801", "0.45813593", "0.4579192", "0.4579192", "0.4579192", "0.4579192", "0.4579192", "0.4579192", "0.4558375", "0.45555592", "0.45457274", "0.45441443" ]
0.6997625
0
Sets the one_sided of this ShiftTradeSettings. How to handle one-sided shift trades
def one_sided(self, one_sided):
    allowed_values = ["Allow", "Disallow", "AdminReview"]
    if one_sided.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for one_sided -> " + one_sided)
        self._one_sided = "outdated_sdk_version"
    else:
        self._one_sided = one_sided
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def one_sided(self):\n return self._one_sided", "def set_side(self, side):\n raise RuntimeError(\"the 'set_side' method must be overriden\")", "def setSide(self, side):\r\n self.pack(side=side)", "def setSide(self, side):\r\n self.pack(side=side)", "def setSide(self, side):\r\n self.pack(side=side)", "def side(self, side):\n\n self._side = side", "def change_sides(self, sides):\n self.sides = sides", "def manual_steering(self, turn_d_s):\n self._estimated_turn_rate_d_s = turn_d_s", "def _share_short_axis(self, share, side, level):\n if share is None or self._panel_side: # not None\n return\n s = side[0]\n axis = 'x' if s in 'lr' else 'y'\n caxs = getattr(self, '_' + s + 'panels')\n paxs = getattr(share, '_' + s + 'panels')\n caxs = [pax for pax in caxs if not pax._panel_filled]\n paxs = [pax for pax in paxs if not pax._panel_filled]\n for cax, pax in zip(caxs, paxs): # may be uneven\n getattr(cax, '_share' + axis + '_setup')(pax, level)", "def make_empty_side(self, side):\n if side == u'right':\n for k,v in self.d.items():\n item = v[0]\n mnemo = ''\n self.d[k] = [item, mnemo]\n\n if side == u'left':\n for k,v in self.d.items():\n item = ''\n mnemo = v[1]\n self.d[k] = [item, mnemo]\n\n self.clear_controls()\n self.set_value(self.n_parent, self.n)", "def set_single(self) -> None:\n self._has_single = True", "def force_symmetric(self, force_symmetric):\n\n self._force_symmetric = force_symmetric", "def set_left(self, spd):\n self.l_motor.set(-spd)", "def change_sides(self, sides):\n self.sides = list(sides)", "def put_side_set(self, id, sideSetElements, sideSetSides):\n # Find the side set.\n _idx = self._f.variables[\"ss_prop1\"][:]\n assert id in _idx, \"Could not find side set with id %i.\" % id\n # 1-based indexing!\n idx = np.argwhere(_idx == id)[0][0] + 1\n\n elem_ss_name = \"elem_ss%i\" % idx\n side_ss_name = \"side_ss%i\" % idx\n\n self._f.variables[elem_ss_name][:] = sideSetElements\n self._f.variables[side_ss_name][:] = sideSetSides", "def setDirect(self, direct):\n self._direct = direct", "def SetTrSet(self,value):\n self.ds = value", "def set_direct(self, direct):\n self._direct = direct", "def changeRingSetting(self):\n #Input code to accommodate function of Ring setting", "def set_direction(self, direction):\n\n def same_axis(direction1, direction2):\n y_axis = [Direction.Y_POSITIVE, Direction.Y_NEGATIVE]\n x_axis = [Direction.X_POSITIVE, Direction.X_NEGATIVE]\n return ((direction1 in x_axis and direction2 in x_axis)\n or (direction1 in y_axis and direction2 in y_axis))\n\n if direction is None:\n return\n elif not same_axis(self.direction, direction):\n self.direction = direction", "def turn_on(self, **kwargs) -> None:\n self.heater.turn_on()", "def Direction(self, direction):\r\n \r\n self.dock_direction = direction\r\n return self", "def dock_direction_set(self, value):\r\n \r\n self._dock_direction = value", "def put_side_set(self, side_set_id, side_set_elem_list,\n side_set_side_list):\n ierr = exolib.py_expss(self.exoid, side_set_id,\n side_set_elem_list + self._o,\n side_set_side_list + self._o)\n if ierr:\n raise ExodusIIWriterError(\"Error putting side set\")", "def set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time", "def set_splitted(self, bSplitted):\n\t\tcall_sdk_function('PrlVmDevHd_SetSplitted', self.handle, bSplitted)", "def 
turn_draughts(self):\n self.is_draughts = True", "def setClientClockDrift(self, drift):\n self.clientClockDrift = drift", "def switch_frequency_plot_channel_one(self):\n if self.plot_channel_key_booleans[0]:\n self.plot_channel_key_booleans[0] = False\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (255, 255, 255))\n\n else:\n self.plot_channel_key_booleans[0] = True\n self.parent_widget.graph_channel_one_button.setStyleSheet(\n \"background-color:rgb(%d,%d,%d)\" % (LINE_COLORS[0]))", "def put_side_set_param(self, side_set_id, num_sides_in_set,\n num_dist_fact_in_set=0):\n ierr = exolib.py_expsp(self.exoid, side_set_id, num_sides_in_set,\n num_dist_fact_in_set)\n if ierr:\n raise ExodusIIWriterError(\"Error putting side set params\")" ]
[ "0.5757493", "0.569288", "0.5436307", "0.5436307", "0.5436307", "0.5415714", "0.5269162", "0.5172204", "0.508731", "0.50309396", "0.4948026", "0.49474117", "0.49171144", "0.48586398", "0.48435292", "0.47978467", "0.47255656", "0.47182512", "0.47144097", "0.47002354", "0.46988374", "0.46963716", "0.4678008", "0.46686104", "0.46674982", "0.46672446", "0.4663347", "0.46602175", "0.46318042", "0.46317825" ]
0.614867
0
Gets the weekly_min_paid_violations of this ShiftTradeSettings. How to handle shift trades which result in violations of the weekly minimum paid time constraint
def weekly_min_paid_violations(self):
    return self._weekly_min_paid_violations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekly_min_paid_violations(self, weekly_min_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_min_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_min_paid_violations -> \" + weekly_min_paid_violations)\n self._weekly_min_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_min_paid_violations = weekly_min_paid_violations", "def weekly_max_paid_violations(self):\n return self._weekly_max_paid_violations", "def min_week_days(self) -> int:\n return self._data['week_data']['min_days']", "def weekly_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RotationWeeklySettingArgs']]]]:\n return pulumi.get(self, \"weekly_settings\")", "def weekly_max_paid_violations(self, weekly_max_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_max_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_max_paid_violations -> \" + weekly_max_paid_violations)\n self._weekly_max_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_max_paid_violations = weekly_max_paid_violations", "def _getWeeklyPlayHours(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n weekDaysCount = account_shared.currentWeekPlayDaysCount(time_utils._g_instance.serverUTCTime, serverRegionalSettings['starting_time_of_a_new_day'], serverRegionalSettings['starting_day_of_a_new_week'])\n return self._getDailyPlayHours() + sum(self.__stats.dailyPlayHours[1:weekDaysCount])", "def min_days_for_renewal(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"min_days_for_renewal\")", "def get_min_increments(self):\n\n return self.min_increments", "def xp_per_min(self):\n return self._xp_per_min", "def min_failing_periods_to_alert(self) -> float:\n return pulumi.get(self, \"min_failing_periods_to_alert\")", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def min_days_for_renewal(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min_days_for_renewal\")", "def min_days_for_renewal(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"min_days_for_renewal\")", "def getWeeklyPlayTimeLeft(self):\n _, w = self.__stats.playLimits\n return w[0] - self._getWeeklyPlayHours()", "def weekly_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_week_start = today - timedelta(days=7)\n last_week_start = today - timedelta(days=14)\n week_per_min = []\n lastweek_per_min = []\n thisweek_viewed = []\n lastweek_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_week_start:\n per_min = get_cards_per_min(row)\n week_per_min.append(per_min)\n thisweek_viewed.append(row['total_looked_at'])\n if last_week_start <= row['session_start'].date() < this_week_start:\n per_min = get_cards_per_min(row)\n lastweek_per_min.append(per_min)\n lastweek_viewed.append(row['total_looked_at'])\n week_viewed_result = total_viewed(thisweek_viewed, lastweek_viewed)\n week_viewed_result['total_viewed_weekly'] = week_viewed_result.pop('total_viewed')\n\n return week_viewed_result", "def setMinW(self, w):\n return self._set(minW=w)", "def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = 
int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l", "def find_waste_min(sol_total, solutions):\n\n\twaste_min = solutions[0][2] + 1\n\n\tfor sol_num in range(0, sol_total): # finds solution with lowest waste\n\t\tif(waste_min > solutions[sol_num][2]):\n\t\t\topt = []\n\n\t\t\twaste_min = solutions[sol_num][2]\n\t\t\tn = solutions[sol_num][0]\n\t\t\tm = solutions[sol_num][1]\n\n\t\t\topt.append(waste_min)\n\t\t\topt.append(n)\n\t\t\topt.append(m)\n\n\treturn opt", "def interval_weeks(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_weeks\")", "def _get_min_expense(self):\n pass", "def weekly_sales(self):\n last_seven_day = timezone.now() - timedelta(days=7)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_seven_day)\n total_sales = 0\n for item in items:\n total_sales += item.price\n return total_sales", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def _find_min_diff(self, rejected):\n\t\t# TODO: optimize search for a minimum\n\t\tindexes = [i for i in range(self._size) if i not in rejected]\n\t\tvector = copy(self._diffEi)\n\t\tfor i in sorted(rejected,reverse=True):\n\t\t\tdel vector[i]\n\t\treturn min(zip(indexes,vector), key=itemgetter(1))[0]", "def inclusive_minimum(self):\n\n return self._inclusive_minimum", "def getWeeksToExpire(self):\n cert = self.getLatestValidCertification()\n if cert == None:\n return ''\n date = cert.getValidTo().asdatetime().date();\n return date - date.today()", "def min(self):\n\n return time_stat(self, stat=\"min\")", "def min_ps(self):\n return self._min_ps", "def weekly_per_min_comparison(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_week_start = today - timedelta(days=7)\n last_week_start = today - timedelta(days=14)\n week_per_min = []\n lastweek_per_min = []\n thisweek_viewed = []\n lastweek_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_week_start:\n per_min = get_cards_per_min(row)\n week_per_min.append(per_min)\n thisweek_viewed.append(row['total_looked_at'])\n if last_week_start <= row['session_start'].date() < this_week_start:\n per_min = get_cards_per_min(row)\n lastweek_per_min.append(per_min)\n lastweek_viewed.append(row['total_looked_at'])\n week_average = 0\n lastweek_average = 0\n if len(week_per_min) > 0 and len(lastweek_per_min) > 0:\n week_average = sum(week_per_min) / len(week_per_min)\n lastweek_average = sum(lastweek_per_min) / len(lastweek_per_min)\n elif len(week_per_min) == 0:\n week_average = 0\n elif len(lastweek_per_min) == 0:\n lastweek_average = 0\n if week_average > lastweek_average:\n color_code = \"09B109\"\n arrow = \"\\u2191\"\n elif week_average < lastweek_average:\n color_code = \"CE2929\"\n arrow = \"\\u2193\"\n else:\n color_code = \"000000\"\n arrow = \"\\u003D\"\n try:\n difference = abs((week_average - lastweek_average) / lastweek_average) * 100\n except ZeroDivisionError:\n difference = 100\n # if no sessions last week, difference is up 100%\n # if both averages are zero, this will display '0 100% =' in black\n result = make_results_dict(week_average, difference, color_code, arrow)\n result['weekly_cards_min'] 
= result.pop('metric')\n return result", "def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)", "def find_min(self):\n return self.min" ]
[ "0.69751316", "0.67295474", "0.64157695", "0.5974583", "0.564592", "0.528988", "0.526266", "0.5223079", "0.5202363", "0.5194301", "0.519075", "0.51646215", "0.51646215", "0.5089577", "0.50641555", "0.4979813", "0.49489027", "0.49106267", "0.48645687", "0.4855907", "0.4828529", "0.4824126", "0.4806164", "0.4786895", "0.47792011", "0.4771093", "0.4763748", "0.4748948", "0.47367224", "0.47335523" ]
0.84837914
0
Sets the weekly_min_paid_violations of this ShiftTradeSettings. How to handle shift trades which result in violations of weekly minimum paid time constraint
def weekly_min_paid_violations(self, weekly_min_paid_violations):
    allowed_values = ["Allow", "Disallow", "AdminReview"]
    if weekly_min_paid_violations.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for weekly_min_paid_violations -> " + weekly_min_paid_violations)
        self._weekly_min_paid_violations = "outdated_sdk_version"
    else:
        self._weekly_min_paid_violations = weekly_min_paid_violations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekly_min_paid_violations(self):\n return self._weekly_min_paid_violations", "def weekly_max_paid_violations(self, weekly_max_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_max_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_max_paid_violations -> \" + weekly_max_paid_violations)\n self._weekly_max_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_max_paid_violations = weekly_max_paid_violations", "def weekly_limit(self, weekly_limit):\n\n self._weekly_limit = weekly_limit", "def setMinW(self, w):\n return self._set(minW=w)", "def weekly_days(self, weekly_days):\n\n self._weekly_days = weekly_days", "def set_weekly(self, interval, *, days_of_week, first_day_of_week,\n **kwargs):\n self.set_daily(interval, **kwargs)\n self.__days_of_week = set(days_of_week)\n self.__first_day_of_week = first_day_of_week", "def xp_per_min(self, xp_per_min):\n\n self._xp_per_min = xp_per_min", "def weekly_max_paid_violations(self):\n return self._weekly_max_paid_violations", "def weekly_progress(self, weekly_progress):\n\n self._weekly_progress = weekly_progress", "def weekly_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RotationWeeklySettingArgs']]]]:\n return pulumi.get(self, \"weekly_settings\")", "def min_week_days(self) -> int:\n return self._data['week_data']['min_days']", "def win_lead_first_per(self, win_lead_first_per):\n\n self._win_lead_first_per = win_lead_first_per", "def min_players(self, min_players):\n\n self._min_players = min_players", "def week(self, week):\n\n self._week = week", "def buy_min_amount(self, buy_min_amount):\n\n self._buy_min_amount = buy_min_amount", "def donations_per_week(self, donations_per_week):\n\n self._donations_per_week = donations_per_week", "def _set_date_weekly(self):\n dt_weekday = dt.now()\n try:\n dt_weekday = self._get_datetime_or_error()\n except ValueError:\n self._dt_string = \"\"\n raise InvalidDateError(detail={\n \"message\": \"Invalid Date Provided\",\n \"period\": self.period.value,\n \"date\": self._given_date\n })\n week_start = dt_weekday - timedelta(days=dt_weekday.weekday())\n self.date['year'] = week_start.year\n self.date['month'] = week_start.month\n self.date['day'] = week_start.day", "async def update_trade_minimums(self):\n\n trade_base_btc_pair = '{}-BTC'.format(config['trade_base'])\n\n if config['trade_base'] != 'BTC':\n trade_base_rate = self.base_rates[trade_base_btc_pair]\n else:\n trade_base_rate = 1.0\n\n base_mult = await self.get_pair_base_mult(config['trade_base'], trade_base_btc_pair)\n self.min_trade_size = trade_base_rate * config['trade_min_size_btc'] * base_mult\n self.min_safe_trade_size = self.min_trade_size * (1.0 + config['trade_min_safe_percent'])", "def test_status_weeks_at_year_start(self):\n d = datetime(2013, 12, 31, 12, 13, 45, 0)\n with self.app.app_context():\n u = user(username='testuser', save=True)\n s = status(content='my status update', created=d, user=u, save=True)\n eq_(s.week_start.strftime(\"%Y-%m-%d\"), \"2013-12-30\")\n eq_(s.week_end.strftime(\"%Y-%m-%d\"), \"2014-01-05\")", "def set_RA0_user_constraints(self, RA_monthly_values_per_kW=5):\n # Create user constraints based on resource hours and regulation scenario\n new_hourly_timeseries = self.previous_initial_hourly_timeseries.copy(deep=True)\n\n # Create user constraints based on resource hours and regulation scenario\n if self.regulation_scenario == 1: # ENERGY reservations based on resource hours & service 
prices\n # SOC must be sufficient at beginning of each RA period\n new_hourly_timeseries.loc[self.window_start_index,\n 'Energy Min (kWh)'] = self.battery_discharging_power_max * self.RA_length\n # Set prices of other services as 0 during this window, except for energy arbitrage\n incompatible_services = ['FR Price ($/kW)', 'Reg Up Price ($/kW)', 'Reg Down Price ($/kW)',\n 'NSR Price ($/kW)', 'SR Price ($/kW)']\n for service in incompatible_services:\n new_hourly_timeseries.loc[self.window_index, service] = 0\n elif self.regulation_scenario == 2: # ONE-SIDED reservations based on resource hours\n # SOC must be sufficient at beginning of each RA period\n new_hourly_timeseries.loc[self.window_start_index, 'Energy Min (kWh)'] = \\\n self.battery_discharging_power_max * self.RA_length\n elif self.regulation_scenario == 3: # Reservations based on PREVIOUS DISPATCH\n raise ValueError(\"regulation_scenario 3 doesn't exist yet for RA\") # TODO\n else:\n raise ValueError(\"regulation_scenario must be 1, 2 or 3\")\n\n # Calculate RA values\n RA_values = RA_monthly_values_per_kW * 12 * self.battery_discharging_power_max\n\n # Create a new hourly timeseries dataframe as the Scenario time series file for a new SV run\n new_shortname = \"runID{}_constraintRA0_rs{}_hr{}-{}\".format(self.previous_runID, self.regulation_scenario,\n self.app_hours[0], self.app_hours[1])\n new_hourly_timeseries_path = self.runID_result_folder_path + \\\n \"/_new_hourly_timeseries_{}.csv\".format(new_shortname)\n new_hourly_timeseries.to_csv(new_hourly_timeseries_path, index=False)\n\n # Update attributes\n self.new_shortname = new_shortname\n self.new_hourly_timeseries_path = new_hourly_timeseries_path\n self.values = RA_values", "def set_min(self, min):\n self.set_val((min, self.val[1]))", "def min_hours_in_future(self, min_hours_in_future):\n \n self._min_hours_in_future = min_hours_in_future", "def set_min_participants(self, min_part):\n self.min_participants = min_part", "def kills_per_min(self, kills_per_min):\n\n self._kills_per_min = kills_per_min", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def minimum_instances(self, minimum_instances):\n if (self.local_vars_configuration.client_side_validation and\n minimum_instances is not None and minimum_instances > 2147483647): # noqa: E501\n raise ValueError(\"Invalid value for `minimum_instances`, must be a value less than or equal to `2147483647`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n minimum_instances is not None and minimum_instances < -2147483648): # noqa: E501\n raise ValueError(\"Invalid value for `minimum_instances`, must be a value greater than or equal to `-2147483648`\") # noqa: E501\n\n self._minimum_instances = minimum_instances", "def flag_day_sleep_length_less_than(self, sleep_period_col: str, min_sleep_in_minutes: int):\n\n for wearable in self.wearables.values():\n if sleep_period_col not in wearable.data.keys():\n warnings.warn(\"%s is not a valid entry in the data. \"\n \"Maybe you need to run ``SleepBoudaryDetector.detect_sleep_boundaries(...)`` first. 
\"\n \"Aborting....\" % sleep_period_col)\n return\n\n sleep_time_per_day = wearable.get_total_sleep_time_per_day(sleep_period_col)\n days_with_problem = sleep_time_per_day < min_sleep_in_minutes\n days_with_problem = days_with_problem[days_with_problem[sleep_period_col] == True].index.values\n\n if days_with_problem.size > 0:\n wearable.data.loc[\n wearable.data[wearable.get_experiment_day_col()].isin(\n days_with_problem), self.invalid_col] |= InvCode.FLAG_DAY_SHORT_SLEEP", "def set_weights(self,weights):\n for i,layer in enumerate(weights):\n #checking for any values equal to minval\n if np.any(layer==self.minval):\n weights[i]=np.where(weights[i]==self.minval,self.replace_min,weights[i])\n super().set_weights(list(weights))", "def testWeeklyOvertimes(self):\n dates = self.dates\n for day_num in xrange(28, 31):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 4, day_num)\n ))\n for day_num in xrange(5, 9):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 5, day_num)\n ))\n for day in dates:\n self.make_logs(day)\n\n def check_overtime(week0=Decimal('55.00'), week1=Decimal('55.00'),\n overtime=Decimal('30.00')):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n weekly_totals = response.context['weekly_totals'][0][0][0][2]\n self.assertEqual(weekly_totals[0], week0)\n self.assertEqual(weekly_totals[1], week1)\n self.assertEqual(weekly_totals[5], overtime)\n check_overtime()\n #Entry on following Monday doesn't add to week1 or overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 5, 9)))\n check_overtime()\n #Entries in previous month before last_billable do not change overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 4, 24)))\n check_overtime()\n #Entry in previous month after last_billable change week0 and overtime\n self.make_logs(utils.add_timezone(\n datetime.datetime(2011, 4, 25, 1, 0)\n ))\n check_overtime(Decimal('66.00'), Decimal('55.00'), Decimal('41.00'))", "def min(self, min):\n\n self._min = min" ]
[ "0.7291479", "0.6118862", "0.6029776", "0.5733139", "0.5726334", "0.57095027", "0.5663417", "0.55886453", "0.5575012", "0.5281766", "0.5271762", "0.51604384", "0.50848013", "0.49081635", "0.4861198", "0.48589855", "0.48533848", "0.48374414", "0.48121262", "0.47811553", "0.4771263", "0.47624567", "0.46986017", "0.46913314", "0.4662539", "0.4629346", "0.46173623", "0.45738426", "0.4564196", "0.45374808" ]
0.7584628
0
Gets the weekly_max_paid_violations of this ShiftTradeSettings. How to handle shift trades which result in violations of weekly maximum paid time constraint
def weekly_max_paid_violations(self):
    return self._weekly_max_paid_violations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekly_max_paid_violations(self, weekly_max_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_max_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_max_paid_violations -> \" + weekly_max_paid_violations)\n self._weekly_max_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_max_paid_violations = weekly_max_paid_violations", "def weekly_min_paid_violations(self):\n return self._weekly_min_paid_violations", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def get_week_of_max_relative_span(self):\n c = self.connection.cursor()\n self.print_datetime_output('Group time series by week and compute mean price')\n query = \"SELECT min((strftime('%Y%m%d', timestamp)/7*7 - 19000101) + 19000106) AS start_day, timestamp, \" \\\n \"min(close_USD), max(close_USD) FROM \" + self.db_table + \" GROUP BY (strftime('%Y%m%d', timestamp) - \" \\\n \"19000106)/7\"\n c.execute(query)\n min_max_close_week_df = pd.DataFrame(c.fetchall())\n min_max_close_week_df.columns = ['date', 'date_2', 'min', 'max']\n min_max_close_week_df['min'] = min_max_close_week_df['min'].astype(float)\n min_max_close_week_df['max'] = min_max_close_week_df['max'].astype(float)\n\n min_max_close_week_df['rel_span'] = ((min_max_close_week_df['max'] - min_max_close_week_df['min']) /\n min_max_close_week_df['min'])\n max_rel_span = min_max_close_week_df['rel_span'].max()\n week_max_rel_span = min_max_close_week_df[min_max_close_week_df['rel_span'] == max_rel_span]['date_2'].values[0]\n self.print_datetime_output('The week with max relative span is: %s' % week_max_rel_span)\n return week_max_rel_span", "def weekly_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RotationWeeklySettingArgs']]]]:\n return pulumi.get(self, \"weekly_settings\")", "def weekly_min_paid_violations(self, weekly_min_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_min_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_min_paid_violations -> \" + weekly_min_paid_violations)\n self._weekly_min_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_min_paid_violations = weekly_min_paid_violations", "def getWeeksToExpire(self):\n cert = self.getLatestValidCertification()\n if cert == None:\n return ''\n date = cert.getValidTo().asdatetime().date();\n return date - date.today()", "def _max_days(self):\n # type: (...) 
-> Union[int, Tuple[int]]\n\n return self.value.max_days", "def upper_earning_limit(self):\n\t\treturn self._upper_earning_limit", "def getWeeklyPlayTimeLeft(self):\n _, w = self.__stats.playLimits\n return w[0] - self._getWeeklyPlayHours()", "def get_max_win_strength(self):\n if self.maxWinStrength is None:\n self.calculate_max_win_strength()\n return self.maxWinStrength", "def _getWeeklyPlayHours(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n weekDaysCount = account_shared.currentWeekPlayDaysCount(time_utils._g_instance.serverUTCTime, serverRegionalSettings['starting_time_of_a_new_day'], serverRegionalSettings['starting_day_of_a_new_week'])\n return self._getDailyPlayHours() + sum(self.__stats.dailyPlayHours[1:weekDaysCount])", "def find_maximum_slope(self):\n slopes=[self.slope1, self.slope2, self.slope3, self.slope4, self.slope5, self.slope6, self.slope7, self.slope8]\n return max(slopes)", "def wave_get_max_pulses():\n return _u2i(_pigpio_command(_control, _PI_CMD_WVSP, 2, 0))", "def interval_weeks(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_weeks\")", "def max_retention_days(self) -> int:\n return pulumi.get(self, \"max_retention_days\")", "def get_payoffs(self):\n return self.game.get_payoffs()", "def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l", "def get_seven_days_stat(cls):\n return cls.get_specified_days_stat(7)", "def calcSalary(self):\n salary = (self.weekPay()) * 52\n return salary", "def limit_max_power(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.device.write(0xf100, util.u16_to_data(1)) # F142 R/W AdvancedPwrControlEn Int32 0-1\n else:\n self.inv.device.write(0xf100, util.u16_to_data(0)) # F142 R/W AdvancedPwrControlEn Int32 0-1\n wmax = params.get('WMaxPct')\n if wmax is not None:\n self.ts.log('Changing power to %d' % params.get('WMaxPct'))\n self.inv.device.write(0xf002, util.u16_to_data(params.get('WMaxPct')))\n else:\n params = {}\n if util.data_to_u16(self.inv.device.read(0xf100, 1)) == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['WMaxPct'] = util.data_to_u16(self.inv.device.read(0xf001, 1))\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params", "def get_db_backup_interval_max(self):\n return self.get_db_business_hours().max", "def _get_max_suppress_time(self):\n return self.__max_suppress_time", "def hr_report():\n\n # Load the peak data.\n db = Persistence()\n if not (activities := db.load_all()):\n print(\"No data to report on\")\n return\n\n # Find the maximum for each value.\n max = _load_max_values(activities)\n\n # Totals for the current week\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta()\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n 
week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n # Print the peak data for each week.\n current_weekday = None\n for activity in activities:\n\n # Time to break to a new week?\n if current_weekday is None or current_weekday > activity.start_time.weekday():\n if current_weekday:\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta(0)\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n _print_header()\n\n # Capture the weekday.\n if current_weekday is None or current_weekday != activity.start_time.weekday():\n week_work_days = week_work_days + 1\n\n current_weekday = activity.start_time.weekday()\n\n # Print the detail.\n _print_detail(activity, max)\n\n # Find the duration.\n duration = activity.end_time - activity.start_time\n\n # Accumulate for this week\n week_distance_total = week_distance_total + activity.distance\n if activity.elevation:\n week_elevation_total = week_elevation_total + activity.elevation\n week_duration_total = week_duration_total + duration\n week_5sec_average.append(activity.peak_5sec_hr)\n week_30sec_average.append(activity.peak_30sec_hr)\n week_60sec_average.append(activity.peak_60sec_hr)\n if activity.peak_5min_hr:\n week_5min_average.append(activity.peak_5min_hr)\n if activity.peak_10min_hr:\n week_10min_average.append(activity.peak_10min_hr)\n if activity.peak_20min_hr:\n week_20min_average.append(activity.peak_20min_hr)\n if activity.peak_30min_hr:\n week_30min_average.append(activity.peak_30min_hr)\n if activity.peak_60min_hr:\n week_60min_average.append(activity.peak_60min_hr)\n if activity.peak_90min_hr:\n week_90min_average.append(activity.peak_90min_hr)\n if activity.peak_120min_hr:\n week_120min_average.append(activity.peak_120min_hr)\n\n # Final footer.\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n\n # Print the summary.\n _print_summary(max)", "def maxs(self):\n return self._maxs", "def get_time_last_week():\n current_time = arrow.utcnow() # Get the current UTC Time\n return current_time.shift(weeks=-1) # Return the shifted time by -1 
weeks", "def get_limits(self):\n return self._get(limits.Limits)", "def _get_max_expense(self):\n pass", "def failing_periods(self) -> 'outputs.DynamicThresholdFailingPeriodsResponse':\n return pulumi.get(self, \"failing_periods\")", "def weekly_limit(self, weekly_limit):\n\n self._weekly_limit = weekly_limit" ]
[ "0.71017396", "0.6778176", "0.56508315", "0.5590555", "0.5535422", "0.5526067", "0.5260225", "0.5252201", "0.5142804", "0.51288325", "0.5119122", "0.5046455", "0.4955736", "0.49338713", "0.49301615", "0.48914012", "0.48864976", "0.4871266", "0.48201635", "0.47905493", "0.47873145", "0.47395667", "0.47218713", "0.47142696", "0.46869102", "0.46856534", "0.46765462", "0.46469787", "0.4644818", "0.46427622" ]
0.84426326
0
Sets the weekly_max_paid_violations of this ShiftTradeSettings. How to handle shift trades which result in violations of weekly maximum paid time constraint
def weekly_max_paid_violations(self, weekly_max_paid_violations):
    allowed_values = ["Allow", "Disallow", "AdminReview"]
    if weekly_max_paid_violations.lower() not in map(str.lower, allowed_values):
        # print("Invalid value for weekly_max_paid_violations -> " + weekly_max_paid_violations)
        self._weekly_max_paid_violations = "outdated_sdk_version"
    else:
        self._weekly_max_paid_violations = weekly_max_paid_violations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weekly_max_paid_violations(self):\n return self._weekly_max_paid_violations", "def weekly_limit(self, weekly_limit):\n\n self._weekly_limit = weekly_limit", "def weekly_min_paid_violations(self, weekly_min_paid_violations):\n allowed_values = [\"Allow\", \"Disallow\", \"AdminReview\"]\n if weekly_min_paid_violations.lower() not in map(str.lower, allowed_values):\n # print(\"Invalid value for weekly_min_paid_violations -> \" + weekly_min_paid_violations)\n self._weekly_min_paid_violations = \"outdated_sdk_version\"\n else:\n self._weekly_min_paid_violations = weekly_min_paid_violations", "def weekly_min_paid_violations(self):\n return self._weekly_min_paid_violations", "def weekly_days(self, weekly_days):\n\n self._weekly_days = weekly_days", "def limit_max_power(self, params=None):\n if self.inv is None:\n raise der.DERError('DER not initialized')\n\n try:\n if params is not None:\n ena = params.get('Ena')\n if ena is not None:\n if ena is True:\n self.inv.device.write(0xf100, util.u16_to_data(1)) # F142 R/W AdvancedPwrControlEn Int32 0-1\n else:\n self.inv.device.write(0xf100, util.u16_to_data(0)) # F142 R/W AdvancedPwrControlEn Int32 0-1\n wmax = params.get('WMaxPct')\n if wmax is not None:\n self.ts.log('Changing power to %d' % params.get('WMaxPct'))\n self.inv.device.write(0xf002, util.u16_to_data(params.get('WMaxPct')))\n else:\n params = {}\n if util.data_to_u16(self.inv.device.read(0xf100, 1)) == 0:\n params['Ena'] = False\n else:\n params['Ena'] = True\n params['WMaxPct'] = util.data_to_u16(self.inv.device.read(0xf001, 1))\n\n except Exception, e:\n raise der.DERError(str(e))\n\n return params", "def overdue_periods(self, overdue_periods):\n\n self._overdue_periods = overdue_periods", "def set_max_evaluations(self,ev):\n self.max_evaluations = ev", "def donations_per_week(self, donations_per_week):\n\n self._donations_per_week = donations_per_week", "def weekly_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RotationWeeklySettingArgs']]]]:\n return pulumi.get(self, \"weekly_settings\")", "def set_RA0_user_constraints(self, RA_monthly_values_per_kW=5):\n # Create user constraints based on resource hours and regulation scenario\n new_hourly_timeseries = self.previous_initial_hourly_timeseries.copy(deep=True)\n\n # Create user constraints based on resource hours and regulation scenario\n if self.regulation_scenario == 1: # ENERGY reservations based on resource hours & service prices\n # SOC must be sufficient at beginning of each RA period\n new_hourly_timeseries.loc[self.window_start_index,\n 'Energy Min (kWh)'] = self.battery_discharging_power_max * self.RA_length\n # Set prices of other services as 0 during this window, except for energy arbitrage\n incompatible_services = ['FR Price ($/kW)', 'Reg Up Price ($/kW)', 'Reg Down Price ($/kW)',\n 'NSR Price ($/kW)', 'SR Price ($/kW)']\n for service in incompatible_services:\n new_hourly_timeseries.loc[self.window_index, service] = 0\n elif self.regulation_scenario == 2: # ONE-SIDED reservations based on resource hours\n # SOC must be sufficient at beginning of each RA period\n new_hourly_timeseries.loc[self.window_start_index, 'Energy Min (kWh)'] = \\\n self.battery_discharging_power_max * self.RA_length\n elif self.regulation_scenario == 3: # Reservations based on PREVIOUS DISPATCH\n raise ValueError(\"regulation_scenario 3 doesn't exist yet for RA\") # TODO\n else:\n raise ValueError(\"regulation_scenario must be 1, 2 or 3\")\n\n # Calculate RA values\n RA_values = RA_monthly_values_per_kW * 12 * 
self.battery_discharging_power_max\n\n # Create a new hourly timeseries dataframe as the Scenario time series file for a new SV run\n new_shortname = \"runID{}_constraintRA0_rs{}_hr{}-{}\".format(self.previous_runID, self.regulation_scenario,\n self.app_hours[0], self.app_hours[1])\n new_hourly_timeseries_path = self.runID_result_folder_path + \\\n \"/_new_hourly_timeseries_{}.csv\".format(new_shortname)\n new_hourly_timeseries.to_csv(new_hourly_timeseries_path, index=False)\n\n # Update attributes\n self.new_shortname = new_shortname\n self.new_hourly_timeseries_path = new_hourly_timeseries_path\n self.values = RA_values", "def give_raise(self):\r\n self.hourly_pay = 12.00", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def buy_max_amount(self, buy_max_amount):\n\n self._buy_max_amount = buy_max_amount", "def weekly_progress(self, weekly_progress):\n\n self._weekly_progress = weekly_progress", "def sell_max_amount(self, sell_max_amount):\n\n self._sell_max_amount = sell_max_amount", "def max_players(self, max_players):\n\n self._max_players = max_players", "def setOverdraftLimit(self, newLimit):\n\n # checks if account can have an overdraft\n if self.overdraft == False:\n print(\"\\nUpdate Overdraft Limit - {self.name}\\nOverdraft unavailable\".format(self=self))\n # if overdraft available, updates overdraft limit with newLimit\n else:\n print(\"\\nUpdate Overdraft Limit - {self.name}\".format(self=self))\n print(\"Updated from £{self.overdraftLimit:.2f}\".format(self=self), end = \" \")\n self.overdraftLimit = newLimit\n print(\"to £{self.overdraftLimit:.2f}\".format(self=self))", "def set_weekly(self, interval, *, days_of_week, first_day_of_week,\n **kwargs):\n self.set_daily(interval, **kwargs)\n self.__days_of_week = set(days_of_week)\n self.__first_day_of_week = first_day_of_week", "def update_willorder_limit(\n sender, instance, created, using, update_fields, *args, **kwargs\n):\n # only triggers when 'been_paid' is passed to the kwarg update field when calling save method on invoice\n if update_fields and \"been_paid\" in update_fields:\n order_details = InvoiceService(instance.order).limit_details\n # goes through all the current order_details and sets limits on them when the invoice is paid\n for order_detail, order_numbers in order_details.items():\n try:\n willorder_limit = OrderLimit.objects.get(\n invoice=instance, detail=order_detail\n )\n if order_numbers > willorder_limit.limit:\n willorder_limit.limit = order_numbers\n willorder_limit.save()\n except OrderLimit.DoesNotExist:\n OrderLimit.objects.create(\n invoice=instance, detail=order_detail, limit=order_numbers\n )\n\n # update discounts as redeemed when billed\n discounts = instance.discounts.all()\n # sets record on discounts when paid\n if discounts.exists():\n for discount in discounts:\n discount.redeemed += 1\n discount.save()\n discount.redeemed_by.add(instance.order.user)", "def max_installments(self, max_installments):\n\n self._max_installments = max_installments", "def apply_raise(self):\n self.pay = int(self.pay * self.raise_amnt)", "def setPTLimits(*args):\n args[0].Limit.PTLimit.pt_limit = args[1]", "def testWeeklyOvertimes(self):\n dates = self.dates\n for day_num in xrange(28, 31):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 4, day_num)\n ))\n for day_num in xrange(5, 9):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 5, day_num)\n ))\n for day in dates:\n self.make_logs(day)\n\n def check_overtime(week0=Decimal('55.00'), 
week1=Decimal('55.00'),\n overtime=Decimal('30.00')):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n weekly_totals = response.context['weekly_totals'][0][0][0][2]\n self.assertEqual(weekly_totals[0], week0)\n self.assertEqual(weekly_totals[1], week1)\n self.assertEqual(weekly_totals[5], overtime)\n check_overtime()\n #Entry on following Monday doesn't add to week1 or overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 5, 9)))\n check_overtime()\n #Entries in previous month before last_billable do not change overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 4, 24)))\n check_overtime()\n #Entry in previous month after last_billable change week0 and overtime\n self.make_logs(utils.add_timezone(\n datetime.datetime(2011, 4, 25, 1, 0)\n ))\n check_overtime(Decimal('66.00'), Decimal('55.00'), Decimal('41.00'))", "def userDefinedLimits(self, user_defined_limits: bool) -> None:\n self._user_defined_limits = user_defined_limits\n self.reset_limits()", "def set_penalty_start_eventnums(self):\n for period in self.Periods:\n period.PenaltyStartEventNums = {self.HomeTeamId: None, self.VisitorTeamId: None}\n if period.Number <= 4:\n fouls_to_give = {self.HomeTeamId: 4, self.VisitorTeamId: 4}\n else:\n # in overtime periods teams start with 3 fouls to give\n fouls_to_give = {self.HomeTeamId: 3, self.VisitorTeamId: 3}\n\n for event in period.Events:\n if event.is_foul_that_counts_toward_penalty():\n foul_team = event.team_id\n event_time = event.seconds_remaining\n if event_time <= 120 and fouls_to_give[foul_team] > 1:\n # only 1 foul to give in final 2 minutes regardless of how many fouls committed up until then\n fouls_to_give[foul_team] = 1\n if fouls_to_give[foul_team] > 0:\n fouls_to_give[foul_team] -= 1\n if fouls_to_give[foul_team] == 0:\n # team entered penalty on this foul\n if 'Shooting' in event.get_foul_type():\n # shooting foul - start tracking at final ft so we don't count FTs as penalty\n final_fts_at_time_of_foul = [\n pbp_event for pbp_event in period.Events\n if pbp_event.seconds_remaining == event_time and\n pbp_event.team_id != foul_team and\n (pbp_event.is_ft_1_of_1() or pbp_event.is_ft_2_of_2() or pbp_event.is_ft_3_of_3())\n ]\n if len(final_fts_at_time_of_foul) == 0:\n # Example of when this happens: lane violation\n # just use last event that occured at time of foul\n events_at_time_of_foul = [\n pbp_event for pbp_event in period.Events\n if pbp_event.seconds_remaining == event_time\n ]\n start_event = events_at_time_of_foul[-1].order\n elif final_fts_at_time_of_foul[-1].is_missed_ft():\n # if FT is missed need to see if it was oreb or dreb\n rebounds_after_ft = [\n pbp_event for pbp_event in period.Events\n if pbp_event.order > event.order and\n pbp_event.is_rebound()\n ]\n # use first rebound after missed FT as bonus start event\n start_event = rebounds_after_ft[0].order\n else:\n # use last FT as bonus start event\n start_event = final_fts_at_time_of_foul[-1].order\n else:\n # non shooting foul - start tracking bonus at this event\n start_event = event.order\n offense_team = utils.swap_team_id_for_game(foul_team, [self.HomeTeamId, self.VisitorTeamId])\n period.PenaltyStartEventNums[offense_team] = start_event", "def change_max(self, level, value):\n if value < 0:\n raise AttributeError('max value should be greater than zero')\n if level in self.progress_maxes:\n self.progress_maxes[level] = value", "def work_hours_setting(self, work_hours_setting):\n\n self._work_hours_setting = work_hours_setting", "def 
max_master_payor_admins(self, max_master_payor_admins):\n\n self._max_master_payor_admins = max_master_payor_admins", "def _modify_pipeline_config(self, workers_num, epoch_time, params_dict):\n self._restore_quota_config()\n nas_time_dict, ft_time_dict = dict(), dict()\n for step_name in params_dict:\n step_time = epoch_time * params_dict[step_name]['epochs']\n if 'max_samples' in params_dict[step_name]:\n step_time = step_time * params_dict[step_name]['max_samples'] / workers_num\n nas_time_dict[step_name] = step_time\n else:\n ft_time_dict[step_name] = step_time\n nas_total_time = sum([value for key, value in nas_time_dict.items()])\n if nas_total_time == 0:\n return\n ft_total_time = sum([value for key, value in ft_time_dict.items()])\n left_time = self.max_runtime\n if not self.only_search:\n if ft_total_time > 0.9 * self.max_runtime:\n ft_total_time = 0.9 * self.max_runtime\n left_time = self.max_runtime - ft_total_time\n scale = left_time / nas_total_time\n for key, value in nas_time_dict.items():\n self.restrict_config.duration[key] = float(scale * value)\n self.restrict_config.trials = copy.deepcopy(self.temp_trials)\n logging.info('Max duration modified as {}'.format(self.restrict_config.duration))" ]
[ "0.72136927", "0.6369418", "0.6363601", "0.58544475", "0.5277122", "0.51809937", "0.51373297", "0.49580207", "0.49343422", "0.49329597", "0.4872632", "0.4870285", "0.48624778", "0.4831024", "0.47729272", "0.47707817", "0.47574925", "0.47397655", "0.47320494", "0.47287974", "0.4672291", "0.46452457", "0.46294305", "0.46045074", "0.4600535", "0.45923364", "0.45881817", "0.4578071", "0.45711434", "0.45685393" ]
0.7683507
0
Gets the requires_matching_queues of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching queues
def requires_matching_queues(self):
    return self._requires_matching_queues
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_queues(self, requires_matching_queues):\n \n self._requires_matching_queues = requires_matching_queues", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def requires_matching_skills(self):\n return self._requires_matching_skills", "def queues(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"queues\")", "def is_valid(self):\n return (\n self.data['queueType'] in self.VALID_QUEUES\n and self.data['matchMode'] in self.VALID_MODES\n and self.data['matchDuration'] > 800\n )", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def get_available_queues(self):\n return {settings.CELERY_DEFAULT_QUEUE}", "def support_queues(self):\r\n return support_queues.SupportQueues(self)", "def get_declared_queues(self):\n return self.queues.copy()", "def support_queues(self):\n return support_queues.SupportQueues(self)", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def EnableQueueStatMatchCapability(self):\n\t\treturn self._get_attribute('enableQueueStatMatchCapability')", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def _acceptable(self, team):\r\n current = [c for c in self.configurations if self._validateNoSpies(c, team)]\r\n return bool(len(current) > 0)", "def can_relax_constraints(self):\n if len(self.mand_classroom_constraints) == 0:\n if len(self.high_classroom_constraints) > 0:\n return True\n else:\n for cc in self.low_classroom_constraints:\n if cc.can_relax_constraints():\n return True\n\n if len(self.mand_timeblock_ids) == 0:\n if len(self.high_timeblock_ids) > 0:\n return True\n\n return False", "def required(self):\n\n return bool(self.qualifiers.get(\"required\", False))", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def calculate_queues(self):\n\t\t#queues = [get_queue(lane) for lane in self.Vissim_Lanes]\n\t\t\n\t\tqueues = [0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\t\treturn queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def score(self):\n return len([req for req in list(set(self.knowledges)) if req in \n self.key_requirements or req in self.other_requirements])", "def queues(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"queues\")" ]
[ "0.74711424", "0.6019627", "0.5670279", "0.56513906", "0.56271684", "0.5523794", "0.5453449", "0.5273352", "0.5258663", "0.5240057", "0.5237409", "0.5223872", "0.52098256", "0.5192199", "0.51543", "0.5153797", "0.5139222", "0.513476", "0.49981728", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49849987", "0.49647644", "0.49459004" ]
0.81816566
0
Sets the requires_matching_queues of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching queues
def requires_matching_queues(self, requires_matching_queues):
    self._requires_matching_queues = requires_matching_queues
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_queues(self):\n return self._requires_matching_queues", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def requires_matching_languages(self, requires_matching_languages):\n \n self._requires_matching_languages = requires_matching_languages", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def setup_queues_and_bindings(self):\n self._channel.exchange_declare(self.setup_queue, exchange=self.exchange, passive=True)", "def is_valid(self):\n return (\n self.data['queueType'] in self.VALID_QUEUES\n and self.data['matchMode'] in self.VALID_MODES\n and self.data['matchDuration'] > 800\n )", "def matches_add_fe_prereqs(self, fe_matches):\n #*** This assumes that policy doesn't do dumb stuff like try\n #*** to match tcp and udp in same rule...\n #\n #*** TBD: expand this to deal with all prerequisites and write tests\n\n if 'tcp_src' in fe_matches or 'tcp_dst' in fe_matches:\n #*** Set ip protocol to TCP\n fe_matches['ip_proto'] = 6\n if 'udp_src' in fe_matches or 'udp_dst' in fe_matches:\n #*** Set ip protocol to UDP\n fe_matches['ip_proto'] = 17\n if 'ip_proto' in fe_matches:\n #*** Set eth_type to IP:\n fe_matches['eth_type'] = 2048\n return fe_matches", "def rx_queue_settings(self, rx_queue_settings):\n\n self._rx_queue_settings = rx_queue_settings", "def match_order_queries(self, order_obj, matched_queries):\n if order_obj.order_action == \"buy\":\n self.reverse_sort = True\n max_buy = self.buy_list[-1].stock_value if self.buy_list else 0\n self.buy_list = self.insert_into_queue(order_obj, self.buy_list)\n if max_buy != self.buy_list[-1].stock_value:\n self.generate_matched_orders(order_obj.order_action,\n matched_queries)\n else:\n min_sell = self.sell_list[0].stock_value if self.sell_list else 0\n self.sell_list = self.insert_into_queue(order_obj, self.sell_list)\n if min_sell != self.sell_list[0].stock_value:\n self.generate_matched_orders(order_obj.order_action,\n matched_queries)", "def queues(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"queues\")", "def _set_queues(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_queues_openconfig_qos__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queues must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_queues_openconfig_qos__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__queues = t\n if hasattr(self, '_set'):\n self._set()", "def add_qb_wr_constraint(self, qb_wr_clusters = 1, qb_wr_cluster_size = 2):\n # Add function to be called on refresh.\n 
self.constraint_fns[self.add_qb_wr_constraint] = [qb_wr_clusters,\n qb_wr_cluster_size]\n\n # QB-WR clusters\n qb_wr_variables = {team: pulp.LpVariable(name=\"qb-wr-count-%s\" %team, cat='Binary')\n for team in self.db.teams()}\n\n for team in self.db.teams():\n self.prob += (qb_wr_variables[team] * qb_wr_cluster_size <= sum(\n self.player_vars[pid] for pid in self.db.pid_teams(team)\n if self.db.position(pid) in {Positions.QB, Positions.WR}),\n \"Team %s QB WR Cluster * %s must be <= # Active QB & WR on team.\" %(team, qb_wr_cluster_size))\n\n self.prob += (sum(qb_wr_variables.values()) >= qb_wr_clusters,\n \"Must have at least %s cluster of %s QB+WR on same team.\" \n %(qb_wr_clusters, qb_wr_cluster_size))", "def _set_queues(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_queues_openconfig_qos_elements__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queues must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_queues_openconfig_qos_elements__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__queues = t\n if hasattr(self, '_set'):\n self._set()", "def setup_queues():\n sqs = boto.connect_sqs()\n sqs.create_queue('mls_parse_requests')\n sqs.create_queue('mls_fetcher')", "def EnableQueueStatMatchCapability(self):\n\t\treturn self._get_attribute('enableQueueStatMatchCapability')", "def triggered(self, station_requirement=1, **station_trigger_kwargs):\n stations_hit = 0\n for station in self.subsets:\n if station.triggered(**station_trigger_kwargs):\n stations_hit += 1\n if stations_hit>=station_requirement:\n return True\n return stations_hit>=station_requirement", "def _set_queues(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=yc_queues_openconfig_qos_interfaces__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"queues must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=yc_queues_openconfig_qos_interfaces__qos_queues, is_container='container', yang_name=\"queues\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__queues = t\n if hasattr(self, '_set'):\n self._set()", "def requires_matching_skills(self):\n return self._requires_matching_skills", "def required_trophies(self, required_trophies):\n\n self._required_trophies = required_trophies", "def 
_remove_matched_freezers(self, match_events: List[Event]):\n if len(self.__freeze_map) == 0:\n # freeze option disabled\n return False\n self.__active_freezers = [freezer for freezer in self.__active_freezers if freezer not in match_events]", "def can_relax_constraints(self):\n if len(self.mand_classroom_constraints) == 0:\n if len(self.high_classroom_constraints) > 0:\n return True\n else:\n for cc in self.low_classroom_constraints:\n if cc.can_relax_constraints():\n return True\n\n if len(self.mand_timeblock_ids) == 0:\n if len(self.high_timeblock_ids) > 0:\n return True\n\n return False", "def get_available_queues(self):\n return {settings.CELERY_DEFAULT_QUEUE}", "def modify_queue_settings(q_settings):\r\n db = get_db()\r\n db.execute(UPDATE_QUEUE_SETTINGS, qsettings_dict_to_db_tuple_modify(q_settings))\r\n db.commit()\r\n permissions.update_permissions(q_settings['qid'],\r\n get_uids(q_settings['admins']),\r\n get_uids(q_settings['managers']) if q_settings.has_key('managers') else None,\r\n get_uids(q_settings['blocked_users']) if q_settings.has_key('blocked_users') else None)", "def tx_queue_settings(self, tx_queue_settings):\n\n self._tx_queue_settings = tx_queue_settings", "def constraint(self):\n with self.mutating:\n self.queue = heapq.nsmallest(self.max_size, self.queue)\n heapq.heapify(self.queue)", "def _acceptable(self, team):\r\n current = [c for c in self.configurations if self._validateNoSpies(c, team)]\r\n return bool(len(current) > 0)", "def SetMustLinks( self, mustLinks ):\n\t\tself.mustLinkConstraints = [ frozenset(constraint) for constraint in mustLinks ]", "def get_declared_queues(self):\n return self.queues.copy()", "def support_queues(self):\r\n return support_queues.SupportQueues(self)" ]
[ "0.7026056", "0.626371", "0.6080613", "0.5387939", "0.4977356", "0.4948154", "0.48699456", "0.48108634", "0.47921792", "0.4688709", "0.4672739", "0.46499202", "0.46497968", "0.4629668", "0.46294284", "0.46173754", "0.45999214", "0.45791215", "0.45584536", "0.455672", "0.4547426", "0.45230398", "0.45079672", "0.45042184", "0.44992828", "0.44686446", "0.44612333", "0.44520193", "0.44482166", "0.44443253" ]
0.82478404
0
Gets the requires_matching_languages of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching languages
def requires_matching_languages(self):
    return self._requires_matching_languages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_languages(self, requires_matching_languages):\n \n self._requires_matching_languages = requires_matching_languages", "def requires_matching_skills(self):\n return self._requires_matching_skills", "def best_match_language(self):\n if not self.accept_language:\n return None\n return self.accept_language.best_match(\n i18n.get_available_languages())", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def has_languages(self):\n return bool(self._languages)", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def allows_language_choice(self, allows_language_choice):\n\n self._allows_language_choice = allows_language_choice", "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def getAvailableTranslations(self):\n\n supported = set()\n for project in self.__projects:\n supported.update(project.getTranslations().keys())\n\n return supported", "def license_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"license_rules\")", "def license_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"license_rules\")", "def auto_build_bot_locales(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_build_bot_locales\")", "def get_minimum_should_match(self):\n return self.__minimum_should_match", "def is_accepting(self):\n return (self.position == 1) and (self.lhs.content == LANGUAGE)", "def language_supported(self, iso_lang=\"ca-ES\"): # -> bool\n test_lang = \"\"\n if len(iso_lang) == 0:\n return False\n try:\n for sep in [\"-\", \"_\"]:\n if sep in iso_lang:\n test_lang = iso_lang.split(sep)[0]\n break\n except (AttributeError, NameError):\n return False\n try:\n for _test in [iso_lang, test_lang]:\n if _test in gtts.tts.tts_langs():\n return True\n except NameError:\n pass\n return False", "def get_learning_languages(self):\n return self.userlanguage_set.exclude(level='N')", "def matching_supported(self, options=None):\n if self.is_comment:\n return False\n\n if self.is_html_rule: # HTML rules 
are not supported yet\n return False\n\n options = options or {}\n keys = set(options.keys())\n if not keys.issuperset(self._options_keys):\n # some of the required options are not given\n return False\n\n return True", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def detect_language(self, text):\n language_ratios = {}\n words = set([word.lower() for word in nltk.word_tokenize(text) if len(word)>2])\n\n for language in self._get_available_languages():\n stopwords_set = set(stopwords.words(language))\n common_elements = words.intersection(stopwords_set)\n language_ratios[language] = len(common_elements)\n\n return max(language_ratios, key=language_ratios.get)", "def language_supported(self,\n _iso_lang=\"en-US\",\n alt_local_url=\"\"): # -> bool\n _found_name = \"\"\n if alt_local_url.startswith(\"http\"):\n self.url = alt_local_url\n if self.ok:\n return self.ok\n if not bool(self.verified_voices):\n self.update_rhvoice_checklist()\n if not bool(self.verified_voices):\n self.ok = False\n return False\n self.ok = False\n for _search in [_iso_lang.lower(), _iso_lang.split(\"-\")[0].lower()]:\n for item in self.checklist:\n if item[0].lower().startswith(_search):\n self.checked_lang = item[0]\n self.ok = True\n break\n if len(self.checked_lang) != 0:\n break\n if len(self.checked_lang) != 0:\n for item in self.checklist:\n if bool(self.common.debug):\n print(item)\n if item[2] == _iso_lang.lower():\n self.checked_lang = item[0]\n self.ok = True\n break\n if self.ok:\n help_heading = self.help_heading\n help_url = self.help_url\n print(f\"\"\"\nChecking {help_heading} voices for `{_iso_lang}`\n========================================\n\n<{help_url}>\n\"\"\")\n return self.ok" ]
[ "0.7019373", "0.58443666", "0.5455803", "0.53113437", "0.5213689", "0.51799524", "0.50188375", "0.50188375", "0.50188375", "0.501323", "0.49779373", "0.49723494", "0.49723494", "0.49723494", "0.49518347", "0.49059954", "0.48904064", "0.4838629", "0.48077455", "0.47781244", "0.47781244", "0.47681588", "0.46245456", "0.46151954", "0.4603162", "0.45818755", "0.45603505", "0.4559487", "0.45506212", "0.45322463" ]
0.7820583
0
Sets the requires_matching_languages of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching languages
def requires_matching_languages(self, requires_matching_languages): self._requires_matching_languages = requires_matching_languages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_languages(self):\n return self._requires_matching_languages", "def allows_language_choice(self, allows_language_choice):\n\n self._allows_language_choice = allows_language_choice", "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def test_set_language(self):\n # Test for default languages\n self.assertEqual(self.scraper.language_original, 'jpn')\n self.assertEqual(self.scraper.language_translated, 'eng')\n\n # Test after setting supported languages\n self.scraper.set_languages('jpn', 'eng')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')\n\n # Test after setting non-supported languages\n self.scraper.set_languages('eng', 'lol')\n self.assertEqual(self.scraper.language_translated, 'jpn')\n self.assertEqual(self.scraper.language_original, 'eng')", "def matches_add_fe_prereqs(self, fe_matches):\n #*** This assumes that policy doesn't do dumb stuff like try\n #*** to match tcp and udp in same rule...\n #\n #*** TBD: expand this to deal with all prerequisites and write tests\n\n if 'tcp_src' in fe_matches or 'tcp_dst' in fe_matches:\n #*** Set ip protocol to TCP\n fe_matches['ip_proto'] = 6\n if 'udp_src' in fe_matches or 'udp_dst' in fe_matches:\n #*** Set ip protocol to UDP\n fe_matches['ip_proto'] = 17\n if 'ip_proto' in fe_matches:\n #*** Set eth_type to IP:\n fe_matches['eth_type'] = 2048\n return fe_matches", "def requires_matching_skills(self):\n return self._requires_matching_skills", "def setApplicableValidators(self, *args):\n return _libsbml.SBMLDocument_setApplicableValidators(self, *args)", "def requires_matching_queues(self, requires_matching_queues):\n \n self._requires_matching_queues = requires_matching_queues", "def on_matching_rules(self, matching_rules):\n pass", "def languages(self, languages):\n self._languages = languages", "def best_match_language(self):\n if not self.accept_language:\n return None\n return self.accept_language.best_match(\n i18n.get_available_languages())", "def languages(self, languages):\n\n self._languages = languages", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def is_forced(self, lang):\r\n return False", "def can_handle_language(cls, language: Hashable) -> bool:\n\n # if language_list is set to `None` it means: support all languages\n if language is None or cls.language_list is None:\n return True\n\n return language in cls.language_list", "def supported_languages(self):\n return SUPPORT_LANGUAGES", "def enableVocolaTakesLanguages(self):\n key = \"VocolaTakesLanguages\"\n self.userregnl.set(key, 1)", "def __availableTranslationsLoaded(self):\n origLanguage = self.__plugin.getPreferences(\"OriginalLanguage\")\n transLanguage = self.__plugin.getPreferences(\"TranslationLanguage\")\n \n self.__updateLanguages()\n \n origIndex = self.origLanguageComboBox.findData(origLanguage)\n self.origLanguageComboBox.setCurrentIndex(origIndex)\n self.on_origLanguageComboBox_currentIndexChanged(origIndex)\n self.transLanguageComboBox.setCurrentIndex(\n self.transLanguageComboBox.findData(transLanguage))", "def set_required_for_language(form_class):\n\n css_classname = 'required-for-language'\n\n fields = form_class._meta.model.get_required_translatable_fields()\n for name, 
(code, _) in itertools.product(fields, settings.LANGUAGES):\n field_name = build_localized_fieldname(name, lang=code)\n field = form_class.base_fields[field_name]\n if field.required is False:\n attrs = field.widget.attrs\n attrs['required_for_language'] = True\n attrs['class'] = attrs.get('class', '') + ' ' + css_classname", "def matching_supported(self, options=None):\n if self.is_comment:\n return False\n\n if self.is_html_rule: # HTML rules are not supported yet\n return False\n\n options = options or {}\n keys = set(options.keys())\n if not keys.issuperset(self._options_keys):\n # some of the required options are not given\n return False\n\n return True", "def language_supported(self,\n _iso_lang=\"en-US\",\n alt_local_url=\"\"): # -> bool\n _found_name = \"\"\n if alt_local_url.startswith(\"http\"):\n self.url = alt_local_url\n if self.ok:\n return self.ok\n if not bool(self.verified_voices):\n self.update_rhvoice_checklist()\n if not bool(self.verified_voices):\n self.ok = False\n return False\n self.ok = False\n for _search in [_iso_lang.lower(), _iso_lang.split(\"-\")[0].lower()]:\n for item in self.checklist:\n if item[0].lower().startswith(_search):\n self.checked_lang = item[0]\n self.ok = True\n break\n if len(self.checked_lang) != 0:\n break\n if len(self.checked_lang) != 0:\n for item in self.checklist:\n if bool(self.common.debug):\n print(item)\n if item[2] == _iso_lang.lower():\n self.checked_lang = item[0]\n self.ok = True\n break\n if self.ok:\n help_heading = self.help_heading\n help_url = self.help_url\n print(f\"\"\"\nChecking {help_heading} voices for `{_iso_lang}`\n========================================\n\n<{help_url}>\n\"\"\")\n return self.ok", "def train(self, learned_patterns):\n self.learned_patterns = learned_patterns\n self.select_strong_subjective_patterns()", "def setStrategy(self, strategy):\r\n if strategy in NNPlayer.LEGAL_STRATEGY:\r\n self.strategy = strategy\r\n return True\r\n return False", "def test_language_fix(self):\n #TODO\n \n for lang in self.LANGUAGES:\n activate(lang)\n \n self.assertEqual(lang, get_language())", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def closest_match(desired_language: {str, Language}, supported_languages: list,\n max_distance: int=25) -> (str, int):\n # Quickly return if the desired language is directly supported\n if desired_language in supported_languages:\n return desired_language, 0\n\n # Reduce the desired language to a standard form that could also match\n desired_language = standardize_tag(desired_language)\n if desired_language in supported_languages:\n return desired_language, 0\n\n match_distances = [\n (supported, tag_distance(desired_language, supported))\n for supported in supported_languages\n ]\n match_distances = [\n (supported, distance) for (supported, distance) in match_distances\n if distance <= max_distance\n ] + [('und', 1000)]\n\n match_distances.sort(key=itemgetter(1))\n return match_distances[0]", "def compare_language(language):\n if language in module.availableLanguages:\n return True\n else:\n return False", "def required_trophies(self, required_trophies):\n\n self._required_trophies = required_trophies", "def allowed_vehicles(self, allowed_vehicles):\n\n self._allowed_vehicles = allowed_vehicles" ]
[ "0.660035", "0.5910095", "0.5873693", "0.5462056", "0.4720534", "0.46974784", "0.46014965", "0.4505857", "0.44450456", "0.4354574", "0.4335083", "0.43275222", "0.4273099", "0.42656523", "0.42637634", "0.42247966", "0.41790164", "0.41742778", "0.41720313", "0.41189757", "0.4111221", "0.41092005", "0.4107724", "0.40735266", "0.402172", "0.40044892", "0.39956838", "0.39940107", "0.39890793", "0.39845088" ]
0.81081593
0
Gets the requires_matching_skills of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching skills
def requires_matching_skills(self): return self._requires_matching_skills
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def check_skill_requirements(self, skill_string):\r\n requirements_met = True\r\n # Shortened for easier usage, same practice in other functions\r\n # below.\r\n skill = self.__skills[skill_string]\r\n\r\n # Checks if skill is at it's maximum level or at\r\n # the maximum level it can be at current character level\r\n if self.__char_lvl.get() == \"\" or skill.skill_level == skill.skill_max_level or skill.lvl_req[skill.skill_level] > int(self.__char_lvl.get()):\r\n requirements_met = False\r\n\r\n\r\n\r\n # If there is a prerequired skill, checks if it's level is high\r\n # enough.\r\n if skill.prereq_skill_name != \"-\":\r\n if self.__skills[skill.prereq_skill_name].\\\r\n skill_level < skill.prereq_skill_lvl:\r\n self.reset(skill_string)\r\n requirements_met = False\r\n\r\n if not requirements_met:\r\n self.skill_up_disable(skill_string)\r\n else:\r\n self.skill_up_enable(skill_string)\r\n\r\n # If very little skill points are left after upgrading,\r\n # requirements of all skills are tested to see if there's enough\r\n # left to upgrade them.\r\n if self.__skill_points < MOST_SKILL_POINTS_POSSIBLY_REQUIRED:\r\n self.check_if_enough_skill_points()", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def required_skills(self, required_skills):\n\n self._required_skills = required_skills", "def requires_matching_languages(self):\n return self._requires_matching_languages", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchCondition']]:\n return pulumi.get(self, \"match_conditions\")", "def needsScores(self):\n return self.opt.needsScores()", "def get_minimum_should_match(self):\n return self.__minimum_should_match", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def match_conditions(self) -> Optional[Sequence['outputs.MatchConditionPatch']]:\n return pulumi.get(self, \"match_conditions\")", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def skills(self):\n if \"skills\" in self._prop_dict:\n return self._prop_dict[\"skills\"]\n else:\n return None", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def skill_grants(self):\n people_grants = self.people.rules.filter(\n free=True,\n skill__isnull=False\n ).values_list('skill', flat=True)\n tradition_grants = self.tradition.rules.filter(\n free=True,\n skill__isnull=False\n ).values_list('skill', flat=True)\n universal_grants = Rule.objects.filter(\n universal_flag=True\n ).values_list('skill', flat=True)\n skill_grants = list(tradition_grants) + list(people_grants) + list(universal_grants)\n return HeaderSkill.objects.filter(id__in=skill_grants)", "def score(self):\n return len([req for req in list(set(self.knowledges)) if req in \n self.key_requirements or req in self.other_requirements])", "def required(self):\n\n return 
bool(self.qualifiers.get(\"required\", False))", "def requires_matching_queues(self):\n return self._requires_matching_queues", "def scoreSkills(self, skills, work_hist_skills, req_skills):\n\n if work_hist_skills:\n score = len(set(work_hist_skills).intersection(req_skills))\n else:\n score = len(set(skills).intersection(req_skills))\n\n req_skills_len = len(req_skills)\n\n return score/req_skills_len if score != 0 else 0", "def meets_requirements(self, requirements):\n return len(self.completed_requirements(requirements)) == len(requirements)", "def requires_matching_languages(self, requires_matching_languages):\n \n self._requires_matching_languages = requires_matching_languages", "def match_skills(item):\n\n text = item.text\n if any([skill in text for skill in skill_names]):\n return True\n return False", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def isValidSkill(self, skill):\n try:\n skills = self.skillparser.getSkills()\n skilldetails = skills[skill]\n if skilldetails[1] in self.picks:\n return True\n else:\n return False\n except KeyError:\n return False", "def check_prerequisites(self, prequisites):\n for prereq in prequisites:\n # check for origin requirements\n if prereq.origin: \n if prereq.origin not in self.origins:\n print(f\"ORIGIN WRONG\")\n return False\n # check for additional header requirements\n if prereq.additional_header:\n if (not prereq.additional_header.open_flag and \n prereq.additional_header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for header/skill requirements\n # did the user purchase the required header, or is the header open?\n if prereq.header:\n if (not prereq.header.open_flag and \n prereq.header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for the number of different skills in the header.\n purchased_skills = HeaderSkill.objects.filter(\n header=prereq.header,\n skill__id__in=self.skills.values_list('skill__skill_id', flat=True)\n )\n if prereq.number_of_different_skills > purchased_skills.count(): \n print(f\"NUMBER OF SKILLS WRONG:{prereq.number_of_different_skills}:{purchased_skills.count()}\")\n return False\n # figure out the total skill points\n total = 0\n for skill in purchased_skills:\n total += skill.header.cost * skill.characterskills_set.get(character=self).count\n # check for skill requirements\n if prereq.skill:\n try:\n result = self.skills.get(skill__skill=prereq.skill)\n return result.count >= prereq.number_of_purchases\n except CharacterSkills.DoesNotExist:\n return False\n # if we made it this far, we can assume all prerequisites\n # have been met.\n return True", "def can_relax_constraints(self):\n if len(self.mand_classroom_constraints) == 0:\n if len(self.high_classroom_constraints) > 0:\n return True\n else:\n for cc in self.low_classroom_constraints:\n if cc.can_relax_constraints():\n return True\n\n if len(self.mand_timeblock_ids) == 0:\n if len(self.high_timeblock_ids) > 0:\n return True\n\n return False", "def check_skill_prerequisites(self, skill, header):\n try: \n skill_type = ContentType.objects.get_for_model(Skill)\n skill_prerequisites = Prerequisite.objects.filter(\n content_type__pk=skill_type.id,\n object_id=skill.id\n )\n return self.check_prerequisites(skill_prerequisites)\n except Prerequisite.DoesNotExist:\n return True\n return True", "def 
hasRequiredAttributes(self):\n return _libsbml.Trigger_hasRequiredAttributes(self)" ]
[ "0.7397422", "0.58501714", "0.52970845", "0.52919626", "0.52800035", "0.52033305", "0.52033305", "0.52033305", "0.5199282", "0.51981103", "0.51299804", "0.51299804", "0.51299804", "0.5121373", "0.5107792", "0.5013068", "0.5012293", "0.49951705", "0.4964476", "0.4931047", "0.49305603", "0.4901724", "0.48968232", "0.48788476", "0.47612137", "0.473293", "0.47224703", "0.4694208", "0.4686414", "0.46793118" ]
0.7993277
0
Sets the requires_matching_skills of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching skills
def requires_matching_skills(self, requires_matching_skills): self._requires_matching_skills = requires_matching_skills
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_skills(self):\n return self._requires_matching_skills", "def required_skills(self, required_skills):\n\n self._required_skills = required_skills", "def requires_matching_languages(self, requires_matching_languages):\n \n self._requires_matching_languages = requires_matching_languages", "def check_skill_requirements(self, skill_string):\r\n requirements_met = True\r\n # Shortened for easier usage, same practice in other functions\r\n # below.\r\n skill = self.__skills[skill_string]\r\n\r\n # Checks if skill is at it's maximum level or at\r\n # the maximum level it can be at current character level\r\n if self.__char_lvl.get() == \"\" or skill.skill_level == skill.skill_max_level or skill.lvl_req[skill.skill_level] > int(self.__char_lvl.get()):\r\n requirements_met = False\r\n\r\n\r\n\r\n # If there is a prerequired skill, checks if it's level is high\r\n # enough.\r\n if skill.prereq_skill_name != \"-\":\r\n if self.__skills[skill.prereq_skill_name].\\\r\n skill_level < skill.prereq_skill_lvl:\r\n self.reset(skill_string)\r\n requirements_met = False\r\n\r\n if not requirements_met:\r\n self.skill_up_disable(skill_string)\r\n else:\r\n self.skill_up_enable(skill_string)\r\n\r\n # If very little skill points are left after upgrading,\r\n # requirements of all skills are tested to see if there's enough\r\n # left to upgrade them.\r\n if self.__skill_points < MOST_SKILL_POINTS_POSSIBLY_REQUIRED:\r\n self.check_if_enough_skill_points()", "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def required_trophies(self, required_trophies):\n\n self._required_trophies = required_trophies", "def power_play_goals_against(self, power_play_goals_against):\n\n self._power_play_goals_against = power_play_goals_against", "def requires_matching_queues(self, requires_matching_queues):\n \n self._requires_matching_queues = requires_matching_queues", "def scoreSkills(self, skills, work_hist_skills, req_skills):\n\n if work_hist_skills:\n score = len(set(work_hist_skills).intersection(req_skills))\n else:\n score = len(set(skills).intersection(req_skills))\n\n req_skills_len = len(req_skills)\n\n return score/req_skills_len if score != 0 else 0", "def candidate_skills(self, source_object: Dict) -> CandidateSkillYielder:\n pass", "def apply_skill_effects(self, behavior):\n b_type = type(behavior)\n if issubclass(b_type, ESAttackUp):\n if b_type == ESAttackUPRemainingEnemies \\\n and behavior.enemy_count is not None \\\n and self.enemies > behavior.enemy_count:\n return False\n if self.enraged is None:\n if b_type == ESAttackUPCooldown and behavior.turn_cooldown is not None:\n self.enraged = -behavior.turn_cooldown + 1\n return False\n else:\n self.enraged = behavior.turns\n return True\n else:\n if self.enraged == 0:\n self.enraged = behavior.turns\n return True\n else:\n return False\n elif b_type == ESDamageShield:\n if self.damage_shield == 0:\n self.damage_shield = behavior.turns\n return True\n else:\n return False\n elif b_type == ESStatusShield:\n if self.status_shield == 0:\n self.status_shield = behavior.turns\n return True\n else:\n return False\n return True", "def check_if_enough_skill_points(self):\r\n for skill_string in self.__skills:\r\n if (self.__skills[skill_string].points_to_up >\r\n self.__skill_points):\r\n self.skill_up_disable(skill_string)", "def matches_add_fe_prereqs(self, fe_matches):\n #*** This assumes that policy doesn't do dumb stuff like try\n 
#*** to match tcp and udp in same rule...\n #\n #*** TBD: expand this to deal with all prerequisites and write tests\n\n if 'tcp_src' in fe_matches or 'tcp_dst' in fe_matches:\n #*** Set ip protocol to TCP\n fe_matches['ip_proto'] = 6\n if 'udp_src' in fe_matches or 'udp_dst' in fe_matches:\n #*** Set ip protocol to UDP\n fe_matches['ip_proto'] = 17\n if 'ip_proto' in fe_matches:\n #*** Set eth_type to IP:\n fe_matches['eth_type'] = 2048\n return fe_matches", "def power_play_goals(self, power_play_goals):\n\n self._power_play_goals = power_play_goals", "def learn_from_match(self, winner, ttt, *args, **kwargs):\n if winner == self.name:\n reward = 1\n elif winner == 'draw':\n reward = 0\n else:\n reward = -1\n\n states_list = zip(self._match, self._match[1:] + [(None, None)])\n for (action, state), (_, result) in reversed(states_list):\n lrate = exp(-self.lrate_coeff * self.match_count)\n estimated_optimal_future = max(self.Q[result].values()) if result else 0.0\n learned = reward + self.discount * estimated_optimal_future\n self._update_q(ttt, state, action, lrate * (learned - self.Q[state][action]))\n\n reward = 0 # reward is given only when winning or losing\n\n self.match_count += 1", "def meets_requirements(self, requirements):\n return len(self.completed_requirements(requirements)) == len(requirements)", "def set_needs_levels(self, repair_value=1, energy_value=1, play_value=1):\n msg = _clad_to_engine_iface.ForceSetNeedsLevels(\n newNeedLevel = [repair_value, energy_value, play_value])\n self.conn.send_msg(msg)", "def triggered(self, station_requirement=1, **station_trigger_kwargs):\n stations_hit = 0\n for station in self.subsets:\n if station.triggered(**station_trigger_kwargs):\n stations_hit += 1\n if stations_hit>=station_requirement:\n return True\n return stations_hit>=station_requirement", "def setApplicableValidators(self, *args):\n return _libsbml.SBMLDocument_setApplicableValidators(self, *args)", "def SetStrengthThresh(self, strength):\n return _hypre.HypreBoomerAMG_SetStrengthThresh(self, strength)", "def qualifies(self, weapon):\n return True", "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def check_prerequisites(self, prequisites):\n for prereq in prequisites:\n # check for origin requirements\n if prereq.origin: \n if prereq.origin not in self.origins:\n print(f\"ORIGIN WRONG\")\n return False\n # check for additional header requirements\n if prereq.additional_header:\n if (not prereq.additional_header.open_flag and \n prereq.additional_header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for header/skill requirements\n # did the user purchase the required header, or is the header open?\n if prereq.header:\n if (not prereq.header.open_flag and \n prereq.header not in self.headers.all()):\n print(f\"WE DONT HAVE THE RIGHT HEADER SELECTED\")\n return False \n # check for the number of different skills in the header.\n purchased_skills = HeaderSkill.objects.filter(\n header=prereq.header,\n skill__id__in=self.skills.values_list('skill__skill_id', flat=True)\n )\n if prereq.number_of_different_skills > purchased_skills.count(): \n print(f\"NUMBER OF SKILLS WRONG:{prereq.number_of_different_skills}:{purchased_skills.count()}\")\n return False\n # figure out the total skill points\n total = 0\n for skill in purchased_skills:\n total += skill.header.cost * skill.characterskills_set.get(character=self).count\n # check for skill requirements\n if prereq.skill:\n 
try:\n result = self.skills.get(skill__skill=prereq.skill)\n return result.count >= prereq.number_of_purchases\n except CharacterSkills.DoesNotExist:\n return False\n # if we made it this far, we can assume all prerequisites\n # have been met.\n return True", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def supports(self, requirements: typing.List[str]) -> bool:\n # MGA: NYI\n return True", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def setFitnesses(self, chromosomes: ChromList) -> ChromList:\n raise NotImplementedError", "def match_skills(item):\n\n text = item.text\n if any([skill in text for skill in skill_names]):\n return True\n return False", "def check_openmm_requirements(cls, combine_nonbonded_forces: bool) -> None:", "def setWarshipTarget(self):\n # first look for closest target of target type\n closestShip = self.getNearestTarget()\n\n if closestShip == None and (self.targets != [] or self.takenOverByEmpire != ''):\n # No Targets available\n if self.myGalaxy.shipsUnderAssault() == 0:\n self.myGalaxy.count = self.myGalaxy.maxCount\n else:\n if self.currentTarget != closestShip:\n # target aquired\n self.currentTarget = closestShip" ]
[ "0.67894024", "0.6205574", "0.58606046", "0.56571096", "0.55566597", "0.521438", "0.5066747", "0.4984988", "0.46794915", "0.46336862", "0.45962888", "0.45614943", "0.45473316", "0.4485718", "0.44463813", "0.44048196", "0.4387923", "0.43499947", "0.43493375", "0.43269297", "0.43229195", "0.43069285", "0.4296184", "0.42915884", "0.4290073", "0.42890456", "0.42887217", "0.42857483", "0.42809483", "0.42363077" ]
0.82426614
0
Gets the requires_matching_planning_groups of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching planning groups
def requires_matching_planning_groups(self): return self._requires_matching_planning_groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_planning_groups(self, requires_matching_planning_groups):\n \n self._requires_matching_planning_groups = requires_matching_planning_groups", "def enable_groups(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_groups\")", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def requires_matching_queues(self):\n return self._requires_matching_queues", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def supplemental_groups(self) -> Optional[pulumi.Input['SupplementalGroupsStrategyOptionsPatchArgs']]:\n return pulumi.get(self, \"supplemental_groups\")", "def group_required(group_names):\n\ttry:\n\t\tuser = CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)", "def EnableGroupStatMatchCapability(self):\n\t\treturn self._get_attribute('enableGroupStatMatchCapability')", "def _check_groups_support(self, groups=()):\n available_groups = set(self.df[self.col_group].unique())\n for group in groups:\n assert group in available_groups, \"Group %s is not in the dataset provided\" % group", "def supplemental_groups(self) -> pulumi.Input['SupplementalGroupsStrategyOptionsArgs']:\n return pulumi.get(self, \"supplemental_groups\")", "def _acceptable(self, team):\r\n current = [c for c in self.configurations if self._validateNoSpies(c, team)]\r\n return bool(len(current) > 0)", "def requires_matching_skills(self):\n return self._requires_matching_skills", "def get_minimum_should_match(self):\n return self.__minimum_should_match", "def has_groups(self, resolvables, all=True):\n total_checks = 0\n\n for group in resolvables:\n if self.has_group(group):\n total_checks += 1\n\n if not all:\n return True\n\n return True if all and total_checks == len(resolvables) else False", "def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)", "def global_node_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalReplicationGroupGlobalNodeGroupArgs']]]]:\n return pulumi.get(self, \"global_node_groups\")", "def param_groups(self):\n return self.optimizer.param_groups", "def get_groups_using_malware():\n global groups_using_malware\n\n if not groups_using_malware:\n groups_using_malware = rsh.groups_using_malware(get_srcs())\n \n return groups_using_malware", "def get_relevant_perm_groups(self):\n\n groups = Group.objects.filter(Q(name=\"everyone\") | Q(name=self.admin_group_name()) | Q(name=self.participants_group_name()))\n return groups", "def winningTeamPenalty(r):\n \n #Check if home or away had more goals at the 'event' time\n homecheck = int(r['about.goals.home'] > r['about.goals.away'])\n awaycheck = int(r['about.goals.away'] > r['about.goals.home'])\n \n #If home had more goals and the penalty was on the home team, set to 1\n if (homecheck > 0) and (r['against.homeTeam'] == 1):\n return 1\n #If away had more and the penalty was not on home team, set to 1\n if (awaycheck > 0) and (r['against.homeTeam'] == 0):\n return 1\n #Any other situation should be a zero in this column\n else:\n return 0", "def potential_groups(self, 
player) -> Set[Group]:\n directions = [\n (-1, 1), # up-right diagonal\n (0, 1), # horizontal\n (1, 1), # down-right diagonal\n (1, 0), # vertical\n ]\n groups = set()\n\n for row in range(len(self.state[0])):\n for col in range(len(self.state[0][0])):\n for row_diff, col_diff in directions:\n if self.is_potential_group(player, row, col, row_diff, col_diff):\n groups.add(Group(\n player,\n start=Square(row, col),\n end=Square(row + 3 * row_diff, col + 3 * col_diff),\n ))\n\n return groups", "def group_required(*group_names):\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def get_enable_windows_gmsa(self) -> bool:\n return self._get_enable_windows_gmsa(enable_validation=True)", "def granted_groups(self):\n return [\n g\n for g in Group.objects.filter()\n if ManagedObject.objects.filter(GroupAccess.Q(g) & Q(id=self.id)).exists()\n ]", "def _group_matcher(group):\n return (group.uuid == _DB_UUID and\n group.name == _INST_GROUP_DB['name'] and\n group.user_id == _INST_GROUP_DB['user_id'] and\n group.project_id == _INST_GROUP_DB['project_id'] and\n group.created_at == _TS_NOW and\n group.updated_at == _TS_NOW and\n group.members == _INST_GROUP_DB['members'] and\n group.policies == [_INST_GROUP_DB['policy']['policy']] and\n group.id == 1)", "def score_group_conflicts(self):\n group_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for groups in current_day.values():\n for group in groups:\n if not group.available( day_num ):\n num_conflicts += 1\n \n group_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.group_conflict_score = group_conflict_score\n return self.group_conflict_score", "def valid_match(self, home_stats, away_stats):\n # If either of the statistics is None then you cannot evaluate the\n # match\n if home_stats is None or away_stats is None:\n return False\n # Otherwise if we have not specified a number of matches to ignore then\n # this match can be evaluated.\n if self.ignore_matches is None:\n return True\n # If we have specified a number of matches to ignore, check both teams\n # have played at least that many:\n if (len(home_stats.games) < self.ignore_matches or\n len(away_stats.games) < self.ignore_matches):\n return False\n return True", "def server_group_tuples(self) -> Optional[Sequence['outputs.RuleRuleActionForwardGroupConfigServerGroupTuple']]:\n return pulumi.get(self, \"server_group_tuples\")", "def group_required(*group_names):\n\n def in_groups(u):\n if u.is_authenticated():\n if bool(u.groups.filter(name__in=group_names)) | u.is_superuser:\n return True\n return False\n return user_passes_test(in_groups)", "def has_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for g in self.groups.query(name=group.name):\n if g.name == group.name:\n return True\n\n return False" ]
[ "0.78293633", "0.5144219", "0.4976061", "0.49256355", "0.48759344", "0.48727217", "0.47323728", "0.47321418", "0.46929336", "0.46308273", "0.46221137", "0.45919544", "0.45839018", "0.4579318", "0.45729378", "0.45659393", "0.45462197", "0.4495398", "0.44463462", "0.44000325", "0.43852058", "0.43726832", "0.43722516", "0.43611842", "0.43541953", "0.43529934", "0.43516272", "0.43344036", "0.43330237", "0.4325022" ]
0.8434141
0
Sets the requires_matching_planning_groups of this ShiftTradeSettings. Whether to constrain shift trades to agents with matching planning groups
def requires_matching_planning_groups(self, requires_matching_planning_groups): self._requires_matching_planning_groups = requires_matching_planning_groups
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def requires_matching_planning_groups(self):\n return self._requires_matching_planning_groups", "def requires_matching_queues(self, requires_matching_queues):\n \n self._requires_matching_queues = requires_matching_queues", "def requires_matching_skills(self, requires_matching_skills):\n \n self._requires_matching_skills = requires_matching_skills", "def requires_matching_languages(self, requires_matching_languages):\n \n self._requires_matching_languages = requires_matching_languages", "def _check_groups_support(self, groups=()):\n available_groups = set(self.df[self.col_group].unique())\n for group in groups:\n assert group in available_groups, \"Group %s is not in the dataset provided\" % group", "def setMatchExprs(self, match_expr, job_query_expr,\n factory_query_expr, start_expr):\n self.adParams['GlideClientMatchingGlideinCondorExpr'] = \"%s\" % match_expr\n self.adParams['GlideClientConstraintJobCondorExpr'] = \"%s\" % job_query_expr\n self.adParams['GlideClientMatchingInternalPythonExpr'] = \"%s\" % factory_query_expr\n self.adParams['GlideClientConstraintFactoryCondorExpr'] = \"%s\" % start_expr", "def enable_groups(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_groups\")", "def solve(\n self,\n projections: Projections,\n initial_squad: Squad,\n next_gw: int = None,\n force_chips: Dict[int, str] = None,\n force_players: Dict[str, list] = None,\n force_transfers: Dict[int, dict] = None,\n price_changes: Dict[str, Iterable] = None,\n time_limit: float = None,\n optimizer: type = pulp.GUROBI,\n message: bool = True\n ):\n if next_gw is None:\n next_gw = sorted([int(column.split('_')[0]) for column in projections.columns if column.endswith('Pts')])[0]\n # Set up useful references\n initial_players = initial_squad.players\n initial_itb = initial_squad.itb\n initial_fts = initial_squad.fts\n active_chip = initial_squad.active_chip\n players = projections.index\n positions = ('G', 'D', 'M', 'F')\n teams = projections['Team'].unique()\n gw_interval = list(range(next_gw, next_gw + self.horizon))\n\n # Initialise optimisation model\n prob = LpProblem('FPL_transfer_optimisation')\n\n # Initialise decision variables\n default_args = {'index': players, 'columns': gw_interval, 'column_type': 'gw', 'model': prob}\n lineup = DecisionMatrix.lp_variable('lineup', **default_args)\n bench_gk = DecisionMatrix.lp_variable('bench_gk', **default_args)\n bench_1 = DecisionMatrix.lp_variable('bench_1', **default_args)\n bench_2 = DecisionMatrix.lp_variable('bench_2', **default_args)\n bench_3 = DecisionMatrix.lp_variable('bench_3', **default_args)\n squad = DecisionMatrix.lp_variable('squad', **default_args)\n prob += squad == lineup + bench_gk + bench_1 + bench_2 + bench_3\n squad[next_gw - 1] = pd.Series(squad.index).isin(initial_players).astype(int)\n captain = DecisionMatrix.lp_variable('captain', **default_args)\n vice_captain = DecisionMatrix.lp_variable('vice_captain', **default_args)\n transfer_in = DecisionMatrix.lp_variable('transfer_in', **default_args)\n transfer_out = DecisionMatrix.lp_variable('transfer_out', **default_args)\n itb = DecisionSeries(data=[initial_itb], index=[next_gw - 1], model=prob)\n # itb is previous GW's itb + revenue from outgoing players + cost of incoming players\n for i, gw in enumerate(gw_interval):\n itb[gw] = itb[gw - 1] + (transfer_out[gw] * projections['SV']).sum() - \\\n (transfer_in[gw] * projections['BV']).sum() - self.budget_decay_rate\n\n # Add problem constraints to optimisation model\n prob += squad == squad.lag(1) + 
transfer_in - transfer_out # New squad is previous squad plus transfers\n prob += squad.drop(next_gw - 1, axis=1) <= 1 # Each player can only appear in the squad once\n prob += lineup.sum() == 11 # Lineup contains 11 players\n prob += bench_gk.sum() == 1 # There is 1 bench GK;\n prob += bench_1.sum() == 1 # 1 1st bench slot;\n prob += bench_2.sum() == 1 # 1 2nd bench slot;\n prob += bench_3.sum() == 1 # 1 3rd bench slot;\n prob += captain.sum() == 1 # 1 Captain;\n prob += transfer_out.sum() == transfer_in.sum() # Transfers in must be same as transfers out\n\n prob += vice_captain.sum() == 1 # 1 vice-captain\n prob += captain <= lineup # Captain must be in lineup\n prob += vice_captain <= lineup # Vice-captain must be in lineup\n prob += captain + vice_captain <= 1 # Captain and vice-captain must be different players\n for position, limit in zip(positions, (2, 5, 5, 3)):\n prob += squad[projections['Pos'] == position].sum() == limit # Set squad position structure\n for team in teams:\n prob += squad[projections['Team'] == team].sum() <= 3 # No more than 3 players from each team\n if self.exclude_everton:\n prob += squad[projections['Team'] == 'Everton'].sum() == 0 # Option to exclude Everton players\n prob += bench_gk <= (projections['Pos'] == 'G') # Bench GK must be a goalkeeper\n prob += (lineup * (projections['Pos'] == 'G')).sum() == 1 # There must be 1 goalkeeper in lineup\n prob += (lineup * (projections['Pos'] == 'D')).sum() >= 3 # There must be at least 3 defenders in lineup\n prob += itb[[False] + [True] * self.horizon] >= 0 # The itb amount must be non-negative for future GWs\n\n # Set up transfer logic\n transfer_args = {'index': gw_interval, 'column_type': 'gw', 'model': prob, 'cat': 'Integer'}\n aux = DecisionSeries.lp_variable('aux', **transfer_args)\n free_transfers = DecisionSeries(data=[initial_fts], index=[next_gw - 1], model=prob) + DecisionSeries.\\\n lp_variable('free_transfers', **transfer_args)\n penalised_transfers = DecisionSeries.lp_variable('penalised_transfers', **transfer_args)\n transfer_counts = transfer_in.sum()\n frees_minus_transfers = free_transfers.lag(1) - transfer_counts\n lower_bound = aux * 15 - 14\n upper_bound = aux * 2\n if initial_fts > 1:\n prob += transfer_counts[next_gw] >= 1\n prob += frees_minus_transfers >= lower_bound\n prob += frees_minus_transfers <= upper_bound\n prob += free_transfers == aux + 1\n # penalised_transfers is max(transfers - frees, 0)\n prob += penalised_transfers >= -frees_minus_transfers\n prob += penalised_transfers >= 0\n\n ev_values = projections[[f'{gw}_Pts' for gw in gw_interval]] # Restructure projections data for easier\n ev_values.columns = gw_interval # manipulation\n objective = ((lineup + captain) * ev_values).sum() # Objective function is sum of lineup and captain pts\n objective += (vice_captain * self.vc_weight * ev_values).sum() # Add vice-captain weight\n for loc, bench_slot in enumerate((bench_gk, bench_1, bench_2, bench_3)):\n objective += (bench_slot * ev_values).sum() * self.bench_weights[:, loc] # Add bench weights to objective\n if force_transfers is None:\n objective -= penalised_transfers * 4 # Take away 4 points from each hit taken\n if force_chips is not None:\n self.force_chips = force_chips\n for gw in force_chips:\n if force_chips[gw] == 'wildcard':\n objective[gw] += penalised_transfers[gw] * 4 # Remove penalised points in wildcard week\n\n if force_players is not None:\n for player in force_players['include']:\n prob += squad.T[player].drop(next_gw - 1) == 1\n for player in 
force_players['exclude']:\n prob += squad.T[player].drop(next_gw - 1) == 0\n if 'include_for_gw' in force_players:\n for gw in force_players['include_for_gw']:\n try:\n prob += squad[force_players['include_for_gw'][gw], gw] == 1\n except ValueError:\n pass\n if 'exclude_for_gw' in force_players:\n for gw in force_players['exclude_for_gw']:\n try:\n prob += squad[force_players['exclude_for_gw'][gw], gw] == 0\n except ValueError:\n pass\n self.rolls = frees_minus_transfers + penalised_transfers\n prob += self.rolls <= 2\n\n if self.penalties is not None:\n if time_decay in self.penalties:\n self.penalties[time_decay] = self.penalties.pop(time_decay) # Apply time decay after other penalties\n for penalty, parameter in self.penalties.items():\n objective = penalty(objective, self, parameter) # Apply external penalty functions\n\n # Apply price change EV\n if price_changes is not None:\n gws_remaining = 38 - next_gw + 1\n for player in price_changes['rise']:\n objective[next_gw] += self.million_value / 30 * squad[player, next_gw] * gws_remaining\n for player in price_changes['drop']:\n objective[next_gw] -= self.million_value / 10 * squad[player, next_gw] * gws_remaining\n\n prob.model += objective.sum()\n prob.solve(time_limit=time_limit, optimizer=optimizer, message=message)\n\n return Solution(lineup, bench_gk, bench_1, bench_2, bench_3, captain, vice_captain, objective, transfer_in,\n transfer_out, itb, projections, free_transfers, penalised_transfers, force_chips)", "def test_change_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.change_learner_group', self.learner_groups[1]))", "def match_constraints(self) -> Optional['outputs.MatchResourcesPatch']:\n return pulumi.get(self, \"match_constraints\")", "def placement_groups(self, placement_groups):\n\n self._placement_groups = placement_groups", "def matches_add_fe_prereqs(self, fe_matches):\n #*** This assumes that policy doesn't do dumb stuff like try\n #*** to match tcp and udp in same rule...\n #\n #*** TBD: expand this to deal with all prerequisites and write tests\n\n if 'tcp_src' in fe_matches or 'tcp_dst' in fe_matches:\n #*** Set ip protocol to TCP\n fe_matches['ip_proto'] = 6\n if 'udp_src' in fe_matches or 'udp_dst' in fe_matches:\n #*** Set ip protocol to UDP\n fe_matches['ip_proto'] = 17\n if 'ip_proto' in fe_matches:\n #*** Set eth_type to IP:\n fe_matches['eth_type'] = 2048\n return fe_matches", "def set_users_groups_allowed(self, users_allowed, groups_allowed):\n self.widget.user_choices = user_choices(users_allowed)\n self.fields[0].queryset = users_allowed\n self.widget.widgets[0].choices = self.widget.user_choices\n\n self.widget.group_choices = group_choices(groups_allowed)\n self.fields[1].queryset = groups_allowed\n self.widget.widgets[1].choices = self.widget.group_choices", "def test_change_learner_group_specific_for_coach_pt1(self):\n self.assertTrue(self.coach1.has_perm('auth.change_learner_group', self.learner_groups[0]))", "def test_list_role_assignment_using_sourced_groups(self):\n test_plan = {\n # The default domain with 3 users, 3 groups, 3 projects,\n # plus 3 roles.\n 'entities': {'domains': {'id': CONF.identity.default_domain_id,\n 'users': 3, 'groups': 3, 'projects': 3},\n 'roles': 3},\n # Users 0 & 1 are in the group 0, User 0 also in group 1\n 'group_memberships': [{'group': 0, 'users': [0, 1]},\n {'group': 1, 'users': [0]}],\n # Spread the assignments around - we want to be able to show that\n # if sourced by group, assignments from other sources are excluded\n 
'assignments': [{'user': 0, 'role': 0, 'project': 0},\n {'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1},\n {'user': 2, 'role': 1, 'project': 1},\n {'group': 2, 'role': 2, 'project': 2}\n ],\n 'tests': [\n # List all effective assignments sourced from groups 0 and 1\n {'params': {'source_from_group_ids': [0, 1],\n 'effective': True},\n 'results': [{'group': 0, 'role': 1, 'project': 1},\n {'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n # Adding a role a filter should further restrict the entries\n {'params': {'source_from_group_ids': [0, 1], 'role': 2,\n 'effective': True},\n 'results': [{'group': 1, 'role': 2, 'project': 0},\n {'group': 1, 'role': 2, 'project': 1}\n ]},\n ]\n }\n self.execute_assignment_plan(test_plan)", "def test_add_learner_group_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm('auth.add_learner_group', self.classrooms[1]))", "def solveTaskGroupingAssignment(agent_capacity, task_cost, groups, assign_same_quantity_of_tasks=False):\n print(\"Agent capacities\", agent_capacity.values())\n agents = agent_capacity.keys()\n tasks = task_cost.keys()\n _groups = groups.keys()\n agentsxtasks = list(itertools.product(agent_capacity.keys(),\n task_cost.keys())) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n tasks_en_groups = list(itertools.chain.from_iterable(groups.values()))\n agentsxtasks_in_groups = list(itertools.product(agent_capacity.keys(),\n tasks_en_groups)) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n agentsxgroups = list(itertools.product(agent_capacity.keys(),\n groups.keys())) # Lista de pares resultante de hacer producto cartesiano entre agents y tasks\n prob = pulp.LpProblem(\"Task grouping assignment \", pulp.LpMinimize)\n assignment_vars = pulp.LpVariable.dicts(\"Assignment\", agentsxtasks, None, None, pulp.LpBinary)\n # Variables Auxes para ayudarse a resolver la desviacin estandard\n aux_vars = pulp.LpVariable.dicts(\"Aux\", agentsxtasks_in_groups, None, None)\n # Funcion objetivo\n\n assignment_agente_in_each_group = {} # (idagente, idgrupo): lpSum(tasks_del_grupo_idgrupo_al_agente_idagente\n\n # tasks asignadas al agente por grupo\n for agente in agents:\n for grupo in _groups:\n assignment_agente_in_each_group[(agente, grupo)] = pulp.lpSum(\n [assignment_vars[x] for x in agentsxtasks if x[0] == agente and x[1] in groups[grupo]])\n\n # Retorna la desviacion standard de las Assignmentes a un grupo determinado\n\n # print (assignment_agente_in_each_group[(1,0)])\n assignment_agent_in_each_group_average = {}\n for agente in agents:\n for grupo in _groups:\n assignment_agent_in_each_group_average[(agente, grupo)] = pulp.lpSum(\n assignment_agente_in_each_group[(agente, grupo)]) / float(len(groups[grupo]))\n assigned_tasks_to_agent_less_group_average = {}\n for agente in agents:\n for grupo in _groups:\n for task in groups[grupo]:\n assigned_tasks_to_agent_less_group_average[(agente, task)] = assignment_vars[(agente, task)] - \\\n assignment_agent_in_each_group_average[\n (agente, grupo)]\n\n def construir_desviacion_standard(agente, grupo):\n return pulp.lpSum([aux_vars[(agente, task)] for task in groups[grupo]]) / float((len(groups[grupo])))\n\n def construir_funcion_objetivo():\n return pulp.lpSum(\n [construir_desviacion_standard(agentexgrupo[0], agentexgrupo[1]) for agentexgrupo in agentsxgroups])\n\n # Restricciones\n assignments_by_agent = {}\n\n for agente in 
agents:\n assignments_by_agent[agente] = [task_cost[i[1]] * assignment_vars[i] for i in agentsxtasks if i[0] == agente]\n\n # La suma de las horas asignadas no puede superar el mximo de horas disponibles\n for agente in agents:\n prob += lpSum(assignments_by_agent[agente]) <= agent_capacity[agente]\n prob += construir_funcion_objetivo(), \"Minimizar desviacion estandard en la asignaciin de groups\"\n # Correspondencia valores absulutos y sus respectivas variables auxiliares\n for agente in agents:\n for task in tasks_en_groups:\n prob += assigned_tasks_to_agent_less_group_average[(agente, task)] <= aux_vars[(agente, task)]\n prob += -assigned_tasks_to_agent_less_group_average[(agente, task)] <= aux_vars[(agente, task)]\n\n # Una task solamente puede ser asignada a una persona:\n\n for task in tasks:\n prob += pulp.lpSum([assignment_vars[i] for i in agentsxtasks if i[1] == task]) == 1\n\n tiempo_solve_inicial = time()\n prob.solve()\n tiempo_final_solve = time()\n tiempo_solve = tiempo_final_solve - tiempo_solve_inicial\n\n # The status of the solution is printed to the screen\n print(\"Status:\", pulp.LpStatus[prob.status])\n\n for v in prob.variables():\n print(re.findall(r'\\d+', v.name))\n print(v.name, \"=\", v.varValue)\n print('El tiempo total de el solve fue:', tiempo_solve) # En segundos\n return prob.status, prob.variables()", "def _force_winners(self, matrix, won, least_represented):\n max_activation = float(\"-inf\")\n max_activation_neuron = -1\n\n output = self.compute(self.network, self.least_represented)\n\n # Loop over all of the output neurons. Consider any neurons that were\n # not the BMU (winner) for any pattern. Track which of these\n # non-winning neurons had the highest activation.\n for output_neuron in range(len(won)):\n # Only consider neurons that did not \"win\".\n if won[output_neuron] == 0:\n if (max_activation_neuron == -1) \\\n or (output[output_neuron] > max_activation):\n max_activation = output[output_neuron]\n max_activation_neuron = output_neuron\n\n # If a neurons was found that did not activate for any patterns, then\n # force it to \"win\" the least represented pattern.\n if max_activation_neuron != -1:\n self.copy_input_pattern(matrix, max_activation_neuron, least_represented)\n return True\n else:\n return False", "def _MatchGroup(\n self, device_group, group_requirements, extra_required_attr):\n logging.debug('Try to match %s against %s',\n group_requirements, device_group.name)\n matched_devices = []\n matched_device_serials = set()\n for run_target_requirement in group_requirements.run_targets:\n matched = False\n run_target_candidate = device_group.run_targets.get(\n run_target_requirement.name)\n if not run_target_candidate:\n logging.debug('No run target %s.', run_target_requirement.name)\n return None\n for device_candidate in run_target_candidate.devices.values():\n if device_candidate.device_serial in matched_device_serials:\n continue\n if self._MatchDeviceAttributes(\n run_target_requirement.device_attributes + extra_required_attr,\n device_candidate.attributes):\n matched_devices.append(device_candidate)\n matched_device_serials.add(device_candidate.device_serial)\n matched = True\n break\n if not matched:\n logging.debug('There is no match for %s.', run_target_requirement)\n return None\n logging.debug('%s matches requirement %s with %s.',\n device_group.name,\n group_requirements,\n [d.device_serial for d in matched_devices])\n return matched_devices", "def group_required(group_names):\n\ttry:\n\t\tuser = 
CrequestMiddleware.get_request().user\n\t\tif user.is_authenticated():\n\t\t\ttest = user.groups.filter(name=group_names).exists()\n\texcept (AttributeError):\n\t\ttest = False\n\n\n\treturn user_passes_test(test)", "def EnableGroupStatMatchCapability(self):\n\t\treturn self._get_attribute('enableGroupStatMatchCapability')", "def group_required(*group_names):\n\n def in_groups(current_user):\n if not settings.ENABLE_PERMISSIONS:\n return True\n if current_user.is_authenticated:\n if current_user.groups.filter(name__in=group_names).exists():\n return True\n return False\n\n return user_passes_test(in_groups)", "def match_constraints(self) -> Optional['outputs.MatchResources']:\n return pulumi.get(self, \"match_constraints\")", "def test_org_specific_restriction(self):\n self.given({\n \"rules\": {\n \"12\": \"allow\",\n },\n \"rules:trial\": {\n \"123\": \"allow\",\n },\n \"rules:org\": {\n \"1234\": \"restrict\"\n }\n })\n self.expect_for_trial((\n (\"123\", \"allow\"),\n (\"1230\", \"allow\"),\n (\"1234\", \"restrict\"),\n (\"12340\", \"restrict\"),\n ))", "def set_matching_rule(self, matching_rule):\n if not ((matching_rule == self.BPQ_MATCHING_RULE_EXACT) or\n (matching_rule == self.BPQ_MATCHING_RULE_TOKENS) or\n (matching_rule == self.BPQ_MATCHING_RULE_NEVER)):\n raise ValueError\n \n self.matching_rule = matching_rule\n return", "def score_group_conflicts(self):\n group_conflict_score = 0\n multiplier = 4\n \n for day_num in range(self.num_days):\n \n current_day = self.days[ day_num ]\n num_conflicts = 0\n \n for groups in current_day.values():\n for group in groups:\n if not group.available( day_num ):\n num_conflicts += 1\n \n group_conflict_score += multiplier * ( num_conflicts ** 2 )\n \n self.group_conflict_score = group_conflict_score\n return self.group_conflict_score", "def consistency_groups_some(self, consistency_groups_some):\n\n self._consistency_groups_some = consistency_groups_some", "def quality(self, rating_groups, weights=None):\n rating_groups, keys = self.validate_rating_groups(rating_groups)\n weights = self.validate_weights(weights, rating_groups, keys)\n flatten_ratings = sum(map(tuple, rating_groups), ())\n flatten_weights = sum(map(tuple, weights), ())\n length = len(flatten_ratings)\n # a vector of all of the skill means\n mean_matrix = Matrix([[r.mu] for r in flatten_ratings])\n # a matrix whose diagonal values are the variances (sigma ** 2) of each\n # of the players.\n def variance_matrix(height, width):\n variances = (r.sigma ** 2 for r in flatten_ratings)\n for x, variance in enumerate(variances):\n yield (x, x), variance\n variance_matrix = Matrix(variance_matrix, length, length)\n # the player-team assignment and comparison matrix\n def rotated_a_matrix(set_height, set_width):\n t = 0\n for r, (cur, next) in enumerate(zip(rating_groups[:-1],\n rating_groups[1:])):\n for x in range(t, t + len(cur)):\n yield (r, x), flatten_weights[x]\n t += 1\n x += 1\n for x in range(x, x + len(next)):\n yield (r, x), -flatten_weights[x]\n set_height(r + 1)\n set_width(x + 1)\n rotated_a_matrix = Matrix(rotated_a_matrix)\n a_matrix = rotated_a_matrix.transpose()\n # match quality further derivation\n _ata = (self.beta ** 2) * rotated_a_matrix * a_matrix\n _atsa = rotated_a_matrix * variance_matrix * a_matrix\n start = mean_matrix.transpose() * a_matrix\n middle = _ata + _atsa\n end = rotated_a_matrix * mean_matrix\n # make result\n e_arg = (-0.5 * start * middle.inverse() * end).determinant()\n s_arg = _ata.determinant() / middle.determinant()\n return math.exp(e_arg) 
* math.sqrt(s_arg)", "def __init_groups_for_customers(self, values):\n group_customer = self.env.ref('anytracker.group_customer').id\n group_partner = self.env.ref('anytracker.group_partner').id\n group_portal = self.env.ref('base.group_portal').id\n sel_groups = [v for v in values.items()\n if v[0].startswith('sel_groups_')]\n for group_id in (group_customer, group_partner):\n if any(['_' + str(group_id) in g[0]\n and g[1] and group_id == g[1]\n for g in sel_groups]):\n values = {k: v for k, v in values.items()\n if not k.startswith('sel_groups_')\n and not k.startswith('_in_group')}\n values['groups_id'] = [(6, 0, [group_id, group_portal])]\n return values", "def test_add_meeting_default_conflict_types(self):\n def _run_test(mtg):\n url = reverse('ietf.secr.meetings.views.add')\n r = self.client.get(url)\n q = PyQuery(r.content)\n selected_items = q('#id_group_conflict_types input[checked]')\n selected_values = [si.value for si in selected_items]\n expected_values = [cn.slug for cn in mtg.group_conflict_types.all()]\n self.assertCountEqual(selected_values, expected_values)\n\n self.client.login(username='secretary', password='secretary+password')\n\n meeting = MeetingFactory(type_id='ietf', group_conflicts=[]) # start with no conflicts selected\n _run_test(meeting)\n\n # enable one\n meeting.group_conflict_types.add(ConstraintName.objects.filter(is_group_conflict=True).first())\n self.assertEqual(meeting.group_conflict_types.count(), 1)\n _run_test(meeting)\n\n # enable a few ([::2] selects every other)\n meeting.group_conflict_types.clear()\n for cn in ConstraintName.objects.filter(is_group_conflict=True)[::2]:\n meeting.group_conflict_types.add(cn)\n self.assertGreater(meeting.group_conflict_types.count(), 1)\n _run_test(meeting)" ]
[ "0.7490272", "0.4813987", "0.47733462", "0.4771342", "0.46703315", "0.45196325", "0.44602185", "0.44554698", "0.44414607", "0.44335997", "0.44194436", "0.43835708", "0.43758637", "0.43642312", "0.43547672", "0.43463677", "0.4340733", "0.4329243", "0.43282712", "0.42960754", "0.42941138", "0.4261825", "0.4258959", "0.4245488", "0.42388994", "0.42289695", "0.42219064", "0.421827", "0.42167506", "0.42116493" ]
0.8618694
0
Gets the activity_category_rules of this ShiftTradeSettings. Rules that specify what to do with activity categories that are part of a shift defined in a trade
def activity_category_rules(self): return self._activity_category_rules
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activity_category_rules(self, activity_category_rules):\n \n self._activity_category_rules = activity_category_rules", "def get_rules_for_category(category):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE category_id = ?', (category,)).fetchall()\n\n return rules", "def get_rules_for_action(self, action_type: ActionType) -> List[\"Rule\"]:\n return [rule for rule in self.rules if rule.action_type == action_type]", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def security_group_rules(self):\n return int(self.get('security_group_rules'))", "def relevant_rules(\n self,\n categories: set[\"HierarchicalCategory\"],\n source_categorization: typing.Optional[\"Categorization\"] = None,\n simple_sums_only: bool = False,\n ) -> list[ConversionRule]:\n relevant_rules: list[ConversionRule] = []\n if not categories:\n return relevant_rules\n\n if source_categorization is None:\n source_categorization = next(iter(categories)).categorization\n\n for rule in self.rules:\n if source_categorization == self.categorization_a:\n fc = rule.factors_categories_a\n else:\n fc = rule.factors_categories_b\n\n if simple_sums_only:\n rule_source_categories = {\n cat for cat, factor in fc.items() if factor == 1\n }\n else:\n rule_source_categories = {cat for cat, factor in fc.items()}\n\n if categories.intersection(rule_source_categories):\n relevant_rules.append(rule)\n\n return relevant_rules", "def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def lifecycle_rules(self) -> typing.Optional[typing.List[\"LifecycleRule\"]]:\n return self._values.get('lifecycle_rules')", "def get_rules(cls):\n raise NotImplementedError()", "def rules(self):\n return self._alert_rules_client", "def effective_rules(self) -> pulumi.Output[Sequence[Any]]:\n return pulumi.get(self, \"effective_rules\")", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def getListOfRules(self):\n return self.model.getListOfRules()", "def get_categories(self) -> tuple:\n return self.categories", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def categories(self):\n game_categories = self.game_categories.all()\n return [ gc.category for gc in game_categories ]", "def category(self):\n\n for category, match_list in rule_list:\n for match in match_list:\n if match.match(self):\n return category\n\n return None", "def rule(model, s, k):\n if self.category_days[s, k] <= 0:\n return Constraint.Feasible\n return self.category_days[s, k], model.S_cat[s, k], None", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def get_rules(self, **params):\n return self._make_request(\n \"GET\", f\"/2/tweets/search/stream/rules\", 
params=params,\n endpoint_parameters=(\"ids\",), data_type=StreamRule\n )", "def _constraints_category_days(self):\n\n def rule(model, s, k):\n \"\"\"\n Ensure that a task of a category is assigned on each day as desired.\n\n More precisely:\n S_cat[s,k] >= cat_days[s, k]\n \"\"\"\n if self.category_days[s, k] <= 0:\n return Constraint.Feasible\n return self.category_days[s, k], model.S_cat[s, k], None\n\n self.model.constrain_cat_days2 = Constraint(self.model.dayslots,\n self.model.categories, rule=rule)\n\n def rule(model, k):\n \"\"\"\n Lower bound on number of distinct days in which a (task from a)\n category is assigned.\n\n More precisely:\n sum_s S_cat[s,k] = S_cat_total[k] >= cat_days[k]\n \"\"\"\n if self.category_days_total[k] <= 0:\n return Constraint.Feasible\n return self.category_days_total[k], model.S_cat_total[k], None\n\n self.model.constrain_cat_days3 = Constraint(self.model.categories,\n rule=rule)", "def getCategories(self):\n logger.debug(\"Func: getCategories\")\n\n return self._categories", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig" ]
[ "0.65764445", "0.56854343", "0.5613786", "0.5561689", "0.55392265", "0.55276525", "0.55239964", "0.5513118", "0.54814136", "0.5451201", "0.53944457", "0.53867567", "0.53611493", "0.53129584", "0.52809757", "0.5203807", "0.51137215", "0.5097135", "0.50868076", "0.50547934", "0.49916765", "0.49410897", "0.49141163", "0.4909092", "0.4909092", "0.48993194", "0.48627824", "0.483288", "0.48093423", "0.47919756" ]
0.81497526
0
Sets the activity_category_rules of this ShiftTradeSettings. Rules that specify what to do with activity categories that are part of a shift defined in a trade
def activity_category_rules(self, activity_category_rules): self._activity_category_rules = activity_category_rules
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activity_category_rules(self):\n return self._activity_category_rules", "def set_device_rules(self, rules, rule_objs):\n self.logger.debug(\"set_device_rules: rules: {}\".format(rules))\n self._load_device_rules(rules, rule_objs=rule_objs)\n self._determine_cli_command_list()\n self._determine_get_method_list()", "def set_category(self, category):\n\n\t\tif category is not None and not isinstance(category, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: category EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__category = category\n\t\tself.__key_modified['category'] = 1", "def set_rules(rules, overwrite=True, use_conf=False):\n\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)", "async def set_rules(self, ctx: discord.ext.commands.context.Context, *, rules: str):\n guild_info = server_setup.get_guild_info(ctx.guild)\n\n if guild_info[\"rulesChannelID\"] is not None:\n rules_channel = server_setup.get_channel(guild=ctx.guild, channel_id=guild_info[\"rulesChannelID\"])\n embed = await format_rules(rules=rules, title=\"Rules\",\n description=\"You must follow these rules at all times\")\n\n if guild_info[\"rulesMessageID\"] is not None:\n message = await rules_channel.fetch_message(guild_info[\"rulesMessageID\"])\n\n await message.edit(embed=embed)\n\n else:\n message = await rules_channel.send(embed=embed)\n guild_info[\"rulesMessageID\"] = message.id\n\n server_setup.update_guild(guild_info=guild_info)\n\n guild_info[\"rules\"] = rules\n server_setup.update_guild(guild_info=guild_info)\n\n else:\n await ctx.send(\"You must create a rules channel before you may set the rules message.\")\n\n print(\"Rules have been updated.\")", "def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)", "def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n\n self._categories = categories", "def categories(self, categories):\n self._categories = categories", "def _constraints_category_days(self):\n\n def rule(model, s, k):\n \"\"\"\n Ensure that a task of a category is assigned on each day as desired.\n\n More precisely:\n S_cat[s,k] >= cat_days[s, k]\n \"\"\"\n if self.category_days[s, k] <= 0:\n return Constraint.Feasible\n return self.category_days[s, k], model.S_cat[s, k], None\n\n self.model.constrain_cat_days2 = Constraint(self.model.dayslots,\n self.model.categories, rule=rule)\n\n def rule(model, k):\n \"\"\"\n Lower bound on number of distinct days in which a (task from a)\n category is assigned.\n\n More precisely:\n sum_s S_cat[s,k] = S_cat_total[k] >= cat_days[k]\n \"\"\"\n if self.category_days_total[k] <= 0:\n return Constraint.Feasible\n return self.category_days_total[k], model.S_cat_total[k], None\n\n self.model.constrain_cat_days3 = Constraint(self.model.categories,\n rule=rule)", "def category(self, category):\n allowed_values = [\"Trace\", \"Verbose\", \"Info\", \"Wait\", \"Highlight\", \"Gap\", \"Alert\", \"Warning\", \"Error\", \"Fatal\", \"Planned\", \"Updated\", \"Finished\", \"Abandoned\"] # noqa: E501\n if category not in allowed_values:\n raise ValueError(\n \"Invalid value for `category` ({0}), must be one of {1}\" # noqa: E501\n 
.format(category, allowed_values)\n )\n\n self._category = category", "def early_stopping_rules(self, early_stopping_rules):\n\n self._early_stopping_rules = early_stopping_rules", "def add_gamemode_rules(self, rules):\n self.remainingBalls = rules['remainingballs']", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def category(self, category):\n\n self._category = category", "def activities(self, activities):\n\n self._activities = activities", "def relevant_rules(\n self,\n categories: set[\"HierarchicalCategory\"],\n source_categorization: typing.Optional[\"Categorization\"] = None,\n simple_sums_only: bool = False,\n ) -> list[ConversionRule]:\n relevant_rules: list[ConversionRule] = []\n if not categories:\n return relevant_rules\n\n if source_categorization is None:\n source_categorization = next(iter(categories)).categorization\n\n for rule in self.rules:\n if source_categorization == self.categorization_a:\n fc = rule.factors_categories_a\n else:\n fc = rule.factors_categories_b\n\n if simple_sums_only:\n rule_source_categories = {\n cat for cat, factor in fc.items() if factor == 1\n }\n else:\n rule_source_categories = {cat for cat, factor in fc.items()}\n\n if categories.intersection(rule_source_categories):\n relevant_rules.append(rule)\n\n return relevant_rules", "def categories(self, categories: List[str]):\n\n self._categories = categories", "def update_categories(self):\n categories = {}\n datasets = self.data['dataset']\n used_categories = self._get_list_categories_used(datasets)\n for category in used_categories:\n categories.update({\n category: self._get_datasets_tasks_by_category(datasets, category)\n })\n self.data[\"category\"] = categories", "def activities(self, activities):\n \n self._activities = activities", "def _constraints_category_duration(self):\n\n def rule(model, k):\n if self.category_min[k] <= 0 and self.category_max[k] >= NUMSLOTS:\n return Constraint.Feasible\n elif self.category_min[k] <= 0:\n return None, model.C_total[k], self.category_max[k]\n elif self.category_max[k] >= NUMSLOTS:\n return self.category_min[k], model.C_total[k], None\n return self.category_min[k], model.C_total[k], self.category_max[k]\n\n self.model.constrain_cat_duration1 = Constraint(self.model.categories,\n rule=rule)", "def Categories(self, new_categories):\r\n if not isinstance(new_categories, ListType):\r\n raise TypeError(\"The supplied categories must be a list of \"\r\n \"strings.\")\r\n for new_cat in new_categories:\r\n if not isinstance(new_cat, str):\r\n raise TypeError(\"Invalid category: not of type 'string'\")\r\n elif new_cat not in self._metadata_map.CategoryNames:\r\n raise ValueError(\"The category '%s' is not in the mapping \"\r\n \"file.\" % new_cat)\r\n\r\n if not self._suppress_numeric_category_check:\r\n if not self._metadata_map.isNumericCategory(new_cat):\r\n raise TypeError(\"The category '%s' is not numeric. Not \"\r\n \"all values could be converted to numbers.\"\r\n % new_cat)\r\n\r\n if not self._suppress_category_uniqueness_check:\r\n if self._metadata_map.hasUniqueCategoryValues(new_cat):\r\n raise ValueError(\"All values in category '%s' are unique. \"\r\n \"This statistical method cannot operate \"\r\n \"on a category with unique values (e.g. 
\"\r\n \"there are no 'within' distances because \"\r\n \"each group of samples contains only a \"\r\n \"single sample).\" % new_cat)\r\n\r\n if not self._suppress_single_category_value_check:\r\n if self._metadata_map.hasSingleCategoryValue(new_cat):\r\n raise ValueError(\"All values in category '%s' are the \"\r\n \"same. This statistical method cannot \"\r\n \"operate on a category that creates only \"\r\n \"a single group of samples (e.g. there \"\r\n \"are no 'between' distances because \"\r\n \"there is only a single group).\"\r\n % new_cat)\r\n\r\n self._categories = new_categories", "def category(self, category: str):\n\n self._category = category", "def add_rules(self, rules: List[Rule]):\n self.rules.extend(rules)" ]
[ "0.5965316", "0.49182183", "0.47771955", "0.4753459", "0.47052824", "0.46763086", "0.45752648", "0.4574007", "0.4574007", "0.4574007", "0.4574007", "0.4553582", "0.4497546", "0.44832736", "0.4482188", "0.4435759", "0.43805346", "0.43805346", "0.43805346", "0.43805346", "0.43805346", "0.43757483", "0.43605545", "0.43498364", "0.4346029", "0.432411", "0.43099663", "0.43031073", "0.42992792", "0.4283328" ]
0.8358281
0
_createSourceName_ dccp takes a local path, so all we have to do is return the pfn asis
def createSourceName(self, protocol, pfn): return pfn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getPFCName(path, inputpoolfcstring):\n\n _tmp_fcname = inputpoolfcstring.split(':')[-1]\n if os.path.isabs(_tmp_fcname):\n pfc_name = _tmp_fcname\n else:\n pfc_name = os.path.join(os.path.abspath(path), _tmp_fcname)\n\n return pfc_name", "def name_from_dist(dist_func):\n return str(dist_func).split()[0].split('.')[-1][:-4]", "def get_data_file(source_file_name):\n # type: (str) -> (str)\n path = list(os.path.split(source_file_name))\n path[-1] = path[-1].split('_')[0] + '.K'\n return os.path.join(*path)", "def source_file_name_feature(self):\n return \"_\".join((C_FILE_NAME, self.file_image_name.value))", "def get_title(src_name, src_type=None):\n if src_type == 'tcp':\n return '{0}:{1}'.format(*src_name)\n return os.path.basename(src_name)", "def get_source_name(self, source_id: str) -> str:\n if not self._source_list_map:\n return \"\"\n if source_id.upper() == DIGITAL_TV.upper():\n source_id = \"dtv\"\n for map_value in self._source_list_map:\n map_id = map_value.get(\"id\")\n if map_id and map_id == source_id:\n return map_value.get(\"name\", \"\")\n return \"\"", "def createExternalFunction(self, extSpaceAddr: ghidra.program.model.address.Address, name: unicode, nameSpace: ghidra.program.model.symbol.Namespace, extData3: unicode, source: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.listing.Function:\n ...", "def __GetLibFileName(cls, src, name):\n bin_path = FileUtils.GetBinPathForFile(src)\n return os.path.join(os.path.dirname(bin_path), '_%s.so' % name)", "def cpsym(src,dest):\n \n src = os.path.normpath(src)\n dest = os.path.normpath(dest)\n \n if not os.path.exists(src):\n return\n \n for dirpath,dirnames,filenames in os.walk(src):\n rel_dirpath = os.path.relpath(dirpath,src)\n dest_dirpath = os.path.join(dest,rel_dirpath)\n mkdir(dest_dirpath,isfull=True)\n \n for filename in filenames:\n src_filename = os.path.join(dirpath,filename)\n rel_filename = os.path.relpath(src_filename,src)\n \n dest_filename = os.path.join(dest,rel_filename)\n try:\n os.symlink(src_filename,dest_filename)\n except OSError:\n pass", "def friendly_source_name(name):\n known_names = dict(\n blastprodom=\"BlastProDom\",\n fprintscan=\"FPrintScan\",\n gene3d=\"Gene3D\",\n hamap=\"HAMAP\",\n hmmpir=\"HMMPIR\",\n hmmpanther=\"HMMPanther\",\n hmmpfam=\"HMMPfam\",\n hmmsmart=\"HMMSmart\",\n hmmtigr=\"HMMTIGR\",\n patternscan=\"PatternScan\",\n profilescan=\"ProfileScan\",\n superfamily=\"SUPERFAMILY\"\n )\n return known_names.get(name.lower(), name)", "def input_name_from_func_name(func_name):\n\treturn os.path.join(INPUTS_DIR, ''.join(func_name.split('make_')[1:])) \\\n\t\t\t+ '.%s' % EXTENSION", "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def fs_generate_entry_name(self, sDirPath, sFilenamePrefix = '', sFilenameSuffix = '', sIndexDelimiter = ''):\n\t\treturn Job(SDK.PrlSrv_FsGenerateEntryName(self.handle, sDirPath, sFilenamePrefix, sFilenameSuffix, sIndexDelimiter)[0])", "def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n 
read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name", "def _generate_test_name(source):\n out = source.replace(' ', '_').replace(':', '').replace(',', '').lower()\n return \"test_%s\" % out", "def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))", "def first_source_part(cadsource):\n split_source = cadsource.split(\":\")\n if len(split_source) > 1:\n cadsource = split_source[0]\n return cadsource", "def newCalFileName(self, type, runBegin, runEnd='end'):\n \n path=os.path.join(self.cdir)\n if not os.path.exists(path): \n os.mkdir(path)\n path=os.path.join(self.cdir,self.calibgroup)\n if not os.path.exists(path): \n os.mkdir(path)\n path=os.path.join(self.cdir,self.calibgroup,self.src)\n if not os.path.exists(path): \n os.mkdir(path)\n path=os.path.join(self.cdir,self.calibgroup,self.src,type)\n if not os.path.exists(path): \n os.mkdir(path)\n return path+'/'+str(runBegin)+'-'+str(runEnd)+'.data'", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))", "def relative_name(media_fullname, sourcedir):\n x = os.path.relpath(media_fullname, sourcedir)\n x = x.replace('\\\\', '_').replace('/', '_').replace('#', '_')\n return x", "def get_data_filename(relative_path): #TODO put in utils\n\n import os\n from pkg_resources import resource_filename\n fn = resource_filename('mdfptools', os.path.join('data', relative_path))\n\n if not os.path.exists(fn):\n raise ValueError(\"Sorry! %s does not exist. If you just added it, you'll have to re-install\" % fn)\n\n return fn", "def name2ncc_path(name: str, src_dir: Path, extension: str):\n path = src_dir / f\"{name}{extension}\"\n if path.is_file():\n return path\n\n # Some of the benchmark sources are dataset dependent. 
This is reflected by\n # the dataset name being concatenated to the path.\n name_components = name.split(\"-\")\n\n new_name = \"-\".join(name_components[:-1])\n path = src_dir / f\"{new_name}{extension}\"\n if path.is_file():\n return path\n\n new_name = \"-\".join(name_components[:-1]) + \"_\" + name_components[-1]\n path = src_dir / f\"{new_name}{extension}\"\n if path.is_file():\n return path\n\n raise FileNotFoundError(f\"No OpenCL source found for {name}\")", "def _resolveSourcePath(self, sources, source):\n source = copy.deepcopy(source)\n if source['path'] != '__none__':\n sourcePath = Path(source['path'])\n source['path'] = self._basePath / sourcePath\n if not source['path'].is_file():\n altpath = self._basePath.parent / sourcePath / sourcePath.name\n if altpath.is_file():\n source['path'] = altpath\n if not source['path'].is_file():\n raise TileSourceFileNotFoundError(str(source['path']))\n sources.append(source)", "def _get_cfn_template_file_name(self, cfn_template_path: str) -> str:\n base_name = os.path.basename(cfn_template_path)\n (file_name, ext) = os.path.splitext(base_name)\n return file_name", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def _CreateSanitizedDestination(\n self, source_file_entry, source_path_spec, destination_path):\n file_system = source_file_entry.GetFileSystem()\n path = getattr(source_path_spec, u'location', None)\n path_segments = file_system.SplitPath(path)\n\n # Sanitize each path segment.\n for index, path_segment in enumerate(path_segments):\n path_segments[index] = u''.join([\n character if character not in self._DIRTY_CHARACTERS else u'_'\n for character in path_segment])\n\n return (\n os.path.join(destination_path, *path_segments[:-1]), path_segments[-1])", "def pfn_to_lfn(fn):\n return fn[fn.find(\"/store\"):]", "def create_internal_dns_name ( base_name, name ) :\n name = name + '.internal'\n return create_dns_name( base_name, name )", "def extract_file_name_from_source_full_path(source_full_path):\n destination_file_name = os.path.basename(source_full_path)\n return destination_file_name" ]
[ "0.5489577", "0.5397066", "0.53578424", "0.5325086", "0.52370214", "0.5161669", "0.5159798", "0.5158864", "0.5138597", "0.5134293", "0.51239634", "0.5101735", "0.5097504", "0.506354", "0.50249326", "0.49959907", "0.49909014", "0.49875447", "0.49866307", "0.4985442", "0.4982325", "0.4960299", "0.49530408", "0.49468508", "0.49461928", "0.49440432", "0.4937486", "0.49275562", "0.49114314", "0.4877829" ]
0.7818806
0
_createStageOutCommand_ Build a dccp command with a pnfs mkdir to generate the directory
def createStageOutCommand(self, sourcePFN, targetPFN, options = None, checksums = None): try: import dcap except ImportError as ie: raise StageOutError("Python dCap wrappers not found on this host.") optionsStr = "" if options != None: optionsStr = str(options) result = "#!/bin/sh\n" result += "dc_stageout %s %s %s" % ( optionsStr, sourcePFN, targetPFN) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mkdirout():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n #outdir = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n if os.path.exists(output_dir()):\n sys.exit(\"ERROR. Unable to create output directory. %s already exists. Please, make sure you choose an output path not containing former results.\" % output_dir() ) # LOGGING?\n else:\n try:\n os.mkdir(output_dir())\n except OSError:\n sys.exit(\"ERROR. Unable to create output directory %s.\" % output_dir() )\n os.mkdir(output_tmpdir())\n os.mkdir(output_tmpdir(\"pisacov\"))\n os.mkdir(output_tmpdir(\"pisa\"))\n os.mkdir(output_tmpdir(\"deepmetapsicov\"))", "def prepareCommand(self, client):\n # No command, just create the dirs\n return ''", "def db2gbk_mkdir(self, path, p_list, update):\n if update is True:\n path = where.dir_archive(path, p_list)\n else:\n path = where.dir_make(path, p_list)\n return path", "def emit(self) -> str:\n out: str = self.dst.replace(' ', '\\\\ ')\n out += (\n ' : '\n + ' '.join(s for s in self.src)\n + (\n ('\\n\\t@mkdir -p \"' + os.path.dirname(self.dst) + '\"')\n if self.mkdir\n else ''\n )\n + '\\n\\t@'\n + self.cmd\n + '\\n'\n )\n return out", "def make_output_folders():\n call([\"mkdir\", \"-p\", args.out_folder.strip()])\n call([\"mkdir\", args.out_folder.strip() + \"/files\"])\n call([\"mkdir\", args.out_folder.strip() + \"/fasta\"])", "def create(self, basedir, outdir, name, prefix=None):", "def createCfg_prep_dcard(self, jobOptions):\n category_output = self.channel\n if jobOptions['label']:\n category_output += \"_%s\" % jobOptions['label']\n lines = []\n lines.append(\"process.fwliteInput.fileNames = cms.vstring('%s')\" % jobOptions['inputFile'])\n lines.append(\"process.fwliteOutput.fileName = cms.string('%s')\" % jobOptions['datacardFile'])\n lines.append(\"process.prepareDatacards.processesToCopy = cms.vstring(%s)\" % self.prep_dcard_processesToCopy)\n lines.append(\"process.prepareDatacards.signals = cms.vstring(%s)\" % self.prep_dcard_signals)\n lines.append(\"process.prepareDatacards.makeSubDir = cms.bool(True)\")\n lines.append(\"process.prepareDatacards.categories = cms.VPSet(\")\n for charge in [\"OS\", \"SS\"]:\n for ptEtaBin in [\n \"BB_LL\", \"BB_ML\", \"BB_MM\", \"BB_HL\", \"BB_HM\", \"BB_HH\",\n \"EE_LL\", \"EE_ML\", \"EE_MM\", \"EE_HL\", \"EE_HM\", \"EE_HH\",\n \"BE_LL\", \"BE_ML\", \"EB_ML\",\"BE_MM\", \"BE_HL\", \"EB_HL\",\n \"BE_HM\", \"EB_HM\", \"BE_HH\", \"total\",\n ]:\n lines.append(\" cms.PSet(\")\n lines.append(\" input = cms.string('%s/%s'),\" % (charge, ptEtaBin))\n lines.append(\" output = cms.string('ttH_%s_%s_%s')\" % (self.channel, charge, ptEtaBin))\n lines.append(\" ),\")\n lines.append(\")\")\n lines.append(\"process.prepareDatacards.histogramToFit = cms.string('%s')\" % jobOptions['histogramToFit'])\n lines.append(\"process.prepareDatacards.sysShifts = cms.vstring(%s)\" % systematics.muon_E)\n create_cfg(self.cfgFile_prep_dcard, jobOptions['cfgFile_modified'], lines)", "def cmd_mkd(args):", "def mkdir_rep_vhosts_vm(self):\n print \"Creation du repertoire /vhosts/%s\" % name_vm_dest\n self.rep_vhosts_vm = \"/vhosts/\"+ name_vm_dest +\"\"\n self.exec_cmd(\"mkdir -p %s\" % self.rep_vhosts_vm)", "def createDirectory(self, summary_handle,directory,mode,role =\"\",summary_var_dict={}):\n if role:\n directory = directory + \"/\" + role\n \n tmp_var = \"mkdir -p %s%s%s\" %(directory,self,role)\n if mode == \"RECOVERY\":\n flag = self.check_var_in_dict(tmp_var,summary_var_dict)\n if flag == \"true\":\n return\n\n 
self.pushMode(CLI_MODES.shell)\n if role:\n self.removePath(directory)\n\n logger.info (\"Directory is %s\" %directory)\n output = self.sendCmd(\"mkdir -p %s\" % directory)\n status = self.command_execution_status()\n if status == \"true\":\n summary_handle.write(\"mkdir -p %s,%s,%s,pass \\n\" %(directory,self,role))\n else:\n summary_handle.write(\"mkdir -p %s,%s,%s,fail \\n\" %(directory,self,role)) \n\n self.popMode()\n return output", "def fs_mkdir(self, dirname: str) -> None:\n self.exec_(\"import uos\\nuos.mkdir('%s')\" % dirname)", "def mkdir(session, ds_path, dc_ref):\n LOG.debug(\"Creating directory with path %s\", ds_path)\n session._call_method(session._get_vim(), \"MakeDirectory\",\n session._get_vim().get_service_content().fileManager,\n name=ds_path, datacenter=dc_ref,\n createParentDirectories=True)\n LOG.debug(\"Created directory with path %s\", ds_path)", "def _make_output_dir(self):\n out_dir = os.path.dirname(self._out_format)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n LOG.info('Created output directory: %s', out_dir)", "def _make_output_directory(self):\n fs = self._filesystem\n output_filename = fs.join(self._root_output_dir, self._test_name)\n fs.maybe_make_directory(fs.dirname(output_filename))", "def prepare_supplemental_output_directory():\n output_dir = workspace_path('%s/%s' % (scenario_filename(), \"Supplemental Output Files\")) # this does not have the .db suffix\n output_args = ['--output-dir', output_dir] # to be returned and passed to adsm_simulation.exe\n if not os.path.exists(output_dir):\n os.makedirs(output_dir, exist_ok=True)\n return output_args", "def create_out_dir_name(params):\n\n current_timestamp = timestamp()\n out_dir = os.path.join('out', current_timestamp)\n return out_dir", "def svn_client_mkdir(svn_client_commit_info_t_commit_info_p, apr_array_header_t_paths, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def setup(self, newdir=None):\n if not os.path.exists(self.output_path):\n os.makedirs(self.output_path)\n if newdir:\n _new = os.path.join(self.output_path, newdir)\n if not os.path.exists(_new):\n os.makedirs(_new)", "def create_output_dir(self):\n if self.output_dir is None:\n new_path = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n self.output_dir = os.path.expanduser(os.path.join(self.input_dir, new_path))\n try:\n os.makedirs(self.output_dir)\n except OSError:\n pass", "def __build_cmd(self, infname, outdir):\n self._outdirname = os.path.join(outdir, \"trimmomatic_output\")\n cmd = [\"trimmomatic\",\n infname,\n \"-o\", self._outdirname]\n self._cmd = ' '.join(cmd)", "def _create_target_directories(self):\n if os.path.exists(self.PREPROCESSED_DATA_OUT_DIR):\n if self._hparams.over_write:\n print_info(\"Deleting data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n shutil.rmtree(self.PREPROCESSED_DATA_OUT_DIR)\n print_info(\"Recreating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)\n else:\n print_info(\"Skipping preprocessing step, since the data might already be available\")\n else:\n print_info(\"Creating data folder: {}\".format(self.PREPROCESSED_DATA_OUT_DIR))\n os.makedirs(self.PREPROCESSED_DATA_OUT_DIR)", "def prepare(self, dst, options):\n self.checkExisting(dst)\n self.makedirs(dst.parent())", "def make_output_dirs_for_part2(parent_dir):\n\n if not os.path.exists(parent_dir + 'Modeling/'):\n os.makedirs(parent_dir + 'Modeling/')\n if not os.path.exists(parent_dir + 
'Modeling/cleaned_template_fastas/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_fastas/')\n if not os.path.exists(parent_dir + 'Modeling/cleaned_template_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/cleaned_template_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/fasta_alns_and_identities/'):\n os.makedirs(parent_dir + 'Modeling/fasta_alns_and_identities/')\n if not os.path.exists(parent_dir + 'Modeling/grishin_alns/'):\n os.makedirs(parent_dir + 'Modeling/grishin_alns/')\n if not os.path.exists(parent_dir + 'Modeling/threaded_pdbs/'):\n os.makedirs(parent_dir + 'Modeling/threaded_pdbs/')\n if not os.path.exists(parent_dir + 'Modeling/final_models/'):\n os.makedirs(parent_dir + 'Modeling/final_models/')", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def mkpsf(expnum, ccd):\n\n ## get image from the vospace storage area\n filename = storage.get_image(expnum, ccd, version='p')\n logging.info(\"Running mkpsf on %s %d\" % (expnum, ccd))\n ## launch the makepsf script\n util.exec_prog(['jmpmakepsf.csh',\n './',\n filename,\n 'no'])\n\n ## place the results into VOSpace\n basename = os.path.splitext(filename)[0]\n\n ## confirm destination directory exists.\n destdir = os.path.dirname(\n storage.dbimages_uri(expnum, ccd, version='p',ext='fits'))\n logging.info(\"Checking that destination direcoties exist\")\n storage.mkdir(destdir)\n\n\n for ext in ('mopheader', 'psf.fits',\n 'zeropoint.used', 'apcor', 'fwhm', 'phot'):\n dest = storage.dbimages_uri(expnum, ccd, version='p', ext=ext)\n source = basename + \".\" + ext\n storage.copy(source, dest)\n\n return", "def prepDir(path=None):\n if path:\n if os.path.exists(path):\n return path\n else:\n os.makedirs(path)\n else:\n # Do something innocent when no path is provided\n path = tempfile.mkdtemp(prefix='XEPs_')\n print \"creating {} for output\".format(path)\n return path", "def cmd_mkdir(self, msg_dict):\r\n dir = msg_dict[\"second_parameter\"]\r\n # dir_path = \"/home/%s/%s\" % (msg_dict[\"username\"],dir)\r\n dir_path = msg_dict[\"current_directory\"] + '/' + msg_dict[\"second_parameter\"]\r\n os.system(\"mkdir %s\" % dir_path)\r\n self.request.send(msg_dict[\"current_directory\"].encode())", "def make_dir_structure(self, out):\n program_folder = os.path.join(out, self.out)\n self.make_output_dir(program_folder)\n self.make_config_dirs(program_folder)\n return None", "def create(dir_name, options):\n return exec_fn(lambda: _create(dir_name, options))", "def make_case_dir(self):\n\n mkdir(directory=self.dir)\n mkdir(directory=self.sub_dir)" ]
[ "0.6048934", "0.5826364", "0.5755246", "0.5728969", "0.5728202", "0.56315583", "0.56039834", "0.55505246", "0.5518401", "0.54481965", "0.5429", "0.5413144", "0.5412861", "0.5367991", "0.53583246", "0.5355196", "0.53349096", "0.53324974", "0.53165174", "0.5314745", "0.53089845", "0.53006315", "0.52938455", "0.5265179", "0.5260634", "0.52500063", "0.5243057", "0.52375853", "0.5235513", "0.5234772" ]
0.68755096
0
_removeFile_ CleanUp pfn provided
def removeFile(self, pfnToRemove): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanUp(self, f):\n os.system('rm ' + f)", "def cleanup(fname):\n if os.path.isfile(fname):\n try:\n os.remove(fname)\n print \"Cleaned up\", fname\n except OSError:\n print \"Failed to clean up\", fname", "def remove_file(self, path):\n pass", "def cleanup_file(path_to_file):\n print \"Removing generated file: %s\" % path_to_file\n os.remove(path_to_file)", "def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])", "def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass", "def export_removeFile( self, lfns ):\n res = database.removeFile( lfns )\n return self._parseRes( res )", "def _remove_unique_file(self):\n if self._uniquefile_created:\n self._unlink(self.uniquefile)\n self._uniquefile_created = False\n self._p(\"Unique file deleted: %s\" % self.uniquefile)", "def delete_file(input_fn):\r\n if os.path.isfile(input_fn):\r\n os.remove(input_fn)", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def remove(self):\n self.remove_file()", "def CleanUp(self, path):\n try:\n if os.path.exists(path):\n os.remove(path)\n except (OSError, IOError) as e:\n logging.info(\"Failed to remove temporary file %s. Err: %s\", path, e)", "def clean():\n clean_files()", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def _call_cleanup(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def cleanup(self):\n\n if self.do_nothing_bl is False:\n if os.path.exists(self.local_fileP_str) is True:\n if self.temp_dirP_obj is not None:\n self.temp_dirP_obj.cleanup()\n\n elif os.path.isdir(self.local_fileP_str) is True:\n log_obj.debug('Removing directory \"{:s}\"'.format(self.local_fileP_str))\n shutil.rmtree(self.local_fileP_str)\n\n else:\n log_obj.debug('Removing file \"{:s}\"'.format(self.local_fileP_str))\n os.remove(self.local_fileP_str)", "def cleanUpPackage(inProgressFilename, packageFilename, propFilename):\n try:\n for filename in (inProgressFilename, packageFilename, propFilename):\n if (filename is not None and os.path.exists(filename)):\n os.remove(filename)\n\n except OSError, osErr :\n LOG.error('Unable to cleanup Package (%s)' % osErr)", "def remove(path):", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def removeFilenameValidate(call, args=(), kwargs={}, nodeClass='Write'):", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def handleRemoveFile(self):\n for w in self.filesList.selectedItems():\n self.filesList.removeFile(w.text(2))\n 
self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.personalDataList.clear()", "def remove_file(path: str) -> None:\n\tremove(path)", "def warn_purge_exit(info_msg, filename, exit_msg):\n floyd_logger.info(info_msg)\n rmtree(os.path.dirname(filename))\n sys.exit(exit_msg)", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def classCleanup(cls):\n cls.RemoveTempFile(\"child_send1.txt\")\n cls.RemoveTempFile(\"child_read1.txt\")\n cls.RemoveTempFile(\"child_send2.txt\")\n cls.RemoveTempFile(\"child_read2.txt\")", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)" ]
[ "0.7110942", "0.6926332", "0.6837502", "0.67880255", "0.66312337", "0.65511507", "0.65271634", "0.6437142", "0.6406541", "0.6383176", "0.63297075", "0.63115835", "0.629489", "0.6281163", "0.6279226", "0.62617594", "0.626105", "0.6258333", "0.6251337", "0.6229411", "0.6194104", "0.61825895", "0.6181004", "0.6175096", "0.61722374", "0.6167069", "0.61652756", "0.6104703", "0.6087443", "0.6084922" ]
0.8349402
0
homepage redirects to list of users
def home_page(): return redirect('/users')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def homepage():\n if g.user:\n return redirect(f\"/user/{g.user.id}\")\n else:\n return redirect(\"/landing\")", "def users_page(request):", "def index(request):\n\n\tif request.user.is_authenticated:\n\t\treturn HttpResponseRedirect('home')\n\treturn HttpResponseRedirect('login')", "def list_users():\n users = User.query.order_by(User.last_name, User.first_name).all()\n return render_template('index.html', users=users)", "def home_app():\n users = User.query.order_by(User.last_name, User.first_name).all()\n\n return render_template('index.html', users=users)", "def index(request):\n users = User.objects.filter(is_staff=False, is_active=True).order_by('username')\n return render(request, 'users/view_all_users.html',\n { 'users': users })", "def user_list():\n\n users = User.query.all()\n return render_template(\"/user_list.html\", users=users)", "def home():\n # if session.get('username'):\n # return redirect(url_for('categories'))\n # else:\n return render_template('home.html')", "def index(self):\n\n # try and pull the user's data\n user = get_active_user_data()\n\n if not user:\n # they are not logged in give them the login form\n return render('/login_form.html')\n\n # they are logged in, pass them to the home page\n redirect('/')", "def redirect_users():\n\n recent = Post.query.order_by(desc(\"created_at\")).limit(5).all()\n\n return render_template(\"posts/recent_posts.html\", posts=recent)", "def index(self):\n raise cherrypy.HTTPRedirect('/user')", "def home():\n\n if not current_user.is_authenticated:\n return redirect(url_for('login'))\n else:\n return redirect(url_for('show_registrations'))", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n return render_template(\"user_list.html\", users=users)", "def user_list():\n\n users = User.query.all()\n \n return render_template(\"user_list.html\", users=users)", "def index():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n return render_template('index.html')", "def index():\n user_list = Users.query.all()\n return render_template('users/index.html'\n ,user_list=user_list\n ,t=t\n ,m=m)", "def user_list():\n\n users = User.query.all()\n\n return render_template(\"user_list.html\", users=users)", "def list_all_users():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n users_list = get_users_list()\n if check_authentication(session_id, user_id) and is_admin_user(user_id):\n return render_template('admin_area.html', user=user_id, session_id=session_id, users_list=users_list)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)", "def show_index():\r\n if 'username' in flask.session:\r\n return flask.redirect(flask.url_for('home')) # Need to fix redirect\r\n\r\n return flask.render_template(\"index.html\")", "def homepage():\n return redirect(\"/posts\")", "def list_users():\n check_admin()\n results = 
User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def index():\n if session.get('user_id'):\n return redirect('/feed')\n \n return render_template('index.html')", "async def index(request: Request, user: UserInfo) -> HTTPResponse:\n return redirect('home')", "def list_users():\n\n db_users = User.query.all()\n\n return render_template(\"list_users.html\", headline=\"Blogly Users\", users=db_users)", "def index():\n if 'name' in session:\n return render_template('home.html')\n return redirect(url_for('log_in'))" ]
[ "0.7494506", "0.7432099", "0.7120991", "0.69668645", "0.6889106", "0.68814206", "0.6858822", "0.6828226", "0.68036246", "0.6792592", "0.67875254", "0.67855155", "0.67843896", "0.67843896", "0.67843896", "0.67843896", "0.67843896", "0.67843896", "0.6780509", "0.6769911", "0.67634827", "0.6760547", "0.67581004", "0.6755803", "0.6721465", "0.6720713", "0.6710477", "0.66753566", "0.6660072", "0.6636517" ]
0.7679901
0
renders template to add new user
def add_new_user(): return render_template('new.html')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user():\n\n return render_template('register-form.html')", "def add_user_form():\n\n return render_template(\"add_user.html\", headline=\"Add New Blogly User\")", "def new_users():\n\n return render_template(\"new_user.html\")", "def render_create_user_page():\n\n return render_template(\"create_user.html\")", "def create_user():\n\n return render_template(\"users/create_user.html\")", "def show_new_user_page():\n\n return render_template(\"new_user.html\")", "def signup():\n return render_template(\"new_user.html\")", "def make_new_user():\n return render_template('users/new_user_form.html')", "def show_new_user_form():\r\n return render_template('user-form.html')", "def get(self, request):\n self.context[\"form\"] = AddUserForm()\n return render(request, \"dbkeeper/add.html\", self.context)", "def show_user_detail_form():\n\n return render_template(\"add-user-details.html\")", "def cassh_add(current_user=None):\n return render_template('add.html', username=current_user['name'], \\\n logged_in=current_user['is_authenticated'])", "def new_user(request, **kwargs):\n varz = {}\n varz.update(kwargs)\n c = RequestContext(request, varz)\n t = loader.get_template('family_info/add_edit_user.html')\n return HttpResponse(t.render(c))", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def add():\n form = RegisterForm(request.form)\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n \n save_result = add_user(username, password, email)\n if save_result['status']:\n # create a starter project\n save_result_project = add_project(save_result['entry'], 'my first project', [], 'an example project', True, 2)\n save_result_task = add_task(save_result['entry'].id, 'this is an example task', save_result_project['entry'].id, 'you can edit these notes', False, False, 2, '1970-01-01')\n flash(u'thanks for joining, %s. please login!' % username, 'success')\n else:\n flash(u'cannot register \"%s\". try a different username or email.' 
% username, 'error')\n return redirect(url_for('.add'))\n\n return redirect(url_for('login'))\n\n return render_template('users/register.html'\n ,form=form\n ,t=t\n ,m=m)", "def add_student():\n\n return render_template(\"student_add.html\")", "def add_user():\n\n if request.method == 'POST':\n add_new_user_schema = AddNewUser()\n\n errors = add_new_user_schema.validate(data=request.form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_user_schema.dump(request.form)\n\n user = User(connection=connection, cursor=cursor)\n user.add_user(\n first_name=args['first_name'],\n second_name=args['second_name'],\n is_internal=args['is_internal'],\n\n position=args['position'],\n email=args['email'],\n phone_number=args['phone_number']\n )\n\n return redirect(url_for('documentation.home'))\n\n return render_template('pages/inputs/add_user.html')", "def user():\r\n return render_base_template(\"user.html\", user=current_user)", "def add_user():\n\n if request.method == 'POST':\n add_new_user_schema = AddNewUser()\n\n errors = add_new_user_schema.validate(data=request.form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_user_schema.dump(request.form)\n\n user = User(root_uri=os.environ['ROOT_BACKEND_URI'])\n user.add_user(\n first_name=args['first_name'],\n second_name=args['second_name'],\n is_internal=args['is_internal'],\n\n position=args['position'],\n email=args['email'],\n phone_number=args['phone_number']\n )\n\n return redirect(url_for('show_documentation.show_users'))\n\n return render_template('pages/inputs/add_user.html')", "def student_add():\n\n html = render_template(\"student_add.html\")\n\n return html", "def get(self):\n if self.user:\n self.redirect_to('secure', id=self.user_id)\n params = {\n \"action\": self.request.url,\n }\n return self.render_template('create_user.html', **params)", "def create_user_form():\n template_name = \"create_user.html\"\n users = []\n print request.form\n\n flash(request.form['username'])\n flash(request.form['email'])\n\n return render_template(template_name, users=users)", "def get(self):\r\n return_url = self.request.get(\"return_url\")\r\n template_values = {\r\n \"user_form\" : User.to_form(return_url, mode=\"add\")\r\n }\r\n self.render_out(\"templates/register.html\", template_values)", "def get(self, user):\n return self.render(\"post-new.html\", user=user)", "def add_user():\n #check_admin()\n\n # if form submit\n if request.method == 'POST':\n # create new user with UI form data\n user = User(username=request.form['username'],\n password=request.form['password'],\n is_admin=request.form.getlist('is_admin'))\n\n try:\n # add user to the database\n db.session.add(user)\n db.session.commit()\n # message to the UI\n flash('Utilizador adicionado com sucesso.', 'success')\n # redirect to the users page\n return redirect(url_for('user.list_users'))\n except:\n # in case user name already exists\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n\n # load add user form template\n return render_template('user_add.html')", "def user(request):\n\n context = {\n\n }\n\n return render(request, 'user.html', context=context)", "def sign_up():\n return render_template('sign_up.html')", "def signup():\n return render_template('auth/signup.html')", "def request_user_create():\n return Response(render_template('admin/user/create-update.html',\n csrf_token=(\n get_raw_jwt() or {}).get(\"csrf\"),\n target=\"/admin/user/create\",\n genders=list(GenderType),\n states=list(StateType),\n 
groups=Group.query.all(),\n roles=list(RoleType),\n gender=GenderType.FEMALE,\n role=RoleType.LOCAL_POWER_TAKER,\n state=StateType.ACTIVATION_PENDING),\n mimetype='text/html')", "def register():\n\n return render_template(\"auth/registerHere.html\")" ]
[ "0.87503904", "0.8419606", "0.80858696", "0.7989216", "0.79065436", "0.7873468", "0.78427976", "0.7788214", "0.7762511", "0.7753578", "0.7438695", "0.73353076", "0.7235222", "0.721587", "0.71043146", "0.70694435", "0.70558715", "0.70520866", "0.6944675", "0.6933298", "0.6882539", "0.6843664", "0.6842527", "0.6840494", "0.67427576", "0.67389256", "0.67264", "0.66799295", "0.665686", "0.6647955" ]
0.8767982
0
adds new user to db
def add_new_user_to_db(): first_name = request.form['first_name'] last_name = request.form['last_name'] img_url = request.form['img_url'] new_user = User(first_name=first_name,last_name=last_name, img_url=img_url) db.session.add(new_user) db.session.commit() return redirect('/users')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def handle_add_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'])\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/')", "def add_user():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n fname = request.form.get(\"fname\")\n lname = request.form.get(\"lname\")\n language = request.form.get(\"language\")\n\n new_user = User(email=email, password=password,fname=fname,\n lname=lname,language=language)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/\")", "def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))", "def add_user():\n username = request.json['username']\n email = request.json['email']\n\n user = User(username, email)\n\n db.session.add(user)\n db.session.commit()\n return user_schema.jsonify(user)", "def add_user():\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n image_url = request.form.get('image_url')\n\n new_user = User(\n first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n flash(f'Added new user: {first_name} {last_name}')\n return redirect('/users')", "def add_user():\n\n email = request.form[\"email\"]\n password = request.form[\"password\"] \n fname = request.form[\"fname\"]\n lname = request.form[\"lname\"]\n macaddress = request.form[\"macaddress\"]\n role = request.form[\"role\"]\n\n password_hash = generate_password_hash(password, method='sha256', salt_length=8)\n # create a new User object.\n new_user = User(email=email, password=password_hash,\n fname=fname, lname=lname, macaddress=macaddress, role=role)\n\n # add new user to db\n db.session.add(new_user)\n # commit the new add.\n db.session.commit()\n\n return userSchema.jsonify(new_user)", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def add_user(self, username, password, name, department):\n db = sqlite3.connect(self.name)\n cur = db.cursor()\n cur.execute('SELECT MAX(ID) FROM users')\n maxid = cur.fetchone()[0]\n usid = maxid + 1 if maxid is not None else 0\n date = time.strftime('%Y.%m.%d')\n cur.execute(\n 'INSERT INTO users VALUES (?, ?, ?, ?, ?, ?, ?)',\n (usid, username, password, \"user\", name, department, 28)\n )\n db.commit()\n db.close()", "def new_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def insert_user(user):\n\n try:\n session.add(user)\n session.commit()\n except Exception 
as e:\n logger.error(e)", "def add_user(self):\n user = models.User(email=self.test_user,\n password=generate_password_hash(self.test_user_password))\n user.add()", "def save_user(self):\n db.session.add(self)\n db.session.commit()", "def test_add_new_user_to_db(self):\n\n test_user = 'test_first_user'\n test_password = 'liamNees0n_T4k3n'\n user_object = User(username=test_user, password=test_password)\n db.session.add(user_object)\n db.session.commit()\n self.assertEqual(user_object.username, 'test_first_user')", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def add_user(username, password):\n user = User(id=0, username=username, password=password)\n session.add(user)\n session.commit()", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def add(\n new_user: schemas.UserCreate,\n db_session: Session = Depends(get_db),\n current_user: models.User = Depends(get_current_admin_user)\n):\n db_user = crud.get_by_email(db_session, new_user.email)\n\n if db_user:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST,\n detail='The user with this email already exists in the system.'\n )\n\n return crud.create(db_session, new_user)", "def register_new_user(first_name,email,password):\n\n new_user = User(first_name=first_name, email=email, password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def add_user(user: dict):\n new_user = [user]\n insert_into_table('users', new_user)", "def add_user(self, firstname, lastname, email, username, password, role):\n\n new_user = {\n \"id\": len(self.db) + 1,\n \"firstname\": firstname,\n \"lastname\": lastname,\n \"email\": email,\n \"username\": username,\n \"password\": password,\n \"role\": role\n }\n\n ALL_USERS.append(new_user)", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def add_user():\n\n if request.method == 'POST':\n add_new_user_schema = AddNewUser()\n\n errors = add_new_user_schema.validate(data=request.form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_user_schema.dump(request.form)\n\n user = User(connection=connection, cursor=cursor)\n user.add_user(\n first_name=args['first_name'],\n second_name=args['second_name'],\n is_internal=args['is_internal'],\n\n position=args['position'],\n email=args['email'],\n phone_number=args['phone_number']\n )\n\n return redirect(url_for('documentation.home'))\n\n return render_template('pages/inputs/add_user.html')", "def add_user():\n #check_admin()\n\n # if form submit\n if request.method == 'POST':\n # create new user with UI form data\n user = User(username=request.form['username'],\n password=request.form['password'],\n is_admin=request.form.getlist('is_admin'))\n\n try:\n # add user to the database\n db.session.add(user)\n db.session.commit()\n # message to the UI\n flash('Utilizador adicionado com sucesso.', 'success')\n # redirect to the users page\n return redirect(url_for('user.list_users'))\n 
except:\n # in case user name already exists\n flash('Erro: username já existe.', 'danger')\n return redirect(url_for('user.add_user'))\n\n # load add user form template\n return render_template('user_add.html')", "def add_user(username, password=\"\"):\n global db\n if db is None:\n init_db()\n user_model = Query()\n if db.search(user_model.username == username):\n return {\n 'error': 'User {0} already exists'.format(username)\n }\n\n if password == \"\": # nosec (not a hardcoded password)\n password = getpass.getpass()\n\n salt = hashlib.sha512(str(os.urandom(64)).encode('utf-8')).hexdigest()\n password = hash_password(password, salt)\n api_key = gen_api_key(username)\n\n user = {\n 'username': username,\n 'password': password,\n 'salt': salt,\n 'api_key': api_key\n }\n user_id = db.insert(user)\n\n return {\n 'result': 'success',\n 'eid': user_id,\n 'user_created': user,\n 'api_key': api_key\n }", "def add_user(self, user_id, group_id='', user_level=1, user_name='', name='', method_id=1):\n stmt = \"\"\"INSERT INTO users (_user_id, group_id, user_level, _user_name, _name, method_id) \n SELECT ?, ?, ?, ?, ?, ? \n WHERE NOT EXISTS(SELECT 1 FROM users WHERE (?) = _user_id)\"\"\"\n args = (user_id, group_id, user_level, user_name, name, method_id, user_id)\n self.conn.execute(stmt, args)\n self.conn.commit()", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def add_user(name: str, last_name: str, username: str) -> None:\n with connection:\n connection.execute(ADD_USER, (name, last_name, username))", "def register_user(self):\n User.add_user(User(self.email.data, self.password.data))" ]
[ "0.83477956", "0.82241684", "0.8169616", "0.81505746", "0.80565304", "0.79970205", "0.7881901", "0.7866157", "0.78567743", "0.7816866", "0.7795224", "0.77623284", "0.77358586", "0.7726013", "0.7695807", "0.7691027", "0.7687878", "0.76473737", "0.7645796", "0.76421", "0.7618797", "0.7597231", "0.7591131", "0.75910723", "0.7584741", "0.75527066", "0.7526989", "0.75166655", "0.75079393", "0.7503561" ]
0.8355208
0
displays form where user can edit info
def show_edit_form(user_id): user = User.query.get_or_404(user_id) return render_template('edit.html', user=user)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_form():\n return template (\"edit\")", "def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def edit(self):\n\n pass", "def show_edit_user_form(user_id):\r\n user = User.query.get_or_404(user_id)\r\n\r\n return render_template('edit-user.html', user=user)", "def show_edit_user_form(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('edit_user.html', user=user)", "def edit():", "def show_user_detail_form():\n\n return render_template(\"add-user-details.html\")", "def showEditContact(self):", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def show_edit_actor(self):\n\t\ttry:\n\t\t\tnombre = self.ui.lista_act.currentItem().text()\n\t\t\tformulario = view_form_actor.Form(self)\n\t\t\tformulario.edit(nombre)\n\t\t\tformulario.exec_()\n\t\t\tself.load_data()\n\t\texcept AttributeError as e:\n\t\t\terrorMessageBox = QtGui.QMessageBox.warning(self,\"Error\",\"Debe seleccionar un actor\")", "def edit_user(self):\n from editWindow import EditPlayer\n self.edit = EditPlayer(self.lang, self.result_table.currentItem().text())\n self.edit.show()", "def edit(self, **kwargs):\n ...", "def edit_basic_info(id):\n form = CreateEventForm()\n form.submit.label.text = \"Update Event\"\n event = Event.query.get_or_404(id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n if form.validate_on_submit():\n services.update_models_from_create_event_form(form, event.venue, event)\n db.session.commit()\n flash(\"Your changes were saved.\", \"success\")\n return redirect(url_for(\"events.edit_basic_info\", id=id))\n services.populate_create_event_form(form, event.venue, event)\n return render_template(\"events/basic_info.html\", form=form, event=event)", "def viewprofile():\n user = current_user\n form = UserUpdateForm(obj=user)\n form.populate_obj(user)\n if form.validate_on_submit():\n form.populate_obj(user)\n\n db.session.commit()\n\n flash('You have successfully edited your profile!')\n return render_template('user/user.html', title=\"View Profile\",\n user=user, form=form, action='Edit')", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def edit(request, pk):\n\n try:\n object = User.objects.get(pk=pk)\n except:\n object = User()\n\n if request.method == 'POST': # If the form has been submitted...\n form = UserForm(request.POST, instance=object)\n\n if form.is_valid(): # If the form is valid\n object = form.save()\n\n messages.success(request, _('The user has been saved.'))\n\n return redirect('users.views.list')\n else:\n form = UserForm(instance=object)\n\n return render(request, 'users/users/edit.html', {'form': form})", "def edit_profile_view(request, user_id=0):\n\tuser = User.objects.filter(id=user_id)[0]\n\tstalker = request.user\n\tif stalker != user:\n\t\treturn HttpResponse(\"Go away guys, nothing to see 
here!\")\n\telse:\n\t\tuser_indicator = 0 # Used to check if the user is logged in \n\t\tif (stalker.is_authenticated()):\n\t\t\tuser_indicator = 1\n\t\telse:\n\t\t\tuser_indicator = 0\n\t\t# show a form with data from database for 'GET' request\n\t\tif request.method == 'GET':\n\t\t\tperson = Person.objects.filter(user=user)[0]\n\t\t\taccount_form = AccountDetailForm(initial={\n\t\t\t\t'user_name': user.username,\n\t\t\t\t'user_email': user.email,\n\t\t\t\t'user_faculty': person.faculty,\n\t\t\t}) # Display user account detail\n\t\t\taccount_form.fields['user_name'].disabled = False\n\t\t\taccount_form.fields['user_email'].disabled = False\n\t\t\taccount_form.fields['user_faculty'].disabled = False\n\n\t\t\tlocation_form = LocationForm() # Display location box for user to type into\n\n\t\t\tlive_update_list = LiveUpdate.objects.all().order_by('date_time') # Display live update list\n\n\t\t\tfriend_update_list = FriendUpdate.objects.filter(receiver=user).order_by('date_time') # Display friend update list\n\n\t\t\tfriend_request_list = FriendRequest.objects.filter(receiver=user).order_by('-date_time') # Display friend request list\n\n\t\t\tfriend_list = person.friends.all() # Display all friends list\n\n\t\t\taddfriend_indicator = 0 # Help decide the status of the add friend button\n\t\t\tperson_2 = Person.objects.filter(user=stalker)[0]\n\t\t\tif (person_2 in friend_list):\n\t\t\t\taddfriend_indicator = 2 # stalker and user whose profile being shown are friends\n\t\t\telse:\n\t\t\t\trelevant_request = FriendRequest.objects.filter(sender=stalker,receiver=user)\n\t\t\t\tif len(relevant_request) == 0:\n\t\t\t\t\taddfriend_indicator = 0 # stalker has not added user whose profile is being shown as a friend\n\t\t\t\telse:\n\t\t\t\t\taddfriend_indicator = 1 # stalker has added user whose profile is being shown as a friend\n\n\t\t\treturn render(request, 'mugspot/userprofile_edit.html', {\n\t\t\t\t\t'live_update_list':live_update_list,\n\t\t\t\t\t'friend_update_list': friend_update_list,\n\t\t\t\t\t'location_form': location_form,\n\t\t\t\t\t'account_form': account_form,\n\t\t\t\t\t'friend_list': friend_list,\n\t\t\t\t\t'friend_request_list': friend_request_list,\n\t\t\t\t\t'user_prof': user,\n\t\t\t\t\t'stalker': stalker,\n\t\t\t\t\t'user_indicator': user_indicator,\n\t\t\t\t\t'addfriend_indicator': addfriend_indicator,\n\t\t\t\t})", "def edit_user_information():\n session_id = request.args.get('session-id', None)\n old_username = request.args.get('user-id', None)\n user = get_user_by_id(old_username)\n if request.method == 'POST':\n surname = request.form['surname']\n name = request.form['name']\n birthdate = request.form['birthdate']\n new_username = request.form['username']\n today = datetime.date.today()\n reservations_list = get_user_reservations_list(old_username)\n cars_reservations_list = get_cars_user_reservations_list(reservations_list)\n reservations_status_list = get_reservations_status_list(reservations_list)\n if check_authentication(session_id, old_username):\n are_changes_valid = edit_user_info(name, surname, birthdate, old_username, new_username)\n else:\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), del_session_cookie=True)\n if are_changes_valid == \"OK\":\n edit_session(session_id, new_username)\n return render_template('user_area.html', user=new_username, session_id=session_id, edit_mode=False,\n surname=surname, name=name, birthdate=birthdate, today=today,\n 
reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)\n else:\n return render_template('user_area.html', user=user.id, session_id=session_id, edit_mode=True,\n surname=user.surname, name=user.name, birthdate=user.birthdate,\n feedback_msg=are_changes_valid, today=today,\n reservations_list=reservations_list, cars_reservations_list=cars_reservations_list,\n reservations_status_list=reservations_status_list)", "def home_edituser():\n\tpass", "def getEditForm( self ):\n return \"listc_edit\"", "def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()", "def show_admin_edit_users():\n return render_admin_page(\"admin-eu.html\")", "def get(self,request,*args,**kwargs):\n\t\tuser_form = UserUpdateForm(instance=request.user)\n\t\tpersona_form = PersonaUpdateForm(instance=request.user.persona)\n\t\tuser_password_update_form = UserPasswordUpdateForm(user=request.user)\n\n\t\tcontext = {\n\t\t'user_form':user_form,\n\t\t'persona_form':persona_form,\n\t\t'user_password_update_form':user_password_update_form\n\t\t}\n\t\treturn render(request, 'cuenta/editar.html', context)", "def edit_user(request, username):\n context = {}\n detail = IMPUser.objects.all().filter(username = username)\n if detail:\n context = {'username':username,\n 'display_name':detail[0].display_name,\n 'tel':detail[0].tel,\n 'mobile':detail[0].mobile,\n 'office':detail[0].office,\n 'num':detail[0].num}\n return render(request, \"account/edit_user.html\", context)", "def view_user_edit(self):\n\n logged_in = authenticated_userid(self.request)\n message = ''\n form = Form(self.request, schema=UserEditSchema,\n state=State(request=self.request))\n if form.validate():\n password = self.request.params['password']\n if self.context.validate_password(password):\n if self.request.params['new_password']:\n password = self.request.params['new_password']\n message = 'Successfully saved'\n email = self.request.params['email']\n self.context.edit(password, email)\n else:\n message = msg['password_invalid']\n return {\n 'message': message,\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n 'form': FormRenderer(form),\n 'email': self.context.email\n }", "def edit():\n database.ask(mode='single')\n F = database.check(single=True)\n if F and hasattr(F,'edit'):\n name = database[0]\n 
F.edit(name)", "def edit_basic_info(request):\n if request.POST:\n request.user.first_name = request.POST['first_name']\n request.user.last_name = request.POST['last_name']\n request.user.email = request.POST['email']\n request.user.save()\n request.user.userprofile.phone_number = request.POST['phone']\n request.user.userprofile.save()\n messages.add_message(request, messages.SUCCESS, 'Your changes have been saved.')\n return redirect('base_dashboard')\n\n return render(request, 'edit_basic_info.html', {'the_user': request.user})", "def edit_show_user(user_id):\n edited_user = User.query.get_or_404(user_id)\n\n edited_user.first_name = request.form['first_name']\n edited_user.last_name = request.form['last_name']\n edited_user.image_url = request.form['image_url']\n\n db.session.add(edited_user)\n db.session.commit()\n\n return redirect('/')", "def show_edit_pet(id):\r\n pet = Pet.query.get_or_404(id)\r\n form = EditPetForm(obj=pet)\r\n\r\n if form.validate_on_submit():\r\n pet.photo_url = form.photo_url.data\r\n pet.notes = form.notes.data\r\n pet.available = form.available.data\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n else:\r\n return render_template(\"pet_profile.html\", form=form, pet=pet)", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)" ]
[ "0.7944607", "0.76103383", "0.7417197", "0.7322191", "0.73178375", "0.7314002", "0.7090248", "0.70597273", "0.7043498", "0.70094925", "0.6948445", "0.69112897", "0.68536806", "0.6801866", "0.6767648", "0.67107993", "0.6693232", "0.66814184", "0.6656329", "0.6631694", "0.6631562", "0.6620936", "0.66155404", "0.66091555", "0.66091293", "0.656956", "0.656869", "0.6556886", "0.6555827", "0.6552495" ]
0.7746406
1
shows form by which user can add new post
def show_form_to_add_new_post(user_id): user = User.query.get_or_404(user_id) tags = Tag.query.all() return render_template('posts/new.html', user=user, tags=tags)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_new_post_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template('new_post_form.html', user=user)", "def new_post_form(user_id):\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n return render_template('posts/new.html', user=user, tags=tags)", "def get(self, request):\n form = PostForm()\n context = {\n 'form': form,\n 'success_message': ''\n }\n return render(request, 'posts/new-post.html', context)", "def show_post_form(user_id):\n\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/create_post.html\", user=user, tags=tags)", "def add_post_form(user_id):\n user_data = {}\n user_data[\"id\"] = user_id\n user_data[\"name\"] = User.query.get(user_id).get_full_name()\n\n return render_template(\"add_post.html\", headline=f\"Add Post for { user_data['name'] }\",\n user=user_data)", "def new_post_form(user_id):\r\n\r\n user = User.query.get_or_404(user_id)\r\n tags = Tag.query.all()\r\n\r\n return render_template('post-form.html', user=user, tags=tags)", "def add_post(request):\n if request.session.get('id_user') is None:\n request.session['res'] = 'Warn'\n messages.add_message(request, messages.WARNING,\n \"The session has expired\")\n return HttpResponseRedirect(reverse('home'))\n form = PostForm(request.POST or None)\n if request.method == 'POST':\n if form.is_valid():\n data = form.save(commit=False)\n data.author = User.objects.filter(id=request.session.get(\n 'id_user'))[0]\n data.save()\n messages.add_message(request, messages.SUCCESS,\n \"The post has been saved!\")\n return HttpResponseRedirect(\"/posts/list/\")\n return render(request, 'form.html', {'form': form})", "def get(self, request):\n form = PostForm()\n context = {\n 'form': form,\n }\n return render(request, 'blogs/new_post.html', context)", "def get(self, request):\n\n # crear el formulario\n form = PostForm()\n form.fields['owner'].queryset = Blog.objects.filter(owner=request.user)\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-post.html', context)", "def get(self):\n\n self.render(\"newpost.html\", user=self.user)", "def add():\n if request.method == \"POST\":\n result = add_post(\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n return render_template(\"add.html\")", "def new_post(request):\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form = PostForm()\n else:\n # POST data submitted; process data.\n form = PostForm(data=request.POST)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.owner = request.user\n new_post.save()\n return redirect('blogs:posts')\n\n # Display a blank or invalid form.\n context = {'form': form}\n return render(request, 'blogs/new_post.html', context)", "def post():\n\n form = forms.PostForm()\n if form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n return redirect(url_for('index'))\n return render_template('new.html', form=form)", "def new(request):\n\n form = PostForm(request.POST or None)\n\n if request.method == \"POST\":\n if form.is_valid():\n instance = form.save(commit=False)\n instance.user = request.user\n instance.save()\n return HttpResponseRedirect(reverse('posts:toggle_publish', args=(instance.id,)))\n\n context = 
{'form':form}\n\n return render(request, 'posts/new.html', context)", "def get(self):\n if self.user:\n self.render(\"blog/addpost.html\")\n else:\n self.redirect(\"/login\")", "def show_user_post_form(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('add-post-user.html', user=user)", "def post(self, request):\n\n # crear el formulario con los datos del post\n form = PostForm(request.POST)\n\n if form.is_valid():\n #crea el post\n post = form.save()\n\n #generar mensaje de exito\n msg = \"Post creado con éxito\"\n form = PostForm()\n else:\n msg = \"Ha ocurrido un error al guardar el post\" \\\n\n\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form,\n \"msg\": msg\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-post.html', context)", "def new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(pub_date=datetime.date.today())\n post.title = form.title.data\n post.content = form.content.data\n post.slug = slugify(post.title)\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('new.html', form=form)", "def render_form(self, title=\"\", body=\"\", error=\"\"):\n self.render(\"newpost.html\", title=title, body=body, error=error)", "def add_post(request):\n if 'form.submitted' in request.params:\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('new_post')\n post = Post('')\n return environment_factory(post=post, save_url=save_url)", "def add_user_form():\n\n return render_template(\"add_user.html\", headline=\"Add New Blogly User\")", "def add_post(request):\n\tcontext = RequestContext(request)\n\tif request.method == 'POST':\n\t\tform = PostForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tform.save(commit=True)\n\t\t\treturn redirect(blog)\n\t\telse:\n\t\t\tform.errors\n\tform = PostForm()\n\treturn render_to_response('blog/add_post.html', {'form': form}, context)", "def community_post_create_view(request):\n task = \"Create New\"\n form = AddEditPostForm() # An unbound form\n\n if request.method == 'POST': # If the form has been submitted...\n form = AddEditPostForm(request.POST, request.FILES) # A form bound to the POST data\n if form.is_valid(): # All validation rules pass\n post = form.save(commit=False) # Create a new object from the form, but don't save it to the database\n post.author = request.user # Set the author to the current user\n post.save() # Save the object to the database\n slug_str = \"%s %s\" % (post.title, post.date_posted) # Create a slug from the title and date\n post.slug = slugify(slug_str) # Create the slug\n post.save() # Save the object to the database\n return redirect('community-home') # Redirect to the home page\n\n context = { # Pass the variables to the template\n 'task': task,\n 'form': form,\n }\n return render(request,\n 'pages/patient-community/community-create-update-post.html',\n context) # render the patient community 
create post page", "def show_post_form(request, pk=None):\n post = get_object_or_404(Post, pk=pk) if pk else None\n header = \"Edit \\\"{0}\\\"\".format(post.title) if pk else \"New Post\"\n title = \"Edit #{0}\".format(pk) if pk else \"New Post\"\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n form.instance.author = request.user\n post = form.save()\n return redirect(post_detail, post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, \"postform.html\", {\"form\":form, \"title\": title, \"header\": header})", "def post(self, req):\n error_messages = []\n success_message = ''\n\n # Creamos owner y se lo pasamos al form con un objeto pre-establecido\n post_with_owner = Post()\n post_with_owner.owner = req.user\n post_with_owner.blog = Blog.objects.filter(owner=req.user)[0]\n\n form = PostCreateForm(req.POST, instance=post_with_owner)\n if form.is_valid():\n\n new_post = form.save()\n form = PostCreateForm()\n success_message = u'Post guardado con éxito! '\n success_message += u'<a href=\"{0}\">'.format(reverse('post_detail', args=[req.user.username, new_post.pk]))\n success_message += u'(ver post)</a>'\n else:\n error_messages.append(u'Formulario incompleto.')\n\n context = {\n 'form': form,\n 'success_message': success_message\n }\n return render(req, 'posts/new_post.html', context)", "def new_tag_form():\r\n\r\n posts = Post.query.order_by(Post.title).all()\r\n\r\n return render_template('tag-form.html', posts=posts)", "def new(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n entry = form.save(commit=False)\n entry.member = request.user\n entry.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm() # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の新規登録',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'登録する',\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def post_create(request):\n\tform = PostForm(request.POST or None, request.FILES or None)\n\tif request.POST:\n\t\tif form.is_valid():\n\t\t\tinstance = form.save(commit=False)\n\t\t\tinstance.user = request.user\n\t\t\tinstance.save()\n\t\t\tmessages.success(request, \"Post created!\")\n\t\t\treturn HttpResponseRedirect(instance.get_absolute_url())\n\t\telse:\n\t\t\tmessages.error(request, \"Sorry! 
Something went wrong.\", extra_tags=\"\")\n\tcontext = {\n\t\t'title': \"Create Post\",\n\t\t'form' : form,\n\t}\n\treturn render(request, 'post/create.html', context)", "def posts_create(request):\n if request.method == 'POST':\n form = PostForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect('feed')\n\n else:\n form = PostForm()\n\n return render(\n request=request,\n template_name='posts/new.html',\n context={\n 'form': form,\n 'user': request.user,\n 'perfil': request.user.perfil\n }\n )", "def post(self):\n post_title = self.request.get(\"post_title\")\n post_content = self.request.get(\"post_content\")\n param_list = dict(post_title=post_title, post_content=post_content)\n any_error = False\n\n if not post_title:\n param_list['title_error'] = \"Title is missing!\"\n any_error = True\n if not post_content:\n param_list['content_error'] = \"Content is missing!\"\n any_error = True\n\n if any_error:\n self.render(\"blog/addpost.html\", **param_list)\n else:\n p = Post.add_post(post_title, post_content, self.user)\n self.redirect('/blog/%s' % str(p.key().id()))" ]
[ "0.7803368", "0.77031416", "0.76967293", "0.7611464", "0.7593046", "0.75702906", "0.7559357", "0.75396353", "0.7437009", "0.74052477", "0.7396248", "0.73693126", "0.7366067", "0.7362356", "0.7318585", "0.7317058", "0.7299925", "0.7257682", "0.72130656", "0.72055334", "0.71996117", "0.7117906", "0.7087284", "0.7067642", "0.7003908", "0.69315904", "0.69275075", "0.6911415", "0.6876485", "0.6837308" ]
0.7992803
0
shows individual post based on ID of post
def show_post(post_id): post = Post.query.get_or_404(post_id) tags = post.tags return render_template('posts/post.html', post=post, tags=tags)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n \n return render_template(\"posts/post_details.html\", post=post, user=user)", "def view_post(post_id):\n\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n return render_template('detail.html', posts=posts)", "def show_post(post_id):\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n post = Post.query.get_or_404(post_id)\n user = User.query.get(session[CURRENT_USER_KEY])\n return render_template('show_post.html', post=post, user=user)", "def show_post_details(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('post-user.html', post=post)", "def show_post_details(post_id):\r\n post = Post.query.get(post_id)\r\n user = User.query.get_or_404(post.user_id)\r\n return render_template('post-details.html', user=user, post=post)", "def post_detail(request, post_id):\n # Get the Post object corresponding to the id given in the URL.\n # If there is no corresponding Post, show an error page.\n post = get_object_or_404(Post, id=post_id)\n return render(request,\n 'blog/post_detail.html',\n {'post': post}\n )", "def post(request, post_id):\n post = Post.objects.get(id=post_id)\n\n context = {'post': post}\n return render(request, 'blogs/post.html', context)", "def get(self, request, post_id):\n post = Evento.objects.get(id=post_id)\n #post = get_object_or_404(Post, id=post_id)\n self.context['post'] = post\n\n self.context['title'] = str(post)\n\n return render(request, self.template, self.context)", "def posts_index_single(post_id=None):\n posts = Post.query.all()\n post = Post.query.get(post_id)\n return render_template('posts.html', posts=posts, post=post)", "def view_post(post_id):\n\n db_post = Post.query.get_or_404(post_id)\n full_name = db_post.user.get_full_name()\n created_date = datetime.strftime(\n db_post.created_at, \"%a %b %d, %Y %I:%M %p\").replace(\" 0\", \" \")\n return render_template(\"view_post.html\",\n headline=db_post.title,\n post=db_post, user_full_name=full_name, created=created_date)", "def show_post_details(post_id, user_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('post_details.html', post=post, user=user)", "def get(self, request, post_id):\n post = Evento.objects.get(id=post_id)\n self.context['post'] = post\n\n self.context['title'] = str(post)\n\n return render(request, self.template, self.context)", "def post_detail(request, post_pk, blog_name):\n # recuperar el post\n try:\n post = Post.objects.select_related().get(pk=post_pk)\n except Post.DoesNotExist:\n return render(request, '404.html', {}, status=404)\n except Post.MultipleObjectsReturned:\n return HttpResponse(\"Existen varios posts con ese identificador\", status=300)\n\n # preparar el contexto\n context = {\n 'post': post\n }\n\n # renderizar la plantilla\n\n return render(request, 'blogs/post-detail.html', context)", "def show_post(slug):\n post = Post.query.filter_by(slug=slug).first()\n return render_template('articles/post.html', post=post)", "def detail(request, post_id, post_type):\n\n\tuser_additional_info = UserAdditionalInfo.objects.get(user_id = 1)\n\tuser = User.objects.get(id=1)\n\ttry:\n\t\tif post_type == \"i\": # i represents images\n\t\t\tpost = ImagePost.objects.get(id = post_id)\n\t\telse:\n\t\t\tpost = VideoPost.objects.get(id = post_id)\n\texcept (ImagePost.DoesNotExist, VideoPost.DoesNotExist):\n\t\tlogger.error('Something went wrong!')\n\t\traise Http404(\"Post 
does not exist\")\n\treturn render(request, 'devblog/post_detail.html', {'post':post, 'extra_info':user_additional_info,'author':user})", "def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n raise Http404", "def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)", "def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)", "def post_detail_blog(request, blog_pk):\n # recuperar el post\n # recupera posts\n posts = Post.objects.order_by('-created_at').filter(owner=blog_pk)\n\n # prepara el contexto de la plantilla\n context = {\n 'post_objects': posts\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/inicio.html', context)", "def get(self, post_id):\n key = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(key)\n\n # if use request a non-exist post, render 404 error\n if not post:\n self.error(404)\n return\n\n self.render(\"permalink.html\", post = post)", "def show():\n context = {\n \"posts\": get_posts()[::-1]\n }\n return render_template(\"show.html\", **context)", "def showPost(self, id):\n post = self._extractPost(id)\n date = datetime.datetime.strptime(post['dateCreated'].value,\n \"%Y%m%dT%H:%M:%S\")\n\n output = \"-\" * 72\n output += \"\\n[ {0} ] === {1} ===\".format(post['postid'],\n post['title'])\n if 'categories' in post :\n output += \"\\nBy {0} on {1} - {2}\".format(\n post['userid'], date.ctime(), post['categories'][0])\n else :\n output += \"\\nBy {0} on {1}\".format(\n post['userid'], date.ctime())\n\n output += \"\\n( tags: {0} )\".format(post['mt_keywords'])\n output += \"\\n\"\n output += \"\\n\" + post['formatted_text']\n output += \"\\n\"\n output += \"\\nLink: \" + post['link']\n output += \"\\nPermaLink: \" + post['permaLink']\n output += \"\\n\" + \"-\" * 72\n\n enc = getpreferredencoding()\n print output.encode( enc)", "def show_post(slug):\n post = Post.query.filter_by(slug=slug).first()\n if not post: abort(404)\n if not session.get('logged_in') and not post.visible: abort(404)\n return render_template('post.html', post=post)", "def view_post(request, slug_post):\n try:\n post = Entry.objects.filter(status=2).get(slug=slug_post)\n except Entry.DoesNotExist:\n raise Http404\n return render_to_response('blog/post.html', {'post':post, 'DISQUS_SHORTNAME':settings.DISQUS_SHORTNAME}, RequestContext(request))", "def get(self, id):\n\n post = Post.get_by_id(int(id))\n if post:\n #t = jinja_env.get_template(\"post.html\")\n #response = t.render(post=post)\n self.render(\"post.html\", post=post)\n else:\n error = \"there is no post with id %s\" % id\n #t = jinja_env.get_template(\"404.html\")\n #response = t.render(error=error)\n self.render(\"404.html\", error=error)\n #self.response.out.write(response)", "def get(self, post_id):\n\n post = Post.get_by_id(int(post_id), parent=blog_key())\n\n if post and self.user.key().id() == post.user.key().id():\n self.render(\"edit.html\", post=post, user=self.user)\n else:\n self.redirect('/blog/%s' % str(post.key().id()))", "def view_post(id):\n # all_category = postCategory.get_categories()\n posts = Post.query.get(id)\n\n if posts is None:\n\n abort(404)\n \n comment = Comments.get_comments(id)\n print(comment)\n count_likes = Votes.query.filter_by(posts_id=id, vote=1).all()\n count_dislikes = Votes.query.filter_by(posts_id=id, vote=2).all()\n return 
render_template('view-post.html', posts = posts, comment = comment, count_likes=len(count_likes), count_dislikes=len(count_dislikes))", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def get(self, post_id):\n post = Post.get_by_id(int(post_id), parent=blog_key())\n if not post:\n self.error(404)\n return\n\n # comment query object\n comments = Comment.all().filter('post =', post).order('created')\n\n # break a line of content and rander a post to post page\n post._render_text = post.content.replace('\\n', '<br>')\n self.render('post.html', post=post, user=self.user, comments=comments)", "def get(self, request, post_id):\n evento = post_id\n self.context['evento'] = evento\n #all_posts = Post.objects.all()\n #self.context['posts'] = all_posts\n return render(request, self.template, self.context)" ]
[ "0.80884796", "0.7809729", "0.7766649", "0.76972395", "0.7675512", "0.75907516", "0.7551688", "0.73931134", "0.73359364", "0.73324984", "0.73168266", "0.72386354", "0.72310734", "0.72155094", "0.72065705", "0.7182989", "0.7161204", "0.7115597", "0.71091795", "0.70830536", "0.7071694", "0.70703125", "0.7015949", "0.698538", "0.694549", "0.69313645", "0.6909848", "0.6896766", "0.6854827", "0.68506384" ]
0.7998874
1
shows individual post form to be edited
def show_edit_post_form(post_id): post = Post.query.get_or_404(post_id) return render_template('posts/edit.html', post=post)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_edit_post_form(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('edit_post.html', post=post, user=user)", "def show_post_form(request, pk=None):\n post = get_object_or_404(Post, pk=pk) if pk else None\n header = \"Edit \\\"{0}\\\"\".format(post.title) if pk else \"New Post\"\n title = \"Edit #{0}\".format(pk) if pk else \"New Post\"\n if request.method == \"POST\":\n form = PostForm(request.POST, request.FILES, instance=post)\n if form.is_valid():\n form.instance.author = request.user\n post = form.save()\n return redirect(post_detail, post.pk)\n else:\n form = PostForm(instance=post)\n return render(request, \"postform.html\", {\"form\":form, \"title\": title, \"header\": header})", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)", "def show_edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n\n return render_template('edit-post.html', post=post)", "def editor_edit_post(post_id=None):\n post = Post.query.get(post_id)\n return render_template('ghostdown.html', post=post)", "def edit_post(post_id):\n\n form = forms.PostForm()\n posts = models.Post.select().where(models.Post.id == post_id)\n if posts.count() == 0:\n abort(404)\n elif form.validate_on_submit():\n models.Post.create(title=form.title.data,\n date=form.date.data,\n time_spent=form.time_spent.data,\n details=form.details.data,\n remember=form.remember.data)\n models.Post.get(models.Post.id == post_id).delete_instance()\n return redirect(url_for('index'))\n return render_template('edit.html', posts=posts, form=form)", "def edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def edit_post(request, post_id):\n post = Post.objects.get(id=post_id)\n check_post_owner(request, post)\n\n if request.method != 'POST':\n # Initial request; pre-fill form with the current entry.\n form = PostForm(instance=post)\n else:\n # POST data submitted; process data.\n form = PostForm(instance=post, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('blogs:post', post_id=post.id)\n\n context = {'post': post, 'form': form}\n return render(request, 'blogs/edit_post.html', context)", "def get_view(post_id):\n # create DB connection\n db_connection = sqlite3.connect(DB_FILE)\n db_cursor = db_connection.cursor()\n post_info = get_post_info(db_cursor, post_id)\n # close DB connection\n db_connection.close()\n page_title = \"Blog Manager | Edit Post\"\n styles = [ '/static/stylesheets/blog/edit_post_style.css' ]\n scripts = [ '/static/scripts/jquery.js', '/static/blog/scripts/edit_post_scripts.js' ]\n return render_template(\"blog/edit_post.html\", page_title=page_title, styles=styles, scripts=scripts, post_info=post_info)", "def edit_post(bid, pid):\n # pylint: disable=unused-argument\n pst = Post.query.get(pid)\n form = PostForm(request.form)\n if request.method == 'POST' and current_user.uid == pst.uid:\n if form.validate():\n if pst.name != form.name.data or pst.text != form.desc.data:\n og_name = pst.name\n pst.name = form.name.data\n pst.text = form.desc.data\n DB.session.commit()\n flash('Post ({}) successfully edited!'.format(og_name))\n else:\n 
flash(constants.DEFAULT_SUBMISSION_ERR)\n return redirect(request.referrer)", "def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)", "def edit_post(post_id):\n\n post_data = {\"id\": post_id}\n db_post = Post.query.get_or_404(post_id)\n post_data[\"title\"] = db_post.title\n post_data[\"content\"] = db_post.content\n post_data[\"user_id\"] = db_post.user_id\n\n return render_template(\"edit_post.html\", headline=\"Add New Blogly User\", post=post_data)", "def edit_post(post_id):\n\n post = Post.query.get_or_404(post_id)\n\n title = request.form[\"title\"]\n content = request.form[\"content\"]\n tags = request.form.getlist(\"tag\")\n post.tags = []\n if tags:\n for tag in tags:\n post.tags.append(Tag.query.filter(Tag.name==tag).one())\n\n if not title or not content:\n flash(\"Please enter a title and content\")\n return redirect(f\"/posts/{post.id}/edit\")\n\n post.title = title\n post.content = content\n db.session.add(post) \n db.session.commit()\n\n return redirect(f\"/posts/{post_id}\")", "def get(self, post_id):\n post = Post.by_id(int(post_id))\n\n if self.user and post.author.get_id() == self.user.get_id():\n post.content = post.content.replace('<br>', '\\n')\n self.render(\"/blog/editpost.html\", post=post)\n else:\n self.render(\"/base.html\", error=\"Not allowed to edit post.\")", "def edit_post(request, year, month, day, slug):\n post = get_model_for_date_and_slug(Post, year, month, day, slug)\n form = PostForm(instance=post)\n if request.method == \"POST\":\n form = PostForm(request.POST, instance=post)\n if form.is_valid():\n post = form.save()\n if \"continue_editing\" in request.POST:\n return http.HttpResponseRedirect(post.get_edit_url())\n return http.HttpResponseRedirect(post.get_absolute_url())\n return render_to_response(\"montgomery/edit_post.html\", {\"form\": form}, context_instance=RequestContext(request))", "def edit_post(request, slug):\n post = Post.objects.get(slug=slug)\n # import pdb; pdb.set_trace()\n if request.method == 'POST':\n post.title = request.POST['title']\n post.content = request.POST['content']\n post.save()\n return redirect('post')\n\n return render(request, 'posts/edit.html', {'post': post})", "def show_edit_tag_form(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n posts = Post.query.all()\n return render_template('tags/edit_tag.html', tag=tag, posts=posts)", "def edit_form():\n return template (\"edit\")", "def edit_post(post_id):\n if CURRENT_USER_KEY not in session:\n raise Unauthorized()\n\n post = Post.query.get_or_404(post_id)\n user = User.query.get(session[CURRENT_USER_KEY])\n\n # prevent editing of post by anyone except for post owner\n if post.user_id != session[CURRENT_USER_KEY]:\n raise Unauthorized()\n\n form = PostForm(obj=post)\n\n filled_muscles = form.muscles.object_data\n filled_equipment = form.equipment.object_data\n\n \n form.muscles.choices = [(m.id, m.name) for m in 
Muscle.query.all()]\n form.equipment.choices = [(e.id, e.name) for e in Equipment.query.all()]\n\n if form.validate_on_submit():\n post.title = form.title.data\n post.details = form.details.data\n post.is_private = form.is_private.data\n muscles = form.muscles.data\n equipment = form.equipment.data\n db.session.add(post)\n db.session.commit()\n # create join table additions\n muscles_to_add = []\n equipment_to_add = []\n # remove muscles and equipment from post if they were removed on edit\n for filled_muscle in filled_muscles:\n if filled_muscle.id not in muscles:\n PostMuscle.remove(muscle_id=filled_muscle.id, post_id=post_id)\n db.session.commit()\n for filled_equipment_choice in filled_equipment:\n if filled_equipment_choice.id not in equipment:\n PostEquipment.remove(\n equipment_id=filled_equipment_choice.id, post_id=post_id)\n db.session.commit()\n for muscle in muscles:\n muscle_post = PostMuscle(post_id=post.id, muscle_id=muscle)\n muscles_to_add.append(muscle_post)\n for choice in equipment:\n equipment_post = PostEquipment(\n post_id=post.id, equipment_id=choice)\n equipment_to_add.append(equipment_post)\n db.session.add_all(muscles_to_add)\n db.session.commit()\n db.session.add_all(equipment_to_add)\n db.session.commit()\n\n return redirect(f'/posts/{post_id}')\n else:\n form.muscles.data = [m.id for m in post.muscles]\n form.equipment.data = [e.id for e in post.equipment]\n\n return render_template('edit_post.html', form=form, post=post, user=user)", "def handle_edit_post(post_id):\n edited_post = Post.query.get_or_404(post_id)\n\n edited_post.title = request.form['post-title']\n edited_post.content = request.form['post-content']\n\n db.session.add(edited_post)\n db.session.commit()\n\n return redirect(f\"/users/{edited_post.user_id}\")", "def edit_post(year, month, day, slug):\n post = Post.query.filter_by(slug=slug, pub_date=datetime.date(year, month, day)).first()\n form = PostForm(title=post.title, content=post.content)\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.add(post)\n db.session.commit()\n return flask.redirect(flask.url_for(\n 'view_post',\n year=post.pub_date.year,\n month=post.pub_date.month,\n day=post.pub_date.day,\n slug=post.slug\n ))\n return flask.render_template('edit.html', post=post, form=form)", "def show_post_form(user_id):\n\n user = User.query.get_or_404(user_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/create_post.html\", user=user, tags=tags)", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def edit(self):\n\n pass", "def edit_form(pagename):\n\n articles = get_articles()\n\n edit_article = None\n for article in articles:\n if article[\"title\"] == pagename:\n edit_article = article\n\n if edit_article == None:\n return template(\"skapa-artikel\")\n\n else:\n return template(\"edit\", article=edit_article)", "def edit_profile_post(request, pk=None):\n profilepost = get_object_or_404(ProfilePost, pk=pk) \n if (request.user == profilepost.user or\n request.user.is_superuser):\n if request.method == \"POST\":\n profile_post_form = ProfilePostForm(request.POST, request.FILES, instance=profilepost)\n if profile_post_form.is_valid():\n profilepost = profile_post_form.save()\n messages.success(request, 'Your post has been updated!') \n return 
redirect(reverse('profile'))\n else:\n profile_post_form = ProfilePostForm(instance=profilepost)\n else:\n return HttpResponseForbidden()\n\n return render(request, 'newprofilepost.html', {'profile_post_form': profile_post_form})", "def get(self, request):\n\n # crear el formulario\n form = PostForm()\n form.fields['owner'].queryset = Blog.objects.filter(owner=request.user)\n # renderiza la plantilla con el formulario\n context = {\n \"form\": form\n }\n\n # renderiza y devuelve la plantilla\n return render(request, 'blogs/new-post.html', context)", "def post(self):\n subject = self.request.get('subject')\n content = self.request.get('post_content')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if post and user and subject and content:\n if post.submitter_id == user_id:\n self.render_editpage(user, post_id, subject, content)\n else:\n self.render_improper_access()\n else:\n self.error(500)", "def edit_post(post_id):\n\n if request.method == \"GET\":\n the_post = mongo.db.blog.find_one({'_id': ObjectId(post_id)})\n return render_template(\"edit.html\", page_title=\"Edit Blog\", post=the_post)\n\n elif request.method == \"POST\":\n blog_post = mongo.db.blog\n now = datetime.now()\n blog_post.replace_one({'_id': ObjectId(post_id)},\n {\n 'title': request.form['blog_title'],\n 'blog': request.form['blog_info'],\n 'image': request.form['image_url'],\n 'created': request.form['created'],\n 'updated': now.strftime(\"%d/%m/%Y %H:%M:%S\"),\n })\n return redirect(url_for('blog.home'))" ]
[ "0.80158204", "0.790066", "0.7776901", "0.7769089", "0.7762255", "0.7487989", "0.74165535", "0.7331607", "0.714657", "0.71126884", "0.70684004", "0.70555127", "0.6985439", "0.6881445", "0.68796444", "0.68688715", "0.6858047", "0.68471867", "0.6838015", "0.6833424", "0.6778794", "0.6642548", "0.6627843", "0.6618695", "0.6618584", "0.66057557", "0.6574066", "0.6563611", "0.6560539", "0.65432763" ]
0.79917425
1
Show edit form for a tag
def show_edit_tag_form(tag_id): tag = Tag.query.get_or_404(tag_id) posts = Post.query.all() return render_template('tags/edit_tag.html', tag=tag, posts=posts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_edit_tag_form(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n \n return render_template('edit-tag.html', tag=tag)", "def show_edit_tag_form(user_id, tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n user = tag.user\n\n return render_template('edit_tag.html', tag=tag, user=user)", "def show_tag_edit(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n\n return render_template(\"tags/edit_tag.html\", tag=tag)", "def edit_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n return render_template(\"tags/edit_tag.html\", tag=tag)", "def show_edit_tag(tag_id):\r\n tag = Tag.query.get_or_404(tag_id)\r\n posts = Post.query.order_by(Post.title).all()\r\n\r\n return render_template('edit-tag.html', tag=tag, posts=posts)", "def go_to_edit_tag(tag_id):\n \n tag = Tag.query.get_or_404(tag_id)\n posts = Post.query.all()\n return render_template('tags/edit.html', tag=tag, posts=posts)", "def edit_tag(request, id=None):\n tag = id and get_object_or_404(Tag, pk=id, user=request.user)\n if request.method == 'POST':\n form = EditTagForm(instance=tag, data=request.POST)\n if form.is_valid():\n tag = form.save(commit=False)\n tag.user = request.user\n tag.save()\n return redirect(tag)\n else:\n form = EditTagForm(instance=tag)\n return render(request, 'pages/form.html', {\n 'title': \"{} Tag\".format(\"Edit\" if tag else \"New\"),\n 'form': form,\n })", "def handle_edit_tag_form(tag_id):\n\n return redirect('/')", "def edit_form():\n return template (\"edit\")", "def edit_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n tag.name = request.form['name']\n\n db.session.add(tag)\n db.session.commit()\n\n flash(f\"Tag '{tag.name}' was successfully edited\")\n\n return redirect('/tags')", "def edit_tag(tag_id):\n\n tag = Tag.query.get_or_404(tag_id)\n tag_name = request.form[\"name\"]\n\n if not tag_name:\n flash(\"Please enter tag name\")\n return redirect(f\"/tags/{tag_id}/edit\")\n\n tag.name = tag_name\n db.session.add(tag)\n db.session.commit()\n\n return redirect(f\"/tags/{tag_id}\")", "def show_post_edit(post_id):\n\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def show_new_tag_form():\n\n return render_template('create_tag.html')", "def test_show_tag_edit(self):\r\n\r\n with app.test_client() as client:\r\n resp = client.get(f\"/tags/{self.tag.id}/edit\")\r\n html = resp.get_data(as_text=True)\r\n\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertIn(\"Edit Tag\", html)\r\n self.assertIn(self.tag.name, html)", "def show_edit_post(post_id):\r\n post = Post.query.get_or_404(post_id)\r\n tags = Tag.query.all()\r\n return render_template('edit-post.html', post=post, tags=tags)", "def edit(self):\n\n pass", "def edit_post(post_id):\n post = Post.query.get_or_404(post_id)\n tags = Tag.query.all()\n\n return render_template(\"posts/edit_post.html\", post=post, tags=tags)", "def new_tag_form():\r\n\r\n posts = Post.query.order_by(Post.title).all()\r\n\r\n return render_template('tag-form.html', posts=posts)", "def process_tag_edit(user_id, tag_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n tag = Tag.query.get_or_404(tag_id)\n\n tag.title = title\n tag.content = content\n\n db.session.add(tag)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/tags/{tag_id}')", "def edit(self, **kwargs):\n ...", "async def edit(self, ctx: \"IceTeaContext\", otag: TagConverter, *, new_content: str):\n tag: models.Tag = otag\n if tag.alias:\n return await 
ctx.send(\"Unable to edit an alias\")\n if tag.author == ctx.author.id:\n content = await ctx.clean_content(new_content)\n await tag.edit(content=content)\n await ctx.send(\"Tag updated Successfully\")\n elif tag.author != ctx.author.id:\n await ctx.send(\"You do not own this tag\")", "def edit():", "async def slashtag_edit(\n self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter\n ):\n await ctx.send(await tag.edit_tagscript(tagscript))", "def getEditForm( self ):\n return \"listc_edit\"", "def show_pet_with_edit_form(pet_id):\n pet = Pet.query.get_or_404(pet_id)\n form = PetFormEdit(obj=pet)\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n \n db.session.commit()\n return redirect('/')\n else:\n return render_template('pet.html', pet=pet, form=form)", "def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )", "def show_edit_form(self, obj_pk=None):\n obj = self.model.objects.get(pk=obj_pk)\n # if there is no edit permission then does not show the form\n if not self.has_view_permissions(obj): return\n\n\n # create the edit form a add it to the empty widget details\n # override the function hide_form to make sure the list is shown after the user close the edition form\n params = {\n 'title':'Edit',\n 'model':self.model,\n 'pk':obj.pk,\n 'parent_model':self.parent_model,\n 'parent_pk':self.parent_pk,\n 'parent_win': self\n }\n\n if self.INLINES: params.update({'inlines': self.INLINES} )\n if self.FIELDSETS: params.update({'fieldsets':self.FIELDSETS})\n if self.READ_ONLY: params.update({'readonly': self.READ_ONLY})\n\n editmodel_class = self.get_editmodel_class(obj)\n editform = editmodel_class(**params)\n\n if hasattr(self, '_details') and self.USE_DETAILS_TO_EDIT:\n self._details.value = editform\n self._list.hide()\n self._details.show()\n\n # only if the button exists:\n toolbar = [self.toolbar] if isinstance(self.toolbar, str) else self.toolbar\n if toolbar:\n for o in toolbar:\n if o and hasattr(self, o): getattr(self, o).hide()\n\n else:\n self._list.show()\n if hasattr(self, '_details'):\n self._details.hide()", "def show_add_tag():\n\n return render_template(\"tags/create_tag.html\")", "def is_edit(self):\n return self._tag == 'edit'", "def update_tag(tag_id):\n tag = Tag.query.get_or_404(tag_id)\n tag.name = request.form[\"edit_tag_name\"]\n\n db.session.add(tag)\n db.session.commit()\n return redirect(\"/tags\")" ]
[ "0.8776973", "0.8398177", "0.82558274", "0.80214214", "0.77986705", "0.7691515", "0.7521986", "0.72903264", "0.707884", "0.6930865", "0.6884716", "0.6647438", "0.66433793", "0.6603262", "0.65803325", "0.649248", "0.6481094", "0.6407203", "0.6392679", "0.6370153", "0.6313979", "0.6309899", "0.61289495", "0.61000925", "0.61000323", "0.60962373", "0.6078547", "0.6046356", "0.60331964", "0.60219395" ]
0.85679936
1
This function will break up words for us.
def break_words(stuff): # triple quotes turn to be the document of ex25 help words = stuff.split(' ') # listing the parts of breaking return words
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def break_words(stuff):\n\twords = stuff.split(' ')\n\treturn words", "def break_words(stuff):\n\twords = stuff.split(' ')\n\treturn words", "def break_words(stuff):\r\n #parte la cadena cada vez que encuentra un espacio\r\n words = stuff.split(' ') \r\n return words", "def break_words(stuff):\r\n words = stuff.split(' ')\r\n return words", "def break_words(stuff):\n words = stuff.split(' ')\n return words", "def break_words(stuff):\n words = stuff.split(' ')\n return words", "def break_words(stuff):\n words = stuff.split(' ')\n return words", "def break_words(stuff):\n words = stuff.split(' ')\n return words", "def break_words(stuff):\n words = stuff.split(' ')\n return words", "def break_words(stuff):\n words = stuff.split(\" \")\n return words", "def break_words(stuff):\n words = stuff.split(\" \")\n return words", "def break_words(stuff):\n words = stuff.split(' ') # split every time you see a space\n return words", "def break_words(stuff):\n words = stuff.split(' ') #截取字符串一空格为基准,截取为列表\n return words #返回words", "def break_words(stuff):\r\n # Above line is a short explanation for the function.\r\n\t# It will appear when help is typed in python\r\n words = stuff.split(' ') \r\n\t# .split is a built in function in python \r\n\t#It will sepearate words with what we will type in ()\r\n\t#In our case we are splitting with blank space.\r\n return words", "def _split(self):\n \n self._words = []\n \n # (1) Expand contractions\n text = self._text.replace(\"'m \", \" am \")\n text = text.replace(\"'d \", \" would \")\n text = text.replace(\"'ll \", \" will \")\n text = text.replace(\"'ve \", \" have \")\n text = text.replace(\"'re \", \" are \")\n text = text.replace(\"can't \", \"can not \")\n text = text.replace(\"won't \", \"will not \")\n text = text.replace(\"n't \", \" not \")\n # Assume possesives are contractions of is\n text = text.replace(\"'s \", \" is \")\n text = text.replace(\"s' \", \"s \")\n \n # (2) Replace newlines, carriage returns, tabs, form feed with space.\n text = re.sub('[\\r\\n\\t\\f]', ' ', text)\n \n # (3) remove duplicate spaces\n text = re.sub(' +', ' ', text.strip())\n \n # Empty text\n if len(text) == 0:\n return \n \n # (4) Split text by whitespace (tokenize).\n words = text.split(' ')\n \n # (5) Separate out punctuation\n for word in words:\n length = len(word)\n \n begin = 0\n for i in range(0,length):\n if not word[i].isdigit() and not word[i].isalpha():\n # decimal, thousandths, fraction symbol\n if word[i] in ['.', ',', '/'] and i < length-1 and word[i+1].isdigit():\n continue\n # degree\n if word[i] in ['°'] and i < length-1 and word[i+1] in [ 'f', 'F', 'c', 'C']:\n continue\n # sign symbol\n if word[i] in ['-', '+'] and i < length-1 and (word[i+1].isdigit() or word[i+1] in ['.', ',']):\n # first char or exponent\n if begin == i or word[i-1] in ['e', 'E']:\n continue\n \n if begin != i:\n self._words.append( { 'word': word[begin:i], 'tag': Vocabulary.UNTAG } )\n if word[i] in [ '.', '?', '!', ',', ':', ';', '(', ')', '[', ']', '\"', '\\'', '¿', '¡']:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.PUNCT } )\n # non-printable ascii\n elif (ord(word[i]) >= 0 and ord(word[i]) <= 7) or (ord(word[i]) >= 14 and ord(word[i]) <= 31):\n pass\n else:\n self._words.append( { 'word': word[i], 'tag': Vocabulary.SYMBOL } )\n begin = i + 1\n if begin < length:\n self._words.append( { 'word': word[begin:], 'tag': Vocabulary.UNTAG } )", "def guess_splitwords():\n\n if t_word[:2] == 'un' and (t_pos == 'ADJD' or t_pos == 'ADJA'):\n 
create_splitword_tags(t_word[:2], t_word[2:])\n create_negation_frame()\n create_splitword_target(t_word[:2])\n create_splitword_focus(t_word[2:])\n create_splitword_negated(t_word[2:])\n create_splitword_scope(t_word[2:])", "def separate_words(text, min_word_return_size=2):\n splitter = re.compile('[^a-zA-Z0-9_\\\\+\\\\-/]')\n words = []\n for single_word in splitter.split(text):\n current_word = single_word.strip().lower()\n # leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases\n if len(current_word) > min_word_return_size and \\\n current_word != '' and \\\n not is_number(current_word):\n words.append(current_word)\n return words", "def splitWordList(self, text):\n result = list()\n if text is None:\n return result\n\n t = text + \"⁋\"\n t = t.replace('\\n', '⁋')\n t = re.sub(WordListProcessor.REFERENCE_PATTERN, \"\", t)\n t = re.sub(WordListProcessor.SUPERSCRIPT_PATTERN, \"\", t) # TODO: Extract sense!\n t = re.sub(WordListProcessor.HTML_REMOVER, \"\", t)\n t = t.replace(\"&quot\", \"\\\"\")\n t = t.replace(\",\", \"⁋,\")\n t = t.replace(\";\", \"⁋\")\n # print(t)\n # t = re.sub(WordListProcessor.BRACKETED_DELIMITER, \"$1$2$3$4$5$6\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER1, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER2, \"$1$2\", t)\n # t = re.sub(WordListProcessor.ESCAPE_DELIMITER3, \"$1$2\", t)\n t = self.escapeDelimiters(t)\n # print(t)\n t = t.replace(\"⁋;\", \"⁋\")\n t = t.replace(\"⁋,\", \"⁋\")\n t = t.replace(\"]] or [[\", \"]]⁋[[\")\n t = t.replace(\"]] and [[\", \"]]⁋[[\")\n t = t.replace(\" - \", \"⁋\")\n # t = t.replace(\" / \", \"⁋\")\n j = t.find(\" / \") # Use ' / ' only as a delimiter if there are at least two of them!\n if j >= 0:\n j = t.find(\" / \", j)\n if j >= 0:\n t = t.replace(\" / \", \"⁋\")\n # print(t)\n\n # print(t)\n while True:\n delim = t.find('⁋')\n if delim >= 0:\n word = t[0:delim]\n if word:\n # Normalize the word.\n word = word.strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n word = self.deWikify(word).strip()\n word = self.removeBrackets(word).strip()\n word = self.removeTemplates(word).strip()\n word = self.removeComments(word).strip()\n if word.lower().startswith(\"see also\"):\n word = word[8:].strip()\n if word.lower().startswith(\"see\"):\n word = word[3:].strip()\n if word.startswith(\":\"):\n word = word[1:].strip()\n if word.endswith(\".\"):\n word = word[:-1].strip()\n if word.endswith(\",\"):\n word = word[:-1].strip()\n\n # Check for slashes.\n word = word.replace(\" / \", \"/\")\n word = word.replace(\"/ \", \"/\")\n i = word.find('/')\n if word:\n if i >= 0 and word.find(' ') < 0:\n while True:\n result.append(word[0:i])\n word = word[i + 1:]\n i = word.find('/')\n if i < 0:\n break\n result.append(word)\n else:\n result.append(word)\n\n t = t[delim + 1:]\n\n else:\n break\n\n return result", "def format_word_split(txt):\n tt = re.sub(r\"'s\\b\", '', txt).lower() # possessives\n tt = re.sub(r'[\\.\\,\\;\\:\\'\\\"\\(\\)\\&\\%\\*\\+\\[\\]\\=\\?\\!/]', '', tt) # weird stuff\n tt = re.sub(r'[\\-\\s]+', ' ', tt) # hyphen -> space\n tt = re.sub(r' [a-z] ', ' ', tt) # single letter -> space\n tt = re.sub(r' [0-9]* ', ' ', tt) # numbers\n\n tt = re.sub('\\W+', ' ', tt)\n tt = tt.split(\" \")\n\n ret = []\n for elem in tt:\n if elem not in stop_words:\n ret.append(elem)\n\n tt = ' '.join(ret)\n return tt.strip()", 
"def get_words():\n # words\n words_list = list()\n for i in range(1, 114+1):\n sura = quran.get_sura(i)\n for aya in sura:\n wordsList = aya.split(' ')\n for word in wordsList:\n words_list.append(word)\n\n return words_list", "def extract_words(self):\n str = self.text.lower()\n words = re.sub(r'[?|—|:|\"|,|\\.\\n|\\.|\\s|\\n|\\t|\\v|\\f|\\r]+', \"*\", str)\n self.word_list = words.split(\"*\")", "def list_of_words(self):\n\t\treturn str.split(re.sub(r'\\W+', ' ', self.body.encode('ascii', 'replace')))", "def split_into_words(sentences):\n return list(sentences.split(\" \"))", "def _words(self):\n regex = r'\\b\\w+\\b'\n for word in re.findall(regex, self.text):\n yield word", "def get_words(self, cleaner):\n return cleaner.clean(self.get_text())", "def getWords(speech):\r\n return speech.split()", "def makeWords(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if x not in self.words: \r\n self.words[x] = 1\r\n else: \r\n self.words[x] += 1\r\n return self.words", "def make_bag(txt, stopw):\n bow = re.split('\\s',txt.lower())\n new_bow=[]\n for word in bow:\n if word not in stopw and len(word)>0 and not re.search('\\d',word):\n new_bow.append(word)\n return(new_bow)", "def split_words(value: str) -> List[str]:\n words: List[str] = []\n buffer: List[str] = []\n previous = None\n\n def flush():\n if buffer:\n words.append(\"\".join(buffer))\n buffer.clear()\n\n for char in value:\n tp = classify(char)\n if tp == StringType.OTHER:\n flush()\n elif not previous or tp == previous:\n buffer.append(char)\n elif tp == StringType.UPPER and previous != StringType.UPPER:\n flush()\n buffer.append(char)\n else:\n buffer.append(char)\n\n previous = tp\n\n flush()\n return words", "def split_into_words(s):\n s = re.sub(r\"\\W+\", \" \", s)\n s = re.sub(r\"[_0-9]+\", \" \", s)\n return s.split()" ]
[ "0.80394965", "0.80394965", "0.8015403", "0.7988996", "0.786715", "0.786715", "0.786715", "0.786715", "0.786715", "0.7849892", "0.7849892", "0.7701187", "0.75530666", "0.7530405", "0.72611374", "0.7182393", "0.7165535", "0.70521915", "0.7007744", "0.69567066", "0.68832916", "0.6853212", "0.67845815", "0.6763883", "0.67079663", "0.6691293", "0.66848886", "0.6673581", "0.6643278", "0.66199666" ]
0.80425274
0
report root wallet balance metrics to statsd.
def report_balance(root_address, channel_addresses=[]): try: wallet = Blockchain.get_wallet(root_address) statsd.gauge('root_wallet.kin_balance', wallet.kin_balance, tags=['address:%s' % root_address]) except Exception: pass # don't fail
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def report_balance(self):\n print(f\"\\nThe current balance in your account is ${self.balance}.\\n\")", "def show_balance(self):\n\t\tbalance = 0\n\t\tfor acct in self.wallet:\n\t\t\tutxos = get_unspent(acct[\"address\"], self.testnet)\n\t\t\tbalance += sum(i['value'] for i in utxos)\n\t\treturn f\"{self.name} current balance: {str(balance/100000000.0)} BTC\"", "def balance_report(abroker):\n log.info('*** balances ***\\n')\n s = \"*** balances ***\\n\"\n \"\"\"\n for asset in assets:\n v = abroker.balance_currency(asset)['Total']\n log.info('%s => %f'%(asset,v))\n s += '%s => %f\\n'%(asset,v)\n print (\"send \" + str(s))\n \"\"\"\n\n y = abroker.balance_all()\n for x in y: \n if x['Total'] > 0:\n v = x['Total']\n s += '%s => %f\\n'%(x['Symbol'],v)\n #print (x)\n print (\"send \" + str(s))\n mail.send_simple_message(abroker.mail_api_key, abroker.mail_domain, \"Balance Report\",s)", "def balances():\n loop.run_until_complete(app.exchanges.fetch_balances())\n print(app.exchanges.balances_str)", "def balance(plugin, unit='btc'):\n if not is_valid_unit(unit):\n return \"Value units are %s\" % ', '.join(valid_units)\n\n rpc = plugin.rpc\n\n data = {\n 'unit': unit,\n 'onchain_total_balance': lib.node_stats.onchain_balance(rpc).to(unit),\n 'onchain_confirmed_balance': lib.node_stats.onchain_confirmed_balance(rpc).to(unit),\n 'onchain_unconfirmed_balance': lib.node_stats.onchain_unconfirmed_balance(rpc).to(unit),\n 'total_funded_outgoing': lib.balance_stats.funded_outgoing_balance(rpc).to(unit),\n 'total_funded_incoming': lib.balance_stats.funded_incoming_balance(rpc).to(unit),\n 'pending_outgoing_balance': lib.balance_stats.pending_balance(rpc).to(unit),\n 'pending_incoming_balance': lib.balance_stats.pending_incoming_balance(rpc).to(unit),\n 'active_outgoing_balance': lib.balance_stats.active_balance(rpc).to(unit),\n 'active_incoming_balance': lib.balance_stats.active_incoming_balance(rpc).to(unit),\n 'closed_recent_to_self': lib.balance_stats.closed_balance(rpc).to(unit),\n 'closed_recent_to_remote': lib.balance_stats.closed_incoming_balance(rpc).to(unit),\n 'reserve_balance': lib.balance_stats.reserve_balance(rpc).to(unit),\n 'spendable_balance': lib.balance_stats.spendable_balance(rpc).to(unit)\n }\n plugin.log(json.dumps(data, indent=2))\n return data", "def displayBalance(self):\n orders = self.trader.tradeData.get(\n 'openOrders',\n 'Failed to read orderCount')\n# uncomment 3 lines below for orderType debug printing\n## ordertype = type(orders)\n# print'DEBUG: helper.displayBalance orders TYPE is',ordertype\n# print'DEBUG: helper.displayBalance orders:',orders\n if isinstance(orders, int) and orders > 0:\n print\"Open Orders:\", orders\n self.processOrders(printOutput=True)\n self.separator()\n print'Available Balances:'\n funds = self.trader.tradeData['funds']\n for bal in funds.keys():\n if funds[bal] >= 0.01:\n print bal.upper() + ':', funds[bal]\n self.separator()", "def test_balance_tracking(self):\n # TODO\n pass", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in self.trades:\n if pair not in 
self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n 
self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "async def _update_balances(self):\n local_asset_names = set(self._account_balances.keys())\n remote_asset_names = set()\n resp_json = await self._api_request(\"post\",\n \"terra/balances\",\n {\"address\": self._terra_wallet_address})\n for token, bal in resp_json[\"balances\"].items():\n self._account_available_balances[token] = Decimal(str(bal))\n self._account_balances[token] = Decimal(str(bal))\n remote_asset_names.add(token)\n\n asset_names_to_remove = local_asset_names.difference(remote_asset_names)\n for asset_name in asset_names_to_remove:\n del self._account_available_balances[asset_name]\n del self._account_balances[asset_name]\n\n self._in_flight_orders_snapshot = {k: copy.copy(v) for k, v in self._in_flight_orders.items()}\n self._in_flight_orders_snapshot_timestamp = self.current_timestamp", "def get_account_balance(unspent_outputs):\n return sum(output[\"value\"] for output in unspent_outputs.values())", "def show_balances(self):\n print 'Pot: %d' % (self.account.balance,)\n for player in self.players:\n balance = player.account.balance\n if balance > 0:\n print '%s: %d' % (player, balance,)", "def _total_d(self):\n debit = 0.0\n for l in self.data:\n debit += l['debit']\n self.t_credit += l['credit']\n self.t_balance += l['balance']\n return debit", "def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': 
None}}\n return result", "async def watch_balance(self, params={}):\n token = await self.authenticate(params)\n type = None\n type, params = self.handle_market_type_and_params('watchBalance', None, params)\n types = self.safe_value(self.options, 'accountsByType', {})\n assetType = self.safe_string(types, type, type)\n params = self.omit(params, 'type')\n messageHash = 'balancess'\n url = self.urls['api']['ws']\n subscribe = {\n 'jsonrpc': '2.0',\n 'id': self.request_id(),\n 'method': '/private/subscribe',\n 'params': {\n 'access_token': token,\n 'channels': [\n 'user.asset.' + assetType,\n ],\n },\n }\n request = self.deep_extend(subscribe, params)\n return await self.watch(url, messageHash, request, messageHash, request)", "def get_balance(self):\n r = requests.get(build_api_call(self.base_url, None, 'balance', ''), auth=HTTPBasicAuth(KEY, SECRET))\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def balance(self) -> Decimal:\n return sum_queryset(AccountEntry.objects.filter(account=self.account, timestamp__lte=self.timestamp).exclude(timestamp=self.timestamp, id__gt=self.id))", "def balance(self):\n total_money = 0\n for item in self.ledger:\n total_money += item['amount']\n return total_money", "def show_balance(cls):\n if cls.is_logged_in():\n print(f'\\nBalance: {cls.__current_acct.__get_balance()}\\n')", "def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})", "def checkbalance(self):\n logging.debug('Checked user balance')", "def get_balance(self):\n balance = 0\n for transaction in self.ledger:\n balance += transaction[\"amount\"]\n return balance", "def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def balance():\n address = request.args.get(\"address\")\n balance = p2p.query(\"/balance\", address=address)[\"balance\"]\n payload = jsonpickle.encode({\"balance\": balance})\n 
return payload, 200, {\"Content-Type\": \"application/json\"}", "def balance(self) -> int:\r\n if self._top == None:\r\n return 0\r\n return self._top.balance()", "def get_balances_and_thresholds():\n try:\n query = \"\"\"\n SELECT\n info.uid,\n info.wallet,\n info.payment_threshold,\n COALESCE(credits_pending.sum, 0),\n COALESCE(credits_matured.sum, 0),\n COALESCE(debits.sum, 0)\n FROM (\n SELECT\n uid,\n payment_threshold,\n wallet\n FROM users\n ) AS info\n LEFT JOIN (\n SELECT\n uid,\n SUM(\n COALESCE(amount_reward, 0) +\n COALESCE(amount_bonus, 0) +\n COALESCE(amount_dev, 0)\n ) AS sum\n FROM credits\n WHERE status = 0\n GROUP BY uid\n ) AS credits_pending ON credits_pending.uid = info.uid\n LEFT JOIN (\n SELECT\n uid,\n SUM(\n COALESCE(amount_reward, 0) +\n COALESCE(amount_bonus, 0) +\n COALESCE(amount_dev, 0)\n ) AS sum\n FROM credits\n WHERE status = 1\n GROUP BY uid\n ) AS credits_matured ON credits_matured.uid = info.uid\n LEFT JOIN (\n SELECT\n uid,\n SUM(\n COALESCE(payments.amount_paid, 0) +\n COALESCE(payments.amount_fee, 0)\n ) AS sum\n FROM payments\n WHERE status <> -1\n GROUP BY uid\n ) AS debits ON debits.uid = info.uid\n \"\"\"\n\n database.execute(query)\n\n return database.fetchall()\n\n except database.psycopg2.Error as e:\n raise Exception(e.pgerror) from None\n except Exception as e:\n log.error('Failed to get balances and thresholds')\n log.error(e)\n return []", "def balance(self) -> float:\n return self.position.exchange.wallet_balance", "def get_wallet_balances(self):\r\n method = self.wallet_endpoints['balances']['method']\r\n url = self.base_url + self.wallet_endpoints['balances']['url']\r\n req = requests.request(method, url, headers=self.get_auth_headers())\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res", "def do_balance(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.update_balances()\n balance = self.wallet.addresses.get(args, -1)['balance']\n if balance == -1:\n print(\"Address not found.\")\n else:\n print(balance)", "def get_wallet_health(self):\n return self.__call__('currencies', 'getwallethealth')" ]
[ "0.61463773", "0.6089032", "0.5850805", "0.5772713", "0.5758199", "0.56520957", "0.5570537", "0.5546814", "0.55428386", "0.55103374", "0.5492825", "0.54784095", "0.54520524", "0.5406334", "0.5402505", "0.5397196", "0.53325844", "0.5331363", "0.5331347", "0.5328559", "0.53144485", "0.5294317", "0.5286876", "0.52865744", "0.5284469", "0.52736026", "0.52662444", "0.5263339", "0.5261768", "0.5249218" ]
0.7062899
0
JavaScript specific to gizmo to be placed in the {% block scripts %} block
def get_gizmo_js(): return ( "tethys_gizmos/js/gizmo_utilities.js", "tethys_gizmos/js/cesium_map_view.js", "tethys_gizmos/js/DrawHelper.min.js", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]", "def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))", "def create_js(self):\n for x in self.__js:\n self.__content.append(\"<script src=\\\"%s\\\"></script>\\n\"% (x))", "def get_default_javascript():\n return [\"_static/require.js\"]", "def generateJavascriptContent(notification):", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def setup_js(self):\n script = \"\"\"\n Salamat.contextData.redactorOptions = {imageGetJson: '%s'};\n \"\"\"\n script %= self.reverse('redactor_files', args=(self.namespace,\n self.prefix))\n return HttpResponse(script, content_type='text/javascript')", "def module_use_template_javascript(self):\n return False", "def module_use_template_javascript(self):\n return False", "def propeller_javascript(jquery=None):\n javascript = ''\n # See if we have to include jQuery\n if jquery is None:\n jquery = get_propeller_setting('include_jquery', False)\n # NOTE: No async on scripts, not mature enough. 
See issue #52 and #56\n if jquery:\n url = propeller_jquery_url()\n if url:\n javascript += render_tag('script', attrs={'src': url})\n url = propeller_javascript_url()\n if url:\n attrs = {'src': url}\n javascript += render_tag('script', attrs=attrs)\n return mark_safe(javascript)", "def process_js():\n source_paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/admin.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/app.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/footnotes.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/table_of_contents.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/text_resize.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/toastr.js'),\n ]\n dest_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.js')\n min_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.min.js')\n\n process_js_files(source_paths, dest_path, min_path)", "def code(self):\n return '{}\\n<script>{}</script>'.format(self.html, self.js)", "def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }", "def get_filter():\n return render_template(\"filter_js.html\")", "def topcoat_icons_script_tag():\n return u'<script type=\"text/javascript src=\"%s\"></script>' % topcoat_icons_script_url()", "def add_navbar_js(self):\n this_dir, this_filename = os.path.split(__file__)\n file_path = os.path.join(this_dir, \"js\", \"navbar.js\")\n \n with open(file_path, \"r\") as fi:\n navbar = fi.read()\n \n new_script = html.Element(\"script\")\n new_script.text = navbar\n self.book.xpath(\"//head\")[0].insert(1, new_script)\n \n ## Add jquery library\n new_script = html.Element(\"script\")\n new_script.attrib[\"src\"] = \"https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js\"\n self.book.xpath(\"//head\")[0].insert(1, new_script)", "def js(self, file):\n\t\tfor f in file:\n\t\t\tself.to_head('<script type=\"text/javascript\" src=\"' + f + '\"></script>\\n')", "def studio_view(self, context=None):\n fragment = super(WhoWhereWhyXBlock,\n self).studio_view(context=context)\n\n # We could also move this function to a different file\n fragment.add_javascript(load(self.js_path))\n fragment.initialize_js('WhoWhereWhyXBlock')\n\n return fragment", "def loadjs(*args):\n return render(settings, 'JS_FILES', 'staticloader/load_js.html', *args)", "def js(self, script):\n self.page().mainFrame().evaluateJavaScript(script)", "def bootstrap_javascript(jquery=False):\n\n javascript = ''\n # No async on scripts, not mature enough. 
See issue #52 and #56\n if jquery:\n url = bootstrap_jquery_url()\n if url:\n javascript += '<script src=\"{url}\"></script>'.format(url=url)\n url = bootstrap_javascript_url()\n if url:\n javascript += '<script src=\"{url}\"></script>'.format(url=url)\n return javascript", "def author_view(self, context=None):\n # creating xblock fragment\n fragment = Fragment(u\"<!-- This is the studio -->\")\n fragment.add_javascript(load(self.js_path))\n fragment.initialize_js('WhoWhereWhyXBlock')\n\n return fragment", "def include_admin_script(script_path):\n if not absolute_url_re.match(script_path):\n script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)\n return '<script type=\"text/javascript\" src=\"%s\"></script>' % script_path", "def settings(request):\n gauges = Gauge.objects.all()\n return render_to_response('dashboard/settings.js',{'gauges': gauges} )", "def propeller_javascript_url():\n return javascript_url()", "def javascript(self, name, script, **kw):\n if callable(script):\n # Transcode the function or the method to javascript code\n script = ajax.javascript(script)\n\n if isinstance(script, ajax.JS):\n # Transcoded javascript needs a helper\n self.javascript_url('/static/nagare/pyjslib.js')\n script = script.javascript\n\n self._named_javascript.setdefault(name, (self._order, script, kw))\n self._order += 1\n return ()", "def get_js_file(self):\n return 'placeholder'", "def bootstrap_javascript_url():\n return javascript_url()", "def asta_script(request, legahash, astaid, numero=0):\n context = { 'legahash': legahash, 'astaid':astaid}\n return render(request, 'fanta/asta_template_backup.js', context)", "def script():\n return Response(\n response=render_template(\"import_export/js/import_export.js\", _=_),\n status=200,\n mimetype=\"application/javascript\"\n )" ]
[ "0.7191042", "0.6449665", "0.6252366", "0.6234498", "0.6112128", "0.61028486", "0.60693014", "0.6015799", "0.6015799", "0.59201545", "0.58840746", "0.58380616", "0.57780325", "0.57507026", "0.57086045", "0.5698426", "0.5633011", "0.56101644", "0.55908453", "0.5570004", "0.55577326", "0.5534543", "0.55267966", "0.5523733", "0.5517519", "0.54512084", "0.54443353", "0.5442467", "0.54290104", "0.5425495" ]
0.70864314
1
CSS specific to gizmo to be placed in the {% block content_dependent_styles %} block
def get_gizmo_css(): return ( "tethys_gizmos/css/cesium_map_view.min.css", "tethys_gizmos/css/DrawHelper.min.css", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateInlineCSS():", "def get_stylesheet():\n\n #ss_dict\n ss_dict = {'header_image' : HEADER_IMAGE,\n 'icon_true' : ICON_TRUE,\n 'icon_false' : ICON_FALSE,\n 'futura_lt_light' : FUTURA_LT_LIGHT,\n 'bright_orange' : BRIGHT_ORANGE.name(),\n 'bright_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_ORANGE.red(), BRIGHT_ORANGE.green(), BRIGHT_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_orange' : DARK_ORANGE.name(),\n 'dark_orange_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_ORANGE.red(), DARK_ORANGE.green(), DARK_ORANGE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_blue' : BRIGHT_BLUE.name(),\n 'bright_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_BLUE.red(), BRIGHT_BLUE.green(), BRIGHT_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_blue' : DARK_BLUE.name(),\n 'dark_blue_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_BLUE.red(), DARK_BLUE.green(), DARK_BLUE.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'bright_green' : BRIGHT_GREEN.name(),\n 'bright_green_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREEN.red(), BRIGHT_GREEN.green(), BRIGHT_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n 'dark_green' : DARK_GREEN.name(),\n 'dark_green_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREEN.red(), DARK_GREEN.green(), DARK_GREEN.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'bright_grey' : BRIGHT_GREY.name(),\n 'bright_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(BRIGHT_GREY.red(), BRIGHT_GREY.green(), BRIGHT_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'grey' : GREY.name(),\n 'grey_transparent' : 'rgba({0},{1},{2},{3})'.format(GREY.red(), GREY.green(), GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY),\n\t\t\t 'dark_grey' : DARK_GREY.name(),\n 'dark_grey_transparent' : 'rgba({0},{1},{2},{3})'.format(DARK_GREY.red(), DARK_GREY.green(), DARK_GREY.blue(), TABLEVIEW_EDITOR_TRANSPARENCY)}\n\n\n #str_stylesheet\n str_stylesheet = \" \\\n\\\n\\\n/* QWidget */\\\nQWidget { background-color: %(dark_grey)s; \\\n font-family: \\\"%(futura_lt_light)s\\\"; \\\n font-size: 14pt; \\\n selection-background-color: %(bright_blue)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_header_icon */\\\nQWidget#wdgt_header_icon { border-image: url(%(header_image)s); } \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QToolTip */\\\nQToolTip { background-color: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLabel */\\\nQLabel { background-color: transparent; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_header */\\\nQLabel#lbl_explanation_header { font-weight: bold; \\\n font-size: 20pt; \\\n color: %(bright_grey)s; \\\n margin-top: 10; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n/* QLabel - lbl_explanation_text */\\\nQLabel#lbl_explanation_text { color: %(bright_grey)s; \\\n margin-top: 4; \\\n margin-left: 10; \\\n margin-bottom: 4; \\\n margin-right: 10; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QProgressBar */\\\nQProgressBar { border: none;\\\n background-color: %(dark_grey)s;\\\n text-align: center;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QLineEdit */\\\nQLineEdit { border: none;\\\n background-color: %(grey)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenuBar - mnubar_menu */\\\nQMenuBar#mnubar_menu { background-color: transparent;\\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* 
QMenuBar - mnubar_menu - item */\\\nQMenuBar#mnubar_menu::item { background: transparent;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - selected */\\\nQMenuBar#mnubar_menu::item:selected { background: transparent;\\\n color: %(bright_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenuBar - mnubar_menu - item - pressed */\\\nQMenuBar#mnubar_menu::item:pressed { background: transparent;\\\n color: %(dark_orange)s; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QMenu - separator */\\\nQMenu::separator { background: %(bright_orange)s;\\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads */\\\nQMenu#mnu_threads { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item */\\\nQMenu#mnu_threads::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads - item - selected */\\\nQMenu#mnu_threads::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging */\\\nQMenu#mnu_threads_logging { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item */\\\nQMenu#mnu_threads_logging::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_threads_logging - item - selected */\\\nQMenu#mnu_threads_logging::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_gui */\\\nQMenu#mnu_gui { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_gui - item */\\\nQMenu#mnu_gui::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_gui - item - selected */\\\nQMenu#mnu_gui::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view */\\\nQMenu#mnu_shot_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item */\\\nQMenu#mnu_shot_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_shot_metatada_view - item - selected */\\\nQMenu#mnu_shot_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view */\\\nQMenu#mnu_char_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item */\\\nQMenu#mnu_char_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_char_metatada_view - item - selected 
*/\\\nQMenu#mnu_char_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view */\\\nQMenu#mnu_prop_metatada_view { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item */\\\nQMenu#mnu_prop_metatada_view::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_prop_metatada_view - item - selected */\\\nQMenu#mnu_prop_metatada_view::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic */\\\nQMenu#mnu_alembic { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item */\\\nQMenu#mnu_alembic::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_alembic - item - selected */\\\nQMenu#mnu_alembic::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets */\\\nQMenu#mnu_assets { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item */\\\nQMenu#mnu_assets::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_assets - item - selected */\\\nQMenu#mnu_assets::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes */\\\nQMenu#mnu_attributes { background-color: %(dark_grey)s;\\\n margin-left: 8; \\\n margin-right: 8; \\\n border-left: none; \\\n border-right: none; \\\n border-bottom: none; \\\n border-top: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item */\\\nQMenu#mnu_attributes::item { background: transparent;\\\n} \\\n\\\n\\\n/* QMenu - mnu_attributes - item - selected */\\\nQMenu#mnu_attributes::item:selected { background: transparent;\\\n color: %(dark_orange)s;\\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QScrollBar */\\\nQScrollBar { background: %(dark_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QTableCornerButton */\\\nQTableCornerButton { background-color: %(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n/* QTableCornerButton - section */\\\nQTableCornerButton::section { background-color: %(grey)s; \\\n border: none; \\\n}\\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataView */\\\nShotMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header*/\\\nQHeaderView#shot_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_hor_header - section */\\\nQHeaderView#shot_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(dark_orange)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header 
*/\\\nQHeaderView#shot_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - shot_metadata_view_ver_header - section */\\\nQHeaderView#shot_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* ShotMetadataContextMenu */\\\nShotMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* ShotMetadataContextMenu -item - selected */\\\nShotMetadataContextMenu::item:selected { background-color: %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* PropMetadataView */\\\nPropMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(bright_blue)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header*/\\\nQHeaderView#prop_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_hor_header - section */\\\nQHeaderView#prop_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_blue)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header */\\\nQHeaderView#prop_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - prop_metadata_view_ver_header - section */\\\nQHeaderView#prop_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* PropMetadataContextMenu */\\\n/* Here is the above mentioned menu but also its sub menus. 
*/\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* PropMetadataContextMenu */\\\nPropMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* PropMetadataContextMenu -item - selected */\\\nPropMetadataContextMenu::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#PropMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#PropMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#PropMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#PropMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_blue)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#PropMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_blue_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* CharMetadataView */\\\nCharMetadataView { background-color: %(grey)s; \\\n selection-background-color: %(dark_green)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header*/\\\nQHeaderView#char_metadata_view_hor_header{ background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_hor_header - section */\\\nQHeaderView#char_metadata_view_hor_header::section { background-color: qlineargradient(spread:reflect, x1:0.06, y1:0.04, x2:0, y2:0, \\\n stop:0.8 %(grey)s, \\\n stop:1 %(bright_green)s); \\\n font-weight: bold; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: 1px solid %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QHeaderView - char_metadata_view_ver_header */\\\nQHeaderView#char_metadata_view_ver_header { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QHeaderView - 
char_metadata_view_ver_header - section */\\\nQHeaderView#char_metadata_view_ver_header::section { background-color: %(grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n/* CharMetadataContextMenu */\\\n/* Here is the above mentioned menu but also its sub menus. */\\\n/* mnu_metadata, mnu_geometry, mnu_visibility, mnu_selection */\\\n\\\n\\\n/* CharMetadataContextMenu */\\\nCharMetadataContextMenu { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* CharMetadataContextMenu -item - selected */\\\nCharMetadataContextMenu::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata */\\\nQMenu#CharMetadataContextMenu_mnu_metadata { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_metadata -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_metadata::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry */\\\nQMenu#CharMetadataContextMenu_mnu_geometry { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_geometry -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_geometry::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility */\\\nQMenu#CharMetadataContextMenu_mnu_visibility { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_visibility -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_visibility::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection */\\\nQMenu#CharMetadataContextMenu_mnu_selection { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_green)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QMenu - mnu_selection -item - selected */\\\nQMenu#CharMetadataContextMenu_mnu_selection::item:selected { background-color: %(bright_green_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorFramerange */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. 
*/\\\n\\\n\\\n/* QSpinBox - spnbx_frame */\\\nQSpinBox#spnbx_frame { background-color: transparent; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange_transparent)s; \\\n border-bottom: 1px solid %(bright_orange_transparent)s; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_framerange_main */\\\nQWidget#wdgt_table_view_editor_framerange_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame */\\\nQWidget#wdgt_frame { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_and_time_slider */\\\nQWidget#wdgt_range_and_time_slider { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider */\\\nQWidget#wdgt_frame_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_left */\\\nQWidget#wdgt_frame_slider_left { background-color: qlineargradient(spread:reflect, x1:0.3, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* QWidget - wdgt_frame_slider_right */\\\nQWidget#wdgt_frame_slider_right { background-color: qlineargradient(spread:reflect, x1:0.1, y1:0, x2:0, y2:0, \\\n stop:0.45 transparent, \\\n stop:0.5 %(dark_orange_transparent)s, \\\n stop:0.55 transparent); \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_get_current_frame*/\\\nAssetManagerHoverButton#btn_get_current_frame { background-color: %(bright_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider */\\\nQWidget#wdgt_range_slider { background-color: transparent; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_scrollbar */\\\nQWidget#wdgt_range_scrollbar { background-color: transparent; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_left */\\\nQWidget#wdgt_range_slider_left { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_middle */\\\nQWidget#wdgt_range_slider_middle { background-color: %(bright_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_range_slider_right */\\\nQWidget#wdgt_range_slider_right { background-color: %(dark_grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_framesource */\\\nQLabel#lbl_framesource { background-color: transparent; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_start*/\\\nAssetManagerHoverButton#btn_complete_range_start { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_start*/\\\nAssetManagerHoverButton#btn_current_range_start { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_complete_range_end*/\\\nAssetManagerHoverButton#btn_complete_range_end { background-color: %(grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* AssetManagerHoverButton - btn_current_range_end*/\\\nAssetManagerHoverButton#btn_current_range_end { background-color: %(dark_orange_transparent)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid 
%(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorNodepicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_nodepicker_main */\\\nQWidget#wdgt_table_view_editor_nodepicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_nodetype */\\\nQLabel#lbl_nodetype { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_filter */\\\nQLineEdit#le_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - node_view */\\\nQListView#node_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - node_view - item selected */\\\nQListView#node_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorPathpicker */\\\n/* This widget has a transparent background. Below are the stylesheets for the */\\\n/* children of this widget. */\\\n\\\n\\\n/* QWidget - wdgt_table_view_editor_pathpicker_main */\\\nQWidget#wdgt_table_view_editor_pathpicker_main { background-color: %(grey_transparent)s; \\\n border: 1px solid %(bright_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QLabel - lbl_base_path */\\\nQLabel#lbl_base_path { background-color: %(grey_transparent)s; \\\n} \\\n\\\n\\\n/* QLineEdit - le_path_filter */\\\nQLineEdit#le_path_filter { background-color: %(dark_grey_transparent)s; \\\n border: 1px solid %(dark_orange_transparent)s; \\\n} \\\n\\\n\\\n/* QListView - path_view */\\\nQListView#path_view { background-color: %(grey_transparent)s; \\\n alternate-background-color: %(dark_grey_transparent)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QListView - path_view - item selected */\\\nQListView#path_view::item:selected { background-color: %(bright_orange)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerSliderAction */\\\n/* QWidgetAction that draws a slider and an LCD Display */\\\n\\\n\\\n/* AssetManagerSliderAction - QLabel */\\\nQLabel#AssetManagerSliderActionQLabel { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QWidget */\\\nQWidget#AssetManagerSliderActionQWidget { background-color: transparent; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - groove - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::groove:horizontal { background: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, \\\n stop:0 transparent, \\\n stop:1 %(bright_orange)s); \\\n height: 1px; \\\n margin-left: 8; \\\n margin-right: 8; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QSlider - handle - horizontal */\\\nQSlider#AssetManagerSliderActionQSlider::handle:horizontal { background: %(bright_grey)s; \\\n width: 20px; \\\n} \\\n\\\n\\\n/* AssetManagerSliderAction - QLCDNumber */\\\nQLCDNumber#AssetManagerSliderActionQLCDNumber { background: transparent; \\\n color: 
%(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* QWidget - wdgt_asset_manager_pre_export_dialog_main */\\\nQWidget#wdgt_asset_manager_pre_export_dialog_main { background-color: %(dark_grey_transparent)s; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QWidget - wdgt_wdgt_asset_manager_pre_export_dialog_main_options */\\\nQWidget#wdgt_wdgt_asset_manager_pre_export_dialog_main_options { background-color: transparent; } \\\n\\\n\\\n/* QLabel - lbl_question */\\\nQLabel#lbl_question { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept */\\\nQPushButton#btn_accept { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - pressed */\\\nQPushButton#btn_accept:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_accept - hover */\\\nQPushButton#btn_accept:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject */\\\nQPushButton#btn_reject { background-color: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - pressed */\\\nQPushButton#btn_reject:pressed { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QPushButton - btn_reject - hover */\\\nQPushButton#btn_reject:hover { background-color: transparent; \\\n color: %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice */\\\nQCheckBox#chkbx_remember_choice { background: transparent; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator */\\\nQCheckBox#chkbx_remember_choice::indicator { background: transparent; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - hover */\\\nQCheckBox#chkbx_remember_choice::indicator:hover { background: %(dark_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - checked */\\\nQCheckBox#chkbx_remember_choice::indicator:checked { background: %(bright_grey)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n/* QCheckBox - chkbx_remember_choice - indicator - pressed */\\\nQCheckBox#chkbx_remember_choice::indicator:pressed { background: %(dark_orange)s; \\\n color: %(bright_grey)s; \\\n border: 1px solid %(bright_orange)s; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* TableViewEditorBool */\\\n/* Below are the stylesheets for the children of this widget. 
*/\\\n\\\n\\\n/* TableViewEditorBool */\\\nTableViewEditorBool { background-color: %(dark_grey)s; \\\n border-left: none; \\\n border-top: none; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true */\\\nQPushButton#TableViewEditorBool_btn_true { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - hover */\\\nQPushButton#TableViewEditorBool_btn_true:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_true - pressed */\\\nQPushButton#TableViewEditorBool_btn_true:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false */\\\nQPushButton#TableViewEditorBool_btn_false { background-color: transparent; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - hover */\\\nQPushButton#TableViewEditorBool_btn_false:hover { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n/* QPushButton - btn_false - pressed */\\\nQPushButton#TableViewEditorBool_btn_false:pressed { background-color: %(grey)s; \\\n border: none; \\\n} \\\n\\\n\\\n\\\n\\\n\\\n\\\n/* AssetManagerDockWidget */\\\nAssetManagerDockWidget { background: %(dark_grey)s; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n} \\\n\\\n\\\n/* AssetManagerDockWidget - title */\\\nAssetManagerDockWidget::title { background: %(dark_grey)s; \\\n text-align: left; \\\n font-size: 14pt; \\\n color: %(bright_grey)s; \\\n border-left: none; \\\n border-top: 1px solid %(bright_orange)s; \\\n border-bottom: none; \\\n border-right: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button, AssetManagerDockWidget::float-button {background: %(bright_orange)s; \\\n border: none; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:hover, AssetManagerDockWidget::float-button:hover { background: %(dark_orange)s; \\\n} \\\n\\\n\\\nAssetManagerDockWidget::close-button:pressed, AssetManagerDockWidget::float-button:pressed { background: %(dark_orange)s; \\\n} \\\n\\\n\\\n\"%ss_dict\n\n return str_stylesheet", "def CSSClasses(self):", "def base_site(request):\n from django.conf import settings\n\n context = {}\n context['STATIC_URL'] = '/static/'\n context['BASE_SIDEBAR'] = 'yui-t2'\n\n if hasattr(settings, 'STATIC_URL'):\n context['STATIC_URL'] = settings.STATIC_URL\n\n if not hasattr(settings, 'BASE_SIDEBAR'):\n return context\n\n if settings.BASE_SIDEBAR == 'left':\n context['BASE_SIDEBAR'] = 'yui-t2'\n if settings.BASE_SIDEBAR == 'right':\n context['BASE_SIDEBAR'] = 'yui-t4'\n\n return context", "def csssnippets(self):\n return [\n render_template(\n \"domain_constraints/css/domain_constraints.css\",\n node_type=self.node_type\n )\n ]", "def global_admin_css():\n return format_html('<link rel=\"stylesheet\" href=\"{}\">', static('css/admin.css'))", "def editor_css():\n return format_html('<link rel=\"stylesheet\" href=\"' \\\n + settings.STATIC_URL \\\n + 'css/editor.css\">')", "def propeller_css():\n rendered_urls = [render_link_tag(propeller_css_url()), ]\n if propeller_theme_url():\n rendered_urls.append(render_link_tag(propeller_theme_url()))\n return mark_safe(''.join([url for url in rendered_urls]))", "def embed_styles(self):\n for style in self.book.xpath(\"//link[@rel='stylesheet']\"):\n style_raw = self.get_remote_content(style.attrib[\"href\"])\n if style_raw != None:\n style_content = style_raw.decode(\"utf-8\")\n new_style = html.Element(\"style\")\n new_style.attrib[\"type\"] = \"text/css\"\n new_style.text = style_content \n 
style.xpath(\"//head\")[0].insert(0, new_style)\n style.getparent().remove(style)", "def editorcss(subdomain: t.Optional[str] = None) -> Response:\n return current_app.response_class(\n render_template('editor.css.jinja2'),\n mimetype='text/css',\n headers={\n 'Expires': (request_timestamp() + timedelta(minutes=60)).strftime(\n '%a, %d %b %Y %H:%M:%S GMT'\n )\n },\n )", "def css():\n with lcd(BASEDIR):\n local('lessc less/djangobmf.less > bootstrap.css')\n local('yui-compressor --type css -o djangobmf/static/djangobmf/css/djangobmf.min.css bootstrap.css')\n local('rm bootstrap.css')", "def css_file(self):\n pass", "def extend():\n global EXTENDED # pylint: disable=global-statement\n if not EXTENDED:\n EXTENDED = True\n pn.config.raw_css.append(CODE_HILITE_PANEL_EXPRESS_CSS.read_text())", "def ext_css_bundle(context, extension, name):\n return _render_css_bundle(context, extension, name)", "def GetStyleSheet():\n styles = []\n for locale in translation.LOCALES:\n styles.append(\"\"\"\n .goofy-label-{locale} {{\n display: none;\n }}\n .goofy-locale-{locale} .goofy-label-{locale} {{\n display: inline;\n }}\"\"\".format(locale=locale))\n return '\\n'.join(styles)", "def header_style(self):\n ...", "def assets():", "def css_view(request):\n\n stylesheets = []\n for css_file in request.registry.settings['spline.plugins.stylesheets']:\n stylesheets.append(render(\"/css/%s\" % css_file, {}, request=request))\n\n response = request.response\n response.content_type = 'text/css'\n response.charset = 'utf-8'\n response.text = u'\\n'.join(stylesheets)\n return response", "def on_get(self, req, resp):\n resp.status = falcon.HTTP_200\n resp.content_type = 'text/css'\n resp.body = self.csscontent", "async def m004_add_custom_css_to_charges(db):\n\n await db.execute(\"ALTER TABLE satspay.charges ADD COLUMN custom_css TEXT;\")", "def UpdateBaseStyles(self):\n super(EditraBaseStc, self).UpdateBaseStyles()\n\n # Set control specific styles\n sback = self.GetItemByName('select_style')\n if not sback.IsNull():\n sback = sback.GetBack()\n else:\n sback = wx.SystemSettings.GetColour(wx.SYS_COLOUR_HIGHLIGHT)\n self.VertEdit.SetBlockColor(sback)\n self.DefineMarkers()", "def abv_vs_style():\n return render_template(\"ABV_vs_style.html\")", "def loadCss(app):\n\n _browse = app._browse\n aContext = app.context\n appCss = aContext.css\n\n if _browse:\n return appCss\n\n if not app.inNb:\n return\n\n css = getCss(app)\n dh(css)\n dh(\n dedent(\n \"\"\"\n <script>\n globalThis.copyChar = (el, c) => {\n for (const el of document.getElementsByClassName('ccon')) {\n el.className = 'ccoff'\n }\n el.className = 'ccon'\n navigator.clipboard.writeText(String.fromCharCode(c))\n }\n </script>\n \"\"\"\n )\n )", "def load_style() -> str:\n return '<style id=\"scipp-style-sheet\">' + load_style_sheet() + '</style>'", "def make_styles(working_dir='.', media_query='@media....'):\n #print 'making styles'\n # if we dont have a order json file dont do anything\n #print \">>>>>\" + working_dir + '/order.json'\n if False == os.path.isfile(working_dir + '/order.json'): return\n final_block = ''\n json_css_order = open(working_dir + '/order.json')\n css_order = json.load(json_css_order)\n json_css_order.close()\n final_block, import_block_720 = create_import_blocks(css_order, working_dir)\n if import_block_720:\n final_block = final_block + '\\n\\n' + media_query + import_block_720 + '\\n}\\n'\n make_file(working_dir + '/scss/' + 'main_styles.scss', final_block + '\\n')\n call_sass(working_dir)", "def compile_templated_sass(systems, 
settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, 'preprocess_assets'))", "def _css_classes_for(self, block, view):\n block_css_entrypoint = block.entry_point.replace('.', '-')\n css_classes = [\n block_css_entrypoint,\n f'{block_css_entrypoint}-{view}',\n ]\n return css_classes", "def scan_system_css():\r\n pass", "def topcoat_stylesheet_url(device='desktop', variant='dark', minified=True):\n return '%stopcoat/css/topcoat-%s-%s.%scss' % (settings.STATIC_URL, device, variant, 'min.' if minified else '')", "def process_scss():\n paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/foundation-sites/scss/'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/motion-ui/src/')\n ]\n\n # File dirs\n app_scss_file = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/scss/app.scss')\n app_css_file = os.path.join(settings.BASE_DIR, 'static/CMESH/css/app.css')\n\n compiled_css_from_file = sass.compile(filename=app_scss_file, include_paths=paths)\n\n # Open the CSS file and write into it\n write_file = open(app_css_file, 'w')\n write_file.write(compiled_css_from_file)" ]
[ "0.5841171", "0.54973793", "0.5384116", "0.5379654", "0.5354501", "0.53334975", "0.52722573", "0.52624387", "0.52198005", "0.51025265", "0.50930065", "0.49965754", "0.4979057", "0.4951599", "0.4907853", "0.4872566", "0.48463085", "0.4835834", "0.4820844", "0.48029348", "0.4780647", "0.47689655", "0.47465092", "0.47277695", "0.47258142", "0.47132474", "0.47056222", "0.4691322", "0.46907786", "0.4681461" ]
0.6296576
0
Check admin permissions on jwt identity Wrap flask blueprint endpoints
def admin_required(fn): @wraps(fn) def wrapper(*args, **kwargs): identity = get_jwt_identity() if identity['role'] != 'admin': return jsonify({'message': 'Permission denied'}), 403 else: return fn(*args, **kwargs) return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == False:\n return jsonify({\"messsage\": \"Only admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request() # will return a 401 if no token provided\n claims = get_jwt_claims()\n if claims['role'] != 'admin':\n response = {\n 'error': {\n 'message': 'Admin required.'\n }\n }\n return jsonify(response), 403\n else:\n return fn(*args, **kwargs)\n return wrapper", "def admin_required(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n user_id = get_jwt_identity()\n target_user = User.query.filter_by(id=user_id).first()\n\n if target_user is None:\n return redirect(\"/admin/login\", code=403)\n\n if target_user.role != RoleType.ADMINISTRATOR:\n return redirect(\"/admin/login\", code=403)\n return fn(*args, **kwargs)\n return wrapper", "def admin_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided and ensures the user is an admin\"\"\"\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\": \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n admin = data['is_admin']\n except:\n return make_response(jsonify({\"message\": \"kindly provide a valid token in the header\"}), 401)\n\n if not admin:\n return make_response(\n jsonify({\"message\": \"you are not authorized to perform this function as a non-admin user\"}), 401)\n\n return f(*args, **kwargs)\n\n return decorated", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def admin_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n if request.user != config['server']['admin_username']:\n return web.json_response({'status': 'error', 'message': 'admin rights required'}, status=403)\n return func(request)\n return wrapper", "def protected():\n return jsonify(message=f'protected endpoint (allowed user {flask_praetorian.current_user().username})')", "def non_admin_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n isAdmin = get_current_user()[\"isAdmin\"]\n if isAdmin == True:\n return jsonify({\"messsage\": \"Only Non admin can access this route\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def admin_required(handler):\n def admin_login(self, *args, **kwargs):\n auth = self.auth\n if not auth.get_user_by_session():\n self.redirect('/auth/login', abort=True)\n \n user = auth.get_user_by_session()\n queried_entity = User.get_by_id(user['user_id'])\n \n if queried_entity and queried_entity.phb_user_admin_status == 'admin-1':\n return handler(self, *args, **kwargs)\n else:\n self.redirect('/', abort = True)\n \n return admin_login", "def LoginCheck():\n jwt_data = get_jwt()\n if jwt_data['roles'] != 'admin':\n return jsonify(msg=\"Permission denied\"), Status.HTTP_BAD_FORBIDDEN\n\n identity = get_jwt_identity()\n if not identity:\n return jsonify({\"msg\": \"Token invalid\"}), Status.HTTP_BAD_UNAUTHORIZED\n\n data = {\"msg\": \"Loggeed In\"}\n json_response = json.dumps(data)\n return Response(json_response,\n 
status=Status.HTTP_OK_BASIC,\n mimetype='application/json')", "def require_admin_login(handler_method):\n\n def wrapper(self, *args, **kwargs):\n \"\"\" Verifies that the calling user is an administrator of the application before calling the\n decorated handler\n\n Parameters:\n :param args: the arguments for the decorated function\n :param kwargs: the keyword arguments for the decorated function\n\n Returns:\n :return: the decorated function result if the access token was valid; otherwise it\n send an error response and returns None\n \"\"\"\n user = users.get_current_user()\n if not user:\n self.write_error(401)\n elif not users.is_current_user_admin():\n self.write_error(403)\n else:\n handler_method(self, *args, **kwargs)\n\n return wrapper", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def requires_admin(original_route):\n @functools.wraps(original_route)\n def wrapper(config, *args, **kwargs):\n if \"Authorization\" not in request.headers:\n raise MissingAuthorization()\n auth_header_value = request.headers[\"Authorization\"]\n if not auth_header_value.startswith(\"Bearer \"):\n raise InvalidAuthHeaderValue()\n auth_token_value = auth_header_value.split(\"Bearer \", 1)[1]\n tokens = get_valid_admin_tokens()\n if auth_token_value not in tokens:\n raise InvalidAuthorization()\n return original_route(config, *args, **kwargs)\n return wrapper", "def decorated(*args, **kwargs):\n\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\": \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n admin = data['is_admin']\n except:\n return make_response(jsonify({\"message\": \"kindly provide a valid token in the header\"}), 401)\n\n if not admin:\n return make_response(\n jsonify({\"message\": \"you are not authorized to perform this function as a non-admin user\"}), 401)\n\n return f(*args, **kwargs)", "def require_admin(f):\n\n @require_login\n @wraps(f)\n def wrapper(*args, **kwds):\n if not api.user.get_user().get(\"admin\", False):\n raise PicoException(\n \"You do not have permission to access this resource\", 403\n )\n return f(*args, **kwds)\n\n return wrapper", "def protected():\n message = \"\"\n if flask_praetorian.current_user().roles == \"admin\":\n message = f\"welcome {flask_praetorian.current_user().username}, this is protected endpoint\"\n else:\n message = f'Endpoint not allowed for user {flask_praetorian.current_user().username}'\n return {\"message\": message}", "def check_auth():\n if not current_user.is_authenticated:\n return render_template('401.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 401\n for role in current_user.roles:\n if appbuilder.get_app.config['AUTH_ROLE_ADMIN'] == role.name:\n return None\n return render_template('403.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 403", "def admin_required(f):\n def decorator(*args, **kwargs):\n if \"user\" not in g:\n abort(401)\n if not g.user.admin:\n abort(403)\n return f(*args, **kwargs)\n return decorator", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def web_admin_required(handler):\n\n def check_admin(self, *args, **kwargs):\n \"\"\"\n If handler has 
no login_url specified invoke a 403 error\n \"\"\"\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)\n\n return check_admin", "def authorize_admin(self, instance):\n\n # Authorize user admin.\n instance.client.post(\n reverse(\"login\"),\n {\"username\": \"admin\", \"password\": \"admin\"},\n )\n return instance.client.get(reverse(\"edit\"))", "def admin_required(f): # pragma: no cover\r\n @wraps(f)\r\n def decorated_function(*args, **kwargs):\r\n if current_user.admin:\r\n return f(*args, **kwargs)\r\n else:\r\n return abort(403)\r\n return decorated_function", "def admin_required(f):\n @wraps(f)\n def admin_decorator(*args, **kwargs):\n if session.get('logged_in') and session.get('type') == 'Admin':\n return f(*args, **kwargs)\n else:\n abort(401)\n return admin_decorator", "def test_01_admin_index_authenticated(self):\r\n self.register()\r\n self.signout()\r\n self.register(name=\"tester2\", email=\"[email protected]\",\r\n password=\"tester\")\r\n res = self.app.get(\"/admin\", follow_redirects=True)\r\n err_msg = (\"The user should not be able to access this page\"\r\n \" but the returned status is %s\" % res.status)\r\n assert \"403 FORBIDDEN\" in res.status, err_msg", "def jwt_permission(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n auth_token = request.headers.environ.get('HTTP_AUTHORIZATION', '').split(' ')\n if len(auth_token) < 2:\n abort(403, \"Authentication fails\")\n\n JwtAuth.decode_auth_token(auth_token[1])\n return func(*args, **kwargs)\n return wrapper", "def test_admin_accessible(self) -> None:\n response = self.client.get(\"/admin/\")\n self.assertEqual(200, response.status_code)", "def authorization():\n pass", "def admin_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if current_user.is_admin:\n return func(*args, **kwargs)\n else:\n return login_manager.unauthorized()\n\n return wrapper", "def require_admin(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return redirect(url_for('admin.login'))\n if g.user.name != 'admin':\n abort(403)\n return func(*args, **kwargs)\n\n return decorator", "def log_in(jwt):\n return current_app.library_registry.admin_controller.log_in(jwt)" ]
[ "0.7161469", "0.70733166", "0.70655507", "0.6944064", "0.68472445", "0.6795886", "0.6738831", "0.665472", "0.6586904", "0.65524095", "0.6508487", "0.6495549", "0.6462605", "0.6440125", "0.6429783", "0.6428201", "0.641124", "0.63652575", "0.63415575", "0.633403", "0.6315441", "0.6299292", "0.6283528", "0.62693423", "0.6267117", "0.6259953", "0.6217808", "0.6213833", "0.62083375", "0.6191861" ]
0.73043376
0
Assert that bust_fragments applies with the minimum args.
def test_min_args(self): bust_fragments(self.resp, '/foo/bar') self.assert_header_set('["/foo/bar"]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fail_missing_signature_fragment_underflow(self):\n # Adjust bundle balance, since we will also remove the change\n # transaction.\n self.bundle[0].value += self.bundle[-1].value\n\n # Remove the last input's second signature fragment, and the change\n # transaction.\n del self.bundle.transactions[-2:]\n for txn in self.bundle:\n txn.last_index -= 2\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 4 has invalid signature (using 2 fragments).',\n ],\n )", "def check_fragment(self, fragment: Package, source: tuple, destination: tuple) -> bool:\n return True", "def test_fail_signature_fragment_address_wrong(self):\n self.bundle[5].address =\\\n Address(\n b'QHEDFWZULBZFEOMNLRNIDQKDNNIELAOXOVMYEI9P'\n b'GNFDPEEZCWVYLKZGSLCQNOFUSENIXRHWWTZFBXMPS'\n )\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n # The bundle validator uses the address to link inputs\n # together, so if it encounters a different address, then it\n # assumes it has found a new input.\n 'Transaction 4 has invalid signature (using 1 fragments).',\n ],\n )", "def test_fail_signature_fragment_value_wrong(self):\n # Don't forget to adjust the change transaction, in order to ensure\n # the bundle has a zero balance.\n self.bundle[5].value = -1\n self.bundle[-1].value += 1\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n 'Transaction 5 has invalid value (expected 0, actual -1).',\n ],\n )", "def test_block_bad_consensus(self):\n pass", "def test_block_bad_signature(self):\n pass", "def is_valid_fragment(args, skip=False):\n if (is_valid_file_and_directory(args) or is_valid_command(args)) or skip:\n if args.fragment is not None and args.count is not None:\n return True\n return False", "def _tcp_frag_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n dict_check(var, func=func)\n bufid = var.get('bufid')\n ip_check(bufid[0], bufid[1], func=func)\n bytearray_check(var.get('payload'), func=func)\n bool_check(var.get('syn'), var.get('fin'), func=func)\n int_check(bufid[2], bufid[3], var.get('num'), var.get('ack'), var.get('dsn'),\n var.get('first'), var.get('last'), var.get('len'), func=func)", "def test_frag_1(self):\n self.vapi.cli(\"clear errors\")\n malformed_packets = [\n (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IP(\n id=7,\n len=21,\n flags=\"MF\",\n frag=0,\n ttl=64,\n src=self.src_if.remote_ip4,\n dst=self.dst_if.remote_ip4,\n )\n / ICMP(type=\"echo-request\")\n ),\n (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IP(\n id=7,\n len=21,\n frag=1,\n ttl=64,\n src=self.src_if.remote_ip4,\n dst=self.dst_if.remote_ip4,\n )\n / Raw(load=b\"\\x08\")\n ),\n ]\n\n p = (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IP(id=1000, src=self.src_if.remote_ip4, dst=self.dst_if.remote_ip4)\n / UDP(sport=1234, dport=5678)\n / Raw(b\"X\" * 1000)\n )\n valid_fragments = fragment_rfc791(p, 400)\n\n self.pg_enable_capture()\n self.src_if.add_stream(malformed_packets + valid_fragments)\n self.pg_start()\n\n self.dst_if.get_capture(1)\n\n self.assert_packet_counter_equal(\"ip4-full-reassembly-feature\", 1)\n # TODO remove above, uncomment below once clearing of counters\n # is supported\n # self.assert_packet_counter_equal(\n # 
\"/err/ip4-full-reassembly-feature/reass_malformed_packet\", 1)", "def _ip_frag_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n dict_check(var, func=func)\n bufid = var.get('bufid')\n str_check(bufid[3], func=func)\n bool_check(var.get('mf'), func=func)\n ip_check(bufid[0], bufid[1], func=func)\n bytearray_check(var.get('header'), var.get('payload'), func=func)\n int_check(bufid[2], var.get('num'), var.get('fo'),\n var.get('ihl'), var.get('tl'), func=func)", "def test_bunch_of_fragments(self):\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n self.send_and_expect(self.pg0, frags, self.pg0, n_rx=1)\n\n inc_frag = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / IPv6ExtHdrFragment(id=1, nh=58, offset=608)\n / Raw(\"X\" * 308)\n )\n\n self.send_and_assert_no_replies(self.pg0, inc_frag * 604)\n\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.pg0, [pkt], self.pg0)\n self.assertNotIn(IPv6ExtHdrFragment, rx)", "def frag_check(*args, protocol, func=None):\n func = func or inspect.stack()[2][3]\n if 'IP' in protocol:\n _ip_frag_check(*args, func=func)\n elif 'TCP' in protocol:\n _tcp_frag_check(*args, func=func)\n else:\n raise FragmentError('Unknown fragmented protocol {}.'.format(protocol))", "def test_fragment(self):\n # Test that an unknown arg results in the return of the raw value in\n # an appropriatly formatted span\n\n # Generic call results in the span\n result = fragment(\"foo\", arg=\"bar\", autoescape=True)\n expected = u'<span class=\"bar\">foo</span>'\n self.assertEquals(expected, result) \n\n # multiple classes in the arg result in the correct class\n result = fragment(\"foo\", arg=\"bar baz\", autoescape=True)\n expected = u'<span class=\"bar baz\">foo</span>'\n self.assertEquals(expected, result) \n\n # override the formatting of date-time data\n dt = datetime.datetime.today()\n result = fragment(dt, arg=\"dtstart %a %b %d %Y\", autoescape=True)\n expected = u'<abbr class=\"dtstart\" title=\"%s\">%s</abbr>' % (\n dt.isoformat(),\n dt.strftime('%a %b %d %Y')\n )\n self.assertEquals(expected, result)\n result = fragment(dt, arg=\"dtstart right now\", autoescape=True)\n expected = u'<abbr class=\"dtstart\" title=\"%s\">right now</abbr>' % (\n dt.isoformat(),\n )\n self.assertEquals(expected, result)\n result = fragment(dt, arg=\"dtstart\", autoescape=True)\n expected = u'<abbr class=\"dtstart\" title=\"%s\">%s</abbr>' % (\n dt.isoformat(),\n dt.strftime('%c')\n )\n self.assertEquals(expected, result)\n\n # Check for geo related abbr pattern\n result = fragment(37.408183, arg=\"latitude\", autoescape=True)\n expected = u'<abbr class=\"latitude\" title=\"37.408183\">37.408183</abbr>'\n self.assertEquals(expected, result)\n\n result = fragment(37.408183, arg=\"lat\", autoescape=True)\n self.assertEquals(expected, result)\n\n result = fragment(-122.13855, arg=\"longitude\", autoescape=True)\n expected = u'<abbr class=\"longitude\" title=\"-122.13855\">-122.13855</abbr>'\n self.assertEquals(expected, result)\n\n result = fragment(-122.13855, arg=\"long\", autoescape=True)\n self.assertEquals(expected, result)\n\n # Check for email address anchor element 
(this depends on the value\n # of the field *NOT* the name of the class passed as an arg)\n result = fragment('[email protected]', arg='foo', autoescape=True)\n expected = u'<a class=\"foo\" href=\"mailto:[email protected]\">[email protected]</a>'\n self.assertEquals(expected, result)\n\n # Check for URL anchor element (works in the same way as email but\n # with a different regex)\n result = fragment('http://foo.com', arg='bar', autoescape=True)\n expected = u'<a class=\"bar\" href=\"http://foo.com\">http://foo.com</a>'\n self.assertEquals(expected, result)\n\n # Lets make sure we can handle ints and floats\n result = fragment(1.234, arg='foo', autoescape=True)\n expected = u'<span class=\"foo\">1.234</span>'\n self.assertEquals(expected, result)\n\n result = fragment(1234, arg='foo', autoescape=True)\n expected = u'<span class=\"foo\">1234</span>'\n self.assertEquals(expected, result)", "def test_fail_signature_invalid(self):\n self.bundle[5].signature_message_fragment[:-1] = b'9'\n\n validator = BundleValidator(self.bundle)\n\n self.assertFalse(validator.is_valid())\n\n self.assertListEqual(\n validator.errors,\n\n [\n # Transaction 5's fragment is invalid, but the validator has no\n # way to determine this, so it just assumes the entire input is\n # invalid (the first transaction for this input is at index 4).\n 'Transaction 4 has invalid signature (using 3 fragments).',\n ],\n )", "def assert_xblocks_are_good(self, block):\r\n scope_ids = block.scope_ids\r\n self.assertIsNotNone(scope_ids.usage_id)\r\n self.assertIsNotNone(scope_ids.def_id)\r\n\r\n for child_id in block.children:\r\n child = block.runtime.get_block(child_id)\r\n self.assert_xblocks_are_good(child)", "def testCheckMoveOperation_FailStagnantBlocks(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n op = update_metadata_pb2.InstallOperation()\n op.type = common.OpType.MOVE\n\n self.AddToMessage(op.src_extents,\n self.NewExtentList((1, 4), (12, 2), (1024, 128)))\n self.AddToMessage(op.dst_extents,\n self.NewExtentList((8, 128), (512, 6)))\n self.assertRaises(\n PayloadError, payload_checker._CheckMoveOperation,\n op, None, 134, 134, 'foo')", "def test_minimum_args(self) -> None:\n schema = JSONSchema()\n self.assertIsInstance(schema.schema, str)\n self.assertIsNone(schema.title)\n self.assertIsNone(schema.description)", "def test_invalid_frag_size(self):\n p = (\n Ether(dst=self.src_if.local_mac, src=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.src_if.local_ip6)\n / UDP(sport=1234, dport=5678)\n / Raw()\n )\n self.extend_packet(p, 1000, self.padding)\n fragments = fragment_rfc8200(p, 1, 500)\n bad_fragment = fragments[0]\n self.extend_packet(bad_fragment, len(bad_fragment) + 5)\n self.pg_enable_capture()\n self.src_if.add_stream([bad_fragment])\n self.pg_start()\n pkts = self.src_if.get_capture(expected_count=1)\n icmp = pkts[0]\n self.assertIn(ICMPv6ParamProblem, icmp)\n self.assert_equal(icmp[ICMPv6ParamProblem].code, 0, \"ICMP code\")", "def test_one_fragment(self):\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n\n # send a fragment with known id\n self.send_and_assert_no_replies(self.pg0, [frags[0]])\n\n # send an atomic fragment with same id - should be reassembled\n pkt = (\n Ether(src=self.pg0.local_mac, dst=self.pg0.remote_mac)\n / IPv6(src=self.pg0.remote_ip6, dst=self.pg0.local_ip6)\n / 
IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.pg0, [pkt], self.pg0)\n self.assertNotIn(IPv6ExtHdrFragment, rx)\n\n # now finish the original reassembly, this should still be possible\n rx = self.send_and_expect(self.pg0, frags[1:], self.pg0, n_rx=1)\n self.assertNotIn(IPv6ExtHdrFragment, rx)", "def DontFragment(self) -> bool:", "def DontFragment(self) -> bool:", "def test_valid_balance_genesis(self):\n db = MockDatabase()\n prev = TestBlock(block_type=BlockTypes.CHECKPOINT, transaction={'balance': 0})\n result, errors = prev.validate_transaction(db)\n self.assertEqual(result, ValidationResult.valid)\n self.assertEqual(errors, [])\n db.add_block(prev)", "def testCheckReplaceBzOperation(self):\n payload_checker = checker.PayloadChecker(self.MockPayload())\n block_size = payload_checker.block_size\n data_length = block_size * 3\n\n op = self.mox.CreateMock(\n update_metadata_pb2.InstallOperation)\n op.type = common.OpType.REPLACE_BZ\n\n # Pass.\n op.src_extents = []\n self.assertIsNone(\n payload_checker._CheckReplaceOperation(\n op, data_length, (data_length + block_size - 1) / block_size + 5,\n 'foo'))\n\n # Fail, src extents founds.\n op.src_extents = ['bar']\n self.assertRaises(\n PayloadError, payload_checker._CheckReplaceOperation,\n op, data_length, (data_length + block_size - 1) / block_size + 5, 'foo')\n\n # Fail, missing data.\n op.src_extents = []\n self.assertRaises(\n PayloadError, payload_checker._CheckReplaceOperation,\n op, None, (data_length + block_size - 1) / block_size, 'foo')\n\n # Fail, too few blocks to justify BZ.\n op.src_extents = []\n self.assertRaises(\n PayloadError, payload_checker._CheckReplaceOperation,\n op, data_length, (data_length + block_size - 1) / block_size, 'foo')", "def test_noArgs(self):\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([])\n\n self.assertEqual(\n e.exception.args, (\"Must specify one argument: the Twisted checkout\",)\n )", "def test_bunch_of_fragments(self):\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / ICMPv6EchoRequest()\n / Raw(\"X\" * 1600)\n )\n frags = fragment_rfc8200(pkt, 1, 400)\n rx = self.send_and_expect(self.src_if, frags, self.dst_if)\n\n rogue = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / IPv6ExtHdrFragment(id=1, nh=58, offset=608)\n / Raw(\"X\" * 308)\n )\n\n self.send_and_expect(self.src_if, rogue * 604, self.dst_if)\n\n pkt = (\n Ether(src=self.src_if.local_mac, dst=self.src_if.remote_mac)\n / IPv6(src=self.src_if.remote_ip6, dst=self.dst_if.remote_ip6)\n / IPv6ExtHdrFragment(id=1)\n / ICMPv6EchoRequest()\n )\n rx = self.send_and_expect(self.src_if, [pkt], self.dst_if)", "def test_vargs(self):", "def run_fragment(args):\n if args.shift > args.read_len:\n warnings.warn(\"Shift (\" + str(args.shift) + \") is larger than read length (\" + str(args.read_len) +\n \")!\")\n frag_genomes(args)", "def test_12(self):\n assert 'False' == Api.requestBlock('test-12')", "def test_42(self):\n assert 'True' == Api.requestBlock('test-42')", "def _find_verify_arguments(filters):\n if (\"minsize\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"minsize\"]):\n exit_with_error(\"Maximum size cannot be less than minimum size.\")\n if (\"size\" in filters and \"maxsize\" in filters and\n filters[\"maxsize\"] < filters[\"size\"]):\n 
exit_with_error(\"Maximum size cannot be less than (exact) size.\")\n if (\"size\" in filters and \"minsize\" in filters and\n filters[\"minsize\"] > filters[\"size\"]):\n exit_with_error(\"Minimum size cannot be more than (exact) size.\")" ]
[ "0.60589397", "0.5724343", "0.5696273", "0.5563291", "0.5479956", "0.54702187", "0.5453022", "0.5393938", "0.5370606", "0.53701276", "0.53657234", "0.5336989", "0.5317501", "0.5313356", "0.5251449", "0.52381754", "0.52079916", "0.5144766", "0.51337266", "0.51300526", "0.51300526", "0.5104098", "0.50973946", "0.5096623", "0.5073873", "0.50609046", "0.50552857", "0.5042042", "0.5022975", "0.50005513" ]
0.6328368
0
Assert that bust_fragments applies a list properly.
def test_list(self): bust_fragments(self.resp, ['/foo/bar', '/zip/zap']) self.assert_header_set('["/foo/bar", "/zip/zap"]')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_list_field():", "def verify_response_block_list(self, response):\n self.assertSetEqual(\n {block['id'] for block in response.data},\n self.non_orphaned_block_usage_keys,\n )", "def test_POST_list(self):\n\t\t# cleaner's lists should originally be empty\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual([], data['lists'])\n\n\t\t# after posting list, cleaner's lists should contain just id of posted list\n\t\tself.POST_list()\n\t\tdata = self.GET_data('/api/cleaner/' + self.cleaner['_id'])\n\t\tself.assertEqual(1, len(data['lists']))\n\t\tself.assertEqual(self.list_id, data['lists'][0])", "def check_for_list(check):", "def test_merge_list_empty(short_ll, empty_ll):\n assert ml(short_ll, empty_ll) == 8\n assert len(short_ll) == 4", "def test_list_identity(self):\n pass", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def test_post_order_list(self):\n _expected_list = [13, 5, 103, 57, 23]\n\n _output_list = []\n\n # Call post_order_list to test\n post_order_list(self.root, _output_list)\n\n # We just want to test the values\n # so make a list from the list of objects\n _post_order_output = [x.get_value() for x in _output_list]\n\n assert len(_expected_list) == len(_output_list)\n assert _expected_list == _post_order_output", "def test_list(self):\n self.assertValue(\n ['foo', 'bar', 'hello'],\n 'foo\\nbar\\nhello\\n')", "def test_listfield(self):\n self.assertEqual(self.scraped.urls, ['http://google.com', 'http://apple.com'])\n self.assertEqual(self.scraped.in_divs, ['Nested'])", "def testReplaceWithList(self):\n\n # Bypass setter\n self.node._desc = [\n 'first description',\n 'second description',\n 'third description'\n ]\n\n self.node.desc = [\n 'forth description',\n 'fifth description',\n 'sixth description'\n ]\n\n self.assertEqual(\n [\n 'forth description',\n 'fifth description',\n 'sixth description'\n ],\n self.node.desc\n )", "def _compare_list(self, name, actual, expect):\n self.op_test.assertListEqual(\n actual.recursive_sequence_lengths(),\n expect[1],\n \"Output (\" + name + \") has different lod at \" + str(place),\n )", "def test_list_differences():\n mock_list_a = ['a', 'b', 'c', 'd', 'e']\n mock_list_b = ['a', 'b', 'c']\n output = sh.list_differences(mock_list_a, mock_list_b)\n assert output == ['d', 'e']\n output = sh.list_differences(mock_list_b, mock_list_a)\n assert output == []", "def test_empty_list_error(self):\n with self.assertRaises(ValueError):\n function_inclusion_filter_builder([])", "def _list_assert(actual_list, expected_list):\n for actual, expected in itertools.izip_longest(actual_list, expected_list):\n _value_assert(None, actual, expected)", "def test_serialize_list():\n assert bytes([\n *UnsignedInt.to_bytes(3), # Number of values\n *String.to_bytes(\"Hello, world!\"),\n *String.to_bytes(\"This is the middle value.\"),\n *String.to_bytes(\"Goodbye, world!\")\n ]) == bytes(List(String).to_bytes([\n \"Hello, world!\",\n \"This is the middle value.\",\n \"Goodbye, world!\",\n ]))", "def test_list_group(self):\n pass", "def test_list(self, items: list) -> None:\r\n if not isinstance(items, list):\r\n raise ValueError(f'Expected list, but received {type(items)}')\r\n for item in items:\r\n if isinstance(item, dict):\r\n self.test_dict(dictionary=item)\r\n elif isinstance(item, list):\r\n self.test_list(items=item)", "def 
test_serializer_dump_load(lists_of_things):\n ids = [_dump(thing) for thing in lists_of_things]\n actual = [list(_load(id)) for id in ids]\n assert lists_of_things == actual", "def test_string_list(self):\n \n self.assertListEqual(\n [\n [5, 3, 1],\n [3, 5],\n [20, 13, 0]\n ],\n [\n maps.map_list(['hello', 'why', 'y']),\n maps.map_list(['yes', 'enjoy']),\n maps.map_list(['15236487921068952470', 'commemoration', ''])\n ])", "def test_keep_list(self):\n input_item = self.item_class(name=[\"foo\", \"bar\"])\n il = ItemLoader(item=input_item)\n loaded_item = il.load_item()\n self.assertIsInstance(loaded_item, self.item_class)\n self.assertEqual(ItemAdapter(loaded_item).asdict(), {\"name\": [\"foo\", \"bar\"]})", "def test_list_compositions(self):\n pass", "def test_compact_list(self):\n\n\t\tself.n = tracker.peer_list([(\"test1\", \"100.100.100.100\", \\\n\t\t\t\"1000\")], True)\n\t\tself.assertEqual(self.n, \"dddd\\x03\\xe8\")", "def testRaisesErrorFragmentList(self):\n with self.assertRaises(IOError):\n self.tree.get_fragment_list()", "def test_store_list(self):\n array = [1,2,3,'a','b',{'c':4, 'e':['t', 'y']}]\n self._test_storable(array)", "def test_setlist(self):\n self.assertEqual(self.show.setlist, [])", "def test_list_ordering(self) -> None:\n list1 = List.objects.create()\n item1 = Item.objects.create(list=list1, text=\"i1\")\n item2 = Item.objects.create(list=list1, text=\"item 2\")\n item3 = Item.objects.create(list=list1, text=\"3\")\n self.assertEqual(list(Item.objects.all()), [item1, item2, item3])", "def test_for_empty_list(self):\n emptylist = []\n self.assertEqual(self.place.amenity_ids, emptylist)" ]
[ "0.58295023", "0.58295023", "0.581594", "0.5727991", "0.56551546", "0.5616029", "0.5605433", "0.5574228", "0.54922616", "0.5477983", "0.5477703", "0.5476501", "0.5475215", "0.5468897", "0.5465023", "0.5427141", "0.5424886", "0.54224586", "0.5414302", "0.5377393", "0.53766316", "0.53679156", "0.53361785", "0.53318524", "0.5314581", "0.5312579", "0.53023386", "0.53017306", "0.5299257", "0.52663" ]
0.6258605
0
Terms is a list of dictionaries representing a disjunction (or) of conjunctions (and). Alternatively, you can specify kwargs only, resulting in a single term.
def __init__(self, *terms, **kwargs): self.missing = kwargs.pop('_key_missing_', False) if terms and kwargs: raise ValueError("You must specify terms or kwargs, not both") self.terms = [] for t in terms: self.add_term(t) self.add_term(kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terms(self) -> Tuple[Term, ...]:\n ...", "def build_search_terms(self, kwds):\n combined_keywords = \"\"\n for i in range(len(kwds)):\n if i != len(kwds)-1:\n combined_keywords += kwds[i] + ' OR '\n else:\n combined_keywords += kwds[i] + ' '\n\n return combined_keywords", "def any_term(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"anyTerm\", [*terms])", "def terms(self, filters={}):\n return self.__get_list_client(Term)(filters=filters)", "def combine_searchterms(terms):\r\n combined = {}\r\n\r\n for (name,val) in terms:\r\n combined[name] = combined.get(name,[]) + [val]\r\n\r\n ret = []\r\n\r\n for (name,vals) in combined.iteritems():\r\n if len(vals) == 1:\r\n ret.append(\"%s:%s\" % (name,vals[0]))\r\n else:\r\n ret.append(\"%s:(%s)\" % (name,\" \".join(vals)))\r\n\r\n if len(ret) > 1:\r\n ret = \"(%s)\" % \" OR \".join(ret)\r\n else:\r\n ret = \" \".join(ret)\r\n\r\n return ret", "def terms(self, terms):\n\n self._terms = terms", "def all_terms(cls, *terms: str) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allTerms\", [*terms])", "def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True", "def parse_search_terms(self, search_terms):\n # v. kludgish.. TODO: do this a little more gracefully\n self.search_terms, b = [], []\n for index, st in enumerate(search_terms):\n if st == \"OR\":\n self.search_terms.append(b)\n b = []\n else:\n b.append(st)\n\n if index+1 == len(search_terms):\n self.search_terms.append(b)", "def suggest_terms(self, fields, prefix, handler=\"terms\", **kwargs):\n params = {\"terms.fl\": fields, \"terms.prefix\": prefix}\n params.update(kwargs)\n response = self._suggest_terms(params, handler=handler)\n result = self.decoder.decode(response)\n terms = result.get(\"terms\", {})\n res = {}\n\n # in Solr 1.x the value of terms is list of elements with the field name\n # and a flat list of value, count pairs:\n # [\"field_name\", [\"dance\", 23, \"dancers\", 10, …]]\n #\n # in Solr 3+ the value of terms is a dict of field name and a flat list of\n # value, count pairs: {\"field_name\": [\"dance\", 23, \"dancers\", 10, …]}\n if isinstance(terms, (list, tuple)):\n terms = dict(zip(terms[0::2], terms[1::2]))\n\n for field, values in terms.items():\n tmp = []\n\n while values:\n tmp.append((values.pop(0), values.pop(0)))\n\n res[field] = tmp\n\n self.log.debug(\n \"Found '%d' Term suggestions results.\", sum(len(j) for i, j in res.items())\n )\n return res", "def find(self, search_terms, _keywords=None):\n objects = super().get_queryset().order_by(\"name\")\n term_query = Q()\n for t in search_terms:\n term_query.add(Q(name__iexact=t), Q.OR)\n term_query.add(Q(search_tokens__icontains=t), Q.OR)\n return objects.filter(term_query)", "def __init__(self, **terms):\n self.terms = []\n for op, v in terms.iteritems():\n # use '_xx' to differentiate terms with same operator\n op = op.split('_')[0]\n if op == 'search':\n val = RegExp(v)\n elif op == 'match':\n val = RegExp(v, match=True)\n else:\n val = v\n try:\n op = self.operators[op]\n except KeyError:\n raise UnknownOperatorError(\"Operator '%s'\" % op)\n self.terms.append((op, val))", "def _do_conjunction(self, _and=(\"and\", \"e\", \"en\", \"et\", \"und\", \"y\")):\n w = self.words\n if len(w) > 2 and w[-2].type == \"CC\" and w[-2].chunk is None:\n cc = w[-2].string.lower() in _and and AND or OR\n ch1 = w[-3].chunk\n ch2 = w[-1].chunk\n if ch1 is not None and \\\n ch2 is not None:\n 
ch1.conjunctions.append(ch2, cc)\n ch2.conjunctions.append(ch1, cc)", "def terms(self) -> CustomTerms:\n enforce(self.is_set(\"terms\"), \"'terms' content is not set.\")\n return cast(CustomTerms, self.get(\"terms\"))", "def or_keywords(self, ored):\n self._orKw = ored", "def terms(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"terms\")", "def __init__(self, terms, *interfaces):\n self.by_value = {}\n self.by_token = {}\n self._terms = []\n for term in terms:\n if term.value in self.by_value:\n raise ValueError(\n 'term values must be unique: %s' % repr(term.value))\n if term.token in self.by_token:\n raise ValueError(\n 'term tokens must be unique: %s' % repr(term.token))\n self.by_value[term.value] = term\n self.by_token[term.token] = term\n self._terms.append(term)\n if interfaces:\n directlyProvides(self, *interfaces)", "def advanced_search(self, terms, relation=None, index=0, limit=25, **kwargs):\n if not isinstance(terms, dict):\n raise TypeError(\"terms must be a dict\")\n # terms are sorted (for consistent tests between Python < 3.7 and >= 3.7)\n query = \" \".join(sorted(f'{k}:\"{v}\"' for (k, v) in terms.items()))\n return self.get_object(\n \"search\", relation=relation, q=query, index=index, limit=limit, **kwargs\n )", "def findTerms(self, text, terms, scope=50, includeAll=True):\n\t\tlistOfResults = list()\n\t\tlistOfMatchesMain = list()\n\t\tlistOfMatchesSecondary = list()\n\n\t\tappend = listOfResults.append\n\t\treplace\t= str.replace\n\n\t\tkeywordIndices = self.find(text, terms[0])\n\n\t\t# loop through the indices and check for dependencies if terms list has more than 1 term\n\t\tfor indices in keywordIndices:\n\n\t\t\tleading = text[indices[0]-scope:indices[0]]\n\t\t\ttrailing = text[indices[0]:indices[0]+scope]\n\n\t\t\tleading = replace(replace(leading, '\\n', '_'), '\\t', ' ') \n\t\t\ttrailing = replace(replace(trailing, '\\n', '_'), '\\t', ' ') \n\n\t\t\t# if terms list has more than 1 term (i.e., contextual terms), see if present within scope\n\t\t\tif len(terms) > 1:\n\n\t\t\t\t# loop through the contextual terms and check for presence within scope\n\t\t\t\tfor term in terms[1:]:\n\n\t\t\t\t\t# if term in either leading or trailing\n\t\t\t\t\tif (replace(term, '*', '') in leading.lower()) or (replace(term, '*', '') in trailing.lower()):\n\n\t\t\t\t\t\t# if '*' in term, do not add this context\n\t\t\t\t\t\tif '*' in term:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\t\t# if '*' not indicated, add this context\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t'+term)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tappend(excerpt)\n\n\t\t\t# if terms list has 1 term, just append the excerpt\n\t\t\telse:\n\n\t\t\t\texcerpt = leading + trailing\n\n\t\t\t\tif excerpt not in listOfResults:\n\t\t\t\t\tif includeAll==True:\n\t\t\t\t\t\tappend(excerpt+'\\t'+text[indices[0]:indices[1]]+'\\t')\n\t\t\t\t\telse:\n\t\t\t\t\t\tappend(excerpt)\n\n\t\treturn listOfResults", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def tag_terms(text, 
terms, nlp=None):\n from spacy.lang.en.stop_words import STOP_WORDS\n spacy.tokens.token.Token.set_extension('workaround', default='', force=True)\n \n HEURISTIC_TOKENS = [\"-\", \"plant\", \"substance\", \"atom\"]\n \n # default to Stanford NLP pipeline wrapped in Spacy\n if nlp is None:\n snlp = stanfordnlp.Pipeline(lang=\"en\")\n nlp = StanfordNLPLanguage(snlp)\n \n # preprocess with spacy if needed\n if type(terms[0]) != spacy.tokens.doc.Doc:\n terms = [nlp(term) for term in terms]\n if (type(text) != spacy.tokens.doc.Doc and type(text) != spacy.tokens.span.Span):\n text = nlp(text)\n \n # set up a custom representation of the text where we can add term type annotations\n for token in text:\n token._.workaround = token.text_with_ws\n\n lemmatized_text = [token.lemma_ for token in text]\n tokenized_text = [token.text for token in text]\n tags = ['O'] * len(text)\n found_terms = defaultdict(lambda: {\"text\": [], \"indices\": [], \"pos\": [], \"type\": []})\n \n # iterate through terms from longest to shortest\n terms = sorted(terms, key=len)[::-1]\n for spacy_term in terms:\n term_length = len(spacy_term)\n lemma_term_list = [token.lemma_ for token in spacy_term]\n text_term_list = [token.text for token in spacy_term]\n term_lemma = \" \".join(lemma_term_list)\n \n # skip short acronyms that can cause problems\n if len(term_lemma) <= 2:\n continue\n \n # additional check to check for simple plural of uncommon biology terms\n match_uncommon_plural = lemma_term_list.copy()\n match_uncommon_plural[-1] = match_uncommon_plural[-1] + \"s\"\n\n # additional check using heuristics on lemmatized version\n match_heuristic = []\n if lemma_term_list[0] not in HEURISTIC_TOKENS:\n for token in lemma_term_list:\n if token not in HEURISTIC_TOKENS:\n match_heuristic += token.split(\"-\")\n heuristic_length = len(match_heuristic)\n else:\n heuristic_term = lemma_term_list\n heuristic_length = len(lemma_term_list)\n \n for ix in range(len(text) - term_length):\n \n heuristic_match = (lemmatized_text[ix:ix + heuristic_length] == match_heuristic)\n plural_match = (lemmatized_text[ix:ix + term_length] == match_uncommon_plural)\n lemma_match = (lemmatized_text[ix:ix + term_length] == lemma_term_list)\n text_match = (tokenized_text[ix:ix + term_length] == text_term_list)\n lower_match = ([t.lower() for t in tokenized_text[ix:ix + term_length]] ==\n [t.lower() for t in text_term_list])\n \n # Only match on text if lemmatized version is a stop word (i.e. 
lower casing acronym)\n if term_lemma in STOP_WORDS:\n valid_match = text_match\n else:\n valid_match = heuristic_match or plural_match or text_match or lemma_match or lower_match\n \n if valid_match:\n \n if heuristic_match and not lemma_match:\n match_length = heuristic_length\n else:\n match_length = term_length\n \n term_text = \" \".join([t.text for t in text[ix:ix + match_length]])\n term_tag = \" \".join([t.tag_ for t in text[ix:ix + match_length]])\n \n # only tag term if not part of larger term\n if tags[ix:ix + match_length] == [\"O\"] * match_length:\n \n # classify term type\n term_type = determine_term_type(spacy_term)\n \n # collect term information\n found_terms[term_lemma][\"text\"].append(term_text)\n found_terms[term_lemma][\"indices\"].append((ix, ix + match_length))\n found_terms[term_lemma][\"pos\"].append(term_tag)\n found_terms[term_lemma][\"type\"].append(term_type)\n \n # update sentence tags\n tags = tag_bioes(tags, ix, match_length)\n \n # annotate token representations with term type\n text[ix]._.workaround = f\"<{term_type}>\" + text[ix]._.workaround\n end_ix = ix + match_length - 1\n if text[end_ix]._.workaround.endswith(\" \"):\n text[end_ix]._.workaround = text[end_ix]._.workaround[:-1] + f\"</{term_type}> \"\n else:\n text[end_ix]._.workaround += f\"</{term_type}>\"\n \n # reconstruct fully annotated input text\n annotated_text = \"\"\n for token in text:\n annotated_text += token._.workaround\n \n return {\n \"tokenized_text\": tokenized_text, \n \"tags\": tags, \n \"annotated_text\": annotated_text,\n \"found_terms\": dict(found_terms)\n }", "def __init__(self, *terms, op=None):\n\n if not op:\n assert len(terms) == 1\n\n # assign parameters\n self.op = op\n self.terms = terms" ]
[ "0.63574696", "0.62927365", "0.6289394", "0.6186162", "0.6161904", "0.6108699", "0.60665464", "0.58098257", "0.5766303", "0.57403225", "0.5638852", "0.5626199", "0.5621384", "0.55921006", "0.55164593", "0.55002266", "0.54936963", "0.54462075", "0.54355145", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.54318094", "0.5399798", "0.5385666" ]
0.671826
0
Method that returns the error calculated for the cost function which is equal to the crossentropy error + L2 Regularization penalty term.
def cost(self, x, y): return self.cross_entropy_error(x,y) + self.l2_regularization_penalty()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n\n return cost + tf.losses.get_regularization_losses()", "def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss", "def build_loss(self):\n\n opt = tf.train.AdamOptimizer(self.learning_rate)\n mse = tf.losses.mean_squared_error(self.label[-1], self.outputs[-1])\n loss = tf.losses.get_total_loss()\n\n return mse, loss", "def __error(self, R, P, Q, K, beta):\n e = 0\n for i in xrange(len(R)):\n for j in xrange(len(R[i])):\n if R[i][j] > 0:\n\n # loss function error sum( (y-y_hat)^2 )\n e = e + pow(R[i][j]-numpy.dot(P[i,:],Q[:,j]), 2)\n\n # add regularization\n for k in xrange(K):\n\n # error + ||P||^2 + ||Q||^2\n e = e + (beta/2) * ( pow(P[i][k], 2) + pow(Q[k][j], 2) )\n return e", "def _create_loss_op(self):\n # 1.) The reconstruction loss, which forces the NN towards reconstructing more accurately the\n # given input. This function is configurable, but usually it is the Bernoulli negative log-likelihood.\n if self.cost_function == 'abs':\n reconstr_loss = tf.reduce_sum(tf.abs(self.x_decoded - self.x_in), 1)\n elif self.cost_function in ('mse', 'l2', 'square'):\n reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x_in, self.x_decoded), 1)\n elif self.cost_function in ('xentropy', 'log'):\n reconstr_loss = \\\n -tf.reduce_sum(self.x_in * tf.log(1e-10 + self.x_decoded)\n + (1 - self.x_in) * tf.log(1e-10 + 1 - self.x_decoded),\n 1)\n else:\n raise ValueError(self.cost_function, \"Unknown cost function name!\")\n\n # 2.) The latent loss, which is defined as the Kullback Leibler divergence\n ## between the distribution in latent space induced by the encoder on\n # the data and some prior. This acts as a kind of regularizer.\n # This can be interpreted as the number of \"nats\" required\n # for transmitting the the latent space distribution given\n # the prior.\n latent_loss = -0.5 * tf.reduce_sum(1. 
+ self.z_log_sigma_sq\n - tf.square(self.z_mean)\n - tf.exp(self.z_log_sigma_sq), 1)\n\n self.loss_op = tf.reduce_mean(reconstr_loss + latent_loss) # average over batch\n tf.add_to_collection(\"losses\", self.loss_op)\n\n if self.learning_rate is not None:\n global_step = tf.train.get_or_create_global_step()\n self.train_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(\n self.loss_op,\n global_step=global_step,\n var_list=tf.get_collection(self.training_scope) if self.training_scope is not None else None)\n\n tf.add_to_collection(\"train_ops\", self.train_op)\n tf_logging.info(\"Added AdamOptimizer with learning rate: %.8f\" % self.learning_rate)\n\n tf.summary.scalar(\"latent_loss\", tf.reduce_mean(latent_loss))\n tf.summary.scalar(\"reconstruction_loss\", tf.reduce_mean(reconstr_loss))\n tf.summary.scalar(\"vae_loss\", self.loss_op)", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def l2_loss(predictions, real_values):\n with tf.variable_scope('loss'):\n # 1/2n \\sum^{n}_{i=i}{(x_i - x'_i)^2}\n mse = tf.div(tf.reduce_mean(\n tf.square(tf.subtract(predictions, real_values))), 2, name=\"mse\")\n tf.add_to_collection('losses', mse)\n \n # mse + weight_decay per layer\n error = tf.add_n(tf.get_collection('losses'), name='total_loss')\n\n return error", "def get_lr_cost(self):\n\n\t\tlabels = self.get_symbolic_expected_rewards()\n\n\t\treturn -theano.tensor.mean(\n\t\t\ttheano.tensor.log(labels)[\n\t\t\t\ttheano.tensor.arange(self.symbolic_output.shape[0]),\n\t\t\t\tself.symbolic_output])", "def compute_cost(A2, Y, parameters):\n\n\tm = Y.shape[1] # number of example\n\n\t# Compute the cross-entropy cost\n\tlogprobs = np.multiply(np.log(A2), Y)\n\tcost = -(1/m)*(np.sum((logprobs) + np.multiply(np.log(1-A2), 1-Y)))\n\n\tcost = np.squeeze(cost) # makes sure cost is the dimension we expect. 
\n\t \t# E.g., turns [[17]] into 17 \n\tassert(isinstance(cost, float))\n\n\treturn cost", "def max_error(y_true, y_pred):\n ...", "def get_cost(self, y_enc, output, w1, w2):\n cost = - np.sum(y_enc*np.log(output))\n # add the L2 regularization by taking the L2-norm of the weights and multiplying it with our constant.\n l2_term = (self.l2/2.0) * (np.sum(np.square(w1[:, 1:])) + np.sum(np.square(w2[:, 1:])))\n cost = cost + l2_term\n return cost/y_enc.shape[1]", "def cost_func(w, X, y):\n y_pred = np.dot(X, w)\n err = np.sum(np.square(y_pred - y)) / (2 * len(y))\n\n return err", "def rms_error(self, X, y) :\n ### ========== TODO : START ========== ###\n # part h: compute RMSE\n n, d = X.shape\n error = np.sqrt(self.cost(X,y)/n)\n ### ========== TODO : END ========== ###\n return error", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def squaredError(label, prediction):\n return (label-prediction)*(label-prediction)", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def l2_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.square(obs - actual), 1)", "def calc_error_parameter(X, y, target, dimension): #change if more parameters\n\n pos_max = np.argmax(y)\n best_parameters = X[pos_max, 0:dimension]\n best_parameters = np.reshape(best_parameters, (-1, 1))\n\n l2_errors = (\n np.power(best_parameters[0, :] - target[0], 2) +\n np.power(best_parameters[1, :] - target[1], 2) +\n np.power(best_parameters[2, :] - target[2], 2))\n\n return l2_errors.tolist(), best_parameters.tolist()", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def get_cost(self):\n\n\t\tx = self.symbolic_input\n\t\ty = self.get_reconstructed_input()\n\n\t\tnegative_log_loss = -theano.tensor.sum(x*theano.tensor.log(y) +\n\t\t\t(1-x)*theano.tensor.log(1-y), axis=1)\n\n\t\tmean_loss = 
theano.tensor.mean(negative_log_loss)\n\n\t\treturn mean_loss", "def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i", "def cross_entropy_error(self, x, y):\n return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])", "def learningCurve(X, y, Xval, yval, Lambda):\n\n # Number of training examples\n m, _ = X.shape\n\n # You need to return these values correctly\n error_train = np.zeros(m)\n error_val = np.zeros(m)\n\n for i in range(m):\n theta = trainLinearReg(X[:i + 1], y[:i + 1], Lambda)\n error_train[i], _ = linearRegCostFunction(X[:i + 1], y[:i + 1], theta, 0)\n error_val[i], _ = linearRegCostFunction(Xval, yval, theta, 0)\n \n return error_train, error_val", "def l2_error(self, X=None, y=None) -> np.ndarray:\n return np.square(self.residuals(X, y))", "def calculate_td_error(self, old_state, new_state, reward):\n\n output = self.net(self.state_tensor_convert(old_state))\n target = self.gamma * self.net(self.state_tensor_convert(new_state)) + reward\n self.loss = self.net.loss(output,target)\n return float(target-output)" ]
[ "0.7168252", "0.7168252", "0.7168252", "0.7110235", "0.67353547", "0.6554512", "0.6539384", "0.6533157", "0.6518578", "0.6460937", "0.6451485", "0.6346614", "0.6325323", "0.63039505", "0.63011825", "0.6289162", "0.62576413", "0.624856", "0.62276816", "0.62257445", "0.62255144", "0.6212046", "0.6206781", "0.6136762", "0.6119961", "0.61102915", "0.6097761", "0.6084513", "0.60512114", "0.60423404" ]
0.75452334
0
Implementation of Gradient Descent. Returns the best set of weights for a model
def gradient_descent(self, x, y):
    # Initialize weights vector
    self.weights = np.zeros(len(x[0]))

    # Storing number of training example in a variable
    n = len(x)

    # Initiate variables to keep track of the current and smallest loss recorded
    lowest_loss = sys.float_info.max
    current_loss = sys.float_info.max

    # Initiate variables to keep track of step sizes
    norm = sys.float_info.max
    smallest_norm = sys.float_info.max

    # Initiate list variable that stores all previous weights
    prev_weights = []

    # Initiate list that stores all the errors.
    errors = []

    # Variable to keep track of the number of iterations that returns a bigger loss than current loss
    k_loss_iteration = 1

    # Learning loop
    for i in range(self.max_iter):

        # Append current weights
        prev_weights.append(np.array(self.weights))

        # Minimizing Loss Function Error by adjusting weights using Gradient Descent
        self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)

        # Compute the error of the Cost Function and store it in a list
        current_loss = self.cost(x, y)

        if len(errors) > 1 and current_loss > errors[-1]:
            k_loss_iteration += 1
        else:
            k_loss_iteration = 1

        errors.append(current_loss)

        # Track smallest loss
        if current_loss < lowest_loss:
            lowest_loss = current_loss

        # Compute the L2 Norm of the difference between current weights and previous weights
        norm = np.linalg.norm(self.weights - prev_weights[-1])

        # Track smallest step size and set it as error threshold
        if norm < smallest_norm:
            smallest_norm = norm

        # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small
        if self.error_threshold != None and norm < self.error_threshold:
            print("Converged after {} iterations!".format(i))
            break

        # stop if error hasn't gone down in k iterations
        if k_loss_iteration >= 10:
            print(str(k_loss_iteration) + " iterations of loss not decreasing on {}th iteration.".format(i))
            break

    # Log final weights
    print("Final norm: " + str(norm) + "\nSmallest step size recorded: " + str(smallest_norm) + "\nFinal error: " + str(current_loss) + "\nLowest error recorded: " + str(lowest_loss) + "\nNumber of epochs: " + str(len(errors)) + "\nFinal weights: " + str(self.weights))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights", "def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))", "def learning_by_gradient_descent(y, tx, w, gamma):\n\tgrad = calculate_gradient(y, tx, w)\n\n\tw = w - gamma * grad\n\treturn w", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n for n_iter in range(max_iters):\n # compute gradient\n grad = compute_gradient(y, tx, w)\n # gradient w by descent update\n if n_iter % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n w -= gamma * grad\n\n return w, compute_cost(y, tx, w)", "def gradient_descent(self, alpha, batch, weight_gradients, bias_gradients):\n self._dwg = [0] * 8\n self._dbg = [0] * 8\n self._cost = 0\n\n workers = []\n for i in range(batch[0].shape[0]-1):\n p = Process(target=self.mp_gd, args=(batch, weight_gradients, bias_gradients, i))\n workers.append(p)\n p.start()\n\n\n for p in workers:\n self._cost += self._q.get()\n\n self._dwg = list(map(add, self._dwg, self._dwgq.get()))\n self._dbg = list(map(add, self._dbg, self._dbgq.get()))\n\n p.join()\n\n for j in range(len(self._dwg)):\n weight_gradients[j] = weight_gradients[j] - alpha * self._dwg[j]\n bias_gradients[j] = bias_gradients[j] - alpha * self._dbg[j]\n cost = self._cost/len(batch)\n self._cost_history.append(cost)\n\n return weight_gradients, bias_gradients", "def fit(self, w):\n w_former = w\n w_next = w\n w_t = w\n w_t_100 = w\n w_diff = 10000\n i = 0\n #tim_beg = t.time()\n # use two part to calculate the a(w,w0):calculate the gradient using regular or SDG, batch = 10\n # calculate the gradient and 
update the w,w0\n while i < 10000 and np.abs(w_diff) > 0.00001:\n loss_func = self.lost\n grads = self.gradient(loss_func)\n # calculate the y_pred(eta)\n w_next = w_former - grads(w_former) / (10000)\n k =self.lost(w_next) - self.lost(w_former)\n m = np.dot(w_next-w_former, grads(w_former).T)\n if i != 0 and i % 100 == 0:\n w_t = w_t_100\n w_t_100 = w_next\n w_diff = 1 / len(w) * (np.sum(np.abs(w_t_100 - w_t)))\n i_loss = self.lost(w_next)\n print(\"Iteration < %d > with loss < %f >\" % (i, i_loss))\n #self.los_plt.append(i_loss)\n #tim = t.time() - tim_beg\n #self.tim.append(tim)\n i += 1\n w_former = w_next\n #plt.plot(self.tim, self.los_plt)\n #plt.xlabel(\"time\")\n #plt.ylabel('loss')\n #plt.show()\n if i >= 10000:\n print(\"~Optimization stops because finishing iteration~\")\n if np.abs(w_diff) <= 0.00001:\n print(\"~Optimization stops because of difference between weights are less than 0.00001~\")\n self.w_result = w_next", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n for n_iter in range(max_iters):\r\n gradient = compute_gradient(y,tx,w)\r\n loss = compute_loss_MSE(y,tx,w)\r\n w = w - gamma * gradient\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1], loss_list[-1]", "def model(X_train, Y_train, X_test, Y_test, num_iterations, learning_rate, print_cost):\n\n\n # initialize parameters with zeros\n w, b = initialize_with_zeros(X_train.shape[0]);\n\n print(\"w.shape() = \" +str(w.shape)+ \", b = \" +str(b));\n\n # Gradient descent\n parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost);\n \n # Retrieve parameters w and b from dictionary \"parameters\"\n w = parameters[\"w\"]\n b = parameters[\"b\"]\n \n # Predict test/train set examples \n Y_prediction_test = predict(w, b, X_test)\n Y_prediction_train = predict(w, b, X_train)\n\n # Print train/test Errors\n print(\"train accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))\n print(\"test accuracy: {} %\".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))\n \n d = {\"costs\": costs,\n \"Y_prediction_test\": Y_prediction_test, \n \"Y_prediction_train\" : Y_prediction_train, \n \"w\" : w,\n \"b\" : b,\n \"learning_rate\" : learning_rate,\n \"num_iterations\": num_iterations}\n \n return d", "def fit(self, X, y):\r\n newWeight = [0.0] * self.size\r\n w = [0.0] * len(X)\r\n val = self.predict_prob(X) \r\n grad = [(y-1.0) * i[1] for i in X] \r\n grad1 = float((math.exp(-math.fsum((self.weight[f]*v for f, v in X)))) * val)\r\n grad2 = [i[1] * -1 * grad1 for i in X] \r\n for i in range(len(w)):\r\n w[i] = (grad[i] - grad2[i])\r\n \r\n w = [i*self.eta for i in w]\r\n for i in range(len(X)):\r\n newWeight[i] = self.weight[X[i][0]] -w[i]\r\n \r\n self.weight = newWeight[:]\r\n \r\n pass", "def trainable_weights(self):\n self._trainable_weights = list(filter(lambda x: x.requires_grad, self.get_parameters(expand=True)))\n return self._trainable_weights", "def test_multiple_gradient_descent_with_backprop():\n layer_list = [7, 7, 7, 2]\n print(\"test\", \"layer_list\", layer_list)\n\n X, W, B = initialize_parameters_for_layers(layer_list, 5)\n\n alpha = 0.01\n num_iterations = 2000\n num_layers = len(layer_list) - 1\n\n Y = np.arange(10).reshape(2, 5)\n W, B, yhat = 
gradient_descent_for_n_layers(num_layers, W, B, X, Y, alpha, num_iterations)\n\n print(\"test done.\") # final W and B are\", W, \"and\", B)\n print(\"final yhat\", yhat)", "def get_weights(self):", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n loss = loss_function(y, tx, w)\n return w, loss", "def least_squares_GD(y, tx, initial_w=None, max_iters=50, gamma=0.1):\n # Define parameters to store w and loss\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1]) \n ws = [initial_w] # Initial guess w0 generated randomly\n losses = []\n w = ws[0]\n for n_iter in range(max_iters):\n # compute loss, gradient\n grad, err = least_squares_gradient(y, tx, w)\n loss = compute_mse(y,tx,w)\n # gradient w by descent update\n w = w - gamma * grad\n # store w and loss\n ws.append(w)\n losses.append(loss)\n #if (n_iter % int(max_iters/5)) == 0:\n #print(\"Gradient Descent({bi}/{ti}): loss={l}\".format(bi=n_iter, ti=max_iters,l=loss))\n return w,loss", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w_start = initial_w\n w = w_start\n\n for n_iter in range(max_iters):\n gradient = compute_gradient(y, tx, w)\n loss = compute_loss(y,tx,w)\n w = w - gamma * gradient\n\n return w, loss", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, gamma)\n\n\treturn w", "def gradient_model (self, x, initial_weights = None, \\\n step_size = 5.0e-6, tol = 2.5e+7, n_iters = 501, l2 = 0):\n # setup initial intercept, slope, iter number and rss\n if initial_weights is None:\n weights = self.initial_weight\n else:\n weights = initial_weights\n # Compute indicator value for (y_i = +1)\n indicators = np.array([int (i) for i in (self.train_output_y==1)])\n for itr in range(n_iters):\n # Predict P(y_i = +1|x_1,w) using your predict_probability() function\n _, pred_probs = self.predict_probability(self.train_feature_x, weights)\n \n # Compute the errors as indicator - predictions\n errors = indicators - pred_probs\n\n #Update the weights:\n derivative = self.feature_derivative(errors, weights, l2)\n weights = weights + derivative * (step_size) \n \n #check if converged\n #todo\n \"\"\"\n # Checking whether log likelihood is increasing\n if itr <= 15 or (itr <= 100 and itr % 10 == 0) or (itr <= 1000 and itr % 100 == 0) \\\n or (itr <= 10000 and itr % 1000 == 0) or itr % 10000 == 0:\n lp = self.compute_log_likelihood(indicators,weights)\n print 'iteration %*d: log likelihood of observed labels = %.8f' % \\\n (int(np.ceil(np.log10(n_iters))), itr, lp)\n \"\"\"\n \n #check weights\n #print \"\\n\"\n #print \"The weights for features: \", weights\n #final prediction\n preds = self.prediction(x, weights)\n return preds, weights", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data 
(3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def compute_weights(model, params, y_obs, LB_type='NWJ'):\n\n # Define PyTorch variables\n x = Variable(\n torch.from_numpy(params).type(torch.FloatTensor),\n requires_grad=True)\n y = Variable(\n torch.from_numpy(y_obs).type(torch.FloatTensor),\n requires_grad=True)\n\n # Pass observed data and parameters through the model\n w = list()\n for idx in range(len(x)):\n T = model(x[idx], y).data.numpy()\n if LB_type == 'NWJ':\n w.append(np.exp(T - 1))\n else:\n raise NotImplementedError\n w = np.array(w)\n\n return w.reshape(-1)", "def gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter in range(max_iters):\n grad = compute_grad(y, tx, w)\n loss = compute_loss(y, tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n \n return w, loss", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w = initial_w.copy()\n ws = [w]\n loss = compute_loss_LS(y, tx, w)\n losses = [loss]\n for n_iter in range(max_iters):\n gradient = compute_gradient_LS(y, tx, w)\n w -= gamma * gradient\n loss = compute_loss_LS(y, tx, w)\n ws.append(w)\n losses.append(loss)\n# print(\"Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n# bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n\n return losses[-1], ws[-1]", "def test_update_parameters(model):\n train_inputs = torch.tensor([[1., 2., 3.]])\n train_loss = 0.5 * (model(train_inputs) ** 2)\n\n params = gradient_update_parameters(model,\n train_loss,\n params=None,\n step_size=0.5,\n first_order=False)\n\n assert train_loss.item() == 264.5\n assert list(params.keys()) == ['weight']\n assert torch.all(params['weight'].data == torch.tensor([[-9.5, -20., -29.5]]))\n\n \"\"\"\n The new loss function (still with respect to the weights of the model w) is\n defined as:\n g(w) = 0.5 * (4 * w'_1 + 5 * w'_2 + 6 * w'_3) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * df / dw_1)\n + 5 * (w_2 - 0.5 * df / dw_2)\n + 6 * (w_3 - 0.5 * df / dw_3)) ** 2\n = 0.5 * (4 * (w_1 - 0.5 * 1 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 5 * (w_2 - 0.5 * 2 * (1 * w_1 + 2 * w_2 + 3 * w_3))\n + 6 * (w_3 - 0.5 * 3 * (1 * w_1 + 2 * w_2 + 3 * w_3))) ** 2\n = 0.5 * ((4 - 4 * 0.5 - 5 * 1.0 - 6 * 1.5) * w_1\n + (5 - 4 * 1.0 - 5 * 2.0 - 6 * 3.0) * w_2\n + (6 - 4 * 1.5 - 5 * 3.0 - 6 * 4.5) * w_3) ** 2\n = 0.5 * (-12 * w_1 - 27 * w_2 - 42 * w_3) ** 2\n\n Therefore the gradient of the function g with respect to w (and evaluated\n at w = [2, 3, 5]) is:\n dg / dw_1 = -12 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 3780\n dg / dw_2 = -27 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 8505\n dg / dw_3 = -42 * (-12 * w_1 - 27 * w_2 - 42 * w_3) = 13230\n \"\"\"\n test_inputs = torch.tensor([[4., 5., 6.]])\n test_loss = 0.5 * (model(test_inputs, params=params) ** 2)\n\n grads = torch.autograd.grad(test_loss, model.parameters())\n\n assert test_loss.item() == 49612.5\n assert len(grads) == 1\n assert torch.all(grads[0].data == torch.tensor([[3780., 8505., 13230.]]))", "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):\n \n costs = []\n \n for i in range(num_iterations):\n \n \n # Cost and gradient calculation \n grads, cost = propagate(w, b, X, Y)\n\n # 
Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n \n # update rule (Gradient Descent)\n w = w - learning_rate * dw\n b = b - learning_rate * db\n\n # Record the change of costs\n if i % 100 == 0:\n costs.append(cost)\n \n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print (\"Cost after iteration %i: %f\" %(i, cost))\n \n params = {\"w\": w,\n \"b\": b}\n \n grads = {\"dw\": dw,\n \"db\": db}\n \n return params, grads, costs", "def __update_weights_grad_desc(self, x_train, y_train):\n\n predictions = self.__compute_prediction(x_train)\n weights_delta = np.dot(x_train.T, y_train - predictions)\n\n m = y_train.shape[0]\n self.__weights += self.__learning_rate / float(m) * weights_delta", "def gradient_descent(features, labels, alpha, num_iters):\n # Initial settings of weights\n weights = [0, 0, 0]\n\n # Length of dataset\n N = len(features[0])\n\n # Take 100 gradient steps\n gradient_losses = [0, 0, 0]\n\n # Take num_iters steps of gradient descent\n for step in range(num_iters):\n\n # For reach data point, compute the gradients w.r.t. weights and offset\n for x1, x2, y in zip(features[0], features[1], labels):\n\n # Create \"expanded feature dimension for x to account for offset\n x = [1, x1, x2]\n\n # Make prediction\n pred = weights[0]*x[0] + weights[1]*x[1] + weights[2]*x[2]\n\n # Compute gradient of loss for linear regression\n for j in range(len(gradient_losses)):\n gradient_losses[j] += (pred-y) * x[j]\n\n # Update weights using gradients above\n for j in range(len(gradient_losses)):\n weights[j] -= (alpha/N) * gradient_losses[j]\n\n # Reset gradients of loss after each step\n gradient_losses = [0, 0, 0]\n\n # Return the weights\n return [weights[0], weights[1], weights[2]]" ]
[ "0.69018984", "0.6838119", "0.68109965", "0.680312", "0.67406446", "0.6740553", "0.66947246", "0.6646846", "0.664137", "0.66060346", "0.65947324", "0.6593397", "0.6561884", "0.6527356", "0.6517943", "0.65154314", "0.6514863", "0.6511061", "0.64937884", "0.6486011", "0.64787114", "0.647839", "0.64779633", "0.64772445", "0.6468707", "0.64356405", "0.64236695", "0.64109117", "0.6350198", "0.63422203" ]
0.6886186
1
Method that computes the cross-entropy error
def cross_entropy_error(self, x, y):
    return -1 * sum([y[i] * np.log(self.logistic_function(self.weights.dot(x[i]))) + (1-y[i]) * np.log(1-self.logistic_function(self.weights.dot(x[i]))) for i in range(len(y))])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_entropy(self):\n return self._cross_entropy_func", "def cross_entropy(y_pred,y):\n \n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n return sum(-y*np.log(y_pred+epsilon))", "def test_cross_entropy(self):\n loss = losses.cross_entropy_with_logits(\n logits=self.logits, targets=self.targets)\n kl = loss.mean()\n\n expected_loss = -(jnp.log(0.9) + jnp.log(0.1) + jnp.log(0.9)) / 3\n\n self.assertAlmostEqual(kl, expected_loss)", "def cross_entropy(y_observed, p):\n\n pass", "def cross_entropy_loss():\n return nn.CrossEntropyLoss()", "def cross_entropy(y_pred, y_true, normalize=True, eps=1e-15):\n if type(y_pred) != np.ndarray:\n raise TypeError(\"Require np.ndarray type,{} checked\".format(type(y_pred)))\n if type(y_true) != np.ndarray:\n raise TypeError(\"Require np.ndarray type,{} checked\".format(type(y_true)))\n # clip = np.vectorize(lambda x: max(eps, min(1 - eps, x)))\n # y_pred = clip(y_pred)\n y_pred = np.array(list(map(lambda x: max(eps, min(1 - eps, x)), y_pred)))\n l = np.multiply(y_true, np.log(y_pred)) + np.multiply(1 - y_true, np.log(1 - y_pred))\n loss = -1 * np.sum(l).item()\n if normalize:\n loss = loss / len(y_pred)\n return loss", "def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def cross_entropy(true, pred, axis=-1, epsilon=1e-7):\n pred = ivy.clip(pred, epsilon, 1 - epsilon)\n log_pred = ivy.log(pred)\n # noinspection PyUnresolvedReferences\n return -ivy.reduce_sum(log_pred * true, axis)", "def cross_entropy(predictions, targets):\n likelihood = targets * np.log(predictions)\n return -np.sum(likelihood) / predictions.shape[0]", "def compute_error_cross_dataset(AL, train_y):\n # print(train_y.shape)\n nb = train_y.shape[0]\n error=np.power(np.add(train_y,-AL),2)*1/nb\n return error\n # raise NotImplementedError", "def _loss(self):\n\n cross_entropy = tf.reduce_mean(-tf.log(self.probability + epsilon) * self.y)\n self.loss = cross_entropy\n\n self.accuracy = tf.reduce_mean(\n tf.cast(tf.equal(tf.argmax(self.y, 1), self.prediction), tf.float32))", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def cross_entropy(targets, predictions, epsilon=1e-12):\n\n predictions = np.clip(predictions, epsilon, 1. - epsilon)\n\n N = predictions.shape[0]\n\n ce = -np.sum(np.sum(targets*np.log(predictions+1e-9)))/N\n\n return ce", "def cross_entropy_loss(predictions, targets, epsilon=1e-12):\n predictions = np.clip(predictions, epsilon, 1. 
- epsilon)\n N = predictions.shape[0]\n ce = -np.sum(targets*np.log(predictions+1e-9))/N\n return ce", "def compute_error(y_true, y_pred):\r\n\r\n # INSERT YOUR CODE HERE\r\n \r\n n = len(y_true)\r\n err = [y_true[i] != y_pred[i] for i in range(n)]\r\n return sum(err) / n\r\n \r\n raise Exception('Function not yet implemented!')", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def cross_entropy(t,y):\r\n #print(-1*t*np.log(y))\r\n #print(np.shape(np.log(y)))\r\n #print(np.shape(t))\r\n return t*np.log(y)*(-1)", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def get_cross_entropy(self):\n assert (self.dataset is not None) and (self.labels is not None), 'Logistic Regression requires a dataset and labels.'\n potential = 0.0\n logits = self.dataset @ self.parameters[:self.dataset.shape[1]]\n max_logits = torch.max(torch.zeros(logits.shape[0]),logits)\n potential = (-logits @ self.labels.t() + torch.sum(max_logits) + torch.sum(\n torch.log(torch.exp(-max_logits)+torch.exp(logits - max_logits))))# * n.reciprocal())\n return potential", "def cross_entropy(X, y):\n return lambda theta: -y * np.log(logistic_hypothesis(theta)(X) + 1e-9) - (\n 1 - y\n ) * np.log(1 - logistic_hypothesis(theta)(X) + 1e-9)", "def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = -tf.reduce_sum(weighted_losses)\n return loss", "def cross_entropy(U, V):\n return -np.sum(U * np.log(V))", "def _computeError(self, inputs, targets):\n return .5*np.sum((targets-self._pcnfwd(inputs))**2)", "def crossentropy_fn(args: StepFunctionArgs) -> SingleScorePerStepTensor:\n return -torch.log2(probability_fn(args))", "def cross_entropy(self, true_values, predicted_values):\n\n testing_set_size = len(true_values)\n running_sum = 0\n for i in range(len(true_values)):\n true_set = true_values[i]\n predicted_set = predicted_values[i]\n running_sum += sum([(true_set[j] * math.log(predicted_set[j])) for j in range(len(true_set))])\n\n #print(f\"Average cross entropy:\\t{-running_sum / testing_set_size}\")\n self.performance += (-running_sum / testing_set_size)\n return -running_sum / testing_set_size", "def evaluate(self, X, y):\n y_pred = self.predict(X)\n return self.cross_entropy_loss(y, y_pred)", "def cross_entropy(y_prob,y):\n from numpy import log, sum\n m = y.shape[0]\n p = y_prob\n log_likelihood = -log(p[range(m),y])\n loss = sum(log_likelihood) / m\n return loss", "def calculate_entropy(eval_token_count, training_model):\n sum_of_counts = sum(eval_token_count.values())\n cross_entropy = 0\n mis = []\n for 
ngram in eval_token_count:\n if training_model.score(ngram[-1], list(ngram[0:3])) == 0 or sum_of_counts == 0:\n mi = np.mean(mis) if len(mis) > 0 else np.log2(0.287)\n else:\n mi = training_model.logscore(ngram[-1], list(ngram[0:3]))\n pi = eval_token_count[ngram] / sum_of_counts\n mis.append(mi)\n cross_entropy -= pi * mi\n return cross_entropy", "def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)" ]
[ "0.7317789", "0.7193637", "0.716187", "0.7155573", "0.71354103", "0.69392055", "0.6925359", "0.6868344", "0.6839562", "0.68295264", "0.68244743", "0.6810189", "0.6806863", "0.6792568", "0.679048", "0.6756376", "0.67161447", "0.671162", "0.6709578", "0.67024356", "0.6697486", "0.6661615", "0.66593605", "0.6658526", "0.66499263", "0.6634312", "0.6618589", "0.6594554", "0.65864813", "0.65808827" ]
0.78930825
0
Method that computes and returns the L2 Regularization term
def l2_regularization_penalty(self):
    return self.l2 * (np.linalg.norm(self.weights)**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def l2_regularization(cg, rate=0.01):\n W = VariableFilter(roles=[WEIGHT])(cg.variables)\n L2_cost = rate * l2_norm(W)\n\n return L2_cost", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = reg_strength*np.sum(W*W)\n grad = 2*reg_strength*W\n return loss, grad", "def l2_regularizer(scale):\n if isinstance(scale, numbers.Integral):\n raise ValueError('scale cannot be an integer: %s' % (scale,))\n if isinstance(scale, numbers.Real):\n if scale < 0.:\n raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %\n scale)\n if scale >= 1.:\n raise ValueError('Setting a scale greater than 1 on a regularizer: %g.' %\n scale)\n if scale == 0.:\n logging.info('Scale of 0 disables regularizer.')\n return lambda _, name=None: None\n\n def l2(weights, name=None):\n \"\"\"Applies l2 regularization to weights.\"\"\"\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)\n\n return l2", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n return cost + tf.losses.get_regularization_losses()", "def l2_reg_cost(cost):\n\n return cost + tf.losses.get_regularization_losses()", "def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss", "def l2_reg_cost(cost, lambtha, weights, L, m):\n w_norm = 0\n for i in range(1, L + 1):\n w_norm += np.linalg.norm(weights['W' + str(i)])\n L2 = cost + (lambtha / (2 * m) * w_norm)\n return L2", "def reg_loss(model: nn.Module, regularizer: str, l1: float=0.01, l2: float=0.01):\n if regularizer == 'l1':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n return l1_reg\n if regularizer == 'l2':\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l2_reg\n if regularizer == 'l1_l2':\n l1_reg = l1 * sum(torch.abs(p).sum() for p in model.parameters())\n l2_reg = l2 * sum(torch.square(p).sum() for p in model.parameters())\n return l1_reg + l2_reg", "def l2_reg_cost(cost, lambtha, weights, L, m):\n enorm = 0\n for i in range(1, L + 1):\n layer = 'W{}'.format(i)\n enorm += np.linalg.norm(weights[layer])\n return cost + (lambtha / (2 * m)) * enorm", "def l2_regularization(variables, factor=1e-4, name='l2_regularization', collections=['regularization']):\n l2 = tf.add_n([tf.sqrt(2.*tf.nn.l2_loss(var)) for var in variables], name=name) if variables else tf.constant(0.)\n loss = factor * l2\n scalar_summary(loss, name, collections)\n return loss", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n while (L):\n index = \"W{}\".format(L)\n weight = weights[index]\n f += np.linalg.norm(weight)\n L -= 1\n return cost + lambtha / (2 * m) * f", "def l2_reg_cost(cost, lambtha, weights, L, m):\n Frobenius = 0\n for k, v in weights.items():\n if k[0] == \"W\":\n Frobenius += np.linalg.norm(v)\n return cost + (lambtha/(2*m)) * Frobenius", "def l2(parameter, bias=None, reg=0.01, lr=0.1):\n \n if bias is not None:\n w_and_b = torch.cat((parameter, 
bias.unfold(0,1,1)),1)\n else:\n w_and_b = parameter\n L2 = reg # lambda: regularization strength\n Norm = (lr*L2/w_and_b.norm(2))\n if Norm.is_cuda:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cuda\"))\n else:\n ones_w = torch.ones(parameter.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_w, Norm)\n update = (parameter*l2T) \n parameter.data = update\n # Update bias\n if bias is not None:\n if Norm.is_cuda:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cuda\"))\n else:\n ones_b = torch.ones(bias.size(), device=torch.device(\"cpu\"))\n l2T = 1.0 - torch.min(ones_b, bias)\n update_b = (bias*l2T)\n bias.data = update_b", "def l2_reg_cost(cost, lambtha, weights, L, m):\n sumWeights = 0\n for i in range(1, L + 1):\n sumWeights += np.linalg.norm(weights['W' + str(i)])\n return cost + sumWeights * lambtha / (2 * m)", "def r2_GWR(GWRMod): \r\n tss = np.sum((GWRMod.y - GWRMod.y_mean)**2)\r\n r2 = 1.0 - GWRMod.res2/tss\r\n \r\n return r2", "def residualNorm2(self):\n r2 = (np.dot(self.x,np.dot(self.AtA,self.x)-2.0*self.Atb) + self.btb)*self.scale\n if self.regularizationLambda > 0:\n r2 -= self.regularizationLambda*np.dot(self.x,self.x)\n return r2", "def L2_norm(self):\n analyticals = self.analytical(self.x_values, self.C, self.D)\n error = analyticals - self.numerical\n self.L2 = np.sqrt((1/self.gp)*np.sum(error**2))", "def l2(weights, name=None):\n with ops.op_scope([weights], name, 'l2_regularizer') as scope:\n my_scale = ops.convert_to_tensor(scale,\n dtype=weights.dtype.base_dtype,\n name='scale')\n return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)", "def EmbeddingL2RegularizationUpdate(embedding_variable, net_input, learn_rate, l2_reg_val):\n # TODO(student): Change this to something useful. 
Currently, this is a no-op.\n # net_input = net_input / tf.norm(net_input)\n net_input = tf.nn.l2_normalize(net_input, axis=0)\n grad = l2_reg_val * tf.matmul(tf.transpose(net_input), tf.matmul(net_input, embedding_variable))\n embedding_variable_ = embedding_variable - learn_rate * grad\n\n ## local test #better to disable when learning\n batch_size, number_of_vocabulary_tokens = net_input.shape\n net_example = numpy.random.binomial(1, .1, (3, number_of_vocabulary_tokens))\n sigma_fnc = l2_reg_val * tf.nn.l2_loss(tf.matmul(net_input, embedding_variable))\n # assert tf.gradients(sigma_fnc, embedding_variable) == grad, \"wrong grad in L2\"\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n tf_grad = sess.run(tf.gradients(sigma_fnc, embedding_variable)[0], feed_dict={net_input: net_example})\n my_grad = sess.run(grad, feed_dict={net_input: net_example})\n differ = numpy.linalg.norm(tf_grad - my_grad)\n differ = differ / numpy.linalg.norm(tf_grad)\n print('l2 grad differentage {}'.format(differ))\n print('l2 grad max difference {}'.format(numpy.max(tf_grad - my_grad)))\n\n return embedding_variable.assign(embedding_variable_)", "def l2(name, weights):\n\n with tf.name_scope(name):\n regularizer = np.float32(0.0)\n for weight in weights:\n tf.add(regularizer, tf.nn.l2_loss(weight))\n\n return regularizer", "def l21_reg(data):\n m = data.size()[0] # number of data points\n n = data.size()[1] # number of dimensions on the data points\n # find L2,1 regularization term\n outer_sum = 0\n for i in range(m):\n inner_sum = 0\n for j in range(n):\n inner_sum += data[i][j] ** 2\n outer_sum += inner_sum ** 0.5\n return outer_sum", "def _calc_r2(self):\n sse = np.sum((self.data.y - self.predict(self.data.x))**2)\n sst = np.sum((self.data.y - self.data.y.mean())**2)\n return (1. - sse/sst)", "def test_regress_R2(self):\r\n x = [1.0, 2.0, 3.0, 4.0, 5.0]\r\n y = [2.1, 4.2, 5.9, 8.4, 9.6]\r\n result = regress_R2(x, y)\r\n self.assertFloatEqual(result, 0.99171419347896)", "def get_regularizer(type, l1, l2):\n if type == 'l1':\n reg = keras.regularizers.l1(l1)\n elif type == 'l2':\n reg = keras.regularizers.l2(l2)\n elif type == 'l1_l2':\n reg = keras.regularizers.l1_l2(l1, l2)\n else:\n reg = None\n return reg", "def calculate_R2(self):\n\n d1 = self.T - self.Y\n d2 = self.T - self.T.mean()\n\n self.r2 = 1 - d1.dot(d1) / d2.dot(d2)\n self.r2 = format(self.r2, '.3f')\n\n print \"\"\n print \"R2:\", self.r2", "def l2_reg_create_layer(prev, n, activation, lambtha):\n reg = tf.contrib.layers.l2_regularizer(lambtha)\n init = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n t = tf.layers.Dense(units=n, activation=activation,\n kernel_initializer=init,\n kernel_regularizer=reg,\n )\n return t(prev)", "def r2(t, y):\n\treturn r2_score(t, y)" ]
[ "0.77305657", "0.7178578", "0.7126942", "0.7085765", "0.70743436", "0.70743436", "0.70743436", "0.7026239", "0.70200115", "0.70168245", "0.68394643", "0.6777604", "0.67379314", "0.67117363", "0.6709994", "0.66848576", "0.6656158", "0.6496867", "0.6441608", "0.64246273", "0.64002705", "0.6393994", "0.63120025", "0.62116635", "0.62026644", "0.6177083", "0.61418355", "0.61381626", "0.6126379", "0.6108454" ]
0.79802835
0
Creates a warning if the price falls below min_price.
def price_too_low(cls, min_price: int):
    def eval_fn(p: Posting):
        if p.price < min_price:
            return f"The price (${p.price}) is suspiciously low."
    return cls(eval_fn)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")", "def check_price(self):\n if self.price < 0:\n self.raise_user_error(\"negative_amount\")", "def check_price():\n global NUMBER_OF_TOTAL_COINS, BEVERAGE_PRICE\n\n if NUMBER_OF_TOTAL_COINS == BEVERAGE_PRICE:\n return True\n elif NUMBER_OF_TOTAL_COINS < BEVERAGE_PRICE:\n return False\n else:\n return \"FATAL\"", "def compare_price(self):\n if self.__product_price < self.__alert_price:\n #print(\"price drop...\")\n self.__alert_client = True\n self.__price_difference = self.__product_price - self.__alert_price\n else:\n #print(\"Price not reduced...\")\n self.__alert_client = False\n self.__price_difference = self.__product_price - self.__alert_price", "def _on_order_amount_too_low(self, _msg):\r\n self.debug(\"### Server said: 'Order amount is too low'\")\r\n self.count_submitted -= 1", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def price_low(self, price_low):\n\n self._price_low = price_low", "def _check_amount_with_priority(self):\n\t\tfor line in self:\n\t\t\tif line.tax_slide_id and (line.amount_from > line.tax_slide_id.max_amount\n\t\t\t or line.amount_to > line.tax_slide_id.max_amount):\n\t\t\t\traise Warning(_(\"Line Amount couldn't exceed te slide max amount [%s]\" % line.tax_slide_id.max_amount))", "def filterPrice(self, minPrice = 5000):\n\n # Check and select if price button is displayed\n if commonFunctionsUI.isElementDisplayedByXPath(selector = self.locators.price):\n commonFunctionsUI.clickByXPath(selector = self.locators.price)\n else:\n LOGGER.error(\"Could not click price button\")\n raise Exception(\"could not click price button\")\n\n time.sleep(3)\n\n\n try:\n commonFunctionsUI.clickByXPath(selector = self.locators.minPrice)\n commonFunctionsUI.sendBackspace(selector = self.locators.priceSave, numOfBackspace = 5)\n\n commonFunctionsUI.enterTextByXPath(selector = self.locators.minPrice, text = minPrice)\n except:\n try:\n commonFunctionsUI.clickByXPath(selector = self.locators.searchButton)\n except:\n commonFunctionsUI.clickByXPath(selector = self.locators.priceSave)\n LOGGER.error(\"Could not find input field to enter min price\")\n raise Exception(\"Could not find input field to enter min price\")\n\n\n if commonFunctionsUI.isElementDisplayedByXPath(selector = self.locators.priceSave):\n commonFunctionsUI.clickByXPath(selector = self.locators.priceSave)\n else:\n raise Exception(\"Could not click on save price button\")", "def check_price(self, price_diff):\n chance = exp(price_diff / self.T)\n\n if price_diff < 0 and not chance > random():\n return True\n \n return False", "def 
check_symbol_price(self, data):\n if self.input_price < float(data.get(\"price\")):\n logging.info(\"Symbol price is higher than the input provided by the user.\")\n logging.info(\"Input Price :- \")\n logging.info(str(self.input_price))\n logging.info(\"Symbol Price :- \")\n logging.info(str(data.get(\"price\")))\n logging.info(\"+++++++++++++++++++++++++++++\")", "def buy_one_cent_less_than_bid_or_50(self, bid_price):\n if bid_price:\n buying_price = self.buy_fixed_quantity_less_than_bid_price(\n bid_price=bid_price,\n fixed_quantity=0.01)\n else:\n buying_price = self.buy_fixed_price(50)\n return buying_price", "def calc_new_bid_price_after_failure( self, cheapest_price ):\n\n new_bid_price = cheapest_price * 1.1\n return str(new_bid_price)", "def check_gc_min_max(self):\n if not self.allow_open_amount:\n return\n\n if self.gc_min < 0 or self.gc_max < 0:\n self.raise_user_error(\"negative_amount_not_allowed\")\n\n if self.gc_min > self.gc_max:\n self.raise_user_error(\"invalid_amount\")", "def _warn_nearing_exceed_budget(self, budget: Budget,\n exceeded_percent: int) -> None:\n print(f'[WARNING] You are about to exceed the {budget.name} budget! '\n f'You went over {exceeded_percent}% of the total '\n f'${budget.total_amount}.')", "def test_add_sale_with_price_below_one(self):\n self.register_admin_test_account()\n token = self.login_admin_test()\n\n response = self.app_test_client.post('{}/saleorder'.format(\n self.base_url), json={'name': 'Torch', 'price': -10, 'quantity': 5, 'totalamt': \"\"},\n headers=dict(Authorization=token),\n content_type='application/json')\n\n self.assertEqual(response.status_code, 400)\n\n self.assertEqual(general_helper_functions.convert_json(\n response)['message'], 'Bad request. The product price should be a positive number above 0.')", "def stock_min(stock):\n min_price=1000000\n for i in stock['Close']:\n if i < min_price:\n min_price=i\n return min_price", "def best_price(self):\n # TODO rename this to \"display_lowest_price\" or something...\n price = self.lowest_price\n if price:\n return Decimal.quantize(price.per_kg, settings.DECIMAL_CENTS) # round to cents", "def test_create_invalid_price_higher_than_999(self):\n product_name = \"Swift Iris\"\n size = Size.SIZE_4_5\n colour = \"Red\"\n price = 1001\n product_type = ProductType.SHOE\n product_code = \"A\"\n department = Department.LADIES\n\n product = Product(\n name=product_name,\n size=size,\n colour=colour,\n price=price,\n product_type=product_type,\n product_code=product_code,\n department=department,\n )\n\n with self.assertRaises(ValidationError):\n product.full_clean()", "def is_in_range(self, price):\r\n return price <= self.pmax and price >= self.pmin", "def _validate(self, instance, value):\n\n pmin = getattr(instance, \"p_min\", None)\n\n if pmin is None:\n raise RuntimeError(\n f\"Value for Pmin does not exist, so {self.prop_name} cannot be validated.\"\n )\n\n if not isinstance(value, Real):\n raise TypeError(f\"Value for {self.prop_name} shoulde be real numbers.\")\n\n if value < pmin and not isclose(value, pmin):\n raise ValueError(\n f\"Value for {self.prop_name} shoulde be greater or equal to Pmin.\"\n )", "def test_lowest_price(self):\n listings = steam_market.get_lowest_price(soup=get_soup_from_path(TEST_FILE_NORMAL_LISTING))\n self.assertEqual('11,59€', listings)", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def 
check_stock(self):\n if self.quantity > self.item.quantity:\n return \"%s Please adjust your cart.\" % CartItem.get_insufficient_stock_msg(self.item.quantity)\n return None", "def makeLow(self, force = False):\n\t\tlow = self.reSize( 25, self.name + '_LOW', force )\n\t\treturn low", "async def slotmin(self, ctx: commands.Context, bid: int):\r\n if bid < 1:\r\n await ctx.send(_(\"Invalid min bid amount.\"))\r\n return\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n await self.config.SLOT_MIN.set(bid)\r\n else:\r\n await self.config.guild(guild).SLOT_MIN.set(bid)\r\n credits_name = await bank.get_currency_name(guild)\r\n await ctx.send(\r\n _(\"Minimum bid is now {bid} {currency}.\").format(\r\n bid=humanize_number(bid), currency=credits_name\r\n )\r\n )", "def isLow(self):\n\t\treturn self.resolution == 'LOW'", "def clean_price(self):\n price = self.cleaned_data.get('price')\n if price == \"0\":\n raise forms.ValidationError(\n u'Please insert a price for your product')\n return price", "def sell_min_amount(self, sell_min_amount):\n\n self._sell_min_amount = sell_min_amount", "def _validate_price(price):\n try:\n price = float(price)\n except ValueError:\n raise ValueError('Please provide valid price')\n if price < 1:\n raise ValueError('Price should be positive number')\n return price" ]
[ "0.7942152", "0.67987967", "0.642869", "0.6229255", "0.6202904", "0.61763763", "0.6161802", "0.60979813", "0.59673613", "0.59450233", "0.59405667", "0.5903012", "0.581013", "0.5708258", "0.56505924", "0.56133443", "0.5591867", "0.55732036", "0.55653024", "0.5558609", "0.55510455", "0.5537665", "0.55014414", "0.5500666", "0.55006427", "0.54970354", "0.54750544", "0.546195", "0.54484874", "0.54448336" ]
0.7976325
0
Creates a warning if the posting is unfurnished.
def unfurnished(cls):
    def eval_fn(p: Posting):
        if 'furnished' not in p.attrs:
            return "The posting appears to be unfurnished."
    return cls(eval_fn)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warning(self, warning):\n pass", "def hearWarning(self, warnlvl, screenname):\n print screenname,\"warned us\"", "def warning(self, msg, *args, **kwargs):\n pass", "def notice(self, warning):\n pass", "def warning(self, *args, **kwargs):", "def warn():\n pass", "def get_membership_warning():\n if current_user.is_anonymous or \\\n (current_user.is_authenticated and\n (current_user.has_paid or current_user.alumnus)):\n return ''\n\n return render_template('user/membership_warning.htm')", "def is_hungry(self):\r\n if self._hunger > 0:\r\n return True\r\n else:\r\n return False", "def on_status_withheld(self, notice):\n log.debug(\"Received status withheld content notice: %s\", notice)", "def warning(self) -> 'outputs.AnyResponse':\n return pulumi.get(self, \"warning\")", "def on_reject(self, update, _context):\n self.send_message(update.message.chat_id, c.MSG_THANKS_NOTHANKS)", "def _strict_warning(self):\n if self.options.get('strict', True):\n return ('Strict mode enabled (the default), so this could be due to an '\n 'integer key, such as an HTTP status code.')\n return ('Strict mode disabled. Prance cannot help you narrow this further '\n 'down, sorry.')", "def is_warning(self) -> bool:\n return not self.get_warning()", "def is_warning(self) -> bool:\n return not self.get_warning()", "def issue_locked_warning() -> None:\n print(\"\\n[red]Warning:[/red] Your bank account has been completely \"\n \"locked out for exceeding 2 or more categories!\")", "def warning(self) -> str:\n return pulumi.get(self, \"warning\")", "def on_user_withheld(self, notice):\n log.debug(\"Received user withheld content notice: %s\", notice)", "def warning(self, *args, **kwargs): # real signature unknown\n pass", "def has_warn(self):\r\n return self._arm.has_warn", "async def status(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n if adv.is_on_adventure(ctx.author.id):\n out = adv.print_adventure(ctx.author.id)\n else:\n out = 'You are not doing anything at the moment.'\n await ctx.send(out)", "def warning(self) -> Optional[str]:\n return pulumi.get(self, \"warning\")", "def doNotTrack(self):\n # return False\n return 'lol'", "def error_occured(self) -> None:\r\n \r\n warnings.warn(\r\n '''An Error has occured when processing this photo!\r\n The plants are too emerged in some places to analyze.''',\r\n RuntimeWarning)", "def warning(self, msg):\n oscid = self.app.global_osc_id()\n print(\"WARNING : /Llia/%s : %s\" % (oscid, msg))", "def unknown(self, w):\n return True", "def warning(self) -> Optional[pulumi.Input['AnyArgs']]:\n return pulumi.get(self, \"warning\")", "def malicious(self):\n return self.probably_malicious", "def whyNotLegal(self):\r\n return self._getLegalityStatus()[1]", "def has_warning(self) -> bool:\n return self._has_warning", "def unknown(update, context):\n\n context.bot.send_message(\n chat_id=update.effective_chat.id,\n text=f\"Duh maaf!, aku gak ngerti yang kamu maksud {emo_persevere}\")" ]
[ "0.6197715", "0.60206753", "0.59842765", "0.5926007", "0.58933556", "0.5834593", "0.58117014", "0.5683251", "0.56700027", "0.56301874", "0.5621875", "0.56208336", "0.56013507", "0.56013507", "0.5592754", "0.55814165", "0.5532438", "0.5514373", "0.5513407", "0.5507845", "0.5483828", "0.54709774", "0.54476655", "0.54416686", "0.54385614", "0.5397255", "0.5393002", "0.5375822", "0.5373419", "0.53647006" ]
0.6347684
0
Applies each of the warnings to the posting. Returns the posting after it's been modified
def run_on(self, p: Posting) -> Posting:
    p.warnings = []
    for warning in self.pipeline:
        w = warning.evaluate(p)
        if w:
            p.warnings.append(w)
    return p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval_warnings(self):\n\n # Ensure the minimum number of warnings were raised.\n assert len(self.war) >= len(self.warn_msgs)\n\n # Test the warning messages, ensuring each attribute is present.\n testing.eval_warnings(self.war, self.warn_msgs)\n return", "def warnings(self) -> List[Error]:", "def warnings(self, warnings):\n\n self._warnings = warnings", "def PatchWarnings():\n # Since we are dealing with binary classification we calculate\n # precesion / recall / F1 wrt only the positive class.\n FLAGS.batch_results_averaging_method = \"binary\"\n # NOTE(github.com/ChrisCummins/ProGraML/issues/13): F1 score computation\n # warns that it is undefined when there are missing instances from a class,\n # which is fine for our usage.\n warnings.filterwarnings(\"ignore\", category=UndefinedMetricWarning)", "def warn(self) -> list:\n return self.__wrn", "def warnings_active(self) -> List[Error]:", "def warnings(self):\n return self.__warnings", "async def warnings(self, ctx):\n server = ctx.message.server\n server_id = server.id\n if not (server_id in self.warnlist2 and self.warnlist2[server_id]):\n await self.bot.say(\"No users are currently punished.\")\n return\n\n def getmname(mid):\n member = discord.utils.get(server.members, id=mid)\n if member:\n if member.nick:\n return '%s (%s)' % (member.nick, member)\n else:\n return str(member)\n else:\n return '(member not present, id #%d)'\n\n headers = ['Member', 'Warning Number', 'Moderator', 'Reason']\n table = []\n disp_table = []\n now = time.time()\n for member_id, data in self.warnlist2[server_id].items():\n\n #if not member_id.isdigit():\n #continue\n print (\"704\")\n member_name = getmname(data['User'])\n warnnum = data['Warning Number']\n punisher_name = getmname(data['Mod'])\n reason = data['Reason']\n table.append((member_name, warnnum, punisher_name, reason))\n\n #for _, name, warnum, mod, reason in sorted(table, key=lambda x: x[0]):\n disp_table.append((member_name, warnnum, punisher_name, reason))\n\n for page in pagify(tabulate(disp_table, headers)):\n await self.bot.say(box(page))", "def warnings(self) -> List[Error]:\n return self._get_warnings()", "async def setwarns(self, ctx, user: discord.Member, warnings: int = None):\r\n server = ctx.message.guild\r\n await self._create_warn(server, user)\r\n dataIO.save_json(self.JSON, self.data)\r\n if not warnings:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings == 0:\r\n del self.data[str(server.id)][\"user\"][str(user.id)]\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been reset\".format(user.name))\r\n return\r\n if warnings <= 0:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n if warnings >= 5:\r\n await ctx.send(\"You can set warnings to 1-4 only :no_entry:\")\r\n return\r\n self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] = warnings\r\n dataIO.save_json(self.JSON, self.data)\r\n await ctx.send(\"**{}'s** warnings have been set to **{}**\".format(user.name, warnings))", "def warning(self, *args, **kwargs):", "def get_warnings(self):\n pass", "def show_warnings(self):\n for w in self.warnings:\n w()", "def filter_post(self, room, post):\n room = str(room)\n to_notify = set()\n\n with self._lock:\n # minimise locking time by creating pre-processed copies\n regexes_for_room = self.notifications.get(room, {})\n regexes = {p: set(u) for p, u 
in regexes_for_room.items()}\n at_names = {u: _at_notification(n) for u, n in self.users.items()}\n\n for regex, users_for_regex in regexes.items():\n if re.search(regex, post):\n to_notify.update(users_for_regex)\n\n if not to_notify:\n return post\n\n notifications = \" \".join([at_names[user] for user in to_notify])\n return f\"{post} {notifications}\"", "def alert_new_posts(self):\n\n for ind, post in enumerate(self.parsed_feed['items']):\n # Record when we match the last-seen post. We will send alerts for\n # all posts occuring after match.\n if not self.is_new_post(post):\n cutoff = ind\n break\n item_list = list(reversed(self.parsed_feed['items'][:ind]))\n if len(item_list) == 0:\n return\n print '%d posts to send alerts for' % len(item_list)\n for post in item_list:\n if self.last_post is None or self.is_new_post(post):\n # Set text body\n tiny_url = tinyurl.create_one(str(post['id']))\n text_body = str(post['title']) + ' - ' + tiny_url\n self.send_sms(text_body)\n print 'Sent text for %s' % tiny_url\n break\n self.set_last_post(post)", "def classify_warnings(args):\n results = []\n for line, link in args['group']:\n common.classify_one_warning(line, link, results, args['project_patterns'],\n args['warn_patterns'])\n\n # After the main work, ignore all other signals to a child process,\n # to avoid bad warning/error messages from the exit clean-up process.\n if args['num_processes'] > 1:\n signal.signal(signal.SIGTERM, lambda *args: sys.exit(-signal.SIGTERM))\n return results", "def _warn(self, warning=None):\r\n debug.err('Warning: %s' % warning)\r\n\r\n if core.FW_conf['settings'].TestRun.ExecutionMode == 'Leader' and warning != None:\r\n executeInFollower(\"self.warn('%s')\" % (warning,))\r\n\r\n if type(warning) != types.ListType:\r\n warning = [warning]\r\n\r\n self.result.addStepWarning(warning)", "def warnings():\n return THE_LOGGER.warnings", "def warns(*warnings, **opts):\r\n import warnings as warnings_\r\n\r\n captured = []\r\n old_filters, old_showwarning = warnings_.filters, warnings_.showwarning\r\n warnings_.filters = old_filters[:]\r\n\r\n def showwarning(message, category, *args, **kwargs):\r\n if category not in warnings:\r\n old_showwarning(message, category, *args, **kwargs)\r\n return\r\n captured.append(message)\r\n warnings_.showwarning = showwarning\r\n\r\n for warning in warnings:\r\n warnings_.simplefilter(\"always\", warning)\r\n\r\n try:\r\n yield captured\r\n if opts.get(\"any\", False):\r\n assert captured\r\n else:\r\n assert set(warnings) == set(map(type, captured))\r\n finally:\r\n warnings_.filters = old_filters\r\n warnings_.showwarning = old_showwarning", "def warnings(self):\n return self.warning_buffer.warnings", "async def warnings(self, ctx, user: discord.Member):\r\n server = ctx.message.guild\r\n try:\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 1:\r\n action = \"Mute\"\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 2:\r\n action = \"Kick\"\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] >= 3:\r\n action = \"Ban\"\r\n if not self.data[str(server.id)][\"user\"][str(user.id)][\"reasons\"]:\r\n reasons = \"None\"\r\n else:\r\n reasons = \", \".join([x for x in self.data[str(server.id)][\"user\"][str(user.id)][\"reasons\"]])\r\n if self.data[str(server.id)][\"user\"][str(user.id)][\"warnings\"] == 1:\r\n s = discord.Embed(description=\"{} is on 1 warning\".format(user), colour=user.colour)\r\n s.set_author(name=str(user), icon_url=user.avatar_url)\r\n 
s.add_field(name=\"Next Action\", value=action, inline=False)\r\n s.add_field(name=\"Reasons\", value=reasons, inline=False)\r\n await ctx.send(embed=s)\r\n else:\r\n try:\r\n s = discord.Embed(description=\"{} is on {} warnings\".format(user, self.data[str(server.id)][\"user\"][\r\n str(user.id)][\"warnings\"]), colour=user.colour)\r\n s.set_author(name=str(user), icon_url=user.avatar_url)\r\n s.add_field(name=\"Next Action\", value=action, inline=False)\r\n s.add_field(name=\"Reasons\", value=reasons, inline=False)\r\n await ctx.send(embed=s)\r\n except:\r\n await ctx.send(\"That user has no warnings :no_entry:\")\r\n except:\r\n await ctx.send(\"That user has no warnings :no_entry:\")", "def log_check_warnings(self):\n self._log_check_warnings_object(self._info)\n self._log_check_warnings_object(self._tags)\n self._log_check_warnings_object(self._schemes)\n self._log_check_warnings_object(self._paths)\n self._log_check_warnings_object(self._securityDefinitions)\n self._log_check_warnings_object(self._definitions)\n pass", "def pop_extra_errors(self, errors_warnings):\n for action in ['warnings', 'errors']:\n params = list(errors_warnings[action].keys())\n for param in params:\n if param not in self.raw_input_fields:\n errors_warnings[action].pop(param)", "def warning(self, warning):\n pass", "def _apply_post_render_hooks(self, data, obj, fmt):\n hooks = self.post_render_hooks.get(fmt,[])\n for hook in hooks:\n try:\n data = hook(data, obj)\n except Exception as e:\n self.param.warning(f\"The post_render_hook {hook!r} could not \"\n f\"be applied:\\n\\n {e}\")\n return data", "def preprocess_post(self, post):\n # tokenize, clean, & tag part-of-speech for all words\n if self.document_level == 'postwise':\n\n doc_text = all_comments_from_post(post)\n # leave early if there's nothing there\n if doc_text == '':\n return []\n\n tokens = nltk.word_tokenize(doc_text)\n # TODO: skip this if there's no POS filtering args!\n tagged = nltk.pos_tag(tokens)\n\n # filter out most invalid words with valid_word()\n processed_document = []\n for word, pos_tag in tagged:\n if self.valid_word(word, pos_tag):\n cleaned_word = self.clean_word(word)\n # things like digits and other junk become empty string,\n # so exclude them from final document\n if cleaned_word:\n processed_document.append(cleaned_word)\n # finally, update the post\n post['postwise'] = {'tokens': processed_document, 'text': doc_text}\n self.postman.posts_write.update_one({'_id':post['_id']}, {'$set':post}, upsert=True)\n else:\n raise NotImplementedError('document_level: \"%s\"' % self.document_level)\n\n return processed_document", "def get_warning(miscobj):\n\n finalwarning = []\n typewarning = misctype_byname(miscobj.filetype).warning\n if typewarning:\n finalwarning.append(typewarning.replace('\\n', '<br>'))\n\n langwarnings = {'Python': ('This is known to work with Python 2 and 3.'),\n 'Python 2': ('This has been tested with Python 2, but it '\n 'won\\'t work with Python 3.'),\n 'Python 3': ('This has been tested with Python 3, but it '\n 'won\\'t work with Python 2.'),\n }\n if miscobj.language in langwarnings.keys():\n finalwarning.append(langwarnings[miscobj.language])\n\n if len(finalwarning) > 1:\n return mark_safe('<br><br>'.join(finalwarning))\n else:\n return finalwarning[0] if finalwarning else ''", "def process_post(new_post, cfg):\n id_already_handled_in_db = i18n['debug']['id_already_handled_in_db']\n discovered_submit_title = i18n['posts']['discovered_submit_title']\n rules_comment = i18n['posts']['rules_comment']\n 
yt_already_has_transcripts = i18n['posts']['yt_already_has_transcripts']\n\n if new_post['subreddit'] in cfg.upvote_filter_subs:\n # ignore posts if they don't meet the threshold for karma and the sub\n # is in our list of upvoted filtered ones\n if new_post['ups'] < cfg.upvote_filter_subs[new_post['subreddit']]:\n return\n\n if not is_valid(new_post['name'], cfg):\n logging.debug(id_already_handled_in_db.format(new_post['name']))\n return\n\n if new_post['archived']:\n return\n\n if new_post['author'] is None:\n # we don't want to handle deleted posts, that's just silly\n return\n\n logging.info(\n f'Posting call for transcription on ID {new_post[\"name\"]} posted by '\n f'{new_post[\"author\"]}'\n )\n\n if new_post['domain'] in cfg.image_domains:\n content_type = 'image'\n content_format = cfg.image_formatting\n\n elif new_post['domain'] in cfg.audio_domains:\n content_type = 'audio'\n content_format = cfg.audio_formatting\n\n elif new_post['domain'] in cfg.video_domains:\n if 'youtu' in new_post['domain']:\n if not valid_youtube_video(new_post['url']):\n add_complete_post_id(new_post['name'], cfg)\n return\n if get_yt_transcript(new_post['url']):\n np = cfg.r.submission(id=new_post['name'])\n np.reply(_(\n yt_already_has_transcripts\n ))\n add_complete_post_id(new_post['name'], cfg)\n logging.info(\n f'Found YouTube video, {get_yt_video_id(new_post[\"url\"])},'\n f' with good transcripts.'\n )\n return\n content_type = 'video'\n content_format = cfg.video_formatting\n else:\n # This means we pulled from a subreddit bypassing the filters.\n content_type = 'Other'\n content_format = cfg.other_formatting\n\n # Truncate a post title if it exceeds 250 characters, so the added\n # formatting still fits in Reddit's 300 char limit for post titles\n post_title = new_post['title']\n max_title_length = 250\n if len(post_title) > max_title_length:\n post_title = post_title[:max_title_length - 3] + '...'\n\n # noinspection PyBroadException\n try:\n result = cfg.tor.submit(\n title=discovered_submit_title.format(\n sub=new_post['subreddit'],\n type=content_type.title(),\n title=post_title\n ),\n url=reddit_url.format(new_post['permalink'])\n )\n result.reply(\n _(\n rules_comment.format(\n post_type=content_type,\n formatting=content_format,\n header=cfg.header\n )\n )\n )\n flair_post(result, flair.unclaimed)\n\n add_complete_post_id(new_post['name'], cfg)\n cfg.redis.incr('total_posted', amount=1)\n\n if cfg.OCR and content_type == 'image':\n # hook for OCR bot; in order to avoid race conditions, we add the\n # key / value pair that the bot isn't looking for before adding\n # to the set that it's monitoring.\n cfg.redis.set(new_post['name'], result.fullname)\n cfg.redis.rpush('ocr_ids', new_post['name'])\n\n cfg.redis.incr('total_new', amount=1)\n\n # The only errors that happen here are on Reddit's side -- pretty much\n # exclusively 503s and 403s that arbitrarily resolve themselves. A missed\n # post or two is not the end of the world.\n except Exception as e:\n logging.error(\n f'{e} - unable to post content.\\nID: {new_post[\"name\"]}\\n '\n f'Title: {new_post[\"title\"]}\\n Subreddit: '\n f'{new_post[\"subreddit\"]}'\n )", "def __clearAllWarnings(self):\n for editor in self.editors:\n editor.clearWarnings()", "def format_warn(self, *args):\n if self._pretty:\n return self.format_multiline_message(*args, color='magenta', start='[WARN] ', multiline=' ~~ ')\n return self.format_multiline_message(*args)" ]
[ "0.59168273", "0.57546896", "0.5613459", "0.5539314", "0.55158526", "0.54893833", "0.54492897", "0.54224324", "0.5386594", "0.5370216", "0.5369238", "0.5328996", "0.52466166", "0.5223718", "0.51754683", "0.51736605", "0.5168381", "0.5153321", "0.5140546", "0.51151484", "0.51069546", "0.50656104", "0.50194937", "0.5003292", "0.49719864", "0.49627295", "0.49294758", "0.49214008", "0.4904497", "0.48999822" ]
0.65688336
0
inConnectionLost! stdin is closed! (we probably did it)
def inConnectionLost(self): self.logger('stdin closed by process %d' % self._pid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outConnectionLost(self):\n self.logger('stdout closed by process %d' % self._pid)", "def stdin(self):\n pass", "def test_eofReceived(self):\n stdio = FakeStdio()\n channel = SSHSession()\n channel.stdio = stdio\n channel.eofReceived()\n self.assertTrue(stdio.writeConnLost)", "def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))", "def handle_input(sock):\n\tprint(\"Type message, enter to send. 'q' to quit\")\n\twhile True:\n\t\tmsg = input() #Blocks\n\t\tif msg == 'q':\n\t\t\tprint('Shut Down Client')\n\t\t\tsock.shutdown(socket.SHUT_RDWR)\n\t\t\tsock.close()\n\t\t\tbreak\n\t\ttry:\n\t\t\ttincanchat.send_msg(sock,msg) #Blocks until sent\n\t\texcept(BrokenPipeError,ConnectionError):\n\t\t\tbreak", "def eof_received(self):\n logger.debug(\"EOF from client, closing.\")\n self.connection_lost(None)", "def read_input(inp):\n epoll = select.epoll()\n epoll.register(sys.stdin.fileno(), select.EPOLLIN)\n while inp.running:\n if is_terminated():\n return\n\n events = epoll.poll(1)\n for fileno, event in events:\n line = \"[\"\n while \"[\" in line:\n line = sys.stdin.readline().strip(\",\").strip()\n inp.has_event = True\n try:\n event = json.loads(line)\n if \"instance\" in event:\n inp.callback(event)\n inp.redraw()\n except ValueError:\n pass\n epoll.unregister(sys.stdin.fileno())\n epoll.close()\n inp.has_event = True\n inp.clean_exit = True", "def clear_in():\r\n c = sys.stdin.read(1)\r\n while(c!='\\n'):\r\n c = sys.stdin.read(1)", "def eof_received(self):\n self.connection_lost('EOF')\n return False", "def process_IN_CLOSE_WRITE(self, event):", "def _flow_in(self):\n print(\"MESSENGER: flow_in online!\")\n while self.running:\n data = b\"\"\n while data[-5:] != b\"ROGER\" and self.running:\n try:\n slc = self.sock.recv(1024)\n except socket.timeout:\n time.sleep(0.1)\n except socket.error as E:\n print(\"MESSENGER: caught socket exception:\", E)\n self.teardown(1)\n except Exception as E:\n print(\"MESSENGER: generic exception:\", E)\n self.teardown(1)\n else:\n data += slc\n if not self.running:\n if data:\n print(\"MESSENGER: data left hanging:\" + data[:-5].decode(\"utf8\"))\n return\n data = data[:-5].decode(\"utf8\")\n self.recvbuffer.extend(data.split(\"ROGER\"))\n print(\"MESSENGER: flow_in exiting...\")", "def _stdin_writer(self):\n self._is_launched.wait()\n while True:\n message = self.stdin_queue.get()\n if message is None or self._is_stopping or not self._is_running.is_set():\n if message is not None:\n log.debug(\"Ignore {0} on process {1} because it's stopped\".format(message, self.name))\n break\n self._direct_stdin_writer(message)\n self._log(\"raw\", \"write to stdin : {0}\".format(message.encode(\"utf-8\")))", "def monitor_stdin():\n while len(sys.stdin.read(1024)):\n pass\n if args.multi:\n client_monitor.notify_parent_exit()\n else:\n trigger_exit(ExitMode.PARENT)", "def errConnectionLost(self):\n self.logger('stderr closed by process %d' % self._pid)", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def eofReceived(self):\n channel.SSHChannel.eofReceived(self)\n # print 'DirectTcpIpChannelClient:: remote eof'\n self.loseConnection()", "def _ioCollector(self, shutdown: Event, queue: Queue):\n fd = os.dup(sys.stdin.fileno())\n sys.stdin.close()\n poller = 
select.poll()\n poller.register(fd, select.POLLIN)\n while True:\n # be 50ms responsive\n event = poller.poll(50)\n if shutdown.is_set():\n break\n if len(event) == 0:\n continue\n d = os.read(fd, self.local_block_size)\n if len(d) == 0:\n # EOF condition\n queue.put(connectme_pb2.ConnectData(ctrl=connectme_pb2.EOF))\n logging.debug('closing stdin io collector')\n break\n queue.put(connectme_pb2.ConnectData(data=d, channel=connectme_pb2.STDIN))", "def connectionLost(reason):", "def stdin_read(self, data):\n self.write_master(data)", "def chk_stdin(self):\t# check keyboard input\n\t\tdr, dw, de = select([sys.stdin], [], [], 0)\n\t\treturn dr", "def parent_monitor_thread(selector):\n while True:\n try:\n c = sys.stdin.read(1)\n if c is None or len(c) == 0: # read returns None or empty string when stdin is closed.\n break\n except IOError: # Error on pipe should have same behavior as stdin close.\n break\n selector.abort() # Tell selector to abort", "def main():\n dt = DropToken()\n play = True\n while play:\n try:\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n if not line:\n break\n play = dt.inputProcess(line)", "def connectionMade(self):\n self._pid = self.transport.pid\n if self._pid:\n self.logger(\"Process has pid %d\" % self._pid)\n self.transport.closeStdin() # close stdin", "def read(self):\n global ALIVE\n line = sys.stdin.readline()\n if line:\n self.datalines.append(line.rstrip())\n else:\n ALIVE = False", "def _process_ended(self):\n self._is_stopping = True\n self.stdin_queue.put_nowait(None) # Ask stdin_thread to stop\n self._is_running.clear()\n unregister_thread(self)", "def exit_gracefully():\n input_channel.close()\n output_channel.close()\n cmd_channel.close()\n connection.close()", "def connection_lost(self, exc):\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)", "def flushInput(self):\n self.sock.setblocking(0)\n try:\n while len(self.sock.recv(1)) > 0:\n pass\n except BaseException:\n pass\n self.sock.setblocking(1)\n self.sock.settimeout(self.__timeout)", "def process_IN_CLOSE_NOWRITE(self, event):", "def test_pipe_to_stdin(self):\n original_stdin = sys.stdin\n with self.pipe_to_stdin() as input:\n self.assertNotEqual(original_stdin, sys.stdin)\n input.write(\"Hello world!\\n\")\n self.assertEqual(sys.stdin.readline(), \"Hello world!\\n\")\n self.assertEqual(original_stdin, sys.stdin)" ]
[ "0.66580963", "0.6314382", "0.62261367", "0.6172009", "0.6104817", "0.6060875", "0.6025757", "0.60071176", "0.59619147", "0.59474856", "0.59413564", "0.5915416", "0.58921003", "0.5865144", "0.5861129", "0.57965195", "0.5780947", "0.5778603", "0.57679284", "0.57531106", "0.5735904", "0.5707701", "0.56309223", "0.5617409", "0.55954474", "0.5577284", "0.5558994", "0.5551288", "0.5521366", "0.5519316" ]
0.8422832
0
outConnectionLost! The child closed their stdout!
def outConnectionLost(self): self.logger('stdout closed by process %d' % self._pid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errConnectionLost(self):\n self.logger('stderr closed by process %d' % self._pid)", "def inConnectionLost(self):\n self.logger('stdin closed by process %d' % self._pid)", "def __exit__(self, exc_type, exc_val, exc_tb):\n sys.stdout.flush()\n sys.stdout.close()\n sys.stdout = sys.__stdout__", "def close(self):\n return self.old_stdout", "def on_end(self, returncode):\n self.stdout.close()\n self.stderr.close()", "def process_IN_CLOSE_WRITE(self, event):", "def unswitchstdout(self):\n\t\tsys.stdout = self.old_stdout", "def test_output_stdout(kube_api, clean_kube, kube_event_pipe: KubeEventPipe) -> None:\n with kube_event_pipe(output_file_name=None) as event_pipe:\n make_kube_event(kube_api)\n event_pipe.get_events(min_count=1)\n # SIGHUP will still make it reopen the file and even though it's a pipe, it should still\n # work.\n event_pipe.process.send_signal(signal.SIGHUP)\n make_kube_event(kube_api)\n event_pipe.get_events(min_count=1)", "async def _output_handler(self, child_output: pipes.Pipe,\n subscribers: List[pipes.Pipe]):\n last_ts = None\n try:\n while True:\n data = await child_output.read()\n if len(data) > 0:\n\n if not self._verify_monotonic_timestamps(data, last_ts, child_output.name):\n for pipe in subscribers:\n await pipe.close_interval()\n await self.restart()\n break\n last_ts = data['timestamp'][-1]\n\n child_output.consume(len(data))\n for pipe in subscribers[:]:\n # if child_output.name=='output2':\n # print(\"writing to %d subscribers\" % len(subscribers))\n try:\n\n await asyncio.wait_for(pipe.write(data),\n self.SUBSCRIBER_TIMEOUT)\n except (ConnectionResetError, BrokenPipeError):\n log.warning(\"subscriber write error [%s] \" % pipe.stream)\n subscribers.remove(pipe)\n except asyncio.TimeoutError:\n log.warning(\"subscriber [%s] timed out\" % pipe.stream)\n pipe.close_interval_nowait()\n if child_output.end_of_interval:\n for pipe in subscribers:\n pipe.close_interval_nowait()\n\n except (EmptyPipe, asyncio.CancelledError):\n pass\n except PipeError as e:\n if 'closed pipe' in str(e):\n # during shutdown the pipe may be closed but\n # another read might be attempted by the output_handler\n pass\n else:\n log.warning(\"Worker %s, pipe %s: %s\" % (\n self.name, child_output.name, str(e)))", "def stop(self):\n # Print the escape character to make the readOutput method stop:\n self.origstream.write(self.escape_char)\n # Flush the stream to make sure all our data goes in before\n # the escape character:\n self.origstream.flush()\n\n # Reads the output and stores it in capturedtext\n self.readOutput()\n\n # Close the pipes:\n os.close(self.pipe_in)\n os.close(self.pipe_out)\n\n # Restore the original streams:\n os.dup2(self.streamfd, self.origstreamfd)\n\n # Close the duplicate streams:\n os.close(self.streamfd)", "def outConnectEvent(self):\r\n pass", "def\tconnectionLost(self, reason):\n\t\tprint \"[:)] Connectionlost con reason: \", reason, self.addr, self.port", "def disable(self):\n self.out.close()\n sys.stdout = self._stdout", "def close(self):\n self.outfd.close()", "def poutput(self, msg, end='\\n'):\n if msg is not None and msg != '':\n try:\n msg_str = '{}'.format(msg)\n self.stdout.write(msg_str)\n if not msg_str.endswith(end):\n self.stdout.write(end)\n except BROKEN_PIPE_ERROR:\n # This occurs if a command's output is being piped to another process and that process closes before the\n # command is finished. We intentionally don't print a warning message here since we know that stdout\n # will be restored by the _restore_output() method. 
If you would like your application to print a\n # warning message, then override this method.\n pass", "def close(self):\n self.output.close()", "def stop(self):\n # This will result in a -1 being written to the stream, indicating the server is closing down.\n self._write_stream(-1, \"\")\n\n self.__channel.close()\n self.__channel = None\n self.__sys.stdout = self.__old_stdout\n self.__sys.stderr = self.__old_stderr", "def stdout(self):\n pass", "def close(self):\n sys.stdout.write('\\n')", "def connectionLost(reason):", "def process_IN_CLOSE_NOWRITE(self, event):", "def outCloseEvent(self):\r\n pass", "def connection_closed(self, exc):\n _logger.info(\"Connection lost: %s\", str(exc))\n super().close()", "def __init__( self, out = None ):\n self._out = out if out is not None else sys.stdout", "def _dumpStdout(self, p, outputCallback):\n while p.poll() is None:\n try:\n # May raise IOError if in non-blocking mode\n l = p.stdout.read()\n outputCallback(l)\n except IOError:\n pass\n time.sleep(0.1)\n outputCallback(p.stdout.read())", "def readOutput(self):\n while True:\n char = os.read(self.pipe_out, 1).decode(self.encoding)\n if not char or self.escape_char in char:\n break\n self.capturedtext += char", "def standard_output(self) -> global___Statement.StandardOutput:", "def on_connection_end() -> None:\r\n print(\"Connection lost with G-Earth\")\r\n print()", "def on_connection_closed(self):", "def exit_gracefully():\n input_channel.close()\n output_channel.close()\n cmd_channel.close()\n connection.close()" ]
[ "0.7060092", "0.70072204", "0.63617283", "0.6230432", "0.622997", "0.60452706", "0.5994969", "0.59138", "0.58624", "0.5796713", "0.57916695", "0.5784154", "0.5778169", "0.5777894", "0.57502854", "0.57466835", "0.5744456", "0.5728837", "0.5717156", "0.5651574", "0.5649623", "0.5628171", "0.56061745", "0.5605953", "0.5605049", "0.56049794", "0.55893385", "0.55736643", "0.55597734", "0.5554263" ]
0.8288977
0
errConnectionLost! The child closed their stderr.
def errConnectionLost(self): self.logger('stderr closed by process %d' % self._pid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outConnectionLost(self):\n self.logger('stdout closed by process %d' % self._pid)", "def connection_closed(self, exc):\n _logger.info(\"Connection lost: %s\", str(exc))\n super().close()", "def inConnectionLost(self):\n self.logger('stdin closed by process %d' % self._pid)", "def connectionLost(reason):", "def\tconnectionLost(self, reason):\n\t\tprint \"[:)] Connectionlost con reason: \", reason, self.addr, self.port", "def connectionLost(self,reason):\n pass", "def on_connection_open_error(self, _unused_connection, err):\n self.logger.info('Connection open failed: %s', err)\n self.reconnect()", "def on_connection_open_error(self, _unused_connection, err):\n # LOGGER.error('Connection open failed: %s', err)\n self.reconnect()", "def connection_lost(self, exc):\n pass", "def _connect_failed(self):\n\t\tself.root.stdout.write(\"Error: Connection Failed!\\n\")\n\t\tself.client = False", "def clientConnectionLost(self, connector, reason):\n\n moduleCoordinator.ModuleCoordinator().putError(\"Connection lost to \" + self.config['botnet'], self.module)", "def __connection_lost(self):\n print(\"Error: connection lost.\")\n try:\n # Try and send a message back to the server to notify connection\n # lost\n self.client_socket.send(\"q\".encode())\n except:\n pass\n # Raise an error to finish\n raise Exception", "async def connection_lost(self):\n logging.info('connection dropped')", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.conn.close()\n if exc_val:\n raise", "def on_connection_error(self):\n log.error(\"Stream connection has errored or timed out\")", "def error(self, msg=None):\n\t\tdebug(\"Connection Error:\", True)\n\n\t\tif msg is not None:\n\t\t\tdebug(msg, True)\n\t\t\n\t\tif self.port is not None:\n\t\t\tself.port.close()\n\t\t\n\t\tself.state = State.Unconnected", "def _write_err_msg_and_quit(self, msg):\n sys.stderr.write(msg)\n sys.exit(1)", "def connection_lost(self, exc: Optional[Exception]) -> None:\n if exc:\n logger.critical(f\"udp bridge lost its connection {exc}\")\n else:\n logger.info(\"udp connection stopped\")", "def connection_lost(self, exc):\n if not self._closing:\n self._closing = True\n self.log.info('{about}{reason}'.format(\n about=self.__str__(),\n reason=': {}'.format(exc) if exc is not None else ''))\n self.waiter_connected.cancel()\n self.waiter_closed.set_result(self)", "def connection_lost(self, exc):\n if self._stream.done:\n # Stream is done, no need to panic\n pass\n else:\n self._logger.debug('[%s] Connection lost!', self._sock_id, exc_info=exc)", "def handle_connection_lost(self, exc: Optional[Exception]) -> None:", "def connection_closed(self) -> bool:", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.socket.close()", "def _logConnectionLost(self, reason):\n line = 'Disconnected from %s' %(self.peer)\n log.msg(line)\n log.msg(reason.getErrorMessage())", "def connection_lost(self, exc):\n logger.info('The server closed the connection')\n self.loop.stop()", "def handle_err(self, err, msg):\n assert \"BAD:\" in msg.value().decode('utf-8')\n assert err is not None\n self.remaining -= 1", "def handle_err(self):\n pass", "def close(self):\n return self.old_stderr", "def connectionLost(self, reason):\n print \"connection lost from\", self.addr\n reactor.stop()", "def handle_error(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()" ]
[ "0.75290006", "0.70639664", "0.68610656", "0.67444515", "0.65324247", "0.64623815", "0.64605296", "0.64530504", "0.64157903", "0.64110464", "0.63686746", "0.6255252", "0.6241299", "0.6238995", "0.62332237", "0.6221207", "0.6165477", "0.6143464", "0.6114373", "0.61118823", "0.60863984", "0.60861427", "0.607671", "0.607362", "0.607338", "0.607264", "0.6070139", "0.60524076", "0.6038484", "0.6031742" ]
0.8708799
0
Evaluates the poem based on how similar the description words are
def evaluate(poem): score = 0 sentence_list = poem.split("\n") for d1 in sentence_list: d1 = d1.split() if (len(d1) > 2): if (len(wordnet.synsets(d1[-1])) > 1): w1 = wordnet.synsets(d1[-1])[0] w2 = wordnet.synsets(d1[-2])[0] if (w1.wup_similarity(w2)!= None): score += w1.wup_similarity(w2) else: # arbitrary default value score += .1 return score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_find_phrase_matches1(self):\n\t\ttest = sentiment.LibraryRun(self.text1, self.lib)\n\t\tobj_ut = test.find_phrase_matches(self.tokens_generator1)[0]\n\t\tself.assertEqual(dict(obj_ut),\n\t\t\t{'not good': [[2, -1, 0]]})", "def test_find_phrase_matches2(self):\n\t\ttest = sentiment.LibraryRun(self.text2, self.lib)\n\t\tobj_ut = test.find_phrase_matches(self.tokens_generator2)[0]\n\t\tself.assertEqual(dict(obj_ut),\n\t\t\t{'not good': [[2, -1, 0], [4, -1, 0]]})", "def similar(text, database):\n # TODO\n pass", "def evaluate_description(self, description):\n description = copy.deepcopy(description)\n output = defaultdict(list)\n representations = {}\n\n hidden = self.init_hidden(1)\n\n self._subsample(hidden, False, self.search_space, [], representations, output, description)\n\n return (description, torch.cat(output['logprob']).transpose(0, 1),\n torch.cat(output['values']), torch.cat(output['entropies']))", "def wordSimilarityRatio(sent_1,sent_2):", "def __score(self, name, summary):\n score = 0\n for queryTerm in self.__query:\n if queryTerm.lower() in name.lower():\n score += 4\n if queryTerm.lower() == name.lower():\n score += 4\n \n if queryTerm.lower() in summary.lower():\n if QRegExp(r'\\b{0}\\b'.format(QRegExp.escape(queryTerm)),\n Qt.CaseInsensitive).indexIn(summary) != -1:\n # word match gets even higher score\n score += 2\n else:\n score += 1\n \n return score", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def analyze(self, text):\n\n score = 0.0;\n\n words = text.split(' ')\n # match each word in either the positives or negatives list adding or subtracting 1 from the score if present\n for word in words:\n for w in self.positives:\n if w == word.lower():\n score += 1.0\n continue\n \n for w in self.negatives:\n if w == word.lower():\n score -= 1.0\n continue\n\n return score", "def check_analogy(word1, word2, word3, word4, model):\n LoM = model.most_similar(positive=[word2, word3], negative=[word1], topn=100)\n LoWords = []\n for x in LoM:\n LoWords += [x[0]]\n if word4 not in LoWords:\n return 0\n else:\n score = 100\n for x in LoWords:\n if x != word4:\n score += -1\n else:\n return score", "def Extended_Lesk(word1,word2):\n\n #Creates a list of the word, and one layer of hyponyms\n list1 = [word1]\n for i in word1.hyponyms():\n list1.append(i)\n list2 = [word2]\n for i in word2.hyponyms():\n list2.append(i)\n\n #Creates a list of each of the above words' definitions, tokenized\n words1 = []\n words2 = []\n for i in list1:\n words1.append([l for l in word_tokenize(i.definition())])\n for i in list2:\n words2.append([l for l in word_tokenize(i.definition())])\n\n #Calculates the Maximum length of the Longest Definition\n lengths = []\n lengths.extend(len(l) for l in words1)\n lengths.extend(len(l) for l in words2)\n maxim = max(lengths)\n\n igramcount = []\n igram1 = []\n igram2 = []\n\n # Creates N-grams for each definition for each N, from 1:max(lengths)\n for i in range(int(maxim)):\n for g in words1:\n for l in ngrams(g, i+1):\n igram1.append(l)\n for f in words2:\n for m in ngrams(f, i+1):\n igram2.append(m)\n\n #For Each N-gram in the first set, which matches that of the Second set,\n # Denoting a form of \"Similarity\" between the two 
definitions,\n # Record the Value of N into a new List, igramcount.\n for x in set(igram1):\n if x in set(igram2):\n igramcount.append(i + 1)\n\n igram1 = []\n igram2 = []\n\n #Square the values of igramcount, and return the sum as the value of Extended Lesk.\n squared = [number**2 for number in igramcount]\n return sum(squared)", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def similarWords(targetWordList,targetWord):\n print(\"\\n\" + \"Similar words for '\" + targetWord + \"': \")\n text = nltk.Text(targetWordList)\n print(text.similar(targetWord))", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def get_title_similarity(self):\n title_words = []\n ignore_words = ['the', 'and', 'or', 'to', 'at', 'on', 'of']\n for w in self.target_movie.title.split(' '):\n w = w.strip('- ,:(){}[]')\n if w.lower() not in ignore_words:\n title_words.append(w)\n\n # if last word is a number then it's an year and should be ignored.\n if len(title_words) > 1 and title_words[-1].isdigit():\n title_words = title_words[:-1]\n\n print(title_words)\n res = self.db.query(Movie).filter(\n Movie.movie_id != self.target_movie.movie_id).filter(or_(\n Movie.title.ilike(r'%' + tw + r'%') for tw in title_words\n )).all()\n\n target_clean_title = string_cleanup(self.target_movie.title)\n\n print(\"%i records from partial title match\" % len(res))\n TSW = self.TITLE_SIMILARITY_WEIGHT\n for rec in res:\n mc_title = string_cleanup(rec.title)\n smid = rec.movie_id\n if smid not in self.recommendation_pool:\n self.recommendation_pool[smid] = {\n 'movie_obj': rec,\n 'title_similarity': jaccard_index(\n target_clean_title, mc_title, ' ') * TSW\n }\n\n else:\n self.recommendation_pool[smid]['title_similarity'] = \\\n jaccard_index(\n target_clean_title, mc_title, ' ') * TSW", "def compute_readability(text):\n total_words = 0\n total_sentences = 0\n total_syllables = 0\n score = 0\n\n words = text.split()\n total_words = len(text.split()) \n total_sentences = count_sentences(text)\n total_syllables = count_syllables(words)\n \n score = 206.835 - 1.015 * ( total_words / total_sentences) - 84.6 * (total_syllables / total_words)\n if score > 90.00:\n answer = 'Texto de nível do 5º ano do Ensino Fundamental, facilmente compreendido por um aluno de 11 anos.'\n elif score <= 90.00 and score > 80.00:\n answer = 'Texto de nível do 6º ano do Ensino Fundamental, inglês coloquial para consumidores.'\n elif score <= 80.00 and score > 70.00:\n answer = 'Texto de nível do 7º ano do Ensino Fundamental, razoavelmente fácil de ler.'\n elif score <= 70.00 and score > 60.00:\n answer = 'Texto de nível do 9º ano do Ensino Fundamental, Inglês simples 
compreendido por adolescentes de 13 - 15 anos.'\n elif score <= 60.00 and score > 50.00:\n answer = 'Texto de 1º a 3º ano do Ensino Médio, razoavelmente difícil de ler.'\n elif score <= 50.00 and score > 30.00:\n answer = 'Texto de nível Universitário, difícil de ler.'\n else:\n answer = 'Texto de nível de Graduação, muito difícil de ler e mais bem-compreendido por universitários graduados.'\n \n print('Pontuação Total:', score, answer)", "def test_distribution_with_many_clauses(self):\n spi_search = \"find a mele and brooks and holtkamp and o'connell\"\n inv_search = \"author:mele author:brooks author:holtkamp author:o'connell\"\n self._compare_searches(inv_search, spi_search)", "def phrase_scoring_ranking(phrases,model,dataset,bitext):\n e_phrases = []\n f_phrases = []\n count = 0\n f_phrase_count = {}\n e_phrase_count = {} #not needed\n #e_f_pair_count = {} #e words as rows and f words as columns\n f_e_pair_count = {} #e words as rows and f words as columns\n for phrase_set in phrases:\n for phrase in phrase_set:\n e_phrases.append(phrase[3])\n f_phrases.append(phrase[2])\n if phrase[2] in f_phrase_count:\n f_phrase_count[phrase[2]] += 1\n else:\n f_phrase_count[phrase[2]] = 1\n if phrase[2] in f_e_pair_count:\n if phrase[3] in f_e_pair_count[phrase[2]]:\n f_e_pair_count[phrase[2]][phrase[3]] += 1\n else:\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n else:\n f_e_pair_count[phrase[2]]={}\n f_e_pair_count[phrase[2]][phrase[3]] = 1\n\n e_phrases = list(set(e_phrases))\n f_phrases = list(set(f_phrases))\n ep_count = len(e_phrases)\n fp_count = len(f_phrases)\n #pmatrix = np.empty(ep_count*fp_count) # ######Not needed if dictionary is used\n #pmatrix = pmatrix.reshape(ep_count,fp_count)\n #pmatrix.fill(0)\n ef_prob_dict = {}\n for e in e_phrases:\n for f in f_phrases:\n ef_count =count_fe_pair(e,f,f_e_pair_count)# f_e_pair_count[e][f]\n f_count = f_phrase_count[f]\n e_idx = e_phrases.index(e) ###Check the count logic again\n f_idx = f_phrases.index(f)\n pair_prob = ef_count/f_count\n #pmatrix[e_idx][f_idx] = pair_prob\n if f in f_e_pair_count:\n if e in f_e_pair_count[f]:\n if f in ef_prob_dict:\n ef_prob_dict[f][e]=pair_prob\n else:\n ef_prob_dict[f] = {}\n ef_prob_dict[f][e] = pair_prob\n\n #if pmatrix[e_idx][f_idx] != 0:\n # print(e,f,ef_count,f_count,pair_prob)\n return ef_prob_dict", "def smart_answer(content, qwords):\n\n zipped = zip(qwords, qwords[1:])\n points = 0\n for element in zipped:\n if content.count(element[0] + \" \" + element[1]) != 0:\n points += 1000\n print(points)\n return points", "def __profanity_scan(self, title, text):\n profane_list = set(PROFANITY)\n text_words = nltk.word_tokenize(text)\n text_count = 0\n title_count = 0\n for word in text_words:\n if word.lower() in profane_list:\n text_count += 1\n for word in title.split():\n if word.lower() in profane_list:\n title_count += 1\n return title_count, text_count", "def test_snippet_2(self):\n\n text = \"Jon is a carpenter and an engineer\"\n drs = Drs.create_from_natural_language(text)\n expected_drs = Drs.create_from_predicates_string(\"\"\"\n {'word': 'is', 'tag': 'v', 'compound': 'is', 'entity': '', 'lemma': 'be', 'gender_guess': None, 'is_head_token': True, 'refers_to': None, 'negated': 'false'}(v1), {'word': 'Jon', 'tag': 'n', 'compound': 'Jon', 'entity': 'PERSON', 'lemma': 'Jon', 'gender_guess': 'm', 'is_head_token': False, 'refers_to': None, 'negated': 'false'}(v0), {'word': 'carpenter', 'tag': 'n', 'compound': 'carpenter', 'entity': '', 'lemma': 'carpenter', 'gender_guess': None, 'is_head_token': False, 
'refers_to': None, 'negated': 'false'}(v3), {'word': 'engineer', 'tag': 'n', 'compound': 'engineer', 'entity': '', 'lemma': 'engineer', 'gender_guess': None, 'is_head_token': False, 'refers_to': None, 'negated': 'false'}(v6), {'type': 'AGENT'}(v1,v0), {'type': 'ATTR'}(v1,v6), {'type': 'ATTR'}(v1,v3), {'type': 'ATTR'}(v1,v6)\n \"\"\")\n lst = drs.apply(DrsMatcher(expected_drs, metric))\n is_match = len(lst) > 1\n self.assertTrue(is_match)", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob", "def test_score_text2(self):\n\t\t#import pdb; pdb.set_trace()\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, -1.25)", "def try1():\n path = '/Users/mayankkejriwal/datasets/eswc2017/disasters/'\n model = Word2Vec.load_word2vec_format(path+'GoogleNews-vectors-negative300.bin', binary=True)\n model.init_sims(replace=True)\n keys = ['charlotte', 'Charlotte', 'yorktown', 'LA']\n for key in keys:\n try:\n # print model.most_similar(positive=['woman', 'king'], negative=['man'])\n j = model[key]\n print 'found...',\n print key\n except KeyError:\n print 'not found...',\n print key\n continue\n print model.similarity('charlotte', 'carolina')\n print model.similarity('LA', 'California')", "def generate_analogy(word1, word2, word3, model):\n LoM = model.most_similar(positive=[word2, word3], negative=[word1], topn=100)\n return LoM", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def word_match(question, morph_story_sentence_words):\n verbs_more_weightage = []\n question_pos_words = p.pos_tagger(question)\n for word, pos_tag in question_pos_words.items():\n if 
pos_tag == \"VERB\":\n verbs_more_weightage.append(word)\n verbs_more_weightage = p.morphological_roots(verbs_more_weightage)\n\n question_no_stop_words_punct = p.removeStopWords(question)\n question_no_stop_words_punct = question_no_stop_words_punct.translate(str.maketrans('', '', string.punctuation))\n morphological_root_of_question = p.word_tokenizer(question_no_stop_words_punct)\n morphological_root_of_question = p.morphological_roots(morphological_root_of_question)\n\n score = 0\n for morph_story_word in morph_story_sentence_words:\n if morph_story_word in morphological_root_of_question:\n if morph_story_word in verbs_more_weightage:\n score = score + 6\n else:\n score = score + 3\n elif morph_story_word in verbs_more_weightage:\n score = score + 6\n return score", "def __find_topics(self, concepts):\n\n # Set up\n found_topics = dict() # to store the matched topics\n explanation = dict()\n\n # finding matches\n for concept in concepts:\n evgrams = everygrams(concept.split(), 1, 3) # list of unigrams, bigrams, trigrams\n for grams in evgrams:\n gram = \"_\".join(grams)\n gram_without_underscore = \" \".join(grams)\n #### Finding similar words contained in the model\n\n list_of_matched_topics = []\n\n if self.fast_classification:\n list_of_matched_topics = self.__get_similar_words_from_cached_model(gram,grams)\n else:\n list_of_matched_topics = self.__get_similar_words_from_full_model(gram, grams)\n\n\n for topic_item in list_of_matched_topics:\n\n topic = topic_item[\"topic\"]\n str_sim = topic_item[\"sim_t\"]\n wet = topic_item[\"wet\"]\n sim = topic_item[\"sim_w\"]\n\n\n if str_sim >= self.min_similarity and topic in self.cso.topics_wu:\n\n\n if topic in found_topics:\n #tracking this match\n found_topics[topic][\"times\"] += 1\n\n found_topics[topic][\"gram_similarity\"].append(sim)\n\n #tracking the matched gram\n if gram in found_topics[topic][\"grams\"]:\n found_topics[topic][\"grams\"][gram] += 1\n else:\n found_topics[topic][\"grams\"][gram] = 1\n\n #tracking the most similar gram to the topic\n if str_sim > found_topics[topic][\"embedding_similarity\"]:\n found_topics[topic][\"embedding_similarity\"] = str_sim\n found_topics[topic][\"embedding_matched\"] = wet\n\n else:\n #creating new topic in the result set\n found_topics[topic] = {'grams': {gram:1},\n 'embedding_matched': wet,\n 'embedding_similarity': str_sim,\n 'gram_similarity':[sim],\n 'times': 1,\n 'topic':topic}\n\n\n\n if sim == 1:\n found_topics[topic][\"syntactic\"] = True\n\n\n\n primary_label_topic = self.cso.get_primary_label_wu(topic)\n if primary_label_topic not in explanation:\n explanation[primary_label_topic] = set()\n\n explanation[primary_label_topic].add(gram_without_underscore)\n\n return found_topics, explanation", "def eval_paradigm(gold, guess):\n correct, total = 0., 0.\n for lemma, D in gold.items():\n correct += 1\n total += 1\n for tag, str1 in D.items():\n str2 = u\"\" # empty string if no guess\n if lemma in guess and tag in guess[lemma]:\n str2 = guess[lemma][tag]\n if str1 != str2:\n correct -= 1\n break\n return round(correct/total*100, 2)", "def qualify_words():\n config = get_config()\n\n all_feature_matrices = []\n all_opinion_matrices = []\n\n # first 5 parts are labeled, thus are useful\n all_feature_label_vectors = []\n all_opinion_label_vectors = []\n\n for fname in config.file_names:\n feature_X, feature_dims = load_feature_matrices(fname)\n opinion_X, opinion_dims = load_opinion_matrices(fname)\n feature_y = load_feature_labels(fname)\n opinion_y = load_opinion_labels(fname)\n\n # 
append to all collector\n all_feature_matrices.append(feature_X)\n all_feature_label_vectors.append(feature_y)\n all_opinion_matrices.append(opinion_X)\n all_opinion_label_vectors.append(opinion_y)\n # use first 5 for training\n # stack first 5\n feature_training_X = []\n feature_training_y = []\n opinion_training_X = []\n opinion_training_y = []\n for i in range(5):\n feature_training_X.append(all_feature_matrices[i])\n feature_training_y.append(all_feature_label_vectors[i])\n opinion_training_X.append(all_opinion_matrices[i])\n opinion_training_y.append(all_opinion_label_vectors[i])\n\n feature_training_X = np.hstack(feature_training_X)\n feature_training_y = np.hstack(feature_training_y)\n opinion_training_X = np.hstack(opinion_training_X)\n opinion_training_y = np.hstack(opinion_training_y)\n\n # using combination of rule and ranking score as features\n feature_model = MultinomialNB()\n opinion_model = MultinomialNB()\n\n # training\n feature_model.fit(np.transpose(feature_training_X), feature_training_y.ravel())\n opinion_model.fit(np.transpose(opinion_training_X), opinion_training_y.ravel())\n\n # predicting on candidate aspects and opinions, extracted from amazon reviews\n for i in range(5, len(config.file_names)):\n fname = config.file_names[i]\n feature_pred = feature_model.predict_proba(\n np.transpose(all_feature_matrices[i]))[:,1]\n opinion_pred = opinion_model.predict_proba(\n np.transpose(all_opinion_matrices[i]))[:,1]\n # pickle the prediction results\n with open('../results/' + fname + '_feature_pred_score.pickle', 'wb') as f:\n pickle.dump(feature_pred, f)\n with open('../results/' + fname + '_opinion_pred_score.pickle', 'wb') as f:\n pickle.dump(opinion_pred, f)" ]
[ "0.6371693", "0.63224393", "0.6299222", "0.61357975", "0.60748553", "0.6043327", "0.6035676", "0.60105914", "0.5973418", "0.59562", "0.5948996", "0.5945075", "0.5931568", "0.59273475", "0.59270376", "0.5922571", "0.59209543", "0.5900297", "0.5843136", "0.5840886", "0.5836197", "0.5836197", "0.58353156", "0.58336705", "0.58292353", "0.5818857", "0.58126414", "0.58037114", "0.5792198", "0.5782439" ]
0.7116603
0
updates designated x and y fields with coordinates, reprojecting current cs to to_cs. to_cs = .prj or cs name or factory code (wkid).
def convert_and_update_xyfield(workspace,fc,xfield,yfield,to_cs,transformationname = None): # http://desktop.arcgis.com/en/arcmap/10.4/analyze/arcpy-classes/pdf/geographic_coordinate_systems.pdf # http://desktop.arcgis.com/en/arcmap/latest/map/projections/pdf/geographic_transformations.pdf arcpy.env.workspace = workspace errorcount = 0 to_cs = arcpy.SpatialReference(to_cs) with arcpy.da.UpdateCursor(fc,['SHAPE@',xfield,yfield]) as cursor: for row in cursor: try: if transformationname: new_cs = row[0].projectAs(to_cs,transformationname) else: new_cs = row[0].projectAs(to_cs) row[1] = new_cs.firstPoint.X # xfield = SHAPE@X row[2] = new_cs.firstPoint.Y # yfield = SHAPE@Y cursor.updateRow(row) except RuntimeError as e: errorcount += 1 print(f'{e}') except AttributeError as e: errorcount += 1 print(f'{e}') print(f'errorcount: {errorcount}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_xy_field(workspace,fc,xfield,yfield):\n arcpy.env.workspace = workspace\n errorcount = 0\n with arcpy.da.UpdateCursor(fc,['SHAPE@X','SHAPE@Y',xfield,yfield]) as cursor:\n for row in cursor:\n try:\n row[2] = row[0] # xfield = SHAPE@X\n row[3] = row[1] # yfield = SHAPE@Y\n cursor.updateRow(row)\n except RuntimeError as e:\n errorcount += 1\n print(f'{e}')\n print(f'errorcount: {errorcount}')", "def update_coords(self, coords):\n Mol = copy.deepcopy(self.m)\n c1 = Mol.GetConformer(-1)\n for i in range(self.na):\n coords_i = Point3D()\n coords_i.x, coords_i.y, coords_i.z = coords[i]\n c1.SetAtomPosition(i, coords_i)\n self.m = Mol", "def correct_coordinates(ds, verbose=False):\n ds = ds.copy()\n for co in ['x', 'y', 'lon', 'lat', 'lev',\n \"bnds\", \"lev_bounds\", \"lon_bounds\", \"lat_bounds\", \"time_bounds\",\n 'vertices_latitude', 'vertices_longitude',\n ]:\n if co in ds.variables:\n if verbose:\n print('setting %s as coord' %(co))\n ds = ds.set_coords(co)\n return ds", "def reprojectAndSaveNewShapefile(inFilepath,outFilepath,to_EPSG):\r\n import geopandas as gpd\r\n from fiona.crs import from_epsg\r\n\r\n inFile = gpd.read_file(inFilepath)\r\n inFile_proj = inFile.copy()\r\n inFile_proj['geometry'] = inFile_proj['geometry'].to_crs(epsg=to_EPSG)\r\n inFile_proj.crs = from_epsg(to_EPSG)\r\n inFile_proj.to_file(outFilepath)", "def reprojectQcew(overwrite=False):\n\n\tif exists(qcew_2913) and not overwrite:\n\t\tprint '\\nstate plane qcew already exists, if you wish to'\n\t\tprint 'overwrite the existing file use the \"overwrite\" flag\\n'\n\t\treturn\n\n\tgeom_type = 'POINT'\n\ttemplate = src_qcew\n\tospn = arcpy.SpatialReference(2913)\n\tmanagement.CreateFeatureclass(dirname(qcew_2913),\n\t\tbasename(qcew_2913), geom_type, template, spatial_reference=ospn)\n\n\ti_cursor = da.InsertCursor(qcew_2913, '*')\n\n\ts_fields = ['Shape@', '*']\n\twith da.SearchCursor(src_qcew, s_fields) as s_cursor:\n\t\t# replace point coordinates with geometry object in field\n\t\t# definition\n\t\tfields = list(s_cursor.fields)\n\t\tfields[1] = fields.pop(0)\n\n\t\tfor row in s_cursor:\n\t\t\tlist_row = list(row)\n\t\t\tlist_row[1] = list_row.pop(0)\n\t\t\td = OrderedDict(zip(fields, list_row))\n\n\t\t\tgeom = d['Shape@']\n\t\t\tgeom_2913 = geom.projectAs(ospn) \n\t\t\td['Shape@'] = geom_2913\n\t\t\td['POINT_X'] = geom_2913.firstPoint.X\n\t\t\td['POINT_Y'] = geom_2913.firstPoint.Y\n\n\t\t\twrite_row = [v for v in d.values()]\n\t\t\ti_cursor.insertRow(write_row)\n\n\tdel i_cursor", "def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2", "def _update_coords(self, change=None):\n if self.node_id:\n x, y = self.layout[self.node_id]\n self.coords = (x - self.dist, x + self.dist, y - self.dist, y + self.dist)", "def reproject_coordinates(x_in, y_in, spatial_reference_source, spatial_reference_target=None): \n if spatial_reference_target is not None:\n pass\n else:\n spatial_reference_target = osr.SpatialReference()\n spatial_reference_target.ImportFromEPSG(4326) \n pass\n \n if int(osgeo.__version__[0]) >= 3:\n # GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546\n \n spatial_reference_source.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n spatial_reference_target.SetAxisMappingStrategy(osgeo.osr.OAMS_TRADITIONAL_GIS_ORDER)\n\n \n pTransform = osr.CoordinateTransformation( spatial_reference_source, 
spatial_reference_target)\n \n x_new,y_new, z = pTransform.TransformPoint( x_in,y_in)\n \n return x_new,y_new", "def set_crs(self, input_crs=None):\n crs_names = [\"crs_wkt\", \"crs\", \"epsg\"]\n names = list(self._obj.coords.keys())\n if isinstance(self._obj, xr.Dataset):\n names = names + list(self._obj.data_vars.keys())\n # user defined\n if input_crs is not None:\n input_crs = pyproj.CRS.from_user_input(input_crs)\n # look in grid_mapping and data variable attributes\n else:\n for name in crs_names:\n # check default > GEO_MAP_COORDS attrs\n crs = self._obj.coords[GEO_MAP_COORD].attrs.get(name, None)\n if crs is None: # global attrs\n crs = self._obj.attrs.pop(name, None)\n for var in names: # data var and coords attrs\n if name in self._obj[var].attrs:\n crs = self._obj[var].attrs.pop(name)\n break\n if crs is not None:\n # avoid Warning 1: +init=epsg:XXXX syntax is deprecated\n crs = crs.strip(\"+init=\") if isinstance(crs, str) else crs\n try:\n input_crs = pyproj.CRS.from_user_input(crs)\n break\n except RuntimeError:\n pass\n if input_crs is not None:\n grid_map_attrs = input_crs.to_cf()\n crs_wkt = input_crs.to_wkt()\n grid_map_attrs[\"spatial_ref\"] = crs_wkt\n grid_map_attrs[\"crs_wkt\"] = crs_wkt\n self.set_attrs(**grid_map_attrs)", "def update_coords(self, l, b):\n self.l = l\n self.b = b\n self.ra, self.dec = astLib.astCoords.convertCoords(\n \"GALACTIC\", \"J2000\", self.l, self.b, epoch=2000.)", "def reproject(self, lon, lat):\n if self.xform is None:\n # if the CRS hasn't been determined yet, we set it from the first image's lat/lon (take the UTM crs)\n utm_i = str(int(math.floor((self.images[0].lon + 180) / 6 ) % 60) + 1).zfill(2)\n epsg_code = int('326' + utm_i) if (self.images[0].lat >= 0) else int('327' + utm_i)\n self.crs_dest = QgsCoordinateReferenceSystem(epsg_code)\n self.xform = QgsCoordinateTransform(self.crs_src, self.crs_dest, QgsProject.instance())\n return self.xform.transform(QgsPointXY(lon, lat))", "def setcoordsys(self, csys):\n return _image.image_setcoordsys(self, csys)", "def apply_changes(self):\n self.x = self.buff_x\n self.y = self.buff_y\n self.buff_x = None\n self.buff_y = None", "def _UpdateCoords(self, new_coords):\n for i in range(self.mol.n_atoms):\n for j in range(const.NUMDIM):\n self.mol.atoms[i].coords[j] = new_coords[i][j]", "def set_coordinates(self, x, y):\n self.x = x\n self.y = y", "def move_stage_to_xy(self, coordinates):\n raise NotImplementedError", "def change_coordinates(coords, p_from='C', p_to='S'):\n if p_from == p_to:\n return coords\n elif p_from == 'S' and p_to == 'C':\n\n theta = coords[..., 0]\n phi = coords[..., 1]\n r = 1.\n\n out = np.empty(theta.shape + (3,))\n\n ct = np.cos(theta)\n cp = np.cos(phi)\n st = np.sin(theta)\n sp = np.sin(phi)\n out[..., 0] = r * st * cp # x\n out[..., 1] = r * st * sp # y\n out[..., 2] = r * ct # z\n return out\n\n elif p_from == 'C' and p_to == 'S':\n\n x = coords[..., 0]\n y = coords[..., 1]\n z = coords[..., 2]\n\n out = np.empty(x.shape + (2,))\n out[..., 0] = np.arccos(z) # theta\n out[..., 1] = np.arctan2(y, x) # phi\n return out\n\n else:\n raise ValueError('Unknown conversion:' + str(p_from) + ' to ' + str(p_to))", "def compute_coordinates(self):\n self._x, self._y = self.board.index_to_coordinates(self.index)", "def set_new_location(self, xPos, yPos):", "def do_project_update(cs, args):\n raise NotImplementedError", "def updateModel(self, X, Y):\n self.X = X\n self.Y = Y", "def crs(self, crs):\n self.set_crs(crs)", "def replaceCoordinates(self):\n self.entry_list_todo = 
readLinesFromFile(os.path.join(self.results_dir, 'list', 'entry_list_recoord_nrgcing_shuffled.csv'))\n self.entry_list_todo = self.entry_list_todo[:100]\n entryListFileName = \"entry_list_recoord_todo.csv\"\n writeTextToFile(entryListFileName, toCsv(self.entry_list_todo))\n\n pythonScriptFileName = os.path.join(cingDirScripts, 'replaceCoordinates.py')\n nC = getDeepByKeysOrAttributes(self, 'nrgCing' )\n if not nC:\n nC = self\n nTwarning(\"Check code for replaceCoordinates\") \n inputDir = 'file://' + nC.results_dir + '/' + DATA_STR\n outputDir = self.results_dir\n storeCING2db = \"1\" # DEFAULT: '1' All arguments need to be strings.\n filterTopViolations = '0' # DEFAULT: '1'\n filterVasco = '0'\n singleCoreOperation = '1'\n # RECOORD coordinates\n inPathTemplate = \"/Library/WebServer/Documents/recoord_cns_w/web/%s_cns_w.pdb\"\n convention = XPLOR\n \n extraArgList = ( str(cing.verbosity), inputDir, outputDir,\n '.', '.', ARCHIVE_TYPE_BY_CH23_BY_ENTRY, PROJECT_TYPE_CING,\n storeCING2db, CV_RANGES_STR, filterTopViolations, filterVasco, singleCoreOperation,\n inPathTemplate, self.archive_id, convention )\n\n if doScriptOnEntryList(pythonScriptFileName,\n entryListFileName,\n self.results_dir,\n processes_max = self.processes_max,\n max_time_to_wait = self.max_time_to_wait,\n start_entry_id = 0,\n max_entries_todo = 90, \n extraArgList=extraArgList):\n nTerror(\"Failed to doScriptOnEntryList\")\n return True\n # end if", "def _increase_coordinates(coordinates, x, y):\n orig_x, orig_y = coordinates[\"x\"], coordinates[\"y\"]\n coordinates[\"x\"], coordinates[\"y\"] = orig_x + x, orig_y + y", "def set_coord_values(ds, wrf_out, footprint_nbins):\n xdim_var = ds.variables[\"dim_x\"]\n ydim_var = ds.variables[\"dim_y\"]\n xdim_bounds_var = ds.variables[\"dim_x_bnds\"]\n ydim_bounds_var = ds.variables[\"dim_y_bnds\"]\n lon_var = ds.variables[\"longitude\"]\n lat_var = ds.variables[\"latitude\"]\n\n time_back_var = ds.variables[\"time_before_observation\"]\n time_back_bounds_var = ds.variables[\"time_before_observation_bnds\"]\n\n height_var = ds.variables[\"height\"]\n height_bounds_var = ds.variables[\"height_bnds\"]\n\n dx = wrf_out[\"dx\"]\n\n xdim_data = wrf_out[\"proj_x_coord\"][0]\n ydim_data = wrf_out[\"proj_y_coord\"][0]\n xdim_var[:] = xdim_data[:]\n ydim_var[:] = ydim_data[:]\n\n xdim_bounds_var[:-1,:] = np.column_stack((xdim_data[:-1], xdim_data[1:]))\n xdim_bounds_var[-1,0] = xdim_data[-1]\n xdim_bounds_var[-1,1] = xdim_data[-1] + dx\n ydim_bounds_var[:-1,:] = np.column_stack((ydim_data[:-1], ydim_data[1:]))\n ydim_bounds_var[-1,0] = ydim_data[-1]\n ydim_bounds_var[-1,1] = ydim_data[-1] + dx\n\n wrf_lats = wrf_out[\"wrf_lat\"][0][0, :, :]\n wrf_lons = wrf_out[\"wrf_lon\"][0][0, :, :]\n lat_var[:, :] = wrf_lats[:, :]\n lon_var[:, :] = wrf_lons[:, :]\n\n ds.geospatial_lat_min = wrf_lats.min()\n ds.geospatial_lat_max = wrf_lats.max()\n ds.geospatial_lat_units = \"degree_north\"\n ds.geospatial_lon_min = wrf_lons.min()\n ds.geospatial_lon_max = wrf_lons.max()\n ds.geospatial_lon_units = \"degree_east\"\n\n time_back_vals = np.arange(0, footprint_nbins * FLUX_WINDOW, FLUX_WINDOW)\n time_back_var[:] = time_back_vals\n time_back_bounds_var[:-1,:] = np.column_stack((time_back_vals[:-1],\n time_back_vals[1:]))\n time_back_bounds_var[-1,:] = time_back_vals[-2:] + FLUX_WINDOW\n\n height_var[...] 
= 0\n height_bounds_var[:] = (0, CLOSE_TO_GROUND)", "def _updateProjectedPts(self):\n for pointSetName in self.pointSets:\n pointSet = self.pointSets[pointSetName]\n proj_pts = self._evaluatePoints(\n pointSet.u,\n pointSet.v,\n pointSet.t,\n pointSet.uvlimits0,\n pointSet.tlimits0,\n pointSet.bodyID,\n pointSet.faceID,\n pointSet.edgeID,\n pointSet.nPts,\n )\n pointSet.proj_pts = proj_pts", "def update_coords(self, cartesian_coords, polar_cords):\n\n self.cartesian_coords = cartesian_coords\n self.polar_coords = polar_cords\n\n self.db_upsert(force_insert=True)", "def update(self, dt): \r\n # update the projectile, move the circle to the new projectile location \r\n self.proj.update(dt) \r\n center = self.marker.getCenter() \r\n dx = self.proj.getX() - center.getX() \r\n dy = self.proj.getY() - center.getY() \r\n self.marker.move(dx, dy)", "def fixImageCoordinates( filename, projection):\n printcount = 0\n\n inname = filename\n nchar = len(inname)\n # strip off the end of the previous image and add the new projection name\n outname = inname[0:nchar-8]\n outname = outname + projection + \".fit\"\n\n# get the input image coordinate transform, Usually Cartesian\n win = wcs.WCS(filename)\n\n hdu = fits.open(filename)[0]\n imageData = fits.getdata( filename)\n imageCopy = copy.deepcopy( imageData)\n#\n header = hdu.header\n nx = header['NAXIS1']\n ny = header['NAXIS2']\n\n crval1 = header['CRVAL1']\n crval2 = header['CRVAL2']\n crpix1 = header['CRPIX1']\n crpix2 = header['CRPIX2']\n cdelt1 = header['CDELT1']\n cdelt2 = header['CDELT2']\n ctype1 = header['CTYPE1']\n ctype2 = header['CTYPE2']\n\n xmin = crval1 + (1. - crpix1)*cdelt1\n xmax = crval1 + (nx - crpix1)*cdelt1\n ymin = crval2 + (1. - crpix2)*cdelt2\n ymax = crval2 + (nx - crpix2)*cdelt2\n\n print( \"fixImage: %.2f,%2f %.1f,%.1f %.3f,%.3f\" % (crval1,crval2,crpix1,crpix2,cdelt1,cdelt2))\n print( \"fixImage: %s,%s\" % (ctype1,ctype2))\n # redefine for new projection desired\n ctype1 = ctype1[0:4]+projection\n ctype2 = ctype2[0:4]+projection\n print( \"=> %s, %s\" % (ctype1, ctype2))\n\n header['CTYPE1'] = ctype1\n header['CTYPE2'] = ctype2\n\n# for output image the reference coordinate x pixel can be anywhere\n# move the center to zero \n header['CRVAL1'] = 0.\n\n header['LONPOLE'] = 0.0\n header['LATPOLE'] = 90.0\n header.update()\n\n tempname = \"GridSave.fits\"\n hdu = fits.PrimaryHDU(header=header, data=imageCopy)\n print(\"Outname: %s\" % (tempname))\n if os.path.exists(tempname):\n os.remove(tempname)\n hdu.writeto(tempname)\n\n wout = wcs.WCS(tempname)\n # now that coordinates are defined, remove temporary file\n if os.path.exists(tempname):\n os.remove(tempname)\n\n pixin = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)\n pixout = np.array([[0, 0], [nx-1, ny-1]], dtype=np.float64)\n\n print(\"NX, NY: %d,%d\" % (nx, ny))\n\n nan = float(\"NAN\")\n# print(\"Nan = %f\" % (nan))\n# assume no data until found\n for jjj in range (ny):\n for iii in range (nx):\n imageCopy[jjj][iii] = nan\n\n# now for output image check all pixel values\n for jjj in range (ny):\n for iii in range (nx):\n # if this image pixal has no value\n pixout[0] = (iii,jjj)\n oworld = wout.wcs_pix2world(pixout, 0)\n xy = oworld[0]\n if np.isnan(xy[0]):\n continue\n# print(\"pixout: %d,%d : world %.f,%.2f\" % (iii,jjj,xy[0],xy[1]))\n pixin[0] = oworld[0]\n ipixels = win.wcs_world2pix(pixin, 0)\n# get input pixels for coordinate\n ixy = ipixels[0]\n# if outside of current image skip this pixel\n if np.isnan( ixy[0]):\n continue\n ix = int(ixy[0])\n iy = 
int(ixy[1])\n ix = max( min( nx-1, ix), 0)\n iy = max( min( ny-1, iy), 0)\n ix = int(ix)\n iy = int(iy)\n# print(\"pixin : %d,%d : world %.f,%.2f\" % (ix,iy,xy[0],xy[1]))\n# print(\"OX,OY:%d,%d <= IX,IY:%d,%d\" %( ox,oy, ix,iy))\n imageCopy[jjj][iii] = imageData[iy][ix]\n\n print(\"Preparing to write new coordiante transform: %s\" % (outname))\n if os.path.exists(outname):\n os.remove(outname)\n newhdu = fits.PrimaryHDU(header=header, data=imageCopy)\n newhdu.writeto(outname)\n print(\"Wrote new\")\n\n return", "def test_update_well_positions(tmpl, tmpdir):\n new_x = 0.0000112233\n new_y = 0.0000332211\n orig_x = tmpl.well_attrib()['FieldXStartCoordinate']\n orig_y = tmpl.well_attrib()['FieldYStartCoordinate']\n\n tmpl.well_fields()[0].FieldXCoordinate = new_x\n tmpl.well_fields()[0].FieldYCoordinate = new_y\n\n tmpl.update_well_positions()\n\n assert orig_x != tmpl.well_attrib()['FieldXStartCoordinate']\n assert orig_y != tmpl.well_attrib()['FieldYStartCoordinate']" ]
[ "0.58174837", "0.5648159", "0.55298245", "0.55191374", "0.5506121", "0.53981173", "0.537714", "0.53376645", "0.5320438", "0.53062177", "0.5266959", "0.52519304", "0.5230742", "0.5226363", "0.5222849", "0.5222461", "0.5218819", "0.5186922", "0.51763636", "0.5170673", "0.5168104", "0.5156463", "0.51550835", "0.51359105", "0.50951123", "0.5094492", "0.509296", "0.50904703", "0.5086006", "0.50794154" ]
0.7161199
0
VotingUser a model defined in Swagger
def __init__(self, voting_token: str=None, user_org_id: str=None): # noqa: E501 self.swagger_types = { 'voting_token': str, 'user_org_id': str } self.attribute_map = { 'voting_token': 'voting_token', 'user_org_id': 'user_org_id' } self._voting_token = voting_token self._user_org_id = user_org_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obj_create(self, bundle, request=None, **kwargs):\n return super(VoteResource, self).obj_create(bundle, request, user=request.user)", "def get_for_user(self, obj, user):\n if not user.is_authenticated:\n return None\n content_object = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(voter=user, content_type=content_object, object_id=obj._get_pk_val())\n\n except ObjectDoesNotExist:\n #print('No vote by {user} on {object}'.format(user=user, object=obj))\n return None\n\n return vote", "def get_for_user(self, obj, user):\r\n if not user.is_authenticated():\r\n return None\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\r\n user=user)\r\n except models.ObjectDoesNotExist:\r\n vote = None\r\n return vote", "def get_for_user(self, obj, user):\n if not user.is_authenticated():\n return None\n ctype = ContentType.objects.get_for_model(obj)\n try:\n vote = self.get(content_type=ctype, object_id=obj._get_pk_val(),\n user=user)\n except models.ObjectDoesNotExist:\n vote = None\n return vote", "def vote(request, model, object_id):\n if request.method != 'POST':\n raise Http404\n\n vote_type = request.POST.get('type', None)\n if vote_type == 'up' and auth.can_vote_up(request.user):\n vote_type = Vote.VOTE_UP\n elif vote_type == 'down' and auth.can_vote_down(request.user):\n vote_type = Vote.VOTE_DOWN\n else:\n raise Http404\n\n # TODO Ensure users can't vote on their own posts\n\n obj = get_object_or_404(model, id=object_id, deleted=False, locked=False)\n content_type = ContentType.objects.get_for_model(model)\n try:\n existing_vote = Vote.objects.get(content_type=content_type,\n object_id=object_id,\n user=request.user)\n except Vote.DoesNotExist:\n existing_vote = None\n\n if existing_vote is None:\n Vote.objects.create(content_type=content_type,\n object_id=object_id,\n user=request.user,\n vote=vote_type)\n else:\n if vote_type == existing_vote.vote:\n existing_vote.delete()\n else:\n existing_vote.vote = vote_type\n existing_vote.save()\n\n # TODO Reputation management\n\n if request.is_ajax():\n return JsonResponse({\n 'success': True,\n 'score': model._default_manager.filter(\n id=object_id).values_list('score', flat=True)[0],\n })\n else:\n return HttpResponseRedirect(obj.get_absolute_url())", "def view_vote_entities(self, request):\n\n layout = VoteLayout(self, request, 'entities')\n\n return {\n 'vote': self,\n 'layout': layout\n }", "def _user_vote(self, user):\n from . 
import Vote\n\n if not user.is_authenticated:\n return None\n\n return (\n Vote.query\n .filter(Vote.type == 'links')\n .filter(Vote.user_id == user.id)\n .filter(Vote.thing_id == self.id)\n .first()\n )", "def vote(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'votes')\r\n request = http.Request('POST', url, {'to': '1'})\r\n\r\n return request, parsers.parse_json", "def create_vote(self, data, header):\n return self.client.post(\n path='/api/v2/votes/', data=json.dumps(data), content_type='application/json', headers=header)", "def from_dict(cls, dikt) -> 'VotingUser':\n return util.deserialize_model(dikt, cls)", "def get_voters():", "def get_voters():", "def get_for_user_from_model(self, model, user):\n vote_dict = {}\n if len(model.objects.all()[:1]) > 0:\n ctype = ContentType.objects.get_for_model(model)\n votes = list(self.filter(content_type__pk=ctype.id,\n user__pk=user.id))\n vote_dict = dict([(vote.object_id, vote) for vote in votes])\n return vote_dict", "def user(self):", "def __init__(self, user: User):\n self.user = user", "def review_vote_entity_handler(review_id, user):\n review = Review.query.get_or_404(str(review_id))\n vote = Vote.query.filter_by(user=user, review=review).first()\n if not vote:\n raise NotFound\n else:\n return jsonify(vote=vote.to_dict())", "def get_vote(self, id: int) -> dict:", "def record_vote(self, obj, vote, user):\n if vote not in (+1, 0, -1):\n raise ValueError('Invalid vote (must be +1/0/-1)')\n content_type = ContentType.objects.get_for_model(obj)\n # First, try to fetch the instance of this row from DB\n # If that does not exist, then it is the first time we're creating it\n # If it does, then just update the previous one\n try:\n vote_obj = self.get(voter=user, content_type=content_type, object_id=obj._get_pk_val())\n if vote == 0 and not ZERO_VOTES_ALLOWED:\n vote_obj.delete()\n else:\n vote_obj.vote = vote\n vote_obj.save()\n\n except ObjectDoesNotExist:\n #This is the first time we're creating it\n try:\n if not ZERO_VOTES_ALLOWED and vote == 0:\n # This shouldn't be happening actually\n return\n vote_obj = self.create(voter=user, content_type=content_type, object_id=obj._get_pk_val(), vote=vote)\n except:\n print(( '{file}: something went wrong in creating a vote object at {line}'.format(file=str('__FILE__'), line=str('__LINE__'))))\n raise ObjectDoesNotExist\n\n return vote_obj", "def upvote(self) -> Response:\n self.force_authenticate_user()\n response = self.upvote_question()\n return response", "def one_voter(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n if not voter:\n raise Http404\n return voter.toJSONDict()", "def set_vote_for_object_parameter(obj, user, value, uuid = None, tpclass = None, name = None, comment = None, caption = None):\n t = type(obj)\n if t not in parameter_class_map:\n raise TypeError('obj has wrong type {0}'.format(t))\n\n pclass = parameter_class_map[t]['param']\n pvalclass = parameter_class_map[t]['val']\n pvlclass = parameter_class_map[t]['vl']\n pvoteclass = parameter_class_map[t].get('vote')\n\n if isinstance(uuid, basestring):\n q = Q(uuid=uuid) & Q(obj=obj)\n else:\n q = Q(obj=obj) & Q(tpclass=tpclass)\n if tpclass == 'user':\n if not isinstance(name, basestring):\n raise Exception('name must be string when tpclass == \"user\"')\n q &= Q(name=name)\n # get parameter\n prm = pclass.objects.filter(q).all()[0]\n if prm.enum:\n if pvlclass.objects.filter(Q(value=value) & Q(parameter=prm)).count() == 0:\n raise ValueError('this value can not be 
accepted')\n\n # get or create voted value\n pval = get_or_create_object(pvalclass,\n {'parameter' : prm,\n 'value' : value,\n 'status' : 'voted'},\n {'caption' : caption},\n can_change = (lambda a: False))\n # delete all other votes for values of this parameter\n pvoteclass.objects.filter(Q(voter=user) &\n Q(parameter_val__status='voted') &\n Q(parameter_val__parameter=prm)).delete()\n # create vote for our value\n vt = pvoteclass(voter=user,\n parameter_val=pval)\n if isinstance(comment, basestring):\n vt.comment = comment\n vt.save(force_insert=True)", "def record_vote(self, obj, user, vote):\r\n if vote not in (+1, 0, -1):\r\n raise ValueError('Invalid vote (must be +1/0/-1)')\r\n ctype = ContentType.objects.get_for_model(obj)\r\n try:\r\n v = self.get(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val())\r\n if vote == 0:\r\n v.delete()\r\n else:\r\n v.vote = vote\r\n v.save()\r\n except models.ObjectDoesNotExist:\r\n if vote != 0:\r\n self.create(user=user, content_type=ctype,\r\n object_id=obj._get_pk_val(), vote=vote)", "def __init__(self, user, key):\n super(User, self).__init__(user=user, key=key)\n self.Ratings = User_Ratings(user, key)\n \"\"\"\n Allows to retrieve, add and delete user ratings.\n \"\"\"", "async def vote(self, ctx):\n embed = discord.Embed(title = \"Here are some bot lists that you can vote for me on, voters may soon™ recieve perks\", color = discord.Color.blurple())\n embed.add_field(name = \"Bots For Discord\", value = \"[Click Here](https://botsfordiscord.com/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Discord Boats\", value = \"[Click Here](https://discord.boats/bot/592811241756688405/vote)\")\n embed.add_field(name = \"Divine Discord Bots\", value = \"[Click Here](https://divinediscordbots.com/bot/592811241756688405/vote)\") \n embed.add_field(name = \"Botlist.space\", value = \"[Click Here](https://botlist.space/bot/592811241756688405/upvote)\") \n embed.set_thumbnail(url = self.bot.user.avatar_url)\n await ctx.send(embed = embed)", "def do_votes_by_user(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly four arguments\" % bits[0])\r\n if bits[2] != 'on':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'on'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return VotesByUserNode(bits[1], bits[3], bits[5])", "def vote(self):\n if self.vote_exists():\n return self.update_vote()\n return self.create_vote()", "def voter_votes(request, election, voter_uuid):\n voter = Voter.get_by_election_and_uuid(election, voter_uuid)\n votes = CastVote.get_by_voter(voter)\n return [v.toJSONDict() for v in votes]", "def view_vote_entities_proposal(self, request):\n\n layout = VoteLayout(self, request, 'proposal-entities')\n\n return {\n 'vote': self,\n 'layout': layout\n }", "def use_voting_classifier(self):\n\t\tself.model = VotingClassifier(estimators=[('nb', self.models[\"naive_bayes\"]), ('et', self.models[\"extra_tree\"]), ('gb', self.models[\"gradient_boost\"])], voting='hard', weights=[2,3,1.5])", "def opt_model_create_rest_api():\n request_json = request.get_json()\n OptimModelRequestAPI(request_json).validate()\n return create_model_data(request_json)" ]
[ "0.6134873", "0.58559036", "0.5683679", "0.5661734", "0.56304413", "0.560258", "0.553985", "0.54510254", "0.54231775", "0.539229", "0.53908366", "0.53908366", "0.5388102", "0.5358371", "0.535237", "0.5326526", "0.5316479", "0.53155357", "0.5293799", "0.5286143", "0.524816", "0.52405334", "0.5231426", "0.52068", "0.5203265", "0.5198216", "0.51939684", "0.5190798", "0.5170208", "0.5134033" ]
0.5859945
1
Gets the voting_token of this VotingUser.
def voting_token(self) -> str: return self._voting_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def _get_token(self):\n return user.get_token()", "def get_token(self):\n\n return self._token", "def token(self):\n if not self._token:\n self._token = self.authenicate().token\n\n return self._token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def get_token(self):\n token = self._session.token\n return token", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def voting_token(self, voting_token: str):\n if voting_token is None:\n raise ValueError(\"Invalid value for `voting_token`, must not be `None`\") # noqa: E501\n\n self._voting_token = voting_token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def token(self):\n return self._token", "def vpp_token_id(self):\n if \"vppTokenId\" in self._prop_dict:\n return self._prop_dict[\"vppTokenId\"]\n else:\n return None", "def token(self):\n\n return self.__token", "def token(self):\n return self[\"token\"]", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\n return self._generate_jwt_token()", "def token(self):\r\n return self._token", "def token(self):\n\n if not self.requests:\n return None\n return self.requests[0].token", "def token(self) -> Token:\n return getattr(self, \"tok\", None)", "def token(self):\n print(\"getter of token called\")\n return self._token", "def device_token(self):\n return self._device_token", "def Token(self) -> Token:\r\n\t\treturn self._token", "def authenticationToken(self):\n return self.authToken", "def token(self):\n if self.is_auth_needed():\n self.authorize()\n\n return self.get_from_cache('token')", "def token(self):\n if not self._token or self._expires <= datetime.now():\n self._request_token()\n return self._token", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")" ]
[ "0.6720019", "0.6619114", "0.65700465", "0.63307583", "0.6300595", "0.6300595", "0.62598306", "0.6192554", "0.6192554", "0.6192554", "0.6187039", "0.6184881", "0.6184881", "0.6184881", "0.6169354", "0.61598897", "0.6079098", "0.6042822", "0.6042822", "0.6042822", "0.6038956", "0.60210854", "0.58603257", "0.57326454", "0.57103026", "0.5680905", "0.5668172", "0.56560093", "0.5648166", "0.5569985" ]
0.8317904
0
Sets the voting_token of this VotingUser.
def voting_token(self, voting_token: str): if voting_token is None: raise ValueError("Invalid value for `voting_token`, must not be `None`") # noqa: E501 self._voting_token = voting_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def voting_token(self) -> str:\n return self._voting_token", "def __init__(self, voting_token: str=None, user_org_id: str=None): # noqa: E501\n self.swagger_types = {\n 'voting_token': str,\n 'user_org_id': str\n }\n\n self.attribute_map = {\n 'voting_token': 'voting_token',\n 'user_org_id': 'user_org_id'\n }\n self._voting_token = voting_token\n self._user_org_id = user_org_id", "def token(self, token):\n\n self._token = token", "def token(self, token):\n\n self._token = token", "def login_token(self, token):\n self.token = token # this will also set the refresh_token to None", "def set_token(self, token: AccessToken):\n self.access_token = token.access_token or \"\"\n if isinstance(token, AccessToken):\n self.refresh_token = token.refresh_token or \"\"\n self.token_type = token.token_type or \"\"\n self.expires_in = token.expires_in or 0\n\n lag = datetime.timedelta(seconds=-self.lag_time)\n if token.access_token and token.expires_in:\n lag = datetime.timedelta(seconds=token.expires_in - self.lag_time)\n self.expires_at = datetime.datetime.now() + lag", "def device_token(self, device_token):\n \n self._device_token = device_token", "def set_maptoken(self, token):\n self._data['maptoken'] = token", "def set_token(self, token):\n # type: (Token) -> None\n self.token = token\n self._token_header = \"Bearer \" + token[\"access_token\"]", "def __set_authentication_token(self, token):\n cache = {\"authentication_token\": token}\n save_json(self._tokenPath, cache)", "def token(self, value):\r\n self._token = value", "def _update_token(token):\n session.token = token", "async def token(self, token):\n # [p]set token <token>\n\n if len(token) < 50:\n await self.bot.say(\"Invalid token.\")\n else:\n CacheAPI.set(key='dwarf_token', value=token, timeout=None)\n await self.bot.say(\"Token set. 
Restart me.\")\n log.debug(\"Token changed.\")", "async def _tokenset(self, ctx: commands.Context, token: str):\n self.config[ctx.message.server.id] = token\n dataIO.save_json('data/football/config.json', self.config)\n await self.bot.say('football-data API token set')", "def toggle_vote(self):\n\n self.vote = 1 - self.vote", "def token_id_from(self, token_id_from):\n\n self._token_id_from = token_id_from", "def use_voting_classifier(self):\n\t\tself.model = VotingClassifier(estimators=[('nb', self.models[\"naive_bayes\"]), ('et', self.models[\"extra_tree\"]), ('gb', self.models[\"gradient_boost\"])], voting='hard', weights=[2,3,1.5])", "def api_token(self, api_token):\n\n self._api_token = api_token", "def api_token(self, api_token):\n\n self._api_token = api_token", "def token_expiration(self, token_expiration):\n\n self._token_expiration = token_expiration", "def user(self, user_token, user_device=None):\n\n self.user_token = user_token\n self.user_device = user_device", "def _set_token(self) -> None:\n if 'token' in self.params['user'].keys():\n logger.debug('user token already set')\n else:\n logger.debug('setting user token')\n\n elems = [e.get_attribute('href') for e in self._driver.find_elements_by_xpath(\"//a[@href]\")]\n elems = [e for e in elems if 'editwh.php' in e]\n try:\n match = re.search(pattern=r'(?<=ee\\=)\\d*(?=\\&e)', string=elems[0])\n except IndexError:\n raise IndexError('source of html page has no href with token')\n\n if match:\n self.params['user']['token'] = match[0]\n else:\n raise ValueError('did not extract token from %s', elems[0])", "def vm_num(self, vm_num):\n\n self._vm_num = vm_num", "def token_id_to(self, token_id_to):\n\n self._token_id_to = token_id_to", "def vip(self, vip):\n\n self._vip = vip", "def set_UserToken(self, value):\n super(GetCategoriesInputSet, self)._set_input('UserToken', value)", "def set_token(self, new_token):\n try:\n self.config[USER_SECTION_KEY][TOKEN_OPTION_KEY] = new_token\n\n with open(self.configuration_filename, 'w') as configfile:\n self.config.write(configfile)\n except KeyError:\n # Create non-existent user section\n self.config[USER_SECTION_KEY] = {}\n self.set_token(new_token)", "def token_data(self, token_data: TokenData):\n\n self._token_data = token_data", "def set_access_token(self, access_token):\n self.access_token = access_token", "def set_vial(self, vial_num):\n self.vial_num = vial_num" ]
[ "0.6830911", "0.6465393", "0.595013", "0.595013", "0.5531626", "0.5400101", "0.53767824", "0.53420585", "0.53248674", "0.53226763", "0.5282861", "0.5255783", "0.5218371", "0.5135403", "0.5127925", "0.51046884", "0.5068379", "0.5013884", "0.5013884", "0.50131935", "0.49514234", "0.49384075", "0.49129784", "0.4907951", "0.49013475", "0.48908508", "0.48572156", "0.48240638", "0.4817093", "0.4802124" ]
0.84230316
0
Gets the user_org_id of this VotingUser.
def user_org_id(self) -> str: return self._user_org_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def org_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"org_id\")", "def get_user_id(self):\n return self.id_user", "def org_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"org_id\")", "def org_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"org_id\")", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def get_id(self):\n return self.user_id", "def user_id(self) -> str:\n return self._user_id", "def user_id(self) -> str:\n return self._user_id", "def user_id(self):\n # type: () -> string_types\n return self._user_id", "def get_id(self) -> int:\n return self.user_id", "def user_id(self):\n return self.status.user[\"id\"]", "def get_user_id(self):\n raise NotImplementedError", "def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")", "def organization_id(self):\n return self._organization_id", "def organization_id(self) -> str:\n return pulumi.get(self, \"organization_id\")", "def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self):\n return lamin_user_settings().id", "def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")", "def organization_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"organization_id\")", "def sfdc_org_id(self) -> str:\n return pulumi.get(self, \"sfdc_org_id\")" ]
[ "0.6899005", "0.6897272", "0.68779665", "0.67156845", "0.6661198", "0.6661198", "0.6661198", "0.6661198", "0.6661198", "0.6648258", "0.66482234", "0.66482234", "0.6560425", "0.6552495", "0.6548865", "0.6532151", "0.64681876", "0.64505225", "0.6379392", "0.6378247", "0.6376733", "0.6376733", "0.63693917", "0.6294518", "0.6249504", "0.6249504", "0.6249504", "0.62486136", "0.61408055", "0.6098086" ]
0.83588845
0
Sets the user_org_id of this VotingUser.
def user_org_id(self, user_org_id: str): if user_org_id is None: raise ValueError("Invalid value for `user_org_id`, must not be `None`") # noqa: E501 self._user_org_id = user_org_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def org_id(self, org_id):\n\n self._org_id = org_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def user_id(self, user_id):\n\n self._user_id = user_id", "def id_user(self, id_user):\n\n self._id_user = id_user", "def user_org_id(self) -> str:\n return self._user_org_id", "def set_user(self, user: User):\n self.__user = user", "def user(self, user):\n self.user_id = user.get_id()", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def organization_id(self, organization_id):\n\n self._organization_id = organization_id", "def set_user(self, user):\r\n self.user = user", "def set_userId(self, userId):\n self.authentication.userId = userId", "def set_user_attribute(self, key, val):\n self._user_attributes[key] = val", "def set_is_org_active(self, is_org_active):\n self.is_org_active = is_org_active", "def org_name(self, org_name):\n\n self._org_name = org_name", "def org_name(self, org_name):\n\n self._org_name = org_name", "def set_user(self, user_model):\n\n self.user_model = user_model\n return self", "def user_id(self, user_id):\n # type: (string_types) -> None\n\n if user_id is not None:\n if not isinstance(user_id, string_types):\n raise TypeError(\"Invalid type for `user_id`, type has to be `string_types`\")\n\n self._user_id = user_id", "def set_user(self, user):\n self._user = user" ]
[ "0.6810901", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62934107", "0.62673724", "0.6159892", "0.5727693", "0.5723272", "0.5714084", "0.5714084", "0.5692987", "0.5671795", "0.5497968", "0.5487969", "0.5487033", "0.5487033", "0.5485976", "0.5480079", "0.5479761" ]
0.74308753
0
Check classes listed in Implemented_Targets are derived from Target
def test_implemented_targets_derived_from_target(self): for key in forcebalance.objective.Implemented_Targets.keys(): self.logger.debug("Assert %s is subclass of target\n" % str(forcebalance.objective.Implemented_Targets[key])) assert issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n self.assertTrue(issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target))", "def test_no_unlisted_classes_derived_from_Target(self):\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n self.logger.debug(module)\n # Skip over smirnoff_hack because it is not intended to contain any Target implementations.\n if module in [\"_dcdlib\", \"smirnoff_hack\"]: continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n self.logger.debug(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target):\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Hessian',\n 'Thermo',\n 'Hydration',\n 'Moments', \n 'OptGeoTarget',\n 'TorsionProfileTarget']\n self.logger.debug(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n pytest.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' 
+ module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def is_concrete(self):\r\n targets = list(self.resolve())\r\n return len(targets) == 1 and targets[0] == self", "def is_target( self ):\n\n raise NotImplementedError(\"is_target\");", "def import_targets(self):\n count = 0\n target_subclasses = self.get_subclasses(Target)\n for module in os.listdir(config.targets_path):\n if not os.path.isdir(f\"{config.targets_path}/{module}\"):\n continue\n\n for target in os.listdir(f\"{config.targets_path}/{module}\"):\n if target == \"__init__.py\" or target[-3:] != \".py\":\n continue\n else:\n targets_path_split = config.targets_path.split(\"/\")\n module_directory = targets_path_split[0]\n target_path = targets_path_split[1]\n targetcls = importlib.import_module(f\"{module_directory}.{target_path}.{module}.{target[:-3]}\")\n for _, obj in inspect.getmembers(targetcls):\n if inspect.isclass(obj):\n if inspect.getmro(obj)[1] in target_subclasses and obj is not Target:\n self.add_target(obj)\n count += 1\n\n return count", "def resolve_all(cls, targets, *expected_types):\r\n if targets:\r\n for target in maybe_list(targets, expected_type=Target):\r\n concrete_targets = [t for t in target.resolve() if t.is_concrete]\r\n for resolved in concrete_targets:\r\n if expected_types and not isinstance(resolved, expected_types):\r\n raise TypeError('%s requires types: %s and found %s' % (cls, expected_types, resolved))\r\n yield resolved", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def IsTarget(self, target_name):\n return target_name in self.GetTargets()", "def is_target(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = (\n name for name in vars(Target) if not name.startswith(\"_\")\n )\n\n return all([hasattr(X, name) for name in require_attrs])", "def matches(self, target):\n raise NotImplementedError()", "def class_is(cls: Class) -> bool:\n pass", "def is_targeted(self, targets):\n\n if targets:\n if isinstance(targets, str):\n # See if the string is a '|' separated list of targets.\n targets = targets.split('|')\n if len(targets) == 1:\n # There was no '|' so restore the original string.\n targets = targets[0]\n\n if isinstance(targets, str):\n # String targets can come from the project file (ie. the user)\n # and so need to be validated.\n if targets.startswith('!'):\n # Note that this assumes that the target is a platform\n # rather than an architecture. 
If this is incorrect then\n # it is a bug in the meta-data somewhere.\n platform = Platform.platform(targets[1:])\n covered = (self.platform is not platform)\n elif '-' in targets:\n architecture = Architecture.architecture(targets)\n covered = (self is architecture)\n else:\n platform = Platform.platform(targets)\n covered = (self.platform is platform)\n else:\n covered = (self.platform.name in targets)\n else:\n covered = True\n\n return covered", "def match(self, cls):\n return isinstance(self, cls)", "def Check(self, artifacts: list[T]) -> bool:\n raise NotImplementedError(\"Subclass didn't implement Check method.\")", "def target_interfaces(self):", "def target_interfaces(self):", "def can_create(self, target_class, *args, **kw):\n\n return target_class is self.target_class", "def is_base_and_derived(based, derived):\n assert isinstance(based, class_declaration.class_t)\n assert isinstance(derived, (class_declaration.class_t, tuple))\n\n if isinstance(derived, class_declaration.class_t):\n all_derived = ([derived])\n else: # tuple\n all_derived = derived\n\n for derived_cls in all_derived:\n for base_desc in derived_cls.recursive_bases:\n if base_desc.related_class == based:\n return True\n return False", "def _has_base(cls, base):\n if cls is base:\n return True\n elif cls is None:\n return False\n try:\n for bs in cls.__bases__:\n if _has_base(bs, base):\n return True\n except:\n pass\n return False", "def register_target_types(\n self, target_types: Union[typing.Iterable[Type[Target]], Any]\n ) -> None:\n if not isinstance(target_types, Iterable):\n raise TypeError(\n f\"The entrypoint `target_types` must return an iterable. Given {repr(target_types)}\"\n )\n bad_elements = [\n tgt_type\n for tgt_type in target_types\n if not isinstance(tgt_type, type) or not issubclass(tgt_type, Target)\n ]\n if bad_elements:\n raise TypeError(\n \"Every element of the entrypoint `target_types` must be a subclass of \"\n f\"{Target.__name__}. Bad elements: {bad_elements}.\"\n )\n self._target_types.update(target_types)", "def has(self, target):\r\n return target in self.by_target", "def applies(cls, obj):\n return type(obj) in cls.types", "def targets(obj, reftype):", "def target_type(self):", "def supported_target(self, target, message_handler):\n\n # This default implementation checks that the architectures are the\n # same.\n return target is self", "def __subclasshook__(cls, subclass: Type[Any]) -> bool:\n return (subclass in cls.__subclasses__() \n or denovo.unit.has_methods(\n item = subclass,\n methods = [\n 'add', 'subset', '__add__', '__iadd__', '__iter__', \n '__len__']))", "def has(self, target):\n return target in self.by_target", "def exactly(base_cls):\n\n @meta\n def check(cls):\n return cls is base_cls\n\n return check" ]
[ "0.86741906", "0.796275", "0.7916209", "0.6642787", "0.64910054", "0.64711076", "0.6310859", "0.6294609", "0.62178797", "0.6209579", "0.6149524", "0.5955374", "0.5882046", "0.5872212", "0.5858272", "0.5815201", "0.58059925", "0.58059925", "0.57850796", "0.5773781", "0.5770892", "0.5751324", "0.5745112", "0.57344085", "0.5714556", "0.5689875", "0.567322", "0.5647768", "0.56164306", "0.56083703" ]
0.8581047
1
Check for unknown omissions from Implemented_Targets Check to make sure any classes derived from Target are either listed in Implemented_Targets or in the exclusion list in this test case
def test_no_unlisted_classes_derived_from_Target(self): forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0]) if re.compile(".*\.py$").match(module) and module not in ["__init__.py"]] for module in forcebalance_modules: # LPW: I don't think dcdlib should be imported this way. self.logger.debug(module) # Skip over smirnoff_hack because it is not intended to contain any Target implementations. if module in ["_dcdlib", "smirnoff_hack"]: continue m = __import__('forcebalance.' + module) objs = dir(eval('m.' + module)) self.logger.debug(objs) for obj in objs: obj = eval('m.'+module+'.'+obj) if inspect.isclass(obj) and issubclass(obj, forcebalance.target.Target): implemented = [i for i in forcebalance.objective.Implemented_Targets.values()] # list of documented exceptions # Basically, platform-independent targets are excluded. exclude = ['Target', 'AbInitio', 'Interaction', 'Interaction_GMX', 'Liquid', 'Lipid', 'BindingEnergy', 'LeastSquares', 'Vibration', 'Hessian', 'Thermo', 'Hydration', 'Moments', 'OptGeoTarget', 'TorsionProfileTarget'] self.logger.debug(obj) if obj not in implemented and obj.__name__ not in exclude: pytest.fail("Unknown class '%s' not listed in Implemented_Targets" % obj.__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_unlisted_classes_derived_from_Target(self):\n self.skipTest(\"Not sure if test is working properly.\")\n forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])\n if re.compile(\".*\\.py$\").match(module)\n and module not in [\"__init__.py\"]]\n for module in forcebalance_modules:\n # LPW: I don't think dcdlib should be imported this way.\n print(module)\n if module == \"_dcdlib\": continue\n m = __import__('forcebalance.' + module)\n objs = dir(eval('m.' + module))\n print(objs)\n for obj in objs:\n obj = eval('m.'+module+'.'+obj)\n if type(obj) == abc.ABCMeta:\n implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]\n # list of documented exceptions\n # Basically, platform-independent targets are excluded.\n exclude = ['Target',\n 'AbInitio',\n 'Interaction',\n 'Interaction_GMX',\n 'Liquid',\n 'Lipid',\n 'BindingEnergy',\n 'LeastSquares',\n 'Vibration',\n 'Thermo',\n 'Hydration',\n 'Moments']\n print(obj)\n if obj not in implemented and obj.__name__ not in exclude:\n self.fail(\"Unknown class '%s' not listed in Implemented_Targets\" % obj.__name__)", "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n self.assertTrue(issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target))", "def test_implemented_targets_derived_from_target(self):\n for key in forcebalance.objective.Implemented_Targets.keys():\n self.logger.debug(\"Assert %s is subclass of target\\n\" % str(forcebalance.objective.Implemented_Targets[key]))\n assert issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target)", "def _should_reject_unexamined(self, base_cls):\n result = (\n self.serialize_type(base_cls) not in self.classes_examined\n and base_cls.__module__ not in self.modules_examined\n and not qcore.inspection.is_cython_class(base_cls)\n )\n if not result:\n self.unexamined_base_classes.add(base_cls)\n return result", "def davComplianceClasses(self):\n unimplemented(self)", "def has_invalid_targets(self):\n return len(self._combined_invalid_versioned_targets.targets) > 0", "def targets(self):\n\n # Targets that fail but shouldn't\n known_failing_targets = [\n # The following two targets lose out due to a resource collision, because `example_b` happens\n # to be first in the context, and test.junit mixes all classpaths.\n 'testprojects/maven_layout/resource_collision/example_b/src/test/java/org/pantsbuild/duplicateres/exampleb:exampleb',\n 'testprojects/maven_layout/resource_collision/example_c/src/test/java/org/pantsbuild/duplicateres/examplec:examplec',\n # TODO: This one has a missing dependency, but is intended to succeed... 
should it?\n 'testprojects/src/java/org/pantsbuild/testproject/thriftdeptest',\n # TODO(Eric Ayers): I don't understand why this fails\n 'testprojects/src/java/org/pantsbuild/testproject/jvmprepcommand:compile-prep-command',\n ]\n\n # Targets that are intended to fail\n negative_test_targets = [\n 'testprojects/maven_layout/provided_patching/leaf:fail',\n 'testprojects/src/antlr/python/test:antlr_failure',\n 'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-files',\n 'testprojects/src/java/org/pantsbuild/testproject/compilation_warnings:fatal',\n 'testprojects/src/java/org/pantsbuild/testproject/dummies:compilation_failure_target',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/earlyexit:tests',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/failing/tests/org/pantsbuild/tmp/tests',\n 'testprojects/src/java/org/pantsbuild/testproject/junit/mixed/tests/org/pantsbuild/tmp/tests',\n 'testprojects/src/java/org/pantsbuild/testproject/missingdepswhitelist.*',\n 'testprojects/src/java/org/pantsbuild/testproject/missingdirectdepswhitelist:missingdirectdepswhitelist',\n 'testprojects/src/java/org/pantsbuild/testproject/missingjardepswhitelist:missingjardepswhitelist',\n 'testprojects/src/java/org/pantsbuild/testproject/runtime:compile-fail',\n 'testprojects/src/scala/org/pantsbuild/testproject/compilation_failure',\n 'testprojects/src/scala/org/pantsbuild/testproject/compilation_warnings:fatal',\n 'testprojects/src/thrift/org/pantsbuild/thrift_exports:C-without-exports',\n 'testprojects/src/thrift/org/pantsbuild/thrift_linter:',\n 'testprojects/src/java/org/pantsbuild/testproject/provided:c',\n 'testprojects/tests/java/org/pantsbuild/testproject/dummies:failing_target',\n 'testprojects/tests/java/org/pantsbuild/testproject/empty:',\n 'testprojects/tests/java/org/pantsbuild/testproject/fail256:fail256',\n 'testprojects/tests/python/pants/dummies:failing_target',\n 'testprojects/tests/scala/org/pantsbuild/testproject/non_exports:C',\n 'testprojects/src/scala/org/pantsbuild/testproject/exclude_direct_dep',\n # These don't pass without special config.\n 'testprojects/tests/java/org/pantsbuild/testproject/depman:new-tests',\n 'testprojects/tests/java/org/pantsbuild/testproject/depman:old-tests',\n 'testprojects/tests/java/org/pantsbuild/testproject/htmlreport:htmlreport',\n 'testprojects/tests/java/org/pantsbuild/testproject/parallel.*',\n 'testprojects/src/python/python_distribution/fasthello_with_install_requires.*'\n ]\n\n # May not succeed without java8 installed\n need_java_8 = [\n 'testprojects/src/java/org/pantsbuild/testproject/targetlevels/java8',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight',\n 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform',\n 'examples/src/java/org/pantsbuild/example/plugin',\n ]\n\n # Targets for testing timeouts. 
These should only be run during specific integration tests,\n # because they take a long time to run.\n timeout_targets = [\n 'testprojects/tests/python/pants/timeout:sleeping_target',\n 'testprojects/tests/java/org/pantsbuild/testproject/timeout:sleeping_target',\n # Called with test_pytest_run_integration\n 'testprojects/tests/python/pants/timeout:exceeds_timeout',\n 'testprojects/tests/python/pants/timeout:ignores_terminate',\n ]\n\n deliberately_conflicting_targets = [\n 'testprojects/src/python/interpreter_selection.*'\n ]\n\n simply_skip = [\n # Already tested at pants_test.backend.jvm.targets.test_jar_dependency_integration.JarDependencyIntegrationTest\n 'testprojects/3rdparty/org/pantsbuild/testprojects:testprojects',\n # Already tested in 'PantsRequirementIntegrationTest' and 'SetupPyIntegrationTest'.\n 'testprojects/pants-plugins/*',\n ]\n\n targets_to_exclude = (known_failing_targets + negative_test_targets + need_java_8 +\n timeout_targets + deliberately_conflicting_targets + simply_skip)\n exclude_opts = map(lambda target: '--exclude-target-regexp={}'.format(target),\n targets_to_exclude)\n\n # Run list with exclude options, then parse and sort output.\n pants_run = self.run_pants(['list', 'testprojects::', 'examples::'] + exclude_opts)\n self.assert_success(pants_run)\n return sorted(pants_run.stdout_data.split())", "def is_target( self ):\n\n raise NotImplementedError(\"is_target\");", "def target_interfaces(self):", "def target_interfaces(self):", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def invalid_targets(self):\n return self._combined_invalid_versioned_targets.targets", "def is_target(X, require_attrs=None):\n\n if require_attrs is None:\n require_attrs = (\n name for name in vars(Target) if not name.startswith(\"_\")\n )\n\n return all([hasattr(X, name) for name in require_attrs])", "def Check(self, artifacts: list[T]) -> bool:\n raise NotImplementedError(\"Subclass didn't implement Check method.\")", "def _ensure_all_targets_allowed(self, metadata_role, metadata_object):\n \n # Return if 'metadata_role' is 'targets'. 'targets' is not\n # a delegated role.\n if metadata_role == 'targets':\n return\n \n # The targets of delegated roles are stored in the parent's\n # metadata file. 
Retrieve the parent role of 'metadata_role'\n # to confirm 'metadata_role' contains valid targets.\n parent_role = tuf.roledb.get_parent_rolename(metadata_role)\n\n # Iterate over the targets of 'metadata_role' and confirm they are trusted,\n # or their root parent directory exists in the role delegated paths of the\n # parent role.\n roles = self.metadata['current'][parent_role]['delegations']['roles']\n role_index = tuf.repo.signerlib.find_delegated_role(roles, metadata_role)\n\n # Ensure the delegated role exists prior to extracting trusted paths from\n # the parent's 'paths', or trusted path hash prefixes from the parent's\n # 'path_hash_prefixes'.\n if role_index is not None:\n role = roles[role_index] \n allowed_child_paths = role.get('paths')\n allowed_child_path_hash_prefixes = role.get('path_hash_prefixes')\n actual_child_targets = metadata_object['targets'].keys()\n\n if allowed_child_path_hash_prefixes is not None:\n consistent = self._paths_are_consistent_with_hash_prefixes\n if not consistent(actual_child_targets,\n allowed_child_path_hash_prefixes):\n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target which does not'+\\\n ' have a path hash prefix matching'+\\\n ' the prefix listed by the parent'+\\\n ' role '+repr(parent_role)+'.')\n\n elif allowed_child_paths is not None: \n\n # Check that each delegated target is either explicitly listed or a parent\n # directory is found under role['paths'], otherwise raise an exception.\n # If the parent role explicitly lists target file paths in 'paths',\n # this loop will run in O(n^2), the worst-case. The repository\n # maintainer will likely delegate entire directories, and opt for\n # explicit file paths if the targets in a directory are delegated to \n # different roles/developers.\n for child_target in actual_child_targets:\n for allowed_child_path in allowed_child_paths:\n prefix = os.path.commonprefix([child_target, allowed_child_path])\n if prefix == allowed_child_path:\n break\n else: \n raise tuf.ForbiddenTargetError('Role '+repr(metadata_role)+\\\n ' specifies target '+\\\n repr(child_target)+' which is not'+\\\n ' an allowed path according to'+\\\n ' the delegations set by '+\\\n repr(parent_role)+'.')\n\n else:\n\n # 'role' should have been validated when it was downloaded.\n # The 'paths' or 'path_hash_prefixes' attributes should not be missing,\n # so raise an error in case this clause is reached.\n raise tuf.FormatError(repr(role)+' did not contain one of '+\\\n 'the required fields (\"paths\" or '+\\\n '\"path_hash_prefixes\").')\n\n # Raise an exception if the parent has not delegated to the specified\n # 'metadata_role' child role.\n else:\n raise tuf.RepositoryError(repr(parent_role)+' has not delegated to '+\\\n repr(metadata_role)+'.')", "def legal_target(self):\n return choice([each for each in self.minions if not hasattr(each, 'taunt')])", "def _CheckNoInterfacesInBase(input_api, output_api):\n pattern = input_api.re.compile(r'^\\s*@interface', input_api.re.MULTILINE)\n files = []\n for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):\n if (f.LocalPath().startswith('base/') and\n not \"/ios/\" in f.LocalPath() and\n not \"/test/\" in f.LocalPath() and\n not f.LocalPath().endswith('.java') and\n not f.LocalPath().endswith('_unittest.mm') and\n not f.LocalPath().endswith('_spi.h')):\n contents = input_api.ReadFile(f)\n if pattern.search(contents):\n files.append(f)\n\n if len(files):\n return [ output_api.PresubmitError(\n 'Objective-C interfaces or categories are 
forbidden in libbase. ' +\n 'See http://groups.google.com/a/chromium.org/group/chromium-dev/' +\n 'browse_thread/thread/efb28c10435987fd',\n files) ]\n return []", "def can_target(name):\n return False", "def test_all_no_class(self):", "def test_all_no_class(self):", "def check_unused_attributes(self):\n all_attrs_read = collections.defaultdict(set)\n\n def _add_attrs(typ, attr_names_read):\n if typ is None:\n return\n all_attrs_read[typ] |= attr_names_read\n for base_cls in typ.__bases__:\n all_attrs_read[base_cls] |= attr_names_read\n if isinstance(typ, type):\n for child_cls in qcore.inspection.get_subclass_tree(typ):\n all_attrs_read[child_cls] |= attr_names_read\n\n for serialized, attrs_read in six.iteritems(self.attributes_read):\n attr_names_read = {attr_name for attr_name, _, _ in attrs_read}\n _add_attrs(self.unserialize_type(serialized), attr_names_read)\n\n for typ, attrs in self.config.IGNORED_UNUSED_ATTRS_BY_CLASS:\n _add_attrs(typ, attrs)\n\n used_bases = tuple(self.config.USED_BASE_CLASSES)\n\n for typ, attrs_read in sorted(\n six.iteritems(all_attrs_read), key=self._cls_sort\n ):\n if self.serialize_type(typ) not in self.classes_examined or issubclass(\n typ, used_bases\n ):\n continue\n existing_attrs = set(typ.__dict__.keys())\n for attr in existing_attrs - attrs_read - self.config.IGNORED_UNUSED_ATTRS:\n # server calls will always show up as unused here\n if _safe_getattr(_safe_getattr(typ, attr, None), \"server_call\", False):\n continue\n print(\"Unused method: %r.%s\" % (typ, attr))", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def invalid_versioned_targets(self):\n return self._invalid_versioned_targets", "def untargeted(self):\n\t\tpass", "def _target_filter(self, obj):\r\n return type(obj).__name__ in ['Cube'] and not obj.is_grasped # List because may be extended to other objects.\r", "def test_check_exclude_none(self):\n\n self.assertTrue(PostfixExclude([]).check(self.file_gitignore))\n self.assertTrue(PostfixExclude([]).check(self.file_py))\n self.assertTrue(PostfixExclude([]).check(self.file_authors))\n self.assertTrue(PostfixExclude([]).check(self.file__init__))\n self.assertTrue(PostfixExclude([]).check(self.file_bin))", "def import_targets(self):\n count = 0\n target_subclasses = self.get_subclasses(Target)\n for module in os.listdir(config.targets_path):\n if not os.path.isdir(f\"{config.targets_path}/{module}\"):\n continue\n\n for target in os.listdir(f\"{config.targets_path}/{module}\"):\n if target == \"__init__.py\" or target[-3:] != \".py\":\n continue\n else:\n targets_path_split = config.targets_path.split(\"/\")\n module_directory = targets_path_split[0]\n target_path = targets_path_split[1]\n targetcls = importlib.import_module(f\"{module_directory}.{target_path}.{module}.{target[:-3]}\")\n for _, obj in inspect.getmembers(targetcls):\n if inspect.isclass(obj):\n if inspect.getmro(obj)[1] in target_subclasses and obj is not Target:\n self.add_target(obj)\n count += 1\n\n return count", "def matches(self, target):\n raise NotImplementedError()", "def resolve_all(cls, targets, *expected_types):\r\n if targets:\r\n for target in maybe_list(targets, expected_type=Target):\r\n concrete_targets = [t for t in target.resolve() if t.is_concrete]\r\n for resolved in concrete_targets:\r\n if expected_types and not isinstance(resolved, expected_types):\r\n raise TypeError('%s requires types: %s and found %s' % (cls, expected_types, resolved))\r\n yield resolved", "def test_check_exclude_none(self):\n\n 
self.assertTrue(DirExclude([]).check(self.file_gitignore))\n self.assertTrue(DirExclude([]).check(self.file_perceval))\n self.assertTrue(DirExclude([]).check(self.file_authors))\n self.assertTrue(DirExclude([]).check(self.file_tests))\n self.assertTrue(DirExclude([]).check(self.file_bin))" ]
[ "0.8121008", "0.7501856", "0.736171", "0.6360859", "0.59526706", "0.5948528", "0.59097034", "0.5747231", "0.57119584", "0.57119584", "0.57095045", "0.560901", "0.559652", "0.55962926", "0.5539723", "0.5533668", "0.5523853", "0.55037796", "0.5490197", "0.5490197", "0.54899144", "0.5473976", "0.54738754", "0.5465111", "0.54615283", "0.54579306", "0.54202825", "0.5419512", "0.5356281", "0.5345287" ]
0.82257766
0
Check penalty computation functions
def test_penalty_compute(self): objective = {'G': numpy.zeros((9)), 'H': numpy.diag((1,)*9), 'X': 1} for penalty in self.penalties: result = penalty.compute([1]*self.np, objective) assert isinstance(result,tuple) # more tests go here
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result=penalty.compute([1]*self.np, objective)\n self.assertEqual(tuple, type(result))\n # more tests go here", "def test_check_cost():", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def _compute_penalty(self):\n raise ValueError('Implement in a child class')", "def penalty(self):\n return 0", "def test_get_compute_func(self):\n assert api._get_compute_func(self.run_fa) is api._compute_grade_for_fa\n assert api._get_compute_func(self.run_no_fa) is api._compute_grade_for_non_fa", "def check_optimality(xs, recovered_xs, fps, recovered_fps, fns, recovered_fns, fpr, fnr, f):\n fpr, fnr, f = check_inputs(fpr, fnr, f)\n\n def objective(xs, fps, fns):\n \"\"\"We want to minimize this objective. \"\"\"\n Wp = - np.log(fpr / (1 - fpr)) # Weight for false positives\n Wn = - np.log(fnr / (1 - fnr)) # Weight for false negatives\n Wx = - np.log(f / (1 - f)) # Weight for all positives\n return np.sum(xs) * Wx + np.sum(fps) * Wp + np.sum(fns) * Wn\n\n _, num_trials = xs.shape\n for trial in range(num_trials):\n x, recovered_x = xs[:, trial], recovered_xs[:, trial]\n num_errors = np.sum(x != recovered_x)\n objective_true = objective(xs, fps, fns)\n objective_recovered = objective(recovered_xs, recovered_fps, recovered_fns)\n if num_errors != 0 and objective_true < objective_recovered:\n print(\"ILP solver fails to find the optimize the objective for trail %s\" % trial)", "def _check_completeness(self, ops_and_rates):\n op = sum((L.dag() * L) for L, _ in ops_and_rates)\n\n a_candidate = op.tr() / op.shape[0]\n with CoreOptions(rtol=self.options[\"completeness_rtol\"],\n atol=self.options[\"completeness_atol\"]):\n if op == a_candidate * qeye(op.dims[0]):\n return np.real(a_candidate), None\n\n a = max(op.eigenenergies())\n L = (a * qeye(op.dims[0]) - op).sqrtm() # new Lindblad operator\n return a, L", "def checkCostFunction(lbd=0):\n # Create small problem\n X_t = np.random.rand(4, 3)\n Theta_t = np.random.rand(5, 3)\n\n # Zap out most entries\n Y = X_t.dot(Theta_t.T)\n Y[np.random.rand(Y.shape[0], Y.shape[1]) > .5] = 0\n R = np.zeros(Y.shape)\n R[Y == 0] = 1\n\n # Run Gradient Checking\n X = np.random.randn(X_t.shape[0], X_t.shape[1])\n Theta = np.random.randn(Theta_t.shape[0], Theta_t.shape[1])\n num_users = Y.shape[1]\n num_movies = Y.shape[0]\n num_features = Theta_t.shape[1]\n\n def Jfunc(t):\n return cofiCostFunc(t, Y, R, num_users, num_movies, num_features, lbd)\n\n numgrad = computeNumericalGradient(Jfunc, np.r_[X.flatten(), Theta.flatten()])\n\n cost, grad = cofiCostFunc(np.r_[X.flatten(), Theta.flatten()], Y, R, num_users, num_movies, num_features, lbd)\n\n print(np.c_[numgrad, grad])\n print('The above two columns you get should be very similar.')\n print('(Left-Your Numerical Gradient, Right-Analytical Gradient)\\n')\n\n diff = np.linalg.norm(numgrad-grad)/np.linalg.norm(numgrad+grad)\n print('If your cost function implementation is correct, then')\n print('the relative difference will be small (less than 1e-9).')\n print('Relative Difference: %g\\n' % diff)", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def test_bounds_respected_func_not_called(\n self, check_bounds_respected):\n 
self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n self.controller.flag_expected = [3]\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_not_called()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()", "def get_penalty(state, action, winrate_predictor):\n if violate_rule(state, action):\n return -1 \n return 0", "def get_expected_cost(self):", "def compute_cost(AL, Y):\n pass", "def test_bisection_system(testFunctions,tol, printFlag):\n pass", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def test_bounds_respected_func_called(\n self, check_bounds_respected):\n self.controller.problem.value_ranges = {'test': (0, 1)}\n self.controller.minimizer = \"deriv_free_algorithm\"\n\n _ = loop_over_hessians(self.controller,\n options=self.options,\n grabbed_output=self.grabbed_output,\n checkpointer=self.cp)\n check_bounds_respected.assert_called()", "def test_error_rate(self):\n # For the penalty, the default loss is hinge.\n expected_signed_penalty_labels = (self._penalty_labels > 0.0) * 2.0 - 1.0\n expected_penalty_numerator = np.sum(\n np.maximum(\n 0.0,\n 1.0 - expected_signed_penalty_labels * self._penalty_predictions) *\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_denominator = np.sum(\n self._penalty_weights * self._penalty_predicate)\n expected_penalty_value = (\n expected_penalty_numerator / expected_penalty_denominator)\n\n # For the constraint, the default loss is zero-one.\n expected_signed_constraint_labels = (\n (self._constraint_labels > 0.0) * 2.0 - 1.0)\n expected_constraint_numerator = np.sum(\n (0.5 * (1.0 - expected_signed_constraint_labels * np.sign(\n self._constraint_predictions))) * self._constraint_weights *\n self._constraint_predicate)\n expected_constraint_denominator = np.sum(\n self._constraint_weights * self._constraint_predicate)\n expected_constraint_value = (\n expected_constraint_numerator / expected_constraint_denominator)\n\n actual_expression = binary_rates.error_rate(self.context)\n self.check_rates(expected_penalty_value, expected_constraint_value,\n actual_expression)", "def cross_validate_per_threshold(dict_trust_cv, dict_labels, dict_predictions, dict_seizure_timings, dict_idx_to_check,\n fld_number, run_number, pat_ids_arr, per_arr_cv):\n n_patients = pat_ids_arr.shape[0]\n per_arr_cv = np.array(per_arr_cv)\n n_per = per_arr_cv.shape[0]\n F1_per = np.zeros((n_per, n_patients))\n # determine threshold\n all_trust_scores = []\n for pat_id in pat_ids_arr:\n dict_str = 'fld_' + str(fld_number) + '_run_' + str(run_number) + '_pat_id_' + str(pat_id)\n all_trust_scores = all_trust_scores + list(dict_trust_cv[dict_str])\n for pat_cnt in range(n_patients):\n pat_id = pat_ids_arr[pat_cnt]\n pid_str = str(pat_id)\n labels_all = dict_labels[pid_str]\n predictions_all = dict_predictions[pid_str]\n idx_pat_to_check = dict_idx_to_check[pid_str]\n dict_str = 'fld_' + str(fld_number) + '_run_' + str(run_number) + '_pat_id_' + str(pat_id)\n trust_scores_pat = dict_trust_cv[dict_str]\n seizure_timings = dict_seizure_timings[pid_str]\n for per_cnt in range(n_per):\n per = per_arr_cv[per_cnt]\n threshold_trust_scores = 
np.percentile(all_trust_scores, per)\n # loop over the recordings of the patient\n TP = 0\n FP = 0\n FN = 0\n total_seizures = 0\n F1 = 0.\n seizure_flags, n_not_trusted = af.calculate_seizure_flags_w_trustscores_threshold_fast(label_input=predictions_all,\n trust_scores=trust_scores_pat, threshold_value=threshold_trust_scores, idx_to_check=idx_pat_to_check)\n # calculate F1\n TP, FP, FN, total_seizures, det_del, detected_seizure_timings, FP_times = af.precision_metrics_kaat_10s(seizure_flags, seizure_timings)\n if total_seizures > 0: \n if int(TP) > 0:\n F1 = 2.*TP / (2.*TP + FP + FN)\n else:\n F1 = 0.\n F1_per[per_cnt, pat_cnt] = F1 \n else:\n F1_per[per_cnt, pat_cnt] = np.nan\n F1_av = []\n for i in range(n_per):\n F1_av.append(np.nanmean(F1_per[i, :]))\n print('F1 = ' + str(F1_av))\n F1_av = np.array(F1_av)\n where_max = np.argmax(F1_av)\n max_per = per_arr_cv[where_max]\n max_threshold = np.percentile(all_trust_scores, max_per)\n del all_trust_scores\n return max_threshold", "def compute_penalty(self, spec_nums: Union[float, Iterable[float]], spec_kwrd: str) \\\n -> Union[float, List[float]]:\n raise NotImplementedError", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def penalty_reward(reward):\n if reward < 0:\n return True\n return False", "def uniformCostSearch(problem):\n \"*** YOUR CODE HERE IF YOU WANT TO PRACTICE ***\"\n aStarSearch(problem)", "def test_PoissonRegression_penalty_C(self):\n for penalty in PoissonRegression._penalties.keys():\n if penalty != 'none':\n if penalty == 'binarsity':\n learner = PoissonRegression(\n penalty=penalty, C=self.float_1, blocks_start=[0],\n blocks_length=[1])\n else:\n learner = PoissonRegression(penalty=penalty,\n C=self.float_1)\n self.assertEqual(learner.C, self.float_1)\n self.assertEqual(learner._prox_obj.strength, 1. / self.float_1)\n learner.C = self.float_2\n self.assertEqual(learner.C, self.float_2)\n self.assertEqual(learner._prox_obj.strength, 1. 
/ self.float_2)\n\n msg = '^``C`` must be positive, got -1$'\n with self.assertRaisesRegex(ValueError, msg):\n if penalty == 'binarsity':\n PoissonRegression(penalty=penalty, C=-1,\n blocks_start=[0], blocks_length=[1])\n else:\n PoissonRegression(penalty=penalty, C=-1)\n else:\n msg = '^You cannot set C for penalty \"%s\"$' % penalty\n with self.assertWarnsRegex(RuntimeWarning, msg):\n PoissonRegression(penalty=penalty, C=self.float_1)\n\n learner = PoissonRegression(penalty=penalty)\n with self.assertWarnsRegex(RuntimeWarning, msg):\n learner.C = self.float_1\n\n msg = '^``C`` must be positive, got -2$'\n with self.assertRaisesRegex(ValueError, msg):\n learner.C = -2", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # 
cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def learnign_rate_examples():\n #######\n bad_larning_rate = 0.1\n not_bad_learning_rate = 1e-4\n good_learning_rate = 1e-3\n #######\n return bad_larning_rate, not_bad_learning_rate, good_learning_rate", "def test_non_integral_validation(self):" ]
[ "0.7188831", "0.704875", "0.6502529", "0.6319194", "0.6253688", "0.6040241", "0.60105747", "0.5938388", "0.59152675", "0.5900038", "0.5884319", "0.5855779", "0.5855779", "0.5855779", "0.5843721", "0.5841484", "0.5834948", "0.58325404", "0.58152175", "0.57859385", "0.57823145", "0.57781076", "0.5771676", "0.5761841", "0.5761132", "0.5755922", "0.57551754", "0.57523847", "0.5741478", "0.57361555" ]
0.7320431
0
Check zero order target terms
def test_target_zero_order_terms(self): obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0) assert isinstance(obj, dict) assert "X" in obj assert "G" in obj assert "H" in obj assert int(obj["X"]) != 0 assert obj["G"].any() == False assert (obj["H"] == numpy.diag([1]*self.ff.np)).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertNotEqual(int(obj[\"X\"]), 0)\n \n self.assertTrue(\"G\" in obj)\n self.assertFalse(obj[\"G\"].any())\n \n self.assertTrue(\"H\" in obj)\n self.assertEqual(obj[\"H\"], numpy.diag([1]*self.ff.np))", "def test_target_first_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj", "def test_target_first_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_second_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_second_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj", "def test_abundant_sequence_zero_term(self):\n\n\t\tself.assertEquals(abundant_sequence(0), [])", "def is_zero(self):\n for action, prob in self._regrets.items():\n if prob != 0.0:\n return False\n return True", "def valid(self, target):", "def test_verify_all_gates_have_valid_targets():\n nSpinOrbitals = input_json[\"constants\"][\"nSpinOrbitals\"]\n\n interaction_list = input_json[\"terms\"]\n\n for interaction in interaction_list:\n targets = interaction[\"targets\"]\n\n for orbital in targets:\n assert 0 <= orbital < nSpinOrbitals, \"Orbital target is out of range\"", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def test_tensor_terms_have_constraints(toy_interaction_X_y):\n X, y = toy_interaction_X_y\n gam = LinearGAM(te(0, 1, constraints='none')).fit(X, y)\n\n assert gam._is_fitted\n assert gam.terms.hasconstraint", "def is_zero(self):\n for t in self:\n if t != TRIT_ZERO:\n return False\n return True", "def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"", "def zero_target(*args):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n 
return\n\n try:\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n ik_mode = pm.getAttr(target_ctrl_path + '.ik')\n\n if ik_mode:\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.translate', 0, 0, 0)\n pm.setAttr(tool_ctrl_path + '.rotate', 0, 0, 0)\n else:\n pm.setAttr(target_ctrl_path + '.translate', 0, 0, 0)\n pm.setAttr(target_ctrl_path + '.rotate', 0, 0, 0)\n else:\n a1_fk_ctrl_path = format_path(__A1_FK_CTRL_PATH, robot)\n a2_fk_ctrl_path = format_path(__A2_FK_CTRL_PATH, robot)\n a3_fk_ctrl_path = format_path(__A3_FK_CTRL_PATH, robot)\n a4_fk_ctrl_path = format_path(__A4_FK_CTRL_PATH, robot)\n a5_fk_ctrl_path = format_path(__A5_FK_CTRL_PATH, robot)\n a6_fk_ctrl_path = format_path(__A6_FK_CTRL_PATH, robot)\n\n pm.setAttr(a1_fk_ctrl_path + '.rotateY', 0)\n pm.setAttr(a2_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a3_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a4_fk_ctrl_path + '.rotateZ', 0)\n pm.setAttr(a5_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a6_fk_ctrl_path + '.rotateZ', 0)\n except:\n pm.warning('Cannot zero target')", "def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def a_test_no_terms():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=0, ma=0, family=Exponential())\n x = model.fit()\n assert(len(model.latent_variables.z_list) == 2)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)", "def any_zero(self, **indices):\n for child in self.children:\n if is_num(child, **indices):\n if evaluate(child, **indices) == 0:\n return True\n return False", "def test_extracting_no_values(self):\n\t\tformula = bf.And([bf.Or([bf.Tru(), bf.Tru(), bf.Tru()]), bf.Or([bf.Not(bf.Tru()), bf.Not(bf.Tru())]), bf.Not(bf.Tru())])\n\t\tself.assertEqual([], au.extract_variables(formula), \"Invalid variables extracted, expected [].\")", "def test_check_null_weight_with_nonzeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, sample_weight)\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def test_term(self):\n\t\tterm_one = schrodinger.term(0)\n\t\tself.assertEqual(1, term_one(0).numpy())\n\t\tterm_two = schrodinger.term(1)\n\t\tself.assertEqual(0, term_two(0).numpy())", "def consistenttarget(self, target):\n if self.predictors:\n return target == self.predictors.values()[0]\n return True", "def is_zero(self):\n return float(self.coeff.nominator) / self.coeff.denominator == 0.0", "def test_thermal_relaxation_error_t1_equal_t2_0state(self):\n error = thermal_relaxation_error(1, 1, 1)\n targets = [[{'name': 'id', 'qubits': [0]}],\n [{'name': 'reset', 'qubits': [0]}]]\n probs = [np.exp(-1), 1 - np.exp(-1)]\n for j in range(2):\n circ, p = error.error_term(j)\n self.remove_if_found(circ, targets)\n if circ[0]['name'] == 'id':\n self.assertAlmostEqual(p, probs[0], msg=\"identity probability\")\n else:\n self.assertAlmostEqual(p, probs[1], msg=\"reset probability\")\n self.assertEqual(targets, [], msg=\"relaxation circuits\")", "def 
test_validate_tgt_returns_true_when_no_valid_minions_have_been_found():\n ckminions = salt.utils.minions.CkMinions(opts={})\n with patch(\n \"salt.utils.minions.CkMinions.check_minions\", autospec=True, return_value={}\n ):\n result = ckminions.validate_tgt(\"fnord\", \"fnord\", \"fnord\", minions=[])\n assert result is True", "def has_invalid_targets(self):\n return len(self._combined_invalid_versioned_targets.targets) > 0", "def test_query_ot(self):\n term = next(self.OntTerm.query(label='deep'))\n assert term, 'oops?'", "def test_when_targets():\n num_multi_targets = 0\n for when_targets_day in when_targets:\n # All inputs have a label\n assert np.all(when_targets_day.sum(axis=1).sum(axis=1) > 0)\n\n num_multi_targets += np.sum((when_targets_day.sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def test_check_null_weight_with_zeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sample_weight[:1] = 0.0\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, np.array([1, 1, 1, 1, 1]))\n np.testing.assert_almost_equal(X_out, np.array([[1], [2], [3], [4], [5]]))\n np.testing.assert_almost_equal(y_out, np.array([7, 9, 11, 13, 15]))", "def test_model(preds, target):\n ### START CODE HERE (Replace instances of 'None' with your code) ###\n print(preds.shape, target.shape)\n total_log_ppx = np.sum(preds * tl.one_hot(target, preds.shape[-1]), axis= -1) # HINT: tl.one_hot() should replace one of the Nones\n print(total_log_ppx.shape)\n \n non_pad = 1.0 - np.equal(target, 0) # You should check if the target equals 0\n ppx = total_log_ppx * non_pad # Get rid of the padding\n\n log_ppx = np.sum(ppx) / np.sum(non_pad)\n ### END CODE HERE ###\n \n return -log_ppx" ]
[ "0.7730585", "0.65473384", "0.65343684", "0.6058782", "0.604456", "0.601732", "0.5878453", "0.58585846", "0.58449787", "0.57668084", "0.57624483", "0.57577574", "0.5672885", "0.5633333", "0.5599019", "0.5590405", "0.55601686", "0.5526151", "0.55170417", "0.55007845", "0.5498032", "0.5410733", "0.5403491", "0.5399457", "0.5399353", "0.53987634", "0.5384172", "0.5360034", "0.5343376", "0.53306746" ]
0.78727204
0
Check first order target terms
def test_target_first_order_terms(self): obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1) assert isinstance(obj, dict) assert "X" in obj assert "G" in obj assert "H" in obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_target_first_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_second_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj", "def test_target_second_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj\n assert int(obj[\"X\"]) != 0\n assert obj[\"G\"].any() == False\n assert (obj[\"H\"] == numpy.diag([1]*self.ff.np)).all()", "def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertNotEqual(int(obj[\"X\"]), 0)\n \n self.assertTrue(\"G\" in obj)\n self.assertFalse(obj[\"G\"].any())\n \n self.assertTrue(\"H\" in obj)\n self.assertEqual(obj[\"H\"], numpy.diag([1]*self.ff.np))", "def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"", "def pfd_find_first_target():\n\tglobal dependencies_list\n\tglobal targets\n\tnum_dependencies = len(dependencies_list)\n\tfor i in xrange(num_dependencies):\n\t\tif dependencies_list[i] == 0:\n\t\t\theapq.heappush(targets, i)", "def test_query_ot(self):\n term = next(self.OntTerm.query(label='deep'))\n assert term, 'oops?'", "def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True", "def consistenttarget(self, target):\n if self.predictors:\n return target == self.predictors.values()[0]\n return True", "def test_verify_all_gates_have_valid_targets():\n nSpinOrbitals = input_json[\"constants\"][\"nSpinOrbitals\"]\n\n interaction_list = input_json[\"terms\"]\n\n for interaction in interaction_list:\n targets = interaction[\"targets\"]\n\n for orbital in targets:\n assert 0 <= orbital < nSpinOrbitals, \"Orbital target is out of range\"", "def compare_token(repair, target):\n if repair in target or target in repair:\n return 1\n return iterated_lev_dist(repair, target)", "def calculate_appropriate_target(self):\n pass", "def calculate_appropriate_target(self):\n pass", "def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 
4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def _check_le_1(self, target, **kwargs):\n # For every keyword argument\n for key, value in kwargs.items():\n # Set boolean conditions\n applicable_keyword = key in self._le_1_keywords\n applicable_target = target in self._le_1_targets\n # If key is in specified list\n if applicable_keyword and applicable_target:\n # Check if value is less than or equal to 1\n if 0.0 <= value <= 1.0:\n pass\n # If not, raise error\n else:\n raise FairException('\"{}\" must have \"{}\" value between zero and one.'.format(target, key))", "def findLinkedTerms(self):\n for key in self.summaryDict.keys(): # v' in the formula\n if self.getCoverFromModalityInDictionnary(self.summaryDict, key) == 0:\n correlation = 0\n else:\n dep = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,key) / self.getCoverFromModalityInDictionnary(self.summaryDict, key) #cover(v',R')/cover(v'R)\n if dep <= 1:\n correlation = 0\n else:\n correlation = 1 - (1 / dep)\n self.correlationDict[key] = correlation", "def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def test_accept_all_terms_required(api, account, given_terms):\n api.terms.get_required_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms()\n api.terms.get_required_terms.assert_called()\n api.terms.get_all_terms.assert_not_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count", "def uses_all(word, required):\n pass", "def _is_term_exist(self, term):\n return term in self.postingDict", "def valid(self, target):", "def test_target_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.target == atom.target", "def test_accept_all_terms_optional(api, account, given_terms):\n api.terms.get_all_terms.return_value = given_terms\n api.terms.accept_terms.reset_mock()\n account.accept_all_terms(optional=True)\n api.terms.get_required_terms.assert_not_called()\n api.terms.get_all_terms.assert_called()\n\n call_count = custom_st.count_terms(given_terms)\n assert api.terms.accept_terms.call_count == call_count", "def get_requisite_objective_terms(self):\n return # osid.learning.ObjectiveQueryInspector", "def checkTargets(targets, strings, propagation, output):\n result = \"\"\n #Do not check an URL twice\n #Here, two different pages on the same target can be checked\n #This is because a page can be \"alone\" on a website\n targetViewed = set([])\n for url in targets:\n if url not in targetViewed:\n string, otherLinks, linksViewed = checkSite(url, strings, output)\n result += string\n result += \"\\n\"\n targetViewed = targetViewed | set([url])\n\n #If user want use propagation, add other links to the targets\n if propagation > 0:\n targets += list(otherLinks)\n propagation -= 1\n #Add all viewed links in targetViewed in order to do not check\n #twice the same URL\n targetViewed = targetViewed | linksViewed\n return result", "def test_do_check_number_of_terms(self):\n self.assertTrue(self.a.do_check_number_of_terms(self.b))\n 
self.assertFalse(self.a.do_check_number_of_terms(self.c))", "def test_one_condition_multiple_targets(self):\n matrices = [\n np.array([[0, 0.6, 0], [1.0, 0, 1.0], [0, 0.1, 0]])\n ]\n coefficients = get_importance_coeffs(['A', 'B', 'C'], ['A', 'C'], matrices)\n assert coefficients['A'] == 1.0\n assert coefficients['B'] == 0.6\n assert coefficients['C'] == 1.0\n\n matrices = [\n np.array([[0, 0.1, 0], [1.0, 0, 1.0], [0, 0.6, 0]])\n ]\n coefficients = get_importance_coeffs(['A', 'B', 'C'], ['A', 'C'], matrices)\n assert coefficients['A'] == 1.0\n assert coefficients['B'] == 0.6\n assert coefficients['C'] == 1.0", "def legal_target(self):\n return choice([each for each in self.minions if not hasattr(each, 'taunt')])", "def is_prefix(self, term: str, labels: istr = None) -> bool:" ]
[ "0.7594869", "0.69414026", "0.6917484", "0.6709718", "0.6650869", "0.5694065", "0.5567757", "0.5471913", "0.5433634", "0.53689086", "0.52868193", "0.52829295", "0.5240539", "0.5240539", "0.52094805", "0.51635295", "0.5076618", "0.50554353", "0.50218064", "0.50205237", "0.501249", "0.50070375", "0.5001856", "0.5000269", "0.49934247", "0.49931052", "0.499247", "0.49917832", "0.4986417", "0.4952286" ]
0.7671157
0
Check second order target terms
def test_target_second_order_terms(self): obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2) assert isinstance(obj, dict) assert "X" in obj assert "G" in obj assert "H" in obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_target_second_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_first_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertTrue(\"G\" in obj)\n self.assertTrue(\"H\" in obj)", "def test_target_first_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj", "def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n assert isinstance(obj, dict)\n assert \"X\" in obj\n assert \"G\" in obj\n assert \"H\" in obj\n assert int(obj[\"X\"]) != 0\n assert obj[\"G\"].any() == False\n assert (obj[\"H\"] == numpy.diag([1]*self.ff.np)).all()", "def test_target_zero_order_terms(self):\n obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)\n self.assertEqual(type(obj),dict)\n self.assertTrue(\"X\" in obj)\n self.assertNotEqual(int(obj[\"X\"]), 0)\n \n self.assertTrue(\"G\" in obj)\n self.assertFalse(obj[\"G\"].any())\n \n self.assertTrue(\"H\" in obj)\n self.assertEqual(obj[\"H\"], numpy.diag([1]*self.ff.np))", "def valid_target(start, target, words):\r\n if target.isalpha(): # target word must be alphabetic\r\n if len(start) == len(target): # target word must be same size as start word\r\n if start != target: # target and start words must be different\r\n if target in words: # target word must be in the list of words\r\n return \"0\"\r\n else:\r\n return \"Target word not in list of words....please reenter\"\r\n else:\r\n return \"Target word must be different from Start word....please reenter\"\r\n else:\r\n return \"Target word must be same length as Start word....please reenter\"\r\n else:\r\n return \"Target word must contain only letters....please reenter\"", "def checkTerm(*args):\n return _libsbml.SBO_checkTerm(*args)", "def term_restrictions(data):\n\n term = [\"1st\", \"2nd\", \"3rd\", \"1ST\", \"2ND\", \"3RD\"]\n if data not in term:\n return False\n return True", "def compare_token(repair, target):\n if repair in target or target in repair:\n return 1\n return iterated_lev_dist(repair, target)", "def checkTargets(targets, strings, propagation, output):\n result = \"\"\n #Do not check an URL twice\n #Here, two different pages on the same target can be checked\n #This is because a page can be \"alone\" on a website\n targetViewed = set([])\n for url in targets:\n if url not in targetViewed:\n string, otherLinks, linksViewed = checkSite(url, strings, output)\n result += string\n result += \"\\n\"\n targetViewed = targetViewed | set([url])\n\n #If user want use propagation, add other links to the targets\n if propagation > 0:\n targets += list(otherLinks)\n propagation -= 1\n #Add all viewed links in targetViewed in order to do not check\n #twice the same URL\n targetViewed = targetViewed | linksViewed\n return result", "def test_query_ot(self):\n term = next(self.OntTerm.query(label='deep'))\n assert term, 'oops?'", "def SBO_checkTerm(*args):\n return _libsbml.SBO_checkTerm(*args)", "def potential_multi_term(tagged) :\n res = True\n for tag in tagged :\n res = res and stemgrammar(tag)\n return res", "def test_combine_multiple_or(self):\n inv_search = 
'author:\"ellis, j*\" and (title:report or keyword:\"cross section\")'\n spi_search = 'find a j ellis and (t report or k \"cross section\")'\n self._compare_searches(inv_search, spi_search)", "def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def test_verify_all_gates_have_valid_targets():\n nSpinOrbitals = input_json[\"constants\"][\"nSpinOrbitals\"]\n\n interaction_list = input_json[\"terms\"]\n\n for interaction in interaction_list:\n targets = interaction[\"targets\"]\n\n for orbital in targets:\n assert 0 <= orbital < nSpinOrbitals, \"Orbital target is out of range\"", "def test_do_check_number_of_terms(self):\n self.assertTrue(self.a.do_check_number_of_terms(self.b))\n self.assertFalse(self.a.do_check_number_of_terms(self.c))", "def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0", "def test_target_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert atom.lr.target == atom.target", "def intent_of_text_LnDOR(ChapterTextS, TargetQuestionsD, TestS, StopWords):\n \n # Chapter Text - stokenize\n StokensCT = stokenize(ChapterTextS, StopWords) \n\n # Test question - stokenize\n StokensTest = stokenize(TestS, StopWords)\n\n # Knowledge Base Dict - stokenize\n KBD_structure = stokenizeKBD(TargetQuestionsD, StopWords)\n\n # List (because list is mutable, set is not) of all stokens in document\n StokensDoc = StokensCT[:] # from chapter text\n StokensDoc.extend(StokensTest[:]) # += Test string\n\n # extend list of stokens in Doc\n for i in TargetQuestionsD:\n StokensDoc.extend(TargetQuestionsD[i][\"mq stokens\"][:]) # += KB target [matched Q]s\n StokensDoc.extend(TargetQuestionsD[i][\"ans stokens\"][:]) # += KB answers\n \n StokensTestV = set(StokensTest)\n StokensDocV = set(StokensDoc)\n StokensAntiTgtV = StokensDocV\n \n # Complement of all targets\n for i in TargetQuestionsD:\n StokensAntiTgtV = StokensAntiTgtV.difference(set(TargetQuestionsD[i][\"mq stokens\"]))\n \n # calculate confusion matrix and DOR etc.\n LnDORD = {}\n # Anti Target\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensAntiTgtV, StokensTestV) \n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN) \n \n LnDORD[\"AntiTgt\"] = {'lndor': LnDOR, 'theta': someAngle}\n\n # total occurences\n total_occ = 0\n for i in TargetQuestionsD:\n total_occ += TargetQuestionsD[i]['count']\n\n for i in TargetQuestionsD:\n StokensTgtV = set(TargetQuestionsD[i][\"mq stokens\"][:])\n\n TP, FP, FN, TN = confusion_matrix(StokensDocV, StokensTgtV, StokensTestV) \n priorOR = TargetQuestionsD[i]['count'] / total_occ\n\n LnDOR = lndor(TP, FP, FN, TN) \n someAngle = angleDOR(TP, FP, FN, TN, priorOR) \n \n LnDORD[i] = {'lndor': LnDOR, 'theta': someAngle}\n # LnDORD = {i: {'lndor': , 'theta': }}, KB indices + \"AntiTgt\"\n\n return LnDORD", "def calculate_appropriate_target(self):\n pass", "def 
calculate_appropriate_target(self):\n pass", "def price_target_deviation_2_rule(_m, y):\r\n\r\n return m.z_t2[y] >= m.YEAR_AVERAGE_PRICE_TARGET[y] - m.YEAR_AVERAGE_PRICE[y]", "def feature_two(ds, tup):\n # try:\n # if (nx.shortest_path_length(G, frm, to) == 2):\n # o2.write(\"trusted\\n\")\n # else:\n # o2.write(\"unverified\\n\")\n # except:\n # o2.write(\"unverified\\n\")\n\n A_child = ds[tup[0]]\n C_child = ds[tup[1]]\n return ((len(A_child.intersection(C_child)) > 0) | (tup[0] in ds[tup[1]]))", "def valid(self, target):", "def target_validation(target_name, action):\n json_data = read_file('presqt/specs/targets.json', True)\n for data in json_data:\n if data['name'] == target_name:\n if data[\"supported_actions\"][action] is False:\n raise PresQTValidationError(\n \"PresQT Error: '{}' does not support the action '{}'.\".format(target_name, action),\n status.HTTP_400_BAD_REQUEST)\n return True, data['infinite_depth']\n else:\n raise PresQTValidationError(\n \"PresQT Error: '{}' is not a valid Target name.\".format(target_name), status.HTTP_404_NOT_FOUND)", "def validateSemantic(cls,corpus,target):\n printMessage(cls,inspect.stack()[0][3],\n \"Validating against '%s' semantics..\"%(target))\n\n if target==\"new\":\n testfunction = Validator.isValidNew\n elif target==\"relaxed\":\n testfunction = Validator.isValidRelaxed\n elif target==\"compatible\":\n testfunction = Validator.isValidCompatible\n else:\n printError(cls,inspect.stack()[0][3],\"Cannot validate '%s' format\"%target)\n return(False)\n \n valid = testfunction(corpus)\n if valid:\n printMessage(cls,inspect.stack()[0][3],\"Valid semantics\")\n else:\n printError(cls,inspect.stack()[0][3],\"Invalid semantics\")\n return(valid)", "def fin_check(expresion):\n for token in set(bin_tokens.values()).union(set(un_tokens.values())):\n if token in expresion:\n return False\n return True", "def test_abundant_sequence_second_term(self):\n\n\t\tfirst_term = nth_abundant(1)\n\t\texcepted_output = 18\n\t\tself.assertEquals(first_term, excepted_output)", "def testTwoWords(self):\n\n\t\t\t\twords = ['business', 'directory']\n\t\t\t\tsynonyms = spinner.Synonym.objects.get_synonyms(words)\n\n\t\t\t\tassert len(synonyms)" ]
[ "0.74371237", "0.6681502", "0.66587096", "0.59809166", "0.5927252", "0.58840847", "0.5625534", "0.55692434", "0.5549024", "0.5514033", "0.5510398", "0.54905486", "0.546219", "0.53917503", "0.5349499", "0.5327487", "0.53227705", "0.5288182", "0.5278594", "0.5258623", "0.5233079", "0.5233079", "0.52318615", "0.520721", "0.51973945", "0.5190616", "0.51779604", "0.5160177", "0.51382065", "0.51300603" ]
0.75028914
0
Check objective.indicate() runs without errors
def test_indicate(self): self.objective.Indicate()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def objective(self):\n pass", "def prove_NI() -> Proof:\n # Optional Task 6.7e", "def check(self, runtime):", "def violated(self) -> bool:\n ...", "def experiment3():\n raise FAKE_ERROR", "def prove_NN() -> Proof:\n # Optional Task 6.7c", "def check():", "def check_optimization_sanity(self):\n if len(self.parameters) == 0:\n msg = \"No parameters defined. Optimization not possible.\"\n raise ValueError(msg)\n\n if len(self.constraints) == 0:\n msg = \"No constraints defined. Optimization not possible.\"\n raise ValueError(msg)", "def test_warning_is_triggered(caplog):\n pytest.importorskip('transformers')\n apply_alibi(\n model=torch.nn.Sequential(torch.nn.Linear(20, 10), torch.nn.Linear(10, 5)),\n max_sequence_length=64,\n )\n assert encountered_alibi_warning(caplog), 'A warning should be generated when Alibi has no effect.'", "def objective(self, x):\n pass", "def objective(self, x):\n pass", "def check(self) -> None:", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result=penalty.compute([1]*self.np, objective)\n self.assertEqual(tuple, type(result))\n # more tests go here", "def test_likelihood(app):\n\n assert False", "def test_invalid_method():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"SGD\")\n pytest.raises(AttributeError, atom.sgd.predict_proba, X_bin)", "def __call__(self):\n self.start()\n numberOfIterations, stateVectorConv = self.iterations()\n if numberOfIterations <= 1:\n self.answer = None\n raise DidNotConvergeWarning(\"Number of iterations <= 1.\")\n result = self.diagnostic(numberOfIterations, stateVectorConv)\n return result", "def objective(self, args: Dict[str, Any]) -> float:\n pass", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result = penalty.compute([1]*self.np, objective)\n assert isinstance(result,tuple)\n # more tests go here", "def Check(self, parameters):", "def test_acc(self):\n raise Exception(\" not implemented in base model\")", "def testAxiomReasoning(self):\n \n state = State.from_problem(self.prob)\n extstate, reasons, universalReasons = state.get_extended_state(getReasons=True)\n\n relevantVars = []\n relevantReplanVars = []\n load = self.dom.get_action(\"a_load\")\n with load.instantiate([\"agent\", \"obj11\", \"tru1\"], self.prob):\n self.assert_(extstate.is_satisfied(load.precondition, relevantVars))\n self.assert_(extstate.is_satisfied(load.replan, relevantReplanVars))\n\n all_reasons = set(relevantVars)\n for v in relevantVars:\n all_reasons |= reasons[v]\n\n all_replan_reasons = set(relevantReplanVars)\n for v in relevantReplanVars:\n all_replan_reasons |= reasons[v]\n \n s1 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"tru1\"]])\n s2 = StateVariable(self.prob.functions[\"location-of\"][0], [self.prob[\"obj11\"]])\n\n self.assert_(s1 in all_reasons)\n self.assert_(s2 in all_reasons)\n self.assertFalse(s1 in all_replan_reasons)\n self.assert_(s2 in all_replan_reasons)", "def DO(experiment, ln):\n # Determine if the library needs to be constructed\n do_init, do_mature = initial_check(experiment, ln)\n # If it should be, then do so\n if do_init:\n # Initialize the library\n initialize_libraries(experiment, ln)\n # If an affinity maturation should be done\n if do_mature:\n # Affinity maturation of the antibody \n Affinity_Maturation(experiment, ln)\n # Check to see if everything is finished\n finished = 
check_finish(experiment, ln)\n # Finish the experiment\n if finished:\n Finish(experiment, ln)\n sys.exit(0)", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def check(self):\n pass", "def solveOneStep(self):\n ### Student code goes here\n return True", "def prove_I0() -> Proof:\n # Task 4.8", "def test_input_objective(self):\n self.set_up()\n self.assertRaises(ValueError,\n self.vmecOptimization.input_objective,\n which_objective='volumee')\n boundary = np.copy(self.vmecOptimization.boundary_opt)\n boundary_new = np.hstack((boundary,boundary))\n self.assertRaises(ValueError,\n self.vmecOptimization.input_objective,\n boundary=boundary_new)\n volume1 = self.vmecOptimization.input_objective(\n which_objective='volume')\n volume2 = self.vmecOptimization.vmec_objective(\n which_objective='volume') \n self.assertAlmostEqual(volume1,volume2)\n \n boundary_new = np.copy(self.vmecOptimization.boundary_opt)\n boundary_new[0] = 1.1*boundary_new[0]\n volume1 = self.vmecOptimization.input_objective(\n which_objective='volume',boundary=boundary_new)\n self.assertAlmostEqual(self.vmecOptimization.boundary_opt[0],\n boundary_new[0])\n boundary_old = np.copy(self.vmecOptimization.boundary_opt)\n boundary_new = np.copy(self.vmecOptimization.boundary_opt)\n boundary_new[0] = 1.1*boundary_new[0]\n volume1 = self.vmecOptimization.input_objective(\n which_objective='volume',boundary=boundary_new,update=False)\n self.assertAlmostEqual(self.vmecOptimization.boundary_opt[0],\n boundary_old[0])\n self.tear_down()", "def problem_statement():\n pass" ]
[ "0.57647496", "0.5661307", "0.55599916", "0.5370101", "0.5358906", "0.53583854", "0.53413033", "0.53176993", "0.52501816", "0.5248551", "0.5248551", "0.52420944", "0.52360874", "0.5224271", "0.52124524", "0.51895326", "0.5175995", "0.51721793", "0.51585484", "0.515155", "0.5128351", "0.5099514", "0.50933534", "0.50933534", "0.50933534", "0.50933534", "0.50844014", "0.5082282", "0.5071785", "0.50707614" ]
0.8210446
0
Modulate (map) an array of bits to constellation symbols.
def modulate(self, input_bits): index_list = map(lambda i: self.table[tuple((input_bits[i:i+self.num_bits_symbol]))], \ xrange(0, len(input_bits), self.num_bits_symbol)) baseband_symbols = self.constellation[index_list] return baseband_symbols
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, bits=8):\n\n self.map = {}\n self.bits = bits\n for r, g, b in itertools.product(range(2 ** bits), repeat=3):\n self.map[(r << 2 * bits) + (g << bits) + b] = rgb2lab(r << (8 - bits), g << (8 - bits), b << (8 - bits))", "def encode(bits, nt_to_bits=None):\r\n if nt_to_bits is None:\r\n nt_to_bits = DEFAULT_GOLAY_NT_TO_BITS\r\n\r\n bits = numpy.array(bits).reshape((12, 1))\r\n\r\n # cheap way to do binary xor in matrix dot\r\n res = numpy.dot(DEFAULT_G.T, bits)\r\n codeword = divmod(res.ravel(), 2)[1]\r\n\r\n return _bits_to_seq(codeword, nt_to_bits)", "def get_data():\n state = np.array([1,1,1,1,1,1,1,1,1], dtype=np.bool)\n taps = np.array([0,0,0,0,1,0,0,0,1], dtype=np.bool)\n p = np.zeros(176, dtype=np.uint8)\n for i in range(176):\n p[i] = np.sum(state[-3:]*[4,2,1])\n for _ in range(3):\n state = np.concatenate(([np.sum(state&taps)&1], state[0:-1]))\n a = np.zeros(176, common.SYMB_SCRAMBLE_DTYPE)\n ## 8PSK modulation\n constellation = PhysicalLayer.make_psk(8,range(8))['points']\n a['scramble'] = constellation[p,]\n known_symbols = np.mod(range(176),48)>=32\n a['symb'][known_symbols] = a['scramble'][known_symbols]\n return a", "def mk_bitvecs(self):\n self.bitvec = ''.join([f'{b:#010b}'[2:] for b in self.code ][::-1])\n self.bitvec_data = ''.join([f'{b:#010b}'[2:] for b in self.input][::-1])\n\n # Pad with some zeros to catch the last instructions.\n self.bitvec = '0'*64 + self.bitvec", "def BitSwap(bits):\n\n swaptable = (0, 128, 64, 192, 32, 160, 96, 224)\n\n return swaptable[bits]", "def vectorize(smiles):\n one_hot = np.zeros((smiles.shape[0], embed, len(charset)), dtype=np.int8)\n for i, smile in enumerate(smiles):\n # encode the startchar\n one_hot[i, 0, char_to_int[\"!\"]] = 1\n # encode the rest of the chars\n for j, c in enumerate(smile):\n one_hot[i, j + 1, char_to_int[c]] = 1\n # Encode endchar\n one_hot[i, len(smile) + 1 :, char_to_int[\"E\"]] = 1\n # Return two, one for input and the 2other for output\n return one_hot[:, 0:-1, :], one_hot[:, 1:, :]", "def bin_code(self):\n self.alphabet = np.unique(self.sequence)\n\n for s, n in zip([chr(k + ord('a') - 1) for k in self.alphabet], self.alphabet):\n self.alphabet_symbol[s] = n\n\n sigm = len(self.alphabet)\n bin_code = []\n for i, e in enumerate(self.alphabet):\n em = [0] * sigm\n em[sigm - 1 - i] = 1\n bin_code.append(em)\n\n for i in range(len(bin_code)):\n self.alphabet_dict[self.alphabet[i]] = bin_code[i]\n\n return reduce(lambda r, e: r + self.alphabet_dict[e], self.sequence, [])", "def two_bit_mapper(iterable):\n return {k: decompress_seq(k) for k in iterable}", "def setBitsPerSymbol(self, bits_per_symbol):\n \n self.bits_per_symbol = bits_per_symbol", "def ADD_ROUND_KEY(state_array, key_array):\n for i in range(4):\n for j in range(4):\n state_array[i][j] = state_array[i][j] ^ key_array[i][j]\n return state_array", "def __call__(self):\n return {self.idx: rle_encoding(self.mask)}", "def patchCode(code, value, bits):\n assert(type(code) == type(value) == type(bits) == int)\n mask = 2**bits-1\n return (code & ~mask) | (value & mask)", "def encode_one_hot2(s):\n x = np.zeros((LINE_SIZE, INPUT_VOCAB_SIZE))\n for n, c in enumerate(s):\n index = char_indices[c]\n x[n, index] = 1 \n return x", "def swap_bits_bit_array(x, bit_array):\n\n if (is_power_two(bit_array)):\n bit_array |= bit_array << 1\n\n if (is_power_two(x & bit_array)):\n x ^= bit_array\n return x", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 
0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! sequence is too short\")", "def decode(bytes, command):\n\n cmap = com_map[command]\n ret = {}\n for com, rng in cmap.items():\n ret[com] = sum([ord(b)<<(8*i) for i,b in\n enumerate(bytes[rng[0]-1:rng[1]])])\n\n return ret", "def transform(self,\n chip: 'np.ndarray',\n channel_order: Optional[List[int]] = None):\n masks = []\n for (value_from, value_to) in self.mapping.items():\n mask = (chip == value_from)\n masks.append((mask, value_to))\n for (mask, value_to) in masks:\n chip[mask] = value_to\n\n return chip", "def list_to_set24(l):\n res = 0\n for x in l: res ^= 1 << x\n return res & 0xffffff", "def bits_apply(op, n):\n return verts_to_bits(op[v] for v in bits_to_verts(n))", "def mask(mask_key, data):\r\n _m = array.array(\"B\", mask_key)\r\n _d = array.array(\"B\", data)\r\n for i in xrange(len(_d)):\r\n _d[i] ^= _m[i % 4]\r\n return _d.tostring()", "def hash_reduce(ascii_array):\r\n\r\n if len(ascii_array) % 16 != 0:\r\n raise ValueError(\"Array size not equally divisible by 16.\")\r\n\r\n output = []\r\n for i in range(0, int(len(ascii_array) / 16)):\r\n\r\n val = 0\r\n for numb in ascii_array[i*16:i*16+16]:\r\n val ^= numb\r\n output.append(val)\r\n\r\n return output", "def make_input_map(self) :\n\n self.input_map = \"\"\n stencil = self.core.stencil\n pattern = self.core.pattern\n reflect = len(pattern)+1 # reflector id, last material\n N = self.dimension\n coremap = np.zeros((N+2,N+2), dtype='i')\n \n # reflections and vacuum\n coremap[0, 1:N+1] = -1 \n coremap[1:N+1, 0] = -1\n coremap[N+1, 1:N+1] = -2\n coremap[1:N+1, N+1] = -2\n \n fuelindex = 0\n \n for i in range(1, N+1) :\n for j in range(1, N+1) :\n if j == 1 and i > 1 :\n pass\n else :\n if stencil[i-1, j-1] > 0 : # a fuel\n coremap[i, j] = pattern[fuelindex]+1\n fuelindex += 1\n elif stencil[i-1, j-1] == 0 : # a reflector\n coremap[i, j] = reflect\n else : # a void\n pass \n # Copy elements such that rotational symmetry is enforced. 
\n for j in range(2, N+1) :\n coremap[j, 1] = coremap[1, j]\n for i in range(0, N+2) :\n for j in range(0, N+2) :\n self.input_map +='%4i' % (coremap[i, j])\n self.input_map += '\\n'", "def __setitem__(self, n, bit):\n self.num ^= (np.uint64(-bit) ^ self.num) & (UINT64_ONE << np.uint64(n))", "def _derive_modifiers(self, tx):\n return [tx + str(pixel) for pixel in self.adjacent_pixels]", "def __init__(self, x):\n self.bit = x\n for i in range(len(x)):\n j = i | (i + 1)\n if j < len(x):\n x[j] += x[i]", "def encode_one_hot(s):\n all = []\n for c in s:\n x = np.zeros((INPUT_VOCAB_SIZE)) \n index = char_indices[c]\n x[index] = 1 \n all.append(x)\n return all", "def update_board(self, symbol, modified_squares):\n\t\tfor coord in modified_squares:\n\t\t\tself.board[coord] = symbol", "def generate_symbole(figure_name = \"canon\"):\n if figure_name == \"planeur\": #PLANNEUR\n planneur = np.zeros((3, 3))\n planneur[1, 0] = 1\n planneur[0, 1] = 1\n planneur[0, 2] = 1\n planneur[1, 2] = 1\n planneur[2, 2] = 1\n return planneur\n\n elif figure_name == \"canon\": #CANON\n canon = np.zeros((36,9))\n canon[0:2,5:7] = 1\n canon[11,4:7] = 1\n canon[15:17,4:7] = 1\n canon[12,3] = 1\n canon[14,3] = 1\n canon[13,2] = 1\n canon[12,7] = 1\n canon[14,7] = 1\n canon[13,8] = 1\n canon[25,0:2] = 1\n canon[22:25,1:3] = 1\n canon[21,2:5] = 1\n canon[24,3] = 1\n canon[22:25,4:6] = 1\n canon[25,5:7] = 1\n canon[30,1:3] = 1\n canon[34:36,3:5] = 1\n return canon\n\n elif figure_name == \"blinker\": #BLINKER\n blinker = np.ones((3,1))\n return blinker\n\n elif figure_name == \"oscillator_alone\":\n osc = np.zeros((11,11))\n osc[2,2:9] = 1\n osc[8,2:9] = 1\n osc[2:9,2] = 1\n osc[2:9,8] = 1\n osc[5,2] = 0\n osc[5,8] = 0\n osc[2,5] = 0\n osc[8,5] = 0\n osc[0,5] = 1\n osc[10,5] = 1\n osc[5,0] = 1\n osc[5,10] = 1\n osc[1,4:7] = 1\n osc[9,4:7] = 1\n osc[4:7,1] = 1\n osc[4:7,9] = 1\n return osc\n\n elif figure_name == \"oscillator_one_block\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2,-2:] = 1\n return osc\n\n elif figure_name == \"oscillator_four_blocks\":\n osc = generate_symbole(\"oscillator_alone\")\n osc[0:2, -2:] = 1\n osc[0:2,0:2] = 1\n osc[-2:,0:2] = 1\n osc[-2:,-2:] = 1\n return osc\n\n elif figure_name == \"croix\":\n return osc\n\n elif figure_name == \"diag\":\n return osc\n\n elif figure_name == \"octogone\":\n return osc\n\n else:\n return 0", "def mask_codes_op(base_array, codes_array):\r\n result = numpy.empty(base_array.shape, dtype=numpy.int8)\r\n result[:] = mask_nodata\r\n valid_mask = base_array != base_nodata\r\n result[valid_mask] = numpy.isin(\r\n base_array[valid_mask], codes_array)\r\n return result", "def encode_state(player, players=\"\", apples=\"\", board_size=(15,15)):\n player_location = players[player-1].get('location')\n dx = 8-player_location[0]\n dy = 8-player_location[1]\n\n # One-Hot mapping dict\n oh_mapping = {'empty': np.array([1, 0, 0, 0, 0, 0]),\n 'apple': np.array([0, 1, 0, 0, 0, 0]),\n 'up': np.array([0, 0, 1, 0, 0, 0]),\n 'down': np.array([0, 0, 0, 1, 0, 0]),\n 'left': np.array([0, 0, 0, 0, 1, 0]),\n 'right': np.array([0, 0, 0, 0, 0, 1])}\n\n # Initialise an empty board_state\n board_state = [[oh_mapping[\"empty\"] for i in range(board_size[0])] for i in range(board_size[1])]\n # Load apples into board\n for location in apples:\n x,y = location\n x = (x+dx)%15\n y = (y+dy)%15\n board_state[x][y] = oh_mapping[\"apple\"]\n # Load other players into board\n for worm in players:\n location = worm[\"location\"]\n\n if location == [\"?\",\"?\"]:\n 
newlocation=[\"?\",\"?\"]\n\n else:\n newlocation=[]\n newlocation.append((location[0] + dx)%15)\n newlocation.append((location[1] + dy)%15)\n board_state[newlocation[0]][newlocation[1]] = oh_mapping[worm[\"orientation\"]]\n return board_state" ]
[ "0.58877903", "0.55004615", "0.54638433", "0.5388877", "0.53436345", "0.53311616", "0.5254693", "0.52274746", "0.5226104", "0.52138996", "0.51705074", "0.51263374", "0.5111572", "0.50981164", "0.5097587", "0.50909793", "0.5069528", "0.50634634", "0.505401", "0.5018951", "0.49819574", "0.49814135", "0.49797335", "0.4971087", "0.4970005", "0.4962657", "0.49485424", "0.49430823", "0.49382642", "0.49258745" ]
0.6858786
0
Find the Pauli-representation of InteractionOperator for Bravyi-Kitaev Super fast (BKSF) algorithm. Pauli-representation of general FermionOperator is not possible in BKSF. Also, the InteractionOperator given as input must be Hermitian. In future we might provide a transformation for a restricted set of fermion operator.
def bravyi_kitaev_fast(operator: InteractionOperator) -> QubitOperator: if isinstance(operator, InteractionOperator): return bravyi_kitaev_fast_interaction_op(operator) else: raise TypeError("operator must be an InteractionOperator.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_fermion_operator(operator):\n fermion_operator = FermionOperator()\n\n if isinstance(operator, PolynomialTensor):\n for term in operator:\n fermion_operator += FermionOperator(term, operator[term])\n return fermion_operator\n\n raise TypeError(\"Unsupported type of oeprator {}\".format(operator))", "def taper(self, operator: PauliSumOp) -> OperatorBase:\n\n if not self._symmetries or not self._sq_paulis or not self._sq_list:\n raise OpflowError(\n \"Z2 symmetries, single qubit pauli and single qubit list cannot be empty.\"\n )\n\n converted_ops = self.convert_clifford(operator)\n tapered_ops = self.taper_clifford(converted_ops)\n\n return tapered_ops", "def get_operator(self):\n distribution = self.get_distribution_operator()\n temp = self.get_unit_conversion_operator()\n aperture = self.get_aperture_integration_operator()\n filter = self.get_filter_operator()\n projection = self.get_projection_operator()\n hwp = self.get_hwp_operator()\n polarizer = self.get_polarizer_operator()\n integ = self.get_detector_integration_operator()\n trans_inst = self.instrument.get_transmission_operator()\n trans_atm = self.scene.atmosphere.transmission\n response = self.get_detector_response_operator()\n\n with rule_manager(inplace=True):\n H = CompositionOperator([\n response, trans_inst, integ, polarizer, hwp * projection,\n filter, aperture, trans_atm, temp, distribution])\n if self.scene == 'QU':\n H = self.get_subtract_grid_operator()(H)\n return H", "def get_exact_classical_binary_solution(qubit_operator, offset):\n result = get_classical_solver_result(qubit_operator, offset)\n return result.x", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"operator\")", "def convert(self, operator: OperatorBase) -> OperatorBase:\n # pylint: disable=cyclic-import,import-outside-toplevel\n from ..evolutions.evolved_op import EvolvedOp\n\n if isinstance(operator, ListOp):\n if isinstance(operator, SummedOp) and all([isinstance(op, PauliOp)\n for op in operator.oplist]):\n # For now, we only support graphs over Paulis.\n return self.group_subops(operator)\n elif self._traverse:\n return operator.traverse(self.convert)\n else:\n return operator\n elif isinstance(operator, OperatorStateFn) and self._traverse:\n return OperatorStateFn(self.convert(operator.primitive),\n is_measurement=operator.is_measurement,\n coeff=operator.coeff)\n elif isinstance(operator, EvolvedOp) and self._traverse:\n return EvolvedOp(self.convert(operator.primitive), coeff=operator.coeff)\n else:\n return operator", "def pro_avfid_superoperator(U):\n if U.type=='oper':\n ptrace = np.abs((U.dag()*U_target).tr())**2\n dim = 9 # dimension of the whole space\n return np.real((ptrace+dim)/(dim*(dim+1)))\n\n elif U.type=='super':\n return np.real(qtp.average_gate_fidelity(U,target=U_target_diffdims))", "def bravyi_kitaev(operator, n_qubits=None):\n if isinstance(operator, FermionOperator):\n return _bravyi_kitaev_fermion_operator(operator, n_qubits)\n if isinstance(operator, MajoranaOperator):\n return _bravyi_kitaev_majorana_operator(operator, n_qubits)\n raise TypeError(\"Couldn't apply the Bravyi-Kitaev Transform to object \"\n \"of type {}.\".format(type(operator)))", "def seepage_from_superoperator(U):\n if U.type=='oper':\n sump = 0\n for i_list in [[0,2],[1,2],[2,0],[2,1],[2,2]]:\n for j_list in [[0,2],[1,2],[2,0],[2,1],[2,2]]:\n bra_i = 
qtp.tensor(qtp.ket([i_list[0]], dim=[3]),\n qtp.ket([i_list[1]], dim=[3])).dag()\n ket_j = qtp.tensor(qtp.ket([j_list[0]], dim=[3]),\n qtp.ket([j_list[1]], dim=[3]))\n p = np.abs((bra_i*U*ket_j).data[0, 0])**2\n sump += p\n sump /= 5 # divide by number of non-computational states\n L1 = 1-sump\n return L1\n elif U.type=='super':\n sump = 0\n for i_list in [[0,2],[1,2],[2,0],[2,1],[2,2]]:\n for j_list in [[0,2],[1,2],[2,0],[2,1],[2,2]]:\n ket_i = qtp.tensor(qtp.ket([i_list[0]], dim=[3]),\n qtp.ket([i_list[1]], dim=[3]))\n rho_i=qtp.operator_to_vector(qtp.ket2dm(ket_i))\n ket_j = qtp.tensor(qtp.ket([j_list[0]], dim=[3]),\n qtp.ket([j_list[1]], dim=[3]))\n rho_j=qtp.operator_to_vector(qtp.ket2dm(ket_j))\n p = (rho_i.dag()*U*rho_j).data[0, 0]\n sump += p\n sump /= 5 # divide by number of non-computational states\n sump=np.real(sump)\n L1 = 1-sump\n return L1", "def find_Z2_symmetries(cls, operator: PauliSumOp) -> \"Z2Symmetries\":\n pauli_symmetries = []\n sq_paulis = []\n sq_list = []\n\n stacked_paulis = []\n\n if operator.is_zero():\n logger.info(\"Operator is empty.\")\n return cls([], [], [], None)\n\n for pauli in operator:\n stacked_paulis.append(\n np.concatenate(\n (pauli.primitive.paulis.x[0], pauli.primitive.paulis.z[0]), axis=0\n ).astype(int)\n )\n\n stacked_matrix = np.array(np.stack(stacked_paulis))\n symmetries = _kernel_F2(stacked_matrix)\n\n if not symmetries:\n logger.info(\"No symmetry is found.\")\n return cls([], [], [], None)\n\n stacked_symmetries = np.stack(symmetries)\n symm_shape = stacked_symmetries.shape\n\n for row in range(symm_shape[0]):\n\n pauli_symmetries.append(\n Pauli(\n (\n stacked_symmetries[row, : symm_shape[1] // 2],\n stacked_symmetries[row, symm_shape[1] // 2 :],\n )\n )\n )\n\n stacked_symm_del = np.delete(stacked_symmetries, row, axis=0)\n for col in range(symm_shape[1] // 2):\n # case symmetries other than one at (row) have Z or I on col qubit\n Z_or_I = True\n for symm_idx in range(symm_shape[0] - 1):\n if not (\n stacked_symm_del[symm_idx, col] == 0\n and stacked_symm_del[symm_idx, col + symm_shape[1] // 2] in (0, 1)\n ):\n Z_or_I = False\n if Z_or_I:\n if (\n stacked_symmetries[row, col] == 1\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 0\n ) or (\n stacked_symmetries[row, col] == 1\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 1\n ):\n sq_paulis.append(\n Pauli((np.zeros(symm_shape[1] // 2), np.zeros(symm_shape[1] // 2)))\n )\n sq_paulis[row].z[col] = False\n sq_paulis[row].x[col] = True\n sq_list.append(col)\n break\n\n # case symmetries other than one at (row) have X or I on col qubit\n X_or_I = True\n for symm_idx in range(symm_shape[0] - 1):\n if not (\n stacked_symm_del[symm_idx, col] in (0, 1)\n and stacked_symm_del[symm_idx, col + symm_shape[1] // 2] == 0\n ):\n X_or_I = False\n if X_or_I:\n if (\n stacked_symmetries[row, col] == 0\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 1\n ) or (\n stacked_symmetries[row, col] == 1\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 1\n ):\n sq_paulis.append(\n Pauli((np.zeros(symm_shape[1] // 2), np.zeros(symm_shape[1] // 2)))\n )\n sq_paulis[row].z[col] = True\n sq_paulis[row].x[col] = False\n sq_list.append(col)\n break\n\n # case symmetries other than one at (row) have Y or I on col qubit\n Y_or_I = True\n for symm_idx in range(symm_shape[0] - 1):\n if not (\n (\n stacked_symm_del[symm_idx, col] == 1\n and stacked_symm_del[symm_idx, col + symm_shape[1] // 2] == 1\n )\n or (\n stacked_symm_del[symm_idx, col] == 0\n and stacked_symm_del[symm_idx, 
col + symm_shape[1] // 2] == 0\n )\n ):\n Y_or_I = False\n if Y_or_I:\n if (\n stacked_symmetries[row, col] == 0\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 1\n ) or (\n stacked_symmetries[row, col] == 1\n and stacked_symmetries[row, col + symm_shape[1] // 2] == 0\n ):\n sq_paulis.append(\n Pauli((np.zeros(symm_shape[1] // 2), np.zeros(symm_shape[1] // 2)))\n )\n sq_paulis[row].z[col] = True\n sq_paulis[row].x[col] = True\n sq_list.append(col)\n break\n\n return cls(pauli_symmetries, sq_paulis, sq_list, None)", "def expectation_computational_basis_state(operator, computational_basis_state):\n if isinstance(operator, QubitOperator):\n raise NotImplementedError('Not yet implemented for QubitOperators.')\n\n if not isinstance(operator, FermionOperator):\n raise TypeError('operator must be a FermionOperator.')\n\n occupied_orbitals = computational_basis_state\n\n if not isinstance(occupied_orbitals, list):\n computational_basis_state_index = (occupied_orbitals.nonzero()[0][0])\n\n occupied_orbitals = [\n digit == '1' for digit in bin(computational_basis_state_index)[2:]\n ][::-1]\n\n expectation_value = operator.terms.get((), 0.0)\n\n for i in range(len(occupied_orbitals)):\n if occupied_orbitals[i]:\n expectation_value += operator.terms.get(((i, 1), (i, 0)), 0.0)\n\n for j in range(i + 1, len(occupied_orbitals)):\n expectation_value -= operator.terms.get(\n ((j, 1), (i, 1), (j, 0), (i, 0)), 0.0)\n\n return expectation_value", "def get_classical_solver_result(qubit_operator, offset):\n qp = QuadraticProgram()\n qp.from_ising(qubit_operator, offset)\n exact = MinimumEigenOptimizer(NumPyMinimumEigensolver())\n return exact.solve(qp)", "def ideal_from_IQF_label(K, lab):\n if '[' in lab:\n lab = lab[1:-1].replace(\",\", \".\")\n a, c, d = [ZZ(x) for x in lab.split(\".\")]\n\n a /= d\n P = K.ideal([a, c+d*K.gen()])\n return P", "def _instruction_to_superop(cls, obj):\n if not isinstance(obj, Instruction):\n raise QiskitError(\"Input is not an instruction.\")\n chan = None\n if obj.name == \"reset\":\n # For superoperator evolution we can simulate a reset as\n # a non-unitary superoperator matrix\n chan = SuperOp(np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]))\n if obj.name == \"kraus\":\n kraus = obj.params\n dim = len(kraus[0])\n chan = SuperOp(_to_superop(\"Kraus\", (kraus, None), dim, dim))\n elif hasattr(obj, \"to_matrix\"):\n # If instruction is a gate first we see if it has a\n # `to_matrix` definition and if so use that.\n try:\n kraus = [obj.to_matrix()]\n dim = len(kraus[0])\n chan = SuperOp(_to_superop(\"Kraus\", (kraus, None), dim, dim))\n except QiskitError:\n pass\n return chan", "def operator_rhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator.adjoint(inp)", "def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n 
self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)", "def my_operator(self):\n return self._my_operator", "def vector_to_operator(op):\n if not op.isoperket:\n raise TypeError(\"only defined for operator-kets\")\n if op.superrep != \"super\":\n raise TypeError(\"only defined for operator-kets in super format\")\n dims = op.dims[0]\n return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),\n dims=dims,\n copy=False)", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def convert_clifford(self, operator: PauliSumOp) -> OperatorBase:\n\n if not self._symmetries or not self._sq_paulis or not self._sq_list:\n raise OpflowError(\n \"Z2 symmetries, single qubit pauli and single qubit list cannot be empty.\"\n )\n\n if not operator.is_zero():\n for clifford in self.cliffords:\n operator = cast(PauliSumOp, clifford @ operator @ clifford)\n operator = operator.reduce(atol=0)\n\n return operator", "def qubitop_to_pauliop(n_qubits, qubit_operator):\n if not isinstance(qubit_operator, QubitOperator):\n raise TypeError(\"qubit_operator must be an openFermion QubitOperator object.\")\n paulis = []\n\n for qubit_terms, coefficient in qubit_operator.terms.items():\n count=0\n pauli_label = ['I' for _ in range(n_qubits)]\n coeff = coefficient\n \n for tensor_term in qubit_terms:\n pauli_label[tensor_term[0]] = tensor_term[1]\n \n paulis.append([coeff, Pauli.from_label(pauli_label)])\n \n pauliOp = WeightedPauliOperator(paulis)\n \n return pauliOp", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def operator(self) -> str:\n return pulumi.get(self, \"operator\")", "def get_unit_conversion_operator(self):\n nu = self.instrument.filter.nu\n return self.scene.get_unit_conversion_operator(nu)", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")", "def operator(self) -> Optional[str]:\n return pulumi.get(self, \"operator\")" ]
[ "0.5818381", "0.5716979", "0.570891", "0.567657", "0.562116", "0.562116", "0.562116", "0.5613257", "0.5463698", "0.539848", "0.53607637", "0.5300279", "0.52566344", "0.5202197", "0.5202183", "0.5198091", "0.51973337", "0.51899695", "0.51859266", "0.51655465", "0.5154106", "0.5147854", "0.51417524", "0.5127427", "0.5127427", "0.5097394", "0.5097201", "0.5097201", "0.5097201", "0.5097201" ]
0.5804373
1
r""" Transform from InteractionOperator to QubitOperator for BravyiKitaev fast algorithm. The electronic Hamiltonian is represented in terms of creation and annihilation operators. These creation and annihilation operators could be
def bravyi_kitaev_fast_interaction_op(iop: InteractionOperator) -> QubitOperator:
    n_qubits = count_qubits(iop)

    # Initialize qubit operator as constant.
    qubit_operator = QubitOperator((), iop.constant)
    edge_matrix = bravyi_kitaev_fast_edge_matrix(iop)
    edge_matrix_indices = numpy.array(
        numpy.nonzero(
            numpy.triu(edge_matrix) - numpy.diag(numpy.diag(edge_matrix))))

    # Loop through all indices.
    for p in range(n_qubits):
        for q in range(n_qubits):
            # Handle one-body terms.
            coefficient = complex(iop[(p, 1), (q, 0)])
            if coefficient and p >= q:
                qubit_operator += (coefficient *
                                   _one_body(edge_matrix_indices, p, q))

            # Keep looping for the two-body terms.
            for r in range(n_qubits):
                for s in range(n_qubits):
                    coefficient = complex(iop[(p, 1), (q, 1), (r, 0), (s, 0)])

                    # Skip zero terms.
                    if (not coefficient) or (p == q) or (r == s):
                        # coverage: ignore
                        continue

                    # Identify and skip one of the complex conjugates.
                    if [p, q, r, s] != [s, r, q, p]:
                        if len(set([p, q, r, s])) == 4:
                            if min(r, s) < min(p, q):
                                continue
                        # Handle case of 3 unique indices
                        elif len(set([p, q, r, s])) == 3:
                            transformed_term = _two_body(
                                edge_matrix_indices, p, q, r, s)
                            transformed_term *= .5 * coefficient
                            qubit_operator += transformed_term
                            continue
                        elif p != r and q < p:
                            # TODO: remove pragma if reachable
                            continue
                            continue  # pragma: no cover

                    # Handle the two-body terms.
                    transformed_term = _two_body(edge_matrix_indices, p, q, r, s)
                    transformed_term *= coefficient
                    qubit_operator += transformed_term

    return qubit_operator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, transition: tuple = (0, 1), energies: tuple = (1,), pauli='X', code=qubit, graph=None,\n IS_subspace=False):\n self.transition = transition\n self.energies = energies\n assert pauli in ['X', 'Y', 'Z']\n self.pauli = pauli\n self.code = code\n self.graph = graph\n if self.pauli == 'X' and not self.code.logical_code:\n self._operator = np.zeros((self.code.d, self.code.d))\n self._operator[self.transition[1], self.transition[0]] = 1\n self._operator[self.transition[0], self.transition[1]] = 1\n elif self.pauli == 'Y' and not self.code.logical_code:\n self._operator = np.zeros((self.code.d, self.code.d))\n self._operator[self.transition[1], self.transition[0]] = 1j\n self._operator[self.transition[0], self.transition[1]] = -1j\n elif self.pauli == 'Z' and not self.code.logical_code:\n self._operator = np.zeros((self.code.d, self.code.d))\n self._operator[self.transition[0], self.transition[0]] = 1\n self._operator[self.transition[1], self.transition[1]] = -1\n # If a logical code, we should use the normal qubit operators because we assume the code is a qubit\n elif self.pauli == 'X' and self.code.logical_code:\n self._operator = self.code.X\n elif self.pauli == 'Y' and self.code.logical_code:\n self._operator = self.code.Y\n elif self.pauli == 'Z' and self.code.logical_code:\n self._operator = self.code.Z\n self.IS_subspace = IS_subspace\n\n if self.IS_subspace:\n # Generate sparse mixing Hamiltonian\n assert isinstance(graph, Graph)\n if code is not qubit:\n IS, num_IS = graph.generate_independent_sets_qudit(self.code)\n if self.pauli == 'Z':\n self._diagonal_hamiltonian = np.zeros((num_IS, 1))\n for k in range(num_IS):\n self._diagonal_hamiltonian[k, 0] = np.sum(IS[k, ...] == self.transition[0]) - np.sum(\n IS[k, ...] == self.transition[1])\n\n self._csr_hamiltonian = sparse.csr_matrix((self._diagonal_hamiltonian.T[0], (np.arange(num_IS),\n np.arange(num_IS))))\n\n self._hamiltonian = self._csr_hamiltonian\n\n else:\n # For each IS, look at spin flips generated by the laser\n # Over-allocate space\n rows = np.zeros(graph.n * num_IS, dtype=int)\n columns = np.zeros(graph.n * num_IS, dtype=int)\n entries = np.zeros(graph.n * num_IS, dtype=int)\n num_terms = 0\n for i in range(num_IS):\n for j in range(graph.n):\n if IS[i, j] == self.transition[0]:\n # Flip spin at this location\n # Get binary representation\n temp = IS[i, ...].copy()\n temp[j] = self.transition[1]\n where_matched = (np.argwhere(np.sum(np.abs(IS - temp), axis=1) == 0).flatten())\n if len(where_matched) > 0:\n # This is a valid spin flip by removing a node\n rows[num_terms] = where_matched[0]\n columns[num_terms] = i\n if self.pauli == 'X':\n entries[num_terms] = 1\n elif self.pauli == 'Y':\n # entries[num_terms] = -1j\n entries[num_terms] = 1j\n num_terms += 1\n \"\"\"else:\n for (i, key) in enumerate(self.graph.independent_sets_dict):\n num_neighbors = len(self.graph.independent_sets_dict[key][1])\n rows[num_terms:num_terms + num_neighbors] = self.graph.independent_sets_dict[key][1]\n columns[num_terms:num_terms + num_neighbors] = np.ones(num_neighbors) * \\\n self.graph.independent_sets_dict[key][0]\n if self.pauli == 'X':\n entries[num_terms:num_terms + num_neighbors] = 1\n elif self.pauli == 'Y':\n entries[num_terms:num_terms + num_neighbors] = -1j\n num_terms += num_neighbors\"\"\"\n\n # Cut off the excess in the arrays\n columns = columns[:2 * num_terms]\n rows = rows[:2 * num_terms]\n entries = entries[:2 * num_terms]\n # Populate the second half of the entries according to self.pauli\n if 
self.pauli == 'X':\n columns[num_terms:2 * num_terms] = rows[:num_terms]\n rows[num_terms:2 * num_terms] = columns[:num_terms]\n entries[num_terms:2 * num_terms] = entries[:num_terms]\n elif self.pauli == 'Y':\n columns[num_terms:2 * num_terms] = rows[:num_terms]\n rows[num_terms:2 * num_terms] = columns[:num_terms]\n entries[num_terms:2 * num_terms] = -1 * entries[:num_terms]\n # Now, construct the Hamiltonian\n self._csr_hamiltonian = sparse.csr_matrix((entries, (rows, columns)), shape=(num_IS, num_IS))\n self._hamiltonian = self._csr_hamiltonian\n else:\n # Use graph generator functions\n if self.pauli == 'Z':\n independent_sets = enumerate_independent_sets(graph.graph)\n # Generate a list of integers corresponding to the independent sets in binary\n # All ones\n k = self.graph.num_independent_sets - 2\n self.mis_size = 0\n hamiltonian = np.zeros(self.graph.num_independent_sets, dtype=float)\n hamiltonian[-1] = -1 * self.graph.n\n for i in independent_sets:\n hamiltonian[k] = len(i) - (self.graph.n - len(i))\n k -= 1\n\n self._hamiltonian = sparse.csr_matrix(\n (hamiltonian,\n (np.arange(self.graph.num_independent_sets), np.arange(self.graph.num_independent_sets))),\n shape=(self.graph.num_independent_sets, self.graph.num_independent_sets))\n\n else:\n independent_sets = enumerate_independent_sets(graph.graph)\n # Generate a list of integers corresponding to the independent sets in binary\n previous_size = 0\n self.mis_size = 0\n independent_sets_dict = {(): self.graph.num_independent_sets - 1}\n rows = []\n columns = []\n entries = []\n k = self.graph.num_independent_sets - 2\n for i in independent_sets:\n current_size = len(i)\n if current_size - previous_size > 1:\n previous_size = current_size - 1\n # Clear out the dictionary with terms we can't connect to\n for key in list(independent_sets_dict):\n if len(key) != previous_size:\n independent_sets_dict.pop(key)\n independent_sets_dict[tuple(i)] = k\n for (j, node) in enumerate(i):\n i_removed = i.copy()\n i_removed.pop(j)\n index = independent_sets_dict[tuple(i_removed)]\n # Index is the current independent set with a single node removed\n rows.append(k)\n columns.append(index)\n rows.append(index)\n columns.append(k)\n if self.pauli == 'Y':\n entries.append(-1j)\n entries.append(1j)\n else:\n entries.append(1)\n entries.append(1)\n k -= 1\n # Now, construct the Hamiltonian\n self._csr_hamiltonian = sparse.csr_matrix((entries, (rows, columns)),\n shape=(self.graph.num_independent_sets,\n self.graph.num_independent_sets))\n self._hamiltonian = self._csr_hamiltonian\n else:\n self._hamiltonian = None\n self._left_acting_hamiltonian = None\n self._right_acting_hamiltonian = None", "def test_creation_from_choi_operator():\n # Get krauss operators from dephrasure channel\n krauss_ops = set_up_dephrasure_conditions(0.1, 0.2)\n\n # Construct choi matrix from krauss operators\n choi_matrix = sum([np.outer(np.ravel(x, order=\"F\"),\n np.conj(np.ravel(x, order=\"F\"))) for x in krauss_ops])\n numb_qubits, dim_in, dim_out = [1, 1], 2, 3\n choi_obj = ChoiQutip(choi_matrix, numb_qubits, dim_in, dim_out)\n\n # Check if the two constructed krauss operators are the same.\n assert check_two_sets_of_krauss_are_same(krauss_ops, choi_obj.kraus_operators(), numb_qubits,\n dim_in, dim_out)\n\n # Test dimensions must match the choi matrix specified.\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 3, 3)\n assert_raises(ValueError, ChoiQutip, choi_matrix, numb_qubits, 2, 2)\n assert_raises(ValueError, ChoiQutip, choi_matrix, [1, 2], 2, 3)", 
"def get_qubitops(H, verbose):\n num_nodes = H.shape[0]\n pauli_list = [] \n s = \"\"\n for i in range(num_nodes):\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n pauli_list.append([ H[i, i], Pauli(zp, xp)]) \n s += ' {}*Z[{}]'.format(H[i,i], i)\n for j in range(i):\n if H[i, j] != 0:\n xp = np.zeros(num_nodes, dtype=np.bool)\n zp = np.zeros(num_nodes, dtype=np.bool)\n zp[i] = True\n zp[j] = True\n pauli_list.append([ H[i, j], Pauli(zp, xp)]) \n s += ' + {}*Z[{}]*Z[{}]'.format(H[i,j], i, j) \n if verbose > 0:\n print(s)\n return Operator(paulis=pauli_list)", "def test_action_of_choi_operator():\n krauss = initialize_pauli_examples(0.1, 0.2, 0.3)\n choi = sum([np.outer(np.ravel(x, \"F\"),\n np.conj(np.ravel(x, \"F\").T)) for x in krauss])\n choi_obj = ChoiQutip(choi, numb_qubits=[1, 1], dim_in=2, dim_out=2)\n\n for _ in range(0, 1000):\n rho = np.array(rand_dm_ginibre(2).data.todense())\n actual = choi_obj.channel(rho)\n desired = sum([k.dot(rho).dot(np.conj(k).T) for k in krauss])\n assert np.all(np.abs(actual - desired) < 1e-3)\n\n # Test number of qubits being 2.\n krauss = np.kron(krauss, krauss)\n choi = sum([np.outer(np.ravel(x, \"F\"),\n np.conj(np.ravel(x, \"F\"))) for x in krauss])\n choi_obj = ChoiQutip(choi, numb_qubits=[2, 2], dim_in=2, dim_out=2)\n\n for _ in range(0, 1000):\n rho = np.array(rand_dm_ginibre(4).data.todense())\n actual = choi_obj.channel(rho)\n desired = sum([k.dot(rho).dot(np.conj(k).T) for k in krauss])\n assert np.all(np.abs(actual - desired) < 1e-3)\n\n # Test Dephrasure Channe\n krauss = set_up_dephrasure_conditions(0.1, 0.2)\n choi = sum([np.outer(np.ravel(x, \"F\"),\n np.conj(np.ravel(x, \"F\"))) for x in krauss])\n choi_obj = ChoiQutip(choi, [1, 1], 2, 3)\n\n for _ in range(0, 1000):\n rho = np.array(rand_dm_ginibre(2).data.todense())\n actual = choi_obj.channel(rho)\n desired = sum([k.dot(rho).dot(np.conj(k).T) for k in krauss])\n assert np.all(np.abs(actual - desired) < 1e-3)", "def __init__(self, index: int = 0, energies=(1,), code=qubit, IS_subspace=False, graph=None):\n self.index = index\n self.graph = graph\n self.energies = energies\n self.code = code\n if not self.code.logical_code:\n if not 0 <= self.index < self.code.d:\n raise Exception('Index exceeds qudit dimension.')\n self._operator = np.zeros((self.code.d, self.code.d))\n self._operator[self.index, self.index] = 1\n else:\n if self.index != 0 and self.index != 1:\n raise Exception('Logical codes are qubits, so index must be 0 or 1.')\n if self.index == 0:\n self._operator = self.code.Q\n elif self.index == 1:\n self._operator = self.code.P\n self.IS_subspace = IS_subspace\n if self.IS_subspace:\n # Generate sparse mixing Hamiltonian\n assert graph is not None\n assert isinstance(graph, Graph)\n if code is not qubit:\n IS, num_IS = graph.independent_sets_qudit(self.code)\n self._diagonal_hamiltonian = np.zeros((num_IS, 1), dtype=float)\n for k in range(num_IS):\n self._diagonal_hamiltonian[k, 0] = np.sum(IS[k, ...] 
== self.index)\n self._hamiltonian = sparse.csr_matrix(\n (self._diagonal_hamiltonian.T[0], (np.arange(num_IS), np.arange(num_IS))),\n shape=(num_IS, num_IS))\n else:\n # We have already solved for this information\n independent_sets = enumerate_independent_sets(graph.graph)\n # Generate a list of integers corresponding to the independent sets in binary\n # All ones\n k = self.graph.num_independent_sets - 2\n self.mis_size = 0\n hamiltonian = np.zeros(self.graph.num_independent_sets, dtype=float)\n hamiltonian[-1] = 0\n for i in independent_sets:\n hamiltonian[k] = len(i)\n k -= 1\n\n self._hamiltonian = sparse.csr_matrix(\n (hamiltonian,\n (np.arange(self.graph.num_independent_sets), np.arange(self.graph.num_independent_sets))),\n shape=(self.graph.num_independent_sets, self.graph.num_independent_sets))\n else:\n # Use full Hilbert space\n self._hamiltonian = None", "def bravyi_kitaev_fast(operator: InteractionOperator) -> QubitOperator:\n if isinstance(operator, InteractionOperator):\n return bravyi_kitaev_fast_interaction_op(operator)\n else:\n raise TypeError(\"operator must be an InteractionOperator.\")", "def operations(self, qubits: Sequence[cirq.Qid]) -> cirq.OP_TREE:\n # TODO implement asymmetric ansatz\n\n param_set = set(self.params())\n\n # Change to the basis in which the one-body term is diagonal\n yield cirq.inverse(\n bogoliubov_transform(qubits, self.basis_change_matrix))\n\n for i in range(self.iterations):\n\n # Simulate one-body terms\n for p in range(len(qubits)):\n u_symbol = LetterWithSubscripts('U', p, i)\n if u_symbol in param_set:\n yield cirq.ZPowGate(exponent=u_symbol).on(qubits[p])\n\n # Rotate to the computational basis\n yield bogoliubov_transform(qubits, self.basis_change_matrix)\n\n # Simulate the two-body terms\n def two_body_interaction(p, q, a, b) -> cirq.OP_TREE:\n v_symbol = LetterWithSubscripts('V', p, q, i)\n if v_symbol in param_set:\n yield cirq.CZPowGate(exponent=v_symbol).on(a, b)\n yield swap_network(qubits, two_body_interaction)\n qubits = qubits[::-1]\n\n # Rotate back to the basis in which the one-body term is diagonal\n yield cirq.inverse(\n bogoliubov_transform(qubits, self.basis_change_matrix))\n\n # Simulate one-body terms again\n for p in range(len(qubits)):\n u_symbol = LetterWithSubscripts('U', p, i)\n if u_symbol in param_set:\n yield cirq.ZPowGate(exponent=u_symbol).on(qubits[p])\n\n # Rotate to the computational basis\n yield bogoliubov_transform(qubits, self.basis_change_matrix)", "def get_operator(self):\n\n Operator = []\n\n '''\n print('Create H - 150 & 220 GHz')\n ope=[]\n for i in range(self.nfreqs):\n ope.append(self.H150.operands[i])\n for i in range(self.nfreqs):\n ope.append(self.H220.operands[i])\n self.Hboth = BlockRowOperator(ope, new_axisin=0)\n self.H=self.Hboth\n '''\n\n\n\n H_qubic = self.qubic.get_operator()\n R_qubic = ReshapeOperator(H_qubic.shapeout, H_qubic.shape[0])\n Operator.append(R_qubic(H_qubic))\n\n H_planck = self.planck.get_operator()\n R_planck = ReshapeOperator(H_planck.shapeout, H_planck.shape[0])\n Operator.append(R_planck(H_planck))\n return BlockColumnOperator(Operator, axisout=0)", "def get_operator(self):\n distribution = self.get_distribution_operator()\n temp = self.get_unit_conversion_operator()\n aperture = self.get_aperture_integration_operator()\n filter = self.get_filter_operator()\n projection = self.get_projection_operator()\n hwp = self.get_hwp_operator()\n polarizer = self.get_polarizer_operator()\n integ = self.get_detector_integration_operator()\n trans_inst = 
self.instrument.get_transmission_operator()\n trans_atm = self.scene.atmosphere.transmission\n response = self.get_detector_response_operator()\n\n with rule_manager(inplace=True):\n H = CompositionOperator([\n response, trans_inst, integ, polarizer, hwp * projection,\n filter, aperture, trans_atm, temp, distribution])\n if self.scene == 'QU':\n H = self.get_subtract_grid_operator()(H)\n return H", "def get_qubit_hamiltonian(mol):\n m_ham = mol.get_molecular_hamiltonian()\n int_ham = InteractionOperator(*(m_ham.n_body_tensors.values()))\n f_ham = get_fermion_operator(int_ham)\n q_ham = Transform(f_ham).jordan_wigner()\n return q_ham", "def coupled_transmons_hamiltonian_new(w_q0, w_q1, alpha_q0, alpha_q1, J):\n\n H = w_q0 * n_q0 + w_q1 * n_q1 + \\\n 1/2*alpha_q0*(a.dag()*a.dag()*a*a) + 1/2*alpha_q1*(b.dag()*b.dag()*b*b) +\\\n J * (a.dag() + a) * (b + b.dag())\n H = H * (2*np.pi)\n return H", "def generateOperator(onQubits: Union[int, List[int]], matrices: Union[numpy.ndarray, List[numpy.ndarray]],\n sysLevel: Union[int, List[int]], qubitNum: int) -> numpy.ndarray:\n # Each qubit of the system has the same energy level. \n if isinstance(sysLevel, int):\n # We first define the identity matrix to fill un-assigned qubits\n idMat = numpy.identity(sysLevel, dtype=complex)\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel, sysLevel), \"Dimension of matrix does not match the system Level.\"\n # The operator is on only one qubit.\n if onQubits == 0:\n # This operator is on the first qubit.\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat)\n else:\n # This operator is not on the first qubit.\n operator = idMat\n for i in range(1, onQubits):\n operator = numpy.kron(operator, idMat)\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = numpy.kron(operator, idMat)\n return operator\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # On the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n else:\n operator = idMat\n else:\n # Not on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat)\n return operator\n \n else:\n assert False, \"Variable onQubits should be a list or an int.\"\n # The sysLevel is a list of different energy levels for multiple qubits\n if isinstance(sysLevel, list):\n # Create a list of identities of different dimension for each qubit of different energy level\n idMat = [numpy.identity(i, dtype=complex) for i in sysLevel]\n # The operator is acting on only one qubit.\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel[onQubits], sysLevel[onQubits]), \"Dimension of matrix does not match the system Level.\" \n # The operator is acting on the first qubit.\n if onQubits == 0:\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat[i])\n else:\n # This operator is not acting on the first qubit.\n operator = idMat[0]\n for i 
in range(1, onQubits):\n operator = numpy.kron(operator, idMat[i])\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = numpy.kron(operator, idMat[i])\n return operator\n # The operator is acting on multiple qubits.\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # Acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n else:\n operator = idMat[i]\n else:\n # Not acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat[i])\n return operator\n \n else:\n assert False, \"Variable onQubits should be a list or an int.\"", "def coupled_transmons_hamiltonian(w_q0, w_q1, alpha_q0, alpha_q1, J, w_bus):\n\n raise NotImplementedError(\"Old way of handling the hamiltonian H_0. Use calc_hamiltonian\")\n\n eps=0\n delta_q1=w_q1-w_bus\n delta_q0_interactionpoint=(w_q1-alpha_q0)-w_bus\n delta_q0=(w_q0+eps)-w_bus\n\n J_new = J / ((delta_q1+delta_q0_interactionpoint)/(delta_q1*delta_q0_interactionpoint)) * (delta_q1+delta_q0)/(delta_q1*delta_q0)\n\n H_0 = w_q0 * n_q0 + w_q1 * n_q1 + \\\n 1/2*alpha_q0*(a.dag()*a.dag()*a*a) + 1/2*alpha_q1*(b.dag()*b.dag()*b*b) +\\\n J_new * (a.dag() + a) * (b + b.dag())\n return H_0", "def normal_ordered(operator, hbar=1.):\n kwargs = {}\n\n if isinstance(operator, FermionOperator):\n ordered_operator = FermionOperator()\n order_fn = normal_ordered_ladder_term\n kwargs['parity'] = -1\n\n elif isinstance(operator, BosonOperator):\n ordered_operator = BosonOperator()\n order_fn = normal_ordered_ladder_term\n kwargs['parity'] = 1\n\n elif isinstance(operator, QuadOperator):\n ordered_operator = QuadOperator()\n order_fn = normal_ordered_quad_term\n kwargs['hbar'] = hbar\n\n elif isinstance(operator, InteractionOperator):\n constant = operator.constant\n n_modes = operator.n_qubits\n one_body_tensor = operator.one_body_tensor.copy()\n two_body_tensor = numpy.zeros_like(operator.two_body_tensor)\n quadratic_index_pairs = (\n (pq, pq) for pq in itertools.combinations(range(n_modes)[::-1], 2))\n cubic_index_pairs = (\n index_pair\n for p, q, r in itertools.combinations(range(n_modes)[::-1], 3)\n for index_pair in [((p, q), (p, r)), ((p, r), (\n p, q)), ((p, q), (q, r)), ((q, r),\n (p, q)), ((p, r),\n (q, r)), ((q, r), (p, r))])\n quartic_index_pairs = (\n index_pair\n for p, q, r, s in itertools.combinations(range(n_modes)[::-1], 4)\n for index_pair in [((p, q), (r, s)), ((r, s), (\n p, q)), ((p, r), (q, s)), ((q, s),\n (p, r)), ((p, s),\n (q, r)), ((q, r), (p, s))])\n index_pairs = itertools.chain(quadratic_index_pairs, cubic_index_pairs,\n quartic_index_pairs)\n for pq, rs in index_pairs:\n two_body_tensor[pq + rs] = sum(\n s * ss * operator.two_body_tensor[pq[::s] + rs[::ss]]\n for s, ss in itertools.product([-1, 1], repeat=2))\n return InteractionOperator(constant, one_body_tensor, two_body_tensor)\n\n else:\n raise TypeError('Can only normal order FermionOperator, '\n 'BosonOperator, QuadOperator, or 
InteractionOperator.')\n\n for term, coefficient in operator.terms.items():\n ordered_operator += order_fn(term, coefficient, **kwargs)\n\n return ordered_operator", "def operations(self, qubits):\n q0, q1, q2, q3 = qubits\n yield cq.H(q0), cq.H(q1), cq.H(q2)\n yield (cq.X**-0.5).on(q3)\n \n yield cq.CNOT(q0, q1), cq.CNOT(q1, q2), cq.CNOT(q2, q3)\n yield (cq.Z._with_exponent(cq.Symbol('theta_0'))).on(q3)\n yield cq.CNOT(q2, q3), cq.CNOT(q1, q2), cq.CNOT(q0, q1)\n \n yield cq.H(q0), cq.H(q1), cq.H(q2)\n yield (cq.X**0.5).on(q3)", "def qaoa_step(state, H, n_qubits, params):\n B=B_operator(n_qubits)\n state=lasp.expm_multiply(1j*params[0]*H, state)\n return lasp.expm_multiply(-1j*params[1]*B,state)", "def qchannel_to_qiskit(representation):\n\n rep = representation.representation\n # Find what representation it is.\n # Then create the corresponding matrix and shape it like qiskit is expecting it.\n # Finally, create the qiskit representation from that matrix.\n if rep in (RepresentationType.PTM, RepresentationType.CHOI):\n matri = representation.matrix\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n return PTM(data) if (rep == RepresentationType.PTM) else Choi(data)\n if rep in (RepresentationType.CHI, RepresentationType.SUPEROP):\n final_data = []\n for matri in representation.basis:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n if rep == RepresentationType.CHI:\n return Chi(final_data) if len(final_data) > 1 else Chi(final_data[0])\n return SuperOp(final_data) if len(final_data) > 1 else SuperOp(final_data[0])\n if rep == RepresentationType.KRAUS:\n final_data = []\n for matri in representation.kraus_ops:\n data_re = []\n data_im = []\n for i in range(matri.nRows):\n for j in range(matri.nCols):\n data_re.append(matri.data[i * matri.nRows + j].re + 0.j)\n data_im.append(matri.data[i * matri.nRows + j].im)\n data = np.array(data_re)\n data.imag = np.array(data_im)\n data = data.reshape((matri.nRows, matri.nCols))\n final_data.append(data)\n return Kraus(final_data)\n return None", "def simulate_quantities_of_interest_superoperator(tlist, c_ops, noise_parameters_CZ, fluxlutman,\n fluxbias_q1, amp,\n sim_step,\n verbose: bool=True):\n\n H_0=calc_hamiltonian(0,fluxlutman,noise_parameters_CZ) # computed at 0 amplitude\n # NOTE: parameters of H_0 could be not exactly e.g. 
the bare frequencies\n\n # We change the basis from the standard basis to the basis of eigenvectors of H_0\n # The columns of S are the eigenvectors of H_0, appropriately ordered\n if noise_parameters_CZ.dressed_compsub():\n S = qtp.Qobj(matrix_change_of_variables(H_0),dims=[[3, 3], [3, 3]])\n else:\n S = qtp.tensor(qtp.qeye(3),qtp.qeye(3)) # line here to quickly switch off the use of S\n H_0_diag = S.dag()*H_0*S\n\n #w_q0 = fluxlutman.q_freq_01()\n w_q0 = (H_0_diag[1,1]-H_0_diag[0,0]) / (2*np.pi)\n #w_q1 = fluxlutman.q_freq_10()\n w_q1 = (H_0_diag[3,3]-H_0_diag[0,0]) / (2*np.pi)\n\n # H_rotateaway = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n\n w_q1_sweetspot = noise_parameters_CZ.w_q1_sweetspot()\n # Correction up to second order of the frequency due to flux noise, computed from w_q0(phi) = w_q0^sweetspot * sqrt(cos(pi * phi/phi_0))\n w_q1_biased = w_q1 - np.pi/2 * (w_q1_sweetspot**2/w_q1) * np.sqrt(1 - (w_q1**4/w_q1_sweetspot**4)) * fluxbias_q1 - \\\n - np.pi**2/2 * w_q1_sweetspot * (1+(w_q1**4/w_q1_sweetspot**4)) / (w_q1/w_q1_sweetspot)**3 * fluxbias_q1**2\n # with sigma up to circa 1e-3 \\mu\\Phi_0 the second order is irrelevant\n correction_to_H = coupled_transmons_hamiltonian_new(w_q0=0, w_q1=np.real(w_q1_biased-w_q1), alpha_q0=0, alpha_q1=0, J=0)\n\n\n t0 = time.time()\n\n exp_L_total=1\n for i in range(len(amp)):\n H=calc_hamiltonian(amp[i],fluxlutman,noise_parameters_CZ) + correction_to_H\n H=S.dag()*H*S\n if c_ops != []:\n c_ops_temp=[]\n for c in range(len(c_ops)):\n if isinstance(c_ops[c],list):\n c_ops_temp.append(c_ops[c][0]*c_ops[c][1][i]) # c_ops are already in the H_0 basis\n else:\n c_ops_temp.append(c_ops[c])\n liouville_exp_t=(qtp.liouvillian(H,c_ops_temp)*sim_step).expm()\n else:\n liouville_exp_t=(-1j*H*sim_step).expm()\n exp_L_total=liouville_exp_t*exp_L_total\n\n t1 = time.time()\n #print('\\n alternative propagator',t1-t0)\n\n\n U_final = exp_L_total\n #U_final=rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_0_diag)\n\n phases = phases_from_superoperator(U_final) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phi_cond = phases[-1]\n L1 = leakage_from_superoperator(U_final)\n population_02_state = calc_population_02_state(U_final)\n L2 = seepage_from_superoperator(U_final)\n avgatefid = pro_avfid_superoperator_phasecorrected(U_final,phases)\n avgatefid_compsubspace = pro_avfid_superoperator_compsubspace_phasecorrected(U_final,L1,phases) # leakage has to be taken into account, see Woods & Gambetta\n #print('avgatefid_compsubspace',avgatefid_compsubspace)\n\n \n \n #H_twoqubits = coupled_transmons_hamiltonian_new(w_q0=w_q0, w_q1=w_q1, \n # alpha_q0=-2*w_q0, alpha_q1=-2*w_q1, J=0)\n #U_final_new = rotating_frame_transformation_new(U_final, fluxlutman.cz_length(), H_twoqubits) ### old method rotating away also the phase of the |2> state\n\n t = tlist[-1]+sim_step\n U_final_new = correct_reference(U=U_final,w_q1=w_q1,w_q0=w_q0,t=t)\n\n ### Script to check that we are correctly removing the single qubit phases in the rotating frame\n # cz_length = fluxlutman.cz_length()\n # U_check = (1j*H_twoqubits*cz_length).expm() * (-1j*H_0_diag*cz_length).expm()\n # phases_check = phases_from_superoperator(U_check)\n # print(phases_check)\n\n \n avgatefid_compsubspace_notphasecorrected = pro_avfid_superoperator_compsubspace(U_final_new,L1)\n # NOTE: a single qubit phase off by 30 degrees costs 5.5% fidelity\n\n ### Script to check that leakage and phi_cond are not affected by the 
phase correction, as it should be\n # L1_bis = leakage_from_superoperator(U_final_new)\n # phi_cond_bis = phases_from_superoperator(U_final_new)[-1]\n # print('leakage',L1-L1_bis)\n # print('phi_cond',phi_cond-phi_cond_bis)\n\n phases = phases_from_superoperator(U_final_new) # order is phi_00, phi_01, phi_10, phi_11, phi_02, phi_20, phi_cond\n phase_q0 = (phases[1]-phases[0]) % 360\n phase_q1 = (phases[2]-phases[0]) % 360\n\n\n # We now correct only for the phase of qubit left (q1), in the rotating frame\n avgatefid_compsubspace_pc_onlystaticqubit = pro_avfid_superoperator_compsubspace_phasecorrected_onlystaticqubit(U_final_new,L1,phases)\n \n\n return {'phi_cond': phi_cond, 'L1': L1, 'L2': L2, 'avgatefid_pc': avgatefid,\n 'avgatefid_compsubspace_pc': avgatefid_compsubspace, 'phase_q0': phase_q0, 'phase_q1': phase_q1,\n 'avgatefid_compsubspace': avgatefid_compsubspace_notphasecorrected,\n 'avgatefid_compsubspace_pc_onlystaticqubit': avgatefid_compsubspace_pc_onlystaticqubit, 'population_02_state': population_02_state,\n 'U_final_new': U_final_new}", "def make_operators(self):\n self.relationship_operator = Operators.RelationshipOperator(self)\n self.infection_operator = Operators.InfectionOperator(self)\n self.time_operator = Operators.TimeOperator(self)", "def _qij_minus(i: int, j: int):\n ib = i * 2 + 1\n jb = j * 2 + 1\n term = FermionOperator(((jb, 0), (ib, 0)), 1.0)\n return term", "def hermitian_conjugated(operator):\n # Handle FermionOperator\n if isinstance(operator, FermionOperator):\n conjugate_operator = FermionOperator()\n for term, coefficient in operator.terms.items():\n conjugate_term = tuple([(tensor_factor, 1 - action)\n for (tensor_factor,\n action) in reversed(term)])\n conjugate_operator.terms[conjugate_term] = coefficient.conjugate()\n\n # Handle BosonOperator\n elif isinstance(operator, BosonOperator):\n conjugate_operator = BosonOperator()\n for term, coefficient in operator.terms.items():\n conjugate_term = tuple([(tensor_factor, 1 - action)\n for (tensor_factor,\n action) in reversed(term)])\n # take into account that different indices commute\n conjugate_term = tuple(\n sorted(conjugate_term, key=lambda factor: factor[0]))\n conjugate_operator.terms[conjugate_term] = coefficient.conjugate()\n\n # Handle QubitOperator\n elif isinstance(operator, QubitOperator):\n conjugate_operator = QubitOperator()\n for term, coefficient in operator.terms.items():\n conjugate_operator.terms[term] = coefficient.conjugate()\n\n # Handle QuadOperator\n elif isinstance(operator, QuadOperator):\n conjugate_operator = QuadOperator()\n for term, coefficient in operator.terms.items():\n conjugate_term = reversed(term)\n # take into account that different indices commute\n conjugate_term = tuple(\n sorted(conjugate_term, key=lambda factor: factor[0]))\n conjugate_operator.terms[conjugate_term] = coefficient.conjugate()\n\n # Handle InteractionOperator\n elif isinstance(operator, InteractionOperator):\n conjugate_constant = operator.constant.conjugate()\n conjugate_one_body_tensor = hermitian_conjugated(\n operator.one_body_tensor)\n conjugate_two_body_tensor = hermitian_conjugated(\n operator.two_body_tensor)\n conjugate_operator = type(operator)(conjugate_constant,\n conjugate_one_body_tensor,\n conjugate_two_body_tensor)\n\n # Handle sparse matrix\n elif isinstance(operator, spmatrix):\n conjugate_operator = operator.getH()\n\n # Handle numpy array\n elif isinstance(operator, numpy.ndarray):\n conjugate_operator = operator.T.conj()\n\n # Unsupported type\n else:\n raise 
TypeError('Taking the hermitian conjugate of a {} is not '\n 'supported.'.format(type(operator).__name__))\n\n return conjugate_operator", "def current_operator(h0):\n h = h0.copy()\n h = h0.get_multicell() # multicell Hamiltonian\n if h.dimensionality != 1: raise # only for 1d\n if not h.is_multicell: # no multicell\n def fj(k):\n phik = np.exp(1j*2.*np.pi*k) # complex phase\n jk = 1j*(h.inter*phik - h.inter.H*np.conjugate(phik)) \n return jk\n else: # multicell Hamiltonian\n def fj(k):\n jk = h.intra*0. # initialize\n for t in h.hopping:\n phik = np.exp(1j*2.*np.pi*k*t.dir[0]) # complex phase\n jk = jk + 1j*t.m*phik*t.dir[0]\n return jk\n return fj", "def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)", "def create_pauli_qiskit(pauli):\n\ttemp = [] # List in which save the weights and the Pauli strings\n\tfor key in pauli.keys(): # Iterate over all the Pauli strings\n\t\ttemp.append([pauli[key], Pauli(key)])\n\treturn WeightedPauliOperator(temp) # Transform the list into a qiskit operator", "def hermitian_conjugated(operator):\n # Handle FermionOperator\n if isinstance(operator, FermionOperator):\n conjugate_operator = FermionOperator()\n for term, coefficient in operator.terms.items():\n # reverse the order and change the action from 0(1) to 1(0)\n conjugate_term = tuple([(index, 1 - op)\n for (index, op) in reversed(term)])\n conjugate_operator.terms[conjugate_term] = coefficient.conjugate()\n\n # Handle QubitOperator\n elif isinstance(operator, QubitOperator):\n conjugate_operator = QubitOperator()\n for term, coefficient in operator.terms.items():\n conjugate_operator.terms[term] = coefficient.conjugate()\n\n # 
Handle QubitExcitationOperator\n elif isinstance(operator, QubitExcitationOperator):\n conjugate_operator = QubitExcitationOperator()\n for term, coefficient in operator.terms.items():\n # reverse the order and change the action from 0(1) to 1(0)\n conjugate_term = tuple([(index, 1 - op)\n for (index, op) in reversed(term)])\n conjugate_operator.terms[conjugate_term] = coefficient.conjugate()\n\n # Unsupported type\n else:\n raise TypeError('Taking the hermitian conjugate of a {} is not '\n 'supported.'.format(type(operator).__name__))\n\n return conjugate_operator", "def get_CC_operators():\n i = symbols('i', below_fermi=True, cls=Dummy)\n a = symbols('a', above_fermi=True, cls=Dummy)\n t_ai = AntiSymmetricTensor('t', (a,), (i,))\n ai = NO(Fd(a)*F(i))\n i, j = symbols('i,j', below_fermi=True, cls=Dummy)\n a, b = symbols('a,b', above_fermi=True, cls=Dummy)\n t_abij = AntiSymmetricTensor('t', (a, b), (i, j))\n abji = NO(Fd(a)*Fd(b)*F(j)*F(i))\n\n T1 = t_ai*ai\n T2 = Rational(1, 4)*t_abij*abji\n return (T1, T2)", "def _instruction_to_superop(cls, obj):\n if not isinstance(obj, Instruction):\n raise QiskitError(\"Input is not an instruction.\")\n chan = None\n if obj.name == \"reset\":\n # For superoperator evolution we can simulate a reset as\n # a non-unitary superoperator matrix\n chan = SuperOp(np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]))\n if obj.name == \"kraus\":\n kraus = obj.params\n dim = len(kraus[0])\n chan = SuperOp(_to_superop(\"Kraus\", (kraus, None), dim, dim))\n elif hasattr(obj, \"to_matrix\"):\n # If instruction is a gate first we see if it has a\n # `to_matrix` definition and if so use that.\n try:\n kraus = [obj.to_matrix()]\n dim = len(kraus[0])\n chan = SuperOp(_to_superop(\"Kraus\", (kraus, None), dim, dim))\n except QiskitError:\n pass\n return chan", "def buildOperatorCache(ham: Dict[str, Any]) -> None:\n sysLevel = ham[\"circuit\"][\"sys_level\"]\n qubitNum = ham[\"circuit\"][\"qubits\"]\n\n # Generator the operator for all of the drift terms\n for key in ham[\"drift\"]:\n drifts = ham[\"drift\"][key]\n operator = generateOperator(drifts[\"on_qubits\"], drifts[\"matrices\"], sysLevel, qubitNum) * drifts[\"amp\"]\n ham[\"cache\"][\"operator\"][\"drift\"][key] = operator\n\n # Sum all the drift terms and save to the cache.\n if isinstance(sysLevel, int):\n driftTotal = numpy.zeros((sysLevel ** qubitNum, sysLevel ** qubitNum), dtype=complex)\n elif isinstance(sysLevel, list):\n dim = 1\n for i in sysLevel:\n dim = dim * i\n driftTotal = numpy.zeros((dim, dim), dtype=complex)\n for key in ham[\"cache\"][\"operator\"][\"drift\"]:\n driftTotal = driftTotal + ham[\"cache\"][\"operator\"][\"drift\"][key]\n ham[\"cache\"][\"matrix_of_drift\"] = driftTotal\n\n # Generator the pulse sequences for all of the control terms.\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n operator = generateOperator(ctrls[\"on_qubits\"], ctrls[\"matrices\"], sysLevel, qubitNum)\n ham[\"cache\"][\"operator\"][\"control\"][key] = operator", "def _qij_plus(i: int, j: int):\n ia = i * 2 + 0\n ja = j * 2 + 0\n term = FermionOperator(((ja, 0), (ia, 0)), 1.0)\n return term", "def make_circuit(A, b, num_clock_qubits):\n \n # save smaller circuit example for display\n global QC_, U_, UI_, QFT_, QFTI_, HP_, INVROT_\n\n # read in number of qubits\n N = len(A)\n n = int(np.log2(N))\n n_t = num_clock_qubits # number of qubits in clock register\n \n # lower bound on eigenvalues of A. 
Fixed for now\n C = 1/4\n \n ''' Define sets of qubits for this algorithm '''\n \n # create 'input' quantum and classical measurement register\n qr = QuantumRegister(n, name='input')\n qr_b = QuantumRegister(n, name='in_anc') # ancillas for Hamiltonian simulation (?)\n cr = ClassicalRegister(n)\n \n # create 'clock' quantum register\n qr_t = QuantumRegister(n_t, name='clock') # for phase estimation\n \n # create 'ancilla' quantum and classical measurement register\n qr_a = QuantumRegister(1, name='ancilla') # ancilla qubit\n cr_a = ClassicalRegister(1)\n \n # create the top-level HHL circuit, with all the registers\n qc = QuantumCircuit(qr, qr_b, qr_t, qr_a, cr, cr_a)\n\n ''' Initialize the input and clock qubits '''\n \n # initialize the |b> state - the 'input'\n qc = initialize_state(qc, qr, b)\n \n #qc.barrier()\n\n # Hadamard the phase estimation register - the 'clock'\n for q in range(n_t):\n qc.h(qr_t[q])\n\n qc.barrier()\n \n ''' Perform Quantum Phase Estimation on input (b), clock, and ancilla '''\n \n # perform controlled e^(i*A*t)\n for q in range(n_t):\n control = qr_t[q]\n anc = qr_a[0]\n phase = -(2*pi)*2**q \n qc_u = shs.control_Ham_sim(n, A, phase)\n if phase <= 0:\n qc_u.name = \"e^{\" + str(q) + \"iAt}\"\n else:\n qc_u.name = \"e^{-\" + str(q) + \"iAt}\"\n if U_ == None:\n U_ = qc_u\n qc.append(qc_u, qr[0:len(qr)] + qr_b[0:len(qr_b)] + [control] + [anc])\n\n qc.barrier()\n \n ''' Perform Inverse Quantum Fourier Transform on clock qubits '''\n \n #qc = IQFT(qc, qr_t)\n \n qc_qfti = inv_qft_gate(n_t, method=2)\n qc.append(qc_qfti, qr_t)\n\n if QFTI_ == None:\n QFTI_ = qc_qfti\n \n qc.barrier()\n \n ''' Perform inverse rotation with ancilla '''\n \n # reset ancilla\n qc.reset(qr_a[0])\n \n # compute angles for inversion rotations\n alpha = [2*np.arcsin(C)]\n for x in range(1,2**n_t):\n x_bin_rev = np.binary_repr(x, width=n_t)[::-1]\n lam = int(x_bin_rev,2)/(2**n_t)\n if lam < C:\n alpha.append(0)\n elif lam >= C:\n alpha.append(2*np.arcsin(C/lam))\n theta = ucr.alpha2theta(alpha)\n \n # do inversion step\n\n qc_invrot = ucr.uniformly_controlled_rot(n_t, theta)\n qc.append(qc_invrot, qr_t[0:len(qr_t)] + [qr_a[0]])\n \n if INVROT_ == None:\n INVROT_ = qc_invrot\n \n # and measure ancilla\n \n qc.measure(qr_a[0], cr_a[0])\n qc.reset(qr_a[0])\n\n qc.barrier()\n \n ''' Perform Quantum Fourier Transform on clock qubits '''\n \n #qc = QFT(qc, qr_t)\n \n qc_qft = qft_gate(n_t, method=2)\n qc.append(qc_qft, qr_t)\n\n if QFT_ == None:\n QFT_ = qc_qft\n \n qc.barrier()\n \n ''' Perform Inverse Quantum Phase Estimation on input (b), clock, and ancilla '''\n \n # uncompute phase estimation\n # perform controlled e^(-i*A*t)\n for q in reversed(range(n_t)):\n control = qr_t[q]\n phase = (2*pi)*2**q \n qc_ui = shs.control_Ham_sim(n, A, phase)\n if phase <= 0:\n qc_ui.name = \"e^{\" + str(q) + \"iAt}\"\n else:\n qc_ui.name = \"e^{-\" + str(q) + \"iAt}\"\n if UI_ == None:\n UI_ = qc_ui\n qc.append(qc_ui, qr[0:len(qr)] + qr_b[0:len(qr_b)] + [control] + [anc])\n\n qc.barrier()\n \n # Hadamard (again) the phase estimation register - the 'clock'\n for q in range(n_t):\n qc.h(qr_t[q])\n \n qc.barrier()\n \n ''' Perform final measurements '''\n \n # measure ancilla and main register\n qc.measure(qr[0:], cr[0:])\n\n if QC_ == None:\n QC_ = qc\n #print(f\"... made circuit = \\n{QC_}\")\n\n return qc" ]
[ "0.6446214", "0.6210634", "0.620553", "0.61128324", "0.60487795", "0.5991503", "0.59252745", "0.5920399", "0.5836899", "0.58049667", "0.58037627", "0.5781532", "0.57572883", "0.5695401", "0.56880194", "0.5616026", "0.5606468", "0.5605433", "0.5605308", "0.5558584", "0.55467737", "0.5527012", "0.55148256", "0.5510415", "0.5501628", "0.5485597", "0.5454796", "0.5451477", "0.5439855", "0.54032683" ]
0.64617664
0
Calculate the edge operator B_i. The definitions used here are
def edge_operator_b(edge_matrix_indices: numpy.ndarray, i: int) -> QubitOperator:
    B_i = QubitOperator()
    qubit_position_matrix = numpy.array(numpy.where(edge_matrix_indices == i))
    qubit_position = qubit_position_matrix[1][:]
    qubit_position = numpy.sort(qubit_position)
    operator = tuple()
    for d1 in qubit_position:
        operator += ((int(d1), 'Z'),)
    B_i += QubitOperator(operator)
    return B_i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bksf_edge_op_bi(self):\n edge_matrix = np.triu(np.ones((4, 4)))\n edge_list = np.array(np.nonzero(np.triu(edge_matrix) - np.diag(np.diag(edge_matrix))))\n qterm_b0 = _edge_operator_bi(edge_list, 0)\n qterm_b1 = _edge_operator_bi(edge_list, 1)\n qterm_b2 = _edge_operator_bi(edge_list, 2)\n qterm_b3 = _edge_operator_bi(edge_list, 3)\n\n ref_qterm_b0 = SparsePauliOp(\"IIIZZZ\")\n ref_qterm_b1 = SparsePauliOp(\"IZZIIZ\")\n ref_qterm_b2 = SparsePauliOp(\"ZIZIZI\")\n ref_qterm_b3 = SparsePauliOp(\"ZZIZII\")\n\n with self.subTest(\"Test edge operator b0\"):\n self.assertEqual(qterm_b0, ref_qterm_b0)\n with self.subTest(\"Test edge operator b1\"):\n self.assertEqual(qterm_b1, ref_qterm_b1)\n with self.subTest(\"Test edge operator b2\"):\n self.assertEqual(qterm_b2, ref_qterm_b2)\n with self.subTest(\"Test edge operator b3\"):\n self.assertEqual(qterm_b3, ref_qterm_b3)", "def test_bksf_edge_op_aij(self):\n edge_matrix = np.triu(np.ones((4, 4)))\n edge_list = np.array(np.nonzero(np.triu(edge_matrix) - np.diag(np.diag(edge_matrix))))\n qterm_a01 = _edge_operator_aij(edge_list, 0, 1)\n qterm_a02 = _edge_operator_aij(edge_list, 0, 2)\n qterm_a03 = _edge_operator_aij(edge_list, 0, 3)\n qterm_a12 = _edge_operator_aij(edge_list, 1, 2)\n qterm_a13 = _edge_operator_aij(edge_list, 1, 3)\n qterm_a23 = _edge_operator_aij(edge_list, 2, 3)\n\n ref_qterm_a01 = SparsePauliOp(\"IIIIIX\")\n ref_qterm_a02 = SparsePauliOp(\"IIIIXZ\")\n ref_qterm_a03 = SparsePauliOp(\"IIIXZZ\")\n ref_qterm_a12 = SparsePauliOp(\"IIXIZZ\")\n ref_qterm_a13 = SparsePauliOp(\"IXZZIZ\")\n ref_qterm_a23 = SparsePauliOp(\"XZZZZI\")\n\n with self.subTest(\"Test edge operator a01\"):\n self.assertEqual(qterm_a01, ref_qterm_a01)\n with self.subTest(\"Test edge operator a02\"):\n self.assertEqual(qterm_a02, ref_qterm_a02)\n with self.subTest(\"Test edge operator a03\"):\n self.assertEqual(qterm_a03, ref_qterm_a03)\n with self.subTest(\"Test edge operator a12\"):\n self.assertEqual(qterm_a12, ref_qterm_a12)\n with self.subTest(\"Test edge operator a13\"):\n self.assertEqual(qterm_a13, ref_qterm_a13)\n with self.subTest(\"Test edge operator a23\"):\n self.assertEqual(qterm_a23, ref_qterm_a23)", "def basis_message_func(self, edges):\r\n if self.num_bases < self.num_rels:\r\n # generate all weights from bases\r\n # 压缩维度\r\n weight = self.weight.view(self.num_bases,\r\n self.in_feat * self.out_feat)\r\n # 矩阵相乘,回归维度,上一步也所维度为了矩阵点乘,下一步再重新返回,得到self.num_rels*self.in_feat*self.out_feat张量\r\n weight = th.matmul(self.w_comp, weight).view(\r\n self.num_rels, self.in_feat, self.out_feat)\r\n else:\r\n # 接__init__函数中的判断语句,如果base比rel多,则不进行basis.\r\n weight = self.weight\r\n\r\n # 根据edges.data['type']切割张量weight\r\n # 根据参数weight和edges.data['type']先选择相应的relation下的矩阵\r\n # 对于edges.src['h'],在第2个维度上增加一个维度,使原来的n*din变为n*1*din\r\n # 运用bmm算法,将edges.src['h']与weight相乘,得到n*1*dout维张量,最后去掉一维,得到n*dout维输出\r\n msg = utils.bmm_maybe_select(edges.src['h'], weight, edges.data['type'].long())\r\n if 'norm' in edges.data:\r\n if self.norm == \"n\":\r\n msg = msg * edges.data['norm']\r\n elif self.norm == \"n2\":\r\n normm = th.pow(edges.data['norm'], 2)\r\n msg = msg * normm\r\n elif self.norm == \"sqrt\":\r\n normm = th.sqrt(edges.data['norm'])\r\n msg = msg * normm\r\n elif self.norm == \"clamp\":\r\n normm = th.clamp(edges.data['norm'], min=0.05)\r\n msg = msg * normm\r\n return {'msg': msg}", "def compute_edge_logits(self):", "def edge_model(label1, label2):\n if label1 == label2:\n return ALPHA\n else:\n return 1-ALPHA", "def train_op_b(self):\r\n return 
self._train_op_b", "def get_b(self):\n return ((self.b_plus_bstar / self.n_pos) + (self.b_minus_bstar / self.n_neg)) / 2", "def get_bias(self):", "def calculateMetallicityBinEdges(self):\n\n if self.binInLogSpace:\n logMetallicities = np.log10(self.metallicityGrid)\n b= logMetallicities[:-1] + (logMetallicities[1:] - logMetallicities[:-1])/2.\n b = 10.**b #the boundaries for integration are not in log space so\n #convert to \"normal\" numbers.\n else:\n b= (self.metallicityGrid[1:] - self.metallicityGrid[:-1])/2. \\\n + self.metallicityGrid[:-1] \n\n self.metallicityBinEdges = np.zeros(len(b)+2)\n\n #the lowest/highest metallicity bin edge are set in options\n #the calculated b edges are all in between\n\n self.metallicityBinEdges[0] = self.metallicityLowerLimit\n self.metallicityBinEdges[-1] = self.metallicityUpperLimit\n self.metallicityBinEdges[1:-1] = b", "def edge_operator_aij(edge_matrix_indices: numpy.ndarray, i: int,\n j: int) -> QubitOperator:\n a_ij = QubitOperator()\n operator = tuple()\n position_ij = -1\n qubit_position_i = numpy.array(numpy.where(edge_matrix_indices == i))\n for edge_index in range(numpy.size(edge_matrix_indices[0, :])):\n if set((i, j)) == set(edge_matrix_indices[:, edge_index]):\n position_ij = edge_index\n operator += ((int(position_ij), 'X'),)\n\n for edge_index in range(numpy.size(qubit_position_i[0, :])):\n if edge_matrix_indices[int(not (qubit_position_i[0, edge_index]))][\n qubit_position_i[1, edge_index]] < j:\n operator += ((int(qubit_position_i[1, edge_index]), 'Z'),)\n qubit_position_j = numpy.array(numpy.where(edge_matrix_indices == j))\n for edge_index in range(numpy.size(qubit_position_j[0, :])):\n if edge_matrix_indices[int(not (qubit_position_j[0, edge_index]))][\n qubit_position_j[1, edge_index]] < i:\n operator += ((int(qubit_position_j[1, edge_index]), 'Z'),)\n a_ij += QubitOperator(operator, 1)\n if j < i:\n a_ij = -1 * a_ij\n return a_ij", "def GetBoundaryEdgesPent(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n else:\n return\n\n node_arranger = np.array([\n [0,1],\n [1,2],\n [2,3],\n [3,4],\n [4,0],\n ])\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]],\n self.elements[:,node_arranger[4,:]]),axis=0).astype(np.uint64)\n\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # 
ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges", "def _two_body(edge_matrix_indices: numpy.ndarray, p: int, q: int, r: int,\n s: int) -> QubitOperator:\n # Initialize qubit operator.\n qubit_operator = QubitOperator()\n\n # Handle case of four unique indices.\n if len(set([p, q, r, s])) == 4:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_pq = edge_operator_aij(edge_matrix_indices, p, q)\n A_rs = edge_operator_aij(edge_matrix_indices, r, s)\n qubit_operator += 1 / 8. * A_pq * A_rs * (-QubitOperator(\n ()) - B_p * B_q + B_p * B_r + B_p * B_s + B_q * B_r + B_q * B_s -\n B_r * B_s +\n B_p * B_q * B_r * B_s)\n return qubit_operator\n\n # Handle case of three unique indices.\n elif len(set([p, q, r, s])) == 3:\n # Identify equal tensor factors.\n if p == r:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_qs = edge_operator_aij(edge_matrix_indices, q, s)\n qubit_operator += 1j * (A_qs * B_s + B_q * A_qs) * (QubitOperator(\n ()) - B_p) / 4.\n\n elif p == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n A_qr = edge_operator_aij(edge_matrix_indices, q, r)\n qubit_operator += -1j * (A_qr * B_r + B_q * A_qr) * (QubitOperator(\n ()) - B_p) / 4.\n\n elif q == r:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_s = edge_operator_b(edge_matrix_indices, s)\n A_ps = edge_operator_aij(edge_matrix_indices, p, s)\n qubit_operator += -1j * (A_ps * B_s + B_p * A_ps) * (QubitOperator(\n ()) - B_q) / 4.\n\n elif q == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n B_r = edge_operator_b(edge_matrix_indices, r)\n A_pr = edge_operator_aij(edge_matrix_indices, p, r)\n qubit_operator += 1j * (A_pr * B_r + B_p * A_pr) * (QubitOperator(\n ()) - B_q) / 4.\n\n # Handle case of two unique indices.\n elif len(set([p, q, r, s])) == 2:\n # Get coefficient.\n if p == s:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n qubit_operator += (QubitOperator(()) - B_p) * (QubitOperator(\n ()) - B_q) / 4.\n\n else:\n B_p = edge_operator_b(edge_matrix_indices, p)\n B_q = edge_operator_b(edge_matrix_indices, q)\n qubit_operator += -(QubitOperator(()) - B_p) * (QubitOperator(\n ()) - B_q) / 4.\n\n return qubit_operator", "def build_I(self,wedge):\n\n list_k = wedge.list_k\n list_kq = list_k + self.q_vector\n\n fk = function_fk(list_k)\n eps_k = function_epsilon_k(list_k)\n\n list_epsilon_k = [-eps_k, eps_k]\n\n fkq = function_fk(list_kq)\n eps_kq = function_epsilon_k(list_kq)\n\n list_epsilon_kq = [-eps_kq, eps_kq]\n\n list_Fk = []\n list_Fkq = []\n for epsilon_k, epsilon_kq in zip(list_epsilon_k,list_epsilon_kq):\n\n list_Fk.append(function_fermi_occupation(epsilon_k,self.mu,self.beta))\n list_Fkq.append(function_fermi_occupation(epsilon_kq,self.mu,self.beta))\n\n\n for n1, e1, f1 in zip([0,1],list_epsilon_k, list_Fk):\n\n for n3, e3, f3 in zip([0,1],list_epsilon_kq, list_Fkq):\n den13 = 
self.cutoff_denominator(e1-e3)\n\n for n2, e2, f2 in zip([0,1],list_epsilon_k, list_Fk):\n\n for i, get_gamma in zip([0,1], [get_gamma1, get_gamma2]):\n\n g1 = get_gamma(e1)\n g3 = get_gamma(e3)\n\n key = (i, n1,n2,n3)\n index = self.index_dictionary[key]\n\n freq12 = self.get_frequency_term(e1-e2)\n\n freq32 = self.get_frequency_term(e3-e2)\n\n fac12 = (f1-f2)*g1\n fac32 = (f3-f2)*g3\n\n numerator = fac12[:,N.newaxis]*freq12 -fac32[:,N.newaxis]*freq32\n\n self.I[index,:,:] = self.conversion_factor*den13[:,N.newaxis]*numerator \n\n return", "def format_bi(op):\n line = '.BI '\n if op['short_op'] is not None:\n line += '-' + op['short_op'] + '/'\n line += '--' + op['long_op']\n if op['has_arg'] != 'NULL':\n if op['has_arg'] == 'no_argument':\n markers = ['', '']\n elif op['has_arg'] == 'required_argument':\n markers = ['<', '>']\n elif op['has_arg'] == 'optional_argument':\n markers = ['[', ']']\n else:\n markers = ['', '']\n\n line += ' \" {}{}{}\"'.format(\n markers[0],\n op['long_op'].replace('-', '_'),\n markers[1],\n )\n return line", "def edges(image):\n #store image width and height and initialize new image\n image_width = image['width']\n image_height = image['height']\n new_image = {'height': image['height'], 'width': image['width'], 'pixels': len(image['pixels'])*[0]}\n \n #sobel operator kernels\n kernel_x = {'height': 3, 'width': 3, 'pixels': [-1,0,1,-2,0,2,-1,0,1]}\n kernel_y = {'height': 3, 'width': 3, 'pixels': [-1,-2,-1,0,0,0,1,2,1]}\n \n #creating the filters\n o_x = correlate(image, kernel_x)\n o_y = correlate(image, kernel_y)\n\n #perform relvant calculation for each pixel \n for x in range(image_width):\n for y in range(image_height):\n a = ((get_pixel(o_x, x, y))**2+(get_pixel(o_y, x, y))**2)**0.5\n set_pixel(new_image, x, y, a)\n return round_and_clip_image(new_image)", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def associativity(ob):\n return 0", "def to_block_op(mfie, efie, k, mu):\n A = assembly.BlockedOperator(2, 2) # empty operator object\n A[0,0] = mfie\n A[0,1] = mu/k * efie\n A[1,0] = -k/mu * efie\n A[1,1] = mfie\n return A", "def number_operator(iop, mode_number=None):\n n_qubit = iop.n_qubits\n num_operator = QubitOperator()\n edge_matrix = bravyi_kitaev_fast_edge_matrix(iop)\n edge_matrix_indices = numpy.array(\n numpy.nonzero(\n numpy.triu(edge_matrix) - numpy.diag(numpy.diag(edge_matrix))))\n if mode_number is None:\n for i in range(n_qubit):\n num_operator += (QubitOperator(\n ()) - edge_operator_b(edge_matrix_indices, i)) / 
2.\n\n else:\n num_operator += (QubitOperator(\n ()) - edge_operator_b(edge_matrix_indices, mode_number)) / 2.\n\n return num_operator", "def operator_rhs(self, inp):\n assert self.operator is not None, \\\n \"Please set an operator with the set_operation method\"\n\n return self.operator.adjoint(inp)", "def sigb(o) :\n return o * (1 - o)", "def cell_edges(self):", "def __add_boundary_contrib_inv_operator(self, bc, b_idx):\n if bc is not None:\n if bc.boundary_condition_type is configuration.BoundaryConditionType.DIRICHLET:\n fe_op.apply_pseudo_elimination(self.inv_operator, b_idx)\n elif bc.boundary_condition_type is configuration.BoundaryConditionType.ROBIN:\n fe_op.add_value(self.inv_operator, 0.5 * (self.timestep ** 2) * bc.param, b_idx, b_idx)\n elif bc.boundary_condition_type is configuration.BoundaryConditionType.ABSORBING:\n fe_op.add_value(self.inv_operator, 0.5 * self.timestep * bc.param, b_idx, b_idx)", "def to_instruction(self):\n return self.to_circuit().to_gate()", "def test_create_edge(self):\n n1, n2 = Node('a'), Node('b')\n n1 | n2\n self.assertEqual(n1.eout, [Edge(n1, n2)])\n self.assertEqual(n1.ein, [])\n self.assertEqual(n2.ein, [Edge(n1, n2)])\n self.assertEqual(n2.eout, [])", "def bravyi_kitaev_fast_interaction_op(iop: InteractionOperator\n ) -> QubitOperator:\n n_qubits = count_qubits(iop)\n\n # Initialize qubit operator as constant.\n qubit_operator = QubitOperator((), iop.constant)\n edge_matrix = bravyi_kitaev_fast_edge_matrix(iop)\n edge_matrix_indices = numpy.array(\n numpy.nonzero(\n numpy.triu(edge_matrix) - numpy.diag(numpy.diag(edge_matrix))))\n # Loop through all indices.\n for p in range(n_qubits):\n for q in range(n_qubits):\n # Handle one-body terms.\n coefficient = complex(iop[(p, 1), (q, 0)])\n if coefficient and p >= q:\n qubit_operator += (coefficient *\n _one_body(edge_matrix_indices, p, q))\n\n # Keep looping for the two-body terms.\n for r in range(n_qubits):\n for s in range(n_qubits):\n coefficient = complex(iop[(p, 1), (q, 1), (r, 0), (s, 0)])\n\n # Skip zero terms.\n if (not coefficient) or (p == q) or (r == s):\n # coverage: ignore\n continue\n\n # Identify and skip one of the complex conjugates.\n if [p, q, r, s] != [s, r, q, p]:\n if len(set([p, q, r, s])) == 4:\n if min(r, s) < min(p, q):\n continue\n # Handle case of 3 unique indices\n elif len(set([p, q, r, s])) == 3:\n transformed_term = _two_body(\n edge_matrix_indices, p, q, r, s)\n transformed_term *= .5 * coefficient\n qubit_operator += transformed_term\n continue\n elif p != r and q < p:\n # TODO: remove pragma if reachable continue\n continue # pragma: no cover\n\n # Handle the two-body terms.\n transformed_term = _two_body(edge_matrix_indices, p, q, r,\n s)\n transformed_term *= coefficient\n qubit_operator += transformed_term\n return qubit_operator", "def find_B(self):\n max_lb = 0\n for arc in self.arcs():\n lb = self.arc_info[arc[0]]['lower_bound']\n max_lb = max(max_lb, lb)\n n = len(self)\n m = len(list(self.edges()))\n return((m - n + 2)*max_lb)", "def gen_binop(self, expr: expressions.BinaryOperator):\n if expr.op in [\"*\", \"/\", \"%\", \"^\", \"|\", \"&\", \">>\", \"<<\"]:\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n op = expr.op\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, op, rhs, ir_typ)\n elif expr.op == \",\":\n # Handle the comma operator by returning the second result\n self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n value = rhs\n elif expr.op == \"+\":\n 
# Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n # left and right are swapped in semantics if right is pointer.\n if expr.a.typ.is_pointer:\n assert expr.b.typ.is_integer\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n\n ir_typ = self.get_ir_type(expr.typ)\n value = self.builder.emit_binop(lhs, \"+\", rhs, ir_typ)\n elif expr.op == \"-\":\n # Pay attention to pointer arithmetics!\n lhs = self.gen_expr(expr.a, rvalue=True)\n rhs = self.gen_expr(expr.b, rvalue=True)\n ir_typ = self.get_ir_type(expr.typ)\n if expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if expr.b.typ.is_pointer:\n # pointer - pointer\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir.ptr)\n value = self.emit(ir.Cast(value, \"typecast\", ir_typ))\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", ir_typ))\n value = self.emit(\n ir.Binop(value, \"/\", esize, \"rhs\", ir_typ)\n )\n else:\n # pointer - numeric\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n rhs = self.builder.emit_cast(rhs, ir.ptr)\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n else:\n # numeric - numeric\n value = self.builder.emit_binop(lhs, \"-\", rhs, ir_typ)\n\n elif expr.op in [\"<\", \">\", \"==\", \"!=\", \"<=\", \">=\", \"||\", \"&&\"]:\n value = self.gen_condition_to_integer(expr)\n elif expr.op in [\n \"=\",\n \"+=\",\n \"-=\",\n \"*=\",\n \"%=\",\n \"/=\",\n \">>=\",\n \"<<=\",\n \"&=\",\n \"|=\",\n \"~=\",\n \"^=\",\n ]:\n # Handle struct assignment special case:\n if expr.op == \"=\" and expr.a.typ.is_struct:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=False)\n amount = self.sizeof(expr.a.typ)\n self.gen_copy_struct(lhs, rhs, amount)\n value = None\n else:\n lhs = self.gen_expr(expr.a, rvalue=False)\n rhs = self.gen_expr(expr.b, rvalue=True)\n\n if expr.op == \"=\":\n value = rhs\n else:\n # Handle '+=' and friends:\n op = expr.op[:-1]\n ir_typ = self.get_ir_type(expr.typ)\n loaded = self._load_value(lhs, expr.typ)\n\n # pointer arithmatic:\n if op in [\"+\", \"-\"] and expr.a.typ.is_pointer:\n esize = self.sizeof(expr.a.typ.element_type)\n assert esize > 0\n if esize != 1:\n esize = self.emit(ir.Const(esize, \"esize\", rhs.ty))\n rhs = self.builder.emit_mul(rhs, esize, rhs.ty)\n\n value = self.builder.emit_binop(loaded, op, rhs, ir_typ)\n self._store_value(value, lhs)\n else: # pragma: no cover\n raise NotImplementedError(str(expr.op))\n return value", "def get_invntt_operator(self):\n\n\n Operator = []\n invntt_qubic = self.qubic.get_invntt_operator()\n R_qubic = ReshapeOperator(invntt_qubic.shapeout, invntt_qubic.shape[0])\n Operator.append(R_qubic(invntt_qubic(R_qubic.T)))\n\n invntt_planck = self.planck.get_invntt_operator()\n R_planck = ReshapeOperator(invntt_planck.shapeout, invntt_planck.shape[0])\n Operator.append(R_planck(invntt_planck(R_planck.T)))\n\n return BlockDiagonalOperator(Operator, axisout=0)", "def GetBoundaryEdgesTri(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.edges,np.ndarray):\n if self.edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.edges.shape[1] == 2 and p > 1:\n pass\n 
else:\n return\n\n\n node_arranger = NodeArrangementTri(p-1)[0]\n\n # CONCATENATE ALL THE EDGES MADE FROM ELEMENTS\n all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]]),axis=0)\n # GET UNIQUE ROWS\n uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)\n\n # ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES\n freqs_inv = itemfreq(inv)\n edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]\n # NOT ARRANGED\n self.edges = uniques[edges_ext_flags,:]\n\n # DETERMINE WHICH FACE OF THE ELEMENT THEY ARE\n boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)\n\n # FURTHER RE-ARRANGEMENT / ARANGE THE NODES BASED ON THE ORDER THEY APPEAR\n # IN ELEMENT CONNECTIVITY\n # THIS STEP IS NOT NECESSARY INDEED - ITS JUST FOR RE-ARANGMENT OF EDGES\n all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)\n all_edges_in_edges = np.where(all_edges_in_edges==True)[0]\n\n boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]\n boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]\n\n # ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS\n self.edges = self.elements[boundary_edge_to_element[:,0][:,None],node_arranger[boundary_edge_to_element[:,1],:]]\n self.edges = self.edges.astype(np.uint64)\n self.boundary_edge_to_element = boundary_edge_to_element\n\n return self.edges" ]
[ "0.7364842", "0.6461306", "0.59544754", "0.5885427", "0.567442", "0.5663807", "0.56600136", "0.563404", "0.5605291", "0.5598879", "0.55930614", "0.5551441", "0.55461", "0.5524313", "0.54962194", "0.5494069", "0.5489764", "0.54446363", "0.5434897", "0.54333586", "0.53780913", "0.53575486", "0.53518665", "0.53371364", "0.53331316", "0.53309727", "0.5319853", "0.53185904", "0.53099287", "0.5304907" ]
0.679432
1
Calculate the edge operator A_ij. The definitions used here are
def edge_operator_aij(edge_matrix_indices: numpy.ndarray, i: int, j: int) -> QubitOperator: a_ij = QubitOperator() operator = tuple() position_ij = -1 qubit_position_i = numpy.array(numpy.where(edge_matrix_indices == i)) for edge_index in range(numpy.size(edge_matrix_indices[0, :])): if set((i, j)) == set(edge_matrix_indices[:, edge_index]): position_ij = edge_index operator += ((int(position_ij), 'X'),) for edge_index in range(numpy.size(qubit_position_i[0, :])): if edge_matrix_indices[int(not (qubit_position_i[0, edge_index]))][ qubit_position_i[1, edge_index]] < j: operator += ((int(qubit_position_i[1, edge_index]), 'Z'),) qubit_position_j = numpy.array(numpy.where(edge_matrix_indices == j)) for edge_index in range(numpy.size(qubit_position_j[0, :])): if edge_matrix_indices[int(not (qubit_position_j[0, edge_index]))][ qubit_position_j[1, edge_index]] < i: operator += ((int(qubit_position_j[1, edge_index]), 'Z'),) a_ij += QubitOperator(operator, 1) if j < i: a_ij = -1 * a_ij return a_ij
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bksf_edge_op_aij(self):\n edge_matrix = np.triu(np.ones((4, 4)))\n edge_list = np.array(np.nonzero(np.triu(edge_matrix) - np.diag(np.diag(edge_matrix))))\n qterm_a01 = _edge_operator_aij(edge_list, 0, 1)\n qterm_a02 = _edge_operator_aij(edge_list, 0, 2)\n qterm_a03 = _edge_operator_aij(edge_list, 0, 3)\n qterm_a12 = _edge_operator_aij(edge_list, 1, 2)\n qterm_a13 = _edge_operator_aij(edge_list, 1, 3)\n qterm_a23 = _edge_operator_aij(edge_list, 2, 3)\n\n ref_qterm_a01 = SparsePauliOp(\"IIIIIX\")\n ref_qterm_a02 = SparsePauliOp(\"IIIIXZ\")\n ref_qterm_a03 = SparsePauliOp(\"IIIXZZ\")\n ref_qterm_a12 = SparsePauliOp(\"IIXIZZ\")\n ref_qterm_a13 = SparsePauliOp(\"IXZZIZ\")\n ref_qterm_a23 = SparsePauliOp(\"XZZZZI\")\n\n with self.subTest(\"Test edge operator a01\"):\n self.assertEqual(qterm_a01, ref_qterm_a01)\n with self.subTest(\"Test edge operator a02\"):\n self.assertEqual(qterm_a02, ref_qterm_a02)\n with self.subTest(\"Test edge operator a03\"):\n self.assertEqual(qterm_a03, ref_qterm_a03)\n with self.subTest(\"Test edge operator a12\"):\n self.assertEqual(qterm_a12, ref_qterm_a12)\n with self.subTest(\"Test edge operator a13\"):\n self.assertEqual(qterm_a13, ref_qterm_a13)\n with self.subTest(\"Test edge operator a23\"):\n self.assertEqual(qterm_a23, ref_qterm_a23)", "def adjacency(self):\n if self.E > 0:\n i = self.edges[:, 0]\n j = self.edges[:, 1]\n adj = coo_matrix((np.ones(self.E), (i, j)),\n shape=(self.V, self.V))\n else:\n adj = coo_matrix((self.V, self.V))\n return adj", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def graph_iaa(adj_matrix1, shortest_path_dist1, adj_matrix2, shortest_path_dist2, mode):\n n_nodes = len(adj_matrix1)\n\n n_edges_1 = np.count_nonzero(adj_matrix1)\n n_edges_2 = np.count_nonzero(adj_matrix2)\n sum_of_inverse_1 = 0.0\n sum_of_inverse_2 = 0.0\n for i in range(n_nodes):\n for j in range(n_nodes):\n if adj_matrix1[i][j] != NO_REL_SYMBOL:\n sum_of_inverse_1 += 1.0 / shortest_path_dist2[i][j]\n if adj_matrix2[i][j] != NO_REL_SYMBOL:\n sum_of_inverse_2 += 1.0 / shortest_path_dist1[i][j]\n sum_of_inverse_1 /= float(n_edges_1)\n sum_of_inverse_2 /= float(n_edges_2)\n\n if mode == \"avg\":\n return (sum_of_inverse_1 + sum_of_inverse_2) / 2.0\n elif mode==\"f1\":\n return (2.0 * sum_of_inverse_1 * sum_of_inverse_2) / (sum_of_inverse_1 + sum_of_inverse_2)\n else:\n return None", "def associativity(ob):\n return 0", "def ras2ijk(self,A):\n #productive #math #coordinate-space-conversion\n profprint()\n m=vtk.vtkMatrix4x4()\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n volumeNode.GetIJKToRASMatrix(m)\n m.Invert()\n imageData = volumeNode.GetImageData()\n ijk=[0,0,0]\n k = vtk.vtkMatrix4x4()\n o = vtk.vtkMatrix4x4()\n k.SetElement(0,3,A[0])\n k.SetElement(1,3,A[1])\n k.SetElement(2,3,A[2])\n k.Multiply4x4(m,k,o)\n ijk[0] = o.GetElement(0,3)\n ijk[1] = o.GetElement(1,3)\n ijk[2] = o.GetElement(2,3)\n return ijk", "def adjacency(self,kind='e'):\n inv = self.inverse()\n if kind == 'e':\n adj = inv[self].reshape((self.nelems(),-1))\n elif kind == 'n':\n adj = concatenate([where(inv>=0,self[:,i][inv],inv) for i in range(self.nplex())],axis=1)\n else:\n raise ValueError,\"kind should be 'e' or 'n', got %s\" % str(kind) \n return reduceAdjacency(adj)", "def compute_edge_logits(self):", "def E_ab(a, b, mesh, edge, width, height):\n # Get the UV coordinates of the edge pair, swaping endpoints of one edge\n uv0, uv1 = [mesh.vt[mesh.f[edge[0]][i].vt] for i in edge[1]]\n x0, x1 = [\n 
numpy.array(mesh.vc[mesh.f[edge[0]][i].v]).reshape(1, -1)\n for i in edge[1]\n ]\n\n # Determine the midpoint of the interval in UV-space\n mid_uv = lerp_UV((a + b) / 2., uv0, uv1)\n\n # Determine surrounding pixel indices\n (p00, p10, p01, p11) = surrounding_pixels(\n mid_uv, width, height, as_index=True)\n\n nPixels = width * height\n\n luv0, luv1 = globalEdge_to_local(uv0, uv1, p00, width, height)\n\n # Compute the coefficient matrix for the interval\n (A, B, C) = bilerp_coeffMats(luv0, luv1, 0, 1, 2, 3, 4)\n\n # Each of the A, Ap, B, Bp, C, Cp are 1xN matrices.\n # Q is Nx1 * 1xN = NxN\n def term(M, n):\n \"\"\"\n Compute the integral term with constant matrix (M) and power n after\n integration.\n \"\"\"\n M *= (1. / n * (b**n - a**n)) # Prevent unnecessary copying\n return M\n\n # Product of cooefficents (4x4)\n AA = A.T.dot(A)\n AB = A.T.dot(B)\n AC = A.T.dot(C)\n BB = B.T.dot(B)\n BC = B.T.dot(C)\n CC = C.T.dot(C)\n\n values = (term(AA, 5.) + term(AB + AB.T, 4.) + term(AC + AC.T + BB, 3.)\n + term(BC + BC.T, 2.) + term(CC, 1.))\n\n ijs = numpy.array(list(itertools.product((p00, p10, p01, p11), repeat=2)))\n\n Q = scipy.sparse.coo_matrix(\n (values.ravel(), ijs.reshape(-1, 2).T), shape=(nPixels, nPixels))\n\n # Difference in endpoints\n x1_x0 = x1 - x0\n\n # A, B, C are 1xN and x0, x1 are 1xD\n # L is Nx1 * 1xD = NxD\n values = (term(A.T.dot(x1_x0), 4.0)\n + term(A.T.dot(x0) + B.T.dot(x1_x0), 3.0)\n + term(B.T.dot(x0) + C.T.dot(x1_x0), 2.0)\n + term(C.T.dot(x0), 1.0))\n\n ijs = numpy.array(list(itertools.product(\n (p00, p10, p01, p11), range(x0.shape[1]))))\n\n L = scipy.sparse.coo_matrix(\n (values.ravel(), ijs.reshape(-1, 2).T), shape=(nPixels, x0.shape[1]))\n\n # x0, x1 are 1xD\n # C is Dx1 * 1xD = DxD\n x1_x0x0 = x1_x0.T.dot(x0)\n\n C = (term(x1_x0.T.dot(x1_x0), 3.0) + term(x1_x0x0 + x1_x0x0.T, 2.0)\n + term(x0.T.dot(x0), 1.0))\n\n return Q, L, C", "def ras2ijk(self, A):\r\n # productive #math #coordinate-space-conversion #frequent\r\n if frequent: profprint()\r\n m = vtk.vtkMatrix4x4()\r\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\r\n volumeNode.GetIJKToRASMatrix(m)\r\n m.Invert()\r\n imageData = volumeNode.GetImageData()\r\n ijk = [0, 0, 0]\r\n k = vtk.vtkMatrix4x4()\r\n o = vtk.vtkMatrix4x4()\r\n k.SetElement(0, 3, A[0])\r\n k.SetElement(1, 3, A[1])\r\n k.SetElement(2, 3, A[2])\r\n k.Multiply4x4(m, k, o)\r\n ijk[0] = o.GetElement(0, 3)\r\n ijk[1] = o.GetElement(1, 3)\r\n ijk[2] = o.GetElement(2, 3)\r\n return ijk", "def adjacency_matrix():\n file_path = PROJECT_PATH + \"/geographycal_data/adjacency_matrix/Howgrp.txt\"\n router = Router(adjacency_metrix=file_path)\n # router.write2vtk(router.graph, \"adjacency_matrix\")\n # nx.draw(router.graph)\n # plt.show()\n # adjacency matrix\n A = nx.adjacency_matrix(router.graph, weight=None).toarray()\n # ... and its spectrum\n nx.adjacency_spectrum(router.graph, weight=None)\n # weighted adjacency\n W = nx.adjacency_matrix(router.graph)\n # D\n I = np.reshape(np.ones(12), (-1, 1))\n D = np.matmul(A, I)\n # combinatorial graph Laplacian L = D - A\n L = nx.laplacian_matrix(router.graph, weight=None)\n # ... 
and his spectrum\n nx.laplacian_spectrum(router.graph, weight=None)\n # weighted Laplacian\n Y = nx.laplacian_matrix(router.graph)\n\n # Note\n sumD = np.matmul(I.transpose(), D)\n sumD = sumD[0][0]\n sumA = 0\n for row in np.nditer(A):\n for e in np.nditer(row):\n sumA += e\n\n # Fielder vector\n fiedler_vector = nx.fiedler_vector(router.graph, weight=None)\n\n # Matrix Double index Sum\n\n def D_app(F):\n return D * F\n\n def A_app(F):\n AF = np.zeros(len(F))\n for i, e_i in enumerate(F):\n for j, e_j in enumerate(F):\n if (A[i][j] != 0):\n AF[i] += F[j]\n return AF", "def edge_operator_b(edge_matrix_indices: numpy.ndarray,\n i: int) -> QubitOperator:\n B_i = QubitOperator()\n qubit_position_matrix = numpy.array(numpy.where(edge_matrix_indices == i))\n qubit_position = qubit_position_matrix[1][:]\n qubit_position = numpy.sort(qubit_position)\n operator = tuple()\n for d1 in qubit_position:\n operator += ((int(d1), 'Z'),)\n B_i += QubitOperator(operator)\n return B_i", "def adj_op(self, x):\n raise NotImplementedError(\"'adj_op' is an abstract method.\")", "def adj2edge(adj):\n adj = adj.tocoo().astype(np.float64)\n row = adj.row\n col = adj.col\n values = adj.data\n edge_weights = torch.Tensor(values)\n edge_index = torch.LongTensor([list(row),list(col)])\n return edge_index, edge_weights", "def diagonalize(operator):\n eig_values, eig_vecs = la.eigh(operator)\n # eig_values -= np.amin(eig_values)\n return eig_values, eig_vecs", "def adj(self):\n\t\tres = SquareMatrix(self._rows)\n\t\tfor i in range(self._rows):\n\t\t\tfor j in range(self._rows):\n\t\t\t\tres[i][j] = ((-1) ** (i + j)) * self.minor(j, i)\n\t\treturn res", "def E_edge(mesh, edge, width, height, edge_len):\n uv_edge = [mesh.vt[mesh.f[edge[0]][i].vt] for i in edge[1]]\n intervals = sorted(list(compute_edge_intervals(uv_edge, width, height)))\n\n N = width * height\n depth = len(mesh.vc[0])\n\n Q_edge = AccumulateCOO()\n L_edge = AccumulateCOO()\n C_edge = scipy.sparse.csc_matrix((depth, depth))\n\n # Solve for the energy coeff matrix over the edge pair\n for a, b in pairwise(intervals):\n # Add intervals energy to total Energy\n Q, L, C = E_ab(a, b, mesh, edge, width, height)\n Q_edge.add(Q)\n L_edge.add(L)\n C_edge += C\n\n Q_edge = Q_edge.total((N, N))\n L_edge = L_edge.total((N, depth))\n\n # Multiply by the length of the edge in 3D\n return edge_len * Q_edge, edge_len * L_edge, edge_len * C_edge", "def inv(self, Am):\r\n # Section 1: MAmke sure Am cAmn be inverted.\r\n self.check_squareness(Am)\r\n self.check_non_singular(Am)\r\n \r\n # Section 2: MAmke copies of Am & I, AmM & IM, to use for row ops\r\n n = len(Am)\r\n AmM = self.copy_matrix(Am)\r\n I = self.identity_matrix(n)\r\n IM = self.copy_matrix(I)\r\n \r\n # Section 3: Perform row operAmtions\r\n indices = list(range(n)) # to Amllow flexible row referencing ***\r\n for fd in range(n): # fd stAmnds for focus diAmgonAml\r\n fdScAmler = 1.0 / AmM[fd][fd]\r\n # FIRST: scAmle fd row with fd inverse. 
\r\n for j in range(n): # Use j to indicAmte column looping.\r\n AmM[fd][j] *= fdScAmler\r\n IM[fd][j] *= fdScAmler\r\n # SECOND: operAmte on Amll rows except fd row Ams follows:\r\n for i in indices[0:fd] + indices[fd+1:]: \r\n # *** skip row with fd in it.\r\n crScAmler = AmM[i][fd] # cr stAmnds for \"current row\".\r\n for j in range(n): \r\n # cr - crScAmler * fdRow, but one element Amt Am time.\r\n AmM[i][j] = AmM[i][j] - crScAmler * AmM[fd][j]\r\n IM[i][j] = IM[i][j] - crScAmler * IM[fd][j]\r\n \r\n return IM", "def anti_symmeterize(self):\n A = self.to_coo_matrix()\n symg = wgraph_from_adjacency((A - A.T) / 2)\n self.E = symg.E\n self.edges = symg.edges\n self.weights = symg.weights\n return self.E", "def build_I(self,wedge):\n\n list_k = wedge.list_k\n list_kq = list_k + self.q_vector\n\n fk = function_fk(list_k)\n eps_k = function_epsilon_k(list_k)\n\n list_epsilon_k = [-eps_k, eps_k]\n\n fkq = function_fk(list_kq)\n eps_kq = function_epsilon_k(list_kq)\n\n list_epsilon_kq = [-eps_kq, eps_kq]\n\n list_Fk = []\n list_Fkq = []\n for epsilon_k, epsilon_kq in zip(list_epsilon_k,list_epsilon_kq):\n\n list_Fk.append(function_fermi_occupation(epsilon_k,self.mu,self.beta))\n list_Fkq.append(function_fermi_occupation(epsilon_kq,self.mu,self.beta))\n\n\n for n1, e1, f1 in zip([0,1],list_epsilon_k, list_Fk):\n\n for n3, e3, f3 in zip([0,1],list_epsilon_kq, list_Fkq):\n den13 = self.cutoff_denominator(e1-e3)\n\n for n2, e2, f2 in zip([0,1],list_epsilon_k, list_Fk):\n\n for i, get_gamma in zip([0,1], [get_gamma1, get_gamma2]):\n\n g1 = get_gamma(e1)\n g3 = get_gamma(e3)\n\n key = (i, n1,n2,n3)\n index = self.index_dictionary[key]\n\n freq12 = self.get_frequency_term(e1-e2)\n\n freq32 = self.get_frequency_term(e3-e2)\n\n fac12 = (f1-f2)*g1\n fac32 = (f3-f2)*g3\n\n numerator = fac12[:,N.newaxis]*freq12 -fac32[:,N.newaxis]*freq32\n\n self.I[index,:,:] = self.conversion_factor*den13[:,N.newaxis]*numerator \n\n return", "def symmeterize(self):\n A = self.to_coo_matrix()\n symg = wgraph_from_adjacency((A + A.T) / 2)\n self.E = symg.E\n self.edges = symg.edges\n self.weights = symg.weights\n return self", "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def adjacencyMatrix(R, edges):\n A = np.zeros((len(R),len(R)))\n for i in range(0, len(edges)):\n A[edges[i][0]][edges[i][1]] = 1\n return A", "def get_adj_matrix(self):\n # This is currently implemented for the case when there are only two edge types (edge and no-edge)\n assert self.Z_edges_logits.shape[1] == 2\n Z_edge_logits = self.Z_edges_logits.detach().cpu().numpy() # [num_edges, 2]\n prob = np.exp(Z_edge_logits) / np.sum(np.exp(Z_edge_logits), axis=-1, keepdims=True) # [num_edges, 2]\n adj_matrix = np.zeros((self.num_nodes, self.num_nodes))\n mask = np.ones((self.num_nodes, self.num_nodes), dtype=bool) & ~np.eye(self.num_nodes, dtype=bool)\n adj_matrix[mask] = prob[:, 1]\n return adj_matrix", "def _qij_plus(i: int, j: int):\n ia = i * 2 + 0\n ja = j * 2 + 0\n term = FermionOperator(((ja, 0), (ia, 0)), 1.0)\n return term", "def a_ij(s, p, i=1, j=1): # (Validated)\n from math import sqrt\n if i == j:\n return s.c[i]['a'] # Return pure paramater\n else: # find mixture aij i =/= j\n return (1 - p.m['k'][i][j]) * sqrt(s.c[i]['a'] * s.c[j]['a'])", "def illuminator_of_elfes():\n\n\t# Alpha - simplified by taking out the i by multiplying the outerproduct by 2i\n\talpha1i = np.matrix([[0, 0, 0, 2], [0, 0, 2, 0], [0, -2, 0, 0], [-2, 0, 
0, 0]])\n\talpha2i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, 2], [0, 0, -2, 0]])\n\talpha3i = np.matrix([[0, 0, 2, 0], [0, 0, 0, -2], [-2, 0, 0, 0], [0, 2, 0, 0]])\n\n\t# Betas - simplified by taking out the i by multiplication of outerprod by 2i\n\tbeta1i = np.matrix([[0, 0, 0, 2], [0, 0, -2, 0], [0, 2, 0, 0], [-2, 0, 0, 0]])\n\tbeta2i = np.matrix([[0, 0, 2, 0], [0, 0, 0, 2], [-2, 0, 0, 0], [0, -2, 0, 0]])\n\tbeta3i = np.matrix([[0, 2, 0, 0], [-2, 0, 0, 0], [0, 0, 0, -2], [0, 0, 2, 0]])\n\n\t# print(\"alpha 1\")\n\t# print(alpha1i)\n\t# print(\"\")\n\t# print(\"alpha 2\")\n\t# print(alpha2i)\n\t# print(\"\")\n\t# print(\"alpha 3\")\n\t# print(alpha3i)\n\t# print(\"\")\n\t# print(\"beta 1\")\n\t# print(beta1i)\n\t# print(\"\")\n\t# print(\"beta 2\")\n\t# print(beta2i)\n\t# print(\"\")\n\t# print(\"beta 3\")\n\t# print(beta3i)\n\t# print(\"\")\n\n\t# abperm_comb = [ np.multiply(alpha1i,-1), np.multiply(alpha2i,-1), np.multiply(alpha3i,-1), np.multiply(beta1i,-1), np.multiply(beta2i,-1), np.multiply(beta3i,-1)]\n\n\tabperm_comb = [alpha1i, alpha2i, alpha3i, beta1i, beta2i, beta3i]\n\treturn abperm_comb", "def edge(cls, edge):\n return cls(Lnk.EDGE, int(edge))", "def internal_adjacency(self, node_list):\n # Create igraph Graph object describing the subgraph\n subgraph = self.graph.subgraph(node_list)\n # Get adjacency matrix\n return np.array(subgraph.get_adjacency(type=2).data).astype(np.int8)", "def witch_of_agnesi(nx=100, ny=100, a=4.0):\n xc = int(np.floor(nx / 2.0))\n yc = int(np.floor(ny / 2.0))\n X, Y = np.meshgrid(range(nx), range(ny))\n D = np.sqrt( (X-xc)**2 + (Y-yc)**2 )\n\n return (8.0 * a**3) / (D**2 + 4 * a**2)" ]
[ "0.645519", "0.62153924", "0.5992542", "0.58474886", "0.5774487", "0.56838363", "0.5668433", "0.566825", "0.56331223", "0.56207055", "0.5588716", "0.5560463", "0.55506945", "0.55056185", "0.5448718", "0.5444221", "0.54131556", "0.5394625", "0.538959", "0.5291665", "0.5259175", "0.52497804", "0.5242005", "0.5240655", "0.52395386", "0.5228881", "0.5222938", "0.5166887", "0.5151328", "0.5150723" ]
0.6783255
0
Find the qubit operator for the number operator in bravyi_kitaev_fast representation
def number_operator(iop, mode_number=None): n_qubit = iop.n_qubits num_operator = QubitOperator() edge_matrix = bravyi_kitaev_fast_edge_matrix(iop) edge_matrix_indices = numpy.array( numpy.nonzero( numpy.triu(edge_matrix) - numpy.diag(numpy.diag(edge_matrix)))) if mode_number is None: for i in range(n_qubit): num_operator += (QubitOperator( ()) - edge_operator_b(edge_matrix_indices, i)) / 2. else: num_operator += (QubitOperator( ()) - edge_operator_b(edge_matrix_indices, mode_number)) / 2. return num_operator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_exact_classical_binary_solution(qubit_operator, offset):\n result = get_classical_solver_result(qubit_operator, offset)\n return result.x", "def Q_i(params):\n Q = params['Q'].value\n Qe = Q_e(params)\n return (Q ** -1 - np.real(Qe ** -1)) ** -1", "def get_quant(q):\n\n try:\n e_q = eval(q)\n except:\n return None\n\n if isinstance(e_q, (int,float,complex)):\n return e_q\n \n return None", "def B(q):\n # print('Value q')\n # print(q)\n if q > 0 and q != 0 and q != 1:\n result = -(q*math.log(q,2) + (1-q)*math.log(1-q,2))\n else:\n result = 0\n # print('Result of B')\n # print(result)\n return result", "def __int__(self):\n return int(self.q[0])", "def test_compar(K):\n K_int = int(np.ceil(K))\n n_k = len(bin(K_int))-1\n complement = np.binary_repr(-K_int, width=n_k)\n qr = QuantumRegister(5, 'q')\n qc = QuantumCircuit(qr)\n for i in range(3):\n qc.h(qr[i])\n qc.ccx(qr[0], qr[1], qr[3])\n for i in [2, 3, 4]:\n qc.x(qr[i])\n qc.ccx(qr[2], qr[3], qr[4])\n for i in [2, 3]:\n qc.x(qr[i])\n qc.ccx(qr[0], qr[1], qr[3])\n circ_m = measure(qc, qr, [i for i in range(5)])\n counts = launch(4000, circ_m)\n print(counts)\n print(complement)", "def stats(self):\n nqbits = self.operator.num_qubits", "def qk(n: float = 1, q_type: str = \"k\", representation: str = \"\") -> Q:\n\n return Q([0, 0, 0, n], q_type=q_type, representation=representation)", "def from_QQ_gmpy(K1, a, K0):\n return K1.dtype(int(a.numer())) / int(a.denom)", "def quanty_index(i,ang=2):\n norb = 2*ang + 1\n k = (i//(2*norb))*(2*norb)\n if (i-k) < norb:\n j = k + 2*(i-k)\n else:\n j = k + 2*((i-k)-norb) + 1\n return j", "def qMethod(g_b, g_n, m_b, m_n):\n\tB = g_b @ g_n.T + m_b @ m_n.T\n\tZ = (np.cross(g_b.flatten(), g_n.flatten()) + np.cross(m_b.flatten(), m_n.flatten())).reshape(-1, 1)\n\tK = np.block([[B + B.T - np.trace(B) * np.eye(3), Z], # quadratic cost max qTKq\n\t\t\t\t [Z.T, np.trace(B)]])\n\tw, v = np.linalg.eig(K)\n\tq_ = v[:, np.argmax(w), np.newaxis] # maximum eigenvector\n\tq_ /= np.linalg.norm(q_)\n\tq_b2n = np.zeros((4, 1)) # convert unit quat from [v s] to [s v]\n\tq_b2n[0, 0] = q_[-1, 0]\n\tq_b2n[1:, 0] = q_[:-1, 0]\n\treturn q_b2n", "def number(self, ket):\n \n final = 0.0\n q = 0\n for i in ket:\n if i != 0:\n final += 2**q\n q += 1 \n return final", "def test_qing(self):\n fun = get_problem('qing', self.dimension, -500, 500)\n self.assertAlmostEqual(fun(self.array10), 584.0, delta=1e-4)", "def from_QQ_gmpy(K1, a, K0=None):\n if a.denominator == 1:\n return K1.from_ZZ_gmpy(a.numerator)", "def qgset(x):\n return 0.2855*x - 0.8565", "def test_qubit_operator_custom_labels(self, obs, expected):\n dev = QeQiskitDevice(\n wires=[\"a\", \"b\", \"c\"], shots=1000, backend=\"qasm_simulator\", analytic=False\n )\n op_str = dev.qubit_operator_string(obs)\n assert op_str == expected", "def qj(n: float = 1.0, q_type: str = \"j\", representation: str = \"\") -> Q:\n\n return Q([0, 0, n, 0], q_type=q_type, representation=representation)", "def Q_term(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33): # strain rate-33 component\n #---------------------------------------------------------------------#\n # Numerator and denominator #\n #---------------------------------------------------------------------#\n num = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + 
omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n den1 = omega1*omega1 + omega2*omega2 + omega3*omega3\n den2 = (s11*s11 + s12*s12 + s13*s13 + s12*s12 + s22*s22 + s23*s23 +\\\n s13*s13 + s23*s23 + s33*s33)**0.5\n den = ((2.0/3.0)**0.5)* den1 * den2\n #---------------------------------------------------------------------#\n # Q calculation #\n #---------------------------------------------------------------------#\n Q = num/den\n\n return Q", "def test_qubit_operator_consec_int_wires(self, obs, expected):\n dev = QeQiskitDevice(wires=3, shots=1000, backend=\"qasm_simulator\", analytic=False)\n op_str = dev.qubit_operator_string(obs)\n assert op_str == expected", "def generateOperator(onQubits: Union[int, List[int]], matrices: Union[numpy.ndarray, List[numpy.ndarray]],\n sysLevel: Union[int, List[int]], qubitNum: int) -> numpy.ndarray:\n # Each qubit of the system has the same energy level. \n if isinstance(sysLevel, int):\n # We first define the identity matrix to fill un-assigned qubits\n idMat = numpy.identity(sysLevel, dtype=complex)\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel, sysLevel), \"Dimension of matrix does not match the system Level.\"\n # The operator is on only one qubit.\n if onQubits == 0:\n # This operator is on the first qubit.\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat)\n else:\n # This operator is not on the first qubit.\n operator = idMat\n for i in range(1, onQubits):\n operator = numpy.kron(operator, idMat)\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = numpy.kron(operator, idMat)\n return operator\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # On the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n else:\n operator = idMat\n else:\n # Not on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel, sysLevel), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat)\n return operator\n \n else:\n assert False, \"Variable onQubits should be a list or an int.\"\n # The sysLevel is a list of different energy levels for multiple qubits\n if isinstance(sysLevel, list):\n # Create a list of identities of different dimension for each qubit of different energy level\n idMat = [numpy.identity(i, dtype=complex) for i in sysLevel]\n # The operator is acting on only one qubit.\n if isinstance(onQubits, int):\n assert numpy.size(matrices) == (sysLevel[onQubits], sysLevel[onQubits]), \"Dimension of matrix does not match the system Level.\" \n # The operator is acting on the first qubit.\n if onQubits == 0:\n operator = matrices\n for i in range(1, qubitNum):\n operator = numpy.kron(operator, idMat[i])\n else:\n # This operator is not acting on the first qubit.\n operator = idMat[0]\n for i in range(1, onQubits):\n operator = numpy.kron(operator, idMat[i])\n operator = numpy.kron(operator, matrices)\n for i in range(onQubits + 1, qubitNum):\n operator = 
numpy.kron(operator, idMat[i])\n return operator\n # The operator is acting on multiple qubits.\n elif isinstance(onQubits, list):\n operator = []\n for i in range(qubitNum):\n if i == 0:\n # Acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operator = matrices[matrixIndex]\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n else:\n operator = idMat[i]\n else:\n # Not acting on the first qubit\n if i in onQubits:\n matrixIndex = onQubits.index(i)\n operatorSize = numpy.shape(matrices[matrixIndex])\n assert operatorSize == (sysLevel[i], sysLevel[i]), \\\n f\"Dim of input matrix {operatorSize} does not match with the system level ({sysLevel[i]}).\"\n operator = numpy.kron(operator, matrices[matrixIndex])\n else:\n operator = numpy.kron(operator, idMat[i])\n return operator\n \n else:\n assert False, \"Variable onQubits should be a list or an int.\"", "def get_unit_conversion_operator(self):\n nu = self.instrument.filter.nu\n return self.scene.get_unit_conversion_operator(nu)", "def __float__(self):\n return self.q[0]", "def calc_iqr(data: list) -> float:\n return calc_q3(data) - calc_q1(data)", "def _blr_tsqr(obj):\n nb = obj.nb[0]\n A = obj\n Q = core.BlockLowRank(numpy.full((nb, 1), None))\n B = numpy.full(nb, None)\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n Qi, Ri = qr(A[i, 0].U)\n Q[i, 0] = Qi\n B[i] = Ri * A[i, 0].V\n else:\n B[i] = A[i, 0]\n\n B = numpy.vstack(B)\n\n if B.shape[0] < B.shape[1]:\n Z = numpy.zeros((B.shape[1] - B.shape[0], B.shape[1]))\n B = numpy.vstack([B, Z])\n\n Qb, R = qr(B)\n rstart, rend = 0, 0\n\n for i in range(nb):\n if isinstance(A[i, 0], core.LowRank):\n rstart = rend\n rend = rend + A[i, 0].rank\n U = Q[i, 0]\n V = Qb[rstart:rend, :]\n Q[i, 0] = core.LowRank((U, V), A[i, 0].method, A[i, 0].eps)\n else:\n rstart = rend\n rend = rend + A[i, 0].shape[0]\n Q[i, 0] = Qb[rstart:rend, :]\n\n return Q, R", "def b(q):\n if q == 0 or q == 1:\n return float(0.0)\n return -(q * log2(q) + (1 - q) * log2(1 - q))", "def test_quintic2(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array7), 0.0)", "def test_quintic(self):\n fun = get_problem('quintic', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array6), 0.0)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def get_eval_k(self, ikpt):\n return self.evals[ikpt, self.ibands]", "def q_from_ea(ea, p):\n return 0.622 * ea / (p - 0.378 * ea)" ]
[ "0.59903646", "0.5763858", "0.5735831", "0.5710417", "0.56848425", "0.566386", "0.5656029", "0.56033295", "0.56028676", "0.55861324", "0.55652666", "0.5559749", "0.5528611", "0.5506494", "0.5478781", "0.5472446", "0.5444578", "0.54406655", "0.5435226", "0.54314655", "0.542751", "0.5407298", "0.54020625", "0.53985673", "0.5397279", "0.5396208", "0.53951377", "0.53766847", "0.5369482", "0.5354593" ]
0.6643483
0
Create a JupyterHubUser This is idempotent. It will create a Sirepo email user if none exists for the email before creating a jupyterhub user It will update the user's display_name if the one supplied is different than the one in the db.
def create_user(email, display_name): import pyisemail import sirepo.auth import sirepo.auth_db import sirepo.server import sirepo.sim_api.jupyterhublogin import sirepo.template def maybe_create_sirepo_user(module, email, display_name): u = module.unchecked_user_by_user_name(email) if u: # Fully registered email user assert sirepo.auth_db.UserRegistration.search_by(uid=u).display_name, \ f'uid={u} authorized AuthEmailUser record but no UserRegistration.display_name' return u m = module.AuthEmailUser.search_by(unverified_email=email) if m: # Email user that needs to complete registration (no display_name but have unverified_email) assert sirepo.auth.need_complete_registration(m), \ 'email={email} has no display_name but does not need to complete registration' pkcli.command_error( 'email={} needs complete registration but we do not have their uid (in cookie)', email, ) # Completely new Sirepo user u = sirepo.auth.create_new_user( lambda u: sirepo.auth.user_registration(u, display_name=display_name), module, ) module.AuthEmailUser( unverified_email=email, uid=u, user_name=email, ).save() return u if not pyisemail.is_email(email): pkcli.command_error('invalid email={}', email) sirepo.server.init() sirepo.template.assert_sim_type('jupyterhublogin') with sirepo.auth_db.session_and_lock(): u = maybe_create_sirepo_user( sirepo.auth.get_module('email'), email, display_name, ) with sirepo.auth.set_user_outside_of_http_request(u): n = sirepo.sim_api.jupyterhublogin.create_user(check_dir=True) return PKDict(email=email, jupyterhub_user_name=n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(username=None, password=None, email=None, is_admin=False, display_name=None):\n\tif not username or not password or not email:\n\t\treturn None # @todo: exception handling \n\tuser = User(username=username, password=password, email=email, is_admin=is_admin, display_name=display_name)\n\ttry:\n\t\tuser.save()\n\t\treturn user\n\texcept NotUniqueError, e:\n\t\traise UserExistsError('User with email address (%s) or username (%s) or useralready exists' % (email, username))\n\t\treturn None", "def create_user(\n email: str = Form(...),\n first_name: str = Form(...),\n last_name: str = Form(...),\n password: str = Form(...),\n) -> Dict:\n # Try to retrieve the user in the db\n user_exists = models.User.objects(email=email).first()\n if user_exists:\n raise HTTPException(\n status_code=status.HTTP_409_CONFLICT, detail=f\"The username already exists\"\n )\n return user.create(email, first_name, last_name, password)", "def create_user(self, email, name, phone1, password=None, signed_up=timezone.localtime(),):\n if not email:\n raise ValueError(_('Users must have an email address'))\n\n user = self.model(\n email=self.normalize_email(email),\n name=name,\n phone1=phone1,\n signed_up=signed_up,\n )\n\n user.set_password(password)\n user.save(using=self._db)\n MyUserProfile.objects.create(myuser=user) \n NotifClick.objects.create(myuser=user) \n\n return user", "def create(\n self, \n user_name, \n display_name,\n password,\n email,\n verified=False, \n ):\n user_name = user_name.lower()\n email = email.lower()\n salt_hashedpassword = ''.join(self.get_salt_hashedpassword(password))\n \n # create user\n user = tables.User(\n user_name=unicode(user_name), \n email=unicode(email), \n display_name=unicode(display_name), \n password=salt_hashedpassword,\n created=tables.now_func(),\n verified=verified, \n )\n self.session.add(user)\n # flush the change, so we can get real user id\n self.session.flush()\n assert user.user_id is not None, 'User id should not be none here'\n \n self.logger.info('Create user %s', user_name)\n return user", "def create_user(self, req):\n\n if models.User.query(models.User.name == req.user_name).get():\n raise endpoints.ConflictException('A User with that name already exists!')\n\n models.User.create(req.user_name, req.email)\n return msgs.StringMessage(msg=\"User {} created!\".format(req.user_name))", "def _create_user(self, email, **extra_fields):\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.save(using=self._db)\n return user", "def create_user(self, username, password, email, name):\n\n duplicate_check = User.query.filter_by(username=username).first()\n if duplicate_check is not None:\n return\n user = User(username=username, password=password, email=email, name=name)\n db.session.add(user)\n db.session.commit()", "def register(email, display_name=None):", "def create_user(backend, details, response, uid, username, user=None, *args,\n **kwargs):\n if user:\n return {'user': user}\n if not username:\n return None\n\n email = details.get('email')\n\n if email:\n try:\n UserSocialAuth.get_user_by_email(email=email)\n raise AuthException(backend, _('\"%(email)s\" is already used by other account. 
If it is your account, login and connect it on profile edit page.') % {\n 'email': email\n })\n except ObjectDoesNotExist:\n pass\n\n user = UserSocialAuth.create_user(username=username, email=email, force_email_valid=True)\n else:\n m = hashlib.md5()\n m.update(str(datetime.datetime.now()))\n email = '%s.%[email protected]' % (m.hexdigest(), backend.name)\n user = UserSocialAuth.create_user(username=username, email=email, send_email_confirmation=False)\n\n return {\n 'user': user,\n 'is_new': True\n }", "def createuser(self, firstname, lastname, email, address1, address2, city, state, country, zipcode, password):\n uquery = {'firstname': firstname,\n 'lastname': lastname,\n 'address1': address1,\n 'address2': address2,\n 'city': city,\n 'state': state,\n 'country' : country,\n 'zipcode' : zipcode,\n 'email': email,\n 'password': password\n }\n\n userdb = self.dbase['users']\n urecord = uquery.copy()\n urecord['created'] = self.epoch()\n emailquery = { 'email': uquery['email'] }\n uqresult= userdb.find_one(emailquery)\n\n result = {'exists': False, 'userid': None}\n if uqresult:\n result['exists'] = True\n result['userid'] = str(uqresult['_id'])\n logging.info(\"== Record Exists. Skipping update. {}\".format(uqresult))\n else:\n logging.info(\"== Record does not exist, creating entry \")\n uqresult = userdb.insert_one(urecord)\n uqresult = userdb.find_one(urecord)\n result['userid'] = str(uqresult['_id'])\n\n return json.dumps(result)", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(email, password):\n try:\n User(email=email, password=password)\n except IntegrityError:\n print('Error: Duplicate email address')", "def create_user(name, email):\n user = register(name, email)\n add_message(user=user, text=config.MSG_WELCOME)\n add_message(user=user, text=config.MSG_UNVERIFIED, can_dismiss=False)\n return user", "def _create_user(self, username, email, password, phone, **extra_fields):\n\n username = self.model.normalize_username(username)\n user = self.model(username=username, email=email, phone=phone, **extra_fields) # using email_id instead of email\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, FirstName,LastName, EmailId, MobileNo, password=None, **extra_fields):\n if not (FirstName and LastName):\n raise ValueError(\"The user's Name must be set\")\n if not EmailId:\n raise ValueError('The given EmailId must be set')\n if not password:\n raise ValueError('The given password must be set')\n if not MobileNo:\n raise ValueError('The given mobile must be set')\n EmailId = self.normalize_email(EmailId)\n 
user = self.model(FirstName =FirstName, LastName =LastName ,EmailId=EmailId, MobileNo=MobileNo, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_User(name, email):\n\n LOGGER.info(\"Create a User %s %s\", name, email)\n try:\n return commons.add_entity(User(name, email))\n\n except exc.SQLAlchemyError as err:\n errors.stracktrace()\n raise exceptions.DatabaseError(CREATE_USER_ERR) from err", "def create_new_user(cls, user_email, user_password, user_phone):\n\n new_user = User(email=user_email, password=user_password, mobile_phone=user_phone)\n\n db.session.add(new_user)\n db.session.commit()\n\n print \"Successfully added new user with the email: %s\" % user_email", "def create_user(email, password, f_name, l_name):\n pass", "def save_new_user(data):\n user = User.query.filter_by(email=data[\"email\"]).first()\n new_user = User(\n public_id=str(uuid.uuid4()),\n email=data[\"email\"],\n username=data[\"username\"],\n password=data[\"password\"],\n registered_on=datetime.datetime.utcnow(),\n )\n\n if not user:\n save_changes(new_user)\n return generate_token(new_user)\n\n else:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"User already exists. Please Log in.\",\n }\n return response_object, 409", "def sample_user_dynamic_email(email):\n return get_user_model().objects.create_user(email=email,\n password=\"password123\",\n name=\"some name\")", "def _create_user(self, email, password, **extra_fields):\n\n email = self.normalize_email(email)\n #username = self.model.normalize_username(username)\n user = self.model( email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user", "def create_user(email, password):\n email_used = AuthUser.query.filter_by(email=email).first()\n if email_used:\n return False, \"Email address has already been used\"\n account = Account(email)\n account.plan_key = 'BASIC'\n account.is_active = True\n account.created = datetime.datetime.now()\n db.session.add(account)\n user = AuthUser(email, password, account)\n user.created = datetime.datetime.now()\n db.session.add(user)\n db.session.commit()\n return user.id, None", "def create_user(self, email_or_phone, password=None, **extra_fields):\n return self._create_user(email_or_phone, password, False, False, **extra_fields)", "def sign_up(email, password, firstname, familyname, gender, city, country):\n userdata = query_db('SELECT * FROM Users WHERE email = ?', [email], one=True)\n if userdata is not None:\n return {'success': False, 'message': 'User already exists.', 'code': 400}\n\n query_db('INSERT INTO Users (email, firstname, familyname, gender, city, country, password_hash) '\n 'VALUES (?, ?, ?, ?, ?, ?, ?)',\n [email, firstname, familyname, gender, city, country, generate_password_hash(password)])\n return {'success': True, 'message': 'Successfully created a new user', 'code': 200}", "def create(self, data):\n # Make User\n username = data['email'].split(\"@\")[0]\n user = User.objects.create_user(**data, username=username, is_verified=False, is_client=True)\n Profile.objects.create(user=user)\n send_confirmation_email.delay(user_pk=user.pk)\n return user", "def create_user():\n try:\n payload = _validatePayload(request)\n timestamp = int(time.time() * 1000)\n user = {\n 'name': payload.get('name'),\n 'email': payload.get('email'),\n 'password': _encodePassword(payload.get('password')),\n 'createdAt': timestamp,\n 'updatedAt': timestamp,\n }\n\n resp = table.put_item(\n Item=user,\n Expected={'email': 
{'Exists': False}}\n )\n return jsonify(user), 200\n except Exception as e:\n logger.info('ERROR {}'.format(str(e)))\n return _customizeErrorMessage(e)", "def _create_user(self, username, email, password, is_staff, is_superuser, first_name, last_name):\n now = timezone.now()\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n user = self.model(email=email,username=username,\n first_name=first_name, last_name=last_name,\n is_staff=is_staff, is_active=True,\n is_superuser=is_superuser,\n date_joined=now)\n user.uuid = generate_uuid()\n user.uniqueid = user.uuid[:4]\n user.set_password(password)\n user.save(using=self._db)\n return user", "def _create_user(self, first_name, last_name, email, password, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n email = self.normalize_email(email)\n first_name = first_name\n last_name = self.last_name\n user = self.model(first_name, last_name,email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user" ]
[ "0.740493", "0.7294818", "0.71817297", "0.7068116", "0.7059704", "0.70219904", "0.70201594", "0.70180523", "0.6983611", "0.6978325", "0.6924951", "0.6922734", "0.6911578", "0.6899807", "0.6899595", "0.689836", "0.6889405", "0.68889606", "0.6876101", "0.6872222", "0.6862059", "0.68575245", "0.6847535", "0.68297756", "0.6829269", "0.68248445", "0.6815755", "0.68133074", "0.6800309", "0.6799878" ]
0.80022687
0
Wrap a model that should run on CPU, transferring inputs and outputs as necessary.
def with_cpu(ops, model): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(model: nn.Module, inputs: torch.Tensor, device: torch.device):\n\n model.eval()\n model.to(device)\n\n with torch.no_grad():\n inputs = inputs.to(device)\n return model(inputs)", "def cpu(self, *args, **kwargs):\n self._tensor = self._tensor.cpu(*args, **kwargs)\n return self", "def forward(self, *inputs, **kwargs):\n\n # Simple processing.\n if not self.device_ids:\n return self.module(*inputs, **kwargs)\n # One device - also easy.\n if len(self.device_ids) == 1:\n return self.module(*inputs[0], **kwargs[0])\n\n # Preprocessing: get only the inputs important for to the wrapped model (optimization).\n inputs_tuple = []\n for i, item in enumerate(inputs):\n input_dict = DataStreams({key: value for key,value in item.items() if key in self.module.input_data_definitions().keys()})\n inputs_tuple.append(input_dict)\n # Convert to tuple.\n inputs_tuple = tuple(inputs_tuple)\n\n # Scatter inputs into several tuples.\n inputs_tuple, kwargs = self.scatter(inputs_tuple, kwargs, self.device_ids)\n\n # Create replicas of the module on all devices.\n replicas = self.replicate(self.module, self.device_ids[:len(inputs_tuple)])\n\n # Pass scattered inputs throught those replicas.\n self.parallel_apply(replicas, inputs_tuple, kwargs)\n\n # Gather tuple. This cannot be done \"in place\"!\n gathered_tuple = self.gather(inputs_tuple, self.output_device)\n\n # Return 0-th tuple, i.e. a single DataStreams on device 0.\n return gathered_tuple[0]", "def cpu(self):\n self.reader.model.cpu()\n self.reader.device = torch.device(\"cpu\")\n return self", "def create_cpu():\n return CPU()", "def wrap_jiant_forward(\n jiant_model: Union[JiantModel, nn.DataParallel],\n batch: tasks.BatchMixin,\n task: tasks.Task,\n compute_loss: bool = False,\n):\n assert isinstance(jiant_model, (JiantModel, nn.DataParallel))\n is_multi_gpu = isinstance(jiant_model, nn.DataParallel)\n model_output = construct_output_from_dict(\n jiant_model(\n batch=batch.to_dict() if is_multi_gpu else batch, task=task, compute_loss=compute_loss,\n )\n )\n if is_multi_gpu:\n model_output.loss = model_output.loss.mean()\n return model_output", "def build_model(self):\n if self.args.network_type == 'unet':\n self.shared = models.Unet(self.args)\n else:\n raise NotImplementedError(f'Network type '\n f'`{self.args.network_type}` is not '\n f'defined')\n self.controller = models.Controller(self.args)\n\n if self.args.num_gpu == 1:\n self.shared.cuda()\n self.controller.cuda()\n elif self.args.num_gpu > 1:\n raise NotImplementedError('`num_gpu > 1` is in progress')", "def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n\n model = get_detector(cfg, checkpoint_path, device=\"cpu\")\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)\n\n return model, tensor_data", "def __call__(self, data):\n return self.model(data.cuda())", "def forward(self, *inputs, **kwargs):\n if not self.device_ids:\n return self.module(*inputs, **kwargs)\n\n for t in chain(self.module.parameters(), self.module.buffers()):\n if t.device != self.src_device_obj:\n raise RuntimeError(\"module must have its parameters and buffers \"\n \"on device {} (device_ids[0]) but found one of \"\n \"them on device: {}\".format(self.src_device_obj, t.device))\n\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(*inputs[0], **kwargs[0])\n replicas = self.replicate(self.module, 
self.device_ids[:len(inputs)])\n outputs = self.parallel_apply(replicas, inputs, kwargs)\n return self.gather(outputs, self.output_device)", "def PyTorchWrapper(\n pytorch_model: Any,\n convert_inputs: Optional[Callable] = None,\n convert_outputs: Optional[Callable] = None,\n) -> Model[Any, Any]:\n if convert_inputs is None:\n convert_inputs = convert_pytorch_default_inputs\n if convert_outputs is None:\n convert_outputs = convert_pytorch_default_outputs\n return Model(\n \"pytorch\",\n forward,\n attrs={\"convert_inputs\": convert_inputs, \"convert_outputs\": convert_outputs},\n shims=[PyTorchShim(pytorch_model)],\n dims={\"nI\": None, \"nO\": None},\n )", "def model_wrapper(self):\n original = self.args.rnn_type\n if(self.args.rnn_type=='DeepCoNN'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='TRANSNET'):\n self.args.rnn_type = 'RAW_MSE_MAX_CNN_FM_TNET'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='DATT'):\n self.args.rnn_type ='RAW_MSE_DUAL_DOT'\n self.args.base_encoder = 'Flat'\n elif(self.args.rnn_type=='MPCN'):\n self.args.rnn_type = 'RAW_MSE_MPCN_FN_FM'\n self.args.base_encoder = 'NBOW'\n\n print(\"Conversion to {} | base:{}\".format(\n self.args.rnn_type,\n self.args.base_encoder))", "def cpu(self) -> 'BatchEmbedder':\n self.is_cuda = False\n self.embedding_layer = self.embedding_layer.cpu()\n\n return self", "def keras_model_fn_cpu(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n with tf.device('/cpu:0'):\n ## build model\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = False, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.LSTM(lstm_hs,recurrent_activation = 'sigmoid', return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.GRU(gru_hs, reset_after = True, recurrent_activation = 'sigmoid', return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n\n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer=ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics=['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)])\n return model", "def train_wrapper(model):\n if FLAGS.pretrained_model:\n model.load(FLAGS.pretrained_model)\n # load data\n train_input_handle, test_input_handle = datasets_factory.data_provider(\n FLAGS.dataset_name,\n FLAGS.train_data_paths,\n FLAGS.valid_data_paths,\n FLAGS.batch_size * FLAGS.n_gpu,\n FLAGS.img_width,\n seq_length=FLAGS.total_length,\n is_training=True)\n\n eta = FLAGS.sampling_start_value\n\n for itr in range(1, FLAGS.max_iterations + 1):\n if train_input_handle.no_batch_left():\n 
train_input_handle.begin(do_shuffle=True)\n ims = train_input_handle.get_batch()\n if FLAGS.dataset_name == 'penn':\n ims = ims['frame']\n ims = preprocess.reshape_patch(ims, FLAGS.patch_size)\n\n eta, real_input_flag = schedule_sampling(eta, itr)\n\n trainer.train(model, ims, real_input_flag, FLAGS, itr)\n\n if itr % FLAGS.snapshot_interval == 0:\n model.save(itr)\n\n if itr % FLAGS.test_interval == 0:\n trainer.test(model, test_input_handle, FLAGS, itr)\n\n train_input_handle.next()", "def run(model_path):\n print(\"initialize\")\n\n # select device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"device: %s\" % device)\n\n # load network\n model = MidasNet(model_path, non_negative=True)\n\n transform = Compose(\n [\n Resize(\n 384,\n 384,\n resize_target=None,\n keep_aspect_ratio=True,\n ensure_multiple_of=32,\n resize_method=\"upper_bound\",\n image_interpolation_method=cv2.INTER_CUBIC,\n ),\n NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n PrepareForNet(),\n ]\n )\n\n model.to(device)\n model.eval()\n\n cap = cv2.VideoCapture(1)\n print(\"is camera open\", cap.isOpened())\n cap.set(3,320)\n cap.set(4,240)\n print(\"start processing\")\n\n i = 0\n while cap.isOpened():\n start = time.time()\n ret, frame = cap.read()\n print(\"new frame\", ret)\n p1 = time.time()\n print(f\"take a picture {p1 - start}\")\n if ret:\n img = utils.process_camera_img(frame)\n img_input = transform({\"image\": img})[\"image\"]\n p2 = time.time()\n print(f\"transoform image {p2 - p1}\")\n # compute\n with torch.no_grad():\n sample = torch.from_numpy(img_input).to(device).unsqueeze(0)\n p3 = time.time()\n print(f\"from numpy to cuda {p3 - p2}\")\n prediction = model.forward(sample)\n p4 = time.time()\n print(f\"prediction {p4 - p3}\")\n prediction = (\n torch.nn.functional.interpolate(\n prediction.unsqueeze(1),\n size=img.shape[:2],\n mode=\"bicubic\",\n align_corners=False,\n )\n .squeeze()\n .cpu()\n .numpy()\n )\n p5 = time.time()\n print(f\"prediction from cuda to cpu {p5 - p4}\")\n\n\n # output\n\n r = random.randint(0, 10000)\n cv2.imwrite(f\"output/input-{i}-{r}.png\", frame)\n utils.write_depth(f\"output/depth-{i}-{r}\", prediction, bits=2)\n p6 = time.time()\n print(f\"save input and write depth {p6 - p5}\")\n\n cv2.imshow('frame', frame)\n cv2.imshow('prediction', prediction)\n p7 = time.time()\n print(f\"show images {p7 - p6}\")\n i += 1\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n print(\"Camera is not recording\")\n print(f\"image took {time.time() - start} s\")\n print(\"\\n-----------------------\\n\")\n\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n\n print(\"finished\")", "def forward(self, *inputs) -> torch.Tensor:\n return self.model(*inputs)", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n model.eval()\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n with torch.no_grad():\n model(*dummy_input)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def mount(xpu, model):\n # Unwrap the core model if necessary\n model = xpu.raw(model)\n model = xpu.move(model)\n if xpu._device_ids and len(xpu._device_ids) > 1:\n model = ContainerDataParallel(\n model, device_ids=xpu._device_ids,\n output_device=xpu._main_device_id)\n else:\n model = DataSerial(model)\n return model", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n\n 
model.eval()\n with torch.no_grad():\n model(*dummy_input)", "def _create_model(self):\n if torch.cuda.is_available():\n model = torch.jit.load(self.torch_jit).cuda()\n else:\n model = torch.jit.load(self.torch_jit)\n model.eval()\n return model", "def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)", "def build_model(self):\r\n self.images, self.labels = self.dataloader.get_model_inputs()\r\n\r\n model = SimpleModel(self.images, self.labels, output_dim=F.output_dim, scope='source_regressor')\r\n self.out, _ = model.get_model()\r\n self.get_loss()", "def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):\n options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)\n if is_deepspeed_available():\n options += (DeepSpeedEngine,)\n\n while isinstance(model, options):\n model = model.module\n\n if not keep_fp32_wrapper:\n forward = getattr(model, \"forward\")\n original_forward = model.__dict__.pop(\"_original_forward\", None)\n if original_forward is not None:\n while hasattr(forward, \"__wrapped__\"):\n forward = forward.__wrapped__\n if forward == original_forward:\n break\n model.forward = forward\n if getattr(model, \"_converted_to_transformer_engine\", False):\n convert_model(model, to_transformer_engine=False)\n return model", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and \\\n self.num_gpu > 1:\n out = nn.parallel.data_parallel(\n self.layer, input, range(self.num_gpu))\n else:\n out = self.layer(input)\n # flatten output\n return out", "def run_model(model):\n\n model.create_initialised_input()\n\n model.run_from_buffer()\n\n output = model.output_parse()\n return output", "def cpu(self):\n for key, value in self.__dict__.items():\n self.__dict__[key] = value.cpu()\n return self", "def model_setup(self):\n self.input_a = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_A\")\n self.input_b = tf.placeholder(\n tf.float32, [\n 1,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"input_B\")\n\n self.fake_pool_A = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_A\")\n self.fake_pool_B = tf.placeholder(\n tf.float32, [\n None,\n model.IMG_WIDTH,\n model.IMG_HEIGHT,\n model.IMG_CHANNELS\n ], name=\"fake_pool_B\")\n\n self.global_step = slim.get_or_create_global_step()\n\n self.num_fake_inputs = 0\n\n self.learning_rate = tf.placeholder(tf.float32, shape=[], name=\"lr\")\n\n inputs = {\n 'images_a': self.input_a,\n 'images_b': self.input_b,\n 'fake_pool_a': self.fake_pool_A,\n 'fake_pool_b': self.fake_pool_B,\n }\n\n outputs = model.get_outputs(\n inputs, network=self._network_version, skip=self._skip)\n\n self.prob_real_a_is_real = outputs['prob_real_a_is_real']\n self.prob_real_b_is_real = outputs['prob_real_b_is_real']\n self.fake_images_a = outputs['fake_images_a']\n self.fake_images_b = outputs['fake_images_b']\n self.prob_fake_a_is_real = outputs['prob_fake_a_is_real']\n self.prob_fake_b_is_real = outputs['prob_fake_b_is_real']\n\n self.cycle_images_a = outputs['cycle_images_a']\n 
self.cycle_images_b = outputs['cycle_images_b']\n\n self.prob_fake_pool_a_is_real = outputs['prob_fake_pool_a_is_real']\n self.prob_fake_pool_b_is_real = outputs['prob_fake_pool_b_is_real']" ]
[ "0.6402771", "0.6401", "0.6344216", "0.63228977", "0.62614536", "0.6219543", "0.60333186", "0.6002807", "0.5994314", "0.5940613", "0.59332347", "0.5912314", "0.5864584", "0.58230406", "0.5788221", "0.5746694", "0.5739335", "0.5731383", "0.5720366", "0.57131535", "0.5707247", "0.57039", "0.56870764", "0.5676476", "0.5676312", "0.5658201", "0.5658201", "0.56392395", "0.5639168", "0.56207824" ]
0.76973623
0
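The `with_cpu` record above elides the function body. As a rough illustration of the idea in its query — run the wrapped model on the host, converting inputs and outputs between device arrays and numpy — here is a minimal sketch; it is not the library's actual implementation, the `_to_cpu`/`_to_device` helpers and the use of `ops.asarray` are assumptions for the example, and the gradient/backprop path is omitted.

```python
import numpy

def with_cpu(ops, model):
    # Sketch only: run `model` on host memory, converting device arrays to
    # numpy on the way in and back to the caller's device on the way out.
    def forward(inputs):
        cpu_outputs = model(_to_cpu(inputs))
        return _to_device(ops, cpu_outputs)
    return forward

def _to_cpu(X):
    # Recursively pull arrays back to the host; `.get()` is how cupy-style
    # device arrays expose a numpy copy. Plain numpy arrays pass through.
    if isinstance(X, (tuple, list)):
        return type(X)(_to_cpu(x) for x in X)
    if hasattr(X, "get"):
        return X.get()
    return X

def _to_device(ops, X):
    # Mirror image of _to_cpu: re-allocate numpy outputs on the caller's device.
    if isinstance(X, (tuple, list)):
        return type(X)(_to_device(ops, x) for x in X)
    return ops.asarray(X)
```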
Build a simple CNN text classifier, given a token-to-vector model as inputs. If exclusive_classes=True, a softmax nonlinearity is applied, so that the outputs sum to 1. If exclusive_classes=False, a logistic nonlinearity is applied instead, so that outputs are in the range [0, 1].
def build_simple_cnn_text_classifier( tok2vec, nr_class, exclusive_classes: bool = ..., **cfg ): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify(text):\n # TODO Wonder if there's a better way of doing this so the model persists across fucn calls. Will see once I get\n # Heroku running\n\n sentences = sent_tokenize(text)\n clean_sentences = list(map(clean_text, sentences))\n word_tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n # tokenize\n _input = word_tokenizer(clean_sentences, padding=True, return_tensors='pt', return_attention_mask=True, return_length=True)\n\n # pass tokenized text thru model\n model = LSTM(dr=.3)\n state_dict = torch.load(os.path.join('model', 'model.pt'), map_location=torch.device('cpu'))\n model.load_state_dict(state_dict['model_state_dict'])\n\n model.eval()\n with torch.no_grad():\n model_output = model(_input['input_ids'], _input['length'], _input['attention_mask'])\n\n # We start with: A list of command names, a list of sentences, a matrix with\n # each row corresponding to a sentence and each column corresponding to a label's probability of being\n # represented in the sentence The list of command names is parallel to the columns of the matrix\n\n # We want to end with a nested dict with sentences as keys and dicts of label : probability pairs as values\n labels = model_output.topk(3)\n\n label_indices = labels[0].tolist()\n probabilities = labels[1].tolist()\n\n with open(os.path.join('resources', 'label_names.txt')) as f:\n command_names = f.read().splitlines()\n\n output = dict()\n for i, row in enumerate(probabilities): # TODO vectorize this if possible\n sent = sentences[i]\n output[sent] = {command_names[idx]: label_indices[i][j] for j, idx in enumerate(row)}\n\n return output", "def classify_text(classifier, sentence):\n\n sentence = Sentence(sentence)\n classifier.predict(sentence, multi_class_prob=True)\n return sentence.labels", "def classify (self, text_test):\n test_features = self.vectorizer.transform(text_test)\n return self.nbc.predict(test_features)", "def classify_token(device, tokenizer, model):\n tokens = tokenization_helper.tokenize_input_sentence(tokenizer,\n FLAGS.sentence, '')\n tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(\n tokenizer, tokens, device)\n layers_act = inference_helper.run_inference_vanilla(tokens_tensor,\n segments_tensor, model)\n token_act = layers_act[0][FLAGS.layer_id][FLAGS.word_id]\n classification_head = classifier_helper.get_classification_head(\n device, FLAGS.layer_id, FLAGS.trained_variables_dir)\n y = token_act.matmul(classification_head)\n y = torch.sigmoid(y)\n print('Prediction: {}'.format(y.item()))", "def classifier(model):\n \n model.classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(model.classifier[0].in_features, 4096)),\n ('fc2', nn.Linear(4096, 102)),\n ('relu', nn.ReLU()),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n return model", "def create_classifier(model, hidden_units=None):\n\n defaul_nb_units = 4096\n nb_units = hidden_units if hidden_units else defaul_nb_units\n \n input_features = model.classifier[0].in_features\n \n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(input_features, nb_units, bias=True)),\n ('relu1', nn.ReLU()),\n ('dropout1', nn.Dropout(p=0.5)),\n ('fc2', nn.Linear(nb_units, 102, bias=True)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n \n return classifier", "def build_classifier(model, hidden_units):\n in_features = model.classifier._modules['0'].in_features\n classifier = nn.Sequential(OrderedDict([\n ('dropout1', nn.Dropout(0.5)),\n ('fc1', nn.Linear(in_features, hidden_units)), \n ('relu', nn.ReLU()),\n ('dropout2', 
nn.Dropout(0.5)),\n ('fc2', nn.Linear(hidden_units, 102)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n return classifier", "def build_character_cnn(model_hyperparameters=None, verbose=None):\r\n if model_hyperparameters is None:\r\n model_hyperparameters = _dutils.load_dictionary('model_hyperparameters.json')\r\n '''\r\n Load hyperparameter-specific values from JSON file.\r\n '''\r\n #The size of the characater vocabulary\r\n vocabulary_size = model_hyperparameters.get(\"vocabulary_size\")\r\n #The max length of the text. Set as 1014 in the original.\r\n text_length = model_hyperparameters.get(\"text_length\")\r\n #Number of filters for each convolutional layer\r\n num_filters = model_hyperparameters.get(\"num_filters\")\r\n #The threshold for the ReLU activation layers\r\n threshold = model_hyperparameters.get(\"relu_threshold\")\r\n #Dropout probability for Dropout layers\r\n dropout_p = model_hyperparameters.get(\"dropout_percent\")\r\n #Embedding output dimension. Implementation sets it equal to vocabulary_size\r\n embed_dim = model_hyperparameters.get(\"embedding_dimension\")\r\n '''\r\n Values below specify the architecture.\r\n These aren't stored in the JSON file due to\r\n architectutre constraints with layers and\r\n kernel sizes.\r\n '''\r\n #The number of units for each dense layer minus output layer\r\n fully_connected_layers = [128,64]\r\n '''\r\n conv_layers is a list of pairs.\r\n First component refers to kernel size.\r\n Second component refers to the size of\r\n the MaxPooling1D layer (-1 indicates said layer is not present).\r\n '''\r\n conv_layers = [[7, 3], [3,-1], [3,-1], [3,-1], [3, 3]]\r\n #Input layer\r\n inputs = Input(shape=(text_length,), name='sent_input', dtype='int32')\r\n #Embedding layers\r\n x = Embedding(vocabulary_size + 1, embed_dim, input_length=text_length, mask_zero=True)(inputs)\r\n #Convolution layers\r\n '''\r\n First Conv1D layer + MaxPooling is separate in case\r\n changes are made upstream. 
Also it was used to test out\r\n TimeDistributed functionality.\r\n '''\r\n x = (Convolution1D(num_filters, 7))(x)\r\n x = (MaxPooling1D(3))(x)\r\n for cl in conv_layers:\r\n x = (Convolution1D(num_filters, cl[0]))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n if cl[1] != -1:\r\n x = (MaxPooling1D(cl[1]))(x)\r\n\r\n x = Flatten()(x)\r\n # #Fully connected layers\r\n for fl in fully_connected_layers:\r\n '''\r\n Original architecture did not use L2 regularization.\r\n However, empirical results show that, for my dataset\r\n it works well in handling overfitting.\r\n '''\r\n x = Dense(fl, kernel_regularizer=regularizers.l2(0.0001))(x)\r\n x = ThresholdedReLU(threshold)(x)\r\n '''\r\n Original architecture had dropout at 50%.\r\n This seemed to be too high for my dataset, and\r\n it resulted in underfitting.\r\n '''\r\n x = Dropout(dropout_p)(x)\r\n # #Output layer\r\n predictions = Dense(vocabulary_size, activation='softmax')(x)\r\n # Build and compile model\r\n model = Model(inputs=inputs, outputs=predictions) \r\n if verbose:\r\n model.summary()\r\n return model", "def test_RecurrentNeuralNetwork_build_classification() -> None:\n vectorizer = Vectorizer('glove.6B.50d.txt')\n input_shape = {\n 'pos': (len(vectorizer.pos2index), 10),\n 'shape': (len(vectorizer.shape2index), 2)\n }\n rnn = RecurrentNeuralNetwork.build_classification(vectorizer.word_embeddings, input_shape, 1)\n assert isinstance(rnn._model, Model)", "def cnn_model(features, labels, mode):\r\n # Convert indexes of words into embeddings.\r\n # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then\r\n # maps word indexes of the sequence into [batch_size, sequence_length,\r\n # EMBEDDING_SIZE].\r\n word_vectors = tf.contrib.layers.embed_sequence(\r\n features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)\r\n word_vectors = tf.expand_dims(word_vectors, 3)\r\n with tf.variable_scope('CNN_Layer1'):\r\n # Apply Convolution filtering on input sequence.\r\n conv1 = tf.layers.conv2d(\r\n word_vectors,\r\n filters=N_FILTERS,\r\n kernel_size=FILTER_SHAPE1,\r\n padding='VALID',\r\n # Add a ReLU for non linearity.\r\n activation=tf.nn.relu)\r\n # Max pooling across output of Convolution+Relu.\r\n pool1 = tf.layers.max_pooling2d(\r\n conv1,\r\n pool_size=POOLING_WINDOW,\r\n strides=POOLING_STRIDE,\r\n padding='SAME')\r\n # Transpose matrix so that n_filters from convolution becomes width.\r\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\r\n with tf.variable_scope('CNN_Layer2'):\r\n # Second level of convolution filtering.\r\n conv2 = tf.layers.conv2d(\r\n pool1,\r\n filters=N_FILTERS,\r\n kernel_size=FILTER_SHAPE2,\r\n padding='VALID')\r\n # Max across each filter to get useful features for classification.\r\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\r\n\r\n # Apply regular WX + B and classification.\r\n logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)\r\n\r\n predicted_classes = tf.argmax(logits, 1)\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n return tf.estimator.EstimatorSpec(\r\n mode=mode,\r\n predictions={\r\n 'class': predicted_classes,\r\n 'prob': tf.nn.softmax(logits)\r\n })\r\n\r\n loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\r\n train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())\r\n return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\r\n\r\n eval_metric_ops = {\r\n 'accuracy': 
tf.metrics.accuracy(\r\n labels=labels, predictions=predicted_classes)\r\n }\r\n return tf.estimator.EstimatorSpec(\r\n mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)", "def cnn_model(features, labels, mode):\n # Convert indexes of words into embeddings.\n # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then\n # maps word indexes of the sequence into [batch_size, sequence_length,\n # EMBEDDING_SIZE].\n word_vectors = tf.contrib.layers.embed_sequence(\n features[WORDS_FEATURE], vocab_size=n_words, embed_dim=\n CNN_PARAMS.EMBEDDING_SIZE)\n\n # Inserts a dimension of 1 into a tensor's shape.\n word_vectors = tf.expand_dims(word_vectors, 3)\n\n with tf.variable_scope('CNN_Layer1'):\n # Apply Convolution filtering on input sequence.\n conv1 = tf.layers.conv2d(\n word_vectors,\n filters=CNN_PARAMS.N_FILTERS,\n kernel_size=CNN_PARAMS.FILTER_SHAPE1,\n padding='VALID',\n # Add a ReLU for non linearity.\n activation=tf.nn.relu)\n # Max pooling across output of Convolution+Relu.\n pool1 = tf.layers.max_pooling2d(\n conv1,\n pool_size=CNN_PARAMS.POOLING_WINDOW,\n strides=CNN_PARAMS.POOLING_STRIDE,\n padding='SAME')\n # Transpose matrix so that n_filters from convolution becomes width.\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n with tf.variable_scope('CNN_Layer2'):\n # Second level of convolution filtering.\n conv2 = tf.layers.conv2d(\n pool1,\n filters=CNN_PARAMS.N_FILTERS,\n kernel_size=CNN_PARAMS.FILTER_SHAPE2,\n padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Apply regular WX + B and classification.\n logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)\n predicted_classes = tf.argmax(logits, 1)\n\n return estimator_spec_for_softmax_classification(\n logits=logits, labels=labels, mode=mode)", "def build_model_mobilenet(num_classes):", "def train_classifier(X, y, Cs=10):\n cls = LogisticRegressionCV(Cs=Cs, random_state=0, solver='lbfgs', max_iter=10000)\n cls.fit(X, y)\n return cls", "def softmax_class_vector(nr_class, *, exclusive_classes=True, **cfg):\n width = cfg[\"token_vector_width\"]\n return chain(\n get_class_tokens,\n flatten_add_lengths,\n Pooling(mean_pool),\n Softmax(nr_class, width),\n )", "def lstm_classifier(**kwargs):\n input_vector_size = kwargs.get('input_vector_size', 128)\n dense_size = kwargs.get('dense_size', 20)\n output = kwargs.get('label_size', 2)\n timesteps = 1\n xav_init = tf.contrib.layers.xavier_initializer()\n adam = optimizers.Adam(lr=0.01)\n sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)\n ##########\n\n model = Sequential()\n model.add(CuDNNLSTM(64))\n model.add(Dense(20, activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(20, activation='softmax', \n kernel_initializer='glorot_normal',\n activity_regularizer=regularizers.l2(0.001)))\n model.add(Dropout(0.2))\n model.add(Dense(2, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n \n return model", "def generate_cnn_model(num_classes, num_words):\n def cnn_model(features, target):\n # Create embeddings and map\n\n target = tf.one_hot(target, num_classes, 1, 0)\n word_vectors = tf.contrib.layers.embed_sequence(\n features, vocab_size=num_words, embed_dim=EMBEDDING_SIZE, scope='words')\n word_vectors = tf.expand_dims(word_vectors, 3)\n\n # First Layer here!!!!!!!\n with 
tf.variable_scope('CNN_MODEL_layer1'):\n # First layer convolution filtering on sequence\n conv1 = tf.contrib.layers.convolution2d(\n word_vectors, N_FILTERS, FILTER_SHAPE1, padding='VALID')\n # First layler adding a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # First layler Max pooling\n pool1 = tf.nn.max_pool(\n conv1,\n ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1],\n padding='SAME')\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n\n # Second Layer here!!!!!!!\n with tf.variable_scope('CNN_MODEL_layer2'):\n conv2 = tf.contrib.layers.convolution2d(\n pool1, N_FILTERS, FILTER_SHAPE2, padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Fully_conncted pool2 and classes\n logits = tf.contrib.layers.fully_connected(pool2, num_classes, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss,\n tf.contrib.framework.get_global_step(),\n optimizer='Adam',\n learning_rate=LEARNING_RATE)\n\n return ({\n 'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)\n }, loss, train_op)\n\n return cnn_model", "def classify_string(self, s, **kwargs):\n\n token = GoldTagPOSToken(s, goldlabel=\"NONE\")\n\n sio = StringIO()\n\n # TODO: Fix the behavior of write_gram such that we can just do it from a string.\n intent.igt.grams.write_gram(token, type='classifier', output=sio, **kwargs)\n\n c_token = sio.getvalue().strip()\n sio.close()\n\n result = self.classify(c_token)\n return result", "def build(self, input_image, num_class):\n x = build_resnet(101)\n # add classifier\n x = Conv2D(num_class, (1, 1), kernel_initializer='he_normal', activation='linear', padding='valid', strides=(1, 1), kernel_regularizer=l2(weight_decay))(x)", "def classify_sentiment(self, model, sentence, tokenizer, min_len=32):\n model = WrapperModel(model)\n PAD_IND = tokenizer.pad_token_id\n indexed = tokenizer([sentence],\n padding=\"max_length\",\n truncation=True,\n max_length=32,\n return_tensors=\"pt\")\n text = tokenizer.convert_ids_to_tokens(indexed['input_ids'][0])\n\n if len(text) < min_len:\n text += ['pad'] * (min_len - len(text))\n\n model.zero_grad()\n\n\n # predict\n preds = F.softmax(model(**indexed), dim=-1)\n pred_ind = torch.argmax(preds.squeeze()).item()\n pred = torch.max(preds)\n return pred, LABEL_MAP[pred_ind], pred_ind", "def create_classification_model(include_top=True,\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n\n\n img_input = Input(shape=input_shape)\n # Block 1\n x = Conv2D(16, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(32, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)\n\n if include_top:\n # Classification block\n x = Flatten(name='flatten')(x)\n x = Dense(512, 
activation='relu', name='fc1')(x)\n x = Dense(128, activation='relu', name='fc2')(x)\n x = Dense(classes, activation='softmax', name='predictions')(x)\n else:\n if pooling == 'avg':\n x = GlobalAveragePooling2D()(x)\n elif pooling == 'max':\n x = GlobalMaxPooling2D()(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n # Create model.\n model = Model(inputs, x, name='vgg19')\n\n # # load weights\n # if weights == 'imagenet':\n # if include_top:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n # WEIGHTS_PATH,\n # cache_subdir='models')\n # else:\n # weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n # WEIGHTS_PATH_NO_TOP,\n # cache_subdir='models')\n # model.load_weights(weights_path)\n # if K.backend() == 'theano':\n # layer_utils.convert_all_kernels_in_model(model)\n #\n # if K.image_data_format() == 'channels_first':\n # if include_top:\n # maxpool = model.get_layer(name='block5_pool')\n # shape = maxpool.output_shape[1:]\n # dense = model.get_layer(name='fc1')\n # layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n #\n # if K.backend() == 'tensorflow':\n # warnings.warn('You are using the TensorFlow backend, yet you '\n # 'are using the Theano '\n # 'image data format convention '\n # '(`image_data_format=\"channels_first\"`). '\n # 'For best performance, set '\n # '`image_data_format=\"channels_last\"` in '\n # 'your Keras config '\n # 'at ~/.keras/keras.json.')\n return model", "def create_classifier(config):\n\n feature_columns = list(featurizer.create_feature_columns().values())\n\n deep_columns, wide_columns = featurizer.get_deep_and_wide_columns(\n feature_columns\n )\n\n linear_optimizer = tf.train.FtrlOptimizer(learning_rate=parameters.HYPER_PARAMS.learning_rate)\n dnn_optimizer = tf.train.AdagradOptimizer(learning_rate=parameters.HYPER_PARAMS.learning_rate)\n\n classifier = tf.estimator.DNNLinearCombinedClassifier(\n\n n_classes=len(metadata.TARGET_LABELS),\n label_vocabulary=metadata.TARGET_LABELS,\n\n linear_optimizer=linear_optimizer,\n linear_feature_columns=wide_columns,\n\n dnn_feature_columns=deep_columns,\n dnn_optimizer=dnn_optimizer,\n\n weight_column=metadata.WEIGHT_COLUMN_NAME,\n\n dnn_hidden_units=construct_hidden_units(),\n dnn_activation_fn=tf.nn.relu,\n dnn_dropout=parameters.HYPER_PARAMS.dropout_prob,\n\n config=config,\n )\n\n print(\"creating a classification model: {}\".format(classifier))\n\n return classifier", "def multi_class5_classification_model_logits_sparse_labels() -> tf.keras.Model:\n\n # Build model\n model = tf.keras.Sequential(tf.keras.layers.Dense(5, activation=None))\n model.compile(optimizer=tf.keras.optimizers.Adam(),\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))\n\n return model", "def classifier(text):\n return random.choice([True, False])", "def build(input_shape, classes):\n model = Sequential()\n model.add(Conv2D(20, kernel_size=9, padding=\"same\", input_shape=input_shape,\n kernel_initializer='glorot_normal'))\n\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), dim_ordering=\"tf\"))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(40, kernel_size=5, padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), dim_ordering=\"tf\"))\n model.add(Dropout(0.3))\n\n model.add(Conv2D(40, 
kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(50, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(Conv2D(50, kernel_size=3, padding=\"same\"))\n model.add(Activation(\"relu\"))\n\n model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2), dim_ordering=\"tf\"))\n model.add(Dropout(0.3))\n\n model.add(Flatten())\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n model.add(Dropout(0.3))\n\n model.add(Dense(512))\n model.add(Activation(\"relu\"))\n\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n return model", "def class_predictor(num_anchors, num_classes):\n return nn.Conv2D(num_anchors * (num_classes + 1), 3, padding=1)", "def softmax_tanh_class_vector(nr_class, *, exclusive_classes=True, **cfg):\n width = cfg[\"token_vector_width\"]\n return chain(\n get_class_tokens,\n flatten_add_lengths,\n with_getitem(0, chain(Affine(width, width), tanh)),\n Pooling(mean_pool),\n Softmax(nr_class, width),\n )", "def build_cnn_classification_model(self, input_shape):\n\n input_image = Input(shape=input_shape)\n\n conv_1 = Conv2D(32, (3, 3), activation='relu')(input_image)\n pool_1 = MaxPooling2D((2, 2))(conv_1)\n\n conv_2 = Conv2D(32, (3, 3), activation='relu')(pool_1)\n pool_2 = MaxPooling2D((2, 2))(conv_2)\n\n conv_3 = Conv2D(64, (3, 3), activation='relu')(pool_2)\n pool_3 = MaxPooling2D((2, 2))(conv_3)\n\n flatten = Flatten()(pool_3)\n dense = Dense(64, activation='relu')(flatten)\n dropout = Dropout(0.5)(dense)\n\n prediction = Dense(1, activation='sigmoid')(dropout)\n\n cnn_classification_model = Model(inputs=input_image, outputs=prediction)\n\n cnn_classification_model.compile(loss=self._loss_function, optimizer='rmsprop', metrics=['accuracy'])\n\n return cnn_classification_model", "def cnn_model(x, y):\n # Convert indexes of words into embeddings.\n # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then\n # maps word indexes of the sequence into [batch_size, sequence_length,\n # EMBEDDING_SIZE].\n y = tf.one_hot(y, NUMBER_OF_CATEGORIES, 1, 0)\n word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,\n embedding_size=EMBEDDING_SIZE, name='words')\n word_vectors = tf.expand_dims(word_vectors, 3)\n with tf.variable_scope('CNN_Layer1'):\n # Apply Convolution filtering on input sequence.\n conv1 = tf.contrib.layers.convolution2d(word_vectors, N_FILTERS,\n FILTER_SHAPE1, padding='VALID')\n # Add a RELU for non linearity.\n conv1 = tf.nn.relu(conv1)\n # Max pooling across output of Convolution+Relu.\n pool1 = tf.nn.max_pool(conv1, ksize=[1, POOLING_WINDOW, 1, 1],\n strides=[1, POOLING_STRIDE, 1, 1], padding='SAME')\n # Transpose matrix so that n_filters from convolution becomes width.\n pool1 = tf.transpose(pool1, [0, 1, 3, 2])\n with tf.variable_scope('CNN_Layer2'):\n # Second level of convolution filtering.\n conv2 = tf.contrib.layers.convolution2d(pool1, N_FILTERS,\n FILTER_SHAPE2, padding='VALID')\n # Max across each filter to get useful features for classification.\n pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])\n\n # Apply regular WX + B and classification.\n prediction, loss = learn.models.logistic_regression(pool2, y)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss, tf.contrib.framework.get_global_step(),\n optimizer='Adam', learning_rate=0.01)\n\n return {'class': tf.argmax(prediction, 1), 'prob': prediction}, loss, train_op", "def classify(texts: List[str], params: Any) -> List[str]:\n\n alpha = 1\n token_probs_pos = 
params[\"token_probs_pos\"]\n token_probs_neg = params[\"token_probs_neg\"]\n all_words = params[\"all_words\"]\n M = len(all_words)\n cnt_pos_docs = params[\"cnt_pos_docs\"]\n cnt_neg_docs = params[\"cnt_neg_docs\"]\n\n sum_len_neg = params[\"sum_len_neg\"]\n sum_len_pos = params[\"sum_len_pos\"]\n pos_dict = params[\"pos_dict\"]\n neg_dict = params[\"neg_dict\"]\n\n\n test_texts = preprocessing(texts)\n test_tokenized_texts = text_to_tokens(test_texts)\n \n res = []\n log_pos_probablity = 0\n log_neg_probablity = 0\n i = 0\n for text in test_tokenized_texts:\n if (i % 5000 == 0):\n print(\"Classified\", i, \"texts\")\n i += 1\n log_pos_probablity = log(cnt_pos_docs)\n log_neg_probablity = log(cnt_neg_docs)\n for token in text:\n if (token_probs_pos[token] == 0):\n token_probs_pos[token] = alpha / (alpha * M + sum_len_pos)\n else:\n log_pos_probablity += log(token_probs_pos[token])\n if (token_probs_neg[token] == 0):\n token_probs_neg[token] = alpha / (alpha * M + sum_len_neg)\n else:\n log_neg_probablity += log(token_probs_neg[token])\n if (log_neg_probablity > log_pos_probablity):\n res.append(\"neg\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # neg_dict[token] += text[token]\n # sum_len_neg += text[token]\n # token_probs_neg[token] = (alpha + neg_dict[token]) / (alpha * M + sum_len_neg)\n\n else:\n res.append(\"pos\")\n #for token in text:\n # all_words.add(token)\n # M = len(all_words)\n # pos_dict[token] += text[token]\n # sum_len_pos += text[token]\n # token_probs_pos[token] = (alpha + pos_dict[token]) / (alpha * M + sum_len_pos)\n\n\n \n print('Predicted labels counts:')\n print(count_labels(res))\n return res", "def fasttext_model(\n sentences, size=100, min_count=5, negative=5, window=5,\n cbow=True, iterations=5, seed=0, workers=1):\n cbow = 0 if cbow == 1 else 1\n model = FastText(\n sentences, size=size, min_count=min_count,\n negative=negative, window=window, sg=cbow, iter=iterations,\n seed=seed, workers=workers)\n\n return model" ]
[ "0.6299664", "0.6254263", "0.6134055", "0.604031", "0.6039286", "0.59022474", "0.58584005", "0.5856348", "0.5843683", "0.5827657", "0.5826641", "0.58193755", "0.5802258", "0.5767329", "0.5765888", "0.57630885", "0.5753324", "0.5748217", "0.5743231", "0.5742967", "0.5705073", "0.57020986", "0.56666875", "0.5656981", "0.5632872", "0.5617206", "0.5603505", "0.5593564", "0.5587133", "0.55870575" ]
0.8247789
0
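The classifier record above also elides the body; the library composes it from small layer combinators. The PyTorch-flavoured sketch below only illustrates the behaviour described in the query — a softmax head when classes are exclusive, a per-class sigmoid otherwise — and the class name, width, and kernel size are made up for the example.

```python
import torch
from torch import nn

class SimpleCNNTextClassifier(nn.Module):
    # Hypothetical sketch: a token-to-vector module feeds a 1D convolution,
    # mean pooling over the sequence, and a class layer whose nonlinearity
    # depends on `exclusive_classes`.
    def __init__(self, tok2vec: nn.Module, width: int, nr_class: int,
                 exclusive_classes: bool = True):
        super().__init__()
        self.tok2vec = tok2vec                      # [batch, seq] -> [batch, seq, width]
        self.cnn = nn.Conv1d(width, width, kernel_size=3, padding=1)
        self.out = nn.Linear(width, nr_class)
        self.exclusive_classes = exclusive_classes

    def forward(self, token_ids: torch.Tensor) -> torch.Tensor:
        vectors = self.tok2vec(token_ids)                        # [batch, seq, width]
        hidden = torch.relu(self.cnn(vectors.transpose(1, 2)))   # convolve over time
        pooled = hidden.mean(dim=2)                              # mean-pool the sequence
        scores = self.out(pooled)
        if self.exclusive_classes:
            return torch.softmax(scores, dim=-1)   # rows sum to 1
        return torch.sigmoid(scores)               # independent scores in [0, 1]
```

For instance, `SimpleCNNTextClassifier(nn.Embedding(10000, 96), width=96, nr_class=4)` gives a runnable toy model, since an embedding layer already maps `[batch, seq]` token ids to `[batch, seq, width]` vectors.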
Compose two or more models `f`, `g`, etc., such that their outputs are concatenated, i.e. `concatenate(f, g)(x)` computes `hstack(f(x), g(x))`
def concatenate_lists(*layers, **kwargs): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def concat_model():\n x = tf.keras.Input(shape=[10, 10, 3, ])\n x1 = tf.keras.layers.Conv2D(5, (2, 2))(x)\n x2 = tf.keras.layers.Conv2D(6, (2, 2))(x)\n x3 = tf.keras.layers.Conv2D(7, (2, 2))(x)\n z = tf.keras.layers.concatenate([x2, x1, x3], axis=-1)\n z1 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z2 = tf.keras.layers.Conv2D(10, (2, 2))(z)\n z = tf.add(z1, z2)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"concat_model\")(z)\n return output", "def __build_model_pyramid(name, model, features):\n return keras.layers.Concatenate(axis=1, name=name)([model(f) for f in features])", "def combined_model(video_frames, audio_frames):\n\n audio_features = audio_model([], audio_frames)\n visual_features = video_model(video_frames,[])\n\n return tf.concat(2, (audio_features, visual_features), name='concat')", "def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result", "def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result", "def forward(self, self_feats, aggregate_feats, neighs=None):\n combined = torch.cat([self_feats, aggregate_feats], dim=1)\n # [b_s, emb_size * 2]\n combined = F.relu(self.linear(combined)) # [b_s, emb_size]\n return combined", "def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)", "def Concat(*args, **kwargs):\n return _gdi_.GraphicsMatrix_Concat(*args, **kwargs)", "def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def concat_obs_and_action(obs, action):\n return F.concat((obs, action), axis=-1)", "def concatenate(tensors, axis=0):\n raise NotImplementedError", "def ConcatTransform(*args, **kwargs):\n return _gdi_.GraphicsContext_ConcatTransform(*args, **kwargs)", "def _concatenate_features(features):\n pass", "def concat(a, b):\n return torch.cat((a, b), 1)", "def concave_fun_eval(x):\r\n return np.stack([f1(x), f2(x)]), np.stack([f1_dx(x), f2_dx(x)])", "def concatenate_inputs(start=0):\n def wrap(function):\n @wraps(function)\n def wrapped_function(*args, **kwargs):\n \"\"\"Concatenate the input arguments.\"\"\"\n nargs = len(args) - start\n torch_objects = torch.Tensor\n if any(isinstance(arg, torch_objects) for arg in args[start:]):\n # reduce number of function calls in graph\n if nargs == 1:\n return function(*args, **kwargs)\n # concatenate extra arguments\n args = args[:start] + (torch.cat(args[start:], dim=1),)\n return function(*args, **kwargs)\n else:\n to_concatenate = list(map(np.atleast_2d, args[start:]))\n if nargs == 1:\n concatenated = tuple(to_concatenate)\n else:\n concatenated = (np.hstack(to_concatenate),)\n \n args = args[:start] + concatenated\n return function(*args, **kwargs)\n return wrapped_function\n\n return wrap", "def combineResult(self, *xpars):\n if len(xpars) == 0:\n xpars = self.__x\n xshape = self.__xshape\n else:\n assert len(xpars) == self.__nx # The input parameter number should be consistent.\n xshape = xpars[0].shape\n #-> Calculate the add model 
components\n addCmpDict = {}\n for modelName in self._addList:\n mf = self.__modelDict[modelName]\n addCmpDict[modelName] = mf(*xpars)\n #-> Manipulate the model components\n for modelName in self._mltList:\n mf = self.__modelDict[modelName]\n my = mf(*xpars) # multiplied y component\n #--> Multiply the current component to the target models\n for tmn in mf.multiList:\n addCmpDict[tmn] *= my\n #-> Add up all the add models\n result = np.zeros(xshape, dtype=self.dtype)\n #print addCmpDict\n for modelName in self._addList:\n result += addCmpDict[modelName]\n return result", "def concatenate(module, arrays, dimension):\n _import_modules()\n if module in [np, ma, jnp]:\n return module.concatenate(arrays, dimension)\n elif module == torch:\n return module.cat(arrays, dimension)\n elif module == tf:\n return tf.concat(arrays, axis=dimension)\n return UnknownModuleException(f\"Module {module.__name__} not supported.\")", "def brepalgo_ConcatenateWire(*args):\n return _BRepAlgo.brepalgo_ConcatenateWire(*args)", "def _concat(self, partial: Optional[O], outputs: O):\n raise NotImplementedError", "def concat(*args: Union[ObservableBase, Iterable[ObservableBase]]) -> ObservableBase:\n from ..operators.observable.concat import concat\n return concat(*args)", "def concatHMMs(hmmmodels, namelist):\n concat = hmmmodels[namelist[0]]\n for idx in range(1,len(namelist)):\n # print(hmmmodels[namelist[idx]])\n concat = concatTwoHMMs(concat, hmmmodels[namelist[idx]])\n return concat", "def concatHMMs(hmmmodels, namelist):\n concat = hmmmodels[namelist[0]]\n for idx in range(1,len(namelist)):\n concat = concatTwoHMMs(concat, hmmmodels[namelist[idx]])\n return concat", "def concatHMMs(hmmmodels, namelist):\n concat = hmmmodels[namelist[0]]\n for idx in range(1,len(namelist)):\n concat = concatTwoHMMs(concat, hmmmodels[namelist[idx]])\n return concat", "def concatHMMs(hmmmodels, namelist):\n concat = hmmmodels[namelist[0]]\n for idx in range(1,len(namelist)):\n concat = concatTwoHMMs(concat, hmmmodels[namelist[idx]])\n return concat", "def concat(xs, axis=1):\n return Concat(axis=axis)(*xs)", "def split_and_concat_model():\n x = tf.keras.Input(shape=[224, 224, 3, ])\n # TODO: implement split for the following commented out method of splitting\n # y1 = x[:, :100, :, :]\n # y2 = x[:, 101:, :, :]\n y1, y2 = tf.split(x, [100, 124], 1)\n y1 = tf.nn.relu(y1)\n y2 = tf.keras.layers.BatchNormalization()(y2)\n z = tf.keras.layers.concatenate([y1, y2], axis=1)\n z = tf.keras.layers.Flatten()(z)\n output = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name=\"split_and_concat_model\")(z)\n return output", "def conv_cond_concat(x, y):\n return T.concatenate([x, y*T.ones((x.shape[0], y.shape[1], x.shape[2], x.shape[3], x.shape[4]))], axis=1)", "def concatenate_data():" ]
[ "0.70515543", "0.5997107", "0.5982442", "0.5935776", "0.5935776", "0.58978873", "0.5835013", "0.5824077", "0.5802191", "0.5802128", "0.5802128", "0.5794551", "0.5778237", "0.57725596", "0.57704157", "0.5757591", "0.5691272", "0.5679168", "0.5676331", "0.5657232", "0.56446177", "0.5640622", "0.5601784", "0.56016314", "0.56016314", "0.56016314", "0.55868757", "0.55598795", "0.5557888", "0.5557053" ]
0.6018841
1
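`concatenate_lists` above is likewise truncated. The core idea in its query is just a combinator that applies every wrapped model to the same input and stacks the outputs column-wise; a framework-free sketch in plain numpy, with illustrative names only, follows.

```python
import numpy

def concatenate(*layers):
    # Combinator sketch: concatenate(f, g)(x) == numpy.hstack([f(x), g(x)]),
    # so the output width is the sum of the individual output widths.
    if not layers:
        raise ValueError("concatenate() needs at least one model")
    def forward(X):
        return numpy.hstack([layer(X) for layer in layers])
    return forward

# Example: two toy "models" over a (4, 3) batch produce a (4, 6) result.
f = lambda X: X * 2.0
g = lambda X: X + 1.0
assert concatenate(f, g)(numpy.ones((4, 3))).shape == (4, 6)
```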
Convert a model into a BERT-style masked language model
def masked_language_model(vocab, model, mask_prob=...): ...
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)", "def model_fn(model_dir):\n\n model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking', \n num_labels=1)\n model = torch.nn.DataParallel(model)\n with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:\n model.load_state_dict(torch.load(f))\n \n return {\"net\": model, \"tokenizer\": tokenizer}", "def _make_bert_compatifier(do_masking):\n\n def bert_compatibility(data):\n # data['input_word_ids'] = data.pop('maybe_masked_input_ids')\n # data['input_mask'] = data.pop('token_mask')\n\n if do_masking:\n x = {\n 'input_word_ids': data['maybe_masked_input_ids'],\n 'input_mask': data['op_token_mask'],\n 'input_type_ids': tf.zeros_like(data['op_token_mask']), # segment ids\n 'masked_lm_positions': data['masked_lm_positions'],\n 'masked_lm_ids': data['masked_lm_ids'],\n 'masked_lm_weights': data['masked_lm_weights'],\n # next_sentence_label = 1 if instance.is_random_next else 0\n 'next_sentence_labels': tf.constant([0], tf.int32)\n }\n\n # y = data['masked_lm_weights']\n\n else:\n x = {\n 'input_word_ids': data['maybe_masked_input_ids'],\n 'input_mask': data['op_token_mask'],\n 'input_type_ids': tf.zeros_like(data['op_token_mask']), # segment ids\n }\n\n y = {'outcome': data['outcome'], 'treatment': data['treatment'],\n 'in_dev': data['in_dev'], 'in_test': data['in_test'], 'in_train': data['in_train'],\n 'y0': data['y0'], 'y1': data['y1'],\n 'index': data['index'], 'subreddit':data['subreddit']}\n\n return x, y\n\n return bert_compatibility", "def decode_meraki_model(model):\n model_label = \"\"\n\n if \"MX\" in model:\n model_label = \"appliance\"\n if \"MS\" in model:\n model_label = \"switch\"\n if \"MR\" in model:\n model_label = \"wireless\"\n if \"MV\" in model:\n model_label = \"camera\"\n if \"MC\" in model:\n model_label = \"phone\"\n return model_label", "def create_bert_features(raw_text, tokenizer, model):\n # Load pre-trained model tokenizer (vocabulary)\n text_preprocessed = bert_preprocess(raw_text)\n \n # tokenize\n #tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n tokenized_text = tokenizer.tokenize(text_preprocessed)[:512]\n \n # need to fill in [SEP] here?\n \n # Convert token to vocabulary indices\n indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)\n\n # segments\n segments_ids = bert_segment(tokenized_text)\n\n # Convert inputs to PyTorch tensors\n tokens_tensor = torch.tensor([indexed_tokens])\n segments_tensors = torch.tensor([segments_ids])\n \n # Load pre-trained model (weights)\n #model = BertModel.from_pretrained('bert-base-uncased')\n \n # Set the model in evaluation mode to deactivate the 
DropOut modules\n # This is IMPORTANT to have reproducible results during evaluation!\n model.eval()\n\n # Predict hidden states features for each layer\n with torch.no_grad():\n # See the models docstrings for the detail of the inputs # outputs\n outputs = model(tokens_tensor, token_type_ids=segments_tensors)\n\n # Transformers models always output tuples.\n # See the models docstrings for the detail of all the outputs\n # In our case, the first element is the hidden state of the last layer of the Bert model\n encoded_layers = outputs[0]\n\n # We have encoded our input sequence in a FloatTensor of shape (batch size, sequence length, model hidden dimension)\n assert tuple(encoded_layers.shape) == (1, len(indexed_tokens), model.config.hidden_size)\n \n # take average over words\n sentence_embedding = torch.mean(encoded_layers,[0,1]).data.numpy()\n \n return sentence_embedding", "def get_bert_clf():\n model = tf.keras.models.model_from_json(get_object('distilbert_model.json', 'r'))\n model.load_weights(model_dir/'distilbert_weights.hdf5')\n return model", "def reconstruct_input_ext(self, model_in):", "def convert(encoder, bert_model):\n num_layers = encoder._config[\"num_layers\"]\n num_attention_heads = encoder._config[\"num_attention_heads\"]\n hidden_size = encoder._config[\"hidden_size\"]\n head_size = hidden_size // num_attention_heads\n assert head_size * num_attention_heads == hidden_size\n encoder._embedding_layer.set_weights(\n [bert_model[\"embeddings.word_embeddings.weight\"]])\n encoder._embedding_norm_layer.set_weights([\n bert_model[\"embeddings.LayerNorm.weight\"],\n bert_model[\"embeddings.LayerNorm.bias\"]\n ])\n encoder._type_embedding_layer.set_weights(\n [bert_model[\"embeddings.token_type_embeddings.weight\"]])\n encoder._position_embedding_layer.set_weights(\n [bert_model[\"embeddings.position_embeddings.weight\"]])\n for layer_num in range(num_layers):\n encoder._transformer_layers[\n layer_num]._attention_layer._key_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.weight\"].T\n .reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.key.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._query_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.query.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._value_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.weight\"]\n .T.reshape((hidden_size, num_attention_heads, head_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.self.value.bias\"]\n .reshape((num_attention_heads, head_size))\n ])\n encoder._transformer_layers[\n layer_num]._attention_layer._output_dense.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.dense.weight\"].T\n .reshape((num_attention_heads, head_size, hidden_size)),\n bert_model[f\"encoder.layer.{layer_num}.attention.output.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._attention_layer_norm.set_weights([\n bert_model[\n f\"encoder.layer.{layer_num}.attention.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.attention.output.LayerNorm.bias\"]\n ])\n encoder._transformer_layers[layer_num]._intermediate_dense.set_weights([\n 
bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.intermediate.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_dense.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.dense.weight\"].T,\n bert_model[f\"encoder.layer.{layer_num}.output.dense.bias\"]\n ])\n encoder._transformer_layers[layer_num]._output_layer_norm.set_weights([\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.weight\"],\n bert_model[f\"encoder.layer.{layer_num}.output.LayerNorm.bias\"]\n ])", "def create_model(is_predicting, input_ids, input_mask, segment_ids, vocab, vocab_size, bert_config, use_one_hot_embeddings):\n\n # bert_module = hub.Module(\n # BERT_MODEL_HUB,\n # trainable=True)\n \n # bert_inputs = dict(\n # input_ids=input_ids,\n # input_mask=input_mask,\n # segment_ids=segment_ids)\n\n # bert_outputs = bert_module(\n # inputs=bert_inputs,\n # signature=\"tokens\",\n # as_dict=True)\n\n # Use \"pooled_output\" for classification tasks on an entire sentence.\n # Use \"sequence_output\" for token-level output.\n # output_layer = bert_outputs[\"sequence_output\"]\n \n\n model = modeling.BertModel(\n config=bert_config,\n is_training=not is_predicting,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings\n )\n\n output_layer = model.get_sequence_output()\n\n\n\n batch_size = output_layer.shape[0]\n max_seq_length = output_layer.shape[1]\n hidden_size = output_layer.shape[2]\n \n\n # Create our own layer to tune for politeness data.\n output_weights = tf.get_variable(\n \"output_weights\", [vocab_size, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [vocab_size], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n\n # Dropout helps prevent overfitting\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n # add a max_seq length stack of bias so that we add the bias to each word distributoin\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n # Convert labels into one-hot encoding\n one_hot_answer = tf.one_hot(input_ids, depth=vocab_size)\n\n\n predictions = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n # If we're predicting, we want predicted labels and the probabiltiies.\n if is_predicting:\n return (predictions, log_probs)\n\n # If we're train/eval, compute loss between predicted and actual label\n per_example_loss = -tf.reduce_sum(one_hot_answer * log_probs, axis=-1)\n per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=input_ids, logits=logits)\n \n loss = tf.reduce_mean(per_example_loss)\n return (loss, predictions, log_probs)", "def convert_to_model(self, *args):", "def build_model(model_id1='bert-base-multilingual-cased',\n model_id2='bert-base-multilingual-uncased',\n max_len=192, dropout=0.2,\n **_):\n print(model_id1, model_id2)\n\n transformer1 = TFAutoModel.from_pretrained(model_id1)\n transformer2 = TFAutoModel.from_pretrained(model_id2)\n\n input_word_ids1 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids1\")\n out1 = transformer1(input_word_ids1)\n\n input_word_ids2 = Input(shape=(max_len,), dtype=tf.int32, name=\"input_word_ids2\")\n out2 = transformer2(input_word_ids2)\n\n sequence_output1 = out1[0]\n sequence_output2 = out2[0]\n cls_token1 = 
sequence_output1[:, 0, :]\n cls_token2 = sequence_output2[:, 0, :]\n\n x = Dropout(dropout)(cls_token1) + Dropout(dropout)(cls_token2)\n out = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=[input_word_ids1, input_word_ids2], outputs=out)\n\n return model", "def decode(self, model: bytes):\n _, path = tempfile.mkstemp()\n with open(path, \"wb\") as fd:\n fd.write(model)\n onnx_model = onnx.load(path)\n pytorch_model = ConvertModel(onnx_model)\n os.remove(path)\n return pytorch_model", "def load(self, model_name_or_path):\n return BertMLM(model_name_or_path, self.top_k)", "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n if output_type == \"sequence\":\n output_layer = model.get_sequence_output()\n elif output_type == \"pool\":\n output_layer = model.get_pooled_output()\n else:\n raise NotImplementedError()\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=dropout_val)\n\n if head_type == \"dense\" or head_type == \"raw\":\n dense = tf.layers.dense(tf.layers.flatten(output_layer), 1, activation=lla,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))\n dense = tf.squeeze(dense)\n\n elif head_type == \"2dense\":\n dense = tf.layers.dense(tf.layers.flatten(output_layer), 256, activation=tf.nn.relu,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n dense = tf.nn.dropout(dense, keep_prob=dropout_val)\n dense = tf.layers.dense(dense, 1, activation=lla,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))\n dense = tf.squeeze(dense)\n\n elif head_type == \"conv\":\n if output_type == \"sequence\":\n output_layer = tf.expand_dims(output_layer, -1)\n conv = tf.layers.conv2d(output_layer, 128, (1, 1), activation=tf.nn.relu)\n global_avg_pool = tf.reduce_mean(conv, axis=[1, 2])\n elif output_type == \"pool\":\n output_layer = tf.expand_dims(output_layer, -1)\n conv = tf.layers.conv1d(output_layer, 128, (1), activation=tf.nn.relu)\n global_avg_pool = tf.reduce_mean(conv, axis=[1])\n else:\n raise NotImplementedError()\n\n dense = tf.layers.dense(global_avg_pool, 1, activation=lla,\n kernel_initializer=tf.truncated_normal_initializer(stddev=0.02))\n dense = tf.squeeze(dense)\n\n ground_truth = tf.log1p(tf.clip_by_value(tf.cast(labels, tf.float32), 1e-8, 1e+30))\n predictions = tf.log1p(tf.clip_by_value(dense, 1e-8, 1e+30))\n msle = tf.losses.mean_squared_error(ground_truth, predictions)\n se = tf.square(ground_truth - predictions)\n\n if rmsle_loss == \"rmsle\":\n msle = tf.sqrt(msle)\n se = tf.sqrt(se)\n\n if head_type == \"raw\":\n print(output_layer.shape)\n print(type(output_layer))\n return (msle, se, dense, output_layer)\n\n return (msle, se, dense, predictions)", "def model_fn(features, labels, mode, params):\n # obtain the data\n _info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features['input_ids'] # [batch_size, seq_length]\n input_mask = features['input_mask'] # [batch_size, seq_length]\n\n # if mode != tf.estimator.ModeKeys.PREDICT:\n # # segment_idx = features['segment_dis']\n # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], 
specify the answer\n # masked_lm_ids = features['masked_lm_ids'] # [batch_size, answer_seq_length], specify the answer labels\n # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask\n # # next_sentence_labels = features['next_sentence_labels']\n # else:\n masked_lm_positions = features['masked_lm_positions']\n masked_lm_ids = features['masked_lm_ids']\n masked_lm_weights = features['masked_lm_weights']\n\n if bert_config.train_type == 'seq2seq':\n _info('Training seq2seq task.')\n elif bert_config.train_type == 'lm':\n _info('Training language model task.')\n \n # build model\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask)\n \n # compute loss\n loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.embedding_table,\n model.projection_table,\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN: \n # restore from the checkpoint,\n # tf.estimator automatically restore from the model typically,\n # maybe here is for restore some pre-trained parameters\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n _info('*** Trainable Variables ***')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))\n \n train_op = optimization.create_optimizer(\n loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)\n\n # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,\n # tf.train.get_or_create_global_step(),\n # num_train_steps,\n # end_learning_rate=0.0,\n # power=1.0,\n # cycle=False)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)\n \n def metric_fn(loss, label_ids, logits, is_real_example):\n \"\"\"\n Args:\n loss: tf.float32.\n label_ids: [b, s].\n logits: [b, s, v].\n \"\"\"\n # [b * s, v]\n logits = tf.reshape(logits, [-1, logits.shape[-1]])\n # [b * s, 1]\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # [b * s]\n label_ids = tf.reshape(label_ids, [-1])\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=loss)\n return {'eval_accuracy': accuracy, 'eval_loss': loss}\n \n eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)\n output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)\n\n return 
output_spec", "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n output_layer = model.get_pooled_output()\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, log_probs)", "def load_model(self, output_model_file):\n\t\tmodel_state_dict = torch.load(output_model_file)\n\t\tmodel = BertForSequenceClassification.from_pretrained(self.bert_model, state_dict= model_state_dict, num_labels = self.num_labels)\n\t\tmodel.to(self.device)\n\t\treturn model", "def to_BayesianModel(model, verbose=3):\n if isinstance(model, dict):\n adjmat = model.get('adjmat', None)\n else:\n adjmat = model\n if adjmat is None: raise Exception('[bnlearn] >Error: input for \"to_BayesianModel\" should be adjmat or a dict containing a key \"adjmat\".')\n\n if verbose>=3: print('[bnlearn] >Conversion of adjmat to BayesianModel.')\n\n # Convert to vector\n vec = adjmat2vec(adjmat)[['source', 'target']].values.tolist()\n # Make BayesianModel\n bayesianmodel = BayesianModel(vec)\n # Return\n return bayesianmodel", "def build_model(cls, args, task):\n # print(\"In build_model !!!\")\n default_architecture(args)\n assert args.load_hf_bert_from != ''\n encoder = HuggingFaceBertEncoder(args, task.dictionary)\n\n return cls(args, encoder, task)", "def to_bayesianmodel(model, verbose=3):\n if isinstance(model, dict):\n adjmat = model.get('adjmat', None)\n else:\n adjmat = model\n if adjmat is None: raise Exception('[bnlearn] >Error: input for \"to_bayesianmodel\" should be adjmat or a dict containing a key \"adjmat\".')\n\n if verbose>=3: print('[bnlearn] >Conversion of adjmat to BayesianModel.')\n\n # Convert to vector\n vec = adjmat2vec(adjmat)[['source', 'target']].values.tolist()\n # Make BayesianModel\n bayesianmodel = BayesianModel(vec)\n # Return\n return bayesianmodel", "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n num_labels, use_one_hot_embeddings):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n output_layer = model.get_pooled_output()\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # 
I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n return (None, None, log_probs)", "def convert_caffe_model(model_name, meta_info, dst_dir='./model'):\n\n (prototxt, caffemodel, mean) = download_caffe_model(model_name, meta_info, dst_dir)\n model_name = os.path.join(dst_dir, model_name)\n convert_model(prototxt, caffemodel, model_name)\n if isinstance(mean, str):\n mx_mean = model_name + '-mean.nd'\n convert_mean(mean, mx_mean)\n mean = mx_mean\n return (model_name, mean)", "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings, is_prediction=False):\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n\n logits = tf.nn.bias_add(logits, output_bias)\n probabilities = tf.nn.softmax(logits, axis=-1)\n if is_prediction:\n return tf.constant(0.0, dtype=tf.float32), tf.constant(0.0, dtype=tf.float32), logits, probabilities\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n label_smoothing = tf.constant(FLAGS.label_smoothing, dtype=tf.float32)\n\n one_hot_labels = one_hot_labels*(1 - label_smoothing) + label_smoothing / num_labels\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n\n loss = tf.reduce_mean(per_example_loss)\n\n return loss, per_example_loss, logits, probabilities", "def create_model(\n bert_config,\n is_training,\n input_ids,\n input_mask,\n segment_ids,\n labels,\n num_labels,\n use_one_hot_embeddings,\n):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings,\n )\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n output_layer = model.get_pooled_output()\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\",\n [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02),\n )\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer()\n )\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, 
transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n # probabilities = tf.nn.softmax(logits, axis=-1) ### multiclass case\n probabilities = tf.nn.sigmoid(logits) # multi-label case\n\n labels = tf.cast(labels, tf.float32)\n tf.logging.info(\n \"num_labels:{};logits:{};labels:{}\".format(num_labels, logits, labels)\n )\n per_example_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=labels, logits=logits\n )\n loss = tf.reduce_mean(per_example_loss)\n\n # probabilities = tf.nn.softmax(logits, axis=-1)\n # log_probs = tf.nn.log_softmax(logits, axis=-1)\n #\n # one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n #\n # per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n # loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, logits, probabilities)", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = (?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n 
train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n \"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec", "def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_pooled_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=2)\n batch_size = final_hidden_shape[0]\n hidden_size = final_hidden_shape[1]\n\n # 输出层:三分类\n output_weights = tf.get_variable(\n \"cls/oqmrc/output_weights\", [3, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/oqmrc/output_bias\", [3], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, 3])\n # logits = tf.transpose(logits, [2, 0, 1])\n return logits\n # unstacked_logits = tf.unstack(logits, axis=0)\n\n # (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n # return (start_logits, end_logits)", "def get_bert_model(input_word_ids,\n input_mask,\n input_type_ids,\n config=None,\n name=None,\n float_type=tf.float32,\n 
share_parameter_across_layers=False):\n bert_model_layer = BertModel(\n config=config, float_type=float_type,\n share_parameter_across_layers=share_parameter_across_layers, name=name)\n pooled_output, sequence_output = bert_model_layer(input_word_ids, input_mask,\n input_type_ids)\n bert_model = tf.keras.Model(\n inputs=[input_word_ids, input_mask, input_type_ids],\n outputs=[pooled_output, sequence_output])\n return bert_model", "def create_model(max_seq_len, adapter_size=64):\n\n # adapter_size = 64 # see - arXiv:1902.00751\n\n # create the bert layer\n with tf.io.gfile.GFile(bert_config_file, \"r\") as reader:\n bc = StockBertConfig.from_json_string(reader.read())\n bert_params = map_stock_config_to_params(bc)\n bert_params.adapter_size = adapter_size\n bert = BertModelLayer.from_params(bert_params, name=\"bert\")\n\n input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"input_ids\")\n # token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name=\"token_type_ids\")\n # output = bert([input_ids, token_type_ids])\n output = bert(input_ids)\n\n print(\"bert shape\", output.shape)\n cls_out = keras.layers.Lambda(lambda seq: seq[:, 0, :])(output)\n cls_out = keras.layers.Dropout(0.5)(cls_out)\n logits = keras.layers.Dense(units=1024, activation=\"tanh\")(cls_out)\n logits = keras.layers.Dropout(0.5)(logits)\n logits = keras.layers.Dense(units=2, activation=\"softmax\")(logits)\n\n # model = keras.Model(inputs=[input_ids, token_type_ids], outputs=logits)\n # model.build(input_shape=[(None, max_seq_len), (None, max_seq_len)])\n model = keras.Model(inputs=input_ids, outputs=logits)\n model.build(input_shape=(None, max_seq_len))\n\n # load the pre-trained model weights\n load_stock_weights(bert, bert_ckpt_file)\n\n # freeze weights if adapter-BERT is used\n if adapter_size is not None:\n freeze_bert_layers(bert)\n\n model.compile(optimizer=keras.optimizers.Adam(),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy(name=\"acc\")])\n\n model.summary()\n\n return model", "def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,\n labels, num_labels, use_one_hot_embeddings):\n model = modeling.AlbertModel(\n config=albert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n # In the demo, we are doing a simple classification task on the entire\n # segment.\n #\n # If you want to use the token-level output, use model.get_sequence_output()\n # instead.\n #if FLAGS.use_pooled_output:\n tf.logging.info(\"using pooled output\")\n output_layer = model.get_pooled_output()\n #else:\n # tf.logging.info(\"using meaned output\")\n # output_layer = tf.reduce_mean(model.get_sequence_output(), axis=1)\n\n hidden_size = output_layer.shape[-1].value\n\n output_weights = tf.get_variable(\n \"output_weights\", [num_labels, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n\n with tf.variable_scope(\"loss\"):\n if is_training:\n # I.e., 0.1 dropout\n output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n\n logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n probabilities = tf.nn.softmax(logits, axis=-1)\n log_probs = 
tf.nn.log_softmax(logits, axis=-1)\n\n one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n\n per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n loss = tf.reduce_mean(per_example_loss)\n\n return (loss, per_example_loss, probabilities, predictions)", "def convert_model(self, model: nn.Module) -> nn.Module:\n if self.sync_bn is not None:\n try:\n model = convert_sync_batchnorm(model, self.sync_bn)\n except ValueError as e:\n self.logger.error('cfg.sync_bn should be \"torch\" or '\n f'\"mmcv\", but got {self.sync_bn}')\n raise e\n\n return model" ]
[ "0.6321037", "0.61176497", "0.60473555", "0.59833467", "0.59296566", "0.59263337", "0.58409685", "0.58396", "0.5774077", "0.57682884", "0.57416177", "0.5726364", "0.57068795", "0.5675471", "0.56682515", "0.5662315", "0.5658786", "0.5653014", "0.5638642", "0.5619386", "0.5619371", "0.5615394", "0.56026363", "0.5592255", "0.5591289", "0.55779177", "0.5576862", "0.5553547", "0.55262107", "0.5525594" ]
0.7494484
0
Interpolates over image using a mask. The `func` parameter defines the function used for interpolation within the aperture. This can be either a statistical or interpolation function.
def maskinterp(image, func=spline_interp_2dfunc, mask=None, apstep=1, maxap=9, cval=np.nan, minfrac=0.2, minpoints=4, cdis=1, statistical=False, creep=False, coplanar=None, **kwargs): if mask is None: mask = ~np.isnan(image) if not mask.any(): log.error("Mask is all False") return elif mask.all(): return image.copy() mask = mask.astype(bool) if coplanar is None: coplanar = hasattr(func, '__name__') \ and func.__name__ == 'clough_tocher_2dfunc' # geometry used to define whether interpolation should # be attempted width = (maxap * 2) + 1 ay, ax = np.mgrid[:width, :width] ax -= maxap ay -= maxap dr = np.sqrt((ay ** 2) + (ax ** 2)) ygrid, xgrid = np.mgrid[:image.shape[0], :image.shape[1]] basemask = mask.copy() found = basemask.copy() corrected = image.copy() imshape = image.shape radius = apstep while True: wdata = corrected.copy() if creep else image.copy() apmask = np.array(dr <= np.ceil(radius)) nap = apmask.sum() find = ~found & ~mask xfind, yfind = xgrid[find], ygrid[find] nfind = len(xfind) xs = np.empty((nap, nfind), dtype=int) ys = np.empty((nap, nfind), dtype=int) dx, dy = np.empty_like(xs), np.empty_like(ys) for i, (offy, offx) in enumerate(zip(ay[apmask], ax[apmask])): xs[i], ys[i] = xgrid[find] + offx, ygrid[find] + offy dx[i], dy[i] = offx, offy # must exist inside features of image valid = (ys >= 0) & (ys < imshape[0]) & (xs >= 1) & (xs < imshape[1]) # populate data and mask ds = np.full((nap, nfind), np.nan, dtype=image.dtype) ms = np.full((nap, nfind), False) ds[valid] = wdata.copy()[(ys[valid], xs[valid])] ms[valid] = basemask.copy()[(ys[valid], xs[valid])] # data must not be NaN valid[np.isnan(ds)] = False # do not use unmask data valid[~ms] = False # must be more than minpoints npts = np.sum(valid, axis=0) valid[:, npts < minpoints] = False # must be more than minfrac valid[:, (npts / nap) < minfrac] = False # coplanar? if coplanar: planevalid = np.any(valid, axis=0) tx, ty = dx.copy().astype(float), dy.copy().astype(float) tx[:, ~planevalid] = 0 ty[:, ~planevalid] = 0 tx = np.nanmax(tx, axis=0) - np.nanmin(tx, axis=0) ty = np.nanmax(ty, axis=0) - np.nanmin(ty, axis=0) valid[:, tx == 0] = False valid[:, ty == 0] = False # calculate center-of-mass cx, cy = dx.copy().astype(float), dy.copy().astype(float) w = np.sum(valid, axis=0) zi = w == 0 cx[:, zi] = 0 cy[:, zi] = 0 cx, cy = np.nanmean(cx, axis=0), np.nanmean(cy, axis=0) cr = np.hypot(cx, cy) cr = cr > cdis valid[:, cr] = False pts = np.nonzero(np.any(valid, axis=0))[0] for pt in pts: idx = valid[:, pt] din = ds[idx, pt] xout = xfind[pt] yout = yfind[pt] if statistical: corrected[yout, xout] = func(din, **kwargs) else: cin = np.array([xs[idx, pt], ys[idx, pt]]).T cout = np.array([[xout], [yout]]).T corrected[yout, xout] = func(din, cin, cout, **kwargs) found[yout, xout] = True radius += apstep if radius > maxap or found.all(): break if not found.all(): corrected[~found] = cval return corrected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(cls, image, mask,\n interp_mask=DEFAULT_INTERP_MASK,\n BADPIX_INTERP=maskbits.BADPIX_INTERP,\n min_cols=DEFAULT_MINCOLS,\n max_cols=DEFAULT_MAXCOLS,\n invalid_mask=DEFAULT_INVALID_MASK,\n add_noise=DEFAULT_ADD_NOISE,\n clobber=DEFAULT_CLOBBER,\n block_size=DEFAULT_BLOCK_SIZE,\n logger=logger):\n\n # Pass the locals as kwargs\n kwargs = locals()\n image, mask = zipp.zipper_interp_rows(**kwargs)\n return image, mask", "def interpolate(x, y, mask, kind='linear'):\r\n f = interp1d(x[~mask], y[~mask], kind, axis=-1)\r\n return f(x)", "def smooth_with_function_and_mask(image, function, mask):\n bleed_over = function(mask.astype(float))\n masked_image = np.zeros(image.shape, image.dtype)\n masked_image[mask] = image[mask]\n smoothed_image = function(masked_image)\n output_image = smoothed_image / (bleed_over + np.finfo(float).eps)\n return output_image", "def applyMaskOnValues(self, func):\r\n self._maskFunctions.append(func)\r\n pass", "def overlay(image, mask):\n if len(image.shape) == 3:\n image = image[:, :, 0]\n if len(mask.shape) == 3:\n mask = mask[:, :, 0]\n if np.amax(image) > 100:\n image = image / 255\n\n masked = np.ma.masked_where(mask == 0, mask)\n\n plt.figure()\n plt.subplot(1, 2, 1)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.subplot(1, 2, 2)\n plt.imshow(image, 'gray', interpolation='nearest')\n plt.imshow(masked, 'jet', interpolation='nearest', alpha=0.5)\n plt.show()", "def overlay_mask(img, mask, transparency=0.5):\n im_over = np.ndarray(img.shape)\n im_over[:, :, 0] = (1 - mask[:, :, 0]) * img[:, :, 0] + mask[:, :, 0] * (\n 255 * transparency + (1 - transparency) * img[:, :, 0])\n im_over[:, :, 1] = (1 - mask[:, :, 1]) * img[:, :, 1] + mask[:, :, 1] * (\n 255 * transparency + (1 - transparency) * img[:, :, 1])\n im_over[:, :, 2] = (1 - mask[:, :, 2]) * img[:, :, 2] + mask[:, :, 2] * (\n 255 * transparency + (1 - transparency) * img[:, :, 2])\n return im_over", "def bilinear_interpolation(self, image, fx, fy):\r\n\r\n # Write your code for bilinear interpolation here\r\n width, height = image.shape[:2]\r\n nw = int(width * fx)\r\n nh = int(height * fy)\r\n new_img = np.zeros((nw, nh), np.uint8)\r\n inter = interpolation.interpolation()\r\n for i in range(nw):\r\n for j in range(nh):\r\n ii = int(np.floor((i * (image.shape[0]-1))/(nw - 1)))\r\n jj = int(np.floor((j * (image.shape[1]-1))/(nh - 1)))\r\n\r\n # All four corner pixels copied as it is\r\n if (ii == 0 and jj == 0) or \\\r\n (ii == 0 and jj == image.shape[1] - 1) or \\\r\n (ii == image.shape[0] - 1 and jj == 0) or \\\r\n (ii == image.shape[0] - 1 and jj == image.shape[1] - 1):\r\n new_img[i][j] = image[ii][jj]\r\n\r\n # Linear interpolation along left and right edges\r\n elif ii == 0 or ii == image.shape[0] - 1:\r\n new_img[i][j] = inter.linear_interpolation([jj - 1, image[ii][jj - 1]], [jj + 1, image[ii][jj + 1]], jj)\r\n\r\n # Linear interpolation along top and bottom edges\r\n elif jj == 0 or jj == image.shape[1] - 1:\r\n new_img[i][j] = inter.linear_interpolation([ii - 1, image[ii - 1][jj]], [ii + 1, image[ii + 1][jj]], ii)\r\n\r\n else:\r\n\r\n q11i, q11j = ii - 1, jj - 1\r\n q12i, q12j = ii - 1, jj + 1\r\n q21i, q21j = ii + 1, jj - 1\r\n q22i, q22j = ii + 1, jj + 1\r\n\r\n point_a = [q11i, q11j, image[q11i][q11j]]\r\n point_b = [q12i, q12j, image[q12i][q12j]]\r\n point_c = [q21i, q21j, image[q21i][q21j]]\r\n point_d = [q22i, q22j, image[q22i][q22j]]\r\n unknown = [ii, jj]\r\n\r\n new_img[i][j] = inter.bilinear_interpolation(point_a, point_b, point_c, point_d, unknown)\r\n\r\n 
return new_img", "def interpolate(self, image):\n return", "def wrapImg(func):\n def wrapFunc(*args, **kwargs):\n return np.array(func(*args, **kwargs))\n return wrapFunc", "def bilinear_interpolate(xv, yv, im, xout, yout, fill_value=0):\n ny, nx = np.shape(im)\n if (nx, ny) != (xv.size, yv.size):\n raise ValueError(\"Input arrays dimensions do not match\")\n\n xi = (nx-1.)/(xv[-1] - xv[0]) * (xout - xv[0])\n yi = (ny-1.)/(yv[-1] - yv[0]) * (yout - yv[0])\n\n return ndimage.map_coordinates(im.T, [xi, yi], cval=fill_value, order=1)", "def bilinear_interpolate(xv, yv, im, xout, yout, fill_value=0):\n ny, nx = np.shape(im)\n if (nx, ny) != (xv.size, yv.size):\n raise ValueError(\"Input arrays dimensions do not match\")\n\n xi = (nx-1.)/(xv[-1] - xv[0]) * (xout - xv[0])\n yi = (ny-1.)/(yv[-1] - yv[0]) * (yout - yv[0])\n\n return ndimage.map_coordinates(im.T, [xi, yi], cval=fill_value, order=1)", "def compute_interpolation_function(self, x, inarray,\n kind,\n bounds_error=True,\n axis=0):\n\n return interp1d(x, inarray, kind=kind,copy=True, axis=axis,\n bounds_error=bounds_error)", "def pil_image_mask_by_band_value(img, band, val, cval=0):\n # type: (PImage.Image, int, int) -> PImage.Image\n\n num_bands = len(img.getbands())\n\n if band >= num_bands:\n raise ValueError('Cannot get band with index {} from image with {} bands'.format(band, num_bands))\n\n # Create a look up table where only one value maps to itself and everything else to cval\n other_band_lut = [cval] * 256\n target_band_lut = [cval] * 256\n target_band_lut[val] = val\n lut = []\n\n for i in range(num_bands):\n if i == band:\n lut += target_band_lut\n else:\n lut += other_band_lut\n\n img = img.point(lut)\n return img", "def interpolate_spectrum(interp,wave_ini,flux_ini,wave_fnl,flux_fnl):\n wave_ini_p = wave_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_ini_p = flux_ini.ctypes.data_as(ct.POINTER(ct.c_double))\n wave_fnl_p = wave_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n flux_fnl_p = flux_fnl.ctypes.data_as(ct.POINTER(ct.c_double))\n\n mask = np.zeros_like(wave_fnl).astype('int32')\n mask_p = mask.ctypes.data_as(ct.POINTER(ct.c_int))\n\n interp(wave_ini.shape[0],wave_fnl.shape[0],\n wave_ini_p,flux_ini_p,\n wave_fnl_p,flux_fnl_p,mask_p)\n\n return mask", "def apply_mask(image, mask, cls2color=cityscapes_cls2color, alpha=0.5):\n masks = []\n for c in range(3):\n mask_copy = mask.copy()\n for k, v in cls2color.items():\n mask_copy[mask == k] = v[c]\n mask_copy = np.expand_dims(mask_copy, 2)\n masks.append(mask_copy)\n mask = np.concatenate(masks, axis=-1)\n if image is not None:\n ret = image*(1-alpha)+alpha*mask/255.0\n else:\n ret = mask/255.0\n\n return ret", "def bilinear_interpolation(self, image, fx, fy):\n\n # Write your code for bilinear interpolation here\n (width, height) = image.shape\n (scaledWidth, scaledHeight) = (int(fy * width), int(fy * height))\n resampledImage = np.zeros((scaledWidth, scaledHeight), dtype=int)\n\n for x in range(0, scaledWidth):\n for y in range(0, scaledHeight):\n nx = math.floor(x / fx)\n ny = math.floor(y / fy)\n if (nx + 1) > width - 1:\n nx = width - 2\n if (ny + 1) > height - 1:\n ny = height - 2\n\n n1 = (nx, ny)\n n2 = (nx, ny+1)\n n3 = (nx+1, ny)\n n4 = (nx+1, ny+1)\n\n resampledImage[x, y] = self.inter.bilinear_interpolation([n1, image[n1]], [n2, image[n2]], [n3, image[n3]],[n4, image[n4]], [x/fx, y/fy])\n\n return resampledImage", "def fill_region(image,mask,value=1):\n\tim = image.copy().ravel()\n\tif image.ndim > 2:\n\t\tim_h, im_w, im_ch = image.shape\n\telse:\n\t\tim_ch = 
1\n\t\tim_h, im_w = self.image.shape\n\t# linear indices of masked pixels\n\tind = masked_indices(mask)\n\tfor i in ind:\n\t\tfor ch in range(im_ch):\n\t\t\tim.data[i*im_ch+ch] = value\n\treturn im.reshape(image.shape)", "def apply_mask(self, mask_band=None, mask_val=None):\n pass", "def bilinear_interpolation(self, image, fx, fy):\n\n # Write your code for bilinear interpolation here\n w = image.shape[1]\n h = image.shape[0]\n\n newW = int(w*float(fx))\n newH = int(h*float(fy))\n\n ratioW = w / newW\n ratioH = h / newH\n\n import numpy as np\n import math as ma\n from . import interpolation as bilinear\n bi = bilinear.interpolation()\n newImg = np.zeros((newH, newW), np.uint8)\n for i in range(newImg.shape[0]):\n for j in range(newImg.shape[1]):\n\n x = ratioH * i\n y = ratioW * j\n\n x1 = ma.floor(x)\n x2 = ma.ceil(x)\n y1 = ma.floor(y)\n y2 = ma.ceil(y)\n\n x1 = x1-1 if x1 == h else x1\n x2 = x2-1 if x2 == h else x2\n y1 = y1-1 if y1 == w else y1\n y2 = y2-1 if y2 == w else y2\n\n pt1 = (x1, y1, image[x1, y1])\n pt2 = (x1, y2, image[x1, y2])\n pt3 = (x2, y1, image[x2, y1])\n pt4 = (x2, y2, image[x2, y2])\n unknown = (x, y, 0)\n\n if pt1[0] == pt3[0] and pt1[2] == pt2[2]:\n newImg[i, j] = image[int(x), int(y)]\n else:\n newImg[i, j] = bi.bilinear_interpolation(pt1, pt2, pt3, pt4, unknown)\n\n image = newImg\n return image", "def imshow_overlay(im, mask, alpha=0.5, color='red', **kwargs):\n mask = mask > 0\n mask = ma.masked_where(~mask, mask) \n plt.imshow(im, **kwargs)\n plt.imshow(mask, alpha=alpha, cmap=ListedColormap([color]))", "def interact(func,\n image = None,\n *args,\n continuous_update: bool = True,\n context:dict = None,\n zoom_factor:float = 1.0,\n zoom_spline_order:int = 0,\n colormap:str = None,\n display_min:float = None,\n display_max:float = None,\n viewer: _SliceViewer = None,\n **kwargs):\n import inspect\n import ipywidgets\n from ._utilities import parameter_is_image_parameter\n from ._context import Context\n from ._utilities import _no_resize\n from ._slice_viewer import _SliceViewer\n\n # hidden feature: func can be a tuple of (function, alias_name)\n if isinstance(func, tuple):\n func_name = func[1]\n func = func[0]\n else:\n func_name = func.__name__\n\n exposable_parameters = []\n footprint_parameters = []\n image_parameters = []\n\n if context is not None and not isinstance(context, Context):\n context = Context(context)\n\n image_passed = image is not None\n if context is not None and image is None:\n image = next(iter(context._images.values()))\n\n sig = inspect.signature(func)\n for key in sig.parameters.keys():\n exposable = False\n default_value = 0\n if isinstance(sig.parameters[key].default, int) or isinstance(sig.parameters[key].default, float):\n default_value = sig.parameters[key].default\n min_value, max_value, step = guess_range(key, sig.parameters[key].annotation)\n\n #if min_value is not None:\n int_slider = ipywidgets.IntSlider\n float_slider = ipywidgets.FloatSlider\n #else:\n # int_slider = ipywidgets.IntText\n # float_slider = ipywidgets.FloatText\n if min_value is None:\n min_value = 0\n if max_value is None:\n max_value = 100\n\n if sig.parameters[key].annotation is int:\n default_value = int_slider(min=min_value, max=max_value, step=step, value=default_value, continuous_update=continuous_update)\n exposable = True\n elif sig.parameters[key].annotation is float or 'sigma' in key or 'radius' in key:\n default_value = float_slider(min=min_value, max=max_value, step=step, value=default_value, continuous_update=continuous_update)\n exposable = 
True\n elif key.startswith(\"is_\") or sig.parameters[key].annotation is bool:\n default_value = ipywidgets.Checkbox(value=default_value)\n exposable = True\n elif key == 'footprint' or key == 'selem' or key == 'structuring_element':\n footprint_parameters.append(key)\n default_value = ipywidgets.IntSlider(min=min_value, max=max_value, step=step, value=default_value, continuous_update=continuous_update)\n exposable = True\n elif parameter_is_image_parameter(sig.parameters[key]) and \"destination\" not in key and key != \"out\" and key != \"output\":\n if context is not None:\n image_parameters.append(key)\n default_value = ipywidgets.Dropdown(\n options=list(context._images.keys())\n #options=[(k, v) for k, v in context._images.items()],\n )\n exposable = True\n\n if exposable:\n if key in kwargs.keys():\n default_value = kwargs[key]\n exposable_parameters.append(inspect.Parameter(key, inspect.Parameter.KEYWORD_ONLY, default=default_value))\n\n viewer_was_none = viewer is None\n if viewer_was_none:\n viewer = _SliceViewer(image, zoom_factor=zoom_factor, zoom_spline_order=zoom_spline_order, colormap=colormap, display_min=display_min, display_max=display_max)\n viewer.slice_slider.continuous_update=continuous_update\n command_label = ipywidgets.Label(value=func_name + \"()\")\n command_label.style.font_family = \"Courier\"\n\n from skimage import morphology\n execution_blocked = True\n\n def worker_function(*otherargs, **kwargs):\n\n command = func_name + \"(\"\n if image_passed:\n command = command + \"...\"\n\n for key in [e.name for e in exposable_parameters]:\n\n if key in footprint_parameters:\n if len(image.shape) == 2:\n command = command + \", \" + key + \"=disk(\" + str(kwargs[key]) + \")\"\n kwargs[key] = morphology.disk(kwargs[key])\n elif len(image.shape) == 3:\n command = command + \", \" + key + \"=ball(\" + str(kwargs[key]) + \")\"\n kwargs[key] = morphology.ball(kwargs[key])\n elif key in image_parameters:\n command = command + \", \" + key + \"=\" + str(kwargs[key])\n kwargs[key] = context._images[kwargs[key]]\n else:\n command = command + \", \" + key + \"=\" + str(kwargs[key])\n command = command + \")\"\n command_label.value = command.replace(\"(,\", \"(\")\n\n if not execution_blocked:\n if image_passed:\n viewer.image = func(image, *args, **kwargs)\n else:\n viewer.image = func(*args, **kwargs)\n\n viewer.slice_slider.max = viewer.image.shape[0] - 1\n viewer.configuration_updated(None)\n\n\n worker_function.__signature__ = inspect.Signature(exposable_parameters)\n inter = ipywidgets.interactive(worker_function, dict(manual=False, auto_display=False))\n\n execution_blocked = False\n\n inter.update()\n\n output_widgets = []\n output_widgets.append(inter)\n output_widgets.append(command_label)\n\n if viewer_was_none:\n output_widgets.append(_no_resize(viewer.view))\n output_widgets.append(viewer.slice_slider)\n\n return ipywidgets.VBox(output_widgets)", "def image_resample(f, oversamp=1.0):\n img = imageio.imread(f)\n x = np.linspace(0,img.shape[0],img.shape[0])\n y = np.linspace(0,img.shape[1],img.shape[1])\n #\n # 2x oversample the image since we'll dither it.\n #\n xnew = np.linspace(0, img.shape[0], img.shape[0]*oversamp)\n ynew = np.linspace(0, img.shape[1], img.shape[1]*oversamp)\n from scipy import interpolate\n rc = interpolate.interp2d(x, y, img[:,:,0].flatten(), kind='linear')\n gc = interpolate.interp2d(x, y, img[:,:,1].flatten(), kind='linear')\n bc = interpolate.interp2d(x, y, img[:,:,2].flatten(), kind='linear')\n rgb_new = np.stack([rc(xnew.flatten(), 
ynew.flatten()),\n gc(xnew.flatten(), ynew.flatten()),\n bc(xnew.flatten(), ynew.flatten())],-1).transpose(1,0,2).astype(np.uint8)\n plt.imshow(rgb_new)\n return rgb_new", "def trilinear_interp(img, indices): \n input_array = np.array(img.get_data())\n indices = np.array(indices)\n\n x_indices = indices[:,0]\n y_indices = indices[:,1]\n z_indices = indices[:,2]\n\n # get lower bounds\n x0 = x_indices.astype(np.integer)\n y0 = y_indices.astype(np.integer)\n z0 = z_indices.astype(np.integer)\n\n # get upper bounds0000\n x1 = x0 + 1\n y1 = y0 + 1\n z1 = z0 + 1\n\n # #Check if xyz1 is beyond array boundary:\n x1[np.where(x1==input_array.shape[0])] = x0.max()\n y1[np.where(y1==input_array.shape[1])] = y0.max()\n z1[np.where(z1==input_array.shape[2])] = z0.max()\n\n x = x_indices - x0\n y = y_indices - y0\n z = z_indices - z0\n\n kx = 1 - x\n ky = 1 - y\n kz = 1 - z\n\n #output = input_array[x0,y0,z0]\n #print output\n output = (input_array[x0,y0,z0]*kx*ky*kz +\n input_array[x1,y0,z0]*x*ky*kz +\n input_array[x0,y1,z0]*kx*y*kz +\n input_array[x0,y0,z1]*kx*ky*z +\n input_array[x1,y0,z1]*x*ky*z +\n input_array[x0,y1,z1]*kx*y*z +\n input_array[x1,y1,z0]*x*y*kz +\n input_array[x1,y1,z1]*x*y*z)\n\n return output", "def interpolate(f, Q, method='linear', y_transect=None):\n if isinstance(f, (ufl.core.expr.Expr, firedrake.Function)):\n return firedrake.interpolate(f, Q)\n\n mesh = Q.mesh()\n element = Q.ufl_element()\n if len(element.sub_elements()) > 0:\n element = element.sub_elements()[0]\n\n V = firedrake.VectorFunctionSpace(mesh, element)\n X = firedrake.interpolate(mesh.coordinates, V).dat.data_ro\n\n q = firedrake.Function(Q)\n\n if isinstance(f, rasterio.DatasetReader):\n q.dat.data[:] = _sample(f, X, method, y_transect)\n elif (isinstance(f, tuple)\n and all(isinstance(fi, rasterio.DatasetReader) for fi in f)):\n for i, fi in enumerate(f):\n q.dat.data[:, i] = _sample(fi, X, method, y_transect)\n else:\n raise ValueError('Argument must be a rasterio data set or a tuple of '\n 'data sets!')\n\n return q", "def extract_from_mask(array, mask, fun='mean', sp_axes=(1,2)):\n mask_3d = np.broadcast_to(mask, array.shape)\n masked_array = np.ma.MaskedArray(array, mask_3d)\n if fun == 'mean':\n out = np.ma.mean(masked_array, axis=sp_axes)\n elif fun == 'median':\n out = np.ma.median(masked_array, axis=sp_axes)\n elif fun == 'min':\n out = np.ma.min(masked_array, axis=sp_axes)\n elif fun == 'max':\n out = np.ma.max(masked_array, axis=sp_axes)\n elif fun == 'sum':\n out = np.ma.sum(masked_array, axis=sp_axes)\n else:\n raise ValueError('fun= must be one of mean, median, min, max or sum')\n return out", "def preprocess_func(cls, func):\n return DaskWrapper.put(func, hash=False, broadcast=True)", "def source_annular(f, ctr, r_min, r_max, I0=1):\n\n f2 = (f[0] - ctr[0])**2 + (f[1] - ctr[1])**2\n mask = (f2 >= r_min**2) * (f2 <= r_max**2)\n return I0 * mask", "def calcmask(self, *args, **kwargs):\n return _image.image_calcmask(self, *args, **kwargs)", "def vis_mask(img, mask, col, alpha=0.4):\n\n img = img.astype(np.float32)\n idx = np.nonzero(mask)\n\n img[idx[0], idx[1], :] *= 1.0 - alpha\n img[idx[0], idx[1], :] += alpha * col\n\n return img.astype(np.uint8)", "def profile_interp(var,z_orig,z_interp,method='linear',out_of_bounds='NaN'):\n z_orig = z_orig[~isnan(z_orig)]\n var= var[~isnan(var)]\n #assert(all(diff(z_orig) > 0))\n if len(z_orig) > len(var) or len(var) > len(z_orig): return NaN\n if len(z_orig) <= 2 or len(var) <= 2: return NaN\n \n if out_of_bounds == 'NaN':\n interpolant = 
interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=NaN)\n elif out_of_bounds == 'nearest':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value=(var[0],var[-1]))\n elif out_of_bounds == 'extrap':\n interpolant = interpolate.interp1d(z_orig,var,kind=method,bounds_error=False,fill_value='extrapolate')\n else:\n raise ValueError('Extrapolation method must be NaN, nearest, or cubic.')\n result = interpolant(z_interp)\n\n if result.size == 1: return result.item()\n else: return result" ]
[ "0.5979875", "0.596896", "0.58975315", "0.5509023", "0.5382732", "0.538206", "0.5352394", "0.5333508", "0.5331767", "0.5311795", "0.5311795", "0.5161992", "0.51591897", "0.514953", "0.5132063", "0.5104912", "0.5066806", "0.5038082", "0.503342", "0.50329393", "0.5016978", "0.5000473", "0.49756706", "0.4960685", "0.4937498", "0.49338672", "0.49299118", "0.4923551", "0.49219507", "0.4917967" ]
0.69919026
0
Fills in NaN values in an image. Uses the CloughTocher scheme to construct a piecewise cubic interpolating Bezier polynomial on a Delaunay triangulation.
def image_naninterp(data): if not isinstance(data, np.ndarray) or len(data.shape) != 2: log.error("data must be a 2D %s" % np.ndarray) return mask = np.isnan(data) if not mask.any(): return data if mask.all(): log.error("data are all NaN") return yy, xx = np.mgrid[:data.shape[0], :data.shape[1]] points = np.array([yy[~mask], xx[~mask]]).T interp = interpolate.CloughTocher2DInterpolator(points, data[~mask]) result = data.copy() result[mask] = interp(np.array([yy[mask], xx[mask]]).T) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fill_blind_pores(im):\n holes = find_disconnected_voxels(im)\n im[holes] = False\n return im", "def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))", "def fix_nan(image, replace=0.):\n h = pyfits.open(image, mode='update')\n imgdata = h[0].data\n imgdata = np.where(np.isnan(imgdata), replace, imgdata)\n h[0].data = imgdata\n h.flush()\n h.close()", "def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y", "def test_interpolation_random_array_and_nan(self):\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)", "def _interpolation(matrix):\n try:\n\tok = ~np.isnan(matrix)\n \txp = ok.ravel().nonzero()[0]\n \tfp = matrix[~np.isnan(matrix)]\n \tx = np.isnan(matrix).ravel().nonzero()[0]\n \tmatrix[np.isnan(matrix)] = np.interp(x, xp, fp)\n \treturn matrix\n except:\n return matrix", "def fill_missing_data_points(data):\n return data.interpolate()", "def fill_nan(A):\n\tinds = np.arange(A.shape[0])\n\tgood = np.where(np.isfinite(A))\n\tA[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good])\n\treturn A", "def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan", "def test_isentropic_pressure_masked_column():\n lev = [100000., 95000.] 
* units.Pa\n tmp = np.ma.ones((len(lev), 5, 5))\n tmp[0, :] = 296.\n tmp[1, :] = 292.\n tmp[:, :, -1] = np.ma.masked\n tmp = units.Quantity(tmp, units.kelvin)\n isentprs = isentropic_interpolation([296.] * units.kelvin, lev, tmp)\n trueprs = np.ones((1, 5, 5)) * (1000. * units.hPa)\n trueprs[:, :, -1] = np.nan\n assert isentprs[0].shape == (1, 5, 5)\n assert_almost_equal(isentprs[0], trueprs, 3)", "def dask_gd2_nanfill(xx, yy, z_array, algorithm='cubic', **kwargs):\n n_jobs = kwargs.pop(\"n_jobs\", 4)\n chunk_size = kwargs.get(\"chunk_size\", int(xx.size / (n_jobs - 1)))\n # make dask arrays\n dask_xyz = da.from_array((xx, yy, z_array), chunks=(3, chunk_size, \"auto\"), name=\"dask_all\")\n dask_xx = dask_xyz[0,:,:]\n dask_yy = dask_xyz[1,:,:]\n dask_zz = dask_xyz[2,:,:]\n\n # select only valid values\n dask_valid_x1 = dask_xx[~da.isnan(dask_zz)]\n dask_valid_y1 = dask_yy[~da.isnan(dask_zz)]\n dask_valid_z1 = dask_zz[~da.isnan(dask_zz)]\n\n # interpolate for missing values\n return dask_interpolate(dask_valid_x1, dask_valid_y1, dask_valid_z1, dask_xx, dask_yy, algorithm=algorithm, **kwargs)", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def nan_interp(A):\n\tni,nj = np.shape(A)\n\t# extend edges of A by one\n\tA = np.concatenate((np.array([A[:,0]]).transpose(),A,np.array([A[:,-1]]).transpose()),axis=1)\n\tA = np.concatenate((np.array([A[0,:]]),A,np.array([A[-1,:]])),axis=0)\n\t\n\t#nit = 0\n\t#while np.sum(np.isnan(A)) != 0:\n\t#nit+=1\n\tnanp = np.isnan(A)\n\tfor i in range(1,ni+1):\n\t\tfor j in range(1,nj+1):\n\t\t\tif nanp[i,j]:\n\t\t\t\t#\t# edges\n\t\t\t\t#\tif (i==0) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j!=0)& (j!=nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1],A[i,j+1]])\n\t\t\t\t#\tif (j==0) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (j==nj-1) & (i!=0)& (i!=ni-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i+1,j],A[i,j-1]])\n\t\t\t\t#\t# corners\n\t\t\t\t#\tif (i==0) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==ni-1) & (j==0):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j+1]])\n\t\t\t\t#\tif (i==0) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i+1,j],A[i,j-1]])\n\t\t\t\t#\tif (i==ni-1) & (j==nj-1):\n\t\t\t\t#\t\tb = np.array([A[i-1,j],A[i,j-1]])\n\t\t\t\t#\t# core\n\t\t\t\t#\telse:\n\t\t\t\tb = np.array([A[i-1,j],A[i,j-1],A[i+1,j],A[i,j+1]])\n\t\t\t\tsnan = np.sum(np.isnan(b))\n\t\t\t\tsb = np.nansum(b)\n\t\t\t\tA[i,j] = sb/(len(b)-snan)\n\t\t\t\t#print(i,j)\n\t# only the core matters\n\tA = A[1:ni+1,1:nj+1]\n\treturn A", "def filling(omegas, gf):\n idx = np.argmin(np.abs(omegas)) + 1\n x = omegas[:idx]\n y = -gf[:idx].imag\n x[-1] = 0\n y[-1] = (y[-1] + y[-2]) / 2\n return integrate.simps(y, x)", "def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and 
indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind", "def bilinear_interpolation(self, image, fx, fy):\r\n\r\n # Write your code for bilinear interpolation here\r\n width, height = image.shape[:2]\r\n nw = int(width * fx)\r\n nh = int(height * fy)\r\n new_img = np.zeros((nw, nh), np.uint8)\r\n inter = interpolation.interpolation()\r\n for i in range(nw):\r\n for j in range(nh):\r\n ii = int(np.floor((i * (image.shape[0]-1))/(nw - 1)))\r\n jj = int(np.floor((j * (image.shape[1]-1))/(nh - 1)))\r\n\r\n # All four corner pixels copied as it is\r\n if (ii == 0 and jj == 0) or \\\r\n (ii == 0 and jj == image.shape[1] - 1) or \\\r\n (ii == image.shape[0] - 1 and jj == 0) or \\\r\n (ii == image.shape[0] - 1 and jj == image.shape[1] - 1):\r\n new_img[i][j] = image[ii][jj]\r\n\r\n # Linear interpolation along left and right edges\r\n elif ii == 0 or ii == image.shape[0] - 1:\r\n new_img[i][j] = inter.linear_interpolation([jj - 1, image[ii][jj - 1]], [jj + 1, image[ii][jj + 1]], jj)\r\n\r\n # Linear interpolation along top and bottom edges\r\n elif jj == 0 or jj == image.shape[1] - 1:\r\n new_img[i][j] = inter.linear_interpolation([ii - 1, image[ii - 1][jj]], [ii + 1, image[ii + 1][jj]], ii)\r\n\r\n else:\r\n\r\n q11i, q11j = ii - 1, jj - 1\r\n q12i, q12j = ii - 1, jj + 1\r\n q21i, q21j = ii + 1, jj - 1\r\n q22i, q22j = ii + 1, jj + 1\r\n\r\n point_a = [q11i, q11j, image[q11i][q11j]]\r\n point_b = [q12i, q12j, image[q12i][q12j]]\r\n point_c = [q21i, q21j, image[q21i][q21j]]\r\n point_d = [q22i, q22j, image[q22i][q22j]]\r\n unknown = [ii, jj]\r\n\r\n new_img[i][j] = inter.bilinear_interpolation(point_a, point_b, point_c, point_d, unknown)\r\n\r\n return new_img", "def maskinterp(image, func=spline_interp_2dfunc,\n mask=None, apstep=1, maxap=9, cval=np.nan, minfrac=0.2,\n minpoints=4, cdis=1, statistical=False, creep=False,\n coplanar=None, **kwargs):\n if mask is None:\n mask = ~np.isnan(image)\n if not mask.any():\n log.error(\"Mask is all False\")\n return\n elif mask.all():\n return image.copy()\n mask = mask.astype(bool)\n\n if coplanar is None:\n coplanar = hasattr(func, '__name__') \\\n and func.__name__ == 'clough_tocher_2dfunc'\n\n # geometry used to define whether interpolation should\n # be attempted\n width = (maxap * 2) + 1\n ay, ax = np.mgrid[:width, :width]\n ax -= maxap\n ay -= maxap\n dr = np.sqrt((ay ** 2) + (ax ** 2))\n ygrid, xgrid = np.mgrid[:image.shape[0], :image.shape[1]]\n basemask = mask.copy()\n found = basemask.copy()\n corrected = image.copy()\n imshape = image.shape\n radius = apstep\n while True:\n wdata = corrected.copy() if creep else image.copy()\n apmask = np.array(dr <= np.ceil(radius))\n nap = apmask.sum()\n find = ~found & ~mask\n xfind, yfind = xgrid[find], ygrid[find]\n nfind = len(xfind)\n\n xs = np.empty((nap, nfind), dtype=int)\n ys = np.empty((nap, nfind), dtype=int)\n dx, dy = np.empty_like(xs), np.empty_like(ys)\n\n for i, (offy, offx) in enumerate(zip(ay[apmask], ax[apmask])):\n xs[i], ys[i] = xgrid[find] + offx, ygrid[find] + offy\n dx[i], dy[i] = offx, offy\n\n # must exist inside features of image\n valid = (ys >= 0) & (ys < imshape[0]) & (xs >= 1) & (xs < imshape[1])\n\n # populate data and mask\n 
ds = np.full((nap, nfind), np.nan, dtype=image.dtype)\n ms = np.full((nap, nfind), False)\n ds[valid] = wdata.copy()[(ys[valid], xs[valid])]\n ms[valid] = basemask.copy()[(ys[valid], xs[valid])]\n\n # data must not be NaN\n valid[np.isnan(ds)] = False\n # do not use unmask data\n valid[~ms] = False\n\n # must be more than minpoints\n npts = np.sum(valid, axis=0)\n valid[:, npts < minpoints] = False\n\n # must be more than minfrac\n valid[:, (npts / nap) < minfrac] = False\n\n # coplanar?\n if coplanar:\n planevalid = np.any(valid, axis=0)\n tx, ty = dx.copy().astype(float), dy.copy().astype(float)\n tx[:, ~planevalid] = 0\n ty[:, ~planevalid] = 0\n tx = np.nanmax(tx, axis=0) - np.nanmin(tx, axis=0)\n ty = np.nanmax(ty, axis=0) - np.nanmin(ty, axis=0)\n valid[:, tx == 0] = False\n valid[:, ty == 0] = False\n\n # calculate center-of-mass\n cx, cy = dx.copy().astype(float), dy.copy().astype(float)\n w = np.sum(valid, axis=0)\n zi = w == 0\n cx[:, zi] = 0\n cy[:, zi] = 0\n cx, cy = np.nanmean(cx, axis=0), np.nanmean(cy, axis=0)\n cr = np.hypot(cx, cy)\n cr = cr > cdis\n valid[:, cr] = False\n\n pts = np.nonzero(np.any(valid, axis=0))[0]\n\n for pt in pts:\n idx = valid[:, pt]\n din = ds[idx, pt]\n xout = xfind[pt]\n yout = yfind[pt]\n if statistical:\n corrected[yout, xout] = func(din, **kwargs)\n else:\n cin = np.array([xs[idx, pt], ys[idx, pt]]).T\n cout = np.array([[xout], [yout]]).T\n corrected[yout, xout] = func(din, cin, cout, **kwargs)\n found[yout, xout] = True\n\n radius += apstep\n if radius > maxap or found.all():\n break\n\n if not found.all():\n corrected[~found] = cval\n\n return corrected", "def interpolate(self, image):\n return", "def __fill_lip_solid(self, outer, inner):\n inner[0].reverse()\n inner[1].reverse()\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n points = []\n for point in outer_curve:\n points.append(np.array(point, dtype=np.int32))\n for point in inner_curve:\n points.append(np.array(point, dtype=np.int32))\n points = np.array(points, dtype=np.int32)\n self.red_l = int(self.red_l)\n self.green_l = int(self.green_l)\n self.blue_l = int(self.blue_l)\n cv2.fillPoly(self.image, [points], (self.red_l, self.green_l, self.blue_l))", "def interpolate_nans(self):\n\n signal = self.signal\n\n # check for more than one nan in row\n for i in range(len(signal)-1) :\n if np.isnan(signal[i]) and np.isnan(signal[i+1]) :\n raise Exception('There are two nans in a row ask moritz what to do !')\n\n if np.isnan(signal[0]) :\n np.signal[0] = signal[1]\n if np.isnan(signal[-1]) :\n signal[-1] = signal[-2]\n\n for i in range(1,len(signal)-1) :\n if np.isnan(signal[i]):\n signal[i] = (signal[i-1] + signal[i+1])/2", "def correct_band(self, arr, band):\n if self.lut[band] is None:\n # No interpolation, so return NaNs\n new_arr = arr.copy()\n new_arr[:] = np.nan\n return new_arr\n else:\n return self.lut[band](arr)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = list(np.where(np.isnan(sample.T[0])))\n\n else:\n 
numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)", "def problem2():\n\n data = loaddata(\"data/bayerdata.npy\")\n r, g, b = separatechannels(data)\n\n img = assembleimage(r, g, b)\n display_image(img)\n\n img_interpolated = interpolate(r, g, b)\n display_image(img_interpolated)", "def preprocess_images(input_image, soften=None, fill_holes=None):\n ratio = get_scaling_ratio(input_image)\n if soften == None:\n soften = max(soften_amt_deafult * ratio, 1)\n if fill_holes == None:\n fill_holes = round(fill_holes_deafult * ratio)\n fill_holes = max(fill_holes, 1)\n\n # ensure that all points which are transparent have RGB values of 255 (will become white when\n # converted to non-transparent grayscale.)\n input_image = img_as_float32(input_image)\n if len(input_image.shape) == 3 and input_image.shape[2] == 4:\n input_image = rgba2rgb(input_image)\n gray_img = img_as_ubyte(rgb2gray(input_image))\n\n # get the otsu threshold after running a flood fill on the corners, so that those huge clumps of\n # dark pixels don't mess up the statistics too much (we only care about text!)\n thresh = threshold_otsu(\n fill_corners(gray_img, fill_value=255, thresh=5, tol=1, fill_below_thresh=True)\n )\n\n # n.b. here we are setting black pixels from the original image to have a value of 1 (effectively inverting\n # what you would get from a normal binarization, because the math gets easier this way)\n img_bin = img_as_ubyte(gray_img < thresh)\n \n # need to add clipping because of a weird case where the range of the\n # blurred imagewill be from -1 to 1.0000000004\n blurred = np.clip(gaussian(gray_img, soften), -1, 1)\n img_blur_bin = img_as_ubyte(img_as_ubyte(blurred) < thresh)\n\n # now, fill corners of binarized images with black (value 0)\n img_bin = fill_corners(\n img_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n img_blur_bin = fill_corners(\n img_blur_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n\n # run smoothing on the blurred-binarized image so we get blobs of text in neat lines\n kernel = np.ones((fill_holes, fill_holes), np.uint8)\n img_cleaned = binary_opening(binary_closing(img_blur_bin, kernel), kernel)\n\n # find rotation angle of cleaned, smoothed image. 
use that to correct the rotation of the unsmoothed image\n angle = find_rotation_angle(img_cleaned)\n img_cleaned_rot = rotate(img_cleaned, angle, order=0, mode=\"edge\") > 0\n img_bin_rot = rotate(img_bin, angle, order=0, mode=\"edge\") > 0\n\n return img_bin_rot, img_cleaned_rot, angle", "def infill_large_regions(I, npixels=10000, precision=1000):\n assert I.shape[0] == I.shape[1]\n xgrid, ygrid = np.meshgrid(np.arange(I.shape[1]),\n np.arange(I.shape[0]))\n\n I_ = I.copy()\n\n # Exclude two pixels on the border during infilling.\n bad_regions, n_bad_regions = ndimage.label(\n ndimage.binary_dilation(np.isnan(I), iterations=2))\n\n # Use 5 pixel regions surrounding each hole.\n surround = ndimage.grey_dilation(bad_regions, size=5)\n counts, _ = np.histogram(bad_regions, np.arange(n_bad_regions + 1) - .5)\n\n for i in range(1, n_bad_regions):\n if counts[i] > npixels:\n # This is a big region, infill using the GPR method.\n surround_data = (surround == i) & (bad_regions == 0)\n xgrid_s, ygrid_s = xgrid[surround_data], ygrid[surround_data]\n\n # Take N_points points at random, fit a Gaussian process.\n subs = np.random.permutation(np.arange(len(xgrid_s)))[:precision]\n gp_kernel = Matern(length_scale=1,\n length_scale_bounds=(.01, 100), nu=1.5)\n gpr = GaussianProcessRegressor(kernel=gp_kernel, normalize_y=True)\n\n X = np.concatenate((xgrid_s.reshape(-1, 1),\n ygrid_s.reshape(-1, 1)), axis=1)\n gpr.fit(X[subs, :], I[surround_data][subs])\n xgrid_s, ygrid_s = xgrid[bad_regions == i], ygrid[bad_regions == i]\n X_ = np.concatenate((xgrid_s.reshape(-1, 1),\n ygrid_s.reshape(-1, 1)), axis=1)\n y_ = gpr.predict(X_)\n I_[bad_regions == i] = y_\n return I_", "def _pchip_coeffs_i(X, Y, i):\n\n # Pre-assign sizes for PCHIP variables.\n h = [0.0, 0.0, 0.0]\n δ = [0.0, 0.0, 0.0]\n d = [0.0, 0.0]\n\n # Check whether x is adjacent to the start or end of this X\n at_start = (i == 0) or np.isnan(X[i - 1] + Y[i - 1])\n at_end = (i == len(X) - 2) or np.isnan(X[i + 2] + Y[i + 2])\n\n if at_start and at_end:\n\n # if np.isnan(X[i + 1]) or np.isnan(Y[i + 1]):\n # # Only one valid data point. 
Leave the interpolant as NaN.\n # d[0], c, b = np.nan, np.nan, np.nan\n\n # else:\n\n # ||| X[0] <= x <= X[1] ||| Revert to Linear Interpolation\n # If actually only one non-NaN data point, then d[0] will be NaN, so\n # interpolant will evaluate to NaN.\n d[0] = (Y[i + 1] - Y[i]) / (X[i + 1] - X[i])\n C3, C2 = 0.0, 0.0\n\n else:\n if at_start:\n # ||| X[0] <= x <= X[1] < X[2] --->\n h[1] = X[i + 1] - X[i]\n h[2] = X[i + 2] - X[i + 1]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Noncentered, shape-preserving, three-point formula:\n d[0] = ((2.0 * h[1] + h[2]) * δ[1] - h[1] * δ[2]) / (h[1] + h[2])\n if np.sign(d[0]) != np.sign(δ[1]):\n d[0] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[2])) and (\n np.abs(d[0]) > np.abs(3.0 * δ[1])\n ):\n d[0] = 3.0 * δ[1]\n\n # Standard PCHIP formula\n if np.sign(δ[1]) * np.sign(δ[2]) > 0.0:\n w1 = 2.0 * h[2] + h[1]\n w2 = h[2] + 2.0 * h[1]\n d[1] = (w1 + w2) / (w1 / δ[1] + w2 / δ[2])\n else:\n d[1] = 0.0\n\n elif at_end:\n # <--- X[i-1] < X[i] < x <= X[i+1] |||\n h[0] = X[i] - X[i - 1]\n h[1] = X[i + 1] - X[i]\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n\n # Standard PCHIP formula\n if np.sign(δ[0]) * np.sign(δ[1]) > 0.0:\n w1 = 2.0 * h[1] + h[0]\n w2 = h[1] + 2.0 * h[0]\n d[0] = (w1 + w2) / (w1 / δ[0] + w2 / δ[1])\n else:\n d[0] = 0.0\n\n # Noncentered, shape-preserving, three-point formula:\n d[1] = ((h[0] + 2.0 * h[1]) * δ[1] - h[1] * δ[0]) / (h[0] + h[1])\n if np.sign(d[1]) != np.sign(δ[1]):\n d[1] = 0.0\n elif (np.sign(δ[1]) != np.sign(δ[0])) and (\n np.abs(d[1]) > np.abs(3 * δ[1])\n ):\n\n d[1] = 3.0 * δ[1]\n\n else:\n # <--- X[i-1] < X[i] < x <= X[i+1] < X[i+2] --->\n h[0] = X[i] - X[i - 1] # Way faster to do this\n h[1] = X[i + 1] - X[i] # than\n h[2] = X[i + 2] - X[i + 1] # diff(X(i-1:i+3))\n δ[0] = (Y[i] - Y[i - 1]) / h[0]\n δ[1] = (Y[i + 1] - Y[i]) / h[1]\n δ[2] = (Y[i + 2] - Y[i + 1]) / h[2]\n\n # Standard PCHIP formula\n for j in range(2):\n if np.sign(δ[j]) * np.sign(δ[j + 1]) > 0.0:\n w1 = 2.0 * h[j + 1] + h[j]\n w2 = h[j + 1] + 2.0 * h[j]\n d[j] = (w1 + w2) / (w1 / δ[j] + w2 / δ[j + 1])\n else:\n d[j] = 0.0\n\n # Polynomial coefficients for this piece\n dzzdx = (δ[1] - d[0]) / h[1]\n dzdxdx = (d[1] - δ[1]) / h[1]\n C3 = (dzdxdx - dzzdx) / h[1] # coeff of the 3rd degree term (x^3)\n C2 = 2 * dzzdx - dzdxdx # coeff of 2nd degree term (x^2)\n\n # The following code evaluates the `d`'th deriviative of the cubic\n # interpolant at `x`.\n # s = x - X[i]\n # if d == 0:\n # y = Y[i] + s * (d[0] + s * (C2 + s * C3))\n # elif d == 1: # first derivative\n # y = d[0] + s * (2 * C2 + 3 * s * C3)\n # elif d == 2: # second derivative\n # y = 2 * C2 + 6 * s * C3\n # elif d == 3: # third derivative\n # y = 6 * C3\n # else:\n # y = 0.0\n # return y\n\n # Faster to return tuple than build an np.array just to deconstruct it later\n return C3, C2, d[0], Y[i]", "def __smoothen_color(self, outer, inner):\n outer_curve = zip(outer[0], outer[1])\n inner_curve = zip(inner[0], inner[1])\n x_points = []\n y_points = []\n for point in outer_curve:\n x_points.append(point[0])\n y_points.append(point[1])\n for point in inner_curve:\n x_points.append(point[0])\n y_points.append(point[1])\n img_base = np.zeros((self.height, self.width))\n cv2.fillConvexPoly(img_base, np.array(np.c_[x_points, y_points], dtype='int32'), 1)\n img_mask = cv2.GaussianBlur(img_base, (81, 81), 0) #51,51\n img_blur_3d = np.ndarray([self.height, self.width, 3], dtype='float')\n img_blur_3d[:, :, 0] = img_mask\n img_blur_3d[:, :, 1] = img_mask\n 
img_blur_3d[:, :, 2] = img_mask\n self.im_copy = (img_blur_3d * self.image * 0.7 + (1 - img_blur_3d * 0.7) * self.im_copy).astype('uint8')", "def trim_floating_solid(im):\n holes = find_disconnected_voxels(~im)\n im[holes] = True\n return im", "def infill_small_regions(I):\n n_tiles = 4 # ntiles horizontally.\n assert I.shape[0] == I.shape[1]\n tile_size = I.shape[0] // (n_tiles - 1)\n tile_delta = tile_size // 2\n\n k = 0\n I_stack = np.ones(I.shape + (2, 2)) * np.nan\n for j in range(n_tiles * 2 - 1):\n for i in range(n_tiles * 2 - 1):\n dy = slice(tile_delta * j, tile_delta * (j + 2))\n dx = slice(tile_delta * i, tile_delta * (i + 2))\n S = I[dy, dx]\n M = ndimage.binary_dilation(np.isnan(S), iterations=2)\n image_inpainted = inpaint.inpaint_biharmonic(S, M, multichannel=False)\n I_stack[dy, dx, j % 2, i % 2] = image_inpainted\n k += 1\n return np.nanmean(np.nanmean(I_stack, axis=2), axis=2)" ]
[ "0.60985243", "0.60102165", "0.5806957", "0.58024496", "0.578654", "0.57636756", "0.5724484", "0.56875443", "0.5679379", "0.5644574", "0.5640386", "0.56197864", "0.5557094", "0.552808", "0.5472851", "0.5463112", "0.54055804", "0.53835154", "0.53573185", "0.53321165", "0.53060234", "0.5287876", "0.5247045", "0.5218275", "0.52132547", "0.5189215", "0.5188904", "0.51561683", "0.515382", "0.51454693" ]
0.6175499
0
Clip a polygon to a square unit pixel. Uses the Sutherland-Hodgman polygon clipping algorithm. The pixel center for pixel (i, j) is at (i + 0.5, j + 0.5).
def polyclip(i, j, pol_x, pol_y, area=False): n = len(pol_x) nout = n + 4 px_out, py_out = [0] * nout, [0] * nout clip_vals = [i, i + 1, j + 1, j] for ctype in range(4): cv = clip_vals[ctype] if ctype == 0: inside = [px > i for px in pol_x] elif ctype == 1: inside = [(px < i + 1) for px in pol_x] elif ctype == 2: inside = [(py < j + 1) for py in pol_y] else: inside = [py > j for py in pol_y] if all(inside): continue shiftp1 = inside.copy() shiftp1.insert(0, shiftp1.pop(-1)) crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)] pind = 0 for k in range(n): px, py = pol_x[k], pol_y[k] if crosses[k]: # out->in or in->out, add intersection ind = n - 1 if k == 0 else k - 1 sx, sy = pol_x[ind], pol_y[ind] try: if ctype <= 1: # left or right px_out[pind] = cv py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx) else: # top or bottom px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy) py_out[pind] = cv except ZeroDivisionError: # pragma: no cover px_out[pind] = np.nan py_out[pind] = np.nan pind += 1 if inside[k]: # out->in or in->in, add 2nd point px_out[pind] = px py_out[pind] = py pind += 1 if pind >= nout - 2: nout *= 2 px_out = px_out + [0] * nout py_out = py_out + [0] * nout nout *= 2 if pind == 0: # polygon is entirely outside this line return None, None n = pind pol_x = px_out[:n].copy() pol_y = py_out[:n].copy() if area: if pol_x is None: # pragma: no cover return 0.0 shiftx = pol_x.copy() shifty = pol_y.copy() shiftx.append(shiftx.pop(0)) shifty.append(shifty.pop(0)) a1 = [p[0] * p[1] for p in zip(pol_x, shifty)] a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)] a = [p[0] - p[1] for p in zip(a1, a2)] return abs(sum(a)) / 2 return pol_x, pol_y
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked", "def shrink_polygon(self,polygon, offset = 1):\r\n \r\n import numpy as np\r\n import copy\r\n import math\r\n \r\n def angle(x1, y1, x2, y2):\r\n numer = (x1*x2 + y1*y2)\r\n denom = np.sqrt((x1**2 + y1**2) * (x2**2 + y2**2))\r\n print(numer)\r\n print(denom)\r\n print( math.acos(numer/denom) )\r\n return math.acos(numer/denom) \r\n \r\n def cross_sign(x1, y1, x2, y2):\r\n return x1*y2 > x2*y1\r\n \r\n # If the polygon is closed, un-close it\r\n closed = False\r\n if np.linalg.norm(polygon[0,:]-polygon[-1,:]) < 1E-10:\r\n polygon = polygon[:-1,:]\r\n closed = True\r\n \r\n # Make sure polygon is counter-clockwise\r\n if self.are_vertices_clockwise(np.row_stack((polygon,polygon[0,:]))):\r\n polygon = np.flipud(polygon)\r\n \r\n polygon_shrinked = copy.copy(polygon)\r\n \r\n for idx in range(polygon.shape[0]):\r\n \r\n if idx == polygon.shape[0]-1:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = 0\r\n else:\r\n vtx_before = idx-1\r\n vtx_center = idx\r\n vtx_after = idx+1\r\n \r\n side_before = polygon[vtx_center,:] - polygon[vtx_before,:]\r\n side_after = polygon[vtx_after,:] - polygon[vtx_center,:]\r\n \r\n side_before /= np.linalg.norm(side_before)\r\n side_after /= np.linalg.norm(side_after)\r\n \r\n nvec_before = np.asarray([-side_before[1], side_before[0]])\r\n nvec_after = np.asarray([-side_after[1], side_after[0]])\r\n \r\n vtx1_before = polygon[vtx_before,:] + 
nvec_before*offset\r\n vtx2_before = polygon[vtx_center,:] + nvec_before*offset\r\n \r\n vtx1_after = polygon[vtx_center,:] + nvec_after*offset\r\n vtx2_after = polygon[vtx_after,:] + nvec_after*offset\r\n \r\n p = vtx1_before\r\n r = (vtx2_before-vtx1_before)\r\n \r\n q = vtx1_after\r\n s = (vtx2_after-vtx1_after)\r\n \r\n if np.cross(r,s) == 0:\r\n \r\n # Lines are collinear\r\n polygon_shrinked[idx,:] = vtx2_before\r\n \r\n else:\r\n \r\n # Lines are not collinear\r\n t = np.cross(q - p,s)/(np.cross(r,s))\r\n \r\n # This is the intersection point\r\n polygon_shrinked[idx,:] = p + t*r\r\n \r\n if closed:\r\n polygon_shrinked = np.row_stack((\r\n polygon_shrinked,\r\n polygon_shrinked[0,:]))\r\n \r\n return polygon_shrinked", "def square_clip(points, bounds):\n\n # Extact x y coordinates from cloud\n xy = points[[\"x\", \"y\"]]\n\n # Create masks for each axis\n x_in = (xy[\"x\"] >= bounds[0]) & (xy[\"x\"] <= bounds[2])\n y_in = (xy[\"y\"] >= bounds[1]) & (xy[\"y\"] <= bounds[3])\n stack = np.stack((x_in, y_in), axis=1)\n in_clip = np.all(stack, axis=1)\n\n return in_clip", "def clip(t):\n x, y = t.pos()\n nx = ny = None\n if x > W / 2:\n nx = W / 2\n elif x < -W / 2:\n nx = -W / 2\n\n if y > H / 2:\n ny = H / 2\n elif y < -H / 2:\n ny = -H / 2\n\n if nx is not None:\n t.setx(nx)\n if ny is not None:\n t.sety(ny)", "def clip_polygon(subject, clipper, operation = 'difference'):\n Subject = Polygon()\n Clipper = Polygon()\n\n for s in subject:\n Subject.add(Vertex(s))\n\n for c in clipper:\n Clipper.add(Vertex(c))\n\n clipped = Clipper.difference(Subject)\\\n if operation == 'reversed-diff'\\\n else Subject.__getattribute__(operation)(Clipper)\n\n clipped = [(ext.points,[hole.points for hole in holes]) for ext,holes in clipped]\n return clipped", "def test_clip_points_by_polygons_with_holes0(self):\n\n # Define an outer ring\n outer_ring = numpy.array([[106.79, -6.233],\n [106.80, -6.24],\n [106.78, -6.23],\n [106.77, -6.21],\n [106.79, -6.233]])\n\n # Define inner rings\n inner_rings = [numpy.array([[106.77827, -6.2252],\n [106.77775, -6.22378],\n [106.78, -6.22311],\n [106.78017, -6.22530],\n [106.77827, -6.2252]])[::-1],\n numpy.array([[106.78652, -6.23215],\n [106.78642, -6.23075],\n [106.78746, -6.23143],\n [106.78831, -6.23307],\n [106.78652, -6.23215]])[::-1]]\n\n v = Vector(geometry=[Polygon(outer_ring=outer_ring,\n inner_rings=inner_rings)])\n assert v.is_polygon_data\n\n # Write it to file\n tmp_filename = unique_filename(suffix='.shp')\n v.write_to_file(tmp_filename)\n\n # Read polygon it back\n L = read_layer(tmp_filename)\n P = L.get_geometry(as_geometry_objects=True)[0]\n\n outer_ring = P.outer_ring\n inner_ring0 = P.inner_rings[0]\n inner_ring1 = P.inner_rings[1]\n\n # Make some test points\n points = generate_random_points_in_bbox(outer_ring, 1000, seed=13)\n\n # Clip to outer ring, excluding holes\n indices = inside_polygon(points, P.outer_ring, holes=P.inner_rings)\n\n # Sanity\n for point in points[indices, :]:\n # Must be inside outer ring\n assert is_inside_polygon(point, outer_ring)\n\n # But not in any of the inner rings\n assert not is_inside_polygon(point, inner_ring0)\n assert not is_inside_polygon(point, inner_ring1)\n\n if False:\n # Store for visual check\n pol = Vector(geometry=[P])\n tmp_filename = unique_filename(suffix='.shp')\n pol.write_to_file(tmp_filename)\n print 'Polygon with holes written to %s' % tmp_filename\n\n pts = Vector(geometry=points[indices, :])\n tmp_filename = unique_filename(suffix='.shp')\n pts.write_to_file(tmp_filename)\n print 
'Clipped points written to %s' % tmp_filename", "def clip_raster_with_polygon(src_raster, src_poly, all_touched=False, no_data_value=0):\n assert src_raster.geo_transform is not None, \"src_raster.geo_transform should not be None\"\n src_poly_copy = src_poly.copy()\n src_poly_copy['value'] = 1\n src_poly_raster = rasterize_layer_by_ref_raster(src_poly_copy, src_raster, use_attribute='value', all_touched=all_touched, no_data_value=0)\n dst_raster = src_raster.copy()\n dst_raster.data[~(src_poly_raster.data[:, :, 0].astype(bool))] = no_data_value\n \n row_idxs, col_idxs, bands_idxs = np.where(src_poly_raster.data!=0)\n rmin, rmax, cmin, cmax = np.min(row_idxs), np.max(row_idxs), np.min(col_idxs), np.max(col_idxs)\n dst_raster.data = dst_raster.data[rmin:rmax+1, cmin:cmax+1]\n\n coords = tgp.npidxs_to_coords([(rmin, cmin)], src_raster.geo_transform)[0]\n geo_transform = np.array(dst_raster.geo_transform)\n geo_transform[[0, 3]] = coords\n dst_raster.geo_transform = geo_transform\n\n # src_ds = src_raster.to_gdal_ds()\n # temp_dir = tgp.create_temp_dir_when_not_exists()\n # src_shp_fp = os.path.join(temp_dir, 'src_poly.shp')\n # src_poly.to_file(src_shp_fp)\n # dst_ds = gdal.Warp('', src_ds, format= 'MEM', cutlineDSName=src_shp_fp, cropToCutline=True)\n # dst_raster = tgp.read_gdal_ds(dst_ds)\n return dst_raster", "def clip(in_file, out_file, area_file):\n bb = load_bb(area_file)\n in_las = laspy.file.File(in_file, mode='r')\n out_las = laspy.file.File(out_file, mode='w', header=in_las.header)\n\n inside = (in_las.x > bb.x_min) & (in_las.x < bb.x_max) & (in_las.y > bb.y_min) & (in_las.y < bb.y_max)\n out_las.points = in_las.points[inside]\n out_las.close()", "def clip_image(coords_poly, fname):\n with rio.open(\"%s.tif\" % fname) as src:\n out_image, out_transform = mask.mask(src, [to_geojson(coords_poly)],\n crop=True, nodata=-9999)\n masked_image = ma.masked_equal(out_image, -9999)\n return masked_image", "def clip(subjectPolygon, clipPolygon):\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n return(outputList)", "def polygon_clip(self, subjectPolygon, clipPolygon):\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n\n def computeIntersection():\n dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]\n dp = [ s[0] - e[0], s[1] - e[1] ]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n 
outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return(outputList)", "def clip(self, clipbox):\r\n pmin, pmax = clipbox\r\n ind = []\r\n vlt = []\r\n # Direct elimination of out of bounds edges and vertices\r\n for i in range(len(self.vl)):\r\n if self.vl[i][0] < pmin[0] or self.vl[i][1] < pmin[1] or \\\r\n self.vl[i][0] > pmax[0] or self.vl[i][1] > pmax[1]:\r\n ind.append(i)\r\n else:\r\n vlt.append(self.vl[i])\r\n elt = filter((lambda x: (x[0] not in ind) and (x[1] not in ind)),\r\n self.el)\r\n li = filter((lambda x: x not in ind), range(len(self.vl)))\r\n # We rename the indices in the trimmed edge list\r\n lf = range(len(self.vl) - len(ind))\r\n equiv = {}\r\n for i in range(len(li)):\r\n if li[i] != lf[i]:\r\n equiv[li[i]] = lf[i]\r\n\r\n for i in range(len(elt)):\r\n if elt[i][0] in equiv:\r\n x = equiv[elt[i][0]]\r\n else:\r\n x = elt[i][0]\r\n if elt[i][1] in equiv:\r\n y = equiv[elt[i][1]]\r\n else:\r\n y = elt[i][1]\r\n elt[i] = (x, y)\r\n\r\n self.vl = vlt\r\n self.el = elt\r\n self.minmax()", "def polygon_clip(subjectPolygon, clipPolygon):\n\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return outputList", "def crop_to_square(self, image):\n orig_height, orig_width, orig_channels = image.shape\n if orig_height > orig_width:\n return image[:orig_width, ...]\n elif orig_height < orig_width:\n return image[:, :orig_height, ...]\n return image", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0])\n \n def computeIntersection():\n dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ]\n dp = [ s[0] - e[0], s[1] - e[1] ]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0] \n n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])\n return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return(outputList)", "def polygon_clip(subjectPolygon, clipPolygon):\n def inside(p):\n return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])\n\n def computeIntersection():\n dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]\n dp = [s[0] - e[0], s[1] - e[1]]\n n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]\n n2 = s[0] * e[1] - s[1] * e[0]\n n3 = 1.0 / (dc[0] * dp[1] - 
dc[1] * dp[0])\n return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]\n\n outputList = subjectPolygon\n cp1 = clipPolygon[-1]\n\n for clipVertex in clipPolygon:\n cp2 = clipVertex\n inputList = outputList\n outputList = []\n s = inputList[-1]\n\n for subjectVertex in inputList:\n e = subjectVertex\n if inside(e):\n if not inside(s):\n outputList.append(computeIntersection())\n outputList.append(e)\n elif inside(s):\n outputList.append(computeIntersection())\n s = e\n cp1 = cp2\n if len(outputList) == 0:\n return None\n return (outputList)", "def raw_noise_2d(x, y):\n # Noise contributions from the three corners\n n0, n1, n2 = 0.0, 0.0, 0.0\n\n # Skew the input space to determine which simplex cell we're in\n F2 = 0.5*(sqrt(3.0) - 1.0)\n # Hairy factor for 2D\n s = (x + y) * F2\n i = fastfloor( x + s )\n j = fastfloor( y + s )\n\n G2 = (3.0 - sqrt(3.0))/6.0\n t = (i + j)*G2\n # Unskew the cell origin back to (x,y) space\n X0 = i-t\n Y0 = j-t\n # The x,y distances from the cell origin\n x0 = x-X0\n y0 = y-Y0\n\n # For the 2D case, the simplex shape is an equilateral triangle.\n # Determine which simplex we are in.\n # Offsets for second (middle) corner of simplex in (i,j) coords\n if x0>y0: # lower triangle, XY order: (0,0)->(1,0)->(1,1)\n i1=1\n j1=0\n else: # upper triangle, YX order: (0,0)->(0,1)->(1,1)\n i1=0\n j1=1\n\n # A step of (1,0) in (i,j) means a step of (1-c,-c) in (x,y), and\n # a step of (0,1) in (i,j) means a step of (-c,1-c) in (x,y), where\n # c = (3-sqrt(3))/6\n x1 = x0 - i1 + G2 # Offsets for middle corner in (x,y) unskewed coords\n y1 = y0 - j1 + G2\n x2 = x0 - 1.0 + 2.0*G2 # Offsets for last corner in (x,y) unskewed coords\n y2 = y0 - 1.0 + 2.0*G2\n\n # Work out the hashed gradient indices of the three simplex corners\n ii = i & 255\n jj = j & 255\n gi0 = perm[ii+perm[jj]] % 12\n gi1 = perm[ii+i1+perm[jj+j1]] % 12\n gi2 = perm[ii+1+perm[jj+1]] % 12\n\n # Calculate the contribution from the three corners\n t0 = 0.5 - x0*x0 - y0*y0\n if t0 < 0:\n n0 = 0.0\n else:\n t0 *= t0\n # (x,y) of grad3 used for 2D gradient\n n0 = t0 * t0 * dot2(grad3[gi0][0], grad3[gi0][1], x0, y0)\n\n t1 = 0.5 - x1*x1 - y1*y1\n if t1 < 0:\n n1 = 0.0\n else:\n t1 *= t1\n n1 = t1 * t1 * dot2(grad3[gi1][0], grad3[gi1][1], x1, y1)\n\n t2 = 0.5 - x2*x2 - y2*y2\n if t2 < 0:\n n2 = 0.0\n else:\n t2 *= t2\n n2 = t2 * t2 * dot2(grad3[gi2][0], grad3[gi2][1], x2, y2)\n\n # Add contributions from each corner to get the final noise value.\n # The result is scaled to return values in the interval [-1,1].\n return 70.0*(n0 + n1 + n2)", "def sharp_ground(X):\n return img_conv(X, kernel_sharp)", "def crop_to_square(image):\n\n if image is None:\n return None\n w, h = (image.shape[1], image.shape[0])\n w = float(w)\n h = float(h)\n\n # only crop images automatically if the aspect ratio is not bigger than 2 or not smaller than 0.5\n aspectRatio = w / h\n if aspectRatio > 3 or aspectRatio < 0.3:\n return None\n if aspectRatio == 1.0:\n return image\n \n # the shortest edge is the edge of our new square. 
b is the other edge\n a = min(w, h)\n b = max(w, h)\n\n # get cropping position\n x = (b - a) / 2.0\n\n # depending which side is longer we have to adjust the points\n # Heigth is longer\n if h > w:\n upperLeft = (0, x) \n else:\n upperLeft = (x, 0)\n cropW = cropH = a \n return crop_image(image, upperLeft[0], upperLeft[1], cropW, cropH)", "def clip_by_shp(self, shp_path):\n shp = ogr.Open(shp_path)\n return self.clip_by_layer(shp.GetLayer())", "def clipToUnit(x0,y0,x1,y1):\n if x0 < 0.0 or x0 > 1.0 or y0 < 0.0 or y0 > 1.0:\n raise Exception(\"(x0,y0) must be in unit square (got [%s,%s])\" % (x0,y0))\n if x1 < 0.0: # X crosses y=0 axis\n t = x0 / abs(x1-x0)\n xc = 0.0\n yc = y0 + (y1-y0) * t\n elif x1 > 1.0: # X crosses y=1 axis\n t = (1.0-x0) / abs(x1-x0)\n xc = 1.0\n yc = y0 + (y1-y0) * t\n else: # X not needing clipping\n xc = x1\n yc = y1\n if yc < 0.0: # Y crosses x=0 axis\n t = y0 / abs(yc-y0)\n yc = 0.0\n xc = x0 + (xc-x0) * t\n elif yc > 1.0: # Y crosses x=1 axis\n t = (1.0-y0) / abs(yc-y0)\n yc = 1.0\n xc = x0 + (xc-x0) * t\n else: # X not needing clipping\n pass\n return xc, yc", "def clip(self, window):\n\n def connect_points(clipped, side1, side2, window):\n \"\"\" Connects points of the window. \"\"\"\n edge = side1\n while edge != side2:\n clipped.append(window.points[0][edge])\n edge = (edge - 1) % 4\n\n boundaries = window.real_boundaries\n clipped = []\n for face in self._points:\n new_face = []\n entered, exited = None, None\n for i in range(len(face) - 1):\n points, side = Object._clip_line(\n face[i], face[i + 1], *boundaries[0], *boundaries[1])\n\n if not points: # clipped line is outside window\n continue\n\n if side[0] is not None: # entered\n if exited is not None:\n connect_points(new_face, exited, side[0], window)\n else:\n entered = side[0]\n\n if side[1] is not None: # exited\n exited = side[1]\n new_face.append(points[0])\n new_face.append(points[1])\n else:\n new_face.append(points[0])\n\n if new_face and face[0] == face[-1]:\n if entered is not None:\n connect_points(new_face, exited, entered, window)\n new_face.append(new_face[0])\n\n clipped.append(new_face)\n\n self._points = clipped", "def non_zero_polygon(polygon, suppress_warning=False):\n\t\tif polygon.area > 0:\n\t\t\treturn polygon\n\n\t\tif not suppress_warning:\n\t\t\tprint(\"Warning: polygon has zero area; dilating\", file=sys.stderr)\n\t\treturn polygon.buffer(0.05, 1).convex_hull", "def clip(self, image, x=0, y=0, w=0, h=0, oX=0, oY=0):\n if(w==0):\n w = image.get_rect()[2]\n if(h==0):\n h = image.get_rect()[3]\n needleW = w + 2*math.sqrt(oX*oX)\n needleH = h + 2*math.sqrt(oY*oY)\n imageOut = pygame.Surface((needleW, needleH))\n imageOut.fill((255,255,0))\n imageOut.set_colorkey((255,255,0))\n imageOut.blit(image, (needleW/2-w/2+oX, needleH/2-h/2+oY), pygame.Rect(x,y,w,h))\n return imageOut", "def clip_singletile_polys(idx, tiles_gdf, all_polys_series, tile_size):\n tile_poly = get_specific_tile(idx = idx, tiles_gdf=tiles_gdf)\n # tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n cropped_polys = [poly for poly in all_polys_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n cropped_polys_gdf.plot()\n return cropped_polys_gdf", "def crop_bounding_box(im, x, y, w, h):\n return im[y:y+h, x:x+w]", "def crop_img(image, bound):\n scale = 1.01 # 1%\n return image.crop((bound.vertices[0].x // scale, bound.vertices[0].y // scale,\n int(bound.vertices[2].x * scale), int(bound.vertices[2].y) * scale))", "def 
sharpen(im):\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n im = cv2.filter2D(im, -1, kernel)\n return im", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))", "def crop(img, i, j, h, w):\n return img.crop((j, i, j + w, i + h))" ]
[ "0.5896273", "0.5896273", "0.5873075", "0.57877976", "0.573683", "0.56885505", "0.568509", "0.56691444", "0.5591874", "0.5561925", "0.5535751", "0.5516805", "0.5503301", "0.55026895", "0.54918116", "0.5490103", "0.54314256", "0.5428148", "0.54224825", "0.5398284", "0.53432286", "0.5328293", "0.5322517", "0.5318122", "0.5288019", "0.52723885", "0.5213096", "0.520765", "0.520048", "0.520048" ]
0.6021302
0
Set document comment. Raise CardinalityError if comment already set.
def set_doc_comment(self, doc, comment): if not self.doc_comment_set: self.doc_comment_set = True doc.comment = comment else: raise CardinalityError('Document::Comment')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n if validations.validate_doc_comment(comment):\n doc.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')", "def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment", "def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment", "def set_comment(self, comment):\n\t\tself.comment_ = comment", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def set_comment(self, comment):\n self.comment_text = str(comment)", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def set_snippet_comment(self, doc, comment):\n self.assert_snippet_exists()\n if not self.snippet_comment_set:\n self.snippet_comment_set = True\n if validations.validate_snip_comment(comment):\n doc.snippet[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Snippet::SnippetComment')\n else:\n raise CardinalityError('Snippet::SnippetComment')", "def comment(self, comment): # type: (str) -> None\n self._tmp_comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def set_creation_comment(self, doc, comment):\n if not self.creation_comment_set:\n self.creation_comment_set = True\n if validations.validate_creation_comment(comment):\n doc.creation_info.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('CreationInfo::Comment')\n else:\n raise CardinalityError('CreationInfo::Comment')", "def set_lic_comment(self, doc, comment):\n if self.has_extr_lic(doc):\n if not self.extr_lic_comment_set:\n self.extr_lic_comment_set = True\n self.extr_lic(doc).comment = comment\n return True\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')", "def set_file_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_comment_set:\n self.file_comment_set = True\n if validations.validate_file_comment(text):\n self.file(doc).comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('File::Comment')\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')", "def comment(self, comment):\r\n\r\n core.FW_conf['connection'].comment(comment)", "def comment(self, value: str):\n self._comment = value", "def add_review_comment(self, doc, comment):\n if len(doc.reviews) != 0:\n if not self.review_comment_set:\n self.review_comment_set = True\n if validations.validate_review_comment(comment):\n doc.reviews[-1].comment = str_from_text(comment)\n return True\n else:\n raise 
SPDXValueError('ReviewComment::Comment')\n else:\n raise CardinalityError('ReviewComment')\n else:\n raise OrderError('ReviewComment')", "def set_lic_comment(self, doc, comment):\n if self.has_extr_lic(doc):\n if not self.extr_lic_comment_set:\n self.extr_lic_comment_set = True\n if validations.validate_is_free_form_text(comment):\n self.extr_lic(doc).comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::comment')\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')", "def edit(self, comment):\n try:\n self.comment = comment\n self.save()\n except Exception as e:\n raise Exception(\"Failed to save, rolling back transaction.\" \\\n \"Details: %s\" % e)", "def set_comment(self, obj, cursor):\n if isinstance(obj, typedesc.T):\n obj.comment = cursor.brief_comment\n return", "def set_pkg_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_comment_set:\n self.package_comment_set = True\n if validations.validate_pkg_comment(text):\n doc.package.comment = str_from_text(text)\n else:\n raise SPDXValueError('Package::Comment')\n else:\n raise CardinalityError('Package::Comment')" ]
[ "0.83034164", "0.77773416", "0.7719323", "0.7694882", "0.7687957", "0.7328972", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7308246", "0.7109099", "0.7056695", "0.70507073", "0.70507073", "0.6976152", "0.69512504", "0.677959", "0.6639237", "0.6630765", "0.6612347", "0.65546423", "0.6518936", "0.6498924", "0.6468406" ]
0.8777668
0
Set license name. Raise SPDXValueError if name is not str or utils.NoAssert. Raise CardinalityError if it is already set. Raise OrderError if no license id defined.
def set_lic_name(self, doc, name): if self.has_extr_lic(doc): if not self.extr_lic_name_set: self.extr_lic_name_set = True if validations.validate_extr_lic_name(name, True): self.extr_lic(doc).full_name = name return True else: raise SPDXValueError('ExtractedLicense::Name') else: raise CardinalityError('ExtractedLicense::Name') else: raise OrderError('ExtractedLicense::Name')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_lic_name(self, doc, name):\n if self.has_extr_lic(doc):\n if not self.extr_lic_name_set:\n self.extr_lic_name_set = True\n if validations.validate_extr_lic_name(name):\n self.extr_lic(doc).full_name = name\n return True\n else:\n raise SPDXValueError('ExtractedLicense::Name')\n else:\n raise CardinalityError('ExtractedLicense::Name')\n else:\n raise OrderError('ExtractedLicense::Name')", "def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name", "def setName(self, name): \n\n self._name = name", "def setName(self, name):\n # type: (str)->None\n self._validator.validate_one('name', VALID_OPTS['name'], name)\n self._ifAttributes['name'] = str(name)", "def set_name(self, name):\n\n\t\tif name is not None and not isinstance(name, str):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)\n\t\t\n\t\tself.__name = name\n\t\tself.__key_modified['name'] = 1", "def setName(self, name):\n self._name = name", "def setName(self, name):\n self._name = name", "def name(self, name) :\n\t\ttry :\n\t\t\tself._name = name\n\t\texcept Exception as e:\n\t\t\traise e", "def set_doc_name(self, doc, name):\n if not self.doc_name_set:\n doc.name = name\n self.doc_name_set = True\n return True\n else:\n raise CardinalityError('Document::Name')", "def setName(self, name):\n self.name = str(name)", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def setName(self, name):\n self.name = name", "def set_name(self, name):\n\t\tself.name_ = name", "def set_file_license_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_license_comment_set:\n self.file_license_comment_set = True\n if validations.validate_file_lics_comment(text):\n self.file(doc).license_comment = str_from_text(text)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')", "def legal_name(self, legal_name: str):\n\n self._legal_name = legal_name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n self.name = name # overwrite the existing name with the input name", "def set_name(self, name):\n assert isinstance(name, str), 'Name must be string'\n self._name = name", "def setName(self,value):\n assert value == None or type(value) == str, repr(value)+' is not a valid name'\n self._name = value", "def set_name(self, name):\r\n self.__name = name", "def set_name(self, name):\n self._name = name", "def set_name(self, name: str) -> None:\n lib.wlr_seat_set_name(self._ptr, name.encode())", "def setname(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.__name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name" ]
[ "0.74007356", "0.6421445", "0.6295538", "0.62915987", "0.62772936", "0.62594885", "0.62594885", "0.62465584", "0.6132982", "0.6127764", "0.6125195", "0.6125195", "0.6125195", "0.6125195", "0.6112782", "0.6059133", "0.605745", "0.605508", "0.605508", "0.60507804", "0.602856", "0.60128355", "0.6011199", "0.6007523", "0.600507", "0.59887695", "0.59887695", "0.5973975", "0.5973975", "0.5973975" ]
0.7390864
1
Set license comment. Raise CardinalityError if it is already set. Raise OrderError if no license ID defined.
def set_lic_comment(self, doc, comment): if self.has_extr_lic(doc): if not self.extr_lic_comment_set: self.extr_lic_comment_set = True self.extr_lic(doc).comment = comment return True else: raise CardinalityError('ExtractedLicense::comment') else: raise OrderError('ExtractedLicense::comment')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_file_license_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_license_comment_set:\n self.file_license_comment_set = True\n if validations.validate_file_lics_comment(text):\n self.file(doc).license_comment = str_from_text(text)\n else:\n raise SPDXValueError('File::LicenseComment')\n else:\n raise CardinalityError('File::LicenseComment')\n else:\n raise OrderError('File::LicenseComment')", "def set_pkg_license_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_license_comment_set:\n self.package_license_comment_set = True\n if validations.validate_pkg_lics_comment(text):\n doc.package.license_comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('Package::LicenseComment')\n else:\n raise CardinalityError('Package::LicenseComment')", "def set_lic_comment(self, doc, comment):\n if self.has_extr_lic(doc):\n if not self.extr_lic_comment_set:\n self.extr_lic_comment_set = True\n if validations.validate_is_free_form_text(comment):\n self.extr_lic(doc).comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::comment')\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')", "def set_snippet_lic_comment(self, doc, text):\n self.assert_snippet_exists()\n if not self.snippet_lic_comment_set:\n self.snippet_lic_comment_set = True\n if validations.validate_snip_lic_comment(text):\n doc.snippet[-1].license_comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('Snippet::SnippetLicenseComments')\n else:\n raise CardinalityError('Snippet::SnippetLicenseComments')", "def set_pkg_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_comment_set:\n self.package_comment_set = True\n if validations.validate_pkg_comment(text):\n doc.package.comment = str_from_text(text)\n else:\n raise SPDXValueError('Package::Comment')\n else:\n raise CardinalityError('Package::Comment')", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n if validations.validate_doc_comment(comment):\n doc.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')", "def license(self, license):\n\n self._license = license", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n doc.comment = comment\n else:\n raise CardinalityError('Document::Comment')", "def set_lic_text(self, doc, text):\n if self.has_extr_lic(doc):\n if not self.extr_text_set:\n self.extr_text_set = True\n self.extr_lic(doc).text = text\n return True\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')", "def license_model_description(self, license_model_description):\n self._license_model_description = license_model_description", "def putlicensedebug(self,licdebug_): # 3\n res = self.__obj.putlicensedebug(licdebug_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def license_number(self, license_number):\n\n self._license_number = license_number", "def set_snippet_copyright(self, doc, text):\n self.assert_snippet_exists()\n if not self.snippet_copyright_set:\n self.snippet_copyright_set = True\n if validations.validate_snippet_copyright(text):\n if isinstance(text, string_types):\n doc.snippet[-1].copyright = str_from_text(text)\n else:\n doc.snippet[-1].copyright = 
text # None or NoAssert\n else:\n raise SPDXValueError('Snippet::SnippetCopyrightText')\n else:\n raise CardinalityError('Snippet::SnippetCopyrightText')", "def set_file_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_comment_set:\n self.file_comment_set = True\n if validations.validate_file_comment(text):\n self.file(doc).comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('File::Comment')\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')", "def add_comments(header):\n try:\n header.comments[\"LICENSE\"] = \"License of data\"\n header.comments[\"LICVER\"] = \"Version of license\"\n header.comments[\"LICURL\"] = \"URL of license\"\n except:\n print(\"Oops! Something's gone wrong :-(\", file=sys.stderr)", "def set_lic_text(self, doc, text):\n if self.has_extr_lic(doc):\n if not self.extr_text_set:\n self.extr_text_set = True\n if validations.validate_is_free_form_text(text):\n self.extr_lic(doc).text = str_from_text(text)\n return True\n else:\n raise SPDXValueError('ExtractedLicense::text')\n else:\n raise CardinalityError('ExtractedLicense::text')\n else:\n raise OrderError('ExtractedLicense::text')", "def set_pkg_license_declared(self, doc, lic):\n self.assert_package_exists()\n if not self.package_license_declared_set:\n self.package_license_declared_set = True\n if validations.validate_lics_conc(lic):\n doc.package.license_declared = lic\n return True\n else:\n raise SPDXValueError('Package::LicenseDeclared')\n else:\n raise CardinalityError('Package::LicenseDeclared')", "def putlicensedebug(self,licdebug_):\n res = __library__.MSK_XX_putlicensedebug(self.__nativep,licdebug_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def set_pkg_cr_text(self, doc, text):\n self.assert_package_exists()\n if not self.package_cr_text_set:\n self.package_cr_text_set = True\n if validations.validate_pkg_cr_text(text):\n if isinstance(text, string_types):\n doc.package.cr_text = str_from_text(text)\n else:\n doc.package.cr_text = text # None or NoAssert\n else:\n raise SPDXValueError('Package::CopyrightText')\n else:\n raise CardinalityError('Package::CopyrightText')", "def set_comment(self, comment):\n\t\tself.comment_ = comment", "def set_creation_comment(self, doc, comment):\n if not self.creation_comment_set:\n self.creation_comment_set = True\n if validations.validate_creation_comment(comment):\n doc.creation_info.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('CreationInfo::Comment')\n else:\n raise CardinalityError('CreationInfo::Comment')", "def set_comment(self, comment):\n self.comment_text = str(comment)", "def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment", "def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment", "def setLicenseKey(self,content):\n self.PDFreactorConfiguration.in1[\"licenseKey\"] = content", "def putlicensepath(self,licensepath_): # 3\n res = self.__obj.putlicensepath(licensepath_)\n if res != 0:\n raise Error(rescode(res),\"\")", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def setPlateComment(self, address: ghidra.program.model.address.Address, comment: unicode) -> bool:\n ...", "def set_license(self, license_code: str) -> 
PrivXAPIResponse:\n response_status, data = self._http_post(\n UrlEnum.LICENSE.LICENSE,\n body=license_code,\n )\n return PrivXAPIResponse(response_status, HTTPStatus.OK, data)", "def set_concluded_license(self, doc, lic):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_conc_lics_set:\n self.file_conc_lics_set = True\n if validations.validate_lics_conc(lic):\n self.file(doc).conc_lics = lic\n return True\n else:\n raise SPDXValueError('File::ConcludedLicense')\n else:\n raise CardinalityError('File::ConcludedLicense')\n else:\n raise OrderError('File::ConcludedLicense')" ]
[ "0.7798254", "0.7691525", "0.7654628", "0.73221135", "0.65885866", "0.63827676", "0.63612866", "0.63207686", "0.6265315", "0.6208042", "0.6165672", "0.6093353", "0.6088788", "0.60350615", "0.60108227", "0.6003148", "0.5956488", "0.5906547", "0.5886418", "0.5884168", "0.58786047", "0.5873769", "0.5862955", "0.58421975", "0.58171797", "0.5808337", "0.5769525", "0.57161325", "0.56372213", "0.5629561" ]
0.77002937
1
Set file notice. Raise OrderError if no package or file defined. Raise CardinalityError if more than one.
def set_file_notice(self, doc, text):
    if self.has_package(doc) and self.has_file(doc):
        if not self.file_notice_set:
            self.file_notice_set = True
            self.file(doc).notice = text
            return True
        else:
            raise CardinalityError('File::Notice')
    else:
        raise OrderError('File::Notice')
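A self-contained usage sketch of the same guard pattern; the class and helper names below are illustrative stand-ins, not the real spdx-tools internals. It shows the two failure modes: OrderError before a file exists, CardinalityError on a second assignment.

class OrderError(Exception):
    """Raised when a field is set before its prerequisite exists."""

class CardinalityError(Exception):
    """Raised when a single-valued field is set a second time."""

class FileRecord:
    def __init__(self):
        self.notice = None

class MiniFileBuilder:
    def __init__(self):
        self.current_file = None       # becomes a FileRecord once a package/file is defined
        self.file_notice_set = False

    def set_file_notice(self, text):
        if self.current_file is None:
            raise OrderError('File::Notice')
        if self.file_notice_set:
            raise CardinalityError('File::Notice')
        self.file_notice_set = True
        self.current_file.notice = text
        return True

builder = MiniFileBuilder()
builder.current_file = FileRecord()
builder.set_file_notice('This file is provided under the Apache-2.0 license.')
try:
    builder.set_file_notice('a second notice')   # must fail: the notice is single-valued
except CardinalityError:
    pass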
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_file_notice(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_notice_set:\n self.file_notice_set = True\n if validations.validate_file_notice(text):\n self.file(doc).notice = str_from_text(text)\n else:\n raise SPDXValueError('File::Notice')\n else:\n raise CardinalityError('File::Notice')\n else:\n raise OrderError('File::Notice')", "def set_file_atrificat_of_project(self, doc, symbol, value):\n if self.has_package(doc) and self.has_file(doc):\n self.file(doc).add_artifact(symbol, value)\n else:\n raise OrderError('File::Artificat')", "def check_filekind(self):\n assert self.filekind in self.obs_package.FILEKINDS, \\\n \"Invalid filekind \" + repr(self.filekind) + \" in \" + repr(self.filename)", "def testFileOutSetException(self):\n def testFileOut():\n self.cc.file_out = '../NewFile.cc'\n\n self.assertRaises(\n AttributeError,\n testFileOut\n )", "def add_error_non_file(self, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:", "def testFileOutSetException(self):\n def testFileOut():\n self.node.file_out = '../NewFile.ccc'\n\n self.assertRaises(\n AttributeError,\n testFileOut\n )", "def unknown(self):\n self.add_file_string('Unknown file')\n self.should_copy = False", "def add_file_dep(self, doc, value):\n if self.has_package(doc) and self.has_file(doc):\n self.file(doc).add_depend(value)\n else:\n raise OrderError('File::Dependency')", "def ensure_file(self):\n if not self.has_file():\n raise AttributeError(\"No file set\")", "def add_warning_non_file(self, code: Code, msg: str,\n is_persistant: bool = False) -> None:", "def setFile(self, filename): #$NON-NLS-1$\r", "def set_file_comment(self, doc, text):\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_comment_set:\n self.file_comment_set = True\n if validations.validate_file_comment(text):\n self.file(doc).comment = str_from_text(text)\n return True\n else:\n raise SPDXValueError('File::Comment')\n else:\n raise CardinalityError('File::Comment')\n else:\n raise OrderError('File::Comment')", "def file_import(self):\r\n\r\n try:\r\n self.process_file_import()\r\n except InputError as ex:\r\n print(ex)\r\n self.file_import()", "def add_error_non_file(self, code: Code, msg: str,\n severity: Severity = Severity.FATAL,\n is_persistant: bool = True) -> None:\n self._insert_error(Error(severity=severity, path=None, code=code,\n message=msg, is_persistant=is_persistant))", "def set_file_type(self, doc, type_value):\n type_dict = {\n 'SOURCE': file.FileType.SOURCE,\n 'BINARY': file.FileType.BINARY,\n 'ARCHIVE': file.FileType.ARCHIVE,\n 'OTHER': file.FileType.OTHER\n }\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_type_set:\n self.file_type_set = True\n if type_value in type_dict.keys():\n self.file(doc).type = type_dict[type_value]\n return True\n else:\n raise SPDXValueError('File::Type')\n else:\n raise CardinalityError('File::Type')\n else:\n raise OrderError('File::Type')", "def test_supply_file(self):\n f = open(self.junk_file, 'w')\n f.close()\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, self.junk_file)", "def file(self, file) :\n\t\ttry :\n\t\t\tself._file = file\n\t\texcept Exception as e:\n\t\t\traise e", "def fix_nonerrors(self):\n if not self.only_error:\n return\n self.line = None\n self.filename = None", "def testFilenameSetBadType(self):\n def setFilename():\n self.mr.filename = 12345\n\n self.assertRaises(\n TypeError,\n setFilename\n )", "def __init__(self, 
path):\n self.filename = os.path.basename(path)\n self.path = path\n # To be populated in the first check.\n self.py_versions = None\n\n ts = rpm.TransactionSet()\n with open(path, 'rb') as fdno:\n try:\n self.hdr = ts.hdrFromFdno(fdno)\n except rpm.error as err:\n raise PackageException('{}: {}'.format(self.filename, err))", "def test_badfileerror_raise(self, mock_path):\n # Set the mocked functions returned values\n mock_path.isfile.side_effect = [False, True, True, True, True]\n\n # Test execution\n self.assertRaises(ConnectomistBadFileError, dwi_local_modeling,\n **self.kwargs)", "def setErrorFile(fname='dis.err'):\n dislin.errfil(fname)", "def error_check(self, options):\n if not options[\"type\"] in ['dot', 'raw']:\n sys.stderr.write('\\nUnknown file type \"%s\". Type may be either \"dot\" or \"raw\"\\n' %str(options[\"type\"]))\n self.print_usage()", "def set_pkg_files_analyzed(self, doc, files_analyzed):\n self.assert_package_exists()\n if not self.package_files_analyzed_set:\n if files_analyzed:\n if validations.validate_pkg_files_analyzed(files_analyzed):\n self.package_files_analyzed_set = True\n doc.package.files_analyzed = files_analyzed\n print(doc.package.files_analyzed)\n return True\n else:\n raise SPDXValueError('Package::FilesAnalyzed')\n else:\n raise CardinalityError('Package::FilesAnalyzed')", "def error_impresion(self):\n self._info(\"error_impresion\")", "def fileset(self):\n pass", "def Warning_Message( self ):\r\n message = \"This program prefers the FASTA file format\\nPlease check the file for >gi|id|title followed by the sequence,\\n or for enough sequences\"\r\n tkMessageBox.showwarning(\r\n \"File Opening Error\",\r\n message\r\n )", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def set_filename(self, file_name):", "def test_style_guide_manager_pre_file_ignores(\n ignores, violation, filename, handle_error_return\n):\n formatter = mock.create_autospec(base.BaseFormatter, instance=True)\n options = create_options(\n ignore=ignores,\n select=[\"E\", \"F\", \"W\"],\n per_file_ignores=PER_FILE_IGNORES_UNPARSED,\n )\n guide = style_guide.StyleGuideManager(options, formatter=formatter)\n assert (\n guide.handle_error(violation, filename, 1, 1, \"Fake text\")\n == handle_error_return\n )" ]
[ "0.7263729", "0.5780258", "0.56514925", "0.56100523", "0.55981684", "0.5560745", "0.54180616", "0.54094934", "0.5397283", "0.5335834", "0.52749735", "0.52060634", "0.51867205", "0.5160868", "0.5109503", "0.5107595", "0.50973636", "0.50885445", "0.5056448", "0.5054006", "0.5052554", "0.49845564", "0.49694577", "0.49509838", "0.49470723", "0.49346158", "0.4925591", "0.491922", "0.4915696", "0.48935285" ]
0.69790244
1
Wrap rdfbuilders.FileBuilder.set_file_type to match the different fileType representations.
def set_file_type(self, doc, type_value):
    type_dict = {
        'fileType_source': 'SOURCE',
        'fileType_binary': 'BINARY',
        'fileType_archive': 'ARCHIVE',
        'fileType_other': 'OTHER'
    }
    return super(FileBuilder, self).set_file_type(doc, type_dict.get(type_value))
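A minimal sketch of the wrapper idea, using a stand-in base class rather than the real rdfbuilders.FileBuilder: the subclass translates the RDF-style token into the vocabulary the parent expects and then delegates unchanged.

class BaseFileBuilder:
    # Stand-in for the parent builder; only the signature matters for this sketch.
    def set_file_type(self, doc, type_value):
        return type_value in ('SOURCE', 'BINARY', 'ARCHIVE', 'OTHER')

class FileBuilder(BaseFileBuilder):
    _TYPE_MAP = {
        'fileType_source': 'SOURCE',
        'fileType_binary': 'BINARY',
        'fileType_archive': 'ARCHIVE',
        'fileType_other': 'OTHER',
    }

    def set_file_type(self, doc, type_value):
        # Map the RDF representation, then reuse the parent's handling.
        return super().set_file_type(doc, self._TYPE_MAP.get(type_value))

assert FileBuilder().set_file_type(doc=None, type_value='fileType_binary')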
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_file_type(self, doc, type_value):\n type_dict = {\n 'SOURCE': file.FileType.SOURCE,\n 'BINARY': file.FileType.BINARY,\n 'ARCHIVE': file.FileType.ARCHIVE,\n 'OTHER': file.FileType.OTHER\n }\n if self.has_package(doc) and self.has_file(doc):\n if not self.file_type_set:\n self.file_type_set = True\n if type_value in type_dict.keys():\n self.file(doc).type = type_dict[type_value]\n return True\n else:\n raise SPDXValueError('File::Type')\n else:\n raise CardinalityError('File::Type')\n else:\n raise OrderError('File::Type')", "def set_filetype(self, filetype, bufnr=None):\n if bufnr:\n self._vim.command(str(bufnr) + 'bufdo set filetype=' + filetype)\n else:\n self._vim.command('set filetype=' + filetype)", "def file_type(self, file_type):\n allowed_values = [undefined, undefined, undefined, ] # noqa: E501\n\n self._file_type = file_type", "def attachment_file_type(self, attachment_file_type):\n\n self._attachment_file_type = attachment_file_type", "def mime_type(self, mime_type):\n\n self._mime_type = mime_type", "def build_mimetype(self) -> None:\n logger.info(__('writing mimetype file...'))\n copy_asset_file(path.join(self.template_dir, 'mimetype'), self.outdir)", "def AddFileType(self, type, name):\n return _gmat_py.FileManager_AddFileType(self, type, name)", "def set_flowplayer_file_type(obj):\n if obj.id.endswith('mp3'):\n alsoProvides(obj, IAudio)\n obj.reindexObject(idxs=['object_provides'])\n elif obj.id.endswith('mp4'):\n alsoProvides(obj, IVideo)\n obj.reindexObject(idxs=['object_provides'])\n logger.info(u'Tipo de arquivo estabelecido')", "def _file_format_adapter(self):\n raise NotImplementedError", "def _file_type_update(log, file_config):\n if 'path' in file_config:\n file_path = file_config['path']\n elif 'directory' in file_config:\n file_path = _get_most_recent_file(file_config['directory'])\n\n if file_path is None:\n log.error('Data file not found.')\n sys.exit(1)\n\n file_ext = os.path.splitext(file_path)[-1]\n file_mime = MIMES.get(file_ext, MIMES['.csv'])\n return (file_path, file_ext, file_mime)", "def ComputeFileTypes(self):\n for rel_path, file_data in self._files.iteritems():\n if 'ftype' in file_data:\n continue\n ftype = self._file_type_decoder.GetType(rel_path)\n if ftype:\n file_data['ftype'] = ftype", "def set_file(self, sql_file):\n self.file_type = self.get_file_extension(sql_file)\n self.sql_file = sql_file", "def file_type(self):\n return self.__file_type", "def blob_mime_type(self, blob_mime_type):\n\n self._blob_mime_type = blob_mime_type", "def setDocumentType(self,value):\n self.PDFreactorConfiguration.in1[\"documentType\"] = value", "def changeType(self, newType):\n self.__class__ = globals()[newType + 'Format']\n self.format = self.defaultFormat\n self.initFormat()", "def type(self, type):\n allowed_values = [\"None\", \"File\", \"FileManagerFile\", \"BusOb\", \"History\", \"Other\"] # noqa: E501\n if self.local_vars_configuration.client_side_validation and type not in allowed_values: # noqa: E501\n raise ValueError(\n \"Invalid value for `type` ({0}), must be one of {1}\" # noqa: E501\n .format(type, allowed_values)\n )\n\n self._type = type", "def check_media_file_type(media_file_class):\n if media_file_class == 'AudioFile':\n media_file_type = 'Audio file'\n elif media_file_class == 'VideoFile':\n media_file_type = 'Video file'\n elif media_file_class == 'DocumentFile':\n media_file_type = 'Document file'\n elif media_file_class == 'ImageFile':\n media_file_type = 'Image file'\n\n return media_file_type", "def fs_type(self, fs_type):\n\n 
self._fs_type = fs_type", "def set_type(self, value):\n self._set_one_attribute(self.AttributeNames.TYPE, value)\n return self", "def __init__(self, mimetype):\n self.mimetype = mimetype\n self.name = \"Filters.document.mime_type('{}')\".format(self.mimetype)", "def build_type(self, build_type):\n\n self._build_type = build_type", "def file_type(self):\n return FileType(self.unpack_dword(0x1C))", "def attachment_mime_type(self, attachment_mime_type):\n\n self._attachment_mime_type = attachment_mime_type", "def set_format_by_type(self, value, format):\n self.set_render_func_by_type(value, format.format)", "def set_file_content(self, file_path: Path, content_type: str):\n file_stat = file_path.stat()\n self.set_header(\"Content-Length\", str(file_stat.st_size))\n self.set_header(\"Last-Modified\", gmtime_string(file_stat.st_mtime))\n self.set_header(\"Content-Type\", content_type)\n self._content_io = io.open(file_path, 'rb')", "def file_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"file_type\")", "def file_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"file_type\")", "def test_mimetypes_magic(self, mock_get_content_type):\n\n def get_content_type(value):\n return value.content_type\n\n mock_get_content_type.side_effect = get_content_type\n\n field = TypedFileField(required=False, type_whitelist=self.good_types, use_magic=True)\n\n for t in self.good_types:\n name = 'somefooname'\n file = UploadedFile(name=name, size=1, content_type=t)\n assert field.clean(file) is file\n\n for t in self.bad_types:\n name = 'somefooname'\n file = UploadedFile(name=name, size=1, content_type=t)\n with pytest.raises(forms.ValidationError):\n field.clean(file)", "def setTransformType(self, val): # real signature unknown; restored from __doc__\n pass" ]
[ "0.70331025", "0.6612077", "0.6341896", "0.58677274", "0.58193064", "0.5770286", "0.5716289", "0.57154566", "0.5662334", "0.56170815", "0.5595489", "0.5595484", "0.55319166", "0.547541", "0.541627", "0.5346825", "0.53068495", "0.5305302", "0.528279", "0.5267061", "0.52635944", "0.5256158", "0.5249242", "0.5207849", "0.51957774", "0.5190504", "0.518035", "0.518035", "0.51797503", "0.5152994" ]
0.795146
0
Set the annotation comment. Raise CardinalityError if already set. Raise OrderError if no annotator defined before.
def add_annotation_comment(self, doc, comment):
    if len(doc.annotations) != 0:
        if not self.annotation_comment_set:
            self.annotation_comment_set = True
            doc.annotations[-1].comment = comment
            return True
        else:
            raise CardinalityError('AnnotationComment')
    else:
        raise OrderError('AnnotationComment')
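A standalone illustration of the ordering rule; the Annotation/Document classes and builder below are simplified stand-ins, not the library's own. The comment attaches to the most recent annotation, so an annotator must be added first, and each annotation's comment may be set only once.

class OrderError(Exception): pass
class CardinalityError(Exception): pass

class Annotation:
    def __init__(self, annotator):
        self.annotator = annotator
        self.comment = None

class Document:
    def __init__(self):
        self.annotations = []

class MiniAnnotationBuilder:
    def __init__(self):
        self.annotation_comment_set = False

    def add_annotator(self, doc, name):
        self.annotation_comment_set = False   # a new annotation accepts a new comment
        doc.annotations.append(Annotation(name))

    def add_annotation_comment(self, doc, comment):
        if not doc.annotations:
            raise OrderError('AnnotationComment')
        if self.annotation_comment_set:
            raise CardinalityError('AnnotationComment')
        self.annotation_comment_set = True
        doc.annotations[-1].comment = comment
        return True

doc = Document()
builder = MiniAnnotationBuilder()
builder.add_annotator(doc, 'Person: Jane Doe')
builder.add_annotation_comment(doc, 'Reviewed the declared licenses.')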
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_annotation_comment(self, doc, comment):\n if len(doc.annotations) != 0:\n if not self.annotation_comment_set:\n self.annotation_comment_set = True\n if validations.validate_annotation_comment(comment):\n doc.annotations[-1].comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('AnnotationComment::Comment')\n else:\n raise CardinalityError('AnnotationComment::Comment')\n else:\n raise OrderError('AnnotationComment::Comment')", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n doc.comment = comment\n else:\n raise CardinalityError('Document::Comment')", "def comment(self, comment):\n\n self.logger.debug(\"In 'comment' setter.\")\n\n self._comment = comment", "def comment(self, comment):\n self.logger.debug(\"In 'comment' setter.\")\n\n if len(comment) > 512:\n raise Exception(\"Comment is too long, must be less than 512 characters.\")\n\n self._comment = comment", "def set_comment(self, comment):\n\t\tself.comment_ = comment", "def comment(self, comment) :\n\t\ttry :\n\t\t\tself._comment = comment\n\t\texcept Exception as e:\n\t\t\traise e", "def annotate(self, annotation):\n self._data = self._data.annotate(**annotation)", "def setAnnotation(self, *args):\n return _libsbml.Model_setAnnotation(self, *args)", "def setAnnotation(self, *args):\n return _libsbml.SBase_setAnnotation(self, *args)", "def annotate(self, annotation_=None):\n # Important: Need a copy, not the reference to the original object\n annotation_ = copy.deepcopy(annotation_)\n annotation_.annotate(self, from_dataset=True)\n history_record = annotation_.create_history_record()\n self.annotations.append(history_record)\n self._append_task(kind='annotation', task=history_record)", "def set_doc_comment(self, doc, comment):\n if not self.doc_comment_set:\n self.doc_comment_set = True\n if validations.validate_doc_comment(comment):\n doc.comment = str_from_text(comment)\n return True\n else:\n raise SPDXValueError('Document::Comment')\n else:\n raise CardinalityError('Document::Comment')", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def comment(self, comment):\n\n self._comment = comment", "def set_lic_comment(self, doc, comment):\n if self.has_extr_lic(doc):\n if not self.extr_lic_comment_set:\n self.extr_lic_comment_set = True\n self.extr_lic(doc).comment = comment\n return True\n else:\n raise CardinalityError('ExtractedLicense::comment')\n else:\n raise OrderError('ExtractedLicense::comment')", "def set_comment(self, comment):\n self.comment_text = str(comment)", "def set_comment(self, obj, cursor):\n if isinstance(obj, typedesc.T):\n obj.comment = cursor.brief_comment\n return", "def set_attribute(self, name, value, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__comment' % name, value_or_none(comment))", "def set_creation_comment(self, doc, comment):\n if not self.creation_comment_set:\n self.creation_comment_set = True\n if validations.validate_creation_comment(comment):\n doc.creation_info.comment = 
str_from_text(comment)\n return True\n else:\n raise SPDXValueError('CreationInfo::Comment')\n else:\n raise CardinalityError('CreationInfo::Comment')", "def comment(self, comment: str):\n\n self._comment = comment", "def comment(self, comment: str):\n\n self._comment = comment", "def comment(self, value: str):\n self._comment = value", "def set_pkg_comment(self, doc, text):\n self.assert_package_exists()\n if not self.package_comment_set:\n self.package_comment_set = True\n if validations.validate_pkg_comment(text):\n doc.package.comment = str_from_text(text)\n else:\n raise SPDXValueError('Package::Comment')\n else:\n raise CardinalityError('Package::Comment')" ]
[ "0.7566029", "0.65002567", "0.64751667", "0.63237065", "0.6311318", "0.63018346", "0.621435", "0.6135042", "0.61078835", "0.60896176", "0.6081147", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.6053388", "0.5991814", "0.59582114", "0.59393114", "0.59055984", "0.58005226", "0.57891375", "0.57891375", "0.57586783", "0.57471794" ]
0.75879556
0
Reset builder's state for building new documents. Must be called between usage with different documents.
def reset(self):
    # FIXME: this state does not make sense
    self.reset_creation_info()
    self.reset_document()
    self.reset_package()
    self.reset_file_stat()
    self.reset_reviews()
    self.reset_annotations()
    self.reset_extr_lics()
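A compact sketch of the reset-between-documents pattern; the flag and helper names are illustrative only. The top-level reset delegates to one helper per section so every "already set" flag is cleared before the next document is built.

class MiniBuilder:
    def __init__(self):
        self.reset()

    def reset(self):
        self.reset_document()
        self.reset_package()
        self.reset_file_stat()

    def reset_document(self):
        self.doc_comment_set = False

    def reset_package(self):
        self.package_cr_text_set = False

    def reset_file_stat(self):
        self.file_notice_set = False
        self.file_type_set = False

builder = MiniBuilder()
builder.file_notice_set = True     # state left over from building the first document
builder.reset()                    # required before starting a second document
assert builder.file_notice_set is False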
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset_document(self):\n # FIXME: this state does not make sense\n self.doc_version_set = False\n self.doc_comment_set = False\n self.doc_namespace_set = False\n self.doc_data_lics_set = False\n self.doc_name_set = False\n self.doc_spdx_id_set = False", "def reset(self):\n # FIXME: this state does not make sense\n self.reset_creation_info()\n self.reset_document()\n self.reset_package()\n self.reset_file_stat()\n self.reset_reviews()\n self.reset_annotations()\n self.reset_extr_lics()\n self.reset_snippet()", "def reset(self):\n self.doc = xml.dom.minidom.Document()", "def reset():\n teardown_db()\n build()", "def reset(self):\n self.set_state(self._initial_state)", "def reset(self):\n self.target = next(self._target_gen)\n self.design = _Design(len(self.target))\n return self._get_state()", "def full_reset(self):\n for docid in self.iter_docids():\n self.delete(docid)\n self.client.delete(self.dbprefix + 'schema')\n self.client.delete(self.dbprefix + 'docs')\n self.client.delete(self.dbprefix + 'nextid')", "def reset(self):\n self.bbox = None\n self.true = None\n self.meta = None", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self):\n pass", "def reset(self) -> None:\n pass", "def reset(self):\n self._set_init()", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError", "def reset(self):\n raise NotImplementedError" ]
[ "0.73523873", "0.6907559", "0.66045606", "0.6456777", "0.62394756", "0.622441", "0.6222067", "0.6208838", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202436", "0.6202212", "0.61921644", "0.61778176", "0.61778176", "0.61778176" ]
0.6995418
1