max_stars_repo_path (stringlengths 3..269) | max_stars_repo_name (stringlengths 4..119) | max_stars_count (int64 0..191k) | id (stringlengths 1..7) | content (stringlengths 6..1.05M) | score (float64 0.23..5.13) | int_score (int64 0..5) |
---|---|---|---|---|---|---|
mindinsight/profiler/proposer/compose_proposer.py | fapbatista/mindinsight | 0 | 12789051 | <reponame>fapbatista/mindinsight
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Compose Proposals."""
from collections import OrderedDict
from mindinsight.profiler.common.log import logger
from mindinsight.profiler.common.util import get_options
from mindinsight.profiler.proposer.proposer_factory import ProposerFactory
class ComposeProposal:
"""Get the proposals from multiple different proposers."""
def __init__(self, profiling_path, device_id, type_list=None):
self.profiling_path = profiling_path
self.device_id = device_id
self.compose_proposer_type_list = type_list
# Postfix of the category label, used by the UI to identify a label as a category label.
self.type_label_postfix = "-proposer_type_label"
def get_proposal(self, options=None):
"""
Get compose proposals.
Args:
options (dict): options for composed proposal.
- compose_proposal_result: execution results of the already running proposers.
- step_trace: include optional parameters for step trace. The dictionary key is iter_interval,
used to get the analyser options for the iteration interval time.
Returns:
dict, the proposals from multiple different proposers.
Examples:
>>> type_list = ['common', 'step_trace']
>>> condition = {"filter_condition": {'mode': "proc", "proc_name": "iteration_interval"}}
>>> options = {'step_trace': {"iter_interval": condition}}
>>> cp = ComposeProposal(self.profiling_dir, self.device_id, type_list)
>>> result_proposal = cp.get_proposal(options=options)
"""
logger.info("The ComposeProposal is running")
options = get_options(options)
logger.debug("The 'options' is %s", str(options))
# The flag whether to write category label.
type_label_flag = options.get("type_label_flag", True)
compose_proposal_result = OrderedDict()
logger.debug("The 'compose_proposer_type_list' is %s", str(self.compose_proposer_type_list))
for proposer_type in self.compose_proposer_type_list:
proposer = ProposerFactory.instance().get_proposer(proposer_type, self.profiling_path, self.device_id)
if proposer is None:
continue
# Write the result of proposals to option for other proposer to get.
options["compose_proposal_result"] = compose_proposal_result
result = proposer.analyze(options)
# Insert category label.
if result and type_label_flag:
proposer_type_label = proposer_type + "-type_label"
# Get the name of the category label, the default is the same as the proposer type.
type_label_name = options.get(proposer_type_label, proposer_type)
# Add postfix to category label name
type_proposal_label = type_label_name + self.type_label_postfix
compose_proposal_result[type_proposal_label] = None
# Merge results to the proposals dictionary.
compose_proposal_result.update(result)
elif result and not type_label_flag:
# Merge results to the proposals dictionary.
compose_proposal_result.update(result)
logger.debug("The 'compose_proposal_result' is %s", str(compose_proposal_result))
return compose_proposal_result
| 1.828125 | 2 |
10.68. Reversed.py | kyumiouchi/python-basic-to-advanced | 0 | 12789052 | <reponame>kyumiouchi/python-basic-to-advanced
"""
Reversed
NOTE: It differs from the list method reverse() => reverse() only works on lists and
reverses them in place, while reversed() accepts any sequence (or any object that
defines __reversed__) and returns a reverse iterator.
"""
list_number = [1, 2, 3, 4, 5]
result = reversed(list_number)
print(result) # <list_reverseiterator object at 0x0000029BC4B12FD0>
print(type(result)) # <class 'list_reverseiterator'>
# Convert List, Tuple and Set
# List
print(list(reversed(list_number))) # [5, 4, 3, 2, 1]
# Tuple
print(tuple(reversed(list_number))) # (5, 4, 3, 2, 1)
# Set
print(set(reversed(list_number))) # {1, 2, 3, 4, 5} # sets do not preserve element order
for character in reversed('Geek University'): # ytisrevinU keeG
print(character, end='')
print('\n')
print(''.join(list(reversed('Geek University')))) # ytisrevinU keeG
# the same as slice of strings
print('Yumi Ouchi'[::-1]) # ihcuO imuY
# reversed() to loop reversed
for item in reversed(range(0, 10)): # 9876543210
print(item, end='')
print('\n')
# range does too
for item in range(9, -1, -1): # 9876543210
print(item, end='')
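# Extra illustration (not part of the original lesson; uses only builtins):
# reversed() needs a real sequence (__len__ + __getitem__) or an object that
# defines __reversed__; a plain generator raises a TypeError.
# reversed(x for x in range(3))  # TypeError
class Countdown:
    """Tiny example class: iterating gives 3, 2, 1, so reversed() gives 1, 2, 3."""
    def __init__(self, start):
        self.start = start
    def __iter__(self):
        return iter(range(self.start, 0, -1))
    def __reversed__(self):
        return iter(range(1, self.start + 1))
print('\n')
print(list(Countdown(3)))            # [3, 2, 1]
print(list(reversed(Countdown(3))))  # [1, 2, 3]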
| 4.5625 | 5 |
apps/landing/models.py | pythonvietnam/nms | 19 | 12789053 | <reponame>pythonvietnam/nms
from django.db import models
from apps.base.models import Timestampable
# Create your models here.
class Landing(Timestampable):
email = models.EmailField(blank=False, max_length=255, unique=True)
class Meta:
db_table = 'landing'
def __str__(self):
return self.email
| 2.546875 | 3 |
functionality/kmeans_cluster_ijcai18_4_203.py | neerbek/taboo-selective | 0 | 12789054 | # -*- coding: utf-8 -*-
"""
Created on January 24, 2018
@author: neerbek
"""
# -*- coding: utf-8 -*-
import os
os.chdir("../../taboo-core")
from numpy.random import RandomState # type: ignore
from sklearn.cluster import KMeans # type: ignore
import ai_util
import confusion_matrix
import kmeans_cluster_util as kutil
import similarity.load_trees as load_trees
# import pylab # type: ignore
import matplotlib.pyplot as plt
import importlib
# importlib.reload(kutil)
importlib.reload(confusion_matrix)
# for information type 203
#
#
# run rnn on data (very low emb size)
# OMP_NUM_THREADS=3 ipython3 functionality/train_model.py -- -traintrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -validtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -testtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$test.txt -nx 50 -nh 20 -lr 0.5 -L1_reg 0 -L2_reg 0 -n_epochs -1 -retain_probability 1 -batch_size 90 -valid_batch_size 300 -glove_path ../code/glove/ -train_report_frequency 445/5 -validation_frequency 445 -file_prefix save_exp164
# Epoch 114. On validation set: Best (110, 1.065507, 77.4675%)
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings -max_tree_count 100 -max_count 100 -max_embedding_count 10000
#
# for realz
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$train.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > train.txt
# OMP_NUM_THREADS=2 ipython3 functionality/run_model_verbose.py -- -inputtrees ../taboo-jan/functionality/203/data_full_random_cleaned.zip\$dev.txt -nx 50 -nh 20 -L1_reg 0 -L2_reg 0 -retain_probability 1 -batch_size 90 -glove_path ../code/glove/ -inputmodel ../taboo-jan/functionality/logs/exp164_epoch480.zip\$save_exp164_best.txt -output_embeddings > dev.txt
#
# zip output_embeddings_exp164_e480.zip train.txt dev.txt
# rm train.txt dev.txt
# mv output_embeddings_exp164_e480.zip output
# don't add to git (for now), we should make a backup
totaltimer = ai_util.Timer("Total time: ")
traintimer = ai_util.Timer("Train time: ")
totaltimer.begin()
inputfileTrain = "output/output_embeddings_exp164_e120.zip$train.txt"
inputfileTrain = "output/output_embeddings_exp164_e480.zip$train.txt"
linesTrainFull = confusion_matrix.read_embeddings(inputfileTrain, max_line_count=-1)
linesTrain = [linesTrainFull[i] for i in range(60000)]
inputfileDev = "output/output_embeddings_exp164_e120.zip$dev.txt"
inputfileDev = "output/output_embeddings_exp164_e480.zip$dev.txt"
linesDev = confusion_matrix.read_embeddings(inputfileDev, max_line_count=-1)
numberOfClusters = 35
randomSeed = 7485
doShow = True
low = 3 # 03
high = 22 # 16 not good
rng = RandomState(randomSeed)
aTrain = confusion_matrix.get_embedding_matrix(linesTrain, normalize=True)
aTrainFull = confusion_matrix.get_embedding_matrix(linesTrainFull, normalize=True)
aDev = confusion_matrix.get_embedding_matrix(linesDev, normalize=True)
kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrain)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aTrainFull)
# kmeans = KMeans(n_clusters=numberOfClusters, random_state=rng).fit(aDev)
# sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrainFull, linesTrainFull, kmeans)
sort_order = kutil.get_cluster_sen_ratios_sort_order(aTrain, linesTrain, kmeans)
show = kutil.SHOW_ALL
if doShow:
# plot
y1 = kutil.get_cluster_sen_ratios(aTrain, linesTrain, kmeans, sort_order)
y2 = kutil.getScaledSizes(aTrain, kmeans, sort_order)
# y1 = kutil.get_cluster_sen_ratios(aTrainFull, linesTrainFull, kmeans, sort_order)
# y2 = getScaledSizes(aTrainFull, kmeans, sort_order)
# y3 = kutil.get_cluster_sen_ratios(aDev, linesDev, kmeans, sort_order)
# y4 = getScaledSizes(aDev, kmeans, sort_order)
x = kutil.getXRange(show, low, high, numberOfClusters)
y1 = [y1[i] for i in x]
y2 = [y2[i] for i in x]
# y3 = [y3[i] for i in x]
# y4 = [y4[i] for i in x]
confusion_matrix.new_graph('Clusters', 'Ratio')
plt.plot(x, y1, 'k-', label='Sensitivity')
plt.plot(x, y2, 'k+', label='Accumulate size')
# plt.plot(x, y3, 'b-', label='Sensitivity Dev')
# plt.plot(x, y4, 'b+', label='Accumulate size Dev')
if show == kutil.SHOW_ALL:
# plt.plot((low, low), (0, 1), 'k-')
plt.plot((high, high), (0, 1), 'k:')
plt.legend()
plt.savefig('ijcai18_plot_sensitive_sorted_203.eps')
# plt.savefig('tmp.eps')
# plt.show() don't call show from an interactive prompt :(
# https://github.com/matplotlib/matplotlib/issues/8505/
clusterIds = sort_order # clusterIds == sort_order, it's just syntactic sugar
(linesC1, aC1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesTrainFull, aTrainFull, kmeans)
(linesC2, aC2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesTrainFull, aTrainFull, kmeans)
(lines2C1, a2C1) = kutil.get_sentences_from_clusters(clusterIds[:high], linesDev, aDev, kmeans)
(lines2C2, a2C2) = kutil.get_sentences_from_clusters(clusterIds[high:], linesDev, aDev, kmeans)
print(len(linesC1), len(linesC2))
print(len(lines2C1), len(lines2C2))
# after some iterations (unknown random seed)
# 78442 45824
# 17034 9966 (27000)
kutil.get_base_accuracy(linesC1, "train C1").report()
kutil.get_base_accuracy(linesC2, "train C2").report()
# if we want the validation scores
kutil.get_base_accuracy(lines2C1, "dev C1").report()
kutil.get_base_accuracy(lines2C2, "dev C2").report()
# don't know if these values are updated!
# Accuracy (train C1): 0.9432 (0.6436), f1=0.9179 (24901 1398 49089 3054)
# Accuracy (train C2): 0.9871 (0.0128), f1=0.9935 (45224 579 8 13)
# Accuracy (dev C1): 0.9304 (0.6318), f1=0.9023 (5470 383 10379 802)
# Accuracy (dev C2): 0.9832 (0.0167), f1=0.9915 (9796 163 3 4)
| 1.75 | 2 |
Libraries/warshallfloyd.py | tonko2/AtCoder | 2 | 12789055 | <filename>Libraries/warshallfloyd.py
N = 10
# Build each row as its own list; [[float('inf')] * N] * N would alias one row N
# times, so updating cost[i][j] would overwrite every row at once.
cost = [[float('inf')] * N for _ in range(N)]
for k in range(N):
for i in range(N):
for j in range(N):
if cost[i][k] != float('inf') and cost[k][j] != float('inf'):
cost[i][j] = min(cost[i][j], cost[i][k] + cost[k][j])
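# Hedged usage sketch (added for illustration; the tiny graph below is made up):
# in practice the diagonal is seeded with 0 and edge weights are filled in before
# the triple loop runs.
def floyd_warshall(cost):
    n = len(cost)
    for k in range(n):
        for i in range(n):
            for j in range(n):
                if cost[i][k] != float('inf') and cost[k][j] != float('inf'):
                    cost[i][j] = min(cost[i][j], cost[i][k] + cost[k][j])
    return cost
demo = [[float('inf')] * 3 for _ in range(3)]
for v in range(3):
    demo[v][v] = 0
demo[0][1] = 5  # hypothetical edge 0 -> 1 with weight 5
demo[1][2] = 2  # hypothetical edge 1 -> 2 with weight 2
print(floyd_warshall(demo)[0][2])  # 7: the shortest 0 -> 2 route goes through 1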
| 3.171875 | 3 |
CodeUP/Python basic 100/6047.py | cmsong111/NJ_code | 0 | 12789056 | <gh_stars>0
import math
a,b = map(int,input().split())
c=pow(2,b)
print(a*c) | 2.875 | 3 |
scraper.py | wind1s/proxyscraper | 0 | 12789057 | """
* Copyright (c) 2022, <NAME> <<EMAIL>>
*
* SPDX-License-Identifier: BSD-2-Clause
Compiles a database of proxy servers with their respective metadata.
Links:
https://geonode.com/free-proxy-list
https://proxylist.geonode.com/api/proxy-list?limit=1000&page=1
Custom proxy key value pair:
key = ip (192.168.127.12)
value = (this dict)
{
"port":
"anonymityLevel":
"protocols": [list of supported protocols]
"google": (true or false if google approved proxy)
"org": [list of all orgs and asn]
"latency":
"responseTime":
"upTime":
"upTimeTryCount":
"created_at":
"updated_at":
"hostname":
"city":
"region":
"postal":
"country":
"timezone":
"loc":
"corruptionindex":
"entry_time":
}
"""
from typing import Iterable, Any
from math import ceil
from datetime import timedelta
from json import loads
from aiohttp import ClientSession
from ipinfo import create_ip_info_parser
from asynchttprequest import AsyncRequest, run_async_requests, ParseRequest
from database import Database
from curlget import curl_get_json
from requestlogging import log_request, get_default_logger, log_db_entry_status
from utility import try_get_key, extract_keys, str_join
from config import IP_DB_NAME, PROXY_DB_NAME
PROXYLIST_RESPONSE_KEYS = (
"anonymityLevel",
"protocols",
"google",
"org",
"speed",
"latency",
"responseTime",
"upTime",
"upTimeTryCount",
"created_at",
"updated_at",
)
def forge_proxy_entry(ip_info: dict[str, str], proxylist: dict[str, str]) -> dict[str, Any]:
"""
Creates the custom database entry for a proxies data.
"""
db_entry = {**extract_keys(proxylist, PROXYLIST_RESPONSE_KEYS), **ip_info}
# Creates string of all possible ip origin names.
db_entry["org"] = ";".join(
origin
for origin in (
try_get_key("org", ip_info),
try_get_key("asn", proxylist),
try_get_key("org", proxylist),
try_get_key("isp", proxylist),
)
if origin is not None
)
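# e.g. (hypothetical values) the join above produces something like
# "Example Hosting Inc;AS64500;ExampleISP", skipping any origin that is missing.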
db_entry["created_at"].replace("T", " ").replace("Z", "")
db_entry["updated_at"].replace("T", " ").replace("Z", "")
# db_entry["corruptionindex"] = get_corruption_index(ip_info["country"])
return db_entry
def create_proxy_data_parser(
proxy_db: Database, ip_db: Database, proxy_expire_time: timedelta, ip_expire_time: timedelta
) -> ParseRequest:
parse_ip_info = create_ip_info_parser(ip_db, ip_expire_time)
async def parse_proxy_data(session: ClientSession, proxy_data: dict[str, str]) -> None:
"""
Retrieves and stores a proxies data, including it's ip address data separetly.
"""
ip_address = proxy_data["ip"]
await parse_ip_info(session, ip_address)
ip_and_port = f"{ip_address}:{proxy_data['port']}"
if proxy_db.key_expired(ip_and_port, proxy_expire_time):
ip_info = ip_db.get(ip_address)
db_entry = forge_proxy_entry(ip_info, proxy_data)
proxy_db.store_entry(ip_and_port, db_entry)
return parse_proxy_data
def fetch_proxylist(page_limit: int, request_limit: int) -> Iterable[dict[str, str]]:
"""
Asynchronously requests a list of proxies from proxylist.geonode.com.
"""
base_url = "https://proxylist.geonode.com"
api_ref_template = "/api/proxy-list?limit={}&page={{}}"
proxylist_api_template = api_ref_template.format(page_limit)
# Two format passes: the first fills limit=1 and unescapes the page placeholder,
# the second fills page=1 (a single .format(1, 1) would leave a literal "{}" in the URL).
single_proxy_query_url = str_join(base_url, api_ref_template.format(1).format(1))
log = get_default_logger()
request = AsyncRequest("GET", "", headers={"Accept": "application/json"})
responses = []
def fetch_page_range():
# Get the range of page numbers to use for requesting all proxies
# currently available from the api.
response = curl_get_json(single_proxy_query_url)
if response is None:
log.error("Could not fetch proxy count from %s", single_proxy_query_url)
return range(0)
proxy_count = response["total"]
request_count = ceil(proxy_count / page_limit)
return range(1, request_count + 1)
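# e.g. (hypothetical numbers) a total of 2,345 proxies with page_limit=100 gives
# request_count = ceil(2345 / 100) = 24, i.e. pages 1 through 24.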
async def proxylist_request(session: ClientSession, page_number: int):
request.url = proxylist_api_template.format(page_number)
resp = await log_request(request, session)
# If response is none, an error occurred and the fetch could not be made.
if resp is None:
log.warning("Could not fetch proxylist from %s", request.url)
return
# Response contains a key data with all the proxies data.
proxylist_data = loads(resp)["data"]
log.info("Fetched %d proxies from page %d", len(proxylist_data), page_number)
responses.append(proxylist_data)
run_async_requests(
fetch_page_range(), proxylist_request, base_url=base_url, limit=request_limit
)
# Each request contains a data key which holds the proxy list.
return (proxy_data for proxylist in responses for proxy_data in proxylist)
def proxy_scraper(
proxy_db: Database,
ip_db: Database,
proxy_expire_time: timedelta,
ip_expire_time: timedelta,
limit: int,
):
proxylist = fetch_proxylist(100, limit)
prev_proxy_db_count = proxy_db.get_count()
prev_ip_db_count = ip_db.get_count()
run_async_requests(
proxylist,
create_proxy_data_parser(proxy_db, ip_db, proxy_expire_time, ip_expire_time),
limit=limit,
)
# Log new ip and proxies entries.
new_proxies_count = proxy_db.get_count() - prev_proxy_db_count
new_ips_count = ip_db.get_count() - prev_ip_db_count
log_db_entry_status(new_proxies_count, PROXY_DB_NAME)
log_db_entry_status(new_ips_count, IP_DB_NAME)
| 2.046875 | 2 |
lldb/test/API/tools/lldb-server/TestGdbRemoteProcessInfo.py | mkinsner/llvm | 2,338 | 12789058 | import gdbremote_testcase
import lldbgdbserverutils
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
def test_qProcessInfo_returns_running_process(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id looks reasonable.
pid_text = process_info.get("pid")
self.assertIsNotNone(pid_text)
pid = int(pid_text, base=16)
self.assertNotEqual(0, pid)
# If possible, verify that the process is running.
self.assertTrue(lldbgdbserverutils.process_is_running(pid, True))
def test_attach_commandline_qProcessInfo_reports_correct_pid(self):
self.build()
self.set_inferior_startup_attach()
procs = self.prep_debug_monitor_and_inferior()
self.assertIsNotNone(procs)
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id matches what we expected.
pid_text = process_info.get('pid', None)
self.assertIsNotNone(pid_text)
reported_pid = int(pid_text, base=16)
self.assertEqual(reported_pid, procs["inferior"].pid)
def test_qProcessInfo_reports_valid_endian(self):
self.build()
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the process id looks reasonable.
endian = process_info.get("endian")
self.assertIsNotNone(endian)
self.assertIn(endian, ["little", "big", "pdp"])
def qProcessInfo_contains_keys(self, expected_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the expected keys are present and non-None within the process
# info.
missing_key_set = set()
for expected_key in expected_key_set:
if expected_key not in process_info:
missing_key_set.add(expected_key)
self.assertEqual(
missing_key_set,
set(),
"the listed keys are missing in the qProcessInfo result")
def qProcessInfo_does_not_contain_keys(self, absent_key_set):
procs = self.prep_debug_monitor_and_inferior()
self.add_process_info_collection_packets()
# Run the stream
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Gather process info response
process_info = self.parse_process_info_response(context)
self.assertIsNotNone(process_info)
# Ensure the unexpected keys are not present
unexpected_key_set = set()
for unexpected_key in absent_key_set:
if unexpected_key in process_info:
unexpected_key_set.add(unexpected_key)
self.assertEqual(
unexpected_key_set,
set(),
"the listed keys were present but unexpected in qProcessInfo result")
@add_test_categories(["debugserver"])
def test_qProcessInfo_contains_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype']))
@add_test_categories(["llgs"])
def test_qProcessInfo_contains_triple_ppid(self):
self.build()
self.qProcessInfo_contains_keys(set(['triple', 'parent-pid']))
@add_test_categories(["debugserver"])
def test_qProcessInfo_does_not_contain_triple(self):
self.build()
# We don't expect to see triple on darwin. If we do, we'll prefer triple
# to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup
# for the remote Host and Process.
self.qProcessInfo_does_not_contain_keys(set(['triple']))
@add_test_categories(["llgs"])
def test_qProcessInfo_does_not_contain_cputype_cpusubtype(self):
self.build()
self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype']))
| 1.976563 | 2 |
defuzzyfikasi.py | cahyoardhi/fuzzy-algorithm | 3 | 12789059 | def fdefuzzyfikasi(inputan):
for k,v in inputan.items():
if k == 'kecil':
kecil = v
else:
besar = v
tempbesar = 0
tempkecil = 0
x = 0
y = 0
for i in range(20,90,7):
if i <= 60:
tempkecil += i
x += 1
else:
tempbesar += i
y += 1
x -=1
return ((tempkecil * kecil) + (tempbesar * besar)) / ((kecil * x) + (besar * y))
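# Hedged usage sketch (added for illustration; the membership degrees below are
# made-up values that would normally come from an earlier fuzzification step):
# fdefuzzyfikasi({'kecil': 0.2, 'besar': 0.5}) returns the weighted average of the
# two membership regions (roughly 63.3 for these inputs).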
#((27+34+41+48+55)*0.2 + (62+69+76+83+90)*0.5) / ((0.5)*5 + (0.2)*5) case 1
#((45 + 50 + 55) * 0.75 + (60+65+70+75+80+85+90) * 0.25 / ((0.75)*3) + (0.25)*7)) case 2 | 2.9375 | 3 |
giftest.py | TimothyBergstrom/Zergy | 0 | 12789060 | import tkinter as tk
from Gifhandler import *
#Main window
top=tk.Tk()
#Icon
top.iconbitmap('gifs/zergyicon.ico')
#Setting color
top.configure(background='gold')
#Title
top.title('Zergy')
#Fixing picture canvas (will load later)
topcanvas=tk.Canvas(top,width=250,height=250,background='gold')
topcanvas.pack()
#Open gif
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
def runwaitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/zergysmall.gif',40)
mainanimation.animate()
def runbuggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/bug.gif',30)
mainanimation.animate_noloop()
def runtraingif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/train.gif',100)
mainanimation.animate()
def rungotitgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/gotit.gif',30,200,130)
mainanimation.animate_noloop()
def runboomgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/boom.gif',40,200,130)
mainanimation.animate()
def runburrowgif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/burrow.gif',30,100,130)
mainanimation.animate()
def runmorechasegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/morechase.gif',30,100,130)
mainanimation.animate()
def runjumpinggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/jumping.gif',40,100,130)
mainanimation.animate()
def runannoyinggif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/annoying.gif',30,-45,130)
mainanimation.animate()
def runcutegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/cute.gif',30,140,150)
mainanimation.animate()
def runchasegif():
global topcanvas, mainanimation
mainanimation.stop_animation()
mainanimation=Gifhandler(top,topcanvas,'gifs/chase.gif',30,50,130)
mainanimation.animate()
gifname=input('gif: ')
vars()['run'+gifname+'gif']()
top.mainloop()
| 2.84375 | 3 |
elastic/elastic_driver.py | HungThinhPhung/Miscellaneous | 0 | 12789061 | import json
import pickle
import requests
from elastic.using_requests import get_gov
demo_tax_codes = pickle.load(open('error.p', 'rb'))
host = 'http://0.0.0.0:9201'
host = 'http://10.0.6.21:30152'
e_index = 'index'
e_index = 'sme_autocomplete_index_2'
e_type = 'sme_autocomplete_type'
default_link = host + '/' + e_index + '/' + e_type + '/'
def get(tax_code):
link = 'http://10.0.6.21:30152/sme_autocomplete_index/sme_autocomplete_type/_search?q=taxCode:' + tax_code + '&size=1'
response = requests.get(link)
response = json.loads(response.content.decode('utf-8'))
return response['hits']['hits'][0] if not len(response['hits']['hits']) == 0 else {}
def add_doc(data):
json_sent = data['_source']
link = 'http://0.0.0.0:9201/' + data['_index'] + '/' + data['_type'] + '/' + json_sent['taxCode']
response = requests.put(link, json=json_sent)
print()
def get_tax_codes(start_uid=0, size=10):
query_data = {"size": size, "_source": ["taxCode", "tax_code"], "query": {"match_all": {}},
"search_after": [start_uid],
"sort": [
{"_uid": "asc"}
]}
url = default_link + '_search'
response = requests.post(url=url, json=query_data)
response_data = json.loads(response.content.decode('utf-8'))
return response_data['hits']['hits']
def update(id, d):
link = default_link + id + '/_update'
script = {
"script": "ctx._source.remove('eng_name'); "
"ctx._source.remove('short_name');"
"ctx._source.remove('tax_code');"
"ctx._source.remove('name');"
"ctx._source.remove('active_status');"
"ctx._source.remove('enterprise_type');"
"ctx._source.remove('founding_date');"
"ctx._source.remove('legal_representative');"
"ctx._source.owner='" + d['legal_representative'] + "';"
"ctx._source.address='" + d['address'] + "';"
"ctx._source.engName='" + d[
'eng_name'] + "';"
"ctx._source.shortName='" + d['short_name'] + "';"
"ctx._source.taxCode='" + d['tax_code'] + "';"
"ctx._source.companyName='" +
d['name'] + "';"
"ctx._source.activeStatus='" + d['active_status'] + "';"
"ctx._source.enterpriceType='" + d['enterprise_type'] + "';"
"ctx._source.foundedDate='" +
d['founding_date'] + "';"
"ctx._source.verify=1;"
}
response = requests.post(link, json=script)
if not response.status_code == 200:
print()
def main():
tax_codes = get_tax_codes()
while not len(tax_codes) == 0:
for code in tax_codes:
if len(code['_source']) == 0:
tax_code = code['_id']
else:
tax_code = code['_source']['taxCode'] if 'taxCode' in code['_source'] else code['_source']['tax_code']
print('Id: ' + code['_id'] + ', taxCode: ' + tax_code)
data = get_gov(tax_code, False)
if len(data) == 0:
print('Error')
continue
update(code['_id'], data)
last_code = tax_codes[-1]
tax_codes = get_tax_codes(start_uid=last_code['_type'] + '#' + last_code['_id'])
if __name__ == '__main__':
# for code in demo_tax_codes:
# data = get(code)
# if data == {}:
# continue
# add_doc(data)
main()
| 2.640625 | 3 |
test/test/pytools/viz/dendrogram/test_dendrogram.py | BCG-Gamma/pytools | 17 | 12789062 | """
Tests for package pytools.viz.dendrogram
"""
# noinspection PyPackageRequirements
import hashlib
import logging
from io import StringIO
import numpy as np
# noinspection PyPackageRequirements
import pytest
# noinspection PyPackageRequirements
import scipy.cluster.hierarchy as hc
from pytools.viz.dendrogram import DendrogramDrawer, DendrogramReportStyle, LinkageTree
log = logging.getLogger(__name__)
@pytest.fixture
def linkage_matrix() -> np.ndarray:
"""Create a linkage matrix."""
x = np.array([[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]])
return hc.linkage(x)
@pytest.fixture
def linkage_tree(linkage_matrix: np.ndarray) -> LinkageTree:
"""Create a linkage tree for drawing tests."""
return LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=list("ABCDEFGH"),
leaf_weights=[(w + 1) / 36 for w in range(8)],
)
def test_dendrogram_drawer_text(linkage_matrix: np.ndarray) -> None:
checksum_dendrogram_report = "32427095857f0589f68210ad4b2e8210"
leaf_names = list("ABCDEFGH")
leaf_weights = [(w + 1) / 36 for w in range(8)]
with pytest.raises(ValueError) as value_error:
LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=leaf_weights,
max_distance=1,
)
assert value_error.value.args == (
"arg max_distance=1 must be equal to or greater than the maximum distance "
"(= 4.0) in the linkage tree",
)
linkage_tree = LinkageTree(
scipy_linkage_matrix=linkage_matrix,
leaf_names=leaf_names,
leaf_weights=[(w + 1) / 36 for w in range(8)],
distance_label="distance",
leaf_label="label",
weight_label="weight",
)
with StringIO() as out:
dd = DendrogramDrawer(style=DendrogramReportStyle(out=out))
dd.draw(data=linkage_tree, title="Test")
report_str = str(out.getvalue())
log.debug(f"\n{report_str}")
assert (
hashlib.md5(str(report_str).encode("utf-8")).hexdigest()
) == checksum_dendrogram_report
| 2.46875 | 2 |
tasks/tests/tasknamefilter_test.py | kemmot/PyTasks | 0 | 12789063 | <reponame>kemmot/PyTasks
import unittest
import unittest.mock as mock
import filters.tasknamefilter as tasknamefilter
class TaskNameFilterTests(unittest.TestCase):
def test_constructor_sets_name(self):
target = tasknamefilter.TaskNameFilter(mock.Mock(), 'test')
self.assertEqual('test', target.name)
def test_is_match_returns_true_on_matching_name(self):
mock_context = mock.Mock()
task = mock.Mock()
task.name = 'test'
self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task))
def test_is_match_returns_false_on_non_matching_index(self):
mock_context = mock.Mock()
task = mock.Mock()
task.name = 'woble'
self.assertFalse(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task))
def test_is_match_returns_true_when_name_contains_same_case(self):
mock_context = mock.Mock()
task = mock.Mock()
task.name = 'onetesttwo'
self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task))
def test_is_match_returns_true_when_name_contains_different_case(self):
mock_context = mock.Mock()
task = mock.Mock()
task.name = 'oneTESTtwo'
self.assertTrue(tasknamefilter.TaskNameFilter(mock_context, 'test').is_match(task))
class TaskNameFilterParserTests(unittest.TestCase):
def test_parse_none_returns_none(self):
mock_context = mock.Mock()
target = tasknamefilter.TaskNameFilterParser().parse(mock_context, None)
self.assertIsNone(target)
def test_parse_empty_string_returns_none(self):
mock_context = mock.Mock()
target = tasknamefilter.TaskNameFilterParser().parse(mock_context, '')
self.assertIsNone(target)
def test_parse_non_empty_string_returns_filter(self):
mock_context = mock.Mock()
target = tasknamefilter.TaskNameFilterParser().parse(mock_context, 'test')
self.assertIsInstance(target, tasknamefilter.TaskNameFilter)
self.assertEqual('test', target.name)
def test_parse_strips_forward_slashes(self):
mock_context = mock.Mock()
target = tasknamefilter.TaskNameFilterParser().parse(mock_context, '/test/')
self.assertIsInstance(target, tasknamefilter.TaskNameFilter)
self.assertEqual('test', target.name)
| 2.8125 | 3 |
astar_spark/SP_ASTAR version1/graph.py | adoni91/HGraphProject | 0 | 12789064 | from node import *
from nodeitem import *
from math import sqrt, pow
import time
class Graph:
def __init__(self, node=[]):
self.node=node
def createNodeProperty(self, line):
return [int(line.split()[0]), int(line.split()[1])]
def createEdgeProperty(self, line):
return [int(line.split()[0]), [int(line.split()[1]), int(line.split()[2])]]
def getSucessorProperty(self,rdd):
rdd2 = list()
for values in rdd.__iter__():
rdd2.append([values[0],values[1]])
rdd2.sort()
return rdd2
#transform line into key-value pair
def lineToNode(self, line):
key=line[0]
value=line[1]
return Node(key, value)
# Return the transition function value of a state x. QxW->Q
def getG(self, succesor):
return succesor[1]
# Euclidean distance between the state s1 and s2
def getEuclideanDistance(self, target1, target2):
return sqrt(pow(target2 - target1, 2)) #+ random.random() * 0.1
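# For scalar node IDs this reduces to abs(target2 - target1); it serves as the
# heuristic term H in the A* expansion below.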
# Expand the neighbors of the current state into the open list
def searchNeighboardOfNode(self, current_node, goal_node, open_list):
for next_sucessor in current_node.get_succesors():
G = self.getG(next_sucessor)
H = self.getEuclideanDistance(goal_node.get_targetID(), current_node.get_targetID())
node_item = Nodeitem(next_sucessor[0], G, H, current_node.get_targetID())
open_list.append(node_item)
#Return the most promising state
def getMinOpenListNode(self, open_list, goal_node):
min_f = open_list[0].get_f()
selected_item = open_list[0]
for i in range(1, len(open_list)):
if open_list[i].get_f() <= min_f:
min_f = open_list[i].get_f()
selected_item = open_list[i]
if open_list[i].get_targetID() == goal_node.get_targetID():
selected_item = open_list[i]
break
return selected_item
# Return the new current node
def searchNewCurrentNode(self, node_list, targetID):
current_node = Node()
for node in node_list:
if node.get_targetID() == targetID:
current_node = node
break
return current_node
# Return sequence from the close list
def extractSequenceFromCloseList(self, close_list):
inter_close_list = []
path = []
size = len(close_list)
for i in range(1, size):
if close_list[i].get_idpreviousnode() == close_list[i - 1].get_idpreviousnode():
inter_close_list.append(close_list[i - 1])
for item in inter_close_list:
close_list.remove(item)
for item in close_list:
sequence = item.get_idpreviousnode(), item.get_targetID()
path.append(sequence)
return path
# return the id of boundary nodes
def getBoundaryNodesId(self, rdd, numberPartition):
nodes_id = []
all_rdd_parts=rdd.glom().collect()
for i in range(1, numberPartition):
nodes_id.append(all_rdd_parts[i][0].get_targetID())
return nodes_id
# compute intermediate path
def A_Star_Mapper(self, rdd_part):
intermediate_open_list=[]
intermediate_close_list=[]
node_list_rdd_part= list(rdd_part.__iter__())
intermediate_init_node=node_list_rdd_part.__getitem__(0)
intermediate_goal_node=node_list_rdd_part.__getitem__(len(node_list_rdd_part)-1)
current_node = intermediate_init_node
while current_node.get_targetID() != intermediate_goal_node.get_targetID():
self.searchNeighboardOfNode(current_node, intermediate_goal_node, intermediate_open_list)
selected_item=self.getMinOpenListNode(intermediate_open_list, intermediate_goal_node)
intermediate_open_list.remove(selected_item)
intermediate_close_list.append(selected_item)
current_node = self.searchNewCurrentNode(node_list_rdd_part, selected_item.get_targetID())
intermediate_path=self.extractSequenceFromCloseList(intermediate_close_list)
return intermediate_path
| 2.859375 | 3 |
app/viewer/__init__.py | Dbrown411/py-oct | 0 | 12789065 | from .viewer_panels import * | 1.1875 | 1 |
scripts/2e_gen_direct_ac3d.py | skywalkerisnull/ImageAnalysis | 0 | 12789066 | #!/usr/bin/python3
import sys
#sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
#import commands
import cv2
import fnmatch
import numpy as np
import os.path
import random
import navpy
sys.path.append('../lib')
import AC3D
import Pose
import ProjectMgr
import SRTM
import transformations
# for all the images in the project image_dir, compute the camera
# poses from the aircraft pose (and camera mounting transform).
# Project the image plane onto an SRTM (DEM) surface for our best
# layout guess (at this point before we do any matching/bundle
# adjustment work.)
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument('--texture-resolution', type=int, default=512, help='texture resolution (should be 2**n, so numbers like 256, 512, 1024, etc.')
parser.add_argument('--ground', type=float, help='ground elevation in meters')
parser.add_argument('--sba', action='store_true', help='use sba pose')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
ref = proj.ned_reference_lla
# setup SRTM ground interpolator
sss = SRTM.NEDGround( ref, 6000, 6000, 30 )
ac3d_steps = 8
# compute the uv grid for each image and project each point out into
# ned space, then intersect each vector with the srtm ground.
# build our local image list for placing
print(args.sba)
if not args.sba:
image_list = proj.image_list
else:
image_list = []
for image in proj.image_list:
if image.camera_pose_sba != None:
#print image.camera_pose_sba
image_list.append(image)
depth = 0.0
camw, camh = proj.cam.get_image_params()
for image in image_list:
print(image.name)
# scale the K matrix if we have scaled the images
scale = float(image.width) / float(camw)
K = proj.cam.get_K(scale)
IK = np.linalg.inv(K)
grid_list = []
u_list = np.linspace(0, image.width, ac3d_steps + 1)
v_list = np.linspace(0, image.height, ac3d_steps + 1)
#print "u_list:", u_list
#print "v_list:", v_list
for v in v_list:
for u in u_list:
grid_list.append( [u, v] )
print('grid_list:', grid_list)
if not args.sba:
proj_list = proj.projectVectors( IK, image.get_body2ned(),
image.get_cam2body(), grid_list )
else:
print(image.get_body2ned_sba())
proj_list = proj.projectVectors( IK, image.get_body2ned_sba(),
image.get_cam2body(), grid_list )
print('proj_list:', proj_list)
if not args.sba:
ned = image.camera_pose['ned']
else:
ned = image.camera_pose_sba['ned']
print('ned', image.camera_pose['ned'], ned)
if args.ground:
pts_ned = proj.intersectVectorsWithGroundPlane(ned,
args.ground, proj_list)
else:
pts_ned = sss.interpolate_vectors(ned, proj_list)
print("pts_3d (ned):\n", pts_ned)
# convert ned to xyz and stash the result for each image
image.grid_list = []
ground_sum = 0
for p in pts_ned:
image.grid_list.append( [p[1], p[0], -(p[2]+depth)] )
ground_sum += -p[2]
depth -= 0.01 # favor last pictures above earlier ones
# call the ac3d generator
AC3D.generate(image_list, src_dir=proj.source_dir,
project_dir=args.project, base_name='direct',
version=1.0, trans=0.1, resolution=args.texture_resolution)
if not args.ground:
print('Avg ground elevation (SRTM):', ground_sum / len(pts_ned))
| 2.3125 | 2 |
focuser.py | chripell/yaaca | 1 | 12789067 | <gh_stars>1-10
from astropy.stats import sigma_clipped_stats
from photutils import DAOStarFinder, IRAFStarFinder
from collections import namedtuple
import numpy as np
FocusData = namedtuple('FocusData', 'bot p10 mean p90 top std back back_std')
class Focuser:
def __init__(self, fwhm=3.0, threshold_stds=100., algo='iraf'):
self.sources = None
self.n = 0
self.par = {}
self.fwhm = fwhm
self.threshold_stds = threshold_stds
self.algo = algo
if self.algo == 'dao':
self.odata = ("sharpness", "roundness1", "roundness2")
else:
self.odata = ("sharpness",)
def evaluate(self, data):
mean, median, std = sigma_clipped_stats(data, sigma=3.0, maxiters=5)
self.back = median
self.back_std = std
if self.algo == 'dao':
finder = DAOStarFinder(
fwhm=self.fwhm, threshold=self.threshold_stds*std)
else:
finder = IRAFStarFinder(
fwhm=self.fwhm, threshold=self.threshold_stds*std)
self.sources = finder(data - median)
for col in self.sources.colnames:
self.sources[col].info.format = "%.8g"
print(self.num())
if self.num() > 0:
for p in self.odata:
self.par[p] = self.calc(p)
return self.sources
def num(self):
if self.sources is not None:
return len(self.sources)
return 0
def calc(self, p):
data = np.absolute(self.sources.field(p))
mean = data.mean()
bot = data.min()
top = data.max()
std = data.std()
p10 = np.percentile(data, 10)
p90 = np.percentile(data, 90)
return FocusData(bot, p10, mean, p90, top, std,
self.back, self.back_std)
def draw(self, cr, par, radius=10):
mean = self.par[par].mean
m_pi = 2 * np.pi
for i in self.sources:
if abs(i[par]) >= mean:
cr.set_source_rgb(0, 1.0, 0)
else:
cr.set_source_rgb(1.0, 0, 0)
cr.arc(i["xcentroid"], i["ycentroid"], radius, 0, m_pi)
cr.stroke()
def get(self, p):
return self.par[p]
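# Hedged usage sketch (added for illustration; the synthetic frame and the lowered
# detection threshold are assumptions, not values taken from this module).
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    frame = rng.normal(1000.0, 5.0, size=(256, 256))  # flat synthetic background
    frame[100:104, 120:124] += 400.0                  # one bright artificial blob
    focuser = Focuser(fwhm=3.0, threshold_stds=5.0, algo='iraf')
    focuser.evaluate(frame)
    if focuser.num() > 0:
        print(focuser.get('sharpness'))  # FocusData stats for the detected sources
    else:
        print('no sources detected in the synthetic frame')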
| 2.265625 | 2 |
dropout_finetuning.py | alecmeade/ai_for_earth | 1 | 12789068 | import gc
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import torch
import torch.nn as nn
import torchvision
import sys
# To view tensorboard metrics
# tensorboard --logdir=logs --port=6006 --bind_all
from torch.utils.tensorboard import SummaryWriter
from functools import partial
from evolver import CrossoverType, MutationType, InitType, MatrixEvolver, VectorEvolver
from unet import UNet
from dataset_utils import PartitionType
from cuda_utils import maybe_get_cuda_device, clear_cuda
from landcover_dataloader import get_landcover_dataloaders, get_landcover_dataloader
from ignite.contrib.handlers.tensorboard_logger import *
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from ignite.metrics import Accuracy, Loss, ConfusionMatrix, mIoU
from ignite.handlers import ModelCheckpoint
from ignite.utils import setup_logger
from ignite.engine import Engine
# Define directories for data, logging and model saving.
base_dir = os.getcwd()
dataset_name = "landcover_large"
dataset_dir = os.path.join(base_dir, "data/" + dataset_name)
experiment_name = "dropout_single_point_finetuning_100_children"
model_name = "best_model_9_validation_accuracy=0.8940.pt"
model_path = os.path.join(base_dir, "logs/" + dataset_name + "/" + model_name)
log_dir = os.path.join(base_dir, "logs/" + dataset_name + "_" + experiment_name)
# Create DataLoaders for each partition of Landcover data.
dataloader_params = {
'batch_size': 8,
'shuffle': True,
'num_workers': 6,
'pin_memory': True}
partition_types = [PartitionType.TRAIN, PartitionType.VALIDATION,
PartitionType.FINETUNING, PartitionType.TEST]
data_loaders = get_landcover_dataloaders(dataset_dir,
partition_types,
dataloader_params,
force_create_dataset=False)
train_loader = data_loaders[0]
finetuning_loader = data_loaders[2]
dataloader_params['shuffle'] = False
test_loader = get_landcover_dataloader(dataset_dir, PartitionType.TEST, dataloader_params)
# Get GPU device if available.
device = maybe_get_cuda_device()
# Determine model and training params.
params = {
'max_epochs': 10,
'n_classes': 4,
'in_channels': 4,
'depth': 5,
'learning_rate': 0.001,
'log_steps': 1,
'save_top_n_models': 4,
'num_children': 100
}
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
# Create Trainer or Evaluators
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
# Determine metrics for evaluation.
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
"mean_iou": mIoU(ConfusionMatrix(num_classes = params['n_classes'])),
}
for batch in train_loader:
batch_x = batch[0]
_ = model(batch_x)
break
drop_out_layers = model.get_dropout_layers()
del model, batch_x
clear_cuda()
for layer in drop_out_layers:
layer_name = layer.name
size = layer.x_size[1:]
sizes = [size]
clear_cuda()
model = UNet(in_channels = params['in_channels'],
n_classes = params['n_classes'],
depth = params['depth'])
model.load_state_dict(torch.load(model_path))
model.to(device)
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(),
lr=params['learning_rate'])
num_channels = size[0]
evolver = VectorEvolver(num_channels,
CrossoverType.UNIFORM,
MutationType.FLIP_BIT,
InitType.RANDOM,
flip_bit_prob=0.25,
flip_bit_decay=0.5)
log_dir_test = log_dir + "_" + layer_name
def mask_from_vec(vec, matrix_size):
mask = np.ones(matrix_size)
for i in range(len(vec)):
if vec[i] == 0:
mask[i, :, :] = 0
elif vec[i] == 1:
mask[i, :, :] = 1
return mask
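# For example (illustrative only): mask_from_vec([1, 0], (2, 4, 4)) keeps every
# activation in channel 0 and zeroes out all of channel 1.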
def dropout_finetune_step(engine, batch):
model.eval()
with torch.no_grad():
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
loss = sys.float_info.max
for i in range(params['num_children']):
model.zero_grad()
child_vec = evolver.spawn_child()
child_mask = mask_from_vec(child_vec, size)
model.set_dropout_masks({layer_name: torch.tensor(child_mask, dtype=torch.float32).to(device)})
outputs = model(batch_x)
current_loss = criterion(outputs[:, :, 127:128,127:128], batch_y[:,127:128,127:128]).item()
loss = min(loss, current_loss)
if current_loss == 0.0:
current_loss = sys.float_info.max
else:
current_loss = 1.0 / current_loss
evolver.add_child(child_vec, current_loss)
priority, best_child = evolver.get_best_child()
best_mask = mask_from_vec(best_child, size)
model.set_dropout_masks({layer_name: torch.tensor(best_mask, dtype=torch.float32).to(device)})
return loss
# Create Trainer or Evaluators
trainer = Engine(dropout_finetune_step)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
trainer.logger = setup_logger("Trainer")
train_evaluator.logger = setup_logger("Train Evaluator")
validation_evaluator.logger = setup_logger("Validation Evaluator")
@trainer.on(Events.ITERATION_COMPLETED(every=1))
def report_evolver_stats(engine):
priorities = np.array(evolver.get_generation_priorities())
# Take reciprocal since we needed to store priorities in min heap.
priorities = 1.0 / priorities
tb_logger.writer.add_scalar("training/evolver_count",
priorities.shape[0], engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_mean",
np.mean(priorities), engine.state.iteration)
tb_logger.writer.add_scalar("training/evolver_std",
np.std(priorities), engine.state.iteration)
evolver.update_parents()
@trainer.on(Events.EPOCH_COMPLETED)
def visualize_validation_predictions(engine):
for i, batch in enumerate(test_loader):
batch_x, batch_y = batch
batch_x = batch_x.to(device)
batch_y = batch_y.to(device)
outputs = model(batch_x)
num_images = batch_x.shape[0]
batch_y_detach = batch_y.detach().cpu().numpy()
batch_x_detach = batch_x.detach().cpu().numpy()
outputs_detach = outputs.detach().cpu().numpy()
for j in range(num_images):
f, ax = plt.subplots(1, 3, figsize=(10, 4))
ax[0].imshow(np.moveaxis(batch_x_detach[j, :, :, :], [0], [2]) / 255.0)
ax[1].imshow((np.array(batch_y_detach[j, :, :])))
ax[2].imshow(np.argmax(np.moveaxis(np.array(outputs_detach[j, :, :, :]), [0],[ 2]), axis=2))
ax[0].set_title("X")
ax[1].set_title("Y")
ax[2].set_title("Predict")
f.suptitle("Layer: " + layer_name + " Itteration: " + str(engine.state.iteration) + " Image: " + str(j))
plt.show()
if i > 5:
break
break
# Tensorboard Logger setup below based on pytorch ignite example
# https://github.com/pytorch/ignite/blob/master/examples/contrib/mnist/mnist_with_tensorboard_logger.py
@trainer.on(Events.EPOCH_COMPLETED)
def compute_metrics(engine):
"""Callback to compute metrics on the train and validation data."""
train_evaluator.run(finetuning_loader)
validation_evaluator.run(test_loader)
def score_function(engine):
"""Function to determine the metric upon which to compare model."""
return engine.state.metrics["accuracy"]
# Setup Tensor Board Logging
tb_logger = TensorboardLogger(log_dir=log_dir_test)
tb_logger.attach_output_handler(
trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
tag="training",
output_transform=lambda loss: {"batchloss": loss},
metric_names="all",
)
for tag, evaluator in [("training", train_evaluator), ("validation", validation_evaluator)]:
tb_logger.attach_output_handler(
evaluator,
event_name=Events.EPOCH_COMPLETED,
tag=tag,
metric_names="all",
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach_opt_params_handler(trainer,
event_name=Events.ITERATION_COMPLETED(every=params['log_steps']),
optimizer=optimizer)
model_checkpoint = ModelCheckpoint(
log_dir_test,
n_saved=params['save_top_n_models'],
filename_prefix="best",
score_function=score_function,
score_name="validation_accuracy",
global_step_transform=global_step_from_engine(trainer),
)
validation_evaluator.add_event_handler(Events.COMPLETED, model_checkpoint, {"model": model})
trainer.run(finetuning_loader, max_epochs=params['max_epochs'])
tb_logger.close()
| 1.820313 | 2 |
roles/sensu/client/files/plugins/metrics-process-usage.py | Shasthojoy/cuttle | 21 | 12789069 | #!/usr/bin/env python
#
# metrics-process-usage.py
#
# PLATFORMS:
# Linux
#
# DEPENDENCIES:
# Python 2.7 (the print statements and dict.viewkeys() calls below are Python 2 only
# and would need changes for Python 3)
# Python module: psutil https://pypi.python.org/pypi/psutil
#
# USAGE:
#
# metrics-process-usage.py -n <process_name> -w <cpu_warning_pct> -c <cpu_critical_pct> -W <mem_warning_pct> -C <mem_critical_pct> [-s <graphite_scheme>] [-z <criticality>]
#
# DESCRIPTION:
# Finds the pid[s] corresponding to a process name and obtains the necessary
# cpu and memory usage stats. Returns WARNING or CRITICAL when these stats
# exceed user specified limits.
#
# Code adapted from Jaime Gogo's script in the Sensu Plugins community:
# https://github.com/sensu-plugins/sensu-plugins-process-checks/blob/master/bin/metrics-per-process.py
#
# Released under the same terms as Sensu (the MIT license); see MITLICENSE
# for details.
#
# <NAME> <<EMAIL>>
import argparse
import sys
import os
import time
import psutil
STATE_OK = 0
STATE_WARNING = 1
STATE_CRITICAL = 2
CRITICALITY = 'critical'
PROC_ROOT_DIR = '/proc/'
def switch_on_criticality():
if CRITICALITY == 'warning':
sys.exit(STATE_WARNING)
else:
sys.exit(STATE_CRITICAL)
def find_pids_from_name(process_name):
'''Find process PID from name using /proc/<pids>/comm'''
pids_in_proc = [ pid for pid in os.listdir(PROC_ROOT_DIR) if pid.isdigit() ]
pids = []
for pid in pids_in_proc:
path = PROC_ROOT_DIR + pid
if 'comm' in os.listdir(path):
file_handler = open(path + '/comm', 'r')
if file_handler.read().rstrip() == process_name:
pids.append(int(pid))
return pids
def sum_dicts(dict1, dict2):
return dict(dict1.items() + dict2.items() +
[(k, dict1[k] + dict2[k]) for k in dict1.viewkeys() & dict2.viewkeys()])
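# For example (illustrative): sum_dicts({'a': 1}, {'a': 2, 'b': 3}) returns
# {'a': 3, 'b': 3}, because values for keys present in both dicts are added.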
def stats_per_pid(pid):
'''Gets process stats, cpu and memory usage in %, using the psutil module'''
stats = {}
process_handler = psutil.Process(pid)
stats['cpu_percent'] = process_handler.cpu_percent(interval=0.1)
stats['memory_percent'] = process_handler.memory_percent()
return stats
def multi_pid_process_stats(pids):
stats = {'cpu_percent': 0, 'memory_percent': 0}
for pid in pids:
stats = sum_dicts(stats, stats_per_pid(pid))
return stats
def graphite_printer(stats, graphite_scheme):
now = time.time()
for stat in stats:
print "%s.%s %s %d" % (graphite_scheme, stat, stats[stat], now)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--process_name', required=True)
parser.add_argument('-w', '--cpu_warning_pct', required=True)
parser.add_argument('-c', '--cpu_critical_pct', required=True)
parser.add_argument('-W', '--memory_warning_pct', required=True)
parser.add_argument('-C', '--memory_critical_pct', required=True)
parser.add_argument('-s', '--scheme', required=True)
parser.add_argument('-z', '--criticality', default='critical')
args = parser.parse_args()
CRITICALITY = args.criticality
pids = find_pids_from_name(args.process_name)
if not pids:
print 'Cannot find pids for this process. Enter a valid process name.'
switch_on_criticality()
total_process_stats = multi_pid_process_stats(pids)
graphite_printer(total_process_stats, args.scheme)
if total_process_stats['cpu_percent'] > float(args.cpu_critical_pct) or \
total_process_stats['memory_percent'] > float(args.memory_critical_pct):
print 'CPU Usage and/or memory usage at critical levels!!!'
switch_on_criticality()
if total_process_stats['cpu_percent'] > float(args.cpu_warning_pct) or \
total_process_stats['memory_percent'] > float(args.memory_warning_pct):
print 'Warning: CPU Usage and/or memory usage exceeding normal levels!'
sys.exit(STATE_WARNING)
sys.exit(STATE_OK)
if __name__ == "__main__":
main()
| 2.1875 | 2 |
insta/migrations/0007_remove_post_likes.py | Kennedy-karuri/insta-clone | 0 | 12789070 | <reponame>Kennedy-karuri/insta-clone
# Generated by Django 2.2.8 on 2020-10-22 01:34
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('insta', '0006_auto_20201022_0146'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='likes',
),
]
| 1.359375 | 1 |
setup.py | MarouenMechtri/CNG-Manager | 1 | 12789071 | # Copyright 2010-2012 Institut Mines-Telecom
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Jun 01, 2012
@author: <NAME>
@contact: <EMAIL>
@author: <NAME>
@contact: <EMAIL>
@organization: Institut Mines-Telecom - Telecom SudParis
@license: Apache License, Version 2.0
"""
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='pyocni',
author='<NAME>',
author_email='<EMAIL>',
version='0.3',
description='PyOCNI: A Python implementation of an extended OCCI with a JSON serialization',
#long_description=read('README'),
url='http://www.example.com/pyocni',
#packages=['pyocni'],
packages=find_packages(), #['pyocni'],
package_data = {
# Include *.conf, *.py and *.msg files found in the 'pyocni' package. A single
# key is used because a repeated 'pyocni' key would silently discard the first entry.
'pyocni': ['*.conf', '*.py', '*.msg'],
},
install_requires=[
'config',
'configobj',
#'logging',
'ordereddict',
'simplejson',
'jsonpickle',
'routes',
'webob',
'pesto',
'eventlet',
'sphinx',
'ZODB3',
'httplib2',
'couchdb',
'couchdbkit',
'tornado'
#'pack>=0.97',
#'pack'
],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache License, Version 2.0',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7'
]
)
| 1.734375 | 2 |
Preprocessing/Salimi/utils/_init__.py | maliha93/Fairness-Analysis-Code | 0 | 12789072 | <gh_stars>0
###
### FairDB utilities
### | 0.863281 | 1 |
oops/#040_garbageCollection.py | krishankansal/PythonPrograms | 0 | 12789073 | import gc
print(gc.isenabled())
gc.disable()
print(gc.isenabled())
gc.enable()
print(gc.isenabled())
| 1.640625 | 2 |
chembl_webresource_client/scripts/chembl_m2t.py | RowAnalytics/chembl_webresource_client | 248 | 12789074 | #!/usr/bin/env python
from __future__ import print_function
__author__ = 'mnowotka'
# ----------------------------------------------------------------------------------------------------------------------
import sys
import argparse
from chembl_webresource_client.scripts.utils import get_serializer, chembl_id_regex, smiles_regex, convert_to_smiles
from chembl_webresource_client.scripts.utils import resolve, mols_to_targets
AVAILABLE_SOURCE_FORMATS = ('chembl_id', 'sdf', 'smi')
# ----------------------------------------------------------------------------------------------------------------------
def get_options():
description = 'Find related targets for a set of compounds'
parser = argparse.ArgumentParser(description=description, prog='chembl_m2t')
parser.add_argument('-i', '--input', action='store', dest='input',
help='input file, standard input by default')
parser.add_argument('-o', '--output', action='store', dest='output',
help='output file, standard output by default')
parser.add_argument('-s', '--source-format', action='store', dest='source_format', default='chembl_id',
help='input file format. Can be one of 3: chembl_id (a comma separated list of chembl IDs), '
'sdf: (MDL molfile), smi (file containing smiles)')
parser.add_argument('-d', '--destination-format', action='store', dest='dest_format', default='uniprot',
help='output file format. can be chosen from 3 options: '
'[uniprot, gene_name, chembl_id]')
parser.add_argument('-H', '--Human', action='store_true', dest='human',
help='human readable output: prints header and first column with original names')
parser.add_argument('-O', '--organism', action='store', dest='organism',
help='Filter results by organism')
parser.add_argument('-p', '--parent', action='store_true', dest='parent',
help='when fetching targets include also targets from parents of given molecules')
parser.add_argument('-c', '--chunk-size', action='store', dest='chunk', default='1000',
help='Size of chunk of data retrieved from API')
return parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------------
def main():
options = get_options()
source_format = options.source_format.lower()
if source_format not in AVAILABLE_SOURCE_FORMATS:
        sys.stderr.write('Unsupported source format: %s\n' % options.source_format)
return
inp = sys.stdin
if source_format == 'sdf':
with open(options.input) if options.input else sys.stdin as in_f:
options.input = None
inp = convert_to_smiles(in_f)
with open(options.input) if options.input else inp as in_f, \
open(options.output, 'w') if options.output else sys.stdout as out_f:
serializer_cls = get_serializer(options.dest_format)
if not serializer_cls:
            sys.stderr.write('Unsupported format: %s\n' % options.dest_format)
return
if options.human:
serializer_cls.write_header(out_f)
for line in in_f:
if not line or line.lower().startswith('smiles'):
continue
chunk = line.strip().split()[0]
identifiers = chunk.strip().split(',')
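            # e.g. a line "CHEMBL25,CC(=O)Oc1ccccc1C(=O)O extra-cols" gives the identifiers
            # ['CHEMBL25', 'CC(=O)Oc1ccccc1C(=O)O'] (illustrative input, not a required format)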
valid_identifiers = list()
for identifier in identifiers:
if chembl_id_regex.match(identifier):
valid_identifiers.append(identifier)
elif smiles_regex.match(identifier):
valid_identifiers.extend([x['molecule_chembl_id'] for x in resolve(identifier)])
targets = mols_to_targets(valid_identifiers,
organism=options.organism,
only_ids=(options.dest_format == 'chembl_id'),
include_parents=options.parent,
chunk_size=int(options.chunk))
out_f.write(serializer_cls.serialize_line(targets, human=options.human, name=','.join(valid_identifiers)))
# ----------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
main()
# ----------------------------------------------------------------------------------------------------------------------
| 2.125 | 2 |
tests/api/records/test_loaders.py | galterlibrary/InvenioRDM-at-NU | 6 | 12789075 | """Test record form i.e. marshmallow schema is configured as expected."""
from copy import deepcopy
import pytest
from cd2h_repo_project.modules.records.marshmallow.json import (
AuthorSchemaV1, MetadataSchemaV1, RecordSchemaV1, ResourceTypeSchemaV1
)
@pytest.fixture
def create_input_metadatav1():
"""Factory pattern for the input to the marshmallow.json.MetadataSchemaV1.
"""
def _create_input_metadatav1(data={}):
data_to_use = {
'title': 'A title',
'authors': [
{
'first_name': 'An',
'last_name': 'author'
}
],
'description': 'A description',
'resource_type': {
'general': 'other',
'specific': 'other'
},
'license': 'mit-license',
'permissions': 'all_view',
}
data_to_use.update(data)
return data_to_use
return _create_input_metadatav1
@pytest.fixture
def create_input_record(create_input_metadatav1):
"""Factory pattern for an API input Record.
The returned dict is the input to the marshmallow loader used by the API.
"""
def _create_input_record(data=None):
data = deepcopy(data) if data else {}
data_to_use = {
'metadata': create_input_metadatav1(data.pop('metadata', {}))
}
data_to_use.update(data)
return data_to_use
return _create_input_record
class TestRecordSchemaV1(object):
def test_load_for_empty_json_contains_schema(self, appctx):
unmarshalled_record = RecordSchemaV1().load({})
assert not unmarshalled_record.errors
assert unmarshalled_record.data == {
'$schema': (
'https://localhost:5000/schemas/records/record-v0.1.0.json'
)
}
def test_load_for_valid_json_removes_metadata_envelope(
self, create_input_record):
input_record = create_input_record()
unmarshalled_record = RecordSchemaV1().load(input_record)
assert not unmarshalled_record.errors
loaded_record = unmarshalled_record.data
assert 'metadata' not in loaded_record
def test_load_for_invalid_json_returns_errors(self):
input_record = {'foo': 'bar'}
unmarshalled_record = RecordSchemaV1().load(input_record)
assert 'foo' in unmarshalled_record.errors
assert not unmarshalled_record.data
class TestMetadataSchemaV1(object):
def test_extra_key_is_ignored(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'foo': 'bar'})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
loaded_record = unmarshalled_record.data
# marshmallow does not care about additional keys
assert 'foo' not in unmarshalled_record.errors
assert 'foo' not in loaded_record
def test_missing_keys_return_errors(self):
serialized_record = {'foo': 'bar'}
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
required_keys = [
'title', 'description', 'authors', 'resource_type', 'license',
'permissions'
]
assert set(unmarshalled_record.errors.keys()) == set(required_keys)
assert (
unmarshalled_record.errors['title'] ==
['Missing data for required field.']
)
def test_authors_loaded(self, create_input_metadatav1):
authors = [
{
'first_name': 'John',
'middle_name': 'Jacob',
'last_name': 'Smith'
},
{
'first_name': 'Jane',
'middle_name': 'Janet',
'last_name': 'Doe',
'full_name': '<NAME>.' # Should be overwritten
}
]
serialized_record = create_input_metadatav1({
'authors': authors
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'authors' in deserialized_metadata
assert deserialized_metadata['authors'][0] == {
'first_name': 'John',
'middle_name': 'Jacob',
'last_name': 'Smith',
'full_name': 'Smith, <NAME>'
}
assert deserialized_metadata['authors'][1] == {
'first_name': 'Jane',
'middle_name': 'Janet',
'last_name': 'Doe',
'full_name': '<NAME>'
}
def test_resource_type_loaded(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'resource_type': {
'general': 'other',
'specific': 'other'
}
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'resource_type' in deserialized_metadata
def test_empty_required_key_returns_errors(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'title': None})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
assert 'title' in unmarshalled_record.errors
def test_description_too_short_returns_error(
self, create_input_metadatav1):
serialized_record = create_input_metadatav1({'description': 'A '})
unmarshalled_record = MetadataSchemaV1().load(serialized_record)
assert 'description' in unmarshalled_record.errors
# WHY: We place these tests here because we plan on having terms be a
# first-class citizen of the records schema
def test_one_term_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
}
]
serialized_record = create_input_metadatav1({
'terms': [{'data': term} for term in terms]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_multiple_terms_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
},
{
'source': 'MeSH',
'value': 'Acanthamoeba',
'id': 'D000048'
}
]
serialized_record = create_input_metadatav1({
'terms': [{'data': term} for term in terms]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_no_terms_loaded(self, create_input_metadatav1):
terms = []
serialized_record = create_input_metadatav1({
'terms': terms
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
serialized_record2 = create_input_metadatav1()
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record2)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == terms
serialized_record3 = create_input_metadatav1({
'terms': [None, {}, {'data': None}, {'data': {}}, '']
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record3)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert deserialized_metadata['terms'] == []
def test_incorrect_format_terms_returns_error(
self, create_input_metadatav1):
terms = ["bar"]
serialized_record = create_input_metadatav1({
'terms': terms
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert 'terms' in unmarshalled_metadata.errors
assert deserialized_metadata['terms'] == [{}]
def test_coalesce_terms_loaded(self, create_input_metadatav1):
terms = [
{
'source': 'MeSH',
'value': 'Cognitive Neuroscience',
'id': 'D000066494'
},
{
'source': 'FAST',
'value': 'Glucagonoma',
'id': '943672'
}
]
serialized_record = create_input_metadatav1({
'mesh_terms': [{'data': terms[0]}],
'fast_terms': [{'data': terms[1]}]
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert 'terms' in deserialized_metadata
assert 'mesh_terms' not in deserialized_metadata
assert 'fast_terms' not in deserialized_metadata
assert deserialized_metadata['terms'] == terms
def test_permissions_loaded(self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'permissions': 'restricted_view'
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert not unmarshalled_metadata.errors
assert deserialized_metadata['permissions'] == 'restricted_view'
def test_invalid_permissions_returns_errors(
self, create_input_metadatav1):
serialized_record = create_input_metadatav1({
'permissions': 'foo_view'
})
unmarshalled_metadata = MetadataSchemaV1().load(serialized_record)
deserialized_metadata = unmarshalled_metadata.data
assert 'permissions' in unmarshalled_metadata.errors
class TestAuthorSchemaV1(object):
def test_first_and_last_name_required(self):
author = {
'first_name': 'Jonathan',
}
unmarshalled_author = AuthorSchemaV1().load(author)
assert 'first_name' in unmarshalled_author.data
assert 'middle_name' not in unmarshalled_author.errors
assert 'last_name' in unmarshalled_author.errors
class TestResourceTypeSchemaV1(object):
def test_general_dataset_fills_specific_dataset(self):
resource_type = {
'general': 'dataset'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
assert not unmarshalled_resource_type.errors
assert 'general' in unmarshalled_resource_type.data
assert unmarshalled_resource_type.data['specific'] == 'dataset'
def test_valid_general_specific_combination_loads(self):
resource_type = {
'general': 'text resources',
'specific': 'letter'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
loaded_resource_type = unmarshalled_resource_type.data
assert not unmarshalled_resource_type.errors
assert loaded_resource_type['general'] == 'text resources'
assert loaded_resource_type['specific'] == 'letter'
def test_invalid_general_specific_combination_errors(self):
resource_type = {
'general': 'articles',
'specific': 'other'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
assert (
unmarshalled_resource_type.errors['_schema'][0] ==
'Invalid resource type.'
)
def test_general_specific_combination_maps_to_hierarchy(self):
resource_type = {
'general': 'text resources',
'specific': 'letter'
}
unmarshalled_resource_type = ResourceTypeSchemaV1().load(resource_type)
loaded_resource_type = unmarshalled_resource_type.data
assert loaded_resource_type['full_hierarchy'] == ['text', 'letter']
| 2.703125 | 3 |
python/server/backends/mock.py | liamstar97/searchhub | 48 | 12789076 | <gh_stars>10-100
from collections import defaultdict
import random
from loremipsum import get_sentences
from lucidfind.backends import Backend, Document
from lucidfind.fusion import compare_datasources
class MockBackend(Backend):
def __init__(self):
self.datasources = {}
def get_document(self, doc_id):
return _gen_fake_docs(1).next()
def get_datasource(self, id):
return self.datasources.get(id, None)
def update_datasource(self, id, **config):
datasource = self.get_datasource(id)
if datasource is None:
self.datasources[id] = config
else:
# Update it (maybe)
if compare_datasources(config, datasource) == False:
print "Detected an update in config, updating Fusion"
self.datasources[id] = config
return config
else:
print "No change in datasource, doing nothing"
return
def _gen_fake_docs(count, author=None, source=None, project=None):
authors = ["Joe", "Jane", "Bill", "Mary"]
sources = ["web", "twitter", "email"]
projects = ["solr", "lucene", "elasticsearch"]
def _random_choice(value, choices):
if value is not None:
return value
else:
return random.choice(choices)
for i in range(count):
yield Document(id="doc-%d" % i, author=_random_choice(author, authors), source=_random_choice(source, sources),
project=_random_choice(project, projects), content="".join(get_sentences(random.randint(1, 4))), link="http://example.com/%d" % i,
created_at="2015-01-01T%02d:%02d:00Z" % (random.randint(0,23), random.randint(0,59)))
def _gen_fake_facets(docs, author=None, source=None, project=None):
facets = defaultdict(lambda: defaultdict(int))
for doc in docs:
facets["author"][doc.author] += 1
facets["source"][doc.source] += 1
facets["project"][doc.project] += 1
return facets | 2.3125 | 2 |
pyapps/sensorapp.py | helena-project/beetle | 16 | 12789077 | #!/usr/bin/env python
"""
pygatt cloud monitoring
=======================
This module implements a cloud monitoring application for home sensors
"""
import argparse
import os
import socket
import ssl
import struct
import sys
import threading
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
PYGATT_PATH = "../pygatt"
sys.path.append(PYGATT_PATH)
import lib.gatt as gatt
import lib.uuid as uuid
import lib.beetle as beetle
from pygatt import ManagedSocket, GattClient, ClientError
def getArguments():
"""Arguments for script."""
parser = argparse.ArgumentParser()
parser.add_argument("--name", default="sensorapp",
help="name of the application")
parser.add_argument("--app-port", type=int, default="8081",
help="port to run application server")
parser.add_argument("--host", default="localhost",
help="hostname of the Beetle server")
parser.add_argument("--port", "-p", type=int, default=3002,
help="port the server is runnng on")
parser.add_argument("--cert", "-c", type=str,
help="client certificate")
parser.add_argument("--key", "-k", type=str,
help="private key for client certificate")
parser.add_argument("--rootca", "-r",
help="root CA certificate")
parser.add_argument("--nocerts", "-x", action="store_true",
help="disable verification and use of client certificates")
return parser.parse_args()
def printBox(s):
""" Print a header """
print "%s\n|| %s ||\n%s" % ("=" * (len(s) + 6), s, "=" * (len(s) + 6))
ENV_SENSING_SERVICE_UUID = 0x181A
PRESSURE_CHARAC_UUID = 0x2A6D
TEMPERATURE_CHARAC_UUID = 0x2A6E
HUMIDITY_CHARAC_UUID = 0x2A6F
UNK1_CHARAC_UUID = 0xC512
UNK2_CHARAC_UUID = 0xF801
INTERNAL_REFRESH_INTERVAL = 60 * 5
class SensorInstance(object):
def __init__(self, name):
self.name = name
self.address = None
self.connectTime = None
self.gateway = None
self._pressure = None
self._pressure_cached = None
self._temperature = None
self._temperature_cached = None
self._humidity = None
self._humidity_cached = None
self._unk1 = None
self._unk1_cached = None
self._unk2 = None
self._unk2_cached = None
def _unpack_pressure(self, buf):
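        # Per the Bluetooth Environmental Sensing profile, the Pressure characteristic
        # (0x2A6D) is a uint32 in units of 0.1 Pa, hence the divide-by-10 below.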
if len(buf) != 4:
return float("nan")
raw = struct.unpack('<I', bytes(buf))[0]
return float(raw) / 10.0
@property
def pressure(self):
if self._pressure_cached is not None:
return self._pressure_cached
try:
buf = self._pressure.read()
self._pressure_cached = self._unpack_pressure(buf)
return self._pressure_cached
except Exception, err:
print err
return float("nan")
def _unpack_temperature(self, buf):
if len(buf) != 2:
return float("nan")
raw = struct.unpack('<h', bytes(buf))[0]
return float(raw) / 100.0
@property
def temperature(self):
if self._temperature_cached is not None:
return self._temperature_cached
try:
buf = self._temperature.read()
self._temperature_cached = self._unpack_temperature(buf)
return self._temperature_cached
except Exception, err:
print err
return float("nan")
def _unpack_humidity(self, buf):
if len(buf) != 2:
return float("nan")
raw = struct.unpack('<H', bytes(buf))[0]
return float(raw) / 100.0
@property
def humidity(self):
if self._humidity_cached is not None:
return self._humidity_cached
try:
buf = self._humidity.read()
self._humidity_cached = self._unpack_humidity(buf)
return self._humidity_cached
except Exception, err:
print err
return float("nan")
def _unpack_unk1(self, buf):
if len(buf) != 2:
return -1
return struct.unpack('<H', bytes(buf))[0]
@property
def unk1(self):
if self._unk1_cached is not None:
return self._unk1_cached
try:
buf = self._unk1.read()
self._unk1_cached = self._unpack_unk1(buf)
return self._unk1_cached
except Exception, err:
print err
return -1
def _unpack_unk2(self, buf):
if len(buf) != 1:
return -1
return buf[0]
@property
def unk2(self):
if self._unk2_cached is not None:
return self._unk2_cached
try:
buf = self._unk2.read()
self._unk2_cached = self._unpack_unk2(buf)
return self._unk2_cached
except Exception, err:
print err
return -1
@property
def ready(self):
return (self.name is not None and self._pressure is not None
and self._temperature is not None and self._humidity is not None
and self._unk1 is not None and self._unk2 is not None
and self.address is not None and self.connectTime is not None
and self.gateway is not None)
def subscribeAll(self):
assert self.ready
print "Subscribing to notifications: %s" % self.address
def _pressure_handler(buf):
self._pressure_cached = self._unpack_pressure(buf)
self._pressure.subscribe(_pressure_handler)
def _temperature_handler(buf):
self._temperature_cached = self._unpack_temperature(buf)
self._temperature.subscribe(_temperature_handler)
def _humidity_handler(buf):
self._humidity_cached = self._unpack_humidity(buf)
self._humidity.subscribe(_humidity_handler)
def _unk1_handler(buf):
self._unk1_cached = self._unpack_unk1(buf)
self._unk1.subscribe(_unk1_handler)
def _unk2_handler(buf):
self._unk2_cached = self._unpack_unk2(buf)
self._unk2.subscribe(_unk2_handler)
def __str__(self):
return "%s (%s)" % (self.name, self.address)
def runHttpServer(port, client, reset, ready, devices):
"""Start the HTTP server"""
class WebServerHandler(BaseHTTPRequestHandler):
"""Handle HTTP requests and serve a simple web UI"""
def _serve_main(self):
"""Serve the main page"""
ready.wait()
env = Environment(loader=FileSystemLoader("templates"))
template = env.get_template("sensorapp.html")
self.send_response(200, 'OK')
self.send_header('Content-type', 'html')
self.end_headers()
self.wfile.write(template.render(devices=devices))
self.wfile.close()
def _serve_static(self):
if self.path == "" or not os.path.isfile(self.path[1:]):
self.send_error(404)
self.end_headers()
self.wfile.close()
return
path = self.path[1:]
_, extension = os.path.splitext(path)
print extension
if extension == ".css":
extension = "text/css"
elif extension == ".png":
extension = "image/png"
else:
self.send_error(403)
self.end_headers()
self.wfile.close()
return
self.send_response(200, 'OK')
self.send_header('Content-type', extension)
self.end_headers()
with open(path, "rb") as f:
try:
self.wfile.write(f.read())
finally:
self.wfile.close()
def do_GET(self):
if self.path == "/":
self._serve_main()
elif self.path.startswith("/static"):
self._serve_static()
else:
self.send_error(404)
self.end_headers()
self.wfile.close()
def do_POST(self):
if self.path == "/rescan":
ready.clear()
reset.set()
ready.wait()
self.send_response(200, 'OK')
self.end_headers()
self.wfile.close()
else:
self.send_error(404)
self.end_headers()
self.wfile.close()
server = HTTPServer(("", port), WebServerHandler)
try:
server.serve_forever()
finally:
server.socket.close()
def runClient(client, reset, ready, devices):
"""Start a beetle client"""
gapUuid = uuid.UUID(gatt.GAP_SERVICE_UUID)
nameUuid = uuid.UUID(gatt.GAP_CHARAC_DEVICE_NAME_UUID)
envSenseUuid = uuid.UUID(ENV_SENSING_SERVICE_UUID)
pressureUuid = uuid.UUID(PRESSURE_CHARAC_UUID)
temperatureUuid = uuid.UUID(TEMPERATURE_CHARAC_UUID)
humidityUuid = uuid.UUID(HUMIDITY_CHARAC_UUID)
unk1Uuid = uuid.UUID(UNK1_CHARAC_UUID)
unk2Uuid = uuid.UUID(UNK2_CHARAC_UUID)
beetleUuid = uuid.UUID(beetle.BEETLE_SERVICE_UUID)
bdAddrUuid = uuid.UUID(beetle.BEETLE_CHARAC_BDADDR_UUID)
connTimeUuid = uuid.UUID(beetle.BEETLE_CHARAC_CONNECTED_TIME_UUID)
gatewayUuid = uuid.UUID(beetle.BEETLE_CHARAC_CONNECTED_GATEWAY_UUID)
def _daemon():
while True:
del devices[:]
services = client.discoverAll()
currDevice = None
# proceed down the services, separating out devices
printBox("Discovering handles")
for service in services:
print service
if service.uuid == gapUuid:
for charac in service.characteristics:
print " ", charac
if charac.uuid == nameUuid:
currDevice = None
try:
currDeviceName = str(charac.read())
currDevice = SensorInstance(
name=currDeviceName)
except ClientError, err:
print err
except Exception, err:
print err
elif service.uuid == envSenseUuid:
for charac in service.characteristics:
print " ", charac
if currDevice is not None:
if charac.uuid == pressureUuid:
currDevice._pressure = charac
elif charac.uuid == temperatureUuid:
currDevice._temperature = charac
elif charac.uuid == humidityUuid:
currDevice._humidity = charac
elif charac.uuid == unk1Uuid:
currDevice._unk1 = charac
elif charac.uuid == unk2Uuid:
currDevice._unk2 = charac
elif service.uuid == beetleUuid:
for charac in service.characteristics:
print " ", charac
if currDevice is not None:
if charac.uuid == bdAddrUuid:
try:
bdaddr = charac.read()[::-1]
currDevice.address = ":".join(
"%02X" % x for x in bdaddr)
except ClientError, err:
print err
except Exception, err:
print err
elif charac.uuid == connTimeUuid:
try:
raw = charac.read()
if len(raw) != 4:
continue
epoch = struct.unpack('<I', bytes(raw))[0]
currDevice.connectTime = \
datetime.utcfromtimestamp(epoch)
except ClientError, err:
print err
except Exception, err:
print err
elif charac.uuid == gatewayUuid:
try:
gateway = charac.read()
currDevice.gateway = str(gateway)
except ClientError, err:
print err
except Exception, err:
print err
print currDevice, currDevice.ready
if currDevice.ready:
currDevice.subscribeAll()
devices.append(currDevice)
currDevice = None
ready.set()
reset.clear()
reset.wait(INTERNAL_REFRESH_INTERVAL)
ready.clear()
clientThread = threading.Thread(target=_daemon)
clientThread.setDaemon(True)
clientThread.start()
def main(args):
"""Set up a web app"""
def onDisconnect(err):
print "Disconnect:", err
os._exit(0)
# Declare a managed socket, and bind a GATT server and client
managedSocket = ManagedSocket(daemon=True)
client = GattClient(managedSocket, onDisconnect=onDisconnect)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if args.nocerts:
s = ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
else:
s = ssl.wrap_socket(s, keyfile=args.key, certfile=args.cert,
ca_certs=args.rootca, cert_reqs=ssl.CERT_REQUIRED)
s.connect((args.host, args.port))
printBox("Starting as: %s" % args.name)
# Send initial connection parameters.
appParams = ["client %s" % args.name, "server false"]
print ""
printBox("Connection request")
for line in appParams:
print "$ %s" % line
# Send connection request parameters to Beetle
appParamsStr = "\n".join(appParams)
appParamsLength = struct.pack("!i", len(appParamsStr))
s.sendall(appParamsLength.encode('utf-8'))
s.sendall(appParamsStr.encode('utf-8'))
# Read parameters in plaintext from Beetle
serverParamsLength = struct.unpack("!i", s.recv(4))[0]
print ""
printBox("Beetle response")
for serverParam in s.recv(serverParamsLength).split("\n"):
print "$ %s" % serverParam.rstrip()
# Transfer ownership of the socket
managedSocket.bind(s, True)
devices = []
reset = threading.Event()
ready = threading.Event()
runClient(client, reset, ready, devices)
runHttpServer(args.app_port, client, reset, ready, devices)
if __name__ == "__main__":
args = getArguments()
main(args)
os._exit(0)
| 2.625 | 3 |
Remark/Macros/Comment_Macro.py | kaba2/remark | 0 | 12789078 | <gh_stars>0
# -*- coding: utf-8 -*-
# Description: Comment macro
# Detail: Consumes its input and produces no output.
from Remark.Macro_Registry import registerMacro
class Comment_Macro(object):
def name(self):
return 'Comment'
def expand(self, parameter, remark):
# This macro simply eats its parameter. This allows
# for commenting.
text = []
return text
def expandOutput(self):
return False
def htmlHead(self, remark):
return []
def postConversion(self, remark):
        pass
registerMacro('Comment', Comment_Macro())
| 2.609375 | 3 |
leetcode_python/String/convert-a-number-to-hexadecimal.py | yennanliu/Python_basics | 18 | 12789079 | <reponame>yennanliu/Python_basics<filename>leetcode_python/String/convert-a-number-to-hexadecimal.py
# Time: O(logn)
# Space: O(1)
# Given an integer, write an algorithm to convert it to hexadecimal.
# For negative integer, two’s complement method is used.
#
# IMPORTANT:
# You must not use any method provided by the library which converts/formats
# the number to hex directly. Such solution will result in disqualification of
# all your submissions to this problem. Users may report such solutions after the
# contest ends and we reserve the right of final decision and interpretation
# in the case of reported solutions.
#
# Note:
#
# All letters in hexadecimal (a-f) must be in lowercase.
# The hexadecimal string must not contain extra leading 0s. If the number is zero,
# it is represented by a single zero character '0'; otherwise,
# the first character in the hexadecimal string will not be the zero character.
# The given number is guaranteed to fit within the range of a 32-bit signed integer.
# You must not use any method provided by the library which converts/formats the number to hex directly.
# Example 1:
#
# Input:
# 26
#
# Output:
# "1a"
# Example 2:
#
# Input:
# -1
#
# Output:
# "ffffffff"
# V0
# V1
# IDEA : Decimal -> Hexadecimal
# Decimal : {0,1,2,3,4,5,6,7,8,9}
# Hexadecimal : {0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f}
# https://www.cnblogs.com/grandyang/p/5926674.html
# e.g.
# 50 -> 32 ( 50 // 16 = 3, 50 % 16 = 2 )
# 26 -> 1a ( 26 // 16 = 1, 26 % 16 = 10 = a )
# https://www.jianshu.com/p/2d57cec55393
# divmod(17, 16) = (1,1)
# divmod(3, 1) = (3,0)
def toHex(num):
ret = ''
map = ('0', '1','2','3','4','5','6','7','8','9','a','b','c','d','e','f')
if num == 0:
return '0'
if num < 0:
num += 2**32 # if num < 0, use num = num + 2**32 to deal with it
while num > 0 :
num, val = divmod(num, 16)
ret += map[val]
return ret[::-1]
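# Illustrative usage of the V1 helper above:
#   toHex(26)  ->  '1a'
#   toHex(-1)  ->  'ffffffff'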
# V2
# Time: O(logn)
# Space: O(1)
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
if not num:
return "0"
result = []
while num and len(result) != 8:
h = num & 15
if h < 10:
result.append(str(chr(ord('0') + h)))
else:
result.append(str(chr(ord('a') + h-10)))
num >>= 4
result.reverse()
return "".join(result)
| 4.3125 | 4 |
generate_confirmation_tokens.py | FiwareULPGC/agreement-mail-utilities | 0 | 12789080 | <reponame>FiwareULPGC/agreement-mail-utilities<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import getopt
import sys
import uuid
from openpyxl import *
def generate_token():
return uuid.uuid4().hex
def generate_confirmation_tokens(filepath):
wb = load_workbook(filepath)
ws = wb.worksheets[0]
ws.cell(row=1, column=4).value = "Token"
ws.cell(row=1, column=5).value = "Sent"
row_number = 2
for row in ws.iter_rows(row_offset=1):
if (row[0].value is not None or row[1].value is not None or
row[2].value is not None):
try:
# Abort and notify if an empty field is found
if row[0].value is None:
raise ValueError("Name value not found in row {}."
.format(row_number))
if row[1].value is None:
raise ValueError("Surname value not found in row {}."
.format(row_number))
if row[2].value is None:
raise ValueError("Email value not found in row {}."
.format(row_number))
ws.cell(row=row_number, column=4).value = generate_token()
except Exception as e:
ws.cell(row=row_number, column=4).value = None
print " Error generating token : {}".format(str(e))
finally:
ws.cell(row=row_number, column=5).value = 'No'
row_number += 1
wb.save(filepath)
if __name__ == "__main__":
options, remainder = getopt.getopt(sys.argv[1:], 'f:h', ['file=',
'help',
])
example = ("Example of use: python ./generate_confirmation_tokens.py "
"-f ./<filename>.xlsx")
file = None
for opt, arg in options:
if opt in ('-f', '--file'):
file = arg
if file is None:
print example
else:
generate_confirmation_tokens(file)
| 2.5625 | 3 |
tools/bin/pythonSrc/PSI-0.3b2_gp/psi/_version.py | YangHao666666/hawq | 450 | 12789081 | # The MIT License
#
# Copyright (C) 2007 <NAME>
#
# Copyright (C) 2008-2009 <NAME>
#
# Copyright (C) 2008-2009 Abilisoft Ltd.
#
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""psi._version
This is used so that this information can stay easily in sync in both
psi and setup.py.
"""
version = '0.3b2'
author = '<NAME>, <NAME>, <NAME>'
copyright = """\
Copyright (C) 2007-2009 <NAME>
Copyright (C) 2008, 2009 <NAME>
Copyright (C) 2008, 2009 Abilisoft Ltd.
Copyright (C) 2009 <NAME>"""
license = 'MIT'
| 1.257813 | 1 |
main.py | JiwooKimAR/MWP-solver-with-pretrained-language-model | 5 | 12789082 | import os
import sys
import torch
import argparse
from collections import OrderedDict
from dataloader import Dataset
from evaluation import Evaluator
from experiment import EarlyStop, train_model
from utils import Config, Logger, ResultTable, make_log_dir, set_random_seed
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
def main_train_test(argv):
# multiprocessing.set_start_method('spawn')
# read configs
config = Config(main_conf_path='./', model_conf_path='model_config')
# apply system arguments if exist
if len(argv) > 0:
cmd_arg = OrderedDict()
argvs = ' '.join(sys.argv[1:]).split(' ')
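        # Arguments are parsed as "--name value" pairs; e.g. "--gpu 0 --seed 1234"
        # becomes {'gpu': '0', 'seed': '1234'} (illustrative values).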
for i in range(0, len(argvs), 2):
arg_name, arg_value = argvs[i], argvs[i + 1]
arg_name = arg_name.strip('-')
cmd_arg[arg_name] = arg_value
config.update_params(cmd_arg)
gpu = config.get_param('Experiment', 'gpu')
gpu = str(gpu)
os.environ["CUDA_VISIBLE_DEVICES"] = gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = config.get_param('Experiment', 'model_name')
# set seed
seed = config.get_param('Experiment', 'seed')
set_random_seed(seed)
# logger
log_dir = make_log_dir(os.path.join('saves', model_name))
logger = Logger(log_dir)
config.save(log_dir)
# dataset
dataset_name = config.get_param('Dataset', 'dataset')
dataset = Dataset(model_name, **config['Dataset'])
# early stop
early_stop = EarlyStop(**config['EarlyStop'])
# evaluator()
evaluator = Evaluator(early_stop.early_stop_measure, **config['Evaluator'])
# Save log & dataset config.
logger.info(config)
logger.info(dataset)
import model
MODEL_CLASS = getattr(model, model_name)
# build model
model = MODEL_CLASS(dataset, config['Model'], device)
model.logger = logger
################################## TRAIN & PREDICT
# train
try:
valid_score, train_time = train_model(model, dataset, evaluator, early_stop, logger, config)
except (KeyboardInterrupt, SystemExit):
valid_score, train_time = dict(), 0
logger.info("학습을 중단하셨습니다.")
m, s = divmod(train_time, 60)
h, m = divmod(m, 60)
logger.info('\nTotal training time - %d:%d:%d(=%.1f sec)' % (h, m, s, train_time))
# test
model.eval()
model.restore(logger.log_dir)
test_score = dict()
for testset in dataset.testsets:
test_score.update(evaluator.evaluate(model, dataset, testset))
# show result
evaluation_table = ResultTable(table_name='Best Result', header=list(test_score.keys()))
evaluation_table.add_row('Valid', valid_score)
evaluation_table.add_row('Test', test_score)
# evaluation_table.show()
logger.info(evaluation_table.to_string())
logger.info("Saved to %s" % (log_dir))
def main_submit(args):
# read configs
config = Config(main_conf_path=args.path, model_conf_path=args.path)
# Final test set (dataset/problemsheet.json)
config.main_config['Dataset']['dataset'] = '/home/agc2021/dataset'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_name = config.get_param('Experiment', 'model_name')
log_dir = args.path
logger = Logger(log_dir)
dataset = Dataset(model_name, **config['Dataset'])
# evaluator
evaluator = Evaluator(**config['Evaluator'])
import model
MODEL_CLASS = getattr(model, model_name)
# build model
model = MODEL_CLASS(dataset, config['Model'], device)
# test
model.eval()
model.restore(logger.log_dir)
model.logger = logger
evaluator.evaluate(model, dataset, 'submit')
logger.info("Saved answer")
if __name__ == '__main__':
## For submission
if os.path.exists('/home/agc2021/dataset/problemsheet_5_00.json'):
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='saves_final/', metavar='P')
args = parser.parse_args()
main_submit(args)
else:
main_train_test(argv=sys.argv[1:]) | 2.1875 | 2 |
servers/web/flask/views/mine.py | ericharmeling/photoblocks | 2 | 12789083 | <reponame>ericharmeling/photoblocks<filename>servers/web/flask/views/mine.py
from flask import request
def mine(blockchain):
# Form data
image_file = request.files['file']
image_loc = blockchain.dir + image_file.filename
label = request.form['label']
last_label = request.form['last_label']
n_key = request.form['node_id']
# Store image file on server
image_file.save(image_loc)
# Go through data
blockchain.last_labels.append(last_label)
iter_block = blockchain.new_block(image_loc, label, blockchain.transactions)
proof = blockchain.proof_of_work(iter_block)
block = blockchain.new_block(image_loc, label, blockchain.transactions, nonce=proof)
blockchain.add_block(block)
blockchain.transactions = []
blockchain.add_transaction_fields(sender="God", recipient=n_key, quantity=1)
return block
| 2.65625 | 3 |
receptiveFields.py | ericrosenbrown/ReceptiveFields | 0 | 12789084 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# We are going to be doing an activity about viewing images through different filters. These filters are similar to things that happen in the brain when the images from our eyes are registered in our brain.
# <codecell>
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.cm as cm
import scipy.signal as signal
import numpy as n
# <codecell>
barImg=mpimg.imread('bar.png')
#extract grey values
barImg = barImg[:,:,3]
# <markdowncell>
# We examine the effect on the following images. In the visual pathway the images can be seen as input from our eyes focusing on the center of our vision.
# <codecell>
imgplot = plt.imshow(barImg, cmap=cm.Greys_r)
# <codecell>
img=mpimg.imread('stinkbug.png') #change 'stinkbug.png' into your choice of animal:
# turtle.jpg, turtle2.jpg, zebra.png, doge.png, jaguar.png, leopard.png, mexicanhat.jpg
#extract grey values
bugImg = img[:,:,0]
# <codecell>
imgplot = plt.imshow(bugImg, cmap=cm.Greys_r)
# <markdowncell>
# Receptive field functions
# -------------------
#
# The following function will be used as a blurring filter.
# $$\phi(x,y) = \frac{1}{2\pi\sigma^2}\exp{\{-\frac{1}{2\sigma^2}(x^2+ y^2)\}}$$
# <codecell>
def gaussian2D(x, y, sigma):
    return (1.0/(2*math.pi*(sigma**2)))*math.exp(-(1.0/(2*(sigma**2)))*(x**2 + y**2))
"""make matrix from function"""
def receptiveFieldMatrix(func):
h = 30
    g = n.zeros((h,h))
for xi in range(0,h):
for yi in range(0,h):
x = xi-h/2
y = yi-h/2
g[xi, yi] = func(x,y);
return g
def plotFilter(fun):
g = receptiveFieldMatrix(fun)
plt.imshow(g, cmap=cm.Greys_r)
# <markdowncell>
# The function is circularly symmetric, meaning it does the same thing in every direction around the center.
#
# This filter cancels out higher frequencies, thus blurring the image.
# <codecell>
plotFilter(lambda x,y:gaussian2D(x,y,4))
# <markdowncell>
# Convolution is the process of applying the filter to the input image.
# $$\int \int I(x',y')\phi(x-x',y-y')dx'dy'$$
#
# When applying this filter, the result of the convolution can be visualized in an image.
# <codecell>
Img_barGaussian = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: gaussian2D(x,y,5)), mode='same')
imgplot = plt.imshow(Img_barGaussian, cmap=cm.Greys_r)
# <codecell>
Img_bugGaussian = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: gaussian2D(x,y,3)), mode='same')
imgplot = plt.imshow(Img_bugGaussian, cmap=cm.Greys_r)
# <markdowncell>
# Difference of Gaussians
# ---------------------
#
# The mexican hat function is the difference of two of the Gaussian functions above, which gives a filter like the one found in certain cells in your eye. It can be seen as a basic edge detector.
# <codecell>
def mexicanHat(x,y,sigma1,sigma2):
return gaussian2D(x,y,sigma1) - gaussian2D(x,y,sigma2)
plotFilter(lambda x,y: mexicanHat(x,y,3,4))
# <codecell>
Img_barHat = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y:mexicanHat(x,y,3,4)), mode='same')
imgplot = plt.imshow(Img_barHat, cmap=cm.Greys_r)
# <codecell>
Img_bugHat = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: mexicanHat(x,y,2,3)), mode='same')
imgplot = plt.imshow(Img_bugHat, cmap=cm.Greys_r)
# <markdowncell>
# Gabor functions
# ---------------
#
# Gabor functions are used to detect edges with a specific orientation in images. Parts of the brain's visual system see an image through filters much like these Gabor functions.
#
# There are two different types of gabor function:
# $$g_s(x):=sin(\omega_x x + \omega_y y)\exp{\{-\frac{x^2+y^2}{2\sigma^2}\}}$$
# $$g_c(x):=cos(\omega_x x + \omega_y y)\exp{\{-\frac{x^2+y^2}{2\sigma^2}\}}$$
#
# <codecell>
def oddGabor2D(x,y,sigma,orientation):
return math.sin(x + orientation*y) * math.exp(-(x**2 + y**2)/(2*sigma))
def evenGabor2D(x,y, sigma, orientation):
return math.cos(x + orientation*y) * math.exp(-(x**2 + y**2)/(2*sigma))
plotFilter(lambda x,y: oddGabor2D(x,y,7,1))
# <codecell>
Img_barOddGabor = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_barOddGabor, cmap=cm.Greys_r)
# <codecell>
Img_bugOddGabor = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
# <markdowncell>
# In the following image one can see how edges with the matching orientation stand out in the filtered output.
# <codecell>
imgplot = plt.imshow(Img_bugOddGabor, cmap=cm.Greys_r)
# <markdowncell>
# Using the previous filter (the edge defining one) as an input to the gabor we obtain different results.
# <codecell>
Img_bugOddGaborEdge = signal.convolve(Img_bugHat,receptiveFieldMatrix(lambda x,y: oddGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_bugOddGaborEdge, cmap=cm.Greys_r)
# <markdowncell>
# Here is an example of the other gabor filter
# <codecell>
plotFilter(lambda x,y: evenGabor2D(x,y,7,1))
Img_barEvenGabor = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: evenGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_barEvenGabor, cmap=cm.Greys_r)
# <codecell>
Img_bugEvenGabor = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: evenGabor2D(x,y,5,1)), mode='same')
imgplot = plt.imshow(Img_bugEvenGabor, cmap=cm.Greys_r)
# <markdowncell>
# Quadrature Pairs
# ------------------
#
# Now let's combine both gabor filters to see what will happen.
# <codecell>
def edgeEnergy(x,y,sigma, orientation):
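    # Note: sin^2 + cos^2 = 1, so combining the *kernel* values this way collapses to a
    # plain Gaussian envelope. The usual quadrature-pair "energy" model instead squares
    # and sums the two filter *responses* after each has been convolved with the image.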
g1= oddGabor2D(x,y,sigma,orientation)
g2= evenGabor2D(x,y,sigma,orientation)
return(g1**2+g2**2)
# <codecell>
plotFilter(lambda x,y:edgeEnergy(x,y,50,0))
# <codecell>
Img_barEdgeEnergy = signal.convolve(barImg,receptiveFieldMatrix(lambda x,y: edgeEnergy(x,y,100,1)), mode='same')
imgplot = plt.imshow(Img_barEdgeEnergy, cmap=cm.Greys_r)
# <codecell>
Img_bugEdgeEnergy = signal.convolve(bugImg,receptiveFieldMatrix(lambda x,y: edgeEnergy(x,y,10,1)), mode='same')
imgplot = plt.imshow(Img_bugEdgeEnergy, cmap=cm.Greys_r)
# <codecell>
| 3.625 | 4 |
object_pool/exception.py | dduraipandian/object_pool | 3 | 12789085 | <filename>object_pool/exception.py
class InvalidMinInitCapacity(Exception):
def __init__(self, pool_name):
self.message = f"ERROR:: {pool_name}: min_init can not be less than 0 with lazy=False option."
class InvalidMaxCapacity(Exception):
def __init__(self, pool_name):
self.message = f"ERROR:: {pool_name}: max capacity should not be negative number."
class InvalidClass(Exception):
def __init__(self, klass):
self.message = f"ERROR:: {klass} is not a valid class."
| 2.984375 | 3 |
memtrain/memtrain_common/question.py | iandorsey00/memtrain | 0 | 12789086 | import decimal
import random
import string
import textwrap
import time
import os
from memtrain.memtrain_common.mtstatistics import MtStatistics
class NoResponsesError(Exception):
pass
class Question:
'''Manages the current cue and response interface'''
def __init__(self, settings, database):
# Initialize core objects
self.settings = settings
self.conn = database.conn
self.cur = self.conn.cursor()
self.database = database
self.responses = self.database.get_all_responses()
self.cue_id = 0
self.response_id = 0
self.f_cue = ''
self.mtags = []
self.mchoices = dict()
self.iam = ' '
self.ascii_range = ['a', 'b', 'c', 'd']
self.response = ''
self.user_input = ''
self.synonyms = []
self.plural_responses = [i for i in self.responses if self.is_plural(i)]
self.nonplural_responses = [i for i in self.responses if not self.is_plural(i)]
## Interface text
self.title_text = ''
self.level_text = ''
self.response_number_text = ''
self.cue_text = ''
self.hint_text = ''
self.correctness_str = ''
self.other_answers_str = ''
def get_value(self, value, value_id):
self.cur.execute('''SELECT {} FROM {} WHERE {} = (?)'''
.format(value, value + 's', value + '_id'),
(str(value_id), ))
rows = self.cur.fetchall()
return rows[0][0]
def get_cue(self, cue_id):
return self.get_value('cue', cue_id)
def get_response(self, response_id):
return self.get_value('response', response_id)
def get_hints(self):
# Translate response_id into hint_id
self.cur.execute('''SELECT hint_id FROM responses_to_hints
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate hint_id to hint
for hint_id in rows:
out.append(self.get_value('hint', hint_id))
return out
def get_synonyms(self):
# Translate response_id into synonym_id
self.cur.execute('''SELECT synonym_id FROM responses_to_synonyms
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate synonym_id to synonym
for synonym_id in rows:
out.append(self.get_value('synonym', synonym_id))
return out
def get_mtags(self):
# Translate response_id into mtag_id
self.cur.execute('''SELECT mtag_id FROM responses_to_mtags
WHERE response_id = (?)''', (str(self.response_id), ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
out = []
# Translate mtag_id to mtag
for mtag_id in rows:
out.append(self.get_value('mtag', mtag_id))
return out
def get_placement(self, cue_id, response_id):
self.cur.execute('''SELECT placement FROM cues_to_responses
WHERE cue_id = (?) AND response_id = (?)''',
(str(cue_id), str(response_id)))
rows = self.cur.fetchall()
return rows[0][0]
def get_responses_by_mtag(self, mtag):
# Translate mtag_id to mtag
self.cur.execute('''SELECT mtag_id FROM mtags WHERE mtag = (?)''',
(mtag, ))
rows = self.cur.fetchall()
rows = list(map(lambda x: x[0], rows))
# Translate mtag_id to response_id
response_ids = []
for mtag_id in rows:
self.cur.execute('''SELECT response_id FROM responses_to_mtags
WHERE mtag_id = (?)''', (mtag_id, ))
more_rows = self.cur.fetchall()
response_ids += list(map(lambda x: x[0], more_rows))
# Translate response_id to response
out = []
for response_id in response_ids:
out.append(self.get_value('response', response_id))
return out
def is_plural(self, string):
'''Detects most plural words in English'''
return string[-1:] == 's' or string[-2:] == 'es'
# Question rendering ######################################################
def format_cue(self):
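        # A cue contains numbered placeholders such as {{1}}; the one matching this
        # response's placement is rendered as a numbered blank, e.g. the cue
        # "Paris is the capital of {{1}}" becomes "Paris is the capital of ___(1)___"
        # (illustrative cue text, not from the question database).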
self.f_cue = self.cue.replace('{{}}', '_' * 9)
if self.placement == 1:
self.f_cue = self.f_cue.replace('{{1}}', '___(1)___')
else:
self.f_cue = self.f_cue.replace('{{1}}', '_' * 9)
if self.placement == 2:
self.f_cue = self.f_cue.replace('{{2}}', '___(2)___')
else:
self.f_cue = self.f_cue.replace('{{2}}', '_' * 9)
if self.placement == 3:
self.f_cue = self.f_cue.replace('{{3}}', '___(3)___')
else:
self.f_cue = self.f_cue.replace('{{3}}', '_' * 9)
return self.f_cue
def main_data_loop(self, cue_id, response_id, mtstatistics, final=False):
'''Main data processing for question rendering'''
# Initialize core objects
self.cue_id = cue_id
self.response_id = response_id
self.mtstatistics = mtstatistics
# Other important data
self.cue = self.get_cue(self.cue_id)
self.response = self.get_response(self.response_id)
self.placement = self.get_placement(self.cue_id, self.response_id)
self.synonyms = self.get_synonyms()
self.hints = self.get_hints()
self.mtags = self.get_mtags()
self.mtstatistics.update_percentage()
# Determine the level
if self.settings.level == '1':
self.level_text = 'Level 1'
elif self.settings.level == '2':
self.level_text = 'Level 2'
elif self.settings.level == '3':
self.level_text = 'Level 3'
# Important text
self.title_text = self.settings.settings['title']
self.response_number_text = 'Response ' + str(self.mtstatistics.response_number) + ' of ' + str(self.mtstatistics.total)
self.correct_so_far_text = str(self.mtstatistics.number_correct) + '/' + str(self.mtstatistics.response_number-1) + ' · ' + str(round(self.mtstatistics.percentage, 1)) + '%'
self.cue_text = self.format_cue()
def generate_mchoices(self):
'''Return the choices for the multiple choice questions'''
out = dict()
# Get responses for all mtags for this response
same_mtag_responses = []
for mtag in self.mtags:
same_mtag_responses += self.get_responses_by_mtag(mtag)
# Get responses of the same and the other plurality
plurality = self.is_plural(self.response)
same_plurality_responses = list(self.plural_responses) if plurality else list(self.nonplural_responses)
other_plurality_responses = list(self.nonplural_responses) if plurality else list(self.plural_responses)
# We will select first from same_mtag_responses. Then, if
# that's empty, we'll select from same_plurality_responses. If
# that's also empty, we'll resort to other_plurality_responses.
# Filter all three of these lists to make sure they don't contain
# the correct response
same_mtag_responses = [i for i in same_mtag_responses if i != self.response]
same_plurality_responses = [i for i in same_plurality_responses if i != self.response]
# The response won't be located in other_plurality_responses.
        # Filter the plurality_responses lists
same_plurality_responses = [i for i in same_plurality_responses if i not in same_mtag_responses]
other_plurality_responses = [i for i in other_plurality_responses if i not in same_mtag_responses]
# Shuffle the response lists.
random.shuffle(same_mtag_responses)
random.shuffle(same_plurality_responses)
random.shuffle(other_plurality_responses)
# Get the index of the correct answer.
correct_letter = random.choice(self.ascii_range)
response_pool_consumption_index = 0
response_pool = same_mtag_responses
# Loop through the ascii range
for i in self.ascii_range:
# If we have the correct letter, output the correct response.
if i == correct_letter:
this_response = self.response
# Otherwise...
else:
# If the response_pool is empty...
while len(response_pool) == 0:
response_pool_consumption_index = response_pool_consumption_index + 1
if response_pool_consumption_index == 1:
response_pool = same_plurality_responses
elif response_pool_consumption_index == 2:
response_pool = other_plurality_responses
elif response_pool_consumption_index > 2:
raise NoResponsesError('There are no more responses available.')
this_response = response_pool.pop()
# Capitalize only the first letter of this_response
this_response = this_response[0].upper() + this_response[1:]
# Now that we have our choice, insert it into self.mchoices
out[i] = this_response
return out
def validate_input(self):
'''Determine if input is valid'''
self.mtstatistics.is_input_valid = False
if self.settings.level == '1':
if self.user_input in self.ascii_range:
self.mtstatistics.is_input_valid = True
else:
if self.user_input:
self.mtstatistics.is_input_valid = True
def standardize_string(self, string):
'''Standardize strings so they can be compared for correctness'''
# The idea here is that a question shouldn't be marked wrong just
# because the user forgot to enter a hyphen or a space or used the
# wrong case.
#
        # Standardization involves the removal of all case, whitespace, and
# hyphens. This means the grading of questions is not case, whitespace,
# or hyphen sensitive.
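        # For example, "Mexican-Hat filter" and "mexican hat Filter" both standardize
        # to "mexicanhatfilter" and would be graded as the same answer (illustrative
        # strings, not from the question database).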
# Remove case (transform to lowercase)
out = string.lower()
# Remove whitespace
out = ''.join(out.split())
# Remove hyphens
out = out.replace('-', '')
out = out.replace('–', '')
out = out.replace('—', '')
return out
def determine_equivalence(self):
        '''See if input matches a synonym or standardized string'''
self.mtstatistics.is_input_correct = False
# If not, does the input match the response?
std_input = self.standardize_string(self.user_input)
std_response = self.standardize_string(self.response)
if std_input == std_response:
self.mtstatistics.used_response = self.response
self.mtstatistics.is_input_correct = True
if not self.mtstatistics.is_input_correct:
# If not, does the input match a synonym?
for synonym in self.synonyms:
std_synonym = self.standardize_string(synonym)
if std_input == std_synonym:
self.mtstatistics.used_synonym = synonym
self.mtstatistics.is_input_correct = True
def grade_input(self):
'''Determine whether input is correct.'''
if self.settings.level == '1':
# For level 1, check to see if the right letter was entered.
# First, translate the letter to its corresponding choice.
self.user_input = self.mchoices[self.user_input]
self.mtstatistics.is_input_correct = self.response.lower() == self.user_input.lower()
else:
# For levels 2 or 3, make sure the right input was entered.
self.determine_equivalence()
def finalize(self):
'''Notify the user of correctness, update statistics, and print'''
if self.mtstatistics.is_input_correct:
self.mtstatistics.number_correct += 1
if self.mtstatistics.has_synonym_been_used():
self.remaining_synonyms = [i for i in self.synonyms if i != self.mtstatistics.used_synonym]
self.correctness_str = 'Correct. Default answer: ' + self.response
else:
self.correctness_str = 'Correct.'
self.other_answers_str = 'Other correct responses: ' + ', '.join(self.synonyms)
else:
self.mtstatistics.number_incorrect += 1
self.mtstatistics.incorrect_responses.append(self.response)
self.correctness_str = 'Incorrect. Answer: ' + self.response
self.other_answers_str = 'Other correct responses: ' + ', '.join(self.synonyms)
self.mtstatistics.response_number += 1
# Reset
self.mtstatistics.used_synonym = ''
| 2.46875 | 2 |
uvicore/http/routing/auto_api.py | coboyoshi/uvicore | 0 | 12789087 | <filename>uvicore/http/routing/auto_api.py
# Do not import future here or http/bootstrap.py get_type_hints fails
# See https://bugs.python.org/issue41249
# NO from __future__ import annotations
import uvicore
import json
from uvicore.http.request import Request
from uvicore.http.params import Query
from uvicore.typing import Optional, Union, List, Any, Tuple, Generic, TypeVar
from uvicore.orm.query import OrmQueryBuilder
from uvicore.contracts import AutoApi as AutoApiInterface
from uvicore.support.dumper import dump, dd
from uvicore.http.exceptions import PermissionDenied
from uvicore.contracts import UserInfo
E = TypeVar("E")
@uvicore.service()
class AutoApi(Generic[E], AutoApiInterface[E]):
def __init__(self,
Model: E,
scopes: List = [],
*,
request: Request,
include: Optional[List[str]] = None,
where: Optional[str] = None
):
self.Model = Model
self.scopes = scopes
self.request = request
self.user: UserInfo = request.user
self.includes = self._build_include(include)
self.wheres = self._build_where(where)
@classmethod
def listsig(
request: Request,
include: Optional[List[str]] = Query([]),
where: Optional[str] = '',
):
pass
@classmethod
def getsig(
request: Request,
id: Union[str, int],
include: Optional[List[str]] = Query([]),
):
pass
"""
A URL is a ONE-TO-ONE mapping to an ORM QUERY
---------------------------------------------
    HTTP: DELETE /api/posts?where={"creator_id":"1"} // maybe 500 posts
ORM: Post.query().where('creator_id', 1).delete()
SQL: DELETE FROM posts WHERE creator_id=1;
HTTP: PATCH /api/posts?where={"creator_id":"1"}
BODY would be the columns to update
{
'creator_id': 12
'owner_id': 5
}
ORM: Post.query().where('creator_id', 1).update('creator_id', 12).update('owner_id', 5)
SQL: UPDATE posts SET creator_id=12, owner_id=4 WHERE creator_id=1;
"""
def orm_query(self) -> OrmQueryBuilder[OrmQueryBuilder, E]:
"""Start a new Uvicore ORM Model QueryBuilder Query"""
query = self.Model.query()
# Include
#if include: query.include(*include.split(','))
if self.includes: query.include(*self.includes)
# Where
if self.wheres: query.where(self.wheres)
#dump(query.query)
return query
def guard_relations(self):
# No includes, skip
if not self.includes: return self
# User is superadmin, allow
if self.user.superadmin: return self
# Loop each include and parts
for include in self.includes:
            # Includes are split into dotnotation "parts". We have to walk down each part's relations.
parts = [include]
if '.' in include: parts = include.split('.')
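            # e.g. an include of "topic.section.space" is walked as topic -> section -> space;
            # for each related entity the "<tablename>.read" scope is required by default
            # (illustrative relation names).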
entity = self.Model
for part in parts:
# Get field for this "include part". Remember entity here is not only Model, but a walkdown
# based on each part (separated by dotnotation)
field = entity.modelfields.get(part) # Don't use modelfield() as it throws exception
# Field not found, include string was a typo
if not field: continue
# Field has a column entry, which means its NOT a relation
if field.column: continue
# Field is not an actual relation
if not field.relation: continue
# Get actual relation object
relation = field.relation.fill(field)
# Get model permission string for this relation
model_permissions = [relation.entity.tablename + '.read'] if self.scopes['overridden'] == False else self.scopes['read']
#dump('CHILD MODEL PERMISSIONS:', model_permissions)
# User must have ALL scopes defined on the route (an AND statement)
authorized = True
missing_scopes = []
for scope in model_permissions:
if scope not in self.user.permissions:
authorized = False
missing_scopes.append(scope)
            # # Check if user has permission to child relation (or superadmin)
            # if model_permission not in user.permissions:
            #     # I convert model_permissions to a list for consistency with other permission denied errors
# # although there will always be just one model_permission in this function
# # raise HTTPException(
# # status_code=401,
# # detail="Permission denied to {}".format(str([model_permission]))
# # )
# print('be')
# raise PermissionDenied(model_permission)
if not authorized:
raise PermissionDenied(missing_scopes)
# Walk down each "include parts" entities
entity = relation.entity
# if not field: continue
# if not field.relation: continue
# relation: Relation = field.relation.fill(field)
# tablename = relation.entity.tablename
# permission = tablename + '.read'
            # # Check if user has permission to child relation (or superadmin)
# if user.superadmin == False and permission not in user.permissions:
# raise HTTPException(
# status_code=401,
# detail="Access denied to {}".format(tablename)
# )
# Chainable
return self
def _build_include(self, includes: List):
if not includes: return
results = []
for include in includes:
if ',' in include:
results.extend(include.split(','))
else:
results.append(include)
return results
def _build_where(self, where_str: str) -> List[Tuple]:
        # If where_str is already a Dict (from a JSON blob) convert to str first
if type(where_str) == dict: where_str = json.dumps(where_str)
dump('where_str', where_str)
wheres = []
if not where_str: return wheres
try:
# Convert where string JSON to python object
where_json = json.loads(where_str)
# Where must be a dict
if not isinstance(where_json, dict): return
# WORKS - where={"id": [">", 5]}
# WORKS - where={"id": ["in", [1, 2, 3]]}
# WORKS - where={"title": ["like", "%black%"]}
# WORKS - where={"id": 65, "topic_id": ["in", [1, 2, 3, 4, 5]]}
# WORKS - ?include=topic.section.space&where={"topic.slug": "/tools", "topic.section.slug": "/apps", "topic.section.space.slug": "/dev"}
for (key, value) in where_json.items():
#dump("Key: " + str(key) + " - Value: " + str(value))
if isinstance(value, List):
if len(value) != 2: continue # Valid advanced value must be 2 item List
operator = value[0]
value = value[1]
#query.where(key, operator, value)
wheres.append((key, operator, value))
else:
#query.where(key, value)
wheres.append((key, '=', value))
return wheres
except Exception as e:
#self.log.error(e)
dump(e)
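# Illustrative sketch of how _build_where parses a query-string filter (values are examples only):
#   _build_where('{"id": 65, "title": ["like", "%black%"]}')
#   -> [('id', '=', 65), ('title', 'like', '%black%')]
# Plain values become equality tuples; two-item lists are treated as [operator, value].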
| 2.453125 | 2 |
vmssz.py | muralipi/vmssdashboard | 56 | 12789088 | '''vmssz.py - class of basic Azure VM scale set operations, without UDs, with zones'''
import json
import azurerm
class VMSSZ():
'''VMSSZ class - encapsulates the model and status of a zone redundant VM scale set'''
def __init__(self, vmssname, vmssmodel, subscription_id, access_token):
        '''class initialization routine - set basic VMSS properties'''
self.name = vmssname
vmssid = vmssmodel['id']
self.rgname = vmssid[vmssid.index('resourceGroups/') + 15:vmssid.index('/providers')]
self.sub_id = subscription_id
self.access_token = access_token
self.model = vmssmodel
self.adminuser = \
vmssmodel['properties']['virtualMachineProfile']['osProfile']['adminUsername']
self.capacity = vmssmodel['sku']['capacity']
self.location = vmssmodel['location']
self.nameprefix = \
vmssmodel['properties']['virtualMachineProfile']['osProfile']['computerNamePrefix']
self.overprovision = vmssmodel['properties']['overprovision']
self.vm_instance_view = None
self.vm_model_view = None
self.pg_list = []
self.zones = []
if 'zones' in vmssmodel:
self.zonal = True
else:
self.zonal = False
# see if it's a tenant spanning scale set
self.singlePlacementGroup = True
if 'singlePlacementGroup' in vmssmodel['properties']:
self.singlePlacementGroup = vmssmodel['properties']['singlePlacementGroup']
self.tier = vmssmodel['sku']['tier']
self.upgradepolicy = vmssmodel['properties']['upgradePolicy']['mode']
self.vmsize = vmssmodel['sku']['name']
# if it's a platform image, or managed disk based custom image, it has
# an imageReference
if 'imageReference' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']:
# if it's a managed disk based custom image it has an id
if 'id' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']:
self.image_type = 'custom'
self.offer = 'custom'
self.sku = 'custom'
img_ref_id = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['id']
self.version = img_ref_id.split(".Compute/", 1)[1]
self.image_resource_id = img_ref_id.split(".Compute/", 1)[0]
else: # platform image
self.image_type = 'platform'
self.offer = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['offer']
self.sku = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku']
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version']
# else it's an unmanaged disk custom image and has an image URI
else:
self.image_type = 'custom'
if 'osType' in vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']:
self.offer = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['osType']
else:
self.offer = 'custom'
self.sku = 'custom'
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri']
self.provisioningState = vmssmodel['properties']['provisioningState']
self.status = self.provisioningState
def refresh_model(self):
'''update the model, useful to see if provisioning is complete'''
vmssmodel = azurerm.get_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.model = vmssmodel
self.capacity = vmssmodel['sku']['capacity']
self.vmsize = vmssmodel['sku']['name']
if self.image_type == 'platform':
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version']
else:
self.version = vmssmodel['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri']
self.provisioningState = vmssmodel['properties']['provisioningState']
self.status = self.provisioningState
self.init_vm_details()
def update_token(self, access_token):
'''update the token property'''
self.access_token = access_token
def update_model(self, newsku, newversion, newvmsize):
'''update the VMSS model with any updated properties'''
changes = 0
if self.sku != newsku:
if self.image_type == 'platform': # sku not relevant for custom image
changes += 1
self.model['properties']['virtualMachineProfile']['storageProfile']['imageReference']['sku'] = newsku
self.sku = newsku
else:
self.status = 'You cannot change sku setting for custom image'
if self.version != newversion:
changes += 1
self.version = newversion
if self.image_type == 'platform': # for platform image modify image reference
self.model['properties']['virtualMachineProfile']['storageProfile']['imageReference']['version'] = newversion
else:
# check for managed disk
if 'imageReference' in self.model['properties']['virtualMachineProfile']['storageProfile']:
self.model['properties']['virtualMachineProfile']['storageProfile'][
'imageReference']['id'] = self.image_resource_id + '.Compute/' + newversion
else:
# unmanaged custom image - has a URI which points directly
# to image blob
self.model['properties']['virtualMachineProfile']['storageProfile']['osDisk']['image']['uri'] = newversion
if self.vmsize != newvmsize:
changes += 1
# to do - add a check that the new vm size matches the tier
self.model['sku']['name'] = newvmsize
self.vmsize = newvmsize
if changes == 0:
self.status = 'VMSS model is unchanged, skipping update'
else:
# put the vmss model
updateresult = azurerm.update_vmss(self.access_token, self.sub_id, self.rgname,
self.name, json.dumps(self.model))
self.status = updateresult
def scale(self, capacity):
'''set the VMSS to a new capacity'''
self.model['sku']['capacity'] = capacity
scaleoutput = azurerm.scale_vmss(self.access_token, self.sub_id, self.rgname, self.name,
capacity)
self.status = scaleoutput
def poweron(self):
'''power on all the VMs in the scale set'''
result = azurerm.start_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def restart(self):
'''restart all the VMs in the scale set'''
result = azurerm.restart_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def poweroff(self):
'''power off all the VMs in the scale set'''
result = azurerm.poweroff_vmss(self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def dealloc(self):
'''stop deallocate all the VMs in the scale set'''
result = azurerm.stopdealloc_vmss(
self.access_token, self.sub_id, self.rgname, self.name)
self.status = result
def init_vm_instance_view(self):
'''get the VMSS instance view and set the class property'''
# get an instance view list in order to build FD heatmap
self.vm_instance_view = \
azurerm.list_vmss_vm_instance_view(self.access_token, self.sub_id, self.rgname,
self.name)
def init_vm_model_view(self):
'''get the VMSS instance view and set the class property'''
# get a model view list in order to build a zones heatmap
self.vm_model_view = \
azurerm.list_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name)
def reimagevm(self, vmstring):
'''reaimge individual VMs or groups of VMs in a scale set'''
result = azurerm.reimage_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def upgradevm(self, vmstring):
'''upgrade individual VMs or groups of VMs in a scale set'''
result = azurerm.upgrade_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def deletevm(self, vmstring):
'''delete individual VMs or groups of VMs in a scale set'''
result = azurerm.delete_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def startvm(self, vmstring):
'''start individual VMs or groups of VMs in a scale set'''
result = azurerm.start_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def restartvm(self, vmstring):
'''restart individual VMs or groups of VMs in a scale set'''
result = azurerm.restart_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def deallocvm(self, vmstring):
'''dealloc individual VMs or groups of VMs in a scale set'''
result = azurerm.stopdealloc_vmss_vms(self.access_token, self.sub_id, self.rgname,
self.name, vmstring)
self.status = result
def poweroffvm(self, vmstring):
'''power off individual VMs or groups of VMs in a scale set'''
result = azurerm.poweroff_vmss_vms(self.access_token, self.sub_id, self.rgname, self.name,
vmstring)
self.status = result
def get_power_state(self, statuses):
        '''get power state from a list of VM instance statuses'''
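        # Azure reports instance statuses with codes such as 'PowerState/running' or
        # 'ProvisioningState/succeeded'; slicing [11:] below strips the 'PowerState/' prefix.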
for status in statuses:
if status['code'].startswith('Power'):
return status['code'][11:]
def init_zones(self):
'''create a structure to represent VMs by zone and FD
- ignore placement groups for now.
'''
self.zones = []
for zone_id in range(1, 4):
zone = {'zone': zone_id}
fds = []
for fd_num in range(5):
fault_domain = {'fd': fd_num, 'vms': []}
fds.append(fault_domain)
zone['fds'] = fds
self.zones.append(zone)
def init_vm_details(self):
'''Populate the self.zones structure
- with a physically ordered representation of the VMs in a scale set.
'''
self.init_zones()
# get the model view
self.vm_model_view = azurerm.list_vmss_vms(self.access_token, self.sub_id, self.rgname,
self.name)
# get the instance view
self.vm_instance_view = azurerm.list_vmss_vm_instance_view(self.access_token, self.sub_id,
self.rgname, self.name)
# do a loop through the number of VMs and populate VMs properties in the zones structure
# make an assumption that len(vm_model_view) == len(vm_instance_view)
# - true if not actively scaling
for idx in range(len(self.vm_model_view['value'])):
vm_id = self.vm_model_view['value'][idx]['instanceId']
zone_num = self.vm_model_view['value'][idx]['zones'][0]
power_state = self.get_power_state(
self.vm_instance_view['value'][idx]['properties']['instanceView']['statuses'])
fault_domain = self.vm_instance_view['value'][idx]['properties']['instanceView']['platformFaultDomain']
vm_data = {'vmid': vm_id, 'power_state': power_state}
self.zones[int(zone_num)-1]['fds'][fault_domain]['vms'].append(vm_data)
#print(json.dumps(self.zones))
| 2.46875 | 2 |
nyc/core/CoreUtils.py | imohitawasthi/nyc | 0 | 12789089 | from nltk.corpus import stopwords
from nltk.stem.lancaster import LancasterStemmer
from utils import Constants
from utils import Utils
####################################################################################
####################################################################################
####################################################################################
# NAIVE BAYES SPECIFIC FUNCTIONS
####################################################################################
####################################################################################
####################################################################################
def create_classification_corpus(data_metric, regex):
"""
    :param data_metric: rows of raw data, each holding the speech text and its class label
    :param regex: regular expression for text extraction
    :return: tools required by the classification engines
             classes, words, speech_count, class_words --- refer to ClassificationEngine.py for more details
    business logic behind the init tools is written here: it loops over the
    entire data set and separates it into these components
"""
speech_count, classes, words, word_set, class_words = {}, {}, {}, {}, {}
stem = LancasterStemmer()
stop = set(stopwords.words('english'))
for c in list(set(data[-1] for data in data_metric)):
classes[c] = []
for data in data_metric:
in_class = data[Constants.LABEL_INDEX]
in_speech = data[Constants.CLASSIFICATION_SPEECH_INDEX]
words.update(word_set)
if in_class not in class_words:
class_words[in_class] = [[]]
else:
class_words[in_class].append([])
# word_set = {}
for w in Utils.custom_tokenizer(regex, in_speech.lower()):
# word_set_temp = {}
if w not in stop:
stem_word = stem.stem(w)
word_set[stem_word] = 1 if stem_word not in word_set else word_set[stem_word] + 1
# word_set_temp[stem_word] = word_set[stem_word]
class_words[in_class][len(class_words[in_class])-1].append(stem_word)
# if word_set_temp[stem_word] < 2:
# classes[in_class].extend(word_set_temp)
classes[in_class].append(stem_word)
speech_count[in_class] = 1 if in_class not in speech_count else speech_count[in_class] + 1
return classes, words, speech_count, class_words
def get_word_probabilities(df_class, df_word, df_class_count, frame_class_words):
"""
    :param frame_class_words: every class mapped to the per-speech word lists of that class - dict
    :param df_class: all classes and words (grouped by class) - dict
    :param df_word: all words and word frequencies - dict
    :param df_class_count: all classes and class frequencies - dict
    :return: every word with the probability, per class, that the word belongs to that class - dict
"""
probabilities = {}
for w in df_word:
for c in df_class:
if w not in probabilities:
probabilities[w] = {}
#probability = ((df_class[c].count(w) / len(df_class[c])) * (
# df_class_count[c] / sum(df_class_count.values()))) / (df_word[w] / sum(df_word.values()))
#probability = ((df_class[c].count(w) / len(df_class[c])) * (
# df_word[w] / sum(df_word.values()))) / (df_class_count[c] / sum(df_class_count.values()))
probability_class_words = len([x for x in frame_class_words[c] if w in x]) / len(frame_class_words[c])
probability_words = (df_class[c].count(w) / len(df_class[c]))
#probability_words = (df_word[w] / sum(df_word.values()))
probability_class = (df_class_count[c] / sum(df_class_count.values()))
probability = 0 if probability_words == 0 else (probability_class_words*probability_class)/probability_words
probabilities[w].update({c: probability})
return probabilities
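# Shape sketch (illustrative, ignoring stemming and stop-word effects) of the structures that
# flow from create_classification_corpus into get_word_probabilities for a two-class corpus:
#   classes / df_class               -> {'greet': ['hello', 'hi'], 'bye': ['bye']}
#   words / df_word                  -> {'hello': 1, 'hi': 1, 'bye': 1}
#   speech_count / df_class_count    -> {'greet': 2, 'bye': 1}
#   class_words / frame_class_words  -> {'greet': [['hello'], ['hi']], 'bye': [['bye']]}
# get_word_probabilities then returns one entry per word holding a score per class,
# e.g. {'hello': {'greet': <score>, 'bye': 0.0}, ...}; the score is zero whenever the word
# never occurs in that class.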
def get_other_class_probabilities(class_probabilities):
"""
:param class_probabilities: probabilities of each class calculated using user input and Naive Bayes word_probabilities
:return: sum of all probabilities
"""
return sum(class_probabilities[c] for c in class_probabilities)
def classify_naive_bayes(speech, probabilities, frame_class, regex):
tokens = Utils.prepare_tokens(speech, regex, set(stopwords.words('english')), LancasterStemmer())
class_probabilities = {}
classify = {}
for c in frame_class:
class_probabilities[c] = 0
for meta in probabilities:
if meta in tokens:
for c in frame_class:
class_probabilities[c] += probabilities[meta][c]
#else:
# for c in frame_class:
# class_probabilities[c] += 1.0 - probabilities[meta][c]
#for c in frame_class:
# class_probabilities[c] = class_probabilities[c]
for c in frame_class:
classify[c] = class_probabilities[c] #/ get_other_class_probabilities(class_probabilities)
return classify
####################################################################################
####################################################################################
####################################################################################
# DECISION TREE SPECIFIC FUNCTIONS
####################################################################################
####################################################################################
####################################################################################
def is_numeric(value): return isinstance(value, int) or isinstance(value, float)
def build_decision_tree(data_metric):
gain, question = find_best_split(data_metric)
if gain == 0:
return Leaf(data_metric)
true_rows, false_rows = partition(data_metric, question)
true_branch = build_decision_tree(true_rows)
false_branch = build_decision_tree(false_rows)
return DecisionNode(question, true_branch, false_branch)
def find_best_split(data_metric):
best_gain = 0
best_question = None
current_uncertainty = gini(data_metric)
n_features = len(data_metric[0]) - 1
for col in range(n_features):
values = set([row[col] for row in data_metric])
for val in values:
question = Question(col, val)
true_rows, false_rows = partition(data_metric, question)
if len(true_rows) == 0 or len(false_rows) == 0:
continue
gain = info_gain(true_rows, false_rows, current_uncertainty)
if gain >= best_gain:
best_gain, best_question = gain, question
return best_gain, best_question
def gini(data_metric):
counts = class_count(data_metric)
impurity = 1
for l in counts:
prob_of_l = counts[l] / float(len(data_metric))
impurity -= prob_of_l ** 2
return impurity
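# Worked example: three rows labelled ['apple', 'apple', 'grape'] give class counts
# {'apple': 2, 'grape': 1}, so gini = 1 - (2/3)**2 - (1/3)**2 = 4/9 ~= 0.444. A split that
# isolates the 'grape' row produces two pure partitions, so info_gain reaches its maximum
# possible value of 0.444 for that question.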
def class_count(data_metric):
counts = {}
for row in data_metric:
label = row[Constants.LABEL_INDEX]
counts[label] = 1 if label not in counts else counts[label] + 1
return counts
def info_gain(left, right, current_uncertainty):
p = float(len(left)) / (len(left) + len(right))
return current_uncertainty - p * gini(left) - (1 - p) * gini(right)
def partition(data_metric, question):
true_rows, false_rows = [], []
for row in data_metric:
true_rows.append(row) if question.match(row) else false_rows.append(row)
return true_rows, false_rows
class Leaf:
def __init__(self, data_metric):
self.predictions = class_count(data_metric)
class DecisionNode:
def __init__(self, question, true_branch, false_branch):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
class Question:
def __init__(self, col, val):
self.col = col
self.val = val
def match(self, example):
val = example[self.col]
try:
return val >= self.val if is_numeric(val) else val == self.val
except TypeError:
return False
def classify_decision_tree(row, tree):
if isinstance(tree, Leaf):
return tree.predictions
return \
classify_decision_tree(row,
tree.true_branch) \
if tree.question.match(row) \
else classify_decision_tree(row,
tree.false_branch)
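if __name__ == '__main__':
    # Minimal self-test sketch of the decision tree API on a toy data set.
    # Assumption: Constants.LABEL_INDEX points at the last column (the class label),
    # which matches how find_best_split treats the final column as the label.
    toy_metric = [
        ['green', 3, 'apple'],
        ['yellow', 3, 'apple'],
        ['red', 1, 'grape'],
        ['red', 1, 'grape'],
        ['yellow', 3, 'lemon'],
    ]
    toy_tree = build_decision_tree(toy_metric)
    # classify_decision_tree returns the class counts of the leaf the row falls into,
    # e.g. {'grape': 2} for a red row or {'apple': 1, 'lemon': 1} for an ambiguous yellow one.
    print(classify_decision_tree(['red', 1], toy_tree))
    print(classify_decision_tree(['yellow', 3], toy_tree))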
| 1.9375 | 2 |
script/stereograph_pointcloud.py | 7675t/theta_camera | 0 | 12789090 | <reponame>7675t/theta_camera
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
# Grab images from the THETA camera and publish them as a point cloud on a sphere
import sys
import rospy
import cv2
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from sensor_msgs.msg import PointCloud2, PointField
import sensor_msgs.point_cloud2 as pcl2
from std_msgs.msg import Header
def create_cloud_xyz32rgb(header, points):
"""
    Create a L{sensor_msgs.msg.PointCloud2} message with four fields: x, y, z (float32) and rgb (uint32).
@param header: The point cloud header.
@type header: L{std_msgs.msg.Header}
@param points: The point cloud points.
@type points: iterable
@return: The point cloud.
@rtype: L{sensor_msgs.msg.PointCloud2}
"""
fields = [PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1),
PointField('rgb', 12, PointField.UINT32, 1)]
return pcl2.create_cloud(header, fields, points)
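# Usage note: each point handed to create_cloud_xyz32rgb is [x, y, z, rgb], where rgb packs
# an OpenCV BGR pixel into a single 0xRRGGBB integer, i.e. (r << 16) | (g << 8) | b, exactly
# the icolor packing performed in the image callbacks below.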
class point_cloud_converter:
def __init__(self):
self.bridge = CvBridge()
self.center_x = rospy.get_param("center_x", 310)
self.center_y = rospy.get_param("center_y", 320)
self.radius = rospy.get_param("radius", 285)
self.sphere_radius = 1.0
self.step = 2
self.face_sub = rospy.Subscriber("face/image_raw", Image, self.callback_face)
self.rear_sub = rospy.Subscriber("rear/image_raw", Image, self.callback_rear)
self.image_pub = rospy.Publisher("image_out", Image, queue_size=1)
self.face_point_pub = rospy.Publisher("face_points", PointCloud2, queue_size=1)
self.rear_point_pub = rospy.Publisher("rear_points", PointCloud2, queue_size=1)
def callback_face(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(rows,cols,channels) = cv_image.shape
# cv2.circle(cv_image, (self.center_x, self.center_y), self.radius, (0, 255, 0))
        # Discretise the unit sphere into equal angular steps of (theta, phi)
sphere_image = np.ndarray([int(90 / self.step)+1, int(360 / self.step)+1, channels], dtype=np.uint8)
        # Build the point cloud at the same time
points = []
for theta_i in range(int(90 / self.step) + 1):
for phi_i in range(int(360 / self.step) + 1):
theta = self.step * theta_i * np.pi / 180
phi = self.step * phi_i * np.pi / 180
                # 3D coordinates (xs, ys, zs) on the unit sphere
xs = np.sin(theta)*np.cos(phi)
ys = np.sin(theta)*np.sin(phi)
zs = np.cos(theta)
                # Coordinates (xt, yt) on the stereographic (THETA fisheye) image
xt = self.center_x + self.radius * xs / (1 + zs)
yt = self.center_y + self.radius * ys / (1 + zs)
try:
color = cv_image[int(xt), int(yt)]
except:
print(xs, ys, zs)
print(int(xt), int(yt))
                # Map the pixel colour onto the sphere
sphere_image[theta_i, phi_i] = color
icolor = int((color[2] << 16) | (color[1] << 8) | color[0])
points.append([self.sphere_radius * xs, self.sphere_radius * ys, self.sphere_radius * zs, int(icolor)])
#create pcl from points
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = 'face_frame'
cloud = create_cloud_xyz32rgb(header, points)
try:
self.image_pub.publish(self.bridge.cv2_to_imgmsg(sphere_image, "bgr8"))
except CvBridgeError as e:
print(e)
self.face_point_pub.publish(cloud)
def callback_rear(self,data):
try:
cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
(rows,cols,channels) = cv_image.shape
# cv2.circle(cv_image, (self.center_x, self.center_y), self.radius, (0, 255, 0))
        # Discretise the unit sphere into equal angular steps of (theta, phi)
sphere_image = np.ndarray([int(90 / self.step)+1, int(360 / self.step)+1, channels], dtype=np.uint8)
        # Build the point cloud at the same time
points = []
for theta_i in range(int(90 / self.step) + 1):
for phi_i in range(int(360 / self.step) + 1):
theta = self.step * theta_i * np.pi / 180
phi = self.step * phi_i * np.pi / 180
                # 3D coordinates (xs, ys, zs) on the unit sphere
xs = np.sin(theta)*np.cos(phi)
ys = np.sin(theta)*np.sin(phi)
zs = np.cos(theta)
                # Coordinates (xt, yt) on the stereographic (THETA fisheye) image
xt = self.center_x + self.radius * xs / (1 + zs)
yt = self.center_y + self.radius * ys / (1 + zs)
try:
color = cv_image[int(xt), int(yt)]
except:
print(xs, ys, zs)
print(int(xt), int(yt))
                # Map the pixel colour onto the sphere
sphere_image[theta_i, phi_i] = color
icolor = int((color[2] << 16) | (color[1] << 8) | color[0])
points.append([self.sphere_radius * xs, self.sphere_radius * ys, self.sphere_radius * zs, int(icolor)])
#create pcl from points
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = 'rear_frame'
cloud = create_cloud_xyz32rgb(header, points)
self.rear_point_pub.publish(cloud)
def main(args):
    rospy.init_node('point_cloud_converter', anonymous=True)
    pc = point_cloud_converter()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
cv2.destroyAllWindows()
if __name__ == '__main__':
main(sys.argv)
| 2.484375 | 2 |
senko.py | mtsev/senko | 6 | 12789091 | #!/usr/bin/env python3
import os
import logging
import yaml
from discord.ext.commands import Bot, Context, CommandError, CommandOnCooldown
# create logger
log = logging.getLogger(__package__)
log.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('../senko.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.WARNING)
# create formatter and add it to the handlers
formatter = logging.Formatter('[%(asctime)s] %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
log.addHandler(fh)
log.addHandler(ch)
# Import config file
with open('config.yaml') as stream:
config = yaml.safe_load(stream)
# Initialise bot
bot = Bot(command_prefix=config['prefix'])
bot.remove_command('help')
bot.owner = config['owner']
bot.keys = config['keys']
bot.db = config['database']
bot.quiet = config['quiet']
bot.dt = config['dt_channels']
# Load cogs
for file in filter(lambda file: file.endswith('.py'), os.listdir('./cogs')):
bot.load_extension(f'cogs.{file[:-3]}')
# Log bot startup
@bot.event
async def on_ready() -> None:
log.warning(f'We have logged in as {bot.user} in these servers:')
for guild in bot.guilds:
log.warning(f'{guild.name} ({guild.id})')
log.warning(f'({len(bot.guilds)} servers)')
log.warning('************************')
# Handle command cooldown
@bot.event
async def on_command_error(ctx: Context, error: CommandError) -> None:
if isinstance(error, CommandOnCooldown):
await ctx.send(error)
# Start bot
bot.run(config['token']) | 2.265625 | 2 |
instagramNotifier.py | akaeme/InstagramNotifier | 0 | 12789092 | import argparse
# from fbchat.models import *
import logging
import os
import sys
import urllib.request
from getpass import getpass
from time import sleep
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from fbchat import Client
from API.InstagramAPI import InstagramAPI
from databaseUtils import Database
logger = logging.getLogger('instagramNotifier')
hdlr = logging.FileHandler('instagramNotifier.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
database = Database('InstagramNotifier.db')
MESSAGE_HEADER = 'Check out the new %s\'s photo on instagram! \n'
MESSAGE_BODY = 'The photo was taken in %s, it has size %sx%s and as we can see it is of great quality :) \n'
MESSAGE_FOOTER = 'This was generated by a bot to help you by notifying you of everything that is important. ' \
'Do not consider it as spam.\n'
def login(user_, passw):
"""Login on Instagram"""
instagram_api = InstagramAPI(username=user_, password=<PASSWORD>)
if instagram_api.login(): # login
return True, instagram_api
else:
return False, ''
def follow_closely(api_, follow_username):
"""Search for the user"""
big_list = True
max_id = ''
following = []
while big_list:
api_.getSelfUsersFollowing(maxid=max_id)
followers_ = api_.LastJson
for f in followers_['users']:
following.append(f)
big_list = followers_['big_list']
if not big_list:
break
# this key only exists if there is more pages
max_id = followers_['next_max_id']
for f in following:
if f['username'] == follow_username:
            return True, f
    return False, None
def get_message(message_type, source, data=None):
"""Get the notify message"""
if message_type == 'image':
header = (MESSAGE_HEADER % source)
body = (MESSAGE_BODY % (data['location'], data['width'], data['height']))
urllib.request.urlretrieve(data['last_media'], 'tmp.jpg')
return header + body
else:
header = (MESSAGE_HEADER % source)
return header + MESSAGE_FOOTER
def alert(user, follow, data, client_fb):
"""Get the notify message"""
users_notify = database.get_from_notify(username=user, username_follow=follow)
    for notify_entry in users_notify:
        if notify_entry['thread_type'] == '0':
            if notify_entry['image_flag']:
                message = get_message(message_type='image', source=follow, data=data)
                client_fb.sendLocalImage(image_path='tmp.jpg', message=message, thread_id=str(notify_entry['thread_id']))
                client_fb.sendMessage(message=MESSAGE_FOOTER, thread_id=str(notify_entry['thread_id']))
                logger.info('User %s notified %s on facebook.', user, str(notify_entry['thread_id']))
                # clean image created
                os.remove('tmp.jpg')
            else:
                message = get_message(message_type='no_image', source=follow)
                client_fb.sendMessage(message=message, thread_id=str(notify_entry['thread_id']))
                logger.info('%s got notified on facebook.', str(notify_entry['thread_id']))
def run(api_, user_):
"""Run bot"""
email_fb = input('Facebook email: ')
pass_fb = getpass(prompt='Facebook password: ')
client_fb = Client(email=email_fb, password=pass_fb, logging_level=logging.CRITICAL)
try:
print('Running..')
while True:
follows = database.get_from_follows(username=user_)
medias = database.get_from_media(username=user_)
for f_closely, username_follow, id_ in follows:
data = dict(last_media_id=0, media_count=0, user_id=0, last_media='', width=0, height=0, location='')
data['user_id'] = f_closely
api_.getUsernameInfo(str(f_closely))
media_results = api_.LastJson
data['media_count'] = media_results['user']['media_count']
api_.getUserFeed(str(f_closely))
media_results = api_.LastJson
last_media = media_results['items'][0]
try:
data['last_media_id'] = int(last_media['pk'])
data['last_media'] = last_media['image_versions2']['candidates'][0]['url']
data['width'] = last_media['image_versions2']['candidates'][0]['width']
data['height'] = last_media['image_versions2']['candidates'][0]['height']
data['location'] = last_media['location']['name']
except KeyError:
# for debugging
print('KeyError')
data_ = [media for media in medias if media['user_id'] == data['user_id']][0]
if data['last_media_id'] != data_['last_media_id']:
alert(user=user_, follow=username_follow, data=data, client_fb=client_fb)
# Update info on database
database.update_media(last_media_id=data['last_media_id'], media_count=data['media_count'],
foreign_id=id_, last_media=data['last_media'], width=data['width'],
height=data['height'],
location=data['location'], last_media_id_=data_['last_media_id'])
logger.info('Update media for user %s.', data['user_id'])
print('Sleeping')
sleep(120)
except KeyboardInterrupt:
print('Interrupted!')
def get_info(api_, user_id):
"""Save info of the follower"""
data = dict(last_media_id=0, media_count=0, user_id=0, last_media='', width=0, height=0, location='')
data['user_id'] = user_id
api_.getUsernameInfo(user_id)
media_results = api_.LastJson
data['media_count'] = media_results['user']['media_count']
api_.getUserFeed(user_id)
media_results = api_.LastJson
last_media = media_results['items'][0]
try:
data['last_media_id'] = int(last_media['pk'])
data['last_media'] = last_media['image_versions2']['candidates'][0]['url']
data['width'] = last_media['image_versions2']['candidates'][0]['width']
data['height'] = last_media['image_versions2']['candidates'][0]['height']
data['location'] = last_media['location']['name']
except KeyError:
# for debugging
print('KeyError')
exit()
return data
def validate_user(user_, passw, service):
"""Validate a user according to the service"""
if service == 'instagram':
results = database.get_from_users(username=user_, service=service)
elif service == 'facebook':
results = database.get_from_users(username=user_, service=service)
else:
print('Unknown service')
return False
if len(results) == 0:
print('User not registered.')
return False
    # it returns a list of tuples, dunno why
password_hash = str(results[0][0])
digest_ = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest_.update(bytes(passw, 'utf8'))
if password_hash == str(digest_.finalize().hex()):
logger.info('User %s validated on %s', user_, service)
return True
logger.warning('User %s not validated on %s. Hash do not match.', user_, service)
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Instagram Notifier. It get new posts from user that we want to follow closely and notify other '
'users on facebook messenger')
parser.add_argument('-u', action="store_true", dest='user', help='Add a valid user to the database.')
parser.add_argument('-f', action="store_true", dest='follow', help='Add someone to follow closely.')
parser.add_argument('-n', action="store_true", dest='notify', help='Add someone to get notified on facebook '
'messenger.')
parser.add_argument('-r', action="store_true", dest='run', help='Run Instagram Notifier.')
args = parser.parse_args()
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = vars(args)
username = input('Instagram username: ')
password = getpass(prompt='Instagram password: ')
if args['user']:
flag, api = login(user_=username, passw=password)
if flag:
print('Login success!')
email = input('Facebook email: ')
password_fb = getpass(prompt='Facebook password: ')
try:
client = Client(email, password_fb, logging_level=logging.CRITICAL)
except Exception:
print('Facebook - invalid username or password. Try again!')
else:
print('Login success!')
# Add confirmed user to database
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(bytes(password, 'utf8'))
insta_hash = digest.finalize().hex()
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(bytes(password_fb, 'utf8'))
fb_hash = digest.finalize().hex()
database.insert_user(username=username, password=<PASSWORD>, email=email, password_fb=fb_hash)
logger.info('User %s inserted on database.', username)
client.logout()
else:
print('Invalid username or password. Try again!')
exit()
if args['follow']:
if validate_user(user_=username, passw=password, service='instagram'):
# flag will be always True
flag, api = login(user_=username, passw=password)
follow_user = input('Let\'s follow (you must know the instagram username of the person ): ')
flag, info = follow_closely(api_=api, follow_username=follow_user)
if flag:
# add to the database
id_row = database.insert_follow(user_id=info['pk'], username=info['username'], user=username,
full_name=info['full_name'], is_private=info['is_private'],
profile_pic=info['profile_pic_url'])
# get actual info about feed and save it
data = get_info(api_=api, user_id=info['pk'])
database.insert_media(last_media_id=data['last_media_id'], media_count=data['media_count'],
foreign_id=id_row, last_media=data['last_media'], width=data['width'],
height=data['height'], location=data['location'])
logger.info('User %s is now following closely %s.', username, follow_user)
else:
print('You are not following the user with the instagram username: ' + follow_user)
else:
print('Invalid username or password. Try again!')
exit()
if args['notify']:
email = input('Facebook email: ')
password_fb = getpass(prompt='Facebook password: ')
if validate_user(user_=email, passw=<PASSWORD>, service='facebook'):
client = Client(email=email, password=<PASSWORD>, logging_level=logging.CRITICAL)
notify = input('Let\'s notify (you must know the facebook name of the person ): ')
# It take in consideration only the first 10 friends, exclude non friends
notify = client.searchForUsers(name=notify, limit=10)
friends = sorted([x for x in notify if x.is_friend], key=lambda x: x.uid)
print('There are many friends with this name: ')
print('\n'.join('{} -> name: {}, photo: {}'.format(i, k.name, k.photo) for i, k in
enumerate(friends)))
io = input('Choose one of them: ')
notify = friends[int(io)]
print('This person should receive notifications about whom?')
follow_ = sorted([f[1] for f in database.get_from_follows(username=username)])
print('\n'.join('{}: {}'.format(*k) for k in enumerate(follow_)))
to_notify = input('Choose the people you want to notify(ie: 1,2): ')
to_notify = [int(s.replace(' ', '')) for s in to_notify.split(',')]
to_notify = [follow_[x] for x in to_notify]
for person in to_notify:
id_follow = database.get_from_follows_find(username=username, username_follow=person)[0]
database.insert_notify(foreign_id=id_follow, thread_id=notify.uid, thread_type=0, image_flag=1)
logger.info('User %s will notify %s about something.', username, notify)
if args['run']:
if validate_user(user_=username, passw=password, service='instagram'):
# flag will be always True
flag, api = login(user_=username, passw=password)
run(api_=api, user_=username)
| 2.484375 | 2 |
ros/src/control/servers/heave.py | srmauvsoftware/URSim | 30 | 12789093 | #!/usr/bin/env python
from control.msg import heaveFeedback, heaveAction, heaveResult
from std_msgs.msg import Float64
import rospy
import time
import actionlib
class Heave(object):
feedback = heaveFeedback()
result = heaveResult()
def __init__(self, name):
self.heavePub = rospy.Publisher('/heave_setpoint', Float64, queue_size=1)
rospy.Subscriber("/heave", Float64, self.heaveCallback)
self.serverName = name
self.heaveServer = actionlib.SimpleActionServer(
self.serverName,
heaveAction,
execute_cb=self.heaveActionCallback,
auto_start=False)
self.heaveServer.start()
def heaveCallback(self, data):
self.heave = data.data
def heaveActionCallback(self, goal):
success = False
while(goal.heave_setpoint != self.heave):
start = int(time.time())
while(abs(goal.heave_setpoint - self.heave) < 3):
if(int(time.time()) == start + 10):
success = True
break
            if(success):
break
self.heavePub.publish(goal.heave_setpoint)
rospy.loginfo('heave: %f, heave Setpoint: %f, Error: %f', \
                          self.heave, goal.heave_setpoint, \
                          goal.heave_setpoint-self.heave)
if success:
self.result.heave_final = self.heave
rospy.loginfo('%s : Success' % self.serverName)
self.heaveServer.set_succeeded(self.result)
if __name__ == '__main__':
rospy.init_node('heaveServer')
server = Heave(rospy.get_name())
rospy.spin()
| 2.453125 | 2 |
denma_contact_form/migrations/0004_remove_contactform_subscribed_user.py | denma-group/website-backend-django | 0 | 12789094 | <filename>denma_contact_form/migrations/0004_remove_contactform_subscribed_user.py
# Generated by Django 2.2 on 2019-09-05 23:51
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('denma_contact_form', '0003_auto_20190905_1300'),
]
operations = [
migrations.RemoveField(
model_name='contactform',
name='subscribed_user',
),
]
| 1.335938 | 1 |
Pyton_Exp/__temp_migrations/vote_s/0003_group_tipo_votante.py | hbahamonde/Vote_Buying_Inequality | 0 | 12789095 | <gh_stars>0
# Generated by Django 2.2.12 on 2021-03-26 13:39
from django.db import migrations
import otree.db.models
class Migration(migrations.Migration):
dependencies = [
('vote_s', '0002_auto_20210326_1039'),
]
operations = [
migrations.AddField(
model_name='group',
name='tipo_votante',
field=otree.db.models.IntegerField(default=0, null=True),
),
]
| 1.453125 | 1 |
src/main/resources/archetype-resources/src/test/resources/robotframework/quickstart/testlibs/LoginLibrary.py | MarkusBernhardt/robotframework-archetype-quickstart | 2 | 12789096 | import os
import sys
class LoginLibrary:
def __init__(self):
self._sut_path = os.path.join(os.path.dirname(__file__),
'..', 'sut', 'login.py')
self._status = ''
def create_user(self, username, password):
self._run_command('create', username, password)
def change_password(self, username, old_pwd, new_pwd):
self._run_command('change-password', username, old_pwd, new_pwd)
def attempt_to_login_with_credentials(self, username, password):
self._run_command('login', username, password)
def status_should_be(self, expected_status):
if expected_status != self._status:
raise AssertionError("Expected status to be '%s' but was '%s'"
% (expected_status, self._status))
def _run_command(self, command, *args):
command = '"%s" %s %s' % (self._sut_path, command, ' '.join(args))
process = os.popen(command)
self._status = process.read().strip()
process.close()
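# Illustrative Robot Framework usage of this keyword library (the status strings are whatever
# the login.py system under test prints, e.g. 'SUCCESS'):
#
#   *** Settings ***
#   Library    LoginLibrary.py
#
#   *** Test Cases ***
#   Create Valid User
#       Create User         fred    P4ssw0rd
#       Status Should Be    SUCCESS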
| 2.875 | 3 |
development/workflow/raw_container.py | pingsutw/flyte-app | 7 | 12789097 | import logging
from flytekit import ContainerTask, kwtypes, task, workflow
logger = logging.getLogger(__file__)
calculate_ellipse_area_shell = ContainerTask(
name="ellipse-area-metadata-shell",
input_data_dir="/var/inputs",
output_data_dir="/var/outputs",
inputs=kwtypes(a=float, b=float),
outputs=kwtypes(area=float, metadata=str),
image="pingsutw/raw:v2",
command=[
"sh",
"-c",
"./calculate-ellipse-area.sh /var/inputs /var/outputs;",
],
)
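# Raw-container contract (sketch inferred from the directories configured above): flytekit
# writes each input to a file under /var/inputs (e.g. /var/inputs/a, /var/inputs/b) before the
# container starts, and after the command exits it reads /var/outputs/area and
# /var/outputs/metadata back as the declared outputs. The calculate-ellipse-area.sh script in
# the image is assumed to follow that file layout.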
@task
def report_all_calculated_areas(
area_shell: float,
metadata_shell: str,
):
logger.info(f"shell: area={area_shell}, metadata={metadata_shell}")
@workflow
def wf(a: float = 2.0, b: float = 3.0):
area_shell, metadata_shell = calculate_ellipse_area_shell(a=a, b=b)
report_all_calculated_areas(
area_shell=area_shell,
metadata_shell=metadata_shell,
)
if __name__ == "__main__":
print(f"Running wf() {wf(a=2.0, b=3.0)}")
| 2.15625 | 2 |
core_pipeline.py | Kelsiii/Gaze | 0 | 12789098 | <gh_stars>0
from gaze.pipes import Graph
from gaze.nodes import NetworkSource
from gaze.nodes import NetworkSink
import os
IN_IP="0.0.0.0"
IN_PORT=5001
OUT_IP = os.getenv("OUT_IP") if os.getenv("OUT_IP") else "127.0.0.1"
OUT_PORT = int(os.getenv("OUT_PORT")) if os.getenv("OUT_PORT") else 5001
print(IN_IP,IN_PORT,OUT_IP,OUT_PORT)
x = NetworkSource(ip=IN_IP,port=IN_PORT)
x = NetworkSink(ip=OUT_IP,port=OUT_PORT)(x)
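# Pipeline sketch: NetworkSource is the head node and NetworkSink(...)(x) chains the sink onto
# it, so Graph(x) can walk the whole chain when run() is called. Extra gaze.nodes processing
# stages could be slotted between source and sink with the same call-chaining style (an
# assumption based on the API usage here, not a documented guarantee).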
graph = Graph(x)
graph.run()
| 2.453125 | 2 |
research-analytics/dashboard.py | jhupiterz/research-analytics | 0 | 12789099 | #--------------------------------------------------------------------------#
# This code makes use of all other functions of #
# the package to build a Dash Web App #
#--------------------------------------------------------------------------#
# imports ------------------------------------------------------------------
import plots
import utils
from data_collection import semantic_api
from data_preprocessing import data_preprocess
import requests
import pandas as pd
import dash
import dash_cytoscape as cyto
from dash import dcc
from dash import html
from dash.dependencies import Input, Output
# Data loading and cleaning with Semantic Scholar API -----------------------
#df, all_references_df, total_results, query = semantic_api.get_all_results_from_semantic_scholar()
# Instantiate Dash App ------------------------------------------------------
app = dash.Dash(
__name__, suppress_callback_exceptions = True,
meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1", 'charSet':'“UTF-8”'}])
server = app.server
app.title = "Research Analytics"
# Layout --------------------------------------------------------------------
app.layout = html.Div(
[
# Banner ------------------------------------------------------------
html.Div(
[
html.A(
[
html.Img(
src="/assets/web.png",
alt="research intelligence"
),
html.H3("research analytics")
],
href="https://jhupiterz.notion.site/Welcome-to-research-intelligence-\
a36796f418b040f6ade944f9c54e87cb",
target='_blank',
className="logo-banner",
),
html.Div(
[
html.A(
"Contribute",
href="https://github.com/jhupiterz/research-analytics",
target='_blank',
className="doc-link"
),
html.A(
"Documentation",
href="https://github.com/jhupiterz/research-analytics/blob/main/README.md",
target='_blank',
className="doc-link"
),
],
className="navbar"
),
],
className="banner",
),
# Search bar ------------------------------------------------------------
html.Div(
[
html.H1(id='topic', children=[]),
html.Div(
[
html.Img(
src='/assets/loupe.png',
className="loupe-img",
),
dcc.Input(
id='search-query',
type = 'text',
placeholder = "Search for keywords (e.g. \"carbon nanotubes\")",
debounce = True,
spellCheck = True,
inputMode = 'latin',
name = 'text',
autoFocus = False,
minLength = 1, maxLength = 60,
autoComplete='off',
disabled = False,
readOnly = False,
size = '60',
n_submit = 0,
),
],
className="search-bar",
),
],
className="search-wrapper"
),
dcc.Store(id='store-initial-query-response', storage_type='memory'),
dcc.Store(id='store-references-query-response', storage_type='memory'),
# Main content ----------------------------------------------------------
html.Div(id='start-page', children=[], className = 'main-body'),
# Footer ----------------------------------------------------------------
html.Footer(
[
html.P(
[
"Built with ",
html.A("Plotly Dash", href="https://plotly.com/dash/", target="_blank")
],
),
html.P(
[
"Powered by ",
html.A("Semantic Scholar", href="https://www.semanticscholar.org/", target="_blank")
],
),
]
),
],
className="app-layout",
)
# Callbacks --------------------------------------------------------------------
# Store response of initial API query
@app.callback(
Output('store-initial-query-response', 'data'),
Input('search-query', 'n_submit'),
Input('search-query', 'value'))
def store_primary_data(n_submit, value):
if n_submit > 0:
url = f"https://api.semanticscholar.org/graph/v1/paper/search?query={value}&limit=30&fields=url,title,abstract,authors,venue,year,referenceCount,citationCount,influentialCitationCount,isOpenAccess,fieldsOfStudy"
response = requests.get(url).json()
df = pd.DataFrame(response['data'])
df = data_preprocess.extract_key_words(df)
return {
'data': df.to_dict("records")
}
# Store dictionary of references of all initial papers
@app.callback(
Output('store-references-query-response', 'data'),
Input('store-initial-query-response', 'data'))
def store_references_data(data):
if data != None:
ref_dict = []
for paper in data['data']:
paper_id = paper['paperId']
url = f"https://api.semanticscholar.org/graph/v1/paper/{paper_id}/references?limit=50&fields=intents,isInfluential,paperId,url,title,abstract,venue,year,referenceCount,citationCount,influentialCitationCount,isOpenAccess,fieldsOfStudy,authors"
response = requests.get(url).json()
ref_data = response['data']
for cited_paper in ref_data:
cited_paper['citedPaper']['citedBy'] = paper_id
ref_dict.append(cited_paper['citedPaper'])
return ref_dict
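# Note on the two dcc.Store components: 'store-initial-query-response' caches the first page of
# Semantic Scholar results as {'data': [paper, ...]}, while 'store-references-query-response'
# holds a flat list of cited papers, each tagged with the 'citedBy' id of the result paper that
# referenced it. The graph callbacks below rebuild their DataFrames from these stores instead of
# re-querying the API.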
# Displays start page
@app.callback(
Output('start-page', 'children'),
Input('search-query', 'n_submit'))
def render_content(n_submit):
""" Returns the content of start page.
If there is data then returns tabs
Else, returns default content of start page (blog posts)"""
if n_submit > 0:
return (
html.Div([
dcc.Tabs(id="tabs-example-graph", value = 'tab-1-example-graph', className= "tabs",
children=[
dcc.Tab(label='📊 Search results 📊', value='tab-1-example-graph',
className= "single-tab", selected_className= "single-tab-selected"),
dcc.Tab(label='🤝 Author network 🤝', value='tab-2-example-graph',
className= "single-tab", selected_className= "single-tab-selected"),
dcc.Tab(label='🌐 Paper network 🌐', value='tab-3-example-graph',
className= "single-tab", selected_className= "single-tab-selected")
])
], className= "tabs-container"),
html.Br(),
html.Div(id='tabs-content-example-graph'))
else:
return html.Div(
[
html.Hr(),
html.P("👇 Or check out the latest blog posts about data-driven academia 👇"), html.Br(),
html.Div([
html.A(
href="https://medium.com/@juhartz/are-scholarly-papers-really-the-best-way-to-disseminate-research-f8d85d3eee62",
children=[
html.Img(
alt="Link to my twitter",
src="assets/blogpost_1.png",
className="zoom"
)
], target= '_blank', className= "blog-post-1"
),
html.A(
href="https://medium.com/@juhartz/what-makes-a-research-paper-impactful-a40f33206fd1",
children=[
html.Img(
alt="Link to my twitter",
src="assets/blogpost_2.png",
className='zoom'
)
], target= '_blank', className= "blog-post-2"
)
],className= "blog-posts")],
className= "start-page")
# Returns content of each tab when selected
@app.callback(Output('tabs-content-example-graph', 'children'),
Input('tabs-example-graph', 'value'),
Input('store-references-query-response', 'data'))
def render_tab_content(tab, data_ref = None):
if tab == 'tab-1-example-graph':
if data_ref != None:
return (
html.Div([
html.Div([
html.Div([
html.P("Filter results in time "),
dcc.RangeSlider(1940, 2030, 10, value=[1940, 2030], id='time-range-slider',
allowCross=False, className= "range-slider",
marks={
1940: {'label': '1940', 'style': {'color': 'black'}},
1950: {'label': '1950', 'style': {'color': 'black'}},
1960: {'label': '1960', 'style': {'color': 'black'}},
1970: {'label': '1970', 'style': {'color': 'black'}},
1980: {'label': '1980', 'style': {'color': 'black'}},
1990: {'label': '1990', 'style': {'color': 'black'}},
2000: {'label': '2000', 'style': {'color': 'black'}},
2010: {'label': '2010', 'style': {'color': 'black'}},
2020: {'label': '2020', 'style': {'color': 'black'}},
2030: {'label': '2030', 'style': {'color': 'black'}},
})], className = "global-time-filter"),
html.Div([html.Button(
"Download data",
title = "Downloads data as .CSV file",
id = "btn-download-data",
className="doc-link-download",
n_clicks= 0
),
dcc.Download(id="download-csv")], style = {'order': '2'})],
className= "upper-filters"),
html.Div([
html.Div([
dcc.Loading(id = "loading-icon-1",
children=[
html.Div([
html.Div(id = 'dp-keywords', children= [], className = "keywords-dropdown"),
html.Div(id = 'keywords-graph-all', children= [], className= "keywords-plot")],
className = "keywords-graph")],
type = 'default', className= "loading-keywords"),
html.Div(id = 'accessibility-pie-all', children = [
html.Div([
html.Div(id = 'dp-access', children=[], style = {'order': '2'}),
html.Div(id = 'access-pie-all', children= [], style = {'order': '1', 'margin': 'auto'})],
className= "accessibility-graph"),
html.Div(id = 'fields-pie-all', children = [], className= "fields-pie-graph")],
className= "fields-pie-and-dropdown")],
className= "tab-1-upper-graphs"),
html.Br(),
html.Br(),
html.Div([
html.Div(id = 'active-authors-graph-all', children = [], className= "active-authors-graph"),
html.Div(id = 'publication-graph-all', children = [], className= "citations-graph")],
className= "tab-1-lower-graphs"),
],
className= "tab-1")], className= "tab-1-with-download"))
else:
return html.Div([html.P("Retrieving info about 1000s of papers, please give it a few seconds",
style = {'order': '1', 'font-size': '1.5rem', 'color':'rgba(3, 3, 3, 0.2)',
'text-align': 'center', 'margin-top': '10vh'}),
html.Img(src='/assets/spinner.gif', style= {'order':'2', 'margin': 'auto'})],
style= {'display': 'flex', 'flex-direction':'column', 'justify-content': 'center',
'align-items': 'center', 'min-height': '400px', 'width':'60vw', 'margin': 'auto'})
if tab == 'tab-2-example-graph':
return html.Div([
html.Div([
html.Div([
html.Button('Reset view', id='bt-reset', className= 'reset-button'),
html.Div(id = 'dp-access-cytoscape', children = [], style={'order':'2'})],
className= "dropdown-and-button-cyto-1"),
cyto.Cytoscape(
id='cytoscape-event-callbacks-1',
layout={'name': 'random', 'height': '58vh', 'width': '44vw'},
className= "cyto-1",
stylesheet = [
{
'selector': 'label',
'style': {
'content': 'data(label)',
'color': 'rgba(60, 25, 240, 0.8)',
'font-size':'14vh',
'font-family':'Arial, sans serif',
}
},
{
'selector': 'node',
'style': {
'label': 'data(label)'
}
},
{
'selector': '[selected ^= "True"]',
'style': {
'background-color': 'green',
'line-color': 'green'
}
},
{
'selector': '.author',
'style': {
'background-color': 'rgba(60, 25, 240, 0.8)'
}
},
{
'selector': '.collaboration',
'style': {
'line-color': '#737373',
'width': 1
}
}
])],
className= "cyto-1-and-button-container"),
html.Div(className= 'vl', style = {'order': '2'}),
html.Div([
html.Div([
html.Div(id = 'author-info-1', className= "author-info")],
className= "author-info-container")],
className= "author-info-big-container")
], className= "tab-2")
if tab == 'tab-3-example-graph':
return html.Div([
html.Div([
html.Button('Reset view', id='bt-reset-papers', className= 'reset-button'),
cyto.Cytoscape(
id='cytoscape-event-callbacks-2',
layout={'name': 'random', 'height': '58vh', 'width': '50vw'},
style={'order':'2','height': '58vh', 'width': '50vw'},
#className= "cyto-2",
stylesheet = [
{
'selector': 'node',
'style': {
'background-color': 'rgba(60, 25, 240, 0.8)',
'height': '9vh',
'width': '9vh'
}
},
{
'selector': '.res',
'style': {
'background-color': 'green',
'color': 'red',
'height': '1.2vh',
'width': '1.2vh'
}
},
{
'selector': '.ref',
'style': {
'background-color': 'white',
'color': 'white',
'height': '0.8vh',
'width': '0.8vh'
}
},
{
'selector': '.citation',
'style': {
'line-color': '#737373',
'width': 1
}
}
])],
className= "cyto-2-and-button-container"),
html.Div(className= 'vl', style = {'order': '2'}),
html.Div([
html.Div([
html.Div(id = 'paper-info-1', className= "paper-info")],
className= "paper-info-container")],
className= "paper-info-big-container")],
className= "tab-3")
# Welcome title
@app.callback(
Output('topic', 'children'),
Input('search-query', 'value'))
def display_topic(value):
return "Welcome researcher!"
# Download data as CSV button
@app.callback(
Output("download-csv", "data"),
Input("btn-download-data", "n_clicks"),
Input("store-initial-query-response", "data"),
Input("store-references-query-response", "data"),
Input("time-range-slider", "value"),
prevent_initial_call=True,
)
def func(n_clicks, data_res, data_ref, filter_values):
if data_ref:
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
if n_clicks > 0:
return dcc.send_data_frame(dff_all.to_csv, "research_data.csv")
# Plots and graphs ----------------------------------------------
# keywords
@app.callback(
Output('keywords-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('search-query', 'value'),
Input('dp-keywords-component', 'value'),
Input('time-range-slider', 'value'))
def create_top_key_words_all(data_res, query, filter, filter_values):
"""Returns keywords graph as dcc.Graph component
Only displays it when all data is retrieved"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_res = data_preprocess.filter_data_by_time(dff_res, filter_values)
if filter == 'All':
fig = plots.make_top_key_words(dff_res, query)
else:
index_list = []
for index, row in dff_res.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_res.loc[index_list]
fig = plots.make_top_key_words(dff_filtered,query)
return dcc.Graph(figure=fig, className= "keywords-plotly")
@app.callback(
Output('dp-keywords', 'children'),
Input('store-initial-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_keywords_dropdown(data_res, filter_values):
"""Returns the dropdown menu according to all fields of study in data
as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_res = data_preprocess.filter_data_by_time(dff_res, filter_values)
fields_of_study = dff_res['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-keywords-component', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-piie')
# loading states for keyword graphs
@app.callback(Output('loading-icon-1', 'children'),
              Input('keywords-graph-res', 'children'))
def show_keywords_res_loading(children):
    return children
@app.callback(Output('loading-icon-2', 'children'),
              Input('keywords-graph-ref', 'children'))
def show_keywords_ref_loading(children):
    return children
# Accessibility
@app.callback(
Output('dp-access', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_accessibility_pie_dropdown(data_res, data_ref, filter_values):
"""Returns the dropdown menu according to all fields of study in data
as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fields_of_study = dff_all['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-access-component', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-piie')
@app.callback(
Output('access-pie-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('dp-access-component', 'value'),
Input('time-range-slider', 'value'))
def create_accessibility_pie(data_res, data_ref, filter, filter_values):
"""Returns the accessibility pie graph for all data
as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
if filter == 'All':
fig = plots.make_access_pie(dff_all)
else:
index_list = []
for index, row in dff_all.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_all.loc[index_list]
fig = plots.make_access_pie(dff_filtered)
return dcc.Graph(figure = fig, className= "access-pie-plotly")
# Publications & citations per year
@app.callback(
Output('publication-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_publication_graph_all(data_res, data_ref, filter_values):
"""Returns the pubs + citations graph as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_pubs_cites_per_year_line(dff_all, filter_values)
return dcc.Graph(figure=fig, className= "pub-graph-plotly")
# Fields of study
@app.callback(
Output('fields-pie-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_fields_pie_res(data_res, data_ref, filter_values):
"""Returns the fields pie as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_fields_pie(dff_all)
return dcc.Graph(figure=fig, className= "fields-pie-plotly")
# Most active authors
@app.callback(
Output('active-authors-graph-all', 'children'),
Input('store-initial-query-response', 'data'),
Input('store-references-query-response', 'data'),
Input('time-range-slider', 'value'))
def create_active_authors_graph_res(data_res, data_ref, filter_values):
"""Returns the most active authors graph as a dcc.Graph component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
dff_ref = pd.DataFrame(data_ref)
dff_ref['result'] = 'reference'
dff_all = pd.concat([dff_res, dff_ref])
dff_all = data_preprocess.filter_data_by_time(dff_all, filter_values)
fig = plots.make_active_authors(dff_all)
return dcc.Graph(figure=fig, className = "pub-graph-plotly")
# Cytoscapes -------------------------------------------------------------------
@app.callback(
Output('dp-access-cytoscape', 'children'),
Input('store-initial-query-response', 'data'))
def create_dropdown_cytoscape(data_res):
"""Returns the dropdown menu according to all fields
of study as a dcc.Dropdown component"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
fields_of_study = dff_res['fieldsOfStudy'].tolist()
res = [field for field in fields_of_study if isinstance(field, list)]
flat_list_fields = utils.flatten_list(res)
options = ['All'] + list(set(flat_list_fields))
return dcc.Dropdown(id = 'dp-access-component_cytoscape', value = 'All',
options = options, clearable=False,
placeholder= 'Select a field of study', className= 'dp-access-pie')
@app.callback(
Output('cytoscape-event-callbacks-1', 'elements'),
Output('cytoscape-event-callbacks-1', 'zoom'),
Input('store-initial-query-response', 'data'),
Input('bt-reset', 'n_clicks'),
Input('dp-access-component_cytoscape', 'value'),
Input('cytoscape-event-callbacks-1', 'zoom'))
def generate_collaboration_network(data_res, n_clicks, filter, zoom):
"""Returns the elements of the collaboaration cytoscape in tab 2"""
dff_res = pd.DataFrame(data_res['data'])
dff_res['result'] = 'direct'
if filter == 'All':
elements = plots.generate_graph_elements_collab(dff_res)
else:
index_list = []
for index, row in dff_res.iterrows():
if isinstance(row.fieldsOfStudy, list):
if filter in row.fieldsOfStudy:
index_list.append(index)
dff_filtered = dff_res.loc[index_list]
elements = plots.generate_graph_elements_collab(dff_filtered)
if n_clicks:
if n_clicks > 0:
zoom = 1
return elements, zoom
return elements, zoom
@app.callback(
Output('cytoscape-event-callbacks-2', 'elements'),
Output('cytoscape-event-callbacks-2', 'zoom'),
Input('store-references-query-response', 'data'),
Input('store-initial-query-response', 'data'),
Input('bt-reset-papers', 'n_clicks'),
Input('cytoscape-event-callbacks-2', 'zoom'))
def generate_citation_network(data_ref, data_res, n_clicks, zoom):
"""Returns the elements of the citation cytoscape in tab 3"""
ref_df = pd.DataFrame(data_ref)
ref_df['reference'] = semantic_api.build_references(ref_df)
res_df = pd.DataFrame(data_res['data'])
res_df['reference'] = semantic_api.build_references(res_df)
elements= plots.generate_graph_elements_network(ref_df, res_df)
if n_clicks:
if n_clicks > 0:
zoom = 1
return elements, zoom
return elements, zoom
# Retrieves info on author
@app.callback(Output('author-info-1', 'children'),
Input('cytoscape-event-callbacks-1', 'tapNodeData'))
def displayTapNodeData(data):
"""Requests and returns the info about an author when node is clicked on"""
if data:
author_info = semantic_api.get_author_info(data['id'])
paragraph = html.Div([
html.B(author_info['name']), html.Br(),html.Br(),
html.Span("Published "), html.B(author_info['paperCount']), html.Span(" papers."), html.Br(),html.Br(),
html.Span("Received "), html.B(author_info['citationCount']), html.Span(" citations."), html.Br(),html.Br(),
html.Span(f"h index: "), html.B(author_info['hIndex']), html.Br(), html.Br(),
html.A("Semantic Scholar profile", href = author_info['url'], target= '_blank')],
className = "author-info-text"),
return paragraph
else:
return html.P("Click on a node to display information about an author",
className= "author-info-default-text")
# Retrieves info on paper
@app.callback(Output('paper-info-1', 'children'),
Input('cytoscape-event-callbacks-2', 'tapNodeData'))
def displayTapNodeData(data):
"""Requests and returns the info about a paper when node is clicked on"""
if data:
paper_info = semantic_api.get_paper_info(data['id'])
if 'paperId' in paper_info:
if paper_info['isOpenAccess']:
oa = ''
else:
oa = 'NOT'
            if paper_info['abstract'] is None:
paper_info['abstract'] = 'No abstract available for this paper.'
paragraph = html.Div([html.Br(), html.B(paper_info['title']), html.Br(),html.Br(),
html.Li([html.Span("Published in "), html.B(paper_info['year'])]),
html.Li([html.Span("Includes "), html.B(paper_info['referenceCount']), html.Span(" references.")]),
html.Li([html.Span("Received "), html.B(paper_info['citationCount']), html.Span(" citations.")]),
html.Li([html.Span("Is "), html.B(oa), html.Span(" open access.", style = {'font-size': '1.5vh', 'color': 'black'})]),
html.Li([html.A(' Semantic Scholar URL', href = paper_info['url'], target = '_blank')]), html.Br(),
html.B("Abstract"), html.Br(),
html.Span(paper_info['abstract'])],
className= "paper-info-text")
else:
paragraph = html.P("No info available for this paper", className= "paper-info-default-no-info")
return paragraph
else:
return html.P("Click on a node to display information about a paper",
className= "paper-info-default-text")
# Runs the app ------------------------------------------------------------
if __name__ == '__main__':
app.run_server(debug=True, use_reloader=False) | 2.21875 | 2 |
suppliers/models.py | yanwarsolahudin/regensi_api | 1 | 12789100 | <filename>suppliers/models.py
from django.db import models
from utils.models import Timestamp, generate_string_code
class Supplier(Timestamp):
PREFIX = 'SPR'
supplier_code = models.CharField(max_length=100, unique=True)
name = models.CharField(max_length=100, default='Ex: <NAME>')
address = models.TextField()
phone = models.CharField(max_length=20, default='Ex: +6299999999')
pic = models.CharField(max_length=100, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.supplier_code:
# Newly created object, so set slug
self.supplier_code = generate_string_code(Supplier)
super(Supplier, self).save(*args, **kwargs)
def __str__(self):
return self.supplier_code
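# Usage sketch (illustrative values): supplier_code is filled in automatically on the
# first save, so a supplier can be created without one.
#   supplier = Supplier.objects.create(name='Acme Pharma', address='...', phone='+620000000000')
#   supplier.supplier_code  # e.g. an 'SPR'-prefixed code from generate_string_code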
| 2.359375 | 2 |
src/python/dnsresolvd.py | rgolubtsov/dnsresolvd-multilang | 2 | 12789101 | <reponame>rgolubtsov/dnsresolvd-multilang
#
# src/python/dnsresolvd.py
# =============================================================================
# DNS Resolver Daemon (dnsresolvd). Version 0.9.9
# =============================================================================
# A daemon that performs DNS lookups for the given hostname
# passed in an HTTP request, with the focus on its implementation
# using various programming languages. (Twisted-boosted impl.)
# =============================================================================
# Copyright (C) 2017-2022 Radislav (Radicchio) Golubtsov
#
# (See the LICENSE file at the top of the source tree.)
#
from twisted.web.resource import Resource
from twisted.internet.endpoints import TCP4ServerEndpoint
from twisted.internet import reactor
from twisted.web.server import Site
import json
from dns_resolv.dns_lookup_controller import DnsLookupController
class DnsResolvd:
"""The main class of the daemon."""
def startup(self, port_number, aux):
"""Starts up the daemon.
Args:
port_number: The server port number to listen on.
aux: The controller helper object instance.
Returns:
The server exit code when interrupted.
"""
ret = aux._EXIT_SUCCESS
class Daemon(Resource):
"""The server class of the daemon.
Used by Twisted engine to run an event loop.
Extends:
Resource: The Twisted web-accessible resource class.
"""
isLeaf = True
def render_GET(self, req):
"""Renders the HTTP response based on the incoming HTTP
request using the HTTP GET method.
It also calls the method to perform DNS lookup for a hostname
passed in the HTTP request.
Args:
req: The incoming HTTP request object.
Returns:
The HTTP response has to be rendered.
"""
query = req.args
h = b"h"
f = b"f"
fmt = aux._PRM_FMT_JSON
# Parsing and validating query params.
if (h in query):
hostname = query[h][0].decode()
# ^
# |
# +----------------+
else: # |
hostname = aux._DEF_HOSTNAME # |
# |
# http://localhost:<port_number>/?h=<hostname>&f=<fmt>
# |
# +-----------------------------+
# |
if (f in query): # v
fmt = query[f][0].decode().lower()
if (fmt not in {
aux._PRM_FMT_HTML,
aux._PRM_FMT_JSON,
}):
fmt = aux._PRM_FMT_JSON
# Instantiating the controller class.
ctrl = DnsLookupController()
# Performing DNS lookup for the given hostname.
(addr, ver) = ctrl.dns_lookup(hostname, aux)
if (fmt == aux._PRM_FMT_HTML):
resp_buffer = ("<!DOCTYPE html>" + aux._NEW_LINE
+ "<html lang=\"en-US\" dir=\"ltr\">" + aux._NEW_LINE
+ "<head>" + aux._NEW_LINE
+ "<meta http-equiv=\"" + aux._HDR_CONTENT_TYPE_N + "\" content=\""
+ aux._HDR_CONTENT_TYPE_V_HTML + "\" />" + aux._NEW_LINE
+ "<meta http-equiv=\"X-UA-Compatible\" content=\"IE=edge\" />" + aux._NEW_LINE
+ "<meta name=\"viewport\" content=\"width=device-width,initial-scale=1\" />" + aux._NEW_LINE
+ "<title>" + aux._DMN_NAME + "</title>" + aux._NEW_LINE
+ "</head>" + aux._NEW_LINE
+ "<body>" + aux._NEW_LINE
+ "<div>" + hostname + aux._ONE_SPACE_STRING)
if (addr == aux._ERR_PREFIX):
resp_buffer += (aux._ERR_PREFIX
+ aux._COLON_SPACE_SEP
+ aux._ERR_COULD_NOT_LOOKUP)
else:
resp_buffer += (str(addr) + aux._ONE_SPACE_STRING
+ aux._DAT_VERSION_V
+ str(ver))
resp_buffer += ("</div>" + aux._NEW_LINE
+ "</body>" + aux._NEW_LINE
+ "</html>" + aux._NEW_LINE)
elif (fmt == aux._PRM_FMT_JSON):
if (addr == aux._ERR_PREFIX):
resp_buffer = json.dumps({
aux._DAT_HOSTNAME_N : hostname,
aux._ERR_PREFIX : aux._ERR_COULD_NOT_LOOKUP,
})
else:
resp_buffer = json.dumps({
aux._DAT_HOSTNAME_N : hostname,
aux._DAT_ADDRESS_N : str(addr),
aux._DAT_VERSION_N : aux._DAT_VERSION_V
+ str(ver),
})
# Adding headers to the response.
aux.add_response_headers(req, fmt)
return resp_buffer.encode()
def render_POST(self, req):
"""Renders the HTTP response based on the incoming HTTP
request using the HTTP POST method.
It simply calls its "GET" counterpart
<code>render_GET()</code>.
Args:
req: The incoming HTTP request object.
Returns:
The HTTP response has to be rendered.
"""
return self.render_GET(req)
## Default constructor.
def __init__(self):
self = []
return None
# Setting up the TCP IPv4-configured server.
TCP4ServerEndpoint(reactor, port_number).listen(Site(Daemon()))
# Running the event loop.
ret = reactor.run()
return ret
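    # Example request (illustrative; the port is whatever was passed to startup()):
    #   curl 'http://localhost:<port_number>/?h=example.com&f=json'
    # The daemon answers with the hostname, resolved address and IP version,
    # or an error entry when the lookup fails.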
def __init__(self):
"""Default constructor."""
self = []
return None
# vim:set nu et ts=4 sw=4:
| 2.484375 | 2 |
src/aceinna/tools/cli.py | LukaszChl/ros_openimu | 6 | 12789102 | import os
import sys
import argparse
try:
from aceinna.bootstrap.cli import CommandLine
from aceinna.framework.constants import BAUDRATE_LIST
except: # pylint: disable=bare-except
print('load package from local')
sys.path.append('./src')
from aceinna.bootstrap.cli import CommandLine
from aceinna.framework.constants import BAUDRATE_LIST
def receive_args():
"""parse input arguments
"""
parser = argparse.ArgumentParser(
description='Aceinna python driver input args command:')
# parser.add_argument("-host", type=str, help="host type", default='web')
# for host as web
parser.add_argument("-p", "--port", type=int,
help="Webserver port")
parser.add_argument("--device-type", type=str,
help="Open Device Type")
parser.add_argument("-b", "--baudrate", type=int,
help="Baudrate for uart", choices=BAUDRATE_LIST)
parser.add_argument("-c", "--com-port", type=str,
help="COM Port")
parser.add_argument("--console-log", dest='console_log', action='store_true',
help="Output log on console", default=False)
parser.add_argument("--debug", dest='debug', action='store_true',
help="Log debug information", default=False)
parser.add_argument("--with-data-log", dest='with_data_log', action='store_true',
help="Contains internal data log (OpenIMU only)", default=False)
parser.add_argument("--with-raw-log", dest='with_raw_log', action='store_true',
help="Contains raw data log (OpenRTK only)", default=False)
return parser.parse_args()
def main():
'''start'''
input_args = receive_args()
command_line = CommandLine(
device_type=input_args.device_type,
com_port=input_args.com_port,
port=input_args.port,
baudrate=input_args.baudrate,
console_log=input_args.console_log,
debug=input_args.debug,
with_data_log=input_args.with_data_log,
with_raw_log=input_args.with_raw_log
)
command_line.listen()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt: # response for KeyboardInterrupt such as Ctrl+C
print('User stop this program by KeyboardInterrupt! File:[{0}], Line:[{1}]'.format(
__file__, sys._getframe().f_lineno))
sys.exit()
except: # pylint: disable=bare-except
os._exit(1)
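# Example invocation (illustrative; the COM port and baudrate are placeholders and
# the baudrate must be a value from BAUDRATE_LIST):
#   python cli.py --com-port COM3 --baudrate 115200 --console-log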
| 2.34375 | 2 |
pypardot/objects_v3/accounts.py | andyoneal/PyPardotSF | 18 | 12789103 | class Accounts(object):
"""
A class to query and use Pardot accounts.
Account field reference: http://developer.pardot.com/kb/api-version-3/object-field-references/#prospectAccount
"""
def __init__(self, client):
self.client = client
def query(self, **kwargs):
"""
Returns the prospect accounts matching the specified criteria parameters.
Supported search criteria: http://developer.pardot.com/kb/api-version-3/prospect-accounts/#supported-search-criteria
"""
response = self._get(path='/do/query', params=kwargs)
# Ensure result['prospectAccount'] is a list, no matter what.
result = response.get('result')
if result['total_results'] == 0:
result['prospectAccount'] = []
elif result['total_results'] == 1:
result['prospectAccount'] = [result['prospectAccount']]
return result
def create(self, **kwargs):
"""Creates a new prospect account."""
response = self._post(path='/do/create', params=kwargs)
return response
def describe(self, **kwargs):
"""
Returns the field metadata for prospect accounts, explaining what fields are available, their types, whether
they are required, and their options (for dropdowns, radio buttons, etc).
"""
response = self._get(path='/do/describe', params=kwargs)
return response
def read(self, id=None, **kwargs):
"""
Returns the data for the prospect account specified by <id>. <id> is the Pardot ID of the target prospect
account.
"""
response = self._post(path='/do/read/id/{id}'.format(id=id), params=kwargs)
return response
def update(self, id=None, **kwargs):
"""
Updates the data for the prospect account specified by <id>. <id> is the Pardot ID of the target prospect
account.
"""
response = self._post(path='/do/update/id/{id}'.format(id=id), params=kwargs)
return response
def _get(self, object_name='prospectAccount', path=None, params=None):
"""GET requests for the Account object."""
if params is None:
params = {}
response = self.client.get(object_name=object_name, path=path, params=params)
return response
def _post(self, object_name='prospectAccount', path=None, params=None):
"""POST requests for the Account object."""
if params is None:
params = {}
response = self.client.post(object_name=object_name, path=path, params=params)
return response
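# Usage sketch (illustrative; `client` is assumed to be an authenticated PyPardot
# client exposing get/post):
#   accounts = Accounts(client)
#   result = accounts.query()  # optionally pass supported search criteria as kwargs
#   for account in result['prospectAccount']:
#       print(account)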
| 3.15625 | 3 |
day24/aoc2018-day24.py | SebastiaanZ/aoc-2018 | 1 | 12789104 | from reindeer import Disease
# Part I
disease = Disease("day24-input.txt")
found, result = disease.battle()
print(f"Answer part I: {result}")
# Part II
for boost in range(10000):
body = Disease("day24-input.txt", boost)
found, result = body.battle()
if found:
break
print(f"Answer part II: {result} (with boost={boost})")
| 2.828125 | 3 |
Curso Udemy 2022/Curso_Luiz_Otavio/Aula 41.py | Matheusfarmaceutico/Exercicios-Python | 0 | 12789105 | # uso do split e count
'''string = 'O Brasil Brasil I I I I'
lista_1 = string.split(' ')
palavra = ''
cont = 0
for valor in lista_1:
quantvezes = lista_1.count(valor)
if quantvezes > cont:
cont = quantvezes
palavra = valor
print(f'A palavra que mais apareceu nessa frase foi {palavra} ')'''
# using join
'''lista = ['Ana','Vitória']
juntar = '***'.join(lista)
print(juntar)'''
# using enumerate: returns tuples
'''lista = ['Jogador','Numero']
for indice, valor in enumerate(lista):
print(indice,valor)'''
| 3.65625 | 4 |
tests/test_data.py | lgray/AwkwardQL | 1 | 12789106 | import pytest
def test_data():
from awkwardql.data import (RecordArray,
PrimitiveArray,
ListArray,
UnionArray,
instantiate)
# data in columnar form
events = RecordArray({
"muons": ListArray([0, 3, 3, 5], [3, 3, 5, 9], RecordArray({
"pt": PrimitiveArray([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]),
"iso": PrimitiveArray([0, 0, 100, 50, 30, 1, 2, 3, 4])
})),
"jets": ListArray([0, 5, 6, 8], [5, 6, 8, 12], RecordArray({
"pt": PrimitiveArray([1, 2, 3, 4, 5, 100, 30, 50, 1, 2, 3, 4]),
"mass": PrimitiveArray([10, 10, 10, 10, 10, 5, 15, 15, 9, 8, 7, 6])
})),
"met": PrimitiveArray([100, 200, 300, 400])
})
# same data in rowwise form
assert events == [
{'muons': [
{'pt': 1.1, 'iso': 0},
{'pt': 2.2, 'iso': 0},
{'pt': 3.3, 'iso': 100}],
'jets': [
{'pt': 1, 'mass': 10},
{'pt': 2, 'mass': 10},
{'pt': 3, 'mass': 10},
{'pt': 4, 'mass': 10},
{'pt': 5, 'mass': 10}],
'met': 100},
{'muons': [],
'jets': [{'pt': 100, 'mass': 5}],
'met': 200},
{'muons': [
{'pt': 4.4, 'iso': 50},
{'pt': 5.5, 'iso': 30}],
'jets': [
{'pt': 30, 'mass': 15},
{'pt': 50, 'mass': 15}],
'met': 300},
{'muons': [
{'pt': 6.6, 'iso': 1},
{'pt': 7.7, 'iso': 2},
{'pt': 8.8, 'iso': 3},
{'pt': 9.9, 'iso': 4}],
'jets': [
{'pt': 1, 'mass': 9},
{'pt': 2, 'mass': 8},
{'pt': 3, 'mass': 7},
{'pt': 4, 'mass': 6}],
'met': 400}]
# projection down to the numerical values
assert events["muons"]["pt"] == [[1.1, 2.2, 3.3], [], [4.4, 5.5], [6.6, 7.7, 8.8, 9.9]]
# single record object
assert events[0] == {
'muons': [
{'pt': 1.1, 'iso': 0},
{'pt': 2.2, 'iso': 0},
{'pt': 3.3, 'iso': 100}],
'jets': [
{'pt': 1, 'mass': 10},
{'pt': 2, 'mass': 10},
{'pt': 3, 'mass': 10},
{'pt': 4, 'mass': 10},
{'pt': 5, 'mass': 10}],
'met': 100}
# integer and string indexes commute, but string-string and integer-integer do not
assert events["muons"][0] == events[0]["muons"]
assert events["muons"][0]["pt"] == events[0]["muons"]["pt"]
assert events["muons"][0][2] == events[0]["muons"][2]
assert events["muons"][0]["pt"][2] == events[0]["muons"]["pt"][2]
assert events["muons"][0]["pt"][2] == events[0]["muons"][2]["pt"]
assert events["muons"][0]["pt"][2] == events["muons"][0][2]["pt"]
assert events["muons"][0]["pt"][2] == events["muons"]["pt"][0][2]
events.setindex()
muonpt = events.contents["muons"].content.contents["pt"]
assert muonpt.row == [(0, 0), (0, 1), (0, 2), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2), (3, 3)]
assert muonpt.col == ("muons", "pt")
muoniso = events.contents["muons"].content.contents["iso"]
assert muonpt.row == muoniso.row
assert muonpt.row.same(muoniso.row)
c1, c2 = muonpt.col.tolist()
for i, (r1, r2) in enumerate(muonpt.row):
assert events[c1][c2][r1][r2] == muonpt[i]
instantiate(events)
egamma = UnionArray([0, 0, 1, 0, 1, 1, 1, 0, 0], [0, 1, 0, 2, 1, 2, 3, 3, 4], [
RecordArray({
"q": PrimitiveArray([1, -1, -1, 1, 1]),
"pt": PrimitiveArray([10, 20, 30, 40, 50])
}),
RecordArray({
"pt": PrimitiveArray([1.1, 2.2, 3.3, 4.4])
})
])
assert egamma == [
{'pt': 10, 'q': 1},
{'pt': 20, 'q': -1},
{'pt': 1.1},
{'pt': 30, 'q': -1},
{'pt': 2.2},
{'pt': 3.3},
{'pt': 4.4},
{'pt': 40, 'q': 1},
{'pt': 50, 'q': 1}]
assert egamma["pt"] == [10, 20, 1.1, 30, 2.2, 3.3, 4.4, 40, 50]
egamma.setindex()
assert egamma.contents[0].contents["pt"].row == [(0,), (1,), (3,), (7,), (8,)]
assert egamma.contents[1].contents["pt"].row == [(2,), (4,), (5,), (6,)]
assert egamma.contents[0].contents["pt"].col == ("pt",)
assert egamma.contents[1].contents["pt"].col == ("pt",)
instantiate(egamma)
| 2.484375 | 2 |
general/chainerrl/baselines/train_dqfd.py | marioyc/baselines | 127 | 12789107 | """original source: https://github.com/chainer/chainerrl/pull/480
MIT License
Copyright (c) Preferred Networks, Inc.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *
from future import standard_library
standard_library.install_aliases()
import argparse
from inspect import getsourcefile
import os
import sys
import numpy as np
import chainer
import minerl # noqa: register MineRL envs as Gym envs.
import gym
import chainerrl
from chainerrl import experiments, explorers
from chainerrl.experiments.evaluator import Evaluator
from dqfd import DQfD, PrioritizedDemoReplayBuffer
from q_functions import CNNBranchingQFunction
from env_wrappers import (
BranchedRandomizedAction, BranchedActionWrapper,
MoveAxisWrapper, FrameSkip, FrameStack, ObtainPoVWrapper,
PoVWithCompassAngleWrapper, FullObservationSpaceWrapper)
from expert_converter import choose_top_experts, fill_buffer
class ScaleGradHook(object):
name = 'ScaleGrad'
call_for_each_param = True
timing = 'pre'
def __init__(self, scale):
self.scale = scale
def __call__(self, rule, param):
if getattr(param, 'scale_param', False):
param.grad *= self.scale
def main():
"""Parses arguments and runs the example
"""
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='MineRLTreechop-v0',
choices=[
'MineRLTreechop-v0',
'MineRLNavigate-v0', 'MineRLNavigateDense-v0', 'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0',
'MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0',
'MineRLNavigateDenseFixed-v0' # for debug use
],
help='MineRL environment identifier')
parser.add_argument('--outdir', type=str, default='results',
help='Directory path to save output files.'
' If it does not exist, it will be created.')
parser.add_argument('--seed', type=int, default=0,
help='Random seed [0, 2 ** 31)')
parser.add_argument('--gpu', type=int, default=-1,
help='GPU to use, set to -1 if no GPU.')
parser.add_argument('--final-exploration-frames',
type=int, default=10**6,
help='Timesteps after which we stop ' +
'annealing exploration rate')
parser.add_argument('--final-epsilon', type=float, default=0.01,
help='Final value of epsilon during training.')
parser.add_argument('--eval-epsilon', type=float, default=0.001,
help='Exploration epsilon used during eval episodes.')
parser.add_argument('--replay-start-size', type=int, default=1000,
help='Minimum replay buffer size before ' +
'performing gradient updates.')
parser.add_argument('--target-update-interval', type=int, default=10**4,
help='Frequency (in timesteps) at which ' +
'the target network is updated.')
parser.add_argument('--update-interval', type=int, default=4,
help='Frequency (in timesteps) of network updates.')
parser.add_argument('--eval-n-runs', type=int, default=10)
parser.add_argument('--no-clip-delta',
dest='clip_delta', action='store_false')
parser.add_argument('--error-max', type=float, default=1.0)
parser.add_argument('--num-step-return', type=int, default=10)
parser.set_defaults(clip_delta=True)
parser.add_argument('--logging-level', type=int, default=20,
help='Logging level. 10:DEBUG, 20:INFO etc.')
parser.add_argument('--logging-filename', type=str, default=None)
parser.add_argument('--monitor', action='store_true', default=False,
help='Monitor env. Videos and additional information are saved as output files when evaluation')
# parser.add_argument('--render', action='store_true', default=False,
# help='Render env states in a GUI window.')
parser.add_argument('--optimizer', type=str, default='rmsprop',
choices=['rmsprop', 'adam'])
parser.add_argument('--lr', type=float, default=2.5e-4, help='Learning rate')
parser.add_argument("--replay-buffer-size", type=int, default=10**6,
help="Size of replay buffer (Excluding demonstrations)")
parser.add_argument("--minibatch-size", type=int, default=32)
parser.add_argument('--batch-accumulator', type=str, default="sum")
parser.add_argument('--demo', action='store_true', default=False)
parser.add_argument('--load', type=str, default=None)
parser.add_argument("--save-demo-trajectories", action="store_true",
default=False)
# DQfD specific parameters for loading and pretraining.
parser.add_argument('--n-experts', type=int, default=10)
parser.add_argument('--expert-demo-path', type=str, default=None)
parser.add_argument('--n-pretrain-steps', type=int, default=750000)
parser.add_argument('--demo-supervised-margin', type=float, default=0.8)
parser.add_argument('--loss-coeff-l2', type=float, default=1e-5)
parser.add_argument('--loss-coeff-nstep', type=float, default=1.0)
parser.add_argument('--loss-coeff-supervised', type=float, default=1.0)
parser.add_argument('--bonus-priority-agent', type=float, default=0.001)
parser.add_argument('--bonus-priority-demo', type=float, default=1.0)
# Action branching architecture
parser.add_argument('--gradient-clipping', action='store_true', default=False)
parser.add_argument('--gradient-rescaling', action='store_true', default=False)
# NoisyNet parameters
parser.add_argument('--use-noisy-net', type=str, default=None,
choices=['before-pretraining', 'after-pretraining'])
parser.add_argument('--noisy-net-sigma', type=float, default=0.5)
# Parameters for state/action handling
parser.add_argument('--frame-stack', type=int, default=None, help='Number of frames stacked (None for disable).')
parser.add_argument('--frame-skip', type=int, default=None, help='Number of frames skipped (None for disable).')
parser.add_argument('--camera-atomic-actions', type=int, default=10)
parser.add_argument('--max-range-of-camera', type=float, default=10.)
parser.add_argument('--use-full-observation', action='store_true', default=False)
args = parser.parse_args()
assert args.expert_demo_path is not None,"DQfD needs collected \
expert demonstrations"
import logging
if args.logging_filename is not None:
logging.basicConfig(filename=args.logging_filename, filemode='w',
level=args.logging_level)
else:
logging.basicConfig(level=args.logging_level)
logger = logging.getLogger(__name__)
train_seed = args.seed
test_seed = 2 ** 31 - 1 - args.seed
chainerrl.misc.set_random_seed(args.seed, gpus=(args.gpu,))
args.outdir = experiments.prepare_output_dir(args, args.outdir)
logger.info('Output files are saved in {}'.format(args.outdir))
if args.env == 'MineRLTreechop-v0':
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions]
elif args.env in ['MineRLNavigate-v0', 'MineRLNavigateDense-v0',
'MineRLNavigateExtreme-v0', 'MineRLNavigateExtremeDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 2]
elif args.env in ['MineRLObtainIronPickaxe-v0', 'MineRLObtainIronPickaxeDense-v0',
'MineRLObtainDiamond-v0', 'MineRLObtainDiamondDense-v0']:
branch_sizes = [9, 16, args.camera_atomic_actions, args.camera_atomic_actions, 32]
else:
raise Exception("Unknown environment")
def make_env(env, test):
# wrap env: observation...
# NOTE: wrapping order matters!
if args.use_full_observation:
env = FullObservationSpaceWrapper(env)
elif args.env.startswith('MineRLNavigate'):
env = PoVWithCompassAngleWrapper(env)
else:
env = ObtainPoVWrapper(env)
if test and args.monitor:
env = gym.wrappers.Monitor(
env, os.path.join(args.outdir, 'monitor'),
mode='evaluation' if test else 'training', video_callable=lambda episode_id: True)
if args.frame_skip is not None:
env = FrameSkip(env, skip=args.frame_skip)
# convert hwc -> chw as Chainer requires
env = MoveAxisWrapper(env, source=-1, destination=0,
use_tuple=args.use_full_observation)
#env = ScaledFloatFrame(env)
if args.frame_stack is not None:
env = FrameStack(env, args.frame_stack, channel_order='chw',
use_tuple=args.use_full_observation)
# wrap env: action...
env = BranchedActionWrapper(env, branch_sizes, args.camera_atomic_actions, args.max_range_of_camera)
if test:
env = BranchedRandomizedAction(env, branch_sizes, args.eval_epsilon)
env_seed = test_seed if test else train_seed
env.seed(int(env_seed))
return env
core_env = gym.make(args.env)
env = make_env(core_env, test=False)
eval_env = make_env(core_env, test=True)
# Q function
if args.env.startswith('MineRLNavigate'):
if args.use_full_observation:
base_channels = 3 # RGB
else:
base_channels = 4 # RGB + compass
elif args.env.startswith('MineRLObtain'):
base_channels = 3 # RGB
else:
base_channels = 3 # RGB
if args.frame_stack is None:
n_input_channels = base_channels
else:
n_input_channels = base_channels * args.frame_stack
q_func = CNNBranchingQFunction(branch_sizes,
n_input_channels=n_input_channels,
gradient_rescaling=args.gradient_rescaling,
use_tuple=args.use_full_observation)
def phi(x):
# observation -> NN input
if args.use_full_observation:
pov = np.asarray(x[0], dtype=np.float32)
others = np.asarray(x[1], dtype=np.float32)
return (pov / 255, others)
else:
return np.asarray(x, dtype=np.float32) / 255
explorer = explorers.LinearDecayEpsilonGreedy(
1.0, args.final_epsilon,
args.final_exploration_frames,
lambda: np.array([np.random.randint(n) for n in branch_sizes]))
# Draw the computational graph and save it in the output directory.
if args.use_full_observation:
sample_obs = tuple([x[None] for x in env.observation_space.sample()])
else:
sample_obs = env.observation_space.sample()[None]
chainerrl.misc.draw_computational_graph(
[q_func(phi(sample_obs))], os.path.join(args.outdir, 'model'))
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
if args.use_noisy_net is None:
opt.setup(q_func)
if args.gradient_rescaling:
opt.add_hook(ScaleGradHook(1 / (1 + len(q_func.branch_sizes))))
if args.gradient_clipping:
opt.add_hook(chainer.optimizer_hooks.GradientClipping(10.0))
# calculate corresponding `steps` and `eval_interval` according to frameskip
maximum_frames = 8640000 # = 1440 episodes if we count an episode as 6000 frames.
if args.frame_skip is None:
steps = maximum_frames
eval_interval = 6000 * 100 # (approx.) every 100 episode (counts "1 episode = 6000 steps")
else:
steps = maximum_frames // args.frame_skip
eval_interval = 6000 * 100 // args.frame_skip # (approx.) every 100 episode (counts "1 episode = 6000 steps")
# Anneal beta from beta0 to 1 throughout training
betasteps = steps / args.update_interval
replay_buffer = PrioritizedDemoReplayBuffer(
args.replay_buffer_size, alpha=0.4,
beta0=0.6, betasteps=betasteps,
error_max=args.error_max,
num_steps=args.num_step_return)
# Fill the demo buffer with expert transitions
if not args.demo:
chosen_dirs = choose_top_experts(args.expert_demo_path, args.n_experts,
logger=logger)
fill_buffer(args.env, chosen_dirs, replay_buffer, args.frame_skip,
args.frame_stack, args.camera_atomic_actions,
args.max_range_of_camera, args.use_full_observation,
logger=logger)
logger.info("Demo buffer loaded with {} transitions".format(
len(replay_buffer)))
def reward_transform(x):
return np.sign(x) * np.log(1 + np.abs(x))
if args.use_noisy_net is not None and args.use_noisy_net == 'before-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
opt.setup(q_func)
agent = DQfD(q_func, opt, replay_buffer,
gamma=0.99,
explorer=explorer,
n_pretrain_steps=args.n_pretrain_steps,
demo_supervised_margin=args.demo_supervised_margin,
bonus_priority_agent=args.bonus_priority_agent,
bonus_priority_demo=args.bonus_priority_demo,
loss_coeff_nstep=args.loss_coeff_nstep,
loss_coeff_supervised=args.loss_coeff_supervised,
loss_coeff_l2=args.loss_coeff_l2,
gpu=args.gpu,
replay_start_size=args.replay_start_size,
target_update_interval=args.target_update_interval,
clip_delta=args.clip_delta,
update_interval=args.update_interval,
batch_accumulator=args.batch_accumulator,
phi=phi, reward_transform=reward_transform,
minibatch_size=args.minibatch_size)
if args.use_noisy_net is not None and args.use_noisy_net == 'after-pretraining':
chainerrl.links.to_factorized_noisy(q_func, sigma_scale=args.noisy_net_sigma)
explorer = explorers.Greedy()
if args.optimizer == 'rmsprop':
opt = chainer.optimizers.RMSpropGraves(args.lr, alpha=0.95, momentum=0.0, eps=1e-2)
elif args.optimizer == 'adam':
opt = chainer.optimizers.Adam(args.lr)
opt.setup(q_func)
opt.add_hook(
chainer.optimizer_hooks.WeightDecay(args.loss_coeff_l2))
agent.optimizer = opt
agent.target_model = None
agent.sync_target_network()
if args.load:
agent.load(args.load)
if args.demo:
eval_stats = experiments.eval_performance(
env=eval_env, agent=agent, n_steps=None, n_episodes=args.eval_n_runs)
logger.info('n_runs: {} mean: {} median: {} stdev: {}'.format(
args.eval_n_runs, eval_stats['mean'], eval_stats['median'], eval_stats['stdev']))
else:
agent.pretrain()
evaluator = Evaluator(agent=agent,
n_steps=None,
n_episodes=args.eval_n_runs,
eval_interval=eval_interval,
outdir=args.outdir,
max_episode_len=None,
env=eval_env,
step_offset=0,
save_best_so_far_agent=True,
logger=logger)
# Evaluate the agent BEFORE training begins
evaluator.evaluate_and_update_max_score(t=0, episodes=0)
experiments.train_agent(agent=agent,
env=env,
steps=steps,
outdir=args.outdir,
max_episode_len=None,
step_offset=0,
evaluator=evaluator,
successful_score=None,
step_hooks=[])
env.close()
if __name__ == "__main__":
main()
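# Example invocation (illustrative; the demonstration path is a placeholder):
#   python train_dqfd.py --env MineRLTreechop-v0 --gpu 0 \
#       --expert-demo-path /path/to/expert_demos --outdir results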
| 2.125 | 2 |
pygraph/test/testutil.py | emliunix/pygraph | 0 | 12789108 | # -*- coding: utf-8 -*-
import unittest
from pygraph import util
class TestUtil(unittest.TestCase):
def test_pointsToEdges(self):
points = [(1, 1), (2, 2), (3, 3)]
expected = [
((1, 1), (2, 2)),
((2, 2), (3, 3)),
((3, 3), (1, 1))
]
self.assertListEqual(expected, util.pointsToEdges(points))
| 2.984375 | 3 |
modules/py/bindings/setup.py | ICHEC/QNLP | 29 | 12789109 | import setuptools
setuptools.setup(
name="PyQNLPSimulator",
version="0.1",
author="<NAME> (ICHEC), <NAME> (ICHEC)",
author_email="<EMAIL>, <EMAIL>",
description="Quantum NLP package",
long_description="Quantum NLP project @ ICHEC",
url="https://github.com/ichec/qnlp",
packages=setuptools.find_packages(),
package_data={'': ['_PyQNLPSimulator.*.so'],},
classifiers=[
"Programming Language :: Python :: 3",
],
)
| 1.210938 | 1 |
monitoring/plugins/ref_matching_threshold_real/run.py | CrossRef/reference-matching-evaluation | 14 | 12789110 | #!/usr/bin/env python3
import numpy as np
import matching.cr_search_validation_matcher
import utils.data_format_keys as dfk
import sys
from evaluation.link_metrics import LinkMetricsResults
from multiprocessing import Pool
from utils.utils import read_json, save_json
def modify_simple_threshold(dataset, threshold):
for item in dataset:
if item[dfk.DATASET_SCORE] is not None and \
item[dfk.DATASET_SCORE] < threshold:
item[dfk.DATASET_TARGET_TEST][dfk.CR_ITEM_DOI] = None
return dataset
def find_best(results):
overall = [r[1].get(dfk.EVAL_F1) for r in results]
index = len(overall) - overall[::-1].index(max(overall)) - 1
return index, results[index][0], results[index][1].get(dfk.EVAL_PREC), \
results[index][1].get(dfk.EVAL_REC), results[index][1].get(dfk.EVAL_F1)
dataset = read_json(sys.argv[1])['dataset']
matcher = matching.cr_search_validation_matcher.Matcher(0.4, 0, [])
with Pool(10) as p:
results = p.map(matcher.match,
[item.get('ref_string') for item in dataset])
for item, target in zip(dataset, results):
item['target_test']['DOI'] = target[0]
item['score'] = target[1]
save_json(dataset, sys.argv[2])
results_valid_threshold = \
[(t, LinkMetricsResults(modify_simple_threshold(dataset, t)))
for t in np.arange(0.0, 1.0, 0.01)]
print(','.join([str(i) for i in find_best(results_valid_threshold)[1:]]))
| 2.125 | 2 |
pymclevel/id_definitions_2.py | bennettdc/MCEdit-Unified | 237 | 12789111 | <gh_stars>100-1000
import os
import json
from logging import getLogger
import collections
#from pymclevel import MCEDIT_DEFS, MCEDIT_IDS
#import pymclevel
import re
#import id_definitions
log = getLogger(__name__)
def get_deps(base_version, file_name):
deps = [base_version]
print "Base: {}".format(base_version)
fp = open(os.path.join('mcver', base_version, file_name))
data = json.loads(fp.read())
fp.close()
if "load" in data:
deps.extend(get_deps(data["load"], file_name))
return deps
def update(orig_dict, new_dict):
for key, val in new_dict.iteritems():
if isinstance(val, collections.Mapping):
tmp = update(orig_dict.get(key, { }), val)
orig_dict[key] = tmp
elif isinstance(val, list):
orig_dict[key] = (orig_dict.get(key, []) + val)
else:
orig_dict[key] = new_dict[key]
return orig_dict
def aggregate(base_version, file_name):
deps = get_deps(base_version, file_name)
deps.reverse()
print deps
aggregate_data = {}
for dep in deps:
fp = open(os.path.join('mcver', dep, file_name))
data = json.loads(fp.read())
fp.close()
update(aggregate_data, data)
print aggregate_data
with open("out.json", 'wb') as out:
json.dump(aggregate_data, out)
#print get_deps("1.12", "entities.json")
aggregate("1.12", "entities.json")
| 2.28125 | 2 |
big_data/maps/convert_rates_tsv_to_json.py | paulhtremblay/big-data | 0 | 12789112 | import pprint
import json
pp = pprint.PrettyPrinter(indent = 4)
def convert(path):
with open(path, 'r') as read_obj:
line = 'init'
counter = 0
l = []
while line:
line = read_obj.readline()
counter += 1
if counter == 1:
continue
fields = line.split('\t')
if len(fields) != 2:
continue
l.append({'id':fields[0], 'rate':float(fields[1].strip())})
return l
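# Illustrative input/output (assumed TSV layout based on the parsing above): a data
# row such as "01001\t5.3" becomes {'id': '01001', 'rate': 5.3}; the header row is skipped.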
def main():
return convert('unemployment.tsv')
if __name__ == '__main__':
pp.pprint(main())
| 2.90625 | 3 |
models/nin_.py | billhhh/model-quantization-1 | 66 | 12789113 | <filename>models/nin_.py
import torch.nn as nn
import torch
import torch.nn.functional as F
from .quant import custom_conv
from .layers import norm, actv
class Net(nn.Module):
def __init__(self, args):
super(Net, self).__init__()
self.args = args
self.body = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=5, stride=1, padding=2),
norm(192, args),
actv(args),
custom_conv(192, 160, kernel_size=1, stride=1, padding=0, args=args,),
norm(160, args),
actv(args),
custom_conv(160, 96, kernel_size=1, stride=1, padding=0, args=args),
norm(96, args),
actv(args),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
custom_conv( 96, 192, kernel_size=5, stride=1, padding=2, args=args),
norm(192, args),
actv(args),
custom_conv(192, 192, kernel_size=1, stride=1, padding=0, args=args),
norm(192, args),
actv(args),
custom_conv(192, 192, kernel_size=1, stride=1, padding=0, args=args),
norm(192, args),
actv(args),
nn.AvgPool2d(kernel_size=3, stride=2, padding=1),
custom_conv(192, 192, kernel_size=3, stride=1, padding=1, args=args),
norm(192, args),
actv(args),
custom_conv(192, 192, kernel_size=1, stride=1, padding=0, args=args),
norm(192, args),
actv(args),
nn.Conv2d(192, 10, kernel_size=1, stride=1, padding=0),
norm(10, args),
actv(args),
nn.AvgPool2d(kernel_size=8, stride=1, padding=0),
)
def forward(self, x):
x = self.body(x)
x = x.view(x.size(0), self.args.num_classes)
return x
def nin(args=None):
model = Net(args)
return model
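# Construction sketch (illustrative; `args` is assumed to be the experiment namespace
# used elsewhere in this code base, carrying at least `num_classes` plus whatever
# `norm`, `actv` and `custom_conv` read from it):
#   model = nin(args)
#   logits = model(images)  # images: a (N, 3, 32, 32) CIFAR-like batch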
| 2.40625 | 2 |
sync_operation.py | judsoncrouch/photo_sync | 0 | 12789114 | from dirsync import sync
import win32api as win
import os
import click
def get_drive_names():
drives = win.GetLogicalDriveStrings()
drives = drives.split('\000')[:-1]
drive_map = {}
for d in drives:
drive_name = win.GetVolumeInformation(d)[0]
drive_map[drive_name] = d
return drive_map
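# Illustrative result (hypothetical drive letters): get_drive_names() maps volume
# labels to drive roots, e.g. {'Photos_A1': 'D:\\', 'Photos_A1_Backup': 'E:\\'}.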
def sync_raw_to_hdd(raw_dir, hdd):
print "Syncing RAW to %s\n..." % hdd
sync(raw_dir, hdd, "sync")
print " Complete\n"
def sync_catalog_to_backup(catalog, backup):
print "Syncing Catalog to %s\n..." % backup
sync(catalog, backup, "sync")
print " Complete\n"
def sync_hdd_to_backup(hdd, backup):
print "Syncing %s to %s\n..." % (hdd, backup)
sync(hdd, backup, "sync")
print " Complete\n"
@click.command()
@click.option('--raw', 'processes', flag_value='raw')
@click.option('--catalog', 'processes', flag_value='catalog')
@click.option('--all', 'processes', flag_value='all')
def main(processes):
raw_dir = r"E:\Pictures\RAW"
catalog_nm = "Catalog_JC"
hdd_nm = "Photos_A1"
backup_nm = "Photos_A1_Backup"
try:
drives = get_drive_names()
hdd = drives[hdd_nm]
backup = drives[backup_nm]
catalog = drives[catalog_nm]
if processes == 'raw':
sync_raw_to_hdd(raw_dir, hdd)
sync_hdd_to_backup(hdd, backup)
elif processes == 'catalog':
sync_catalog_to_backup(catalog, backup)
else:
sync_raw_to_hdd(raw_dir, hdd)
sync_hdd_to_backup(hdd, backup)
sync_catalog_to_backup(catalog, backup)
except KeyError:
print "Photo drives not found."
if __name__ == '__main__':
main() | 2.96875 | 3 |
fluxgapfill/metrics/metrics.py | stanfordmlgroup/methane-gapfill-ml | 8 | 12789115 | <gh_stars>1-10
import numpy as np
from sklearn.metrics import (
mean_squared_error,
mean_absolute_error,
r2_score
)
from scipy.stats import pearsonr
def pearson_r_squared(truth, prediction):
return pearsonr(truth, prediction)[0] ** 2
def reference_standard_dev(truth, prediction):
return np.std(truth)
def normalized_mean_absolute_error(truth, prediction):
return (mean_absolute_error(truth, prediction) /
reference_standard_dev(truth, prediction))
def bias(truth, prediction):
return (prediction - truth).mean()
metric_dict = {
"mse": mean_squared_error,
"mae": mean_absolute_error,
"nmae": normalized_mean_absolute_error,
"r2": r2_score,
"pr2": pearson_r_squared,
"bias": bias
}
def get_pred_interval(pred_dist):
return np.array([dist.dist.interval(0.95) for dist in pred_dist])
def calibration(truth, pred_dist):
pred_interval = get_pred_interval(pred_dist)
frac_of_truth_in_interval = (
(truth > pred_interval[:, 0]) &
(truth < pred_interval[:, 1])
).mean()
return frac_of_truth_in_interval
def sharpness(truth, pred_dist):
pred_interval = get_pred_interval(pred_dist)
widths = np.diff(pred_interval, axis=1)
return widths.mean()
def normalized_sharpness(truth, pred_dist):
pred_interval = get_pred_interval(pred_dist)
widths = np.diff(pred_interval, axis=1)
return widths.mean() / np.std(truth)
uncertainty_metric_dict = {
"calibration": calibration,
"sharpness": sharpness,
"normalized_sharpness": normalized_sharpness
}
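# Minimal usage sketch: scoring point predictions with the metric registry above.
# The uncertainty metrics are not exercised here because they expect a `pred_dist`
# whose elements expose a scipy frozen distribution via `.dist` (e.g. an
# NGBoost-style predictive distribution), which is only assumed in this sketch.
if __name__ == "__main__":
    truth = np.array([1.0, 2.0, 3.0, 4.0])
    prediction = np.array([1.1, 1.9, 3.2, 3.8])
    for name, metric_fn in metric_dict.items():
        print(name, metric_fn(truth, prediction))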
| 2.515625 | 3 |
versions/v1/v1_tb/aux_kfold.py | otavares93/rxpix2pix | 0 | 12789116 | <filename>versions/v1/v1_tb/aux_kfold.py<gh_stars>0
import os
import numpy as np
import argparse
from sklearn.model_selection import StratifiedKFold
#from data.image_folder import make_dataset
#from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import json
import pandas as pd
import numpy as np
import os, sys
import glob
import re
import hashlib
import pathlib
import cv2
# TO-DO: add isTB to the options
isTB = True
#from options.train_options import TrainOptions
#from data import create_dataset
#from models import create_model
#from rxwgan.models import *
#from rxwgan.wgangp import wgangp_optimizer
#from rxcore import stratified_train_val_test_splits
def run(command: object) -> object:
print(command)
exit_status = os.system(command)
if exit_status > 0:
exit(1)
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def expand_folder( path , extension):
l = glob.glob(path+'/*.'+extension)
l.sort()
return l
def get_md5(path):
return hashlib.md5(pathlib.Path(path).read_bytes()).hexdigest()
#import numpy as np
#
# Split train/val/test splits
#
def stratified_train_val_test_splits( df_kfold, seed=512 ):
cv_train_test = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
cv_train_val = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
sorts_train_test = []
for train_val_idx, test_idx in cv_train_test.split( df_kfold.values,df_kfold.target.values ):
train_val_df = df_kfold.iloc[train_val_idx]
sorts = []
for train_idx, val_idx in cv_train_val.split( train_val_df.values, train_val_df.target.values ):
sorts.append((train_val_df.index[train_idx].values, train_val_df.index[val_idx].values, test_idx))
sorts_train_test.append(sorts)
return sorts_train_test
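# Usage sketch (illustrative; mirrors how the splits are consumed further down):
#   splits = stratified_train_val_test_splits(df, seed=512)
#   train_idx, val_idx, test_idx = splits[0][0]
#   train_df, val_df, test_df = df.iloc[train_idx], df.iloc[val_idx], df.iloc[test_idx]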
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def prepare_my_table(clinical_path, images_path, masks_path, combine = False):
d = {
'target': [],
'image_ID': [],
'raw_image_path': [],
'mask_image_path': [],
'paired_image_path': [],
'raw_image_md5': [],
'age': [],
'sex': [],
'comment': [],
}
def treat_string(lines):
string = ''
for s in lines:
string += s.replace('\n', '').replace('\t', '')
return re.sub(' +', ' ', string)
for idx, path in enumerate(expand_folder(clinical_path, 'txt')):
with open(path, 'r') as f:
lines = f.readlines()
sex = 'male' if 'male' in lines[0] else 'female' # 1 for male and 0 for female
age = int(re.sub('\D', '', lines[0]))
# get TB by file name (_1.txt is PTB or _0.txt is NTB)
target = 1 if '_1.txt' in path else 0
filename = path.split('/')[-1]
image_filename = filename.replace('txt', 'png')
# image_path = images_path+('/tb/' if target else '/no_tb/')+image_filename
image_path = images_path + '/' + image_filename
d['target'].append(target)
d['age'].append(age)
d['sex'].append(sex)
d['raw_image_path'].append(image_path)
d['raw_image_md5'].append(get_md5(image_path))
d['mask_image_path'].append('')
d['paired_image_path'].append('')
d['comment'].append(treat_string(lines[1::]))
d['image_ID'].append(filename.replace('.txt', ''))
l_masks = make_dataset(masks_path)
for mask in l_masks:
if image_path[-17:] == mask[-17:]:
idx = np.where(np.array(d['raw_image_path']) == image_path)[0][0]
d['mask_image_path'][idx] = mask
            if combine:
path_paired = image_path[:-25] + 'foldAB'
path_paired_img = path_paired + '/' + image_path[-17:]
d['paired_image_path'][idx] = path_paired_img
if not os.path.isdir(path_paired):
os.makedirs(path_paired)
im_A = cv2.imread(image_path)
im_B = cv2.imread(mask)
im_AB = np.concatenate([im_B, im_A], 1)
cv2.imwrite(path_paired_img, im_AB)
return pd.DataFrame(d)
# NOTE: this is optional.
#from rxcore import allow_tf_growth
#allow_tf_growth()
#
# Start your job here
#
#job = json.load(open(args.job, 'r'))
#sort = job['sort']
#target = 1 # tb active
#test = job['test']
seed = 512
#epochs = 1000
#batch_size = 32
base_data_raw_path = '/Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
clinical_path = base_data_raw_path + '/ClinicalReadings'
images_path = base_data_raw_path + '/trainA'
masks_path = base_data_raw_path + '/trainB'
df = prepare_my_table(clinical_path, images_path, masks_path, combine = True)
splits = stratified_train_val_test_splits(df,seed)[0]
training_data = df.iloc[splits[0][0]]
validation_data = df.iloc[splits[0][1]]
if isTB:
train_tb = training_data.loc[df.target==1]
val_tb = validation_data.loc[df.target==1]
else:
train_ntb = training_data.loc[df.target==0]
val_ntb = validation_data.loc[df.target == 0]
#training_data = training_data.loc[training_data.target==target]
#validation_data = validation_data.loc[validation_data.target==target]
#extra_d = {'sort' : sort, 'test':test, 'target':target, 'seed':seed}
# Run!
#history = optimizer.fit( train_generator , val_generator, extra_d=extra_d, wandb=wandb )
combine_ab = 'python datasets/combine_A_and_B.py --fold_A /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainA --fold_B /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainB --fold_AB /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
run(combine_ab)
# pix2pix train/test
#train_cmd = 'python train.py --model pix2pix --name ' + 'test_%d_sort_%d'%(test,sort) + '--dataroot . --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1'
#run(train_cmd)
#run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
| 2.140625 | 2 |
libs/jsconsole/jsconsolescript.py | bugbound/webnuke | 23 | 12789117 | class JSConsoleScript:
def __init__(self, jsinjector):
self.version=0.1
self.jsinjector = jsinjector
self.jsinjector.add_help_topic('wn_help()', 'Shows WebNuke Help')
| 1.734375 | 2 |
flask_plots/core.py | juniors90/Flask-Plots | 1 | 12789118 | <reponame>juniors90/Flask-Plots
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This docstring was part of Matplotlib. All rights reserved.
# Full Text:
# https://matplotlib.org/stable/users/project/license.html
#
# This file is part of the
# Flask-Plots Project https://github.com/juniors90/Flask-Plots/
#
# Copyright (c) 2021, <NAME>
# License: MIT
# Full Text:
# https://github.com/juniors90/Flask-Plots/blob/master/LICENSE
#
# =============================================================================
# DOCS
# =============================================================================
"""Flask-Plots.
Implementation of Matplotlib in Flask.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import base64
import io
from flask import Blueprint, current_app
def raise_helper(message): # pragma: no cover
"""Handle for raise in jinja templates."""
raise RuntimeError(message)
class Plots(object):
"""Base extension class for different Plots versions.
.. versionadded:: 0.0.1
"""
static_folder = "plots"
# Generate the figure **without using pyplot**.
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Sample factory function for initialize the extension."""
app.config.setdefault("PLOTS_CMAP", "Greys")
app.config.setdefault("STATIC_FOLDER", "plots")
app.config.setdefault("BAR_HEIGHT", 50)
if not hasattr(app, "extensions"): # pragma: no cover
app.extensions = {}
app.extensions["plots"] = self
blueprint = Blueprint(
"plots",
__name__,
static_folder=f"static/{self.static_folder}",
static_url_path=f"{app.static_url_path}",
template_folder="templates",
)
app.register_blueprint(blueprint)
app.jinja_env.globals["plots"] = self
app.jinja_env.globals["raise"] = raise_helper
app.jinja_env.add_extension("jinja2.ext.do")
def get_data(self, fig, fmt="png", decode="ascii"):
"""
        Create image data for embedding the plot in the HTML output.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
        fmt : str, default: "png"
            The image format extension.
decode : str, default: "ascii"
A buffer decode.
"""
buf = io.BytesIO()
fig.savefig(buf, format=fmt)
data = base64.b64encode(buf.getbuffer()).decode(decode)
return data
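    # Usage sketch (illustrative; `Figure` comes from matplotlib.figure and `plots`
    # is the initialized extension): the returned base64 string can be embedded
    # directly in a template.
    #   fig = Figure()
    #   plots.hist(fig, x=[1, 2, 2, 3])
    #   data = plots.get_data(fig)
    #   # in Jinja: <img src="data:image/png;base64,{{ data }}"/>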
# Statistics plots: Plots for statistical analysis.
def hist(self, fig, x, ax=None, hist_kws=None):
"""
Plot a histogram using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
x : (n,) array or sequence of (n,) arrays
Input values, this takes either a single array or a sequence of
arrays which are not required to be of the same length.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
        hist_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
hist_kws = {} if hist_kws is None else hist_kws
ax.hist(x, **hist_kws)
return ax
def errorbar(self, fig, x, y, ax=None, errorbar_kws=None):
"""
Plot y versus x as lines and/or markers with attached errorbars.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
x, y : float or array-like
The data positions.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
errorbar_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
errorbar_kws = {} if errorbar_kws is None else errorbar_kws
ax.errorbar(x, y, **errorbar_kws)
return ax
def violinplot(
self, fig, dataset, positions, ax=None, violinplot_kws=None
):
"""
Make a violin plot using Matlotlib.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
dataset : Array or a sequence of vectors.
The input data.
positions : array-like, default: [1, 2, ..., n]
The positions of the violins. The ticks and limits are
automatically set to match the positions.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
violinplot_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
        vp : dict
            The dictionary of components returned by ``Axes.violinplot``.
"""
ax = fig.gca() if ax is None else ax
violinplot_kws = {} if violinplot_kws is None else violinplot_kws
vp = ax.violinplot(dataset, positions, **violinplot_kws)
return vp
def eventplot(self, fig, positions, ax=None, eventplot_kws=None):
"""
Plot identical parallel lines at the given positions.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
positions : array-like or list of array-like
A 1D array-like defines the positions of one sequence of events.
Multiple groups of events may be passed as a list of array-likes.
Each group can be styled independently by passing lists of values
to *lineoffsets*, *linelengths*, *linewidths*, *colors* and
*linestyles*.
Note that *positions* can be a 2D array, but in practice different
event groups usually have different counts so that one will use a
list of different-length arrays rather than a 2D array.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
eventplot_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
eventplot_kws = {} if eventplot_kws is None else eventplot_kws
ax.eventplot(positions, **eventplot_kws)
return ax
def hist2d(self, fig, x, y, ax=None, hist2d_kws=None):
"""
Make a 2D histogram plot using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
x, y : array-like, shape (n, )
Input values
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
hist2d_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
hist2d_kws = {} if hist2d_kws is None else hist2d_kws
ax.hist2d(x, y, **hist2d_kws)
return ax
def hexbin(self, fig, x, y, ax=None, hexbin_kws=None):
"""
Make a 2D hexagonal binning plot of points *x*, *y* using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
x, y : array-like
The data positions. *x* and *y* must be of the same length.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
hexbin_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
hexbin_kws = {} if hexbin_kws is None else hexbin_kws
ax.hexbin(x, y, **hexbin_kws)
return ax
def scatter_hist2d(
self, fig, x, y, ax=None, hist2d_kws=None, scatter_kws=None
):
"""
Make a 2D histogram plot using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
A instance of Figure Object.
x, y : array-like, shape (n, )
Input values
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
hist2d_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
scatter_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot in term scatter method.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
hist2d_kws = {} if hist2d_kws is None else hist2d_kws
scatter_kws = {} if scatter_kws is None else scatter_kws
hist2d_kws.setdefault("cmap", current_app.config["PLOTS_CMAP"])
ax.hist2d(x, y, **hist2d_kws)
ax.scatter(x, y, **scatter_kws)
return ax
def scatter_hexbin(
self, fig, x, y, ax=None, hexbin_kws=None, scatter_kws=None
):
"""
Make a 2D scatter-hexagonal binning plot of points *x*, *y*.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x, y : array-like
The data positions. *x* and *y* must be of the same length.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
hexbin_kws : ``dict`` or ``None`` (optional)
The parameters to send to the hexbin method.
scatter_kws : ``dict`` or ``None`` (optional)
The parameters to send to the scatter method.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
hexbin_kws = {} if hexbin_kws is None else hexbin_kws
scatter_kws = {} if scatter_kws is None else scatter_kws
hexbin_kws.setdefault("cmap", current_app.config["PLOTS_CMAP"])
ax.hexbin(x, y, **hexbin_kws)
ax.scatter(x, y, **scatter_kws)
return ax
def bar(self, fig, x, bar_height=None, ax=None, bar_kws=None):
"""
Make a bar plot using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x : float or array-like
The x coordinates of the bars. See also *align* for the
alignment of the bars to the coordinates.
bar_height : float or array-like,
The height(s) of the bars. You can configure this value
using ``app.config["BAR_HEIGHT"]``.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
bar_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
bar_kws = {} if bar_kws is None else bar_kws
bar_height = (
current_app.config["BAR_HEIGHT"]
if bar_height is None
else bar_height
)
ax.bar(x, bar_height, **bar_kws)
return ax
def pie(self, fig, x, ax=None, pie_kws=None):
"""
Make a pie plot using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x : 1D array-like
The wedge sizes.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
pie_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
pie_kws = {} if pie_kws is None else pie_kws
ax.pie(x, **pie_kws)
return ax
def boxplot(self, fig, x, ax=None, boxplot_kws=None):
"""
Draw a box and whisker plot using Matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x : Array or a sequence of vectors.
The input data. If a 2D array, a boxplot is drawn for each column
in *x*. If a sequence of 1D arrays, a boxplot is drawn for each
array in *x*.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
boxplot_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
boxplot_kws = {} if boxplot_kws is None else boxplot_kws
ax.boxplot(x, **boxplot_kws)
return ax
def quiver(self, fig, x, y, u, v, ax=None, quiver_kws=None):
"""
Plot a 2D field of arrows using matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x, y : 1D or 2D array-like, optional
The x and y coordinates of the arrow locations.
If not given, they will be generated as a uniform integer meshgrid
based on the dimensions of *u* and *v*.
If *x* and *y* are 1D but *u*, *v* are 2D, *x*, *y* are expanded
to 2D using ``x, y = np.meshgrid(x, y)``. In this case ``len(x)``
and ``len(y)`` must match the column and row dimensions of
*u* and *v*.
u, v : 1D or 2D array-like
The x and y direction components of the arrow vectors.
They must have the same number of elements, matching the
number of arrow locations. *u* and *v* may be masked. Only
locations unmasked in *u*, *v*, and *C* will be drawn.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
quiver_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
quiver_kws = {} if quiver_kws is None else quiver_kws
ax.quiver(x, y, u, v, **quiver_kws)
return ax
def streamplot(self, fig, x, y, u, v, ax=None, streamplot_kws=None):
"""
Draw streamlines of a vector flow using matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x, y : 1D/2D arrays
Evenly spaced strictly increasing arrays to make a grid.
If 2D, all rows of *x* must be equal and all columns of
*y* must be equal; i.e., they must be as if generated
by ``np.meshgrid(x_1d, y_1d)``.
u, v : 2D arrays
*x* and *y*-velocities. The number of rows and columns
must match the length of *y* and *x*, respectively.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
streamplot_kws : ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
"""
ax = fig.gca() if ax is None else ax
streamplot_kws = {} if streamplot_kws is None else streamplot_kws
ax.streamplot(x, y, u, v, **streamplot_kws)
return ax
def contourf(self, fig, x, y, z, levels, ax=None, contourf_kws=None):
"""
Plot filled contours using matplotlib.
Parameters
----------
fig : matplotlib.Figure
An instance of a Figure object.
x, y : array-like, optional
The coordinates of the values in *z*.
*x* and *y* must both be 2D with the same shape as *z* (e.g.
created via `numpy.meshgrid`), or they must both be 1-D such
that ``len(x) == N`` is the number of columns in *z* and
``len(y) == M`` is the number of rows in *z*.
*X* and *Y* must both be ordered monotonically.
If not given, they are assumed to be integer indices, i.e.
``x = range(N)``, ``y = range(M)``.
levels : int or array-like, optional
Determines the number and positions of the contour lines / regions.
If an int *n*, use `~matplotlib.ticker.MaxNLocator`, which tries
to automatically choose no more than *n+1* "nice" contour levels
between *vmin* and *vmax*.
If array-like, draw contour lines at the specified levels.
The values must be in increasing order.
ax : matplotlib.Figure.Axis, (optional)
A matplotlib axis.
contourf_kws: ``dict`` or ``None`` (optional)
The parameters to send to the data plot.
Returns
-------
ax : matplotlib.Figure.Axis
A matplotlib axis.
"""
ax = fig.gca() if ax is None else ax
contourf_kws = {} if contourf_kws is None else contourf_kws
ax.contourf(x, y, z, levels, **contourf_kws)
return ax
| 1.6875 | 2 |
m26-py/m26/speed.py | cjoakim/oss | 0 | 12789119 | __author__ = 'cjoakim'
import math
from .elapsed_time import ElapsedTime
class Speed(object):
def __init__(self, d, et):
self.dist = d # an instance of Distance
self.etime = et # an instance of ElapsedTime
def mph(self):
return self.dist.as_miles() / self.etime.hours()
def kph(self):
return self.dist.as_kilometers() / self.etime.hours()
def yph(self):
return self.dist.as_yards() / self.etime.hours()
def pace_per_mile(self):
spm = self.seconds_per_mile()
mm = math.floor(spm / 60.0)
ss = spm - (mm * 60.0)
if ss < 10:
ss = "0{0}".format(ss)
else:
ss = "{0}".format(ss)
if len(ss) > 5:
ss = ss[0:5]
return "{0}:{1}".format(mm, ss)
def seconds_per_mile(self):
return float(self.etime.secs / self.dist.as_miles())
def projected_time(self, another_distance, algorithm='simple'):
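# 'riegel' applies Riegel's endurance formula t2 = t1 * (d2 / d1) ** 1.06;
# the default ('simple') just scales the current per-mile pace to the new distance.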
if algorithm == 'riegel':
t1 = float(self.etime.secs)
d1 = self.dist.as_miles()
d2 = another_distance.as_miles()
t2 = t1 * math.pow(float(d2 / d1), float(1.06))
et = ElapsedTime(t2)
return et.as_hhmmss()
else:
secs = float(self.seconds_per_mile() * another_distance.as_miles())
et = ElapsedTime(secs)
return et.as_hhmmss()
def age_graded(self, event_age, graded_age):
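# Scale the elapsed time by the ratio of the two ages' max pulses to get an age-graded equivalent performance.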
ag_factor = event_age.max_pulse() / graded_age.max_pulse()
graded_secs = float((self.etime.secs)) * float(ag_factor)
graded_et = ElapsedTime(graded_secs)
return Speed(self.dist, graded_et)
def __str__(self):
template = "<Speed dist:{0} etime:{1}>"
return template.format(self.dist, self.etime)
| 3.46875 | 3 |
huskar_api/models/instance/__init__.py | mowangdk/huskar | 59 | 12789120 | from __future__ import absolute_import
from .management import InstanceManagement
from .schema import InfraInfo
__all__ = ['InstanceManagement', 'InfraInfo']
| 1.023438 | 1 |
cogs/orb_control.py | azsry/orb | 3 | 12789121 | <reponame>azsry/orb
"""
Handles general control tasks and acts as a function generaliser for other commands. Also contains some admin-only commands and runs the database
"""
import discord
from discord.ext import commands as bot_commands
import csv
import os
from google.cloud import firestore
from utils import repo, logger
# Verifies if the command is allowed to be executed
# This is a utility function and shouldn't be called on it's own (hence the lack of .command decorator)
# Not async because a). Incredibly low complexity (aka fast)
# b). This is a priority to execute
# c). Nothing would be awaited in here so an async function would work the same as a regular one
def allowed_channel(ctx):
if ctx.channel.id in BANNED_CHANNELS:
return False
else:
return True
# Connects to Cloud Firestore
logger.log_and_print("control", "Verifiying with server")
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]=repo.config['keys']['firestore_key_path']
db = firestore.Client()
logger.log_and_print("control", "Connected to Google Cloud Firestore")
BANNED_CHANNELS = []
ADMINS = repo.config['controllers']
with open("data/banned_channels.csv", mode="r") as file:
reader = csv.reader(file, delimiter=",")
for line in reader:
try:
BANNED_CHANNELS.append(int(line[0]))
except:
pass
class ControlCog(bot_commands.Cog):
def __init__(self, bot):
self.bot = bot
# Manual speaking
@bot_commands.command()
async def say(self, ctx, channel, *, target=None):
if str(ctx.author.id) in ADMINS:
if target == None:
await ctx.send("Need a message")
else:
channel = self.bot.get_channel(int(channel))
logger.log_and_print("control", f"Sent message {str(target)} to channel {str(channel)}")
await channel.send(target)
# Update banned channels
@bot_commands.command()
async def update_banned(self, ctx):
if ctx.author.id in ADMINS:
BANNED_CHANNELS = []
with open("data/banned_channels.csv", mode="r", newline="") as file:
reader = csv.reader(file, delimiter=",")
for line in reader:
BANNED_CHANNELS.append(int(line[0]))
logger.log_and_print("control", "Updated internal banned channels list")
await ctx.send("Done!")
# Add a new banned channel
@bot_commands.command()
async def add_banned(self, ctx, target, *, comment=None):
if ctx.author.id in ADMINS:
logger.log_and_print("control", f"Adding {target} to banned channels list with comment {comment}")
with open("data/banned_channels.csv", mode="a", newline="") as file:
writer = csv.writer(file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([target, str(" # " + str(comment))])
await ctx.send("Done!")
# Remove a banned channel
@bot_commands.command()
async def remove_banned(self, ctx, target):
if ctx.author.id in ADMINS:
logger.log_and_print("control", f"Removing {target} from banned channels list")
temp = []
with open("data/banned_channels.csv", mode="r", newline="") as file:
try:
temp = list(csv.reader(file, delimiter=","))
except:
pass
with open("data/banned_channels.csv", mode="w", newline="") as file:
writer = csv.writer(file, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL)
for line in temp:
try:
if line[0] != target:
try:
writer.writerow(line)
except:
pass
except:
pass
temp=[]
await ctx.send("Done!")
# Lists banned channels
@bot_commands.command()
async def list_banned(self, ctx):
if ctx.author.id in ADMINS:
output = ""
with open("data/banned_channels.csv", mode="r", newline="") as file:
for line in file:
output += "Channel: " + line + "\n"
await ctx.send(output)
def setup(bot):
bot.add_cog(ControlCog(bot))
| 2.734375 | 3 |
customer/migrations/0008_contact.py | martinlehoux/erp-reloaded | 0 | 12789122 | <reponame>martinlehoux/erp-reloaded
# Generated by Django 3.0.3 on 2020-03-02 02:54
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
dependencies = [
('customer', '0007_customer_logo'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=256)),
('last_name', models.CharField(max_length=256)),
('email', models.EmailField(blank=True, max_length=254)),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128, region=None)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='customer.Customer')),
],
),
]
| 1.734375 | 2 |
script/my_codes/train.py | ThivakaranThana/AlignedReid-Reproduction-Pytorch | 0 | 12789123 | <gh_stars>0
"""Train with optional Global Distance, Local Distance, Identification Loss."""
from __future__ import print_function
import sys
sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
from torch.nn.parallel import DataParallel
import time
import os.path as osp
from tensorboardX import SummaryWriter
import numpy as np
import argparse
from aligned_reid.dataset import create_dataset
from aligned_reid.model.Model import Model
from aligned_reid.model.TripletLoss import TripletLoss
from aligned_reid.model.loss import global_loss
from aligned_reid.model.loss import local_loss
from aligned_reid.utils.utils import time_str
from aligned_reid.utils.utils import str2bool
from aligned_reid.utils.utils import tight_float_str as tfs
from aligned_reid.utils.utils import may_set_mode
from aligned_reid.utils.utils import load_state_dict
from aligned_reid.utils.utils import load_ckpt
from aligned_reid.utils.utils import save_ckpt
from aligned_reid.utils.utils import set_devices
from aligned_reid.utils.utils import AverageMeter
from aligned_reid.utils.utils import to_scalar
from aligned_reid.utils.utils import ReDirectSTD
from aligned_reid.utils.utils import set_seed
from aligned_reid.utils.utils import adjust_lr_exp
from aligned_reid.utils.utils import adjust_lr_staircase
class Config(object):
def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--sys_device_ids', type=eval, default=(0,))
parser.add_argument('-r', '--run', type=int, default=1)
parser.add_argument('--set_seed', type=str2bool, default=False)
parser.add_argument('--dataset', type=str, default='custom_input',
choices=['market1501', 'cuhk03', 'duke', 'custom_input', 'combined'])
parser.add_argument('--trainset_part', type=str, default='trainval',
choices=['trainval', 'train'])
# Only for training set.
parser.add_argument('--resize_h_w', type=eval, default=(256, 128))
parser.add_argument('--crop_prob', type=float, default=0)
parser.add_argument('--crop_ratio', type=float, default=1)
parser.add_argument('--ids_per_batch', type=int, default=32)
parser.add_argument('--ims_per_id', type=int, default=4)
parser.add_argument('--log_to_file', type=str2bool, default=True)
parser.add_argument('--normalize_feature', type=str2bool, default=False)
parser.add_argument('--local_dist_own_hard_sample',
type=str2bool, default=False)
parser.add_argument('-gm', '--global_margin', type=float, default=0.3)
parser.add_argument('-lm', '--local_margin', type=float, default=0.3)
parser.add_argument('-glw', '--g_loss_weight', type=float, default=1.)
parser.add_argument('-llw', '--l_loss_weight', type=float, default=0.)
parser.add_argument('-idlw', '--id_loss_weight', type=float, default=0.)
parser.add_argument('--only_test', type=str2bool, default=True)
parser.add_argument('--resume', type=str2bool, default=False)
parser.add_argument('--exp_dir', type=str, default='/home/niruhan/AlignedReID-Re-Production-Pytorch/my_codes')
parser.add_argument('--model_weight_file', type=str,
default='/home/niruhan/AlignedReID-Re-Production-Pytorch/model_weight.pth')
parser.add_argument('--base_lr', type=float, default=2e-4)
parser.add_argument('--lr_decay_type', type=str, default='exp',
choices=['exp', 'staircase'])
parser.add_argument('--exp_decay_at_epoch', type=int, default=76)
parser.add_argument('--staircase_decay_at_epochs',
type=eval, default=(101, 201,))
parser.add_argument('--staircase_decay_multiply_factor',
type=float, default=0.1)
parser.add_argument('--total_epochs', type=int, default=150)
args = parser.parse_known_args()[0]
# gpu ids
self.sys_device_ids = args.sys_device_ids
if args.set_seed:
self.seed = 1
else:
self.seed = None
# The experiments can be run for several times and performances be averaged.
# `run` starts from `1`, not `0`.
self.run = args.run
###########
# Dataset #
###########
# If you want to exactly reproduce the result in training, you have to set
# num of threads to 1.
if self.seed is not None:
self.prefetch_threads = 1
else:
self.prefetch_threads = 2
self.dataset = args.dataset
self.trainset_part = args.trainset_part
# Image Processing
# Just for training set
self.crop_prob = args.crop_prob
self.crop_ratio = args.crop_ratio
self.resize_h_w = args.resize_h_w
# Whether to scale by 1/255
self.scale_im = True
self.im_mean = [0.486, 0.459, 0.408]
self.im_std = [0.229, 0.224, 0.225]
self.ids_per_batch = args.ids_per_batch
self.ims_per_id = args.ims_per_id
self.train_final_batch = False
self.train_mirror_type = ['random', 'always', None][0]
self.train_shuffle = True
self.test_batch_size = 32
self.test_final_batch = True
self.test_mirror_type = ['random', 'always', None][2]
self.test_shuffle = False
dataset_kwargs = dict(
name=self.dataset,
resize_h_w=self.resize_h_w,
scale=self.scale_im,
im_mean=self.im_mean,
im_std=self.im_std,
batch_dims='NCHW',
num_prefetch_threads=self.prefetch_threads)
prng = np.random
if self.seed is not None:
prng = np.random.RandomState(self.seed)
self.train_set_kwargs = dict(
part=self.trainset_part,
ids_per_batch=self.ids_per_batch,
ims_per_id=self.ims_per_id,
final_batch=self.train_final_batch,
shuffle=self.train_shuffle,
crop_prob=self.crop_prob,
crop_ratio=self.crop_ratio,
mirror_type=self.train_mirror_type,
prng=prng)
self.train_set_kwargs.update(dataset_kwargs)
prng = np.random
if self.seed is not None:
prng = np.random.RandomState(self.seed)
self.test_set_kwargs = dict(
part='test',
batch_size=self.test_batch_size,
final_batch=self.test_final_batch,
shuffle=self.test_shuffle,
mirror_type=self.test_mirror_type,
prng=prng)
self.test_set_kwargs.update(dataset_kwargs)
###############
# ReID Model #
###############
self.local_dist_own_hard_sample = args.local_dist_own_hard_sample
self.normalize_feature = args.normalize_feature
self.local_conv_out_channels = 128
self.global_margin = args.global_margin
self.local_margin = args.local_margin
# Identification Loss weight
self.id_loss_weight = args.id_loss_weight
# global loss weight
self.g_loss_weight = args.g_loss_weight
# local loss weight
self.l_loss_weight = args.l_loss_weight
#############
# Training #
#############
self.weight_decay = 0.0005
# Initial learning rate
self.base_lr = args.base_lr
self.lr_decay_type = args.lr_decay_type
self.exp_decay_at_epoch = args.exp_decay_at_epoch
self.staircase_decay_at_epochs = args.staircase_decay_at_epochs
self.staircase_decay_multiply_factor = args.staircase_decay_multiply_factor
# Number of epochs to train
self.total_epochs = args.total_epochs
# How often (in batches) to log. If only need to log the average
# information for each epoch, set this to a large value, e.g. 1e10.
self.log_steps = 1e10
# Only test and without training.
self.only_test = args.only_test
self.resume = args.resume
#######
# Log #
#######
# If True,
# 1) stdout and stderr will be redirected to file,
# 2) training loss etc will be written to tensorboard,
# 3) checkpoint will be saved
self.log_to_file = args.log_to_file
# The root dir of logs.
if args.exp_dir == '':
self.exp_dir = osp.join(
'exp/train',
'{}'.format(self.dataset),
#
('nf_' if self.normalize_feature else 'not_nf_') +
('ohs_' if self.local_dist_own_hard_sample else 'not_ohs_') +
'gm_{}_'.format(tfs(self.global_margin)) +
'lm_{}_'.format(tfs(self.local_margin)) +
'glw_{}_'.format(tfs(self.g_loss_weight)) +
'llw_{}_'.format(tfs(self.l_loss_weight)) +
'idlw_{}_'.format(tfs(self.id_loss_weight)) +
'lr_{}_'.format(tfs(self.base_lr)) +
'{}_'.format(self.lr_decay_type) +
('decay_at_{}_'.format(self.exp_decay_at_epoch)
if self.lr_decay_type == 'exp'
else 'decay_at_{}_factor_{}_'.format(
'_'.join([str(e) for e in args.staircase_decay_at_epochs]),
tfs(self.staircase_decay_multiply_factor))) +
'total_{}'.format(self.total_epochs),
#
'run{}'.format(self.run),
)
else:
self.exp_dir = args.exp_dir
self.stdout_file = osp.join(
self.exp_dir, 'stdout_{}.txt'.format(time_str()))
self.stderr_file = osp.join(
self.exp_dir, 'stderr_{}.txt'.format(time_str()))
# Saving model weights and optimizer states, for resuming.
self.ckpt_file = osp.join(self.exp_dir, 'ckpt.pth')
# Just for loading a pretrained model; no optimizer states is needed.
self.model_weight_file = args.model_weight_file
class ExtractFeature(object):
"""A function to be called in the val/test set, to extract features.
Args:
TVT: A callable to transfer images to specific device.
"""
def __init__(self, model, TVT):
self.model = model
self.TVT = TVT
def __call__(self, ims):
old_train_eval_model = self.model.training
# Set eval mode.
# Force all BN layers to use global mean and variance, also disable
# dropout.
self.model.eval()
ims = Variable(self.TVT(torch.from_numpy(ims).float()))
global_feat, local_feat = self.model(ims)[:2]
global_feat = global_feat.data.cpu().numpy()
local_feat = local_feat.data.cpu().numpy()
# Restore the model to its old train/eval mode.
self.model.train(old_train_eval_model)
return global_feat, local_feat
def main():
cfg = Config()
# Redirect logs to both console and file.
if cfg.log_to_file:
ReDirectSTD(cfg.stdout_file, 'stdout', False)
ReDirectSTD(cfg.stderr_file, 'stderr', False)
# Lazily create SummaryWriter
writer = None
TVT, TMO = set_devices(cfg.sys_device_ids)
if cfg.seed is not None:
set_seed(cfg.seed)
# Dump the configurations to log.
import pprint
print('-' * 60)
print('cfg.__dict__')
pprint.pprint(cfg.__dict__)
print('-' * 60)
###########
# Dataset #
###########
train_set = create_dataset(**cfg.train_set_kwargs)
test_sets = []
test_set_names = []
if cfg.dataset == 'combined':
for name in ['market1501', 'cuhk03', 'duke']:
cfg.test_set_kwargs['name'] = name
test_sets.append(create_dataset(**cfg.test_set_kwargs))
test_set_names.append(name)
else:
test_sets.append(create_dataset(**cfg.test_set_kwargs))
test_set_names.append(cfg.dataset)
###########
# Models #
###########
model = Model(local_conv_out_channels=cfg.local_conv_out_channels,
num_classes=len(train_set.ids2labels))
# Model wrapper
model_w = DataParallel(model)
#############################
# Criteria and Optimizers #
#############################
id_criterion = nn.CrossEntropyLoss()
g_tri_loss = TripletLoss(margin=cfg.global_margin)
l_tri_loss = TripletLoss(margin=cfg.local_margin)
optimizer = optim.Adam(model.parameters(),
lr=cfg.base_lr,
weight_decay=cfg.weight_decay)
# Bind them together just to save some codes in the following usage.
modules_optims = [model, optimizer]
################################
# May Resume Models and Optims #
################################
if cfg.resume:
resume_ep, scores = load_ckpt(modules_optims, cfg.ckpt_file)
# May Transfer Models and Optims to Specified Device. Transferring optimizer
# is to cope with the case when you load the checkpoint to a new device.
TMO(modules_optims)
########
# Test #
########
def test(load_model_weight=False):
if load_model_weight:
if cfg.model_weight_file != '':
map_location = (lambda storage, loc: storage)
sd = torch.load(cfg.model_weight_file, map_location=map_location)
load_state_dict(model, sd)
print('Loaded model weights from {}'.format(cfg.model_weight_file))
else:
load_ckpt(modules_optims, cfg.ckpt_file)
use_local_distance = (cfg.l_loss_weight > 0) \
and cfg.local_dist_own_hard_sample
for test_set, name in zip(test_sets, test_set_names):
test_set.set_feat_func(ExtractFeature(model_w, TVT))
print('\n=========> Test on dataset: {} <=========\n'.format(name))
best_match = test_set.predict(
normalize_feat=cfg.normalize_feature,
use_local_distance=True)
count = 0
for i in range(len(test_set.marks)):
if test_set.marks[i] == 1:
if count == best_match:
best_match_id = test_set.im_names[i][:8]
count += 1
print(best_match_id)
if cfg.only_test:
test(load_model_weight=True)
return
############
# Training #
############
start_ep = resume_ep if cfg.resume else 0
for ep in range(start_ep, cfg.total_epochs):
# Adjust Learning Rate
if cfg.lr_decay_type == 'exp':
adjust_lr_exp(
optimizer,
cfg.base_lr,
ep + 1,
cfg.total_epochs,
cfg.exp_decay_at_epoch)
else:
adjust_lr_staircase(
optimizer,
cfg.base_lr,
ep + 1,
cfg.staircase_decay_at_epochs,
cfg.staircase_decay_multiply_factor)
may_set_mode(modules_optims, 'train')
g_prec_meter = AverageMeter()
g_m_meter = AverageMeter()
g_dist_ap_meter = AverageMeter()
g_dist_an_meter = AverageMeter()
g_loss_meter = AverageMeter()
l_prec_meter = AverageMeter()
l_m_meter = AverageMeter()
l_dist_ap_meter = AverageMeter()
l_dist_an_meter = AverageMeter()
l_loss_meter = AverageMeter()
id_loss_meter = AverageMeter()
loss_meter = AverageMeter()
ep_st = time.time()
step = 0
epoch_done = False
while not epoch_done:
step += 1
step_st = time.time()
ims, im_names, labels, mirrored, epoch_done = train_set.next_batch()
ims_var = Variable(TVT(torch.from_numpy(ims).float()))
labels_t = TVT(torch.from_numpy(labels).long())
labels_var = Variable(labels_t)
global_feat, local_feat, logits = model_w(ims_var)
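# global_loss returns the hard-mined triplet loss together with the hard positive/negative
# indices and distances; the indices are reused by the local loss below.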
g_loss, p_inds, n_inds, g_dist_ap, g_dist_an, g_dist_mat = global_loss(
g_tri_loss, global_feat, labels_t,
normalize_feature=cfg.normalize_feature)
if cfg.l_loss_weight == 0:
l_loss = 0
elif cfg.local_dist_own_hard_sample:
# Let local distance find its own hard samples.
l_loss, l_dist_ap, l_dist_an, _ = local_loss(
l_tri_loss, local_feat, None, None, labels_t,
normalize_feature=cfg.normalize_feature)
else:
l_loss, l_dist_ap, l_dist_an = local_loss(
l_tri_loss, local_feat, p_inds, n_inds, labels_t,
normalize_feature=cfg.normalize_feature)
id_loss = 0
if cfg.id_loss_weight > 0:
id_loss = id_criterion(logits, labels_var)
loss = g_loss * cfg.g_loss_weight \
+ l_loss * cfg.l_loss_weight \
+ id_loss * cfg.id_loss_weight
optimizer.zero_grad()
loss.backward()
optimizer.step()
############
# Step Log #
############
# precision
g_prec = (g_dist_an > g_dist_ap).data.float().mean()
# the proportion of triplets that satisfy margin
g_m = (g_dist_an > g_dist_ap + cfg.global_margin).data.float().mean()
g_d_ap = g_dist_ap.data.mean()
g_d_an = g_dist_an.data.mean()
g_prec_meter.update(g_prec)
g_m_meter.update(g_m)
g_dist_ap_meter.update(g_d_ap)
g_dist_an_meter.update(g_d_an)
g_loss_meter.update(to_scalar(g_loss))
if cfg.l_loss_weight > 0:
# precision
l_prec = (l_dist_an > l_dist_ap).data.float().mean()
# the proportion of triplets that satisfy margin
l_m = (l_dist_an > l_dist_ap + cfg.local_margin).data.float().mean()
l_d_ap = l_dist_ap.data.mean()
l_d_an = l_dist_an.data.mean()
l_prec_meter.update(l_prec)
l_m_meter.update(l_m)
l_dist_ap_meter.update(l_d_ap)
l_dist_an_meter.update(l_d_an)
l_loss_meter.update(to_scalar(l_loss))
if cfg.id_loss_weight > 0:
id_loss_meter.update(to_scalar(id_loss))
loss_meter.update(to_scalar(loss))
if step % cfg.log_steps == 0:
time_log = '\tStep {}/Ep {}, {:.2f}s'.format(
step, ep + 1, time.time() - step_st, )
if cfg.g_loss_weight > 0:
g_log = (', gp {:.2%}, gm {:.2%}, '
'gd_ap {:.4f}, gd_an {:.4f}, '
'gL {:.4f}'.format(
g_prec_meter.val, g_m_meter.val,
g_dist_ap_meter.val, g_dist_an_meter.val,
g_loss_meter.val, ))
else:
g_log = ''
if cfg.l_loss_weight > 0:
l_log = (', lp {:.2%}, lm {:.2%}, '
'ld_ap {:.4f}, ld_an {:.4f}, '
'lL {:.4f}'.format(
l_prec_meter.val, l_m_meter.val,
l_dist_ap_meter.val, l_dist_an_meter.val,
l_loss_meter.val, ))
else:
l_log = ''
if cfg.id_loss_weight > 0:
id_log = (', idL {:.4f}'.format(id_loss_meter.val))
else:
id_log = ''
total_loss_log = ', loss {:.4f}'.format(loss_meter.val)
log = time_log + \
g_log + l_log + id_log + \
total_loss_log
print(log)
#############
# Epoch Log #
#############
time_log = 'Ep {}, {:.2f}s'.format(ep + 1, time.time() - ep_st, )
if cfg.g_loss_weight > 0:
g_log = (', gp {:.2%}, gm {:.2%}, '
'gd_ap {:.4f}, gd_an {:.4f}, '
'gL {:.4f}'.format(
g_prec_meter.avg, g_m_meter.avg,
g_dist_ap_meter.avg, g_dist_an_meter.avg,
g_loss_meter.avg, ))
else:
g_log = ''
if cfg.l_loss_weight > 0:
l_log = (', lp {:.2%}, lm {:.2%}, '
'ld_ap {:.4f}, ld_an {:.4f}, '
'lL {:.4f}'.format(
l_prec_meter.avg, l_m_meter.avg,
l_dist_ap_meter.avg, l_dist_an_meter.avg,
l_loss_meter.avg, ))
else:
l_log = ''
if cfg.id_loss_weight > 0:
id_log = (', idL {:.4f}'.format(id_loss_meter.avg))
else:
id_log = ''
total_loss_log = ', loss {:.4f}'.format(loss_meter.avg)
log = time_log + \
g_log + l_log + id_log + \
total_loss_log
print(log)
# Log to TensorBoard
if cfg.log_to_file:
if writer is None:
writer = SummaryWriter(log_dir=osp.join(cfg.exp_dir, 'tensorboard'))
writer.add_scalars(
'loss',
dict(global_loss=g_loss_meter.avg,
local_loss=l_loss_meter.avg,
id_loss=id_loss_meter.avg,
loss=loss_meter.avg, ),
ep)
writer.add_scalars(
'tri_precision',
dict(global_precision=g_prec_meter.avg,
local_precision=l_prec_meter.avg, ),
ep)
writer.add_scalars(
'satisfy_margin',
dict(global_satisfy_margin=g_m_meter.avg,
local_satisfy_margin=l_m_meter.avg, ),
ep)
writer.add_scalars(
'global_dist',
dict(global_dist_ap=g_dist_ap_meter.avg,
global_dist_an=g_dist_an_meter.avg, ),
ep)
writer.add_scalars(
'local_dist',
dict(local_dist_ap=l_dist_ap_meter.avg,
local_dist_an=l_dist_an_meter.avg, ),
ep)
# save ckpt
if cfg.log_to_file:
save_ckpt(modules_optims, ep + 1, 0, cfg.ckpt_file)
########
# Test #
########
test(load_model_weight=False)
if __name__ == '__main__':
main()
| 1.703125 | 2 |
process_model.py | hdm-dt-fb/rvt_model_services | 28 | 12789124 | <reponame>hdm-dt-fb/rvt_model_services<filename>process_model.py
""" process_model.py
Usage:
process_model.py <command> <project_code> <full_model_path> [options]
Arguments:
command action to be run on model, like: qc, audit or dwf
currently available: qc, audit, dwf
project_code unique project code consisting of 'projectnumber_projectModelPart'
like 456_11 , 416_T99 or 377_S
full_model_path revit model path including file name
use cfg shortcut if your full model path is already set in config.ini
Options:
-h, --help Show this help screen.
--viewer run revit in viewer mode (-> no transactions)
--html_path=<html> path to store html bokeh graphs, default in /commands/qc/*.html
--write_warn_ids write warning ids from warning command
--rvt_path=<rvt> full path to force specific rvt version other than detected
--rvt_ver=<rvtver> specify revit version and skip checking revit file version
(helpful if opening revit server files)
--audit activate open model with audit
--noworkshared open non-workshared model
--nodetach do not open workshared model detached
--notify choose to be notified with configured notify module(s)
--nofilecheck skips verifying model path actually exists
(helpful if opening revit server files)
--skip_hash_unchanged skips processing unchanged file
--timeout=<seconds> timeout in seconds before revit process gets terminated
"""
from docopt import docopt
import os
import pathlib
import hashlib
import subprocess
import psutil
import configparser
import time
import datetime
import logging
import colorful as col
import rvt_detector
from collections import defaultdict
from importlib import machinery
from tinydb import TinyDB, Query
from utils import rvt_journal_parser, rvt_journal_purger
from utils.win_utils import proc_open_files
from utils.rms_paths import get_paths
from notify.email import send_mail
from notify.slack import send_slack
from notify.req_post import send_post
def check_cfg_path(prj_number, cfg_str_or_path, cfg_path):
config = configparser.ConfigParser()
ini_file = cfg_path / "config.ini"
if cfg_str_or_path == "cfg":
if not pathlib.Path(cfg_str_or_path).exists():
if ini_file.exists():
config.read(ini_file)
if prj_number in config:
config_path = config[prj_number]["path"]
return config_path
return pathlib.Path(cfg_str_or_path)
def get_model_hash(rvt_model_path):
"""
Creates a hash of provided rvt model file
:param rvt_model_path:
:return: hash string
"""
BLOCKSIZE = 65536
hasher = hashlib.sha256()
with open(rvt_model_path, "rb") as rvt:
buf = rvt.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = rvt.read(BLOCKSIZE)
return hasher.hexdigest()
def check_hash_unchanged(hash_db, rvt_model_path, model_hash, date):
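# Returns True when the stored hash for this model matches the current one;
# otherwise the new hash and date are upserted and the function falls through (returns None).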
model_info = {"<full_model_path>": str(rvt_model_path),
">last_hash": model_hash,
">last_hash_date": date,
}
unchanged = hash_db.search((Query()["<full_model_path>"] == str(rvt_model_path)) &
(Query()[">last_hash"] == model_hash)
)
if unchanged:
return True
else:
hash_db.upsert(model_info, Query()["<full_model_path>"] == str(rvt_model_path)
)
def exit_with_log(message, severity=logging.warning, exit_return_code=1):
"""
Logs a final message with the given severity and exits the script.
:param message:
:param severity: logging function used to record the message (default logging.warning)
:param exit_return_code:
:return:
"""
severity(f"{project_code};{current_proc_hash};{exit_return_code};;{message}")
exit()
def get_jrn_and_post_process(search_command, commands_dir):
"""
Searches command paths for register dict in __init__.py in command roots to
prepare appropriate command strings to be inserted into the journal file
:param search_command: command name to look up
:param commands_dir: commands directory
:return: command module, post process dict
"""
found_dir = False
module_rjm = None
post_proc_dict = defaultdict()
for directory in os.scandir(commands_dir):
command_name = directory.name
# print(command_name)
if search_command == command_name:
found_dir = True
print(f" found appropriate command directory {commands_dir / command_name}")
mod_init = commands_dir / command_name / "__init__.py"
if mod_init.exists():
mod = machinery.SourceFileLoader(command_name, str(mod_init)).load_module()
if "register" in dir(mod):
if mod.register["name"] == command_name:
if "rjm" in mod.register:
module_rjm = mod.register["rjm"]
if "post_process" in mod.register:
external_args = []
for arg in mod.register["post_process"]["args"]:
external_args.append(globals().get(arg))
post_proc_dict["func"] = mod.register["post_process"]["func"]
post_proc_dict["args"] = external_args
else:
exit_with_log('__init__.py in command directory not found')
if not found_dir:
print(col.bold_red(f" appropriate command directory for '{search_command}' not found - aborting."))
exit_with_log('command directory not found')
return module_rjm, post_proc_dict
def get_rvt_proc_journal(process, jrn_file_path):
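# First look for the journal among the files the Revit process reports as open;
# if that fails, fall back to scanning the process handles via proc_open_files.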
open_files = process.open_files()
for proc_file in open_files:
file_name = pathlib.Path(proc_file.path).name
if file_name.startswith("journal"):
return proc_file.path
# if nothing found using the process.open_files
# dig deeper and get nasty
for proc_res in proc_open_files(process):
res_name = pathlib.Path(proc_res).name
if res_name.startswith("journal") and res_name.endswith("txt"):
return jrn_file_path / res_name
today_int = int(datetime.date.today().strftime("%Y%m%d"))
rms_paths = get_paths(__file__)
args = docopt(__doc__)
command = args["<command>"]
project_code = args["<project_code>"]
full_model_path = args["<full_model_path>"]
full_model_path = check_cfg_path(project_code, full_model_path, rms_paths.root)
model_path = full_model_path.parent
model_file_name = full_model_path.name
timeout = args["--timeout"]
html_path = args["--html_path"]
write_warn_ids = args["--write_warn_ids"]
rvt_override_path = args["--rvt_path"]
rvt_override_version = args["--rvt_ver"]
notify = args["--notify"]
disable_filecheck = args["--nofilecheck"]
disable_detach = args["--nodetach"]
disable_ws = args["--noworkshared"]
skip_hash_unchanged = args["--skip_hash_unchanged"]
audit = args["--audit"]
viewer = args["--viewer"]
if viewer:
viewer = "/viewer"
comma_concat_args = ",".join([f"{k}={v}" for k, v in args.items()])
print(col.bold_blue(f"+process model job control started with command: {command}"))
print(col.bold_orange(f"-detected following root path:"))
print(f" {rms_paths.root}")
format_json = {"sort_keys": True, "indent": 4, "separators": (',', ': ')}
hashes_db = TinyDB(rms_paths.db / "model_hashes.json", **format_json)
journal_file_path = rms_paths.journals / f"{project_code}.txt"
model_exists = full_model_path.exists()
timeout = int(timeout) if timeout else 60
if not html_path:
if command == "qc":
html_path = rms_paths.com_qc
elif command == "warnings":
html_path = rms_paths.com_warnings
elif not pathlib.Path(html_path).exists():
if command == "qc":
html_path = rms_paths.com_qc
print(f"your specified html path was not found - will export html graph to {rms_paths.com_qc} instead")
elif command == "warnings":
html_path = rms_paths.com_warnings
print(f"your specified html path was not found - will export html graph to {rms_paths.com_warnings} instead")
if write_warn_ids:
warn_ids_path = model_path / "RVT_fixme"
pathlib.Path(warn_ids_path).mkdir(exist_ok=True)
print(warn_ids_path)
else:
warn_ids_path = ""
job_logging = rms_paths.logs / "job_logging.csv"
header_logging = "time_stamp;level;project;process_hash;error_code;args;comments\n"
if not job_logging.exists():
with open(job_logging, "w") as logging_file:
logging_file.write(header_logging)
print(col.bold_blue(f"logging goes to: {job_logging}"))
logging.basicConfig(format='%(asctime)s;%(levelname)s;%(message)s',
datefmt="%Y%m%dT%H%M%SZ",
filename=job_logging,
level=logging.INFO)
logger = logging.getLogger(__name__)
logging.getLogger("bokeh").setLevel(logging.CRITICAL)
print(col.bold_orange('-detected following process structure:'))
current_proc_hash = hash(psutil.Process())
print(f" current process hash: {col.cyan(current_proc_hash)}")
logging.info(f"{project_code};{current_proc_hash};;{comma_concat_args};{'task_started'}")
if skip_hash_unchanged:
model_hash = get_model_hash(full_model_path)
print(f" model hash: {col.cyan(model_hash)}")
hash_unchanged = check_hash_unchanged(hashes_db, full_model_path, model_hash, today_int)
if hash_unchanged:
print(col.bold_red(f" model hash has not changed since last run!"))
print(col.bold_red(f" processing this model is skipped!!"))
time.sleep(1)
exit_with_log("unchanged_model", severity=logging.info, exit_return_code=0)
os.environ["RVT_QC_PRJ"] = project_code
os.environ["RVT_QC_PATH"] = str(full_model_path)
os.environ["RVT_LOG_PATH"] = str(rms_paths.logs)
if not rvt_override_version:
rvt_model_version = rvt_detector.get_rvt_file_version(full_model_path)
else:
rvt_model_version = rvt_override_version
if not rvt_override_path:
rvt_install_path = rvt_detector.installed_rvt_detection().get(rvt_model_version)
if not rvt_install_path:
print(f"no installed rvt versions for {rvt_model_version} detected - please use '--rvt_path' to specify path.")
logging.warning(f"{project_code};{current_proc_hash};1;;{'no rvt versions for {rvt_model_version} detected'}")
exit()
rvt_install_path = pathlib.Path(rvt_install_path)
else:
rvt_install_path = pathlib.Path(rvt_override_path)
mod_rjm, post_proc = get_jrn_and_post_process(command, rms_paths.commands)
if disable_filecheck or model_exists:
mod_rjm(project_code, full_model_path, journal_file_path, rms_paths.commands, rms_paths.logs)
proc_args = [arg for arg in [str(rvt_install_path), str(journal_file_path), viewer] if arg]
# print(proc_args)
run_proc = psutil.Popen(proc_args, cwd=str(rms_paths.root), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
run_proc_name = run_proc.name()
# let's wait half a second for rvt process to fire up
time.sleep(0.5)
if run_proc.name() == "Revit.exe":
proc_name_colored = col.bold_green(run_proc_name)
else:
proc_name_colored = col.bold_red(run_proc_name)
print(f" process info: {run_proc.pid} - {proc_name_colored}")
print(col.bold_orange("-detected revit:"))
print(f" version:{rvt_model_version} at path: {rvt_install_path}")
print(col.bold_orange("-process termination countdown:"))
# print(f" timeout until termination of process: {run_proc_id} - {proc_name_colored}:")
log_journal = get_rvt_proc_journal(run_proc, rms_paths.journals)
return_code = 9
return_logging = logging.info
# the main timeout loop
for sec in range(timeout):
time.sleep(1)
poll = run_proc.poll()
print(f" {str(timeout-sec).zfill(4)} seconds, proc poll: {poll}", end="\r")
if poll == 0:
print(col.bold_green(f" {poll} - revit finished!"))
return_code = "0"
return_logging = logging.info
break
elif timeout-sec-1 == 0:
print("\n")
print(col.bold_red(" timeout!!"))
if not poll:
print(col.bold_red(f" kill process now: {run_proc.pid}"))
run_proc.kill()
return_code = "1"
return_logging = logging.warning
# post loop processing, naively parsing journal files
print(col.bold_orange("-post process:"))
print(f" process open journal for post process parsing:\n {log_journal}")
log_journal_result = rvt_journal_parser.read_journal(log_journal)
log_journal_result = ",".join([f"{k}: {v}" for k, v in log_journal_result.items()])
if log_journal_result:
print(f" detected: {log_journal_result}")
if "corrupt" in log_journal_result:
return_logging = logging.critical
# run all notify modules
if notify:
notify_modules = [send_mail, send_slack, send_post]
for notify_function in notify_modules:
notify_function.notify(project_code, full_model_path, log_journal_result)
# getting post process funcs and args from command module for updating graphs and custom functionality
if post_proc:
post_proc["func"](*post_proc["args"])
# write log according to return code
logged_journal_excerpt = log_journal_result.strip('\n').strip('\r')
return_logging(f"{project_code};{current_proc_hash};{return_code};;{logged_journal_excerpt}")
# finally journal cleanup
rvt_journal_purger.purge(rms_paths.journals)
else:
print("model not found")
logging.warning(f"{project_code};{current_proc_hash};1;;{'model not found'}")
print(col.bold_blue("+process model job control script ended"))
| 1.851563 | 2 |
main.py | localhoct/os-remote | 0 | 12789125 | import telebot
from telebot import types
import os
import random
from PIL import ImageGrab
from winsound import Beep
import requests
import platform
import psutil
import time
# proxy = 'http://192.168.88.170:8888'
# os.environ['http_proxy'] = proxy
# os.environ['HTTP_PROXY'] = proxy
# os.environ['https_proxy'] = proxy
# os.environ['HTTPS_PROXY'] = proxy
start_time = time.time()
os.system("cls")
token = 'Your Token Here ;) ' #str
bot = telebot.TeleBot(token)
admin = 440904809 # here Enter Your Telegram UserId (int)
bot.send_message(admin, 'سیستم روشن شد!')
def getfile(filename):
myfile = open(filename, "r+", encoding='utf-8')
return myfile.read()
def putfile(filename, filedata):
myfile = open(filename, "w+", encoding='utf-8')
myfile.write(filedata)
myfile.close()
def startcm(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True)
btn1 = types.KeyboardButton('ScreenShot📸')
btn2 = types.KeyboardButton('Power Option ⚠️')
btn3 = types.KeyboardButton('Sound🔉')
btn4 = types.KeyboardButton('File Manager🗄')
btn5 = types.KeyboardButton('System Info💻')
btn6 = types.KeyboardButton('Open Web🌍')
btns.add(btn1, btn2, btn3, btn4, btn5, btn6)
message = '''
سلام خوش آمدید.😄
لیست دستورات برای فقط شماست😜
'''
bot.send_message(chat_id, message, reply_markup=btns)
# print(id)
def savedb(user):
chat_id = user.from_user.id
text = user.text
con_text = text.replace('/save ', '')
# con_text = con_text.encode("utf-8")
mesid = random.randint(1111, 9999)
message = f'پیام شما: \n {con_text} \n شناسه پیام : {mesid}'
bot.send_message(chat_id, message)
putfile(f'database/data_{mesid}.txt', str(con_text))
# print(con_text, mesid)
def savedb_lsit(user):
chat_id = user.from_user.id
list_file = ''
for r, d, f in os.walk('database'):
for file in f:
list_file = list_file + '\n' + str(file)
bot.send_message(chat_id, 'پیام های شما: \n' + str(list_file))
def power(user):
chat_id = user.from_user.id
# text = user.text
btns = types.ReplyKeyboardMarkup(row_width=1, one_time_keyboard=True)
btn1 = types.KeyboardButton('ShoutDown | خاموش کردن')
btn2 = types.KeyboardButton('ریستارت | Restart')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3, )
bot.send_message(chat_id, 'شما به بخش power option وارد شدید.لیست دستورات: \n', reply_markup=btns)
def home(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True)
btn1 = types.KeyboardButton('ScreenShot📸')
btn2 = types.KeyboardButton('Power Option ⚠️')
btn3 = types.KeyboardButton('Sound🔉')
btn4 = types.KeyboardButton('File Manager🗄')
btn5 = types.KeyboardButton('System Info💻')
btn6 = types.KeyboardButton('Open Web🌍')
btns.add(btn1, btn2, btn3, btn4, btn5, btn6)
bot.send_message(chat_id, '🏛صفحه اصلی: ', reply_markup=btns)
def playmusic_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('Music 🎧')
btn2 = types.KeyboardButton('Beep')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3)
message = '''
🤔لطفا نوع صدا را انتخاب کنید
'''
bot.send_message(chat_id, message, reply_markup=btns)
def bep(user):
chat_id = user.from_user.id
bot.send_message(chat_id, 'بوق پخش شد😉')
for x in range(1, 6):
Beep(1000 * x, 200)
Beep(1000 * x, 200 - (x * 50))
def music(user):
chat_id = user.from_user.id
message = '''
لطفا آهنگ خود را بفرستید تا برایتان پخش کنم!😊
یا از دستور زیر استفاده کنید👇:
/music [File_id]
برای دریافت آیدی ها دستور زیر را بزنید👇:
/music_list
'''
bot.send_message(chat_id, message)
def music_id(user):
chat_id = user.from_user.id
list_file = ''
music_count = 0
for r, d, f in os.walk('music'):
for file in f:
if 'mp3' in file:
music_count += 1
list_file = list_file + '\n' + str(file)
else:
pass
message = f'''
تعداد آهنگ ها:{music_count}
لیست آهنگ های ذخیره شده:
{list_file}
'''
bot.send_message(chat_id, message)
def music_play(user):
chat_id = user.from_user.id
text = user.text
music_name = text.replace('/music ', '')
os.system(f'start music/{str(music_name)}.mp3')
musiv = open(f'music/{str(music_name)}.mp3', 'rb')
message = f'''
آهنگ با کد اختصاصی زیر درحال پخش:🎶
{str(music_name)}
'''
bot.send_message(chat_id, message)
bot.send_chat_action(chat_id, 'upload_document')
bot.send_audio(chat_id, musiv, caption='آهنگ درحال پخش😐')
def screenshot(user):
chat_id = user.from_user.id
message = 'گرفتن اسکرین...'
bot.send_message(chat_id, message)
photo = ImageGrab.grab()
photo.save('screen.png')
message = 'اسکرین شات گرفته شد!😋'
bot.send_message(chat_id, message)
photo = open('screen.png', 'rb')
bot.send_photo(chat_id, photo)
photo.close()
photo = open('screen.png', 'rb')
bot.send_document(chat_id, photo, caption='اسکرین گرفته شده نسخه با کیفیت 🙄')
photo.close()
os.remove('screen.png')
# print(chat_id)
def systeminfo(user):
chat_id = user.from_user.id
uname = platform.uname()
runtime = time.time() - start_time
if runtime < 60:
runtime = f'{int(runtime)} Second'
else:
runtime = runtime / 60
runtime = f'{int(runtime)} Minutes'
message = f'''
🔰 System: {uname.system} {uname.release}
👥 Node Name: {uname.node}
🔺 CPU Usage {psutil.cpu_percent()} Percent
🔺 RAM Usage: {psutil.virtual_memory()[2]} Percent
📝 Machine Architecture: {uname.machine}
⏱ Bot Run Time: {runtime}
'''
bot.send_message(chat_id, message)
def shutdown_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('آره مطمئنم میخوام خاموش بشه!!')
btn2 = types.KeyboardButton('نه دستم خورد !!')
btns.add(btn1, btn2, )
message = '''
آیا مطمئن هستید که سیستم خاموش شود؟🤨
'''
bot.send_message(chat_id, message, reply_markup=btns)
def restart_btn(user):
chat_id = user.from_user.id
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('آره مطمئنم میخوام ریستارت بشه!!')
btn2 = types.KeyboardButton('نه دستم خورد !!')
btns.add(btn1, btn2)
message = '''
آیا مطمئن هستید که سیستم ریستارت شود؟🤨
'''
bot.send_message(chat_id, message, reply_markup=btns)
def download_btn(user):
chat_id = user.from_user.id
text = user.text
btns = types.ReplyKeyboardMarkup(one_time_keyboard=True, row_width=1)
btn1 = types.KeyboardButton('Download File From System📥')
btn2 = types.KeyboardButton('File List📂')
btn3 = types.KeyboardButton('بازگشت')
btns.add(btn1, btn2, btn3)
bot.send_message(chat_id, 'به فایل منجر خوش آمدید😅', reply_markup=btns)
def downlaod_message(user):
chat_id = user.from_user.id
# text = user.text
bot.send_message(chat_id, 'نحوه استفاده \n /download [file name or file address]')
def download_file(user):
chat_id = user.from_user.id
text = user.text
filename_or_address = text.replace('/download ', '')
if os.path.isdir(filename_or_address):
bot.send_message(chat_id, 'این یک فولدر هست و قابل دانلود نیست😑')
else:
if os.path.isfile(filename_or_address):
file = open(filename_or_address, 'rb')
bot.send_message(chat_id, 'درحال آپلود کردن فایل درخواستی شما...')
bot.send_document(chat_id, file, caption='این فایل درخواستی شماست 😁')
else:
bot.send_message(chat_id, 'فایل یا فولدری با این نام پیدا نشد.🤐')
def web_btn(user):
chat_id = user.from_user.id
text = user.text
bot.send_message(chat_id, 'نحوه استفاده \n /web [URL]')
def filemanagerlist(user):
userchatid = user.chat.id
usertext = user.text
directory = usertext.replace("/filemanager ", "")
if (os.path.isdir(directory)):
bot.send_message(userchatid, "🔎 درحال اسکن کردن فولدر ...")
foldercount = 0
folderlist = ""
filecount = 0
filelist = ""
for r, d, f in os.walk(directory):
for folder in d:
if (foldercount > 30 or foldercount == 30):
break
else:
if ("\\" in r):
pass
else:
foldercount += 1
folderlist = folderlist + "\n" + "📁 " + r + "/" + folder
for file in f:
if (filecount > 30 or filecount == 30):
break
else:
filecount += 1
filelist = filelist + "\n" + "🧾 " + r + "/" + file
bot.send_message(userchatid, "🗂 30 First Folders In " + directory + " : \n\n" + str(folderlist))
bot.send_message(userchatid, "🗃 30 First File In " + directory + " : \n\n" + str(filelist))
else:
bot.send_message(userchatid, "چیزی پیدا نکردم 😐")
def justfilelist(user):
userchatid = user.chat.id
bot.send_message(userchatid, "نحوه استفاده:\n/filemanager [dir]")
@bot.message_handler(content_types=['text'])
def main(user):
chat_id = user.from_user.id
text = user.text
# print(chat_id)
if chat_id == admin:
if text == '/start':
startcm(user)
if text == '/save':
bot.send_message(chat_id, 'لطفا بعد از دستور پیام خود را اضافه کنید\n به این صورت : \n /save [message] ')
if text.startswith('/save '):
savedb(user)
if text == '/message':
savedb_lsit(user)
if text == 'Power Option ⚠️':
power(user)
if text == 'ShoutDown | خاموش کردن':
shutdown_btn(user)
if text == 'ریستارت | Restart':
restart_btn(user)
if text == 'آره مطمئنم میخوام خاموش بشه!!':
bot.send_message(chat_id, 'سیستم شما خاموش شد!😐')
os.system('shutdown /s /t 1')
home(user)
if text == 'آره مطمئنم میخوام ریستارت بشه!!':
bot.send_message(chat_id, 'سیستم شما ریستارت شد!😐')
os.system('shutdown /r /t 1')
home(user)
if text == 'بازگشت':
home(user)
if text == 'نه دستم خورد !!':
bot.send_message(chat_id, 'کنترل دستت هم نداری بدبخت 😂')
home(user)
if text == 'ScreenShot📸':
screenshot(user)
if text == 'Sound🔉':
playmusic_btn(user)
if text == 'Beep':
bep(user)
if text == 'Music 🎧':
music(user)
if text == 'System Info💻':
systeminfo(user)
if text == 'File Manager🗄':
download_btn(user)
if text == 'Download File From System📥':
downlaod_message(user)
if text.startswith('/download '):
download_file(user)
if text == '/download':
downlaod_message(user)
if text == 'Open Web🌍':
web_btn(user)
if text.startswith('/web '):
url = text.replace('/web ', '')
bot.send_message(chat_id, f'گوگل کروم با آدرس شما[{url}] باز شد🥳')
os.system(f"start chrome {url}")
if text == '/web':
web_btn(user)
if (text == "File List📂" or text == "/filemanager"):
justfilelist(user)
if (text.startswith("/filemanager ")):
filemanagerlist(user)
if text.startswith('/music '):
music_play(user)
if text == '/music':
music(user)
if text == '/music_list':
music_id(user)
else:
bot.send_message(chat_id, 'شما ادمین نیستید 😐')
@bot.message_handler(content_types=['audio'])
def audio(message):
chat_id = message.from_user.id
# print(message.audio)
raw = message.audio.file_id
# file_name = message.audio.file_unique_id
title = message.audio.title
title = title.strip()
title = title.replace(' ', '_')
performer = message.audio.performer
performer = performer.strip()
performer = performer.replace(' ', '_')
file_size = message.audio.file_size
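# 20971520 bytes = 20 MB, the download limit for files sent to Telegram bots.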
if file_size > 20971520:
bot.send_message(chat_id, 'محدودیت های تلگرام حجم فایل بیش از 20مگابایت هست 🤐')
else:
try:
file_info = bot.get_file(raw)
downloaded_file = bot.download_file(file_info.file_path)
with open(f'music/{str(performer)}-{str(title).strip()}.mp3', 'wb') as new_file:
new_file.write(downloaded_file)
os.system(f'start music/{str(performer)}-{str(title).strip()}.mp3')
bot.send_message(chat_id, 'درحال پخش آهنگ شما...😯')
bot.send_message(chat_id, 'کد اختصاصی این آهنگ👇')
bot.send_message(chat_id, f'``` /music {str(performer)}-{str(title).strip()}```', parse_mode='markdown')
except:
bot.send_message(chat_id, 'این آهنگ درحال پخش است😒')
try:
bot.polling(True)
except:
print(' I Got Error :( ')
| 2.234375 | 2 |
apriori/apriori.py | TCphysics/MLlearning | 0 | 12789126 | <gh_stars>0
class ApriopriCT(object):
'''
This python script finds the frequency list FL with maximum length and
then build rules based on each list in FL.
'''
def __init__(self, dataList, itemList, minSupport=0.2, minConfidence=0.8):
'''
param dataList: original dataList
param minSupport: minimum Support
param minConfidence: minimum Confidence
param itemList: list of all items
param processedData: processed Data with 1 and 0 in each raw representing
if items are included.
param supportDictionary: Dictionary of support
param confidenceList: List of confidence
param Ck: optimal frequency list depending on minSupport
'''
self.dataList = dataList
self.minSupport = minSupport
self.minConfidence = minConfidence
self.itemList = itemList
self.processedData = []
self.supportDictionary = dict()
self.confidenceList = []
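# C0: one singleton candidate itemset per item, stored as indices into itemList.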
self.C0 = [[i] for i in range(len(self.itemList))]
self.freqList = []
self.prepareData()
def prepareData(self):
processedData = []
for raw in self.dataList:
temp = []
for item in self.itemList:
if item in raw:
temp.append(1)
else:
temp.append(0)
self.processedData.append(temp)
return
def match(self, items, dataL):
# decide if a items combo is contained in a single data
for i in items:
if dataL[i] == 0:
return False
return True
def updateC(self, C):
# Drop a candidate combo when its frequency is lower than minSupport
c_update = []
for ci in C:
count = 0
for raw in self.processedData:
if self.match(ci, raw):
count += 1
if count > len(self.dataList)*self.minSupport:
c_update.append(ci)
# print(ci)
self.supportDictionary[frozenset(ci)] = count/len(self.dataList)
return c_update
def isApriori(self,l,C):
# check that every (k-1)-element sublist of l is already in C (the Apriori property)
for i in range(len(l)):
subList = l[:i]+l[i+1:]
if subList not in C:
return False
return True
def create_Ck(self,C,k):
#generate Ck based on Ck-1
Ck = []
lenC = len(C)
for i in range(lenC):
for j in range(i+1,lenC):
l1, l2 = C[i], C[j]
l1.sort()
l2.sort()
if l1 ==[] or l1[:-1] == l2[:-1]:
l = l1.copy()
l.append(l2[-1])
if i == 1 or self.isApriori(l, C):
Ck.append(sorted(l))
return Ck
def findFreqList(self):
# find the frequency list
C = self.C0.copy()
C = self.updateC(C)
i = 1
while i<len(self.itemList)+1:
updateC = self.create_Ck(C,i)
Ck = self.updateC(updateC)
if len(Ck) == 0:
break
else:
C = Ck
i+=1
self.freqList = C.copy()
return
def printFreqList(self):
for k in self.freqList:
combo = [self.itemList[i] for i in k]
print('Freq list:',combo,',freq:', round(self.supportDictionary[frozenset(k)],5))
# for k in self.supportDictionary.keys():
# combo = [self.itemList[i] for i in k]
# print('combo:',k,',freq:', round(self.supportDictionary[k],5))
return
def calConfidence(self,l,freqCombo):
#calculate Conf of a sublist based on a combo
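# confidence((freqCombo - l) -> l) = support(freqCombo) / support(freqCombo - l)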
set1 = frozenset(freqCombo)
set2 = frozenset(freqCombo) - frozenset(l)
ratio = self.supportDictionary[set1]/self.supportDictionary[set2]
return ratio
def updateCr(self, Cr, freqCombo):
# drop an items list if its conf is lower than minConfidence
c_update = []
for c in Cr:
if self.calConfidence(c, freqCombo) > self.minConfidence:
c_update.append(c)
return c_update
def findRule(self, freqCombo):
# build rule list for a combo
Cr = [[f] for f in freqCombo]
i = 1
ruleList = []
while i < len(freqCombo):
updateCr = self.updateCr(Cr,freqCombo)
ruleList += updateCr
if len(updateCr) == 0:
break
Cr = self.create_Ck(updateCr,i)
i+=1
return ruleList
def addRuleToConfDict(self,ruleList, freqCombo):
# find rule and fulfill confidenceList for a freqCombo
for rule in ruleList:
conf = self.calConfidence(rule, freqCombo)
tag = [rule, list(frozenset(freqCombo)-frozenset(rule))]
self.confidenceList.append( [tag, conf] )
def buildRules(self):
# iterates over each combo in freqList.
freqList = self.freqList.copy()
for combo in freqList:
# print('combo:',combo)
ruleList = self.findRule(combo)
self.addRuleToConfDict(ruleList, combo)
# print('ruleList:',ruleList)
return
def printRules(self):
for k in self.confidenceList:
r1 = [self.itemList[i] for i in k[0][1]]
r2 = [self.itemList[i] for i in k[0][0]]
print('rule:',r1,' to ',r2 ,', conf=',round(k[1],4))
def run(self):
self.findFreqList()
self.printFreqList()
self.buildRules()
self.printRules()
if __name__ == '__main__':
    with open('MBA_data.txt') as f:
        dataList = [line.split() for line in f]
itemSet = set()
for raw in dataList:
        for item in raw:
            itemSet.add(item)
    itemList = sorted(itemSet)
apriSolution = ApriopriCT(dataList,itemList)
apriSolution.run()
| 3.03125 | 3 |
test/unit/puzzles/student_prizes_test.py | dclark87/pytools | 0 | 12789127 | <reponame>dclark87/pytools
#
#
#
'''
Unit tests for the pytools.puzzles.student_prizes module.
'''
import unittest
from pytools.puzzles import student_prizes
class StudentPrizesTestCase(unittest.TestCase):
    '''
    Tests for student_prizes.prize_combos.
    '''
def test_student_prizes(self):
        '''
        prize_combos(n) should equal the number of length-n sequences over the
        letters 'A', 'L' and 'O' (3**n).
        '''
# Import packages
import itertools
# Init variables
n = 3
letters = 'ALO'
expected = len(list(itertools.product(letters, repeat=n)))
returned = student_prizes.prize_combos(n)
self.assertEqual(expected, returned)
if __name__ == '__main__':
unittest.main() | 3.34375 | 3 |
tests/ncoghlan.py | jeamland/asciicompat | 2 | 12789128 | """Test cases for dual bytes/str APIs"""
import unittest
"""
The Python 2 str type conveniently permitted the creation of APIs that
could be used as either binary APIs (8-bit str in, 8-bit str out) or as
text APIs (unicode in, unicode out).
The critical enabler for this feature was the ability to define any
*constants* used in these algorithms as 8 bit strings, and then rely on
the implicit promotion to Unicode to handle text input.
In Python 3, that implicit conversion to Unicode is gone, so APIs that
handle both binary and text data need to be written to either have two
separate code paths, or else to automatically decode binary input to text
and then convert it back to binary output again when returning the result.
However, it should be possible to create a Python 3 extension type that
inherits from str (providing interoperability with str objects) and *also*
implements the buffer API (providing interoperability with bytes and
bytearray, and likely other types).
This is a test suite developed on Python 2, demonstrating the convenience
of the implicit conversion in the case of such dual binary/text interfaces.
While the general recommendation for Python 3 code is to ensure APIs are
either binary *or* text rather than a hybrid combination, libraries
migrating from Python 2 that already publish such hybrid APIs may need to
continue to support both styles of usage for the benefit of clients (as
some clients may be using the binary half of the interface, while others
are using the text half).
The URL parsing APIs in Python 3's urllib.parse module are an example of
such an API. It supported both str and unicode in Python 2 and supports
both str and any type with a decode method in Python 3."""
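# A minimal sketch (not part of the original test suite) of the hybrid style the
# docstring describes: a helper that accepts either bytes or text because its
# only constant is an ascii-compatible str subclass. The constant and helper
# names here are illustrative only.
#
#   _SEP = asciistr("=")
#   def split_pair(data):
#       key, sep, value = data.partition(_SEP)
#       return key, value
#
# split_pair(u"a=b") would yield text pieces, while split_pair(b"a=b") would
# yield bytes pieces, mirroring the partition tests below.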
try:
from asciicompat import asciistr
except ImportError:
# Python 2 fallback
asciistr = str
# Developing the tests on Python 2
try:
text_type = unicode
except NameError:
text_type = str
binary_type = bytes
asciistr = str
# Some test values
TEXT = u"text"
BINARY = b"binary"
HYBRID = asciistr("ascii")
class TestHybridAddition(unittest.TestCase):
def test_text_addition(self):
self.assertEqual(TEXT + HYBRID, u"textascii")
self.assertIsInstance(TEXT + HYBRID, text_type)
self.assertEqual(HYBRID + TEXT, u"asciitext")
self.assertIsInstance(HYBRID + TEXT, text_type)
def test_binary_addition(self):
self.assertEqual(BINARY + HYBRID, b"binaryascii")
self.assertIsInstance(BINARY + HYBRID, binary_type)
# Next two are likely to be affected by
# http://bugs.python.org/issue11477
# as the str subclass on the LHS will throw TypeError directly
# as returning NotImplemented from sq_concat is not currently
# supported correctly
self.assertEqual(HYBRID + BINARY, b"asciibinary")
self.assertIsInstance(HYBRID + BINARY, binary_type)
class HybridTestMixin(object):
input_data = None
output_type = None
exists = asciistr("data")
missing = asciistr("not data")
def test_containment(self):
self.assertIn(self.exists, self.input_data)
self.assertIn(self.exists[:2], self.input_data)
self.assertNotIn(self.missing, self.input_data)
def test_partitioning(self):
before, sep, after = self.input_data.partition(self.exists)
self.assertIsInstance(before, self.output_type)
self.assertIsInstance(sep, self.output_type)
self.assertIsInstance(after, self.output_type)
self.assertEqual(sep, self.exists)
def test_casting(self):
self.assertEqual(self.output_type(self.exists), self.exists)
self.assertIs(type(self.output_type(self.exists)), self.output_type)
# Formatting tests: in Python 2, str formatting always produces
# str objects, *except* when a Unicode object is passed to mod-formatting
def test_mod_formatting(self):
formatted = asciistr("%s") % self.input_data
self.assertEqual(formatted, self.input_data)
self.assertIs(type(formatted), self.output_type)
formatted_int = asciistr("%d") % 42
# asciistr also avoids the byte constructor length init quirk
self.assertEqual(formatted_int, asciistr(42))
self.assertIs(type(formatted_int), binary_type)
def test_format_method(self):
formatted = asciistr("{}").format(self.input_data)
self.assertEqual(formatted, self.input_data)
self.assertIs(type(formatted), binary_type)
formatted_int = asciistr("{:d}").format(42)
# asciistr also avoids the byte constructor length init quirk
self.assertEqual(formatted_int, asciistr(42))
self.assertIs(type(formatted_int), binary_type)
class TestBinaryInteraction(unittest.TestCase, HybridTestMixin):
input_data = b"there is binary data in this test case"
output_type = binary_type
class TestTextInteraction(unittest.TestCase, HybridTestMixin):
input_data = u"there is text data in this test case"
output_type = text_type
if __name__ == "__main__":
unittest.main()
| 3.484375 | 3 |
BOJ/1697.py | Jaesin22/TIL | 0 | 12789129 | <reponame>Jaesin22/TIL
from collections import deque
MAX = 10 ** 5  # positions are limited to the range [0, 100000]
n, k = map(int, input().split())
visited = [0] * (MAX + 1)
def BFS():
    # BOJ 1697: starting at n, each second we may move to x-1, x+1, or 2*x;
    # visited[x] holds the minimum number of seconds needed to reach x.
    queue = deque()
queue.append(n)
while queue:
x = queue.popleft()
if x == k:
print(visited[x])
break
for nx in (x-1, x+1, x * 2):
            if 0 <= nx <= MAX and not visited[nx]:
visited[nx] = visited[x] + 1
queue.append(nx)
BFS() | 2.984375 | 3 |
app/api/routers/auth.py | ABGEO/magtifun.abgeo.dev | 0 | 12789130 | """
This file is part of the magtifun.abgeo.dev.
(c) 2021 <NAME> <<EMAIL>>
For the full copyright and license information, please view the LICENSE
file that was distributed with this source code.
"""
from datetime import timedelta
from fastapi import Depends, HTTPException, status
from fastapi.security import OAuth2PasswordRequestForm
from fastapi import APIRouter
from app.core.config import ACCESS_TOKEN_EXPIRE_MINUTES
from app.models.schemas.jwt import Token
from app.resources import strings
from app.services.jwt import create_access_token
from app.services.magtifun import authenticate_user
router = APIRouter(tags=["Auth"])
@router.post("/token", response_model=Token)
async def token_authentication(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
"""
Create Authentication JWT Token.
"""
user = authenticate_user(form_data.username, form_data.password)
if not user:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail=strings.INCORRECT_LOGIN_INPUT,
headers={"WWW-Authenticate": "Bearer"},
)
access_token = create_access_token(
data={"sub": user.key},
expires_delta=timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES),
)
return Token(access_token=access_token, token_type="bearer")
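# Usage sketch (not part of the router): a client obtains a token by POSTing the
# OAuth2 password-grant form fields to /token. The base URL below is
# hypothetical, and httpx is only one possible client.
#
#   import httpx
#   resp = httpx.post(
#       "http://localhost:8000/token",
#       data={"username": "magtifun-user", "password": "secret"},
#   )
#   access_token = resp.json()["access_token"]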
| 2.609375 | 3 |
piot/inputs/pozyx.py | frantp/iot-sensor-reader | 0 | 12789131 | <gh_stars>0
from collections import OrderedDict
import time
from ..core import DriverBase
import pypozyx
class Driver(DriverBase):
def __init__(self, i2c=False, bus=1, port=None):
super().__init__()
self._sensor = pypozyx.PozyxI2C(bus) if i2c else \
pypozyx.PozyxSerial(port or pypozyx.get_first_pozyx_serial_port())
def close(self):
if isinstance(self._sensor, pypozyx.PozyxI2C):
self._sensor.bus.close()
super().close()
def run(self):
position = pypozyx.Coordinates()
while position.x == position.y == position.z == 0:
self._sensor.doPositioning(position)
return [(self.sid(), time.time_ns(), OrderedDict([
("pos_x", position.x),
("pos_y", position.y),
("pos_z", position.z),
]))]
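# Usage sketch (not part of the driver): with a Pozyx tag attached to the first
# detected serial port, a single reading could be taken like this. Timing and
# error handling are omitted.
#
#   drv = Driver()
#   try:
#       (sensor_id, timestamp_ns, coords), = drv.run()
#       print(coords["pos_x"], coords["pos_y"], coords["pos_z"])
#   finally:
#       drv.close()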
| 2.46875 | 2 |
Algorithms_medium/1288. Remove Covered Intervals.py | VinceW0/Leetcode_Python_solutions | 4 | 12789132 | """
1288. Remove Covered Intervals
Medium
Given a list of intervals, remove all intervals that are covered by another interval in the list.
Interval [a,b) is covered by interval [c,d) if and only if c <= a and b <= d.
After doing so, return the number of remaining intervals.
Example 1:
Input: intervals = [[1,4],[3,6],[2,8]]
Output: 2
Explanation: Interval [3,6] is covered by [2,8], therefore it is removed.
Example 2:
Input: intervals = [[1,4],[2,3]]
Output: 1
Example 3:
Input: intervals = [[0,10],[5,12]]
Output: 2
Example 4:
Input: intervals = [[3,10],[4,10],[5,11]]
Output: 2
Example 5:
Input: intervals = [[1,2],[1,4],[3,4]]
Output: 1
Constraints:
1 <= intervals.length <= 1000
intervals[i].length == 2
0 <= intervals[i][0] < intervals[i][1] <= 10^5
All the intervals are unique.
"""
from typing import List


class Solution:
    def removeCoveredIntervals(self, intervals: List[List[int]]) -> int:
        # Sort by start ascending and end descending; an interval survives only
        # if its right end extends beyond every end seen so far.
        res = right = 0
intervals.sort(key = lambda x: (x[0], -x[1]))
for i, j in intervals:
res += j > right
right = max(right, j)
return res
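# Quick self-check (not part of the original submission) using the examples from
# the problem statement above.
if __name__ == "__main__":
    s = Solution()
    assert s.removeCoveredIntervals([[1, 4], [3, 6], [2, 8]]) == 2
    assert s.removeCoveredIntervals([[1, 4], [2, 3]]) == 1
    assert s.removeCoveredIntervals([[0, 10], [5, 12]]) == 2
    assert s.removeCoveredIntervals([[3, 10], [4, 10], [5, 11]]) == 2
    assert s.removeCoveredIntervals([[1, 2], [1, 4], [3, 4]]) == 1
    print("all examples pass")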
| 3.78125 | 4 |
Calibration/HcalAlCaRecoProducers/python/ALCARECOHcalCalIsoTrkProducerFilter_cff.py | malbouis/cmssw | 852 | 12789133 | <reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
#------------------------------------------------
#AlCaReco filtering for HCAL isotrk:
#------------------------------------------------
from Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkProducer_cfi import *
from Calibration.HcalAlCaRecoProducers.alcaHcalIsotrkFilter_cfi import *
seqALCARECOHcalCalIsoTrkProducerFilter = cms.Sequence(alcaHcalIsotrkProducer * alcaHcalIsotrkFilter)
| 0.960938 | 1 |
cuffdiffgui.py | AgazW/Seq-Pip | 0 | 12789134 | <reponame>AgazW/Seq-Pip
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'bowtiegui.ui'
#
# Created: Tue Mar 1 14:46:40 2016
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(870, 1500)
MainWindow.setMinimumSize(QtCore.QSize(0, 1500))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 1500))
MainWindow.setWhatsThis(_fromUtf8(""))
MainWindow.setLayoutDirection(QtCore.Qt.LeftToRight)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.scrollArea = QtGui.QScrollArea(self.centralwidget)
self.centralWidget = QtGui.QWidget(MainWindow)
layout = QtGui.QVBoxLayout(self.centralWidget)
layout.addWidget(self.scrollArea)
#self.scrollArea.setGeometry(QtCore.QRect(0, 0, 41, 111))
#self.scrollArea.setWidgetResizable(True)
#self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
#self.scrollAreaWidgetContents = QtGui.QWidget()
# self.scrollArea.setGeometry(QtCore.QRect(230, 0, 841, 2481))
self.scrollArea.setFrameShape(QtGui.QFrame.WinPanel)
self.scrollArea.setFrameShadow(QtGui.QFrame.Plain)
self.scrollArea.setLineWidth(7)
self.scrollArea.setWidgetResizable(False)
# self.scrollArea.setToolTip("place cursor over the controls for information")
self.scrollAreaWidgetContents = QtGui.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 812,3052))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.scrollAreaWidgetContents.sizePolicy().hasHeightForWidth())
self.scrollAreaWidgetContents.setSizePolicy(sizePolicy)
self.scrollAreaWidgetContents.setObjectName(_fromUtf8("scrollAreaWidgetContents"))
layout = QtGui.QHBoxLayout(self.scrollAreaWidgetContents)
self.frame = QtGui.QFrame(self.scrollAreaWidgetContents)
self.frame.setGeometry(QtCore.QRect(50, 10, 741, 3000))
self.frame.setFocusPolicy(QtCore.Qt.ClickFocus)
self.frame.setFrameShape(QtGui.QFrame.WinPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setLineWidth(10)
#self.setStyleSheet('background-image: url("/home/amrata/PycharmProjects/bowtieuser/images/images/logn.jpg")')
self.frame.setObjectName(_fromUtf8("frame"))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
self.selgtflbl = QtGui.QLabel(self.frame) # select gtf file
self.selgtflbl.setGeometry(QtCore.QRect(10, 30, 291, 16))
font = QtGui.QFont()
font.setBold(True)
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setWeight(75)
self.selgtflbl.setFont(font)
self.selgtflbl.setToolTip("select the GTF file produced by cufflinks")
self.selgtflbl.setObjectName(_fromUtf8("selgtflbl"))
self.selfile = QtGui.QPushButton(self.frame) # single gtf file
self.selfile.setGeometry(QtCore.QRect(10, 50, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.selfile.setFont(font)
self.selfile.setToolTip("Click here to select single GTF file")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.selfile.setIcon(icon)
self.selfile.setToolTip("click here to select the file")
self.selfile.setObjectName(_fromUtf8("selfile"))
self.mulfile = QtGui.QPushButton(self.frame)
self.mulfile.setGeometry(QtCore.QRect(50, 50, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mulfile.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index1.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mulfile.setIcon(icon)
self.mulfile.setToolTip("Click here to select multiple GTF file")
self.mulfile.setObjectName(_fromUtf8("mulfile"))
self.builselcom = QtGui.QComboBox(self.frame)
self.builselcom.setGeometry(QtCore.QRect(90, 50, 641, 27))
self.builselcom.setToolTip("Please select the file")
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
#font.setBold(True)
#font.setPointSize(11)
#font.setBold(False)
#font.setItalic(False)
font.setWeight(50)
self.builselcom.setFont(font)
self.builselcom.setObjectName(_fromUtf8("builselcom"))
self.omitlbl = QtGui.QLabel(self.frame) #Do you want to Omit Tabular Datasets
self.omitlbl.setGeometry(QtCore.QRect(10, 100, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.omitlbl.setFont(font)
self.omitlbl.setObjectName(_fromUtf8("omitlbl"))
self.asno = QtGui.QPushButton(self.frame)
self.asno.setGeometry(QtCore.QRect(60, 120, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asno.setFont(font)
self.asno.setObjectName(_fromUtf8("asno"))
self.asyes = QtGui.QPushButton(self.frame)
self.asyes.setGeometry(QtCore.QRect(10, 120, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asyes.setFont(font)
self.asyes.setObjectName(_fromUtf8("asyes"))
self.sqlbl = QtGui.QLabel(self.frame) #Do you want to Generate SQLite
self.sqlbl.setGeometry(QtCore.QRect(10, 170, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.sqlbl.setFont(font)
self.sqlbl.setObjectName(_fromUtf8("sqlbl"))
self.asno1 = QtGui.QPushButton(self.frame)
self.asno1.setGeometry(QtCore.QRect(60, 190, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asno1.setFont(font)
self.asno1.setObjectName(_fromUtf8("asno1"))
self.asyes1 = QtGui.QPushButton(self.frame)
self.asyes1.setGeometry(QtCore.QRect(10, 190, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asyes1.setFont(font)
self.asyes1.setObjectName(_fromUtf8("asyes1"))
self.inputdatalbl = QtGui.QLabel(self.frame) #Do you want to Provide Input data type
self.inputdatalbl.setGeometry(QtCore.QRect(10, 240, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.inputdatalbl.setFont(font)
self.inputdatalbl.setObjectName(_fromUtf8("inputdatalbl"))
self.inpcmb = QtGui.QComboBox(self.frame)
self.inpcmb.setToolTip("Select any one of the option")
self.inpcmb.setGeometry(QtCore.QRect(10, 260, 721, 27))
self.inpcmb.addItem(_fromUtf8(""))
self.inpcmb.addItem(_fromUtf8(""))
self.inpcmb.addItem(_fromUtf8(""))
self.inpcmb.addItem(_fromUtf8(""))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.inpcmb.setFont(font)
self.inpcmb.setObjectName(_fromUtf8("inpcmb"))
        self.frame1 = QtGui.QFrame(self.scrollAreaWidgetContents) #frame that contains the SAM/BAM/CXB options
self.frame1.setGeometry(QtCore.QRect(50, 10, 741, 3000))
self.frame1.setFocusPolicy(QtCore.Qt.ClickFocus)
self.frame1.setFrameShape(QtGui.QFrame.WinPanel)
self.frame1.setFrameShadow(QtGui.QFrame.Raised)
self.frame1.setLineWidth(10)
#self.setStyleSheet('background-image: url("/home/amrata/PycharmProjects/bowtieuser/images/images/logn.jpg")')
self.frame1.setObjectName(_fromUtf8("frame1"))
self.ovrhngtollbl0 = QtGui.QLabel(self.frame1) #Conditions
self.ovrhngtollbl0.setGeometry(QtCore.QRect(10, 10, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.ovrhngtollbl0.setFont(font)
self.ovrhngtollbl0.setObjectName(_fromUtf8("ovrhngtollbl0"))
self.groupBox = QtGui.QGroupBox(self.frame1)
self.groupBox.setGeometry(QtCore.QRect(10, 30, 711, 261))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setTitle(_fromUtf8(""))
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.ovrhngtollbl = QtGui.QLabel(self.groupBox) #Name
self.ovrhngtollbl.setGeometry(QtCore.QRect(10, 10, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.ovrhngtollbl.setFont(font)
self.ovrhngtollbl.setObjectName(_fromUtf8("ovrhngtollbl"))
self.ovrtoltxt = QtGui.QLineEdit(self.groupBox)
self.ovrtoltxt.setGeometry(QtCore.QRect(10, 30, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.ovrtoltxt.setFont(font)
self.ovrtoltxt.setToolTip("Please provide the name for the file")
self.ovrtoltxt.setObjectName(_fromUtf8("ovrtoltxt"))
self.subrefanlbl1 = QtGui.QLabel(self.groupBox) # Replicates
self.subrefanlbl1.setGeometry(QtCore.QRect(10, 80, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanlbl1.setFont(font)
self.subrefanlbl1.setObjectName(_fromUtf8("subrefanlbl1"))
self.subrefanpushgrp1 = QtGui.QPushButton(self.groupBox)
self.subrefanpushgrp1.setGeometry(QtCore.QRect(10, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanpushgrp1.setFont(font)
self.subrefanpushgrp1.setToolTip("Click here to select single file")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.subrefanpushgrp1.setIcon(icon)
self.subrefanpushgrp1.setObjectName(_fromUtf8("subrefanpushgrp1"))
self.mulfile1 = QtGui.QPushButton(self.groupBox)
        self.mulfile1.setToolTip("Click here to select multiple files")
self.mulfile1.setGeometry(QtCore.QRect(50, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mulfile1.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index1.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mulfile1.setIcon(icon)
self.mulfile1.setObjectName(_fromUtf8("mulfile1"))
self.subrefcombgrp1 = QtGui.QComboBox(self.groupBox)
self.subrefcombgrp1.setGeometry(QtCore.QRect(90, 100, 601, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.subrefcombgrp1.setFont(font)
self.subrefcombgrp1.setObjectName(_fromUtf8("subrefcomb"))
        self.groupBox1 = QtGui.QGroupBox(self.frame1) # another groupbox that contains replicates and SAM files
self.groupBox1.setGeometry(QtCore.QRect(10, 170, 711, 261))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.groupBox1.setFont(font)
self.groupBox1.setTitle(_fromUtf8(""))
self.groupBox1.setObjectName(_fromUtf8("groupBox1"))
self.ovrhngtollbl1 = QtGui.QLabel(self.groupBox1) # Name
self.ovrhngtollbl1.setGeometry(QtCore.QRect(10, 10, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.ovrhngtollbl1.setFont(font)
self.ovrhngtollbl1.setObjectName(_fromUtf8("ovrhngtollbl1"))
self.ovrtoltxt1 = QtGui.QLineEdit(self.groupBox1)
self.ovrtoltxt1.setToolTip("Please provide the name for the file")
self.ovrtoltxt1.setGeometry(QtCore.QRect(10, 30, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.ovrtoltxt1.setFont(font)
self.ovrtoltxt1.setObjectName(_fromUtf8("ovrtoltxt1"))
self.subrefanlbl = QtGui.QLabel(self.groupBox1)
self.subrefanlbl.setGeometry(QtCore.QRect(10, 80, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanlbl.setFont(font)
self.subrefanlbl.setObjectName(_fromUtf8("subrefanlbl"))
self.subrefanpushgrp2 = QtGui.QPushButton(self.groupBox1)
self.subrefanpushgrp2.setGeometry(QtCore.QRect(10, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanpushgrp2.setFont(font)
self.subrefanpushgrp2.setToolTip("Click here to select single file")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.subrefanpushgrp2.setIcon(icon)
self.subrefanpushgrp2.setObjectName(_fromUtf8("subrefanpushgrp2"))
self.mulfile3 = QtGui.QPushButton(self.groupBox1)
self.mulfile3.setGeometry(QtCore.QRect(50, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mulfile3.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index1.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mulfile3.setIcon(icon)
self.mulfile3.setToolTip("Click here to select multiple file")
self.mulfile3.setObjectName(_fromUtf8("mulfile3"))
self.subrefcombgrp2 = QtGui.QComboBox(self.groupBox1)
self.subrefcombgrp2.setGeometry(QtCore.QRect(90, 100, 601, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.subrefcombgrp2.setFont(font)
self.subrefcombgrp2.setObjectName(_fromUtf8("subrefcombgrp2"))
self.subrefanpushp = QtGui.QPushButton(self.frame1)
self.subrefanpushp.setGeometry(QtCore.QRect(10, 330, 155, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanpushp.setFont(font)
self.subrefanpushp.setText(_fromUtf8(""))
        self.subrefanpushp.setToolTip("Click here to submit these files and insert more files")
self.subrefanpushp.setObjectName(_fromUtf8("subrefanpushp"))
self.subrefanpushok = QtGui.QPushButton(self.frame1)
self.subrefanpushok.setGeometry(QtCore.QRect(620, 330, 85, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.subrefanpushok.setFont(font)
        self.subrefanpushok.setToolTip("Click here to submit these values and go back to the previous menu")
self.subrefanpushok.setObjectName(_fromUtf8("subrefanpushok"))
self.sublib = QtGui.QLabel(self.frame) #Select Library normalization method
self.sublib.setGeometry(QtCore.QRect(10, 310, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.sublib.setFont(font)
self.sublib.setObjectName(_fromUtf8("sublib"))
self.subrefcomb = QtGui.QComboBox(self.frame)
self.subrefcomb.setGeometry(QtCore.QRect(10, 330, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.subrefcomb.setFont(font)
self.subrefcomb.setToolTip("Select any one of the options")
self.subrefcomb.addItem(_fromUtf8(""))
self.subrefcomb.addItem(_fromUtf8(""))
self.subrefcomb.addItem(_fromUtf8(""))
self.subrefcomb.setObjectName(_fromUtf8("subrefcomb"))
self.displib = QtGui.QLabel(self.frame) #Select Dispersion estimation method
self.displib.setGeometry(QtCore.QRect(10, 380, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.displib.setFont(font)
self.displib.setObjectName(_fromUtf8("displib"))
self.dispcomb = QtGui.QComboBox(self.frame)
self.dispcomb.setGeometry(QtCore.QRect(10, 400, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.dispcomb.setFont(font)
self.dispcomb.setToolTip("Select any one of the options")
self.dispcomb.addItem(_fromUtf8(""))
self.dispcomb.addItem(_fromUtf8(""))
self.dispcomb.addItem(_fromUtf8(""))
self.dispcomb.addItem(_fromUtf8(""))
self.dispcomb.setObjectName(_fromUtf8("dispcomb"))
        self.faldlbl = QtGui.QLabel(self.frame) #Set value for False Discovery Rate
self.faldlbl.setGeometry(QtCore.QRect(10, 450, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.faldlbl.setFont(font)
self.faldlbl.setObjectName(_fromUtf8("faldlbl"))
self.faldtxt = QtGui.QLineEdit(self.frame)
self.faldtxt.setGeometry(QtCore.QRect(10, 470, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.faldtxt.setFont(font)
self.faldtxt.setToolTip("Please Provide integer value")
self.faldtxt.setObjectName(_fromUtf8("faldtxt"))
        self.faldlbl1 = QtGui.QLabel(self.frame) #Set value for Min Alignment Count
self.faldlbl1.setGeometry(QtCore.QRect(10, 520, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.faldlbl1.setFont(font)
self.faldlbl1.setObjectName(_fromUtf8("faldlbl1"))
self.faldtxt1 = QtGui.QLineEdit(self.frame)
self.faldtxt1.setGeometry(QtCore.QRect(10, 540, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.faldtxt1.setFont(font)
self.faldtxt1.setToolTip("Please Provide integer value")
self.faldtxt1.setObjectName(_fromUtf8("faldtxt"))
self.sqlbl1 = QtGui.QLabel(self.frame) #Do you want to Use multi-read correct
self.sqlbl1.setGeometry(QtCore.QRect(10, 590, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.sqlbl1.setFont(font)
self.sqlbl1.setObjectName(_fromUtf8("sqlbl1"))
self.asno11 = QtGui.QPushButton(self.frame)
self.asno11.setGeometry(QtCore.QRect(60, 610, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asno11.setFont(font)
self.asno11.setObjectName(_fromUtf8("asno11"))
self.asyes11 = QtGui.QPushButton(self.frame)
        self.asyes11.setToolTip("Click here to use multi-read correct, otherwise click No")
self.asyes11.setGeometry(QtCore.QRect(10, 610, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.asyes11.setFont(font)
self.asyes11.setObjectName(_fromUtf8("asyes11"))
self.multireadlbl = QtGui.QLabel(self.frame) #Do you want to Include Read Group Datasets
self.multireadlbl.setGeometry(QtCore.QRect(10, 660, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.multireadlbl.setFont(font)
self.multireadlbl.setObjectName(_fromUtf8("multireadlbl"))
self.multireadcmb = QtGui.QComboBox(self.frame)
self.multireadcmb.setGeometry(QtCore.QRect(10, 680, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.multireadcmb.setFont(font)
self.multireadcmb.setObjectName(_fromUtf8("multireadcmb"))
self.multireadcmb.setToolTip("Click yes to include Read Group Datasets otherwise Click No")
self.multireadcmb.addItem(_fromUtf8(""))
self.multireadcmb.addItem(_fromUtf8(""))
self.multireadlbl1 = QtGui.QLabel(self.frame) #Do you want to Include Count Based output files
self.multireadlbl1.setGeometry(QtCore.QRect(10, 730, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.multireadlbl1.setFont(font)
self.multireadlbl1.setObjectName(_fromUtf8("multireadlbl1"))
self.multireadcmb1 = QtGui.QComboBox(self.frame)
self.multireadcmb1.setGeometry(QtCore.QRect(10, 750, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.multireadcmb1.setFont(font)
self.multireadcmb1.setObjectName(_fromUtf8("multireadcmb1"))
self.multireadcmb1.setToolTip("Click yes to include Count Based output files otherwise click No")
self.multireadcmb1.addItem(_fromUtf8(""))
self.multireadcmb1.addItem(_fromUtf8(""))
self.lengthcorlbl = QtGui.QLabel(self.frame) #Do you want to Apply length correction
self.lengthcorlbl.setGeometry(QtCore.QRect(10, 800, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.lengthcorlbl.setFont(font)
self.lengthcorlbl.setObjectName(_fromUtf8("lengthcorlbl"))
self.lengthcorcmb = QtGui.QComboBox(self.frame)
self.lengthcorcmb.setToolTip("Select any one of the following options")
self.lengthcorcmb.setGeometry(QtCore.QRect(10, 820, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.lengthcorcmb.setFont(font)
self.lengthcorcmb.setObjectName(_fromUtf8("lengthcorcmb"))
self.lengthcorcmb.addItem(_fromUtf8(""))
self.lengthcorcmb.addItem(_fromUtf8(""))
self.lengthcorcmb.addItem(_fromUtf8(""))
self.advcufoptlbl = QtGui.QLabel(self.frame) #Do you want to Set Additional Parameters for single end reads
self.advcufoptlbl.setGeometry(QtCore.QRect(10, 870, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.advcufoptlbl.setFont(font)
self.advcufoptlbl.setObjectName(_fromUtf8("advcufoptlbl"))
self.advcufoptcmb = QtGui.QComboBox(self.frame)
self.advcufoptcmb.setGeometry(QtCore.QRect(10, 890, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.advcufoptcmb.setFont(font)
self.advcufoptcmb.setObjectName(_fromUtf8("advcufoptcmb"))
self.advcufoptcmb.setToolTip("Click yes to set additional Cuffdiff option otherwise click No")
self.advcufoptcmb.addItem(_fromUtf8(""))
self.advcufoptcmb.addItem(_fromUtf8(""))
        self.groupBox_4 = QtGui.QGroupBox(self.frame) #groupbox that contains additional Cuffdiff options
self.groupBox_4.setGeometry(QtCore.QRect(10, 910, 741, 1081))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.groupBox_4.setFont(font)
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.avgfraglbl = QtGui.QLabel(self.groupBox_4) #Do you want to set the Average Fragment Length
self.avgfraglbl.setGeometry(QtCore.QRect(0, 20, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.avgfraglbl.setFont(font)
self.avgfraglbl.setObjectName(_fromUtf8("avgfraglbl"))
self.avgtxt = QtGui.QLineEdit(self.groupBox_4)
self.avgtxt.setGeometry(QtCore.QRect(0, 40, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.avgtxt.setFont(font)
self.avgtxt.setToolTip("Please provide integer value")
self.avgtxt.setObjectName(_fromUtf8("avgtxt"))
self.intdiststddevlbl = QtGui.QLabel(self.groupBox_4) #Do you want to set the Fragment Length Standard Deviation
self.intdiststddevlbl.setGeometry(QtCore.QRect(0,90, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.intdiststddevlbl.setFont(font)
self.intdiststddevlbl.setObjectName(_fromUtf8("intdiststddevlbl"))
self.intdiststddevtxt = QtGui.QLineEdit(self.groupBox_4)
self.intdiststddevtxt.setToolTip("Please provide integer value")
self.intdiststddevtxt.setGeometry(QtCore.QRect(0, 110, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.intdiststddevtxt.setFont(font)
self.intdiststddevtxt.setObjectName(_fromUtf8("intdiststddevtxt"))
        self.advgrp = QtGui.QGroupBox(self.frame) # groupbox that contains advanced Cuffdiff options
self.advgrp.setGeometry(QtCore.QRect(10, 940, 741, 1081))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.advgrp.setFont(font)
self.advgrp.setObjectName(_fromUtf8("advgrp"))
self.advcufoptlbl1 = QtGui.QLabel(self.advgrp) #Do you want to Set Advanced Cuffdiff parameters
self.advcufoptlbl1.setGeometry(QtCore.QRect(0,0, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.advcufoptlbl1.setFont(font)
self.advcufoptlbl1.setObjectName(_fromUtf8("advcufoptlbl1"))
self.advcufoptcmb1 = QtGui.QComboBox(self.advgrp)
self.advcufoptcmb1.setGeometry(QtCore.QRect(0, 20, 721, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.advcufoptcmb1.setFont(font)
self.advcufoptcmb1.setObjectName(_fromUtf8("advcufoptcmb"))
self.advcufoptcmb1.setToolTip("Click yes to set advanced Cuffdiff option otherwise click No")
self.advcufoptcmb1.addItem(_fromUtf8(""))
self.advcufoptcmb1.addItem(_fromUtf8(""))
        self.advancegrp = QtGui.QFrame(self.scrollAreaWidgetContents) # frame that contains advanced Cuffdiff options
self.advancegrp.setGeometry(QtCore.QRect(50, 50, 761, 2081))
self.advancegrp.setFocusPolicy(QtCore.Qt.ClickFocus)
self.advancegrp.setFrameShape(QtGui.QFrame.WinPanel)
self.advancegrp.setFrameShadow(QtGui.QFrame.Raised)
self.advancegrp.setLineWidth(10)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.advancegrp.setFont(font)
self.advancegrp.setObjectName(_fromUtf8("advancegrp"))
self.libinplbl = QtGui.QLabel(self.advancegrp) #Do you want to use Library prep used for input reads
self.libinplbl.setGeometry(QtCore.QRect(30, 10, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.libinplbl.setFont(font)
self.libinplbl.setObjectName(_fromUtf8("libinplbl"))
self.libinpcmb = QtGui.QComboBox(self.advancegrp)
self.libinpcmb.setGeometry(QtCore.QRect(30, 30, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.libinpcmb.setFont(font)
self.libinpcmb.setToolTip("Select any one option")
self.libinpcmb.setObjectName(_fromUtf8("libinpcmb"))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.libinpcmb.addItem(_fromUtf8(""))
self.masklbl = QtGui.QLabel(self.advancegrp) #Select Mask File
self.masklbl.setGeometry(QtCore.QRect(30, 80, 711, 20))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.masklbl.setFont(font)
self.masklbl.setObjectName(_fromUtf8("masklbl"))
self.maskpush = QtGui.QPushButton(self.advancegrp)
self.maskpush.setToolTip("Click here to select single mask file")
self.maskpush.setGeometry(QtCore.QRect(30, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.maskpush.setFont(font)
self.maskpush.setText(_fromUtf8(""))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.maskpush.setIcon(icon)
self.maskpush.setObjectName(_fromUtf8("maskpush"))
self.mulfile5 = QtGui.QPushButton(self.advancegrp)
self.mulfile5.setGeometry(QtCore.QRect(70, 100, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mulfile5.setFont(font)
        self.mulfile5.setToolTip("Click here to select multiple mask files")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index1.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mulfile5.setIcon(icon)
self.mulfile5.setObjectName(_fromUtf8("mulfile5"))
self.maskcmb = QtGui.QComboBox(self.advancegrp)
self.maskcmb.setGeometry(QtCore.QRect(110, 100, 601, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
# font.setBold(True)
font.setWeight(50)
self.maskcmb.setFont(font)
self.maskcmb.setObjectName(_fromUtf8("maskcmb"))
self.timlbl = QtGui.QLabel(self.advancegrp) #Do you want to Perform Time Series analysis
self.timlbl.setGeometry(QtCore.QRect(30, 150, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.timlbl.setFont(font)
self.timlbl.setObjectName(_fromUtf8("timlbl"))
self.no1 = QtGui.QPushButton(self.advancegrp)
self.no1.setGeometry(QtCore.QRect(80, 170, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.no1.setFont(font)
self.no1.setObjectName(_fromUtf8("no1"))
self.yes1 = QtGui.QPushButton(self.advancegrp)
self.yes1.setToolTip("Click here if want to Perform Time Series analysis otherwise Click NO")
self.yes1.setGeometry(QtCore.QRect(30, 170, 51, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setWeight(50)
self.yes1.setFont(font)
self.yes1.setObjectName(_fromUtf8("yes1"))
self.maxmlelbl = QtGui.QLabel(self.advancegrp) #Do you want to set the Max MLE iterations
self.maxmlelbl.setGeometry(QtCore.QRect(30, 220, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.maxmlelbl.setFont(font)
self.maxmlelbl.setObjectName(_fromUtf8("maxmlelbl"))
self.maxmletxt = QtGui.QLineEdit(self.advancegrp)
self.maxmletxt.setToolTip("Please provide integer value")
self.maxmletxt.setGeometry(QtCore.QRect(30, 240, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.maxmletxt.setFont(font)
self.maxmletxt.setObjectName(_fromUtf8("maxmletxt"))
self.hitslbl1 = QtGui.QLabel(self.advancegrp) #Do you want to Select the Hits included in normalization
self.hitslbl1.setGeometry(QtCore.QRect(30, 290, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.hitslbl1.setFont(font)
self.hitslbl1.setObjectName(_fromUtf8("hitslbl1"))
self.hitscmb = QtGui.QComboBox(self.advancegrp)
self.hitscmb.setToolTip("Select any one option")
self.hitscmb.setGeometry(QtCore.QRect(30, 310, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
#font.setBold(True)
font.setWeight(50)
self.hitscmb.setFont(font)
self.hitscmb.setObjectName(_fromUtf8("hitscmb"))
self.hitscmb.addItem(_fromUtf8(""))
self.hitscmb.addItem(_fromUtf8(""))
self.maxfraglbl = QtGui.QLabel(self.advancegrp) #Do you want to set Maximum number of fragments per locus
self.maxfraglbl.setGeometry(QtCore.QRect(30, 360, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.maxfraglbl.setFont(font)
self.maxfraglbl.setObjectName(_fromUtf8("maxfraglbl"))
self.maxtxt = QtGui.QLineEdit(self.advancegrp)
self.maxtxt.setToolTip("Please provide integer values")
self.maxtxt.setGeometry(QtCore.QRect(30, 380, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.maxtxt.setFont(font)
self.maxtxt.setObjectName(_fromUtf8("maxtxt"))
self.numfraglbl = QtGui.QLabel(self.advancegrp) #Do you want to set Number of fragment generation sample
self.numfraglbl.setGeometry(QtCore.QRect(30, 430, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.numfraglbl.setFont(font)
self.numfraglbl.setObjectName(_fromUtf8("numfraglbl"))
self.numtxt = QtGui.QLineEdit(self.advancegrp)
self.numtxt.setToolTip("Please provide integer values")
self.numtxt.setGeometry(QtCore.QRect(30, 450, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.numtxt.setFont(font)
self.numtxt.setObjectName(_fromUtf8("numtxt"))
self.assamplbl = QtGui.QLabel(self.advancegrp) #Do you want to set Number of fragment assignment samples per generations
self.assamplbl.setGeometry(QtCore.QRect(30, 500, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.assamplbl.setFont(font)
self.assamplbl.setObjectName(_fromUtf8("assamplbl"))
self.asamptxt = QtGui.QLineEdit(self.advancegrp)
self.asamptxt.setToolTip("Please provide integer values")
self.asamptxt.setGeometry(QtCore.QRect(30, 520, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.asamptxt.setFont(font)
self.asamptxt.setObjectName(_fromUtf8("asamptxt"))
self.minreplbl = QtGui.QLabel(self.advancegrp) #Do you want to set Minimal Replicates for isoform shift testing
self.minreplbl.setGeometry(QtCore.QRect(30, 570, 711, 17))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.minreplbl.setFont(font)
self.minreplbl.setObjectName(_fromUtf8("minreplbl"))
self.minreptxt = QtGui.QLineEdit(self.advancegrp)
self.minreptxt.setToolTip("Please provide integer values")
self.minreptxt.setGeometry(QtCore.QRect(30, 590, 681, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.minreptxt.setFont(font)
self.minreptxt.setObjectName(_fromUtf8("minreptxt"))
self.otherfrm = QtGui.QFrame(self.advancegrp) #frame that contains other options
self.otherfrm.setGeometry(QtCore.QRect(20, 640, 791, 1521))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.otherfrm.setFont(font)
self.otherfrm.setObjectName(_fromUtf8("otherfrm"))
self.help = QtGui.QLabel(self.otherfrm) #Do you want to Prints the help message
self.help.setGeometry(QtCore.QRect(10, 20, 511, 17))
self.help.setObjectName(_fromUtf8("help"))
self.helpyes = QtGui.QPushButton(self.otherfrm)
self.helpyes.setToolTip("Click here if you want to Prints the help message otherwise click NO")
self.helpyes.setGeometry(QtCore.QRect(10, 40, 51, 27))
self.helpyes.setObjectName(_fromUtf8("helpyes"))
self.helpno = QtGui.QPushButton(self.otherfrm)
self.helpno.setGeometry(QtCore.QRect(60, 40, 51, 27))
self.helpno.setObjectName(_fromUtf8("helpno"))
self.threadl = QtGui.QLabel(self.otherfrm) #Please enter the number of alignment threads to launch
self.threadl.setGeometry(QtCore.QRect(10, 160, 641, 17))
self.threadl.setObjectName(_fromUtf8("threadl"))
self.thrdtxt = QtGui.QLineEdit(self.otherfrm)
self.thrdtxt.setToolTip("Please provide integer values")
self.thrdtxt.setGeometry(QtCore.QRect(10, 180, 681, 27))
self.thrdtxt.setObjectName(_fromUtf8("thrdtxt"))
self.outlb = QtGui.QLabel(self.otherfrm) #Do you want to write all output files to this directory
self.outlb.setGeometry(QtCore.QRect(10, 90, 541, 17))
self.outlb.setObjectName(_fromUtf8("outlb"))
self.outdirtxt = QtGui.QLineEdit(self.otherfrm)
self.outdirtxt.setToolTip("Please Provide path of the directory")
self.outdirtxt.setGeometry(QtCore.QRect(10, 110, 681, 27))
self.outdirtxt.setObjectName(_fromUtf8("outdirtxt"))
self.idl = QtGui.QLabel(self.otherfrm) #Do you want to assembled transcripts have this ID prefix
self.idl.setGeometry(QtCore.QRect(10, 230, 711, 17))
self.idl.setObjectName(_fromUtf8("idl"))
self.idtxt = QtGui.QLineEdit(self.otherfrm)
self.idtxt.setToolTip("Please provide ID value")
self.idtxt.setGeometry(QtCore.QRect(10, 250, 681, 27))
self.idtxt.setObjectName(_fromUtf8("idtxt"))
self.biasln = QtGui.QLabel(self.otherfrm) #Do you want to use bias correction
self.biasln.setGeometry(QtCore.QRect(10, 300, 491, 17))
self.biasln.setObjectName(_fromUtf8("biasln"))
self.biastxtcor = QtGui.QComboBox(self.otherfrm)
self.biastxtcor.setGeometry(QtCore.QRect(90, 320, 601, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.biastxtcor.setFont(font)
self.biastxtcor.setObjectName(_fromUtf8("biastxtcor"))
self.biaspus = QtGui.QPushButton(self.otherfrm)
self.biaspus.setToolTip("Click here to select single file")
self.biaspus.setGeometry(QtCore.QRect(10, 320, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.biaspus.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.biaspus.setIcon(icon)
self.biaspus.setObjectName(_fromUtf8("biaspus"))
self.mulfile4 = QtGui.QPushButton(self.otherfrm)
self.mulfile4.setGeometry(QtCore.QRect(50, 320, 31, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.mulfile4.setFont(font)
self.mulfile4.setToolTip("Click here to select multiple file for bias correction")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("/home/amrata/PycharmProjects/bowtieuser/images/index1.png")),
QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mulfile4.setIcon(icon)
self.mulfile4.setObjectName(_fromUtf8("mulfile4"))
self.difanal = QtGui.QLabel(self.otherfrm) #Do you want to generate differential analysis files
self.difanal.setGeometry(QtCore.QRect(10, 370, 611, 17))
self.difanal.setObjectName(_fromUtf8("difanal"))
self.diffno = QtGui.QPushButton(self.otherfrm)
self.diffno.setGeometry(QtCore.QRect(60, 390, 51, 27))
self.diffno.setObjectName(_fromUtf8("diffno"))
self.diffyes = QtGui.QPushButton(self.otherfrm)
self.diffyes.setToolTip("Click here if you want to generate differential analysis files otherwise click No" )
self.diffyes.setGeometry(QtCore.QRect(10, 390, 51, 27))
self.diffyes.setObjectName(_fromUtf8("diffyes"))
self.iso = QtGui.QLabel(self.otherfrm) #Do you want to perform isoform switching tests
self.iso.setGeometry(QtCore.QRect(10, 440, 601, 17))
self.iso.setObjectName(_fromUtf8("iso"))
self.isono = QtGui.QPushButton(self.otherfrm)
self.isono.setGeometry(QtCore.QRect(60, 460, 51, 27))
self.isono.setObjectName(_fromUtf8("isono"))
self.isoyes = QtGui.QPushButton(self.otherfrm)
self.isoyes.setToolTip("Click here if you want to perform isoform switching tests otherwise click No")
self.isoyes.setGeometry(QtCore.QRect(10, 460, 51, 27))
self.isoyes.setObjectName(_fromUtf8("isoyes"))
self.dias = QtGui.QLabel(self.otherfrm) #Do you want to Print lots of status updates and other diagnostic information
self.dias.setGeometry(QtCore.QRect(10, 510, 631, 17))
self.dias.setObjectName(_fromUtf8("dias"))
self.printyes = QtGui.QPushButton(self.otherfrm)
self.printyes.setToolTip("Click here if you want to Print lots of status updates and other diagnostic information otherwise click No")
self.printyes.setGeometry(QtCore.QRect(10, 530, 51, 27))
self.printyes.setObjectName(_fromUtf8("printyes"))
self.prntno = QtGui.QPushButton(self.otherfrm)
self.prntno.setGeometry(QtCore.QRect(60, 530, 51, 27))
self.prntno.setObjectName(_fromUtf8("prntno"))
self.warn = QtGui.QLabel(self.otherfrm) #Do you want to Suppress messages other than serious warnings and errors
self.warn.setGeometry(QtCore.QRect(10, 580, 611, 17))
self.warn.setObjectName(_fromUtf8("warn"))
self.suppno = QtGui.QPushButton(self.otherfrm)
self.suppno.setGeometry(QtCore.QRect(60, 600, 51, 27))
self.suppno.setObjectName(_fromUtf8("suppno"))
self.suppyes = QtGui.QPushButton(self.otherfrm)
self.suppyes.setToolTip("Click here if you want to Suppress messages other than serious warnings and errors otherwise click No")
self.suppyes.setGeometry(QtCore.QRect(10, 600, 51, 27))
self.suppyes.setObjectName(_fromUtf8("suppyes"))
self.updat = QtGui.QLabel(self.otherfrm) #Do you want to contact server to check for update availability
self.updat.setGeometry(QtCore.QRect(10, 650, 661, 17))
self.updat.setObjectName(_fromUtf8("updat"))
self.servyes = QtGui.QPushButton(self.otherfrm)
self.servyes.setToolTip("Click here if you want to contact server to check for update availability otherwise click No")
self.servyes.setGeometry(QtCore.QRect(10, 670, 51, 27))
self.servyes.setObjectName(_fromUtf8("servyes"))
self.servno = QtGui.QPushButton(self.otherfrm)
self.servno.setGeometry(QtCore.QRect(60, 670, 51, 27))
self.servno.setObjectName(_fromUtf8("servno"))
self.dispersion = QtGui.QLabel(self.otherfrm) #Want to Use the Poisson fragment dispersion model instead of learning one in each condition
self.dispersion.setGeometry(QtCore.QRect(10, 720, 771, 17))
self.dispersion.setObjectName(_fromUtf8("dispersion"))
self.poisno = QtGui.QPushButton(self.otherfrm)
self.poisno.setGeometry(QtCore.QRect(60, 740, 51, 27))
self.poisno.setObjectName(_fromUtf8("poisno"))
self.Poiyes = QtGui.QPushButton(self.otherfrm)
self.Poiyes.setToolTip("click here if you Want to Use the Poisson fragment dispersion model instead of learning one in each condition otherwise click No")
self.Poiyes.setGeometry(QtCore.QRect(10, 740, 51, 27))
self.Poiyes.setObjectName(_fromUtf8("Poiyes"))
self.overdisp = QtGui.QLabel(self.otherfrm) #Do you want to print count tables used to fit overdispersion
self.overdisp.setGeometry(QtCore.QRect(10, 790, 641, 17))
self.overdisp.setObjectName(_fromUtf8("overdisp"))
self.tabno = QtGui.QPushButton(self.otherfrm)
self.tabno.setGeometry(QtCore.QRect(60, 810, 51, 27))
self.tabno.setObjectName(_fromUtf8("tabno"))
self.tabyes = QtGui.QPushButton(self.otherfrm)
self.tabyes.setToolTip("Click here if you want to print count tables used to fit overdispersion otherwise click No")
self.tabyes.setGeometry(QtCore.QRect(10, 810, 51, 27))
self.tabyes.setObjectName(_fromUtf8("tabyes"))
self.skipl = QtGui.QLabel(self.otherfrm) #Set the maximum fragments allowed in a bundle before skipping
self.skipl.setGeometry(QtCore.QRect(10, 860, 671, 17))
self.skipl.setObjectName(_fromUtf8("skipl"))
self.skiptxt = QtGui.QLineEdit(self.otherfrm)
self.skiptxt.setToolTip("Please provide integer value")
self.skiptxt.setGeometry(QtCore.QRect(10, 880, 681, 27))
self.skiptxt.setObjectName(_fromUtf8("skiptxt"))
self.numsaml = QtGui.QLabel(self.otherfrm) #Set the Number of fragment generation samples
self.numsaml.setGeometry(QtCore.QRect(10, 930, 641, 17))
self.numsaml.setObjectName(_fromUtf8("numsaml"))
self.fragentxsm = QtGui.QLineEdit(self.otherfrm)
self.fragentxsm.setToolTip("Please provide integer value")
self.fragentxsm.setGeometry(QtCore.QRect(10, 950, 681, 27))
self.fragentxsm.setObjectName(_fromUtf8("fragentxsm"))
self.assgn = QtGui.QLabel(self.otherfrm) #Set the Number of fragment assignment samples per generation
self.assgn.setGeometry(QtCore.QRect(10, 1000, 731, 17))
self.assgn.setObjectName(_fromUtf8("assgn"))
self.pergen = QtGui.QLineEdit(self.otherfrm)
self.pergen.setToolTip("Please provide integer value")
self.pergen.setGeometry(QtCore.QRect(10, 1020, 681, 27))
self.pergen.setObjectName(_fromUtf8("pergen"))
self.testing = QtGui.QLabel(self.otherfrm) #Set Replicates needed for relative isoform shift testing
self.testing.setGeometry(QtCore.QRect(10, 1070, 681, 17))
self.testing.setObjectName(_fromUtf8("testing"))
self.pergen_2 = QtGui.QLineEdit(self.otherfrm)
self.pergen_2.setToolTip("Please provide integer value")
self.pergen_2.setGeometry(QtCore.QRect(10, 1090, 681, 27))
self.pergen_2.setObjectName(_fromUtf8("pergen_2"))
self.ok = QtGui.QPushButton(self.advancegrp)
self.ok.setGeometry(QtCore.QRect(550, 1800, 98, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.ok.setFont(font)
        self.ok.setToolTip("Click here to go back to the previous menu")
self.ok.setObjectName(_fromUtf8("ok"))
self.execute = QtGui.QPushButton(self.frame)
        self.execute.setToolTip("Click here to submit these values")
self.execute.setGeometry(QtCore.QRect(600, 1030, 98, 27))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Droid Serif"))
font.setPointSize(11)
font.setBold(False)
font.setWeight(50)
self.execute.setFont(font)
self.execute.setObjectName(_fromUtf8("execute"))
self.scrollArea.setWidget(self.scrollAreaWidgetContents)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 829, 25))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
MainWindow.setCentralWidget(self.centralWidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "CUFFDIFF", None))
self.selgtflbl.setText(_translate("MainWindow","Transcripts",None))
self.omitlbl.setText(_translate("MainWindow", "Do you want to Omit Tabular Datasets.", None))
self.asno.setText(_translate("MainWindow", "No", None))
self.asyes.setText(_translate("MainWindow", "Yes", None))
self.sqlbl.setText(_translate("MainWindow", "Do you want to Generate SQLite.", None))
self.asno1.setText(_translate("MainWindow", "No", None))
self.asyes1.setText(_translate("MainWindow", "Yes", None))
self.inputdatalbl.setText(_translate("MainWindow", "Do you want to Provide Input data type.", None))
self.inpcmb.setItemText(0, _translate("MainWindow", "SAM/BAM", None))
self.inpcmb.setItemText(1, _translate("MainWindow", "Cuffquant(CXB)", None))
self.inpcmb.setItemText(2, _translate("MainWindow", "List of single replicate condition", None))
self.inpcmb.setItemText(3, _translate("MainWindow", "List of multiple replicate condition", None))
self.ovrhngtollbl0.setText(_translate("MainWindow", "Conditions.", None))
self.ovrhngtollbl.setText(_translate("MainWindow", "Name.", None))
self.ovrhngtollbl1.setText(_translate("MainWindow", "Name.", None))
self.subrefanlbl1.setText(_translate("MainWindow", "Replicates", None))
self.subrefanlbl.setText(_translate("MainWindow", "Replicates.", None))
self.subrefanpushp.setText(_translate("MainWindow", " + Insert condition", None))
self.sublib.setText(_translate("MainWindow", "Select Library normalization method\n"
"", None))
self.subrefcomb.setItemText(0, _translate("MainWindow", "geometric", None))
self.subrefcomb.setItemText(1, _translate("MainWindow", "classic-fpkm", None))
self.subrefcomb.setItemText(2, _translate("MainWindow", "quartile", None))
self.displib.setText(_translate("MainWindow", "Select Dispersion estimation method\n"
"", None))
self.dispcomb.setItemText(0, _translate("MainWindow", "pooled", None))
self.dispcomb.setItemText(1, _translate("MainWindow", "per-condition", None))
self.dispcomb.setItemText(2, _translate("MainWindow", "blind", None))
self.dispcomb.setItemText(2, _translate("MainWindow", "poisson", None))
        self.faldlbl.setText(_translate("MainWindow", "Set value for False Discovery Rate", None))
        self.faldlbl1.setText(_translate("MainWindow", "Set value for Min Alignment Count", None))
self.sqlbl1.setText(_translate("MainWindow", "Do you want to Use multi-read correct.", None))
self.asno11.setText(_translate("MainWindow", "No", None))
self.asyes11.setText(_translate("MainWindow", "Yes", None))
self.multireadlbl.setText(_translate("MainWindow", "Do you want to Include Read Group Datasets?", None))
self.multireadcmb.setItemText(0, _translate("MainWindow", "No", None))
self.multireadcmb.setItemText(1, _translate("MainWindow", "Yes", None))
self.multireadlbl1.setText(_translate("MainWindow", "Do you want to Include Count Based output files", None))
self.multireadcmb1.setItemText(0, _translate("MainWindow", "No", None))
self.multireadcmb1.setItemText(1, _translate("MainWindow", "Yes", None))
self.lengthcorlbl.setText(_translate("MainWindow", "Do you want to Apply length correction?", None))
self.lengthcorcmb.setItemText(0, _translate("MainWindow", "Cufflinks Effective Length Correction", None))
self.lengthcorcmb.setItemText(1, _translate("MainWindow", "Standard alength Correction", None))
self.lengthcorcmb.setItemText(2, _translate("MainWindow", "No Length Correction at all (use raw counts)", None))
self.advcufoptlbl.setText(_translate("MainWindow", "Do you want to Set Additional Parameters for single end reads?", None))
self.advcufoptcmb.setItemText(0, _translate("MainWindow", "Yes", None))
self.advcufoptcmb.setItemText(1, _translate("MainWindow", "No", None))
self.avgfraglbl.setText(_translate("MainWindow", "Do you want to set the Average Fragment Length", None))
self.intdiststddevlbl.setText(_translate("MainWindow", "Do you want to set the Fragment Length Standard Deviation.", None))
self.advcufoptlbl1.setText(_translate("MainWindow", "Do you want to Set Advanced Cuffdiff parameters? ", None))
self.advcufoptcmb1.setItemText(0, _translate("MainWindow", "Yes", None))
self.advcufoptcmb1.setItemText(1, _translate("MainWindow", "No", None))
self.libinplbl.setText(_translate("MainWindow", "Do you want to use Library prep used for input reads?", None))
self.libinpcmb.setItemText(0, _translate("MainWindow", "Auto Detect", None))
self.libinpcmb.setItemText(1, _translate("MainWindow", "ff-firststrand", None))
self.libinpcmb.setItemText(2, _translate("MainWindow", "ff-secondstrand", None))
self.libinpcmb.setItemText(3, _translate("MainWindow", "ff-unstranded", None))
self.libinpcmb.setItemText(4, _translate("MainWindow", "fr-firststrand", None))
self.libinpcmb.setItemText(5, _translate("MainWindow", "fr-secondstrand", None))
self.libinpcmb.setItemText(6, _translate("MainWindow", "fr-unstranded", None))
self.libinpcmb.setItemText(7, _translate("MainWindow", "transfrags", None))
self.masklbl.setText(_translate("MainWindow", "Select Mask File", None))
self.timlbl.setText(_translate("MainWindow", "Do you want to Perform Time Series analysis.", None))
self.no1.setText(_translate("MainWindow", "No", None))
self.yes1.setText(_translate("MainWindow", "Yes", None))
self.maxmlelbl.setText(_translate("MainWindow", "Do you want to set the Max MLE iterations.", None))
self.hitslbl1.setText(_translate("MainWindow", "Do you want to Select the Hits included in normalization?", None))
self.hitscmb.setItemText(0, _translate("MainWindow", "Compatible Hits", None))
self.hitscmb.setItemText(1, _translate("MainWindow", "All Hits", None))
self.maxfraglbl.setText(_translate("MainWindow", "Do you want to set Maximum number of fragments per locus?", None))
self.numfraglbl.setText(_translate("MainWindow", "Do you want to set Number of fragment generation samples", None))
        self.assamplbl.setText(_translate("MainWindow", "Do you want to set Number of fragment assignment samples per generation", None))
self.minreplbl.setText(_translate("MainWindow", "Do you want to set Minimal Replicates for isoform shift testing", None))
self.execute.setText(_translate("MainWindow", "Execute", None))
self.ok.setText(_translate("MainWindow", "OK", None))
self.help.setText(_translate("MainWindow", "Do you want to Prints the help message", None))
self.helpyes.setText(_translate("MainWindow", "Yes", None))
self.helpno.setText(_translate("MainWindow", "No", None))
self.threadl.setText(_translate("MainWindow", "Please enter the number of alignment threads to launch", None))
self.outlb.setText(_translate("MainWindow", "Do you want to write all output files to this directory ", None))
self.idl.setText(_translate("MainWindow", "Do you want to assembled transcripts have this ID prefix", None))
self.biasln.setText(_translate("MainWindow", "Do you want to use bias correction", None))
self.biaspus.setToolTip(_translate("MainWindow", "<html><head/><body><p>single</p></body></html>", None))
self.difanal.setText(_translate("MainWindow", "Do you want to generate differential analysis files", None))
self.diffno.setText(_translate("MainWindow", "No", None))
self.diffyes.setText(_translate("MainWindow", "Yes", None))
self.iso.setText(_translate("MainWindow", "Do you want to perform isoform switching tests", None))
self.isono.setText(_translate("MainWindow", "No", None))
self.isoyes.setText(_translate("MainWindow", "Yes", None))
self.dias.setText(
_translate("MainWindow", "Do you want to Print lots of status updates and other diagnostic information.",
None))
self.printyes.setText(_translate("MainWindow", "Yes", None))
self.prntno.setText(_translate("MainWindow", "No", None))
self.warn.setText(
_translate("MainWindow", "Do you want to Suppress messages other than serious warnings and errors.", None))
self.suppno.setText(_translate("MainWindow", "No", None))
self.suppyes.setText(_translate("MainWindow", "Yes", None))
self.updat.setText(
_translate("MainWindow", "Do you want to contact server to check for update availability", None))
self.servyes.setText(_translate("MainWindow", "Yes", None))
self.servno.setText(_translate("MainWindow", "No", None))
self.subrefanpushok.setText(_translate("MainWindow", "OK", None))
self.dispersion.setText(_translate("MainWindow",
"Want to Use the Poisson fragment dispersion model instead of learning one in each condition.",
None))
self.poisno.setText(_translate("MainWindow", "No", None))
self.Poiyes.setText(_translate("MainWindow", "Yes", None))
self.overdisp.setText(
_translate("MainWindow", "Do you want to print count tables used to fit overdispersion", None))
self.tabno.setText(_translate("MainWindow", "No", None))
self.tabyes.setText(_translate("MainWindow", "Yes", None))
self.skipl.setText(
_translate("MainWindow", "Set the maximum fragments allowed in a bundle before skipping", None))
self.numsaml.setText(_translate("MainWindow", "Set the Number of fragment generation samples", None))
self.assgn.setText(
_translate("MainWindow", "Set the Number of fragment assignment samples per generation", None))
self.testing.setText(
_translate("MainWindow", "Set Replicates needed for relative isoform shift testing ", None))
| 1.757813 | 2 |
LeNet5/mnist_inference.py | carbo-T/TF | 1 | 12789135 | <filename>LeNet5/mnist_inference.py
# -*- coding: utf8 -*-
import tensorflow as tf
# define basic params
INPUT_NODE = 784
OUTPUT_NODE = 10
IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
CONV1_DEPTH = 6
CONV1_SIZE = 5
CONV2_DEPTH = 16
CONV2_SIZE = 5
FC_SIZE = 84
def inference(input_tensor, train, regularizer):
# define layer1 forward propagation
with tf.variable_scope('layer1-conv1'):
conv1_weights = tf.get_variable(
"weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEPTH],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
conv1_biases = tf.get_variable("bias", [CONV1_DEPTH], initializer=tf.constant_initializer(0.0))
        # the middle two entries of strides are the step sizes (1) along the width and height directions
conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
# define layer2 forward propagation, max pooling, size 2*2, step 2*2, all 0 filling
with tf.variable_scope('layer2-pool1'):
pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
with tf.variable_scope('layer3-conv2'):
conv2_weights = tf.get_variable(
"weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEPTH, CONV2_DEPTH],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
conv2_biases = tf.get_variable("bias", [CONV2_DEPTH], initializer=tf.constant_initializer(0.0))
# size 5*5, depth 64, step 1, all 0 filling
conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1, ], padding='SAME')
relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
with tf.variable_scope('layer4-poll2'):
pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# pool_shape[0] means the num of data from a batch, get_shape->[num, width, height, depth]
pool_shape = pool2.get_shape().as_list()
nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
reshaped = tf.reshape(pool2, [pool_shape[0], nodes])
with tf.variable_scope('layer5-fc1'):
fc1_weights = tf.get_variable(
'weights',
[nodes, FC_SIZE],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
# fc layer regularize
if regularizer is not None:
tf.add_to_collection('losses', regularizer(fc1_weights))
fc1_biases = tf.get_variable('bias', [FC_SIZE], initializer=tf.constant_initializer(0.1))
fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
if train:
fc1 = tf.nn.dropout(fc1, 0.5)
with tf.variable_scope('layer6-fc2'):
fc2_weight = tf.get_variable(
'weight',
[FC_SIZE, NUM_LABELS],
initializer=tf.truncated_normal_initializer(stddev=0.1)
)
if regularizer is not None:
tf.add_to_collection('losses', regularizer(fc2_weight))
fc2_biases = tf.get_variable('bias', [NUM_LABELS], initializer=tf.constant_initializer(0.1))
logit = tf.matmul(fc1, fc2_weight)+fc2_biases
return logit
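# A minimal usage sketch (added illustration, not part of the original training code;
# assumes a TensorFlow 1.x environment where tf.contrib is available). It builds the
# inference graph once for an MNIST-shaped batch and runs it on dummy data; the batch
# size and regularization strength below are assumptions for the example only.
if __name__ == '__main__':
    import numpy as np
    batch_size = 8
    x = tf.placeholder(tf.float32, [batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS], name='x-input')
    regularizer = tf.contrib.layers.l2_regularizer(0.0001)
    y = inference(x, False, regularizer)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy = np.zeros((batch_size, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS), dtype=np.float32)
        # one logit per class, so the expected output shape is (8, 10)
        print(sess.run(y, feed_dict={x: dummy}).shape)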
| 2.65625 | 3 |
geninv.py | manticode/wow-inventory-offline | 1 | 12789136 | <gh_stars>1-10
import argparse
import re
import csv
from slpp import slpp as lua
def prerun():
""" Check input args are valid. """
argparser = argparse.ArgumentParser(description="Inventory database file.")
argparser.add_argument("-i", help="the lua datafile", dest="infilename")
argparser.add_argument("-o", help="Output filename in CSV format", dest="outfilename")
argparser.add_argument("-n", "--toon_name", help="Character (toon) nam", dest="toon_name")
argparser.add_argument("-r", "--realm_name", help="Realm (server) name. Defaults to Hydraxian Waterlords if not "
"specified", dest="realm_name")
argparser.print_help()
args = argparser.parse_args()
luafile = open(args.infilename, "r")
gu_char_name = get_unique_char_name(args.toon_name, args.realm_name)
return luafile, gu_char_name
def parse_lua(luadb, gu_toon_name):
""" Parse the lua data"""
inventorydata = luadb.read()
inventorydata = "{ "+inventorydata+" }"
inventorydataparsed = lua.decode(inventorydata)
itemid_list, itemname_list = iter_luadb(inventorydataparsed, gu_toon_name)
qty_list = get_item_qty(inventorydataparsed, gu_toon_name, itemid_list)
return itemid_list, itemname_list, qty_list
def extract_item_name(item_string):
    item_name = re.search(r"^.*\[([a-zA-Z0-9\s\:\',\-]*)\].*$", item_string)
if item_name:
return item_name.group(1)
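# Illustrative example (the link string below is an assumed WoW-style item link, not
# taken from a real AskMrRobot datafile): only the bracketed item name is returned.
#   extract_item_name("|cffffffff|Hitem:2592::::::::|h[Wool Cloth]|h|r")  # -> "Wool Cloth"
#   extract_item_name("no brackets here")                                 # -> None (no match)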
def get_item_qty(lua_obj, gu_toon_name, item_id_list):
""" Correlate quantities for respective items."""
bank_inv_qty_lookup = lua_obj["AskMrRobotDbClassic"]["char"][gu_toon_name]["BankItemsAndCounts"]
storage_list_qty = []
qty_insert = 0
for item_id_lookup in item_id_list:
for container_id in bank_inv_qty_lookup:
bank_container = bank_inv_qty_lookup[container_id]
item_qty = bank_container.get(item_id_lookup)
if item_qty:
qty_insert = qty_insert + item_qty
else:
pass
storage_list_qty.append(qty_insert)
qty_insert = 0
return storage_list_qty
def get_unique_char_name(toon_name, realm_name):
""" globally unique toon name in Name - Realm format"""
gu_char_name = toon_name + " - " + realm_name
return gu_char_name
def iter_luadb(lua_obj, gu_char_name):
""" Extract the stuff we want. Each bag """
bank_inv_lookup = lua_obj["AskMrRobotDbClassic"]["char"][gu_char_name]["BankItems"]
storage_list_itemid = []
storage_list_itemname = []
for key in bank_inv_lookup:
bank_container = bank_inv_lookup[key]
for slot_item in bank_container:
if slot_item["id"] in storage_list_itemid:
pass
else:
storage_list_itemid.append(slot_item["id"])
storage_list_itemname.append(extract_item_name(slot_item["link"]))
if isinstance(bank_inv_lookup[key], dict):
            iter_luadb(bank_inv_lookup[key], gu_char_name)
return storage_list_itemid, storage_list_itemname
def create_combined_inv(item_id_list, item_name_list, item_qty_list):
zip_inv = zip(item_name_list, item_qty_list)
dict_inv = dict(zip_inv)
return dict_inv
def write_out_csv(inv_dict, outfile):
with open(outfile, "w") as file_handle:
writer = csv.writer(file_handle)
writer.writerows(inv_dict.items())
file_handle.close()
if __name__ == "__main__":
databaseobj, gu_name = prerun()
itemid_list, itemname_list, itemqty_list = parse_lua(databaseobj, gu_name)
inventory_dict = create_combined_inv(itemid_list, itemname_list, itemqty_list)
    write_out_csv(inventory_dict, "inventory.csv")
| 2.9375 | 3 |
SalesforceApi.py | mvogelgesang/SF-Event-Monitoring-Log-Retrieval | 0 | 12789137 | <reponame>mvogelgesang/SF-Event-Monitoring-Log-Retrieval<filename>SalesforceApi.py
import datetime
import getopt
import json
import logging
import os
import requests
import sys
from FileWriter import FileWriter
from requests.auth import HTTPBasicAuth
"""
Salesforce API
"""
class SalesforceApi:
def __init__(self,environment,debug=0):
self.username = environment['username']
self.password = environment['password']
self.securityToken = environment['securityToken']
self.sfConsumerKey = environment['consumerKey']
self.sfConsumerSecret = environment['consumerSecret']
self.sfURL = environment['salesforceURL']
self.accessToken = ''
self.debug = debug
self.logPath = 'logs/tmp_'+datetime.datetime.now().strftime('%Y%m%d-%H%M.%S')+'.json'
if self.debug:
print "salesforce api initiated"
def authenticate(self):
"""Authenticate against Salesforce using an OAuth connection. Sets the accessToken attribute of the SalesforceApi class
Parameters
----------
Returns
-------
void
"""
# Login Step 1, Request Access token
# To do so, you must have created a connected App in Salesforce and have a clientId and clientSecret available along with username, password, and securityToken
authHeaders = {'Content-Type': 'application/x-www-form-urlencoded'}
        # Construct the body of the request for access token
payload = {'grant_type':'password','client_id':self.sfConsumerKey,'client_secret':self.sfConsumerSecret,'username':self.username,'password':self.password+self.securityToken}
# Post to https://login.salesforce.com/services/oauth2/token
rawResponse = requests.post('https://'+self.sfURL+'/services/oauth2/token',headers=authHeaders, data=payload)
response = json.loads(rawResponse.text)
if self.debug:
print "[DEBUG] authenticate >> "
print response
self.accessToken = response['access_token']
def queryEventLogFile(self, eventType=''):
"""Query the Event Log File object for API events. Requires that accessToken is set
Parameters
----------
Returns
-------
json query response
"""
# If accessToken is not set, throw error
if (self.accessToken == ''):
raise ValueError('accessToken has not been set, run authenticate method to set token')
exit
# Set headers
headers = {'Content-Type': 'application/json','Authorization':'Bearer '+self.accessToken}
# Build WHERE clause
whereClause = ''
if eventType != '':
whereClause = "WHERE++EventType+=+'"+eventType+"'"
# post the request
rawResponse = requests.get("https://"+self.sfURL+"/services/data/v32.0/query?q=SELECT+Id+,+EventType+,+LogFile+,+LogDate+,+LogFileLength+FROM+EventLogFile+"+whereClause, headers=headers)
response = json.loads(rawResponse.text)
if self.debug:
print "[DEBUG] queryEventLogFile >> "
print response
return response
def eventLogFile(self,eventLogFile):
"""Retrieves a single Event File Log and writes it to the appropriate directory
Parameters
----------
param: eventLogFile
ex:
{
'LogFileLength': 5199.0,
'EventType': 'API',
'LogDate': '2016-11-22T00:00:00.000+0000',
'attributes': {
'url': '/services/data/v32.0/sobjects/EventLogFile/0ATr00000000TWHGA2',
'type': 'EventLogFile'
},
'LogFile': '/services/data/v32.0/sobjects/EventLogFile/0ATr00000000TWHGA2/LogFile',
'Id': '0ATr00000000TWHGA2'
}
Returns
-------
csv containing event file log
"""
if (self.accessToken == ''):
raise ValueError('accessToken has not been set, run authenticate method to set token')
exit
eventFileId = eventLogFile['Id']
headers = {'Authorization':'Bearer '+self.accessToken,'X-PrettyPrint':'1','Accept-Encoding': 'gzip'}
rawResponse = requests.get('https://'+self.sfURL+'/services/data/v32.0/sobjects/EventLogFile/'+eventFileId+'/LogFile',headers=headers)
if self.debug:
print "[DEBUG] eventLogFile >> "
print rawResponse
print rawResponse.content
# if self.log:
# w = FileWriter('log', eventFileId)
# w.writeFile(rawResponse.content)
w = FileWriter(eventLogFile)
w.writeFile(rawResponse.content)
return rawResponse
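# Hedged usage sketch (added illustration, not part of the original module). The
# credential values are placeholders you would supply yourself, and the query response
# is assumed to contain a standard Salesforce "records" list as returned by the REST
# query endpoint used in queryEventLogFile above.
if __name__ == "__main__":
    environment = {
        'username': '<EMAIL>',
        'password': '<PASSWORD>',
        'securityToken': '<TOKEN>',
        'consumerKey': '<CONSUMER_KEY>',
        'consumerSecret': '<CONSUMER_SECRET>',
        'salesforceURL': 'login.salesforce.com',
    }
    api = SalesforceApi(environment, debug=1)
    api.authenticate()
    # Fetch the list of API event log files, then download each one via FileWriter.
    for record in api.queryEventLogFile(eventType='API').get('records', []):
        api.eventLogFile(record)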
| 2.6875 | 3 |
randomstate/prng/mt19937/__init__.py | bashtage/ng-numpy-randomstate | 43 | 12789138 | <gh_stars>10-100
from .mt19937 import *
| 1.210938 | 1 |
code/python/visualize_contingency_tables.py | vishalbelsare/S3M | 52 | 12789139 | #!/usr/bin/env python3
#
# visualize_contingency_tables.py: Visualizes all contingency tables
# obtained by our method in the form of a diagram in the plane.
#
# Input: JSON file with shapelets
#
# Output: A set of points in the plane, each representing one table,
# such that the distance to the origin refers to preferences
# in splitting behaviour.
#
# The output will be written to `stdout`.
import argparse
import json
import sys
import numpy as np
def transform_table(table):
"""
Transforms a contingency table into a point on a two-dimensional
plane, in which the distance to the origin shows the suitability
of a contingency table for separating cases and controls.
"""
# Yes, this ordering is correct. Please refer to our paper for
# more details.
a, b, d, c = table
n1 = a+b
n0 = c+d
return (a-b) / n1, (c-d) / n0
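# Worked example (numbers are illustrative): for table = (a, b, d, c) = (30, 10, 5, 25),
# n1 = a + b = 40 and n0 = c + d = 30, so the point is ((30-10)/40, (25-5)/30) = (0.5, 0.67).
# A table that splits cases and controls perfectly maps to (1, 1) or (-1, -1), i.e. the
# points farthest from the origin.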
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Contingency Table Visualization")
parser.add_argument("input",
metavar = "INPUT",
help = "Input file"
)
parser.add_argument("-f", "--flip",
required = False,
action = "store_true",
help = "If set, flips values in the visualization to ensure that quadrant 3 is not used"
)
parser.add_argument("-p", "--prune",
required = False,
action = "store_true",
help = "If set, prunes duplicates points"
)
arguments = parser.parse_args()
input_file = arguments.input
flip = arguments.flip
prune = arguments.prune
with open(input_file) as f:
data = json.load(f)
shapelets = data["shapelets"]
tables = []
for shapelet in shapelets:
tables.append( shapelet["table"] )
points = []
for table in tables:
x,y = transform_table(table)
if flip and ( (x < 0 and y < 0) or (np.sign(x) != np.sign(y) and -x > y) ):
x,y = -y,-x
points.append( (x,y) )
if prune:
points = set(points)
for x,y in points:
print("{}\t{}".format(x,y))
| 3.328125 | 3 |
app/auth/forms.py | INASIC/AnnotateChange | 13 | 12789140 | # -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: See LICENSE file
# Copyright: 2020 (c) The Alan Turing Institute
from flask import current_app, flash
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import (
DataRequired,
Email,
EqualTo,
Optional,
ValidationError,
)
from app.models import User
class LoginForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
submit = SubmitField("Sign In")
class RegistrationForm(FlaskForm):
username = StringField("Username", validators=[DataRequired()])
email = StringField("Email", validators=[DataRequired(), Email()])
fullname = StringField("Full Name (optional)", validators=[])
password = PasswordField("Password", validators=[DataRequired()])
password2 = PasswordField(
"Repeat Password", validators=[DataRequired(), EqualTo("password")]
)
toc = BooleanField(
"I agree to the Terms and Conditions.", validators=[DataRequired()]
)
credit = BooleanField(
"Check this box if you would like to be publically credited with having "
"contributed to this work. By default, users will remain anonymous.",
validators=[Optional()],
)
updated = BooleanField(
"Check this box if you wish to be kept up to date with the "
"progress of this work by email.",
validators=[Optional()],
)
submit = SubmitField("Register")
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(
"Username already in use, please use a different one."
)
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(
"Email address already in use, please use a different one."
)
if current_app.config["USER_EMAILS"]:
if email.data in current_app.config["USER_EMAILS"]:
return
if current_app.config["USER_EMAIL_DOMAINS"]:
if (
not email.data.split("@")[-1]
in current_app.config["USER_EMAIL_DOMAINS"]
):
raise ValidationError(
"Access to AnnotateChange is restricted to "
"individuals with email addresses from specific "
"institutions. Please use your employee email address "
"when signing up. If that does not solve the issue, "
"you unfortunately do not have access to "
"AnnotateChange at this time."
)
def validate_credit(self, credit):
if credit.data and not self.fullname.data:
flash(
"Please provide your full name if you wish to "
"be credited with contributing to this work.", "error")
raise ValidationError(
"Please provide your full name if you wish to "
"be credited with contributing to this work."
)
class ResetPasswordRequestForm(FlaskForm):
email = StringField("Email", validators=[DataRequired(), Email()])
submit = SubmitField("Request password reset")
class ResetPasswordForm(FlaskForm):
password = PasswordField("Password", validators=[DataRequired()])
password2 = PasswordField(
"<PASSWORD>", validators=[DataRequired(), EqualTo("password")]
)
submit = SubmitField("Request Password Reset")
| 3 | 3 |
{{cookiecutter.project_slug}}/backend/app/app/core/config.py | abnerjacobsen/full-stack | 516 | 12789141 | <filename>{{cookiecutter.project_slug}}/backend/app/app/core/config.py
import os
def getenv_boolean(var_name, default_value=False):
result = default_value
env_value = os.getenv(var_name)
if env_value is not None:
result = env_value.upper() in ("TRUE", "1")
return result
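# Example (illustrative): with USERS_OPEN_REGISTRATION=1 or =true in the environment,
# getenv_boolean("USERS_OPEN_REGISTRATION") returns True; an unset variable falls back
# to default_value, and any other value evaluates to False.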
API_V1_STR = "/api/v1"
SECRET_KEY = os.getenvb(b"SECRET_KEY")
if not SECRET_KEY:
SECRET_KEY = os.urandom(32)
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 60 minutes * 24 hours * 8 days
SERVER_NAME = os.getenv("SERVER_NAME")
BACKEND_CORS_ORIGINS = os.getenv("BACKEND_CORS_ORIGINS")
PROJECT_NAME = os.getenv("PROJECT_NAME")
SENTRY_DSN = os.getenv("SENTRY_DSN")
POSTGRES_SERVER = os.getenv("POSTGRES_SERVER")
POSTGRES_USER = os.getenv("POSTGRES_USER")
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD")
POSTGRES_DB = os.getenv("POSTGRES_DB")
SQLALCHEMY_DATABASE_URI = (
f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_SERVER}/{POSTGRES_DB}"
)
FIRST_SUPERUSER = os.getenv("FIRST_SUPERUSER")
FIRST_SUPERUSER_PASSWORD = os.getenv("FIRST_SUPERUSER_PASSWORD")
USERS_OPEN_REGISTRATION = getenv_boolean("USERS_OPEN_REGISTRATION")
| 1.890625 | 2 |
metrics.py | theletterf/collectd-spark | 3 | 12789142 | GAUGE = 'gauge'
COUNTER = 'counter'
SPARK_PROCESS_METRICS = {
# jvm generics
"jvm.total.used": (
GAUGE,
"gauges",
"value"
),
"jvm.total.committed": (
GAUGE,
"gauges",
"value"
),
# heap memory
"jvm.heap.used": (
GAUGE,
"gauges",
"value"
),
"jvm.heap.committed": (
GAUGE,
"gauges",
"value"
),
# non-heap memory
# if max = -1, this is technically unbounded
# (switching calculation to used/committed)
"jvm.non-heap.used": (
GAUGE,
"gauges",
"value"
),
"jvm.non-heap.committed": (
GAUGE,
"gauges",
"value"
),
# marksweep
"jvm.MarkSweepCompact.count": (
GAUGE,
"gauges",
"value"
),
"jvm.MarkSweepCompact.time": (
GAUGE,
"gauges",
"value"
),
# worker specific
"worker.coresFree": (
GAUGE,
"gauges",
"value"
),
"worker.coresUsed": (
GAUGE,
"gauges",
"value"
),
"worker.executors": (
GAUGE,
"gauges",
"value"
),
"worker.memFree_MB": (
GAUGE,
"gauges",
"value"
),
"worker.memUsed_MB": (
GAUGE,
"gauges",
"value"
),
# master specific
"master.aliveWorkers": (
GAUGE,
"gauges",
"value"
),
"master.apps": (
GAUGE,
"gauges",
"value"
),
"master.waitingApps": (
GAUGE,
"gauges",
"value"
),
"master.workers": (
GAUGE,
"gauges",
"value"
)
}
SPARK_PROCESS_METRICS_ENHANCED = {
# memory pool
"jvm.pools.Code-Cache.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Code-Cache.committed": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Compressed-Class-Space.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Compressed-Class-Space.committed": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Metaspace.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Metaspace.committed": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Eden-Space.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Eden-Space.committed": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Survivor-Space.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Survivor-Space.committed": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Tenured-Gen.used": (
GAUGE,
"gauges",
"value"
),
"jvm.pools.Tenured-Gen.committed": (
GAUGE,
"gauges",
"value"
),
# hive specifics
"HiveExternalCatalog.fileCacheHits": (
COUNTER,
"counters",
"count"
),
"HiveExternalCatalog.filesDiscovered": (
COUNTER,
"counters",
"count"
),
"HiveExternalCatalog.hiveClientCalls": (
COUNTER,
"counters",
"count"
),
"HiveExternalCatalog.parallelListingJobCount": (
COUNTER,
"counters",
"count"
),
"HiveExternalCatalog.partitionsFetched": (
COUNTER,
"counters",
"count"
)
}
SPARK_JOB_METRICS = {
"numTasks": (GAUGE, "spark.job.num_tasks"),
"numActiveTasks": (GAUGE, "spark.job.num_active_tasks"),
"numCompletedTasks": (GAUGE, "spark.job.num_completed_tasks"),
"numSkippedTasks": (GAUGE, "spark.job.num_skipped_tasks"),
"numFailedTasks": (GAUGE, "spark.job.num_failed_tasks"),
"numActiveStages": (GAUGE, "spark.job.num_active_stages"),
"numCompletedStages": (GAUGE, "spark.job.num_completed_stages"),
"numSkippedStages": (GAUGE, "spark.job.num_skipped_stages"),
"numFailedStages": (GAUGE, "spark.job.num_failed_stages")
}
SPARK_STAGE_METRICS = {
"executorRunTime": (GAUGE, "spark.stage.executor_run_time"),
"inputBytes": (GAUGE, "spark.stage.input_bytes"),
"inputRecords": (GAUGE, "spark.stage.input_records"),
"outputBytes": (GAUGE, "spark.stage.output_bytes"),
"outputRecords": (GAUGE, "spark.stage.output_records"),
"memoryBytesSpilled": (GAUGE, "spark.stage.memory_bytes_spilled"),
"diskBytesSpilled": (GAUGE, "spark.stage.disk_bytes_spilled")
}
SPARK_STAGE_METRICS_ENHANCED = {
"shuffleReadBytes": (GAUGE, "spark.stage.shuffle_read_bytes"),
"shuffleReadRecords": (GAUGE, "spark.stage.shuffle_read_records"),
"shuffleWriteBytes": (GAUGE, "spark.stage.shuffle_write_bytes"),
"shuffleWriteRecords": (GAUGE, "spark.stage.shuffle_write_records"),
}
SPARK_DRIVER_METRICS = {
"memoryUsed": (COUNTER, "spark.driver.memory_used"),
"diskUsed": (COUNTER, "spark.driver.disk_used"),
"totalInputBytes": (COUNTER, "spark.driver.total_input_bytes"),
"totalShuffleRead": (COUNTER, "spark.driver.total_shuffle_read"),
"totalShuffleWrite": (COUNTER, "spark.driver.total_shuffle_write"),
"totalTasks": (COUNTER, "spark.driver.total_tasks"),
"maxMemory": (GAUGE, "spark.driver.max_memory")
}
SPARK_DRIVER_METRICS_ENHANCED = {
"rddBlocks": (GAUGE, "spark.driver.rdd_blocks"),
"activeTasks": (GAUGE, "spark.driver.active_tasks"),
"failedTasks": (COUNTER, "spark.driver.failed_tasks"),
"completedTasks": (COUNTER, "spark.driver.completed_tasks"),
"totalDuration": (COUNTER, "spark.driver.total_duration")
}
SPARK_EXECUTOR_METRICS = {
"memoryUsed": (COUNTER, "spark.executor.memory_used"),
"diskUsed": (COUNTER, "spark.executor.disk_used"),
"totalInputBytes": (COUNTER, "spark.executor.total_input_bytes"),
"totalShuffleRead": (COUNTER, "spark.executor.total_shuffle_read"),
"totalShuffleWrite": (COUNTER, "spark.executor.total_shuffle_write"),
"maxMemory": (GAUGE, "spark.executor.max_memory")
}
SPARK_EXECUTOR_METRICS_ENHANCED = {
"rddBlocks": (GAUGE, "spark.executor.rdd_blocks"),
"activeTasks": (GAUGE, "spark.executor.active_tasks"),
"failedTasks": (COUNTER, "spark.executor.failed_tasks"),
"completedTasks": (COUNTER, "spark.executor.completed_tasks"),
"totalTasks": (COUNTER, "spark.executor.total_tasks"),
"totalDuration": (COUNTER, "spark.executor.total_duration")
}
SPARK_STREAMING_METRICS = {
"avgInputRate": (GAUGE, "spark.streaming.avg_input_rate"),
"numTotalCompletedBatches":
(COUNTER, "spark.streaming.num_total_completed_batches"),
"numActiveBatches": (GAUGE, "spark.streaming.num_active_batches"),
"numInactiveReceivers": (GAUGE, "spark.streaming.num_inactive_receivers"),
"numReceivedRecords": (COUNTER, "spark.streaming.num_received_records"),
"numProcessedRecords": (COUNTER, "spark.streaming.num_processed_records"),
"avgProcessingTime": (GAUGE, "spark.streaming.avg_processing_time"),
"avgSchedulingDelay": (GAUGE, "spark.streaming.avg_scheduling_delay"),
"avgTotalDelay": (GAUGE, "spark.streaming.avg_total_delay")
}
| 2.203125 | 2 |
setup.py | petejh/catclass | 0 | 12789143 | <reponame>petejh/catclass<gh_stars>0
from setuptools import setup
setup(
name = 'cat-class',
version = '0.1.0',
description = ('An image classifier employing a deep neural network '
'to identify pictures of cats.'),
url = 'https://github.com/petejh/cat-class',
author = '<NAME>',
author_email = '<EMAIL>',
license = 'MIT',
packages = ['catclass'],
python_requires = '',
install_requires = ['deepen', 'h5py', 'numpy'],
zip_safe = False
)
| 1.539063 | 2 |
tests/test_quadrature/test_quadrature.py | AI-Pranto/OpenMOC | 97 | 12789144 | <reponame>AI-Pranto/OpenMOC<gh_stars>10-100
#!/usr/bin/env python
import os
import sys
import math
sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, 'openmoc'))
from testing_harness import TestHarness
from input_set import LatticeGridInput
import openmoc
class QuadratureTestHarness(TestHarness):
"""Tests tracking over a lattice geometry."""
def __init__(self):
super(QuadratureTestHarness, self).__init__()
self.quadratures = list()
quadratures = openmoc.Quadrature.__subclasses__()
for quadrature in quadratures:
self.quadratures.append(quadrature())
self._result = ''
def _setup(self):
return
def _run_openmoc(self):
"""Segment tracks over the geometry and save the result to a string"""
# Segmentize tracks over the geometry
for azim in [4, 8, 16, 32, 64]:
for polar in [4, 6]:
for quad in self.quadratures:
sum_weights = 0
int_sine = 0
quad.setNumAzimAngles(azim)
quad.setNumPolarAngles(polar)
quad.initialize()
for a in range(int(azim/4)):
quad.setAzimSpacing(1.0, a)
quad.precomputeWeights(False)
for a in range(int(azim/2)):
for p in range(polar):
sum_weights += quad.getWeight(a, p) / \
math.sin(quad.getTheta(a, p))
int_sine += quad.getWeight(a, p)
if abs(sum_weights - 4 * math.pi) > 0.005:
self._result += 'Calculated ' + str(sum_weights) + \
' for sum of weights for ' + type(quad).__name__\
+ ' with ' + str(azim) + ' azimuthal angles and '\
+ str(polar) + ' which exceeds tolerance of 0.005'\
' from 4 PI\n'
if abs(int_sine - 9.8696) > 0.5:
self._result += 'Calculated ' + str(int_sine)\
+ ' for the integral of sine for '\
+ type(quad).__name__ + ' with ' + str(azim)\
+ ' azimuthal angles and ' + str(polar)\
+ ' which exceeds tolerance of 0.5 from 9.8696\n'
if (self._result == ''):
self._result += 'All Quadrature sets correctly summed their ' \
'weights to 4 PI with tolerance 0.005 and calculated the ' \
'integral of sine as 9.8696 with tolerance 0.5'
def _get_results(self, num_iters=False, keff=False, fluxes=False,
num_fsrs=True, num_segments=True, num_tracks=True,
hash_output=False):
"""Return the result string"""
return self._result
if __name__ == '__main__':
harness = QuadratureTestHarness()
harness.main()
| 2.28125 | 2 |
2014-2015/2-jan2015/p2/CowRouting2.py | esqu1/USACO | 0 | 12789145 | <reponame>esqu1/USACO<gh_stars>0
#########
# USACO CONTEST 1 PROBLEM 2
# SOLUTION BY <NAME>
# PYTHON 2.7.6
#########
import sys
def readin():
f = open("cowroute.in",'r')
s = f.read().split("\n")
f.close()
return s
def find(list,el):
if el in list:
return list.index(el)
else:
return -1
l = readin()
min = sys.maxint
firstline = l[0].split()
count = 2
while count < len(l):
if int(l[count - 1].split()[0]) < min and find(l[count].split(),firstline[0]) != -1 and find(l[count].split(),firstline[1]) != -1 and find(l[count].split(),firstline[0]) < find(l[count].split(),firstline[1]): # Same code from Cow Routing 1
min = int(l[count - 1].split()[0])
count += 2
count = 2
while count < len(l):
inc = 2
while inc < len(l) - count:
if find(l[count].split(),firstline[0]) == -1 and find(l[count].split(),firstline[1]) == -1: # Break if neither of them has the cities
break;
count2 = count + inc
if find(l[count].split(),firstline[0]) != -1: # If we can find first city in first line
listofone = l[count].split()[find(l[count].split(),firstline[0]):] # Make a list of everything afterwards
if find(l[count2].split(),firstline[1]) != -1: # If we can find second city in second line
listoftwo = l[count2].split()[:find(l[count2].split(),firstline[1])] # make a list of everything beforehand
print listofone
print listoftwo
if len([i for i in listofone if i in listoftwo]) > 0 and int(l[count-1].split()[0]) + int(l[count2-1].split()[0]) < min:
min = int(l[count-1].split()[0]) + int(l[count2-1].split()[0]) # If we find a common city, make this the new min
print min
if find(l[count2].split(),firstline[0]) != -1: # Likewise
listofone = l[count2].split()[:find(l[count2].split(),firstline[0])]
if find(l[count].split(),firstline[1]) != -1:
listoftwo = l[count].split()[find(l[count].split(),firstline[1]):]
print listofone
print listoftwo
if len([i for i in listofone if i in listoftwo]) > 0 and int(l[count-1].split()[0]) + int(l[count2-1].split()[0]) < min:
min = int(l[count-1].split()[0]) + int(l[count2-1].split()[0])
print min
inc += 2
count += 2
if min == sys.maxint:
min = -1
g = open("cowroute.out",'w')
g.write(str(min) + "\n")
g.close()
| 2.984375 | 3 |
62.py | r9y9/nlp100 | 18 | 12789146 | <filename>62.py
import plyvel
from tqdm import tqdm
db = plyvel.DB("artist.ldb", create_if_missing=False)
r = []
for k, v in tqdm(db):
if v == b"Japan":
r.append(k.decode("utf-8"))
for k in r:
print(k)
print("Total: {}".format(len(r)))
| 2.5 | 2 |
terrascript/provider/hashicorp/googleworkspace.py | mjuenema/python-terrascript | 507 | 12789147 | # terrascript/provider/hashicorp/googleworkspace.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:17:22 UTC)
import terrascript
class googleworkspace(terrascript.Provider):
"""terraform-provider-googleworkspace"""
__description__ = "terraform-provider-googleworkspace"
__namespace__ = "hashicorp"
__name__ = "googleworkspace"
__source__ = "https://github.com/hashicorp/terraform-provider-googleworkspace"
__version__ = "0.4.1"
__published__ = "2021-08-16T19:18:13Z"
__tier__ = "official"
__all__ = ["googleworkspace"]
| 1.53125 | 2 |
misc_files/GPSVehicle.py | twardokus/v2verifier | 9 | 12789148 | <gh_stars>1-10
import math
import json
import time
import subprocess
import pynmea2
from txrx import Utility
from WavePacketBuilder import WAVEPacketBuilder
class GPSVehicle:
def __init__(self, vehicle_num, gps_sock, gui_sock, gui_lock):
self.vehicle_num = vehicle_num
self.gps_sock = gps_sock
self.sock = gui_sock
self.lock = gui_lock
self.wave_builder = WAVEPacketBuilder()
self.util = Utility()
self.key = "keys/0/p256.key"
def start(self):
last_nmea = pynmea2.parse("$GPGGA,000000.00,0000.0000,N,00000.0000,E,0,99,1.0,0.0,M,0.0,M,,*5C")
while True:
gps_loc = self.gps_sock.recv(1024)
nmea = pynmea2.parse(gps_loc.split(b":")[1].decode().replace("GPS_GPGGA", "").strip())
print(nmea)
lat = float(nmea.latitude)
            lon = float(nmea.longitude)
last_lat = float(last_nmea.latitude)
last_lon = float(last_nmea.longitude)
speed = (
math.sqrt(math.pow(lat - last_lat, 2) + math.pow(lon - last_lon, 2))
* 36
)
heading = self.get_heading(nmea, last_nmea)
bsm_text = f"{self.vehicle_num},{nmea.latitude},{nmea.longitude},{heading},{speed}\n"
message = self.build_packet(nmea.latitude, nmea.longitude, heading, speed, self.key)
last_nmea = nmea
self.send_to_radio(message)
self.send_to_gui(bsm_text)
def get_heading(self, nmea, last_nmea):
if nmea.longitude == last_nmea.longitude:
# no change
if nmea.latitude == last_nmea.latitude:
return "-"
# heading North
elif nmea.latitude > last_nmea.latitude:
                return "N"
# heading South
else:
return "S"
# heading East
elif nmea.longitude > last_nmea.longitude:
if nmea.latitude > last_nmea.latitude:
return "NE"
elif nmea.latitude < last_nmea.latitude:
return "SE"
else:
return "E"
# heading West
else:
if nmea.latitude > last_nmea.latitude:
return "NW"
elif nmea.latitude < last_nmea.latitude:
return "SW"
else:
return "W"
def build_packet(self, lat, lng, heading, speed, key):
speed = str(round(speed, 2))
bsm_text = f"{self.vehicle_num},{nmea.latitude},{nmea.longitude},{heading},{speed}\n"
return self.wave_builder.get_wsm_payload(bsm_text, key)
def send_to_radio(self, message):
print("Sending BSM to radio")
bsm = self.util.inject_time(message)
loader = subprocess.Popen(("echo", "-n", "-e", bsm), stdout=subprocess.PIPE)
sender = subprocess.check_output(
("nc", "-w0", "-u", "localhost", "52001"), stdin=loader.stdout
)
def send_to_gui(self, message):
bsm = message.split(",")
decoded_data = {}
decoded_data['id'] = bsm[0]
decoded_data['x'] = bsm[1]
decoded_data['y'] = bsm[2]
decoded_data['heading'] = bsm[3]
decoded_data['speed'] = bsm[4]
decoded_data['sig'] = True
decoded_data['elapsed'] = 0
decoded_data['recent'] = True
decoded_data['receiver'] = True
vehicle_data_json = json.dumps(decoded_data)
with self.lock:
self.sock.send(vehicle_data_json.encode())
| 2.65625 | 3 |
networkmonitor/src/configuration/contextConfig.py | luther38/NetworkMonitor | 0 | 12789149 | <reponame>luther38/NetworkMonitor
import os
from networkmonitor.src.configuration import IConfig, YamlConfig, JsonConfig
from networkmonitor.src.collections import Configuration
class ContextConfig:
"""
    ContextConfig is the handler for the IConfig and decides which concrete config reader does the work.
Methods are the same as IConfig
"""
def __init__(self, config: IConfig):
self.config:IConfig = config
self.type:str = self.__GetConfigType__()
self.configuration = Configuration()
#self.SleepInterval:int = -1
#self.Nodes:Nodes = []
pass
def __GetConfigType__(self):
# Did we get our IConfig on its own?
try:
if self.config.argPathConfig.endswith('yaml'):
return 'yaml'
elif self.config.argPathConfig.endswith('json'):
return 'json'
else:
return None
except:
pass
# it came in with another class attached.
# Looks like it should be YamlConfig or JsonConfig then IConfig
try:
if self.config.config.PathConfig.endswith('yaml'):
return 'yaml'
elif self.config.config.PathConfig.endswith('json'):
return 'json'
else:
return None
except:
pass
def GetWorkingConfigClass(self, replaceConfig:bool = False):
"""
This is used to generate a working class based off the config type.
If we know the type and have a class we can use, make a new instance and return it.
Currently returns YamlConfig or JsonConfig.
@param:replaceConfig = If True it will take the generated class and place it in self.config. If False, return value.
"""
if self.type == "yaml":
c = YamlConfig(self.config)
return self.__ReplaceWorkingConfig(replaceConfig, c)
elif self.type == "json":
j = JsonConfig(self.config)
return self.__ReplaceWorkingConfig(replaceConfig, j)
else:
pass
def __ReplaceWorkingConfig(self, replaceConfig:bool, passedClass):
if replaceConfig == True:
self.config = passedClass
else:
return passedClass
def ReadConfig(self):
self.configuration.nodes = []
self.config.ReadConfig()
self.configuration = self.config.configuration
#self.configuration.nodes = self.config.nodes
#self.configuration.sleepInterval = self.config.sleepInterval
#self.Nodes = self.config.config.Nodes
#self.SleepInterval = self.config.config.SleepInterval
pass
def NewConfig(self):
"""
Handler for NewConfig requests.
"""
default = {
"SleepInterval":{
"Hours": 0,
"Minutes": 2,
"Seconds": 0
},
"Protocols":{
"ICMP":{
"Timeout":0
}
},
'Nodes': [
{
'Name':'LocalHost',
'Address': '192.168.127.12',
'Protocol': 'ICMP',
'Required': True,
'Category': "local"
},
{
'Name' : 'Wan',
'Address' : '192.168.0.1',
'Protocol' : 'ICMP',
'Required' : True,
'Category' : "Local"
},
{
'Name' : 'Google',
'Address' : 'google.com',
'Protocol' : 'ICMP',
'Required' : False,
'Category' : "External"
},
{
'Name' : 'Google',
'Address' : 'https://google.com',
'Protocol' : 'Http:Get',
'Required' : False,
'Category' : "External"
}
]
}
self.config.NewConfig(default)
pass
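# Minimal usage sketch (added illustration; the IConfig constructor arguments shown are
# hypothetical, as its signature is defined elsewhere in the package, and the attribute
# names on Configuration follow the commented-out code in ReadConfig above).
if __name__ == "__main__":
    config = IConfig("config.yaml")  # hypothetical: point IConfig at a YAML config file
    context = ContextConfig(config)
    context.GetWorkingConfigClass(replaceConfig=True)  # swap in YamlConfig or JsonConfig
    context.ReadConfig()
    print(context.configuration.sleepInterval)
    print(len(context.configuration.nodes))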
| 2.890625 | 3 |
roles/openshift_health_checker/test/curator_test.py | KoteikinyDrova/openshift-ansible | 1 | 12789150 | import pytest
from openshift_checks.logging.curator import Curator
def canned_curator(exec_oc=None):
"""Create a Curator check object with canned exec_oc method"""
check = Curator("dummy") # fails if a module is actually invoked
if exec_oc:
check._exec_oc = exec_oc
return check
def assert_error(error, expect_error):
if expect_error:
assert error
assert expect_error in error
else:
assert not error
plain_curator_pod = {
"metadata": {
"labels": {"component": "curator", "deploymentconfig": "logging-curator"},
"name": "logging-curator-1",
},
"status": {
"containerStatuses": [{"ready": True}],
"conditions": [{"status": "True", "type": "Ready"}],
"podIP": "10.10.10.10",
}
}
not_running_curator_pod = {
"metadata": {
"labels": {"component": "curator", "deploymentconfig": "logging-curator"},
"name": "logging-curator-2",
},
"status": {
"containerStatuses": [{"ready": False}],
"conditions": [{"status": "False", "type": "Ready"}],
"podIP": "10.10.10.10",
}
}
@pytest.mark.parametrize('pods, expect_error', [
(
[],
"no Curator pods",
),
(
[plain_curator_pod],
None,
),
(
[not_running_curator_pod],
"not currently in a running state",
),
(
[plain_curator_pod, plain_curator_pod],
"more than one Curator pod",
),
])
def test_get_curator_pods(pods, expect_error):
check = canned_curator()
error = check.check_curator(pods)
assert_error(error, expect_error)
| 2.234375 | 2 |