/classipy-1.1.1.tar.gz/classipy-1.1.1/classy/predict.py
import logging
import sys
from classy.generate import fix_column_offset
from sklearn.externals import joblib
from .data import load_index, get_n_rows, load_vocabulary
from .extract import row_generator, row_generator_from_file
from .transform import FeatureEncoder, transform_input
L = logging.getLogger(__name__)
def predict_labels(args):
L.debug("%s", args)
if args.text:
stream_predictor(args)
else:
batch_predictor(args)
def batch_predictor(args):
if not args.index:
raise ValueError('missing input data file (inverted index)')
elif len(args.index) > 1:
raise ValueError('more than one input data file (inverted index)')
data = load_index(args.index[0])
pipeline = joblib.load(args.model)
predictions = pipeline.predict(data.index.tocsr())
scores = get_scores(pipeline, data) if args.scores else None
text_ids = get_or_make_text_ids(data)
make_label = str
if args.label:
n_labels = len(args.label)
make_label = lambda i: (args.label[i] if i < n_labels else i)
if args.scores:
report_with_scores(predictions, scores, text_ids, make_label)
else:
report_labels(predictions, text_ids, make_label)
def report_labels(predictions, text_ids, make_label):
for text_id, prediction in zip(text_ids, predictions):
print(text_id, make_label(prediction), sep='\t')
def report_with_scores(predictions, scores, text_ids, make_label):
for text_id, prediction, i_scores in \
zip(text_ids, predictions, scores):
if isinstance(i_scores, float):
i_scores = (i_scores,)
score_str = '\t'.join('{: 0.8f}'.format(s) for s in i_scores)
print(text_id, make_label(prediction), score_str, sep='\t')
def get_scores(pipeline, data):
try:
return pipeline.predict_proba(data.index.tocsr())
except AttributeError: # svm and others do not have proba
return pipeline.decision_function(data.index.tocsr())
def get_or_make_text_ids(data):
if data.text_ids:
text_ids = data.text_ids
else:
text_ids = range(1, get_n_rows(data) + 1)
return text_ids
def stream_predictor(args):
if args.vocabulary is None:
raise ValueError(
'missing required option for text input: --vocabulary VOCAB'
)
vocabulary = load_vocabulary(args.vocabulary)
dialect = 'excel' if args.csv else 'plain'
args.annotate = fix_column_offset(args.annotate)
args.feature = fix_column_offset(args.feature)
if not args.index:
gen = row_generator(sys.stdin, dialect=dialect)
predict_from(gen, args, vocabulary)
else:
for file in args.index:
gen = row_generator_from_file(file, dialect=dialect,
encoding=args.encoding)
predict_from(gen, args, vocabulary)
def predict_from(text, args, vocab):
stream = transform_input(text, args)
id_col = find_id_col(args)
stream = FeatureEncoder(stream, vocabulary=vocab,
id_col=id_col, label_col=None)
pipeline = joblib.load(args.model)
make_label = str
here = '\t' if args.scores else '\n' # append score or not
no_proba = False
if args.label:
n_labels = len(args.label)
make_label = lambda i: args.label[i] if i < n_labels else i
for text_id, features in stream:
prediction = pipeline.predict(features)
assert prediction.shape[0] == 1, "not a single prediction: %s" % str(
prediction.shape
)
print(text_id, make_label(prediction[0]), sep='\t', end=here)
if args.scores:
no_proba = append_scores(pipeline, features, no_proba)
def append_scores(pipeline, features, no_proba):
if no_proba:
scores = pipeline.decision_function(features)[0]
else:
try:
scores = pipeline.predict_proba(features)[0]
except AttributeError:  # svm and others do not have predict_proba
scores = pipeline.decision_function(features)[0]
no_proba = True
if isinstance(scores, float):
scores = (scores,)
print('\t'.join('{: 0.8f}'.format(s) for s in scores))
return no_proba  # lets the caller skip retrying predict_proba on subsequent rows
def find_id_col(args):
if args.no_id:
id_col = None
elif args.id_second:
id_col = 1
elif args.id_last:
id_col = -1
else:
id_col = 0
return id_col
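# A hedged usage sketch (hypothetical argument values; the real args object comes from classipy's CLI parser):
#
#   from argparse import Namespace
#   args = Namespace(text=False, index=['features.index'], model='model.joblib',
#                    scores=True, label=['neg', 'pos'])
#   predict_labels(args)  # batch mode: prints "text_id<TAB>label[<TAB>score...]" lines to stdout
#
# With args.text set to a truthy value (plus a --vocabulary file), stream_predictor instead reads raw text rows
# from stdin or from the files listed in args.index and classifies them one at a time.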
/monopoly_simulator_test-0.0.9.tar.gz/monopoly_simulator_test-0.0.9/monopoly_simulator/background_agent_v3_2.py
from monopoly_simulator import agent_helper_functions_v2 as agent_helper_functions  # helper functions are internal to the agent and will not be recorded in the function log.
from monopoly_simulator import diagnostics
from monopoly_simulator.bank import Bank
from monopoly_simulator.flag_config import flag_config_dict
import logging
logger = logging.getLogger('monopoly_simulator.logging_info.background_agent')
UNSUCCESSFUL_LIMIT = 2
"""
All external decision_agent functions must have the exact signatures we have indicated in this document. Beyond
that, we impose no restrictions (you can make the decision agent as complex as you like, including maintaining state),
and we trust in good faith that you do not manipulate the gameboard. We will have mechanisms to check for inadvertent
changes or inconsistencies that get introduced in the gameboard (due to any reason, including possible subtle errors
in the simulator itself) a short while later.
If your decision agent maintains state, or some kind of global data structure, please be careful when assigning the
same decision agent (as we do) to each player. We already provide some basic state to you via 'code' in the make_*_move
functions. Specifically, if code is 1, the 'previous' move selected by the player was successful,
and if it is -1, the move was unsuccessful. A code of -1 is usually returned when an allowable move is invoked with parameters
that prevent the action from happening, e.g., the player may decide to mortgage a property that is already mortgaged,
which will return the failure code of -1 when the game actually tries to mortgage the property in action_choices.
Be careful to note what each function is supposed to return in addition to adhering to the expected signature. The examples
here are good guides.
Your functions can be called whatever you like, but the keys in decision_agent_methods should not be changed. The
respective functions must adhere in their signatures to the examples here. The agent in this file is simple and rule-based
rather than adaptive, but it is capable of taking good actions in a number of eventualities.
We detail the logic behind each decision in a separate document. This is the agent that will serve as the 'background'
agent for purposes of evaluation.
"""
def make_pre_roll_move(player, current_gameboard, allowable_moves, code):
"""
Many actions are possible in pre_roll but we prefer to save the logic for out_of_turn. The only decision
we'll make here is whether we want to leave jail (if we're in jail).
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:param allowable_moves: A set of function names, each of which is defined in action_choices (imported in this file), and that
will always be a subset of the action choices for pre_die_roll in the game schema. Your returned action choice name must be from
allowable_moves; we will check for this when you return.
:param code: See the preamble of this file for an explanation of this code
:return: A 2-element tuple, the first of which is the name of the action you want to take, and the second is a dictionary of
parameters that will be passed into the function representing that action when it is executed.
The dictionary must exactly contain the keys and expected value types expected by that action in
action_choices
"""
'''
phase_game defines which phase the player is in during the game
0 -> preroll
1 -> out of turn
2 -> postroll
count_unsuccessful_tries in the agent memory keeps a record of unsuccessful actions executed by that player agent in each phase_game.
If this count reaches UNSUCCESSFUL_LIMIT before a phase_game change, then the player has no option but to either skip_turn or
conclude_actions. This count resets to 0 when the phase_game changes.
This ensures that the game does not go on for too long trying to execute unsuccessful actions.
'''
for p in current_gameboard['players']:
if 'phase_game' not in p.agent._agent_memory:
p.agent._agent_memory['phase_game'] = 0
p.agent._agent_memory['count_unsuccessful_tries'] = 0
if player.agent._agent_memory['phase_game'] != 0:
player.agent._agent_memory['phase_game'] = 0
for p in current_gameboard['players']:
if p.status != 'lost':
p.agent._agent_memory['count_unsuccessful_tries'] = 0
if code == flag_config_dict['failure_code']:
player.agent._agent_memory['count_unsuccessful_tries'] += 1
logger.debug(player.player_name + ' has executed an unsuccessful preroll action, incrementing unsuccessful_tries ' +
'counter to ' + str(player.agent._agent_memory['count_unsuccessful_tries']))
if player.agent._agent_memory['count_unsuccessful_tries'] >= UNSUCCESSFUL_LIMIT:
logger.debug(player.player_name + ' has reached preroll unsuccessful action limits.')
if "skip_turn" in allowable_moves:
logger.debug(player.player_name+ ': I am skipping turn since I have crossed unsuccessful limits.')
player.agent._agent_memory['previous_action'] = "skip_turn"
return ("skip_turn", dict())
elif "concluded_actions" in allowable_moves:
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
logger.debug(player.player_name+ ': I am concluding actions since I have crossed unsuccessful limits.')
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
if player.current_cash >= current_gameboard['go_increment']: # if we don't have enough money, best to stay put.
param = dict()
param['player'] = player.player_name
param['current_gameboard'] = "current_gameboard"
if "use_get_out_of_jail_card" in allowable_moves:
logger.debug(player.player_name+': I am using get out of jail card.')
player.agent._agent_memory['previous_action'] = "use_get_out_of_jail_card"
return ("use_get_out_of_jail_card", param)
elif "pay_jail_fine" in allowable_moves:
logger.debug(player.player_name+': I am going to pay jail fine.')
player.agent._agent_memory['previous_action'] = "pay_jail_fine"
return ("pay_jail_fine", param)
# if we ran the gamut, and did not return, then it's time to skip turn or conclude actions
if "skip_turn" in allowable_moves:
# testing hypothetical simulator (will comment when done testing). Note that this was written for the Python 2
# version (the GNOME repo). Make sure to appropriately modify by instantiating agent instead of sending in the
# decision agent methods as being done below.
# player_decision_agents = dict()
# import simple_decision_agent_1
# player_decision_agents['player_1'] = simple_decision_agent_1.decision_agent_methods # the reason I am doing this for all agents is to avoid infinite loops.
# player_decision_agents['player_2'] = simple_decision_agent_1.decision_agent_methods
# player_decision_agents['player_3'] = simple_decision_agent_1.decision_agent_methods
# player_decision_agents['player_4'] = simple_decision_agent_1.decision_agent_methods
# alternate_univ = hypothetical_simulator.initialize_hypothetical_universe(current_gameboard, player_decision_agents)
# logger.debug(player.player_name,' has spawned alternate universe to try out things.')
# hypothetical_winner = hypothetical_simulator.simulate_hypothetical_game(hypothetical_gameboard=alternate_univ,
# die_roll_substitute=hypothetical_simulator.die_roll_substitute,num_total_die_rolls=15) # we will only run for fifteen die rolls.
# if hypothetical_winner is None:
# logger.debug(diagnostics.logger.debug_player_cash_balances(alternate_univ))
# else:
# logger.debug(hypothetical_winner.player_name)
logger.debug(player.player_name+ ': I am skipping turn')
player.agent._agent_memory['previous_action'] = "skip_turn"
return ("skip_turn", dict())
elif "concluded_actions" in allowable_moves:
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
logger.debug(player.player_name+ ': I am concluding actions')
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
def make_out_of_turn_move(player, current_gameboard, allowable_moves, code):
"""
The agent is in the out-of-turn phase and must decide what to do (next).
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:param allowable_moves: A set of function names, each of which is defined in action_choices (imported in this file), and that
will always be a subset of the action choices for out_of_turn in the game schema. Your returned action choice must be from
allowable_moves; we will check for this when you return.
:param code: See the preamble of this file for an explanation of this code
:return: A 2-element tuple, the first of which is the name of the action you want to take, and the second is a dictionary of
parameters that will be passed into the function representing that action when it is executed.
The dictionary must exactly contain the keys and expected value types expected by that action in
action_choices
"""
'''
Agent V3
This updated version of the agent can make trade offers with MULTIPLE players simultaneously.
All strategies available in Agent V2 are still available in V3.
Note that this version of the agent also engages in the trade of only one set of properties like the previous version, i.e.,
- only one property will be requested if it is a buy property offer or
- only one property will be offered if it is a sell property offer or
- only one property will be offered and one property requested during an exchange property offer.
Agent V2
NOTE: The background agent that could make_sell_property_offer is deprecated (available as background_agent_v1_deprecated.py)
This version of the agent can only make_trade_offer and accept trade offer. Trade involves buy or sell or exchange property offers.
Accept_sell_property_offer function is still available in case some different agent decides to make a sell property offer.
Ideally, accept_sell_property_offer() function should never enter allowable moves.
Make sell property offer can be replicated by making a trade offer that only offers to sell properties in return for cash
and does not involve a buy property or exchange property offer.
A buy property offer can be duplicated by including only requested properties and offering cash without offering any properties.
Properties and cash can be exchanged which lets both players get an advantage of increasing their respective number of monopolies.
This version of the agent background_agent_v1 supports making sell property offers in return for cash via make_trade_offer,
buy trade offers and exchange property offers.
Note that this version of the agent engages in the trade of only one set of properties, i.e.,
- only one property will be requested if it is a buy property offer or
- only one property will be offered if it is a sell property offer or
- only one property will be offered and one property requested during an exchange property offer.
'''
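# A hedged sketch of the trade-offer structure inspected below (field names are taken from the code in this
# function; the concrete values are purely hypothetical):
#   player.outstanding_trade_offer = {
#       'from_player': <Player instance making the offer>,
#       'cash_offered': 50, 'cash_wanted': 0,
#       'property_set_offered': {<Location>, ...},
#       'property_set_wanted': {<Location>, ...},
#   }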
'''
phase_game defines which phase the player is in during the game
0 -> preroll
1 -> out of turn
2 -> postroll
count_unsuccessful_tries in the agent memory keeps a record of unsuccessful actions executed by that player agent in each phase_game.
If this count reaches UNSUCCESSFUL_LIMIT before a phase_game change, then the player has no option but to either skip_turn or
conclude_actions. This count resets to 0 when the phase_game changes.
This ensures that the game does not go on for too long trying to execute unsuccessful actions.
'''
for p in current_gameboard['players']:
if 'phase_game' not in p.agent._agent_memory:
p.agent._agent_memory['phase_game'] = 1
p.agent._agent_memory['count_unsuccessful_tries'] = 0
if player.agent._agent_memory['phase_game'] != 1:
player.agent._agent_memory['phase_game'] = 1
player.agent._agent_memory['count_unsuccessful_tries'] = 0
if isinstance(code, list):
code_flag = 0
for c in code:
if c == flag_config_dict['failure_code']:
code_flag = 1
break
if code_flag:
player.agent._agent_memory['count_unsuccessful_tries'] += 1
logger.debug(player.player_name + ' has executed an unsuccessful out of turn action, incrementing unsuccessful_tries ' +
'counter to ' + str(player.agent._agent_memory['count_unsuccessful_tries']))
elif code == flag_config_dict['failure_code']:
player.agent._agent_memory['count_unsuccessful_tries'] += 1
logger.debug(player.player_name + ' has executed an unsuccessful out of turn action, incrementing unsuccessful_tries ' +
'counter to ' + str(player.agent._agent_memory['count_unsuccessful_tries']))
if player.agent._agent_memory['count_unsuccessful_tries'] >= UNSUCCESSFUL_LIMIT:
logger.debug(player.player_name + ' has reached out of turn unsuccessful action limits.')
if "skip_turn" in allowable_moves:
logger.debug(player.player_name+ ': I am skipping turn since I have crossed unsuccessful limits.')
player.agent._agent_memory['previous_action'] = "skip_turn"
return ("skip_turn", dict())
elif "concluded_actions" in allowable_moves:
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
logger.debug(player.player_name+ ': I am concluding actions since I have crossed unsuccessful limits.')
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
if "accept_trade_offer" in allowable_moves:
param = dict()
param['player'] = player.player_name
param['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name+ ': Should I accept the trade offer by '+player.outstanding_trade_offer['from_player'].player_name+'?')
logger.debug('('+player.player_name+' currently has cash balance of '+str(player.current_cash)+')')
if (player.outstanding_trade_offer['cash_offered'] <= 0 and len(player.outstanding_trade_offer['property_set_offered'])==0) and \
(player.outstanding_trade_offer['cash_wanted'] > 0 or len(player.outstanding_trade_offer['property_set_wanted']) > 0):
logger.debug('Asking for free money or property without money or property in return.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
pass  # asking for free money or property without anything in return (i.e., no money and no property offered) --> reject the trade offer
elif player.outstanding_trade_offer['cash_wanted'] - player.outstanding_trade_offer['cash_offered'] > player.current_cash:
logger.debug('Cash wanted from me in the trade offer is more than the cash in hand with me or I am near bankruptcy situation and need to play safe.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
pass  # cash wanted is more than that offered and the net difference exceeds the cash that the player has --> reject the trade offer
else:
reject_flag = 0
offered_properties_net_worth = 0
wanted_properties_net_worth = 0
for prop in player.outstanding_trade_offer['property_set_wanted']:
if prop.is_mortgaged:
reject_flag = 1 #cannot trade mortgaged properties, reject trade offer
logger.debug('Trade offer involves mortgaged properties.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
break
else:
wanted_properties_net_worth += prop.price
if reject_flag == 0:
for prop in player.outstanding_trade_offer['property_set_offered']:
if prop.is_mortgaged:
reject_flag = 1 #from_player cannot offer mortgaged properties, reject trade offer
logger.debug('Trade offer involves mortgaged properties.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
break
else:
offered_properties_net_worth += prop.price
if reject_flag == 0:
#GOAL -- increase monopolies
#calculate the net worth of offer vs net worth of request --> makes sense to accept trade only if the offer is greater than request
#net worth of offer = cash + total price of all houses
#positive net_amount_requested implies that the requested net amount is greater than offered net amount
net_offer_worth = (offered_properties_net_worth + player.outstanding_trade_offer['cash_offered']) - \
(wanted_properties_net_worth + player.outstanding_trade_offer['cash_wanted'])
net_amount_requested = -1*net_offer_worth
count_create_new_monopoly = 0
count_lose_existing_monopoly = 0  ## ideally the player does not have to worry about losing monopolies since the player who makes the offer
# only requests lone properties
for prop in player.outstanding_trade_offer['property_set_offered']:
if agent_helper_functions.will_property_complete_set(player,prop,current_gameboard):
count_create_new_monopoly += 1
for prop in player.outstanding_trade_offer['property_set_wanted']:
if prop.color in player.full_color_sets_possessed:
count_lose_existing_monopoly += 1
#if you end up losing more monopolies than gaining monopolies (although this condition should never come up) then reject trade offer
if count_lose_existing_monopoly - count_create_new_monopoly > 0:
logger.debug('Player loses more monopolies than he gains.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
reject_flag = 1
#if you end up losing the same number of monopolies as you gain, then accept the offer based on the following multiple conditions.
# Basically you get no new monopolies, and ideally you don't lose any either (only properties that don't belong to your monopolized
# color groups are requested from you in the trade).
elif count_lose_existing_monopoly - count_create_new_monopoly == 0:
if (player.outstanding_trade_offer['cash_wanted'] - player.outstanding_trade_offer['cash_offered']) >= player.current_cash:
logger.debug('Cash wanted from me in the trade offer is more than the cash in hand with me or I am near bankruptcy situation and need to play safe.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
reject_flag = 1 ##just double checking although this condition was verified before getting here.
elif player.current_cash - (player.outstanding_trade_offer['cash_wanted'] - player.outstanding_trade_offer['cash_offered']) < current_gameboard['go_increment']/2:
logger.debug('Cash wanted from me in the trade offer is more than the cash in hand with me or I am near bankruptcy situation and need to play safe.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
reject_flag = 1 ##too risky if players cash after transaction drops below half of go_increment value --> hence reject trade offer
elif (player.current_cash - (player.outstanding_trade_offer['cash_wanted'] - player.outstanding_trade_offer['cash_offered']) < current_gameboard['go_increment']) \
and net_offer_worth <= 0:
logger.debug('No gain from accepting trade offer.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
reject_flag = 1  ## if player cash is > go_increment/2 and < go_increment but the net worth of the total transaction is negative --> reject trade offer
else:
reject_flag =0 ##accept only if you end up getting a higher net worth by accepting the trade although you get no new monopolies
#else you get to monopolize more locations than you had before --> then ACCEPT THE TRADE OFFER
elif count_create_new_monopoly - count_lose_existing_monopoly > 0:
if (player.outstanding_trade_offer['cash_wanted'] - player.outstanding_trade_offer['cash_offered']) >= player.current_cash:
logger.debug('Cash wanted from me in the trade offer is more than the cash in hand with me or I am near bankruptcy situation and need to play safe.')
logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
reject_flag = 1 ##just double checking although this condition was verified before getting here.
else:
reject_flag = 0
if reject_flag == 0 and player.agent._agent_memory['previous_action'] != "accept_trade_offer": # so that it does not keep trying to accept a trade offer that was declined previously
logger.debug(player.player_name + " accepted trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
logger.debug(player.player_name + " recieved amount = " + str(player.outstanding_trade_offer['cash_offered']) + " and offered amount = " +
str(player.outstanding_trade_offer['cash_wanted']) + " during trade")
player.agent._agent_memory['previous_action'] = "accept_trade_offer"
return ("accept_trade_offer", param)
elif reject_flag == 1:
#logger.debug(player.player_name + " rejected trade offer from " + player.outstanding_trade_offer['from_player'].player_name)
pass
if "accept_sell_property_offer" in allowable_moves:
## Ideally accept_sell_offer should never enter allowable moves since henceforth make_trade_offer also takes care of make_sell_offer and
## accept_trade_offer takes care of accept_sell_offer.
## This case is included to accommodate a make_sell_property offer raised by an external agent.
## Our agent will never make a sell property offer; it only makes trade offers, which raise an accept_trade_offer action.
param = dict()
param['player'] = player.player_name
param['current_gameboard'] = "current_gameboard"
# we accept an offer under one of two conditions:
logger.debug(player.player_name+ ': Should I accept the offer by '+player.outstanding_property_offer['from_player'].player_name+' to buy '+\
player.outstanding_property_offer['asset'].name+' for '+str(player.outstanding_property_offer['price'])+'?')
logger.debug('('+player.player_name+' currently has cash balance of '+str(player.current_cash)+')')
if player.outstanding_property_offer['asset'].is_mortgaged or player.outstanding_property_offer['price']>player.current_cash:
pass # ignore the offer if the property is mortgaged or will result in insolvency. This pass doesn't require 'filling' in.
elif player.current_cash-player.outstanding_property_offer['price'] >= current_gameboard['go_increment'] and \
player.outstanding_property_offer['price']<=player.outstanding_property_offer['asset'].price:
# 1. we can afford it, and it's at or below market rate so let's buy it
logger.debug(player.player_name+ ': I am accepting the offer to buy '+player.outstanding_property_offer['asset'].name+' since I can afford' \
'it and it is being offered at or below market rate.')
player.agent._agent_memory['previous_action'] = "accept_sell_property_offer"
return ("accept_sell_property_offer", param)
elif agent_helper_functions.will_property_complete_set(player, player.outstanding_property_offer['asset'],current_gameboard):
# 2. less affordable, but we stand to gain by monopoly
if player.current_cash - player.outstanding_property_offer['price'] >= current_gameboard['go_increment']/2: # risky, but worth it
logger.debug(player.player_name+ ': I am accepting the offer to buy '+ player.outstanding_property_offer[
'asset'].name+ ' since I can afford ' \
'it (albeit barely so) and it will let me complete my color set.')
player.agent._agent_memory['previous_action'] = "accept_sell_property_offer"
return ("accept_sell_property_offer", param)
if player.status != 'current_move': # these actions are considered only if it's NOT our turn to roll the dice.
if "improve_property" in allowable_moves: # beef up full color sets to maximize rent potential.
param = agent_helper_functions.identify_improvement_opportunity(player, current_gameboard)
if param:
if player.agent._agent_memory['previous_action'] == "improve_property" and code == flag_config_dict['failure_code']:
logger.debug(player.player_name+ ': I want to improve property '+param['asset'].name+ ' but I cannot, due to reasons I do not understand. Aborting improvement attempt...')
else:
logger.debug(player.player_name+ ': I am going to improve property '+param['asset'].name)
player.agent._agent_memory['previous_action'] = "improve_property"
param['player'] = param['player'].player_name
param['asset'] = param['asset'].name
param['current_gameboard'] = "current_gameboard"
return ("improve_property", param)
player_mortgaged_assets_list = list()
if player.mortgaged_assets:
player_mortgaged_assets_list = _set_to_sorted_list_mortgaged_assets(player.mortgaged_assets)
for m in player_mortgaged_assets_list:
if player.current_cash-(m.mortgage*(1+current_gameboard['bank'].mortgage_percentage)) >= current_gameboard['go_increment'] and "free_mortgage" in allowable_moves:
# free mortgages till we can afford it. the second condition should not be necessary but just in case.
param = dict()
param['player'] = player.player_name
param['asset'] = m.name
param['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name+ ': I am going to free mortgage on '+ m.name)
player.agent._agent_memory['previous_action'] = "free_mortgage"
return ("free_mortgage", param)
else:
#purpose_flags are sent while curating a trade offer to imply why the trade offer was made:
## 1 --> low on cash, urgently in need of cash
## 2 --> gain monopoly
if player.current_cash < current_gameboard['go_increment'] and "make_trade_offer" in allowable_moves:
# in this case, the trade offer is a duplication of make_sell_property_offer since the player is in urgent need of cash and
#cannot strategize a trade
potential_offer_list = agent_helper_functions.identify_property_trade_offer_to_player(player, current_gameboard)
potential_request_list = agent_helper_functions.identify_property_trade_wanted_from_player(player, current_gameboard)
param_list = agent_helper_functions.curate_trade_offer_multiple_players(player, potential_offer_list, potential_request_list, current_gameboard, purpose_flag=1)
#logger.debug(param)
return_action_list = []
return_param_list = []
if param_list and player.agent._agent_memory['previous_action'] != "make_trade_offer": # we only make one offer per turn. Otherwise we'd
# be stuck in a loop
if len(param_list)>1:
logger.debug(player.player_name + ": I am going to make trade offers to multiple players, ie " + str(len(param_list)) + " players.")
for param in param_list:
logger.debug(player.player_name+ ': I am making an offer to trade '+list(param['offer']['property_set_offered'])[0].name+' to '+
param['to_player'].player_name+' for '+str(param['offer']['cash_wanted'])+' dollars')
param['from_player'] = param['from_player'].player_name
param['to_player'] = param['to_player'].player_name
prop_set_offered = set()
for item in param['offer']['property_set_offered']:
prop_set_offered.add(item.name)
param['offer']['property_set_offered'] = prop_set_offered
prop_set_wanted = set()
for item in param['offer']['property_set_wanted']:
prop_set_wanted.add(item.name)
param['offer']['property_set_wanted'] = prop_set_wanted
player.agent._agent_memory['previous_action'] = "make_trade_offer"
return_action_list.append("make_trade_offer")
return_param_list.append(param)
return (return_action_list, return_param_list)
elif "make_trade_offer" in allowable_moves:
# trade offer is being curated to maximise monopolies
potential_offer_list = agent_helper_functions.identify_property_trade_offer_to_player(player, current_gameboard)
potential_request_list = agent_helper_functions.identify_property_trade_wanted_from_player(player, current_gameboard)
param_list = agent_helper_functions.curate_trade_offer_multiple_players(player, potential_offer_list, potential_request_list, current_gameboard, purpose_flag=2)
#logger.debug(param)
return_action_list = []
return_param_list = []
if param_list and player.agent._agent_memory['previous_action'] != "make_trade_offer": # we only make one offer per turn. Otherwise we'd
# be stuck in a loop
if len(param_list)>1:
logger.debug(player.player_name + ": I am going to make trade offers to multiple players, ie " + str(len(param_list)) + " players.")
for param in param_list:
logger.debug(player.player_name+ ': I am making a trade offer with '+ param['to_player'].player_name)
param['from_player'] = param['from_player'].player_name
param['to_player'] = param['to_player'].player_name
prop_set_offered = set()
for item in param['offer']['property_set_offered']:
prop_set_offered.add(item.name)
param['offer']['property_set_offered'] = prop_set_offered
prop_set_wanted = set()
for item in param['offer']['property_set_wanted']:
prop_set_wanted.add(item.name)
param['offer']['property_set_wanted'] = prop_set_wanted
player.agent._agent_memory['previous_action'] = "make_trade_offer"
return_action_list.append("make_trade_offer")
return_param_list.append(param)
return (return_action_list, return_param_list)
# if we ran the gamut, and did not return, then it's time to skip turn or conclude actions
if "skip_turn" in allowable_moves:
logger.debug(player.player_name+ ': I am skipping turn')
player.agent._agent_memory['previous_action'] = "skip_turn"
return ("skip_turn", dict())
elif "concluded_actions" in allowable_moves:
logger.debug(player.player_name+ ': I am concluding actions')
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
def make_post_roll_move(player, current_gameboard, allowable_moves, code):
"""
The agent is in the post-roll phase and must decide what to do (next). The main decision we make here is singular:
should we buy the property we landed on, if that option is available?
--If we do buy the property, we end the phase by concluding the turn.
--If we cannot buy a property, we conclude the turn. If we have negative cash balance, we do not handle it here, but
in the handle_negative_cash_balance function. This means that the background agent never calls any of
the mortgage or sell-property actions here UNLESS we need to mortgage or sell a property in order to buy the current
one and it is well worth our while.
Note that if your agent decides not to buy the property before concluding the turn, the property will move to
auction before your turn formally concludes.
This background agent never sells a house or hotel in post_roll.
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:param allowable_moves: A set of function names, each of which is defined in action_choices (imported in this file), and that
will always be a subset of the action choices for post-die-roll in the game schema. Your returned action choice must be from
allowable_moves; we will check for this when you return.
:param code: See the preamble of this file for an explanation of this code
:return: A 2-element tuple, the first of which is the action you want to take, and the second is a dictionary of
parameters that will be passed into the function representing that action when it is executed.
The dictionary must exactly contain the keys and expected value types expected by that action in
action_choices
"""
'''
phase_game defines which phase the player is in during the game
0 -> preroll
1 -> out of turn
2 -> postroll
count_unsuccessful_tries in the agent memory keeps a record of unsuccessful actions executed by that player agent in each phase_game.
If this count reaches UNSUCCESSFUL_LIMIT before a phase_game change, then the player has no option but to either skip_turn or
conclude_actions. This count resets to 0 when the phase_game changes.
This ensures that the game does not go on for too long trying to execute unsuccessful actions.
'''
for p in current_gameboard['players']:
if 'phase_game' not in p.agent._agent_memory:
p.agent._agent_memory['phase_game'] = 2
p.agent._agent_memory['count_unsuccessful_tries'] = 0
if player.agent._agent_memory['phase_game'] != 2:
player.agent._agent_memory['phase_game'] = 2
for p in current_gameboard['players']:
if p.status != 'lost':
p.agent._agent_memory['count_unsuccessful_tries'] = 0
if code == flag_config_dict['failure_code']:
player.agent._agent_memory['count_unsuccessful_tries'] += 1
logger.debug(player.player_name + ' has executed an unsuccessful postroll action, incrementing unsuccessful_tries ' +
'counter to ' + str(player.agent._agent_memory['count_unsuccessful_tries']))
if player.agent._agent_memory['count_unsuccessful_tries'] >= UNSUCCESSFUL_LIMIT:
logger.debug(player.player_name + ' has reached postroll unsuccessful action limits.')
if "concluded_actions" in allowable_moves:
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
logger.debug(player.player_name+ ': I am concluding actions since I have crossed unsuccessful limits.')
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
current_location = current_gameboard['location_sequence'][player.current_position]
if "buy_property" in allowable_moves:
if code == flag_config_dict['failure_code']:
logger.debug(player.player_name+': I did not succeed the last time in buying this property. Concluding actions...')
return ("concluded_actions", dict())
params = dict()
params['player'] = player.player_name
params['asset'] = current_location.name
params['current_gameboard'] = "current_gameboard"
if make_buy_property_decision(player, current_gameboard, current_location):
logger.debug(player.player_name+ ': I am attempting to buy property '+current_location.name)
player.agent._agent_memory['previous_action'] = "buy_property"
return ("buy_property", params)
else:
# make_buy returned false, but is there still a chance?
if agent_helper_functions.will_property_complete_set(player,current_location,current_gameboard):
# if we can raise enough money, then the 'next' time around we'll succeed in buying
to_mortgage = agent_helper_functions.identify_potential_mortgage(player,current_location.price,True)
if to_mortgage:
params['asset'] = to_mortgage.name
logger.debug(player.player_name+ ': I am attempting to mortgage property '+ params['asset'])
player.agent._agent_memory['previous_action'] = "mortgage_property"
return ("mortgage_property", params)
else: # last chance.
to_sell = agent_helper_functions.identify_potential_sale(player, current_gameboard, current_location.price,True)
if to_sell:
params['asset'] = to_sell.name
logger.debug(player.player_name+ ': I am attempting to sell property '+ current_location.name+' to the bank')
player.agent._agent_memory['previous_action'] = "sell_property"
return ("sell_property", params)
if "concluded_actions" in allowable_moves:
# player.agent._agent_memory['previous_action'] = action_choices.concluded_actions
return ("concluded_actions", dict())
else:
logger.error("Exception")
raise Exception
def make_buy_property_decision(player, current_gameboard, asset):
"""
The agent decides to buy the property if:
(i) it can 'afford' it. Our definition of afford is that we must have at least go_increment cash balance after
the purchase.
(ii) we can obtain a full color set through the purchase, and still have positive cash balance afterwards (though
it may be less than go_increment).
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:return: A Boolean. If True, then you decided to purchase asset from the bank, otherwise False. We allow you to
purchase the asset even if you don't have enough cash; however, if you do, you will end up with a negative
cash balance and will have to handle that if you don't want to lose the game at the end of your move (see notes
in handle_negative_cash_balance)
"""
decision = False
if player.current_cash - asset.price >= current_gameboard['go_increment']: # case 1: can we afford it?
logger.debug(player.player_name+ ': I will attempt to buy '+ asset.name+ ' from the bank.')
decision = True
elif asset.price <= player.current_cash and \
agent_helper_functions.will_property_complete_set(player,asset,current_gameboard):
logger.debug(player.player_name+ ': I will attempt to buy '+ asset.name+ ' from the bank.')
decision = True
return decision
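# A hedged worked example of the two rules above (numbers are illustrative and assume go_increment == 200):
#   cash 1500, asset price 400 -> 1500 - 400 = 1100 >= 200, so case (i) applies and we buy.
#   cash 350, asset price 300  -> 350 - 300 = 50 < 200, but if will_property_complete_set returns True
#   and 300 <= 350, case (ii) applies and we still buy (possibly ending below go_increment).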
def make_bid(player, current_gameboard, asset, current_bid):
"""
Decide the amount you wish to bid for asset in auction, given the current_bid that is currently going. If you don't
return a bid that is strictly higher than current_bid you will be removed from the auction and won't be able to
bid anymore. Note that it is not necessary that you are actually on the location on the board representing asset, since
you will be invited to the auction automatically once a player who lands on a bank-owned asset rejects buying that asset
(this could be you or anyone else).
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:param asset: A purchasable instance of Location (i.e. real estate, utility or railroad)
:param current_bid: The current bid that is going in the auction. If you don't bid higher than this amount, the bank
will remove you from the auction proceedings. You could also always return 0 to voluntarily exit the auction.
:return: An integer that indicates what you wish to bid for asset
"""
if current_bid < asset.price:
new_bid = current_bid + (asset.price-current_bid)/2
if new_bid < player.current_cash:
return new_bid
else: # We are aware that this can be simplified with a simple return 0 statement at the end. However in the final baseline agent
# the return 0's would be replaced with more sophisticated rules. Think of them as placeholders.
return 0 # this will lead to a rejection of the bid downstream automatically
elif current_bid < player.current_cash and agent_helper_functions.will_property_complete_set(player,asset,current_gameboard):
# We are prepared to bid more than the price of the asset only if it doesn't result in insolvency, and
# if we can get a monopoly this way
return current_bid+(player.current_cash-current_bid)/4
else:
return 0 # no reason to bid
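# A hedged worked example of the bidding rule above (illustrative numbers only):
#   asset price 400, current_bid 100, cash 1000 -> bid 100 + (400 - 100)/2 = 250 (affordable, so we return 250).
#   asset price 400, current_bid 450, cash 1000, and the asset would complete a color set
#   -> bid 450 + (1000 - 450)/4 = 587.5, i.e. we are willing to go above the list price for a monopoly.
#   In every other situation the function returns 0, which drops us out of the auction.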
def handle_negative_cash_balance(player, current_gameboard):
"""
You have a negative cash balance at the end of your move (i.e. your post-roll phase is over) and you must handle
this issue before we move to the next player's pre-roll. If you do not succeed in restoring your cash balance to
0 or positive, bankruptcy proceedings will begin and you will lose the game.
The background agent tries a number of things to get itself out of a financial hole. First, it checks whether
mortgaging alone can save it. If not, then it begins selling unimproved properties in ascending order of price, the idea being
that it might as well get rid of cheap properties. This may not be the most optimal move but it is reasonable.
If it ends up selling all unimproved properties and is still insolvent, it starts selling improvements, followed
by a sale of the (now) unimproved properties.
:param player: A Player instance. You should expect this to be the player that is 'making' the decision (i.e. the player
instantiated with the functions specified by this decision agent).
:param current_gameboard: A dict. The global data structure representing the current game board.
:return: -1 (failure code) if you do not try to address your negative cash balance, or 1 if you tried and believed you succeeded.
Note that even if you do return 1 (successful move action), we will check to see whether you have non-negative cash balance. The rule of thumb
is to return 1 (successful move action) as long as you 'try', or -1 if you don't try (in which case you will be declared bankrupt and lose the game)
"""
if player.current_cash >= 0: # prelim check to see if player has negative cash balance
return (None, flag_config_dict['successful_action'])
#player should evaluate all the possible options that can save it from bankruptcy and take most promising actions
mortgage_potentials = list()
max_sum = 0
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_player_assets_list:
if a.is_mortgaged:
continue
elif a.loc_class=='real_estate' and (a.num_houses>0 or a.num_hotels>0):
continue
else:
mortgage_potentials.append((a, a.mortgage))
max_sum += a.mortgage
if mortgage_potentials and max_sum+player.current_cash >= 0: # if the second condition is not met, no point in mortgaging
sorted_potentials = sorted(mortgage_potentials, key=lambda x: x[1]) # sort by mortgage in ascending order
for p in sorted_potentials:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
params = dict()
params['player'] = player.player_name
params['asset'] = p[0].name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name+ ': I am attempting to mortgage property '+ params['asset'])
player.agent._agent_memory['previous_action'] = "mortgage_property"
return ("mortgage_property", params)
# if we got here, it means we're still in trouble. Next move is to sell unimproved properties. We don't check if
# the total will cover our debts, since we're desperate at this point.
# the following sale potentials do not include properties from monopolized color groups
sale_potentials = list()
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_player_assets_list:
if a.color in player.full_color_sets_possessed:
continue
elif a.is_mortgaged:
if (a.price*current_gameboard['bank'].property_sell_percentage)-Bank.calculate_mortgage_owed(a, current_gameboard) > 0:
# default case, this will never be > 0 unless novelty is introduced
sale_potentials.append((a, (a.price*current_gameboard['bank'].property_sell_percentage)-Bank.calculate_mortgage_owed(a, current_gameboard)))
else:
continue # no point selling a mortgaged property if you don't get anything out of it
elif a.loc_class=='real_estate' and (a.num_houses>0 or a.num_hotels>0):
continue
else:
sale_potentials.append((a, a.price*current_gameboard['bank'].property_sell_percentage))
if sale_potentials:  # unlike the mortgage case above, we do not check whether the total will cover the debt
sorted_potentials = sorted(sale_potentials, key=lambda x: x[1]) # sort by sale value in ascending order
for p in sorted_potentials:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
params = dict()
params['player'] = player.player_name
params['asset'] = p[0].name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name + ': I am attempting to sell property '+ p[0].name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_property"
return ("sell_property", params)
# If selling properties from non-monopolized color groups does not relieve the player from debt, only then do we start thinking about giving up monopolized groups.
# If we come across an unimproved property which belongs to a monopoly, we still have to loop through the other properties from the same color group and
# sell the houses and hotels first, because we cannot sell this property while the color group has improved properties.
# We first check if selling houses and hotels one by one on the other improved properties of the same color group relieves the player of his debt. If it does,
# then we return without selling the current property; else we sell the property and the player loses the monopoly of that color group.
max_sum = 0
sale_potentials = list()
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_player_assets_list:
if a.is_mortgaged:
if a.price*current_gameboard['bank'].property_sell_percentage-Bank.calculate_mortgage_owed(a, current_gameboard) > 0:
# default case, this will never be > 0 unless novelty is introduced
sale_potentials.append((a, (a.price*current_gameboard['bank'].property_sell_percentage)-Bank.calculate_mortgage_owed(a, current_gameboard)))
else:
continue # no point selling a mortgaged property if you don't get anything out of it
elif a.loc_class=='real_estate' and (a.num_houses > 0 or a.num_hotels > 0):
continue
else:
sale_potentials.append((a, a.price*current_gameboard['bank'].property_sell_percentage))
if sale_potentials:
sorted_potentials = sorted(sale_potentials, key=lambda x: x[1]) # sort by sell value in ascending order
for p in sorted_potentials:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for prop in sorted_player_assets_list:
if prop != p[0] and prop.color == p[0].color and p[0].color in player.full_color_sets_possessed:
if hasattr(prop, 'num_hotels'): # add by Peter, for composite novelty
if prop.num_hotels > 0: # if current asset has no hotels, prop can only have max of 1 hotel (uniform improvement rule)
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action'])
params = dict()
params['player'] = player.player_name
params['asset'] = prop.name
params['current_gameboard'] = "current_gameboard"
params['sell_house'] = False
params['sell_hotel'] = True
logger.debug(player.player_name+ ': I am attempting to sell hotel on '+ prop.name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_house_hotel"
return ("sell_house_hotel", params)
elif prop.num_houses > 0: # if current asset has no houses, prop can only have max of 1 house (uniform improvement rule)
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action'])
params = dict()
params['player'] = player.player_name
params['asset'] = prop.name
params['current_gameboard'] = "current_gameboard"
params['sell_house'] = True
params['sell_hotel'] = False
logger.debug(player.player_name+ ': I am attempting to sell house on '+ prop.name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_house_hotel"
return ("sell_house_hotel", params)
else:
continue
params = dict()
params['player'] = player.player_name
params['asset'] = p[0].name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name + ': I am attempting to sell property '+ p[0].name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_property"
return ("sell_property", params)
# we reach here if the player still has not cleared his debt. The above loop has now resulted in some more non-monopolized properties.
# Hence we have to loop through these properties once again to decide on the potential properties that can be mortgaged or sold.
mortgage_potentials = list()
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_player_assets_list:
if a.is_mortgaged:
continue
elif a.loc_class=='real_estate' and (a.num_houses>0 or a.num_hotels>0):
continue
else:
mortgage_potentials.append((a, a.mortgage))
max_sum += a.mortgage  # accumulate the mortgage total so the affordability check below is meaningful
if mortgage_potentials and max_sum+player.current_cash >= 0: # if the second condition is not met, no point in mortgaging
sorted_potentials = sorted(mortgage_potentials, key=lambda x: x[1]) # sort by mortgage in ascending order
for p in sorted_potentials:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
params = dict()
params['player'] = player.player_name
params['asset'] = p[0].name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name+ ': I am attempting to mortgage property '+ params['asset'])
player.agent._agent_memory['previous_action'] = "mortgage_property"
return ("mortgage_property", params)
# following sale potentials loops through the properties that have become unmonopolized due to the above loops and
# does not include properties from monopolized color groups
sale_potentials = list()
sorted_player_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_player_assets_list:
if a.color in player.full_color_sets_possessed:
continue
elif a.is_mortgaged:
if (a.price*current_gameboard['bank'].property_sell_percentage)-Bank.calculate_mortgage_owed(a, current_gameboard) > 0:
sale_potentials.append((a, (a.price*current_gameboard['bank'].property_sell_percentage)-Bank.calculate_mortgage_owed(a, current_gameboard)))
else:
continue
elif a.loc_class=='real_estate' and (a.num_houses>0 or a.num_hotels>0):
continue
else:
sale_potentials.append((a, a.price*current_gameboard['bank'].property_sell_percentage))
if sale_potentials:  # as before, we do not check whether the total will cover the debt
sorted_potentials = sorted(sale_potentials, key=lambda x: x[1]) # sort by sale value in ascending order
for p in sorted_potentials:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
params = dict()
params['player'] = player.player_name
params['asset'] = p[0].name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name + ': I am attempting to sell property '+ p[0].name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_property"
return ("sell_property", params)
count = 0
# if we're STILL not done, then the only option is to start selling houses and hotels from the remaining improved monopolized properties, if we have 'em
while (player.num_total_houses > 0 or player.num_total_hotels > 0) and count <3: # often times, a sale may not succeed due to uniformity requirements. We keep trying till everything is sold,
# or cash balance turns non-negative.
count += 1 # there is a slim chance that it is impossible to sell an improvement unless the player does something first (e.g., replace 4 houses with a hotel).
# The count ensures we terminate at some point, regardless.
sorted_assets_list = _set_to_sorted_list_assets(player.assets)
for a in sorted_assets_list:
if a.loc_class == 'real_estate' and a.num_houses > 0:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
flag = True
for same_colored_asset in current_gameboard['color_assets'][a.color]:
if same_colored_asset == a:
continue
if same_colored_asset.num_houses > a.num_houses or a.num_hotels == 1:
flag = False
break
if flag:
params = dict()
params['player'] = player.player_name
params['asset'] = a.name
params['current_gameboard'] = "current_gameboard"
params['sell_house'] = True
params['sell_hotel'] = False
logger.debug(player.player_name+ ': I am attempting to sell house on '+ a.name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_house_hotel"
return ("sell_house_hotel", params)
elif a.loc_class == 'real_estate' and a.num_hotels > 0:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
flag = True
for same_colored_asset in current_gameboard['color_assets'][a.color]:
if same_colored_asset == a:
continue
if a.num_hotels == 1 and not (same_colored_asset.num_hotels == 1 or (same_colored_asset.num_hotels == 0 and
same_colored_asset.num_houses == 0)) : # if there are no hotels on other properties,
# there must not be houses either, otherwise the uniform improvement rule gets broken. The not on the
# outside enforces this rule.
flag = False
break
elif a.num_hotels < same_colored_asset.num_hotels: # need to follow uniform improvement rule
flag = False
break
if flag:
params = dict()
params['player'] = player.player_name
params['asset'] = a.name
params['current_gameboard'] = "current_gameboard"
params['sell_house'] = False
params['sell_hotel'] = True
logger.debug(player.player_name+ ': I am attempting to sell hotel on '+ a.name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_house_hotel"
return ("sell_house_hotel", params)
# final straw
final_sale_assets = player.assets.copy()
sorted_player_assets_list = _set_to_sorted_list_assets(final_sale_assets)
for a in sorted_player_assets_list:
if player.current_cash >= 0:
return (None, flag_config_dict['successful_action']) # we're done
if a.is_mortgaged:
continue
elif a.loc_class=='real_estate' and (a.num_houses>0 or a.num_hotels>0):
continue
params = dict()
params['player'] = player.player_name
params['asset'] = a.name
params['current_gameboard'] = "current_gameboard"
logger.debug(player.player_name + ': I am attempting to sell property '+ a.name + ' to the bank')
player.agent._agent_memory['previous_action'] = "sell_property"
return ("sell_property", params)
return (None, flag_config_dict['successful_action']) # if we didn't succeed in establishing solvency, it will get caught by the simulator. Since we tried, we return 1.
def _set_to_sorted_list_mortgaged_assets(player_mortgaged_assets):
player_m_assets_list = list()
player_m_assets_dict = dict()
for item in player_mortgaged_assets:
player_m_assets_dict[item.name] = item
for sorted_key in sorted(player_m_assets_dict):
player_m_assets_list.append(player_m_assets_dict[sorted_key])
return player_m_assets_list
def _set_to_sorted_list_assets(player_assets):
player_assets_list = list()
player_assets_dict = dict()
for item in player_assets:
player_assets_dict[item.name] = item
for sorted_key in sorted(player_assets_dict):
player_assets_list.append(player_assets_dict[sorted_key])
return player_assets_list
def _build_decision_agent_methods_dict():
"""
This function builds the decision agent methods dictionary.
:return: The decision agent dict. Keys should be exactly as stated in this example, but the functions can be anything
as long as you use/expect the exact function signatures we have indicated in this document.
"""
ans = dict()
ans['handle_negative_cash_balance'] = handle_negative_cash_balance
ans['make_pre_roll_move'] = make_pre_roll_move
ans['make_out_of_turn_move'] = make_out_of_turn_move
ans['make_post_roll_move'] = make_post_roll_move
ans['make_buy_property_decision'] = make_buy_property_decision
ans['make_bid'] = make_bid
ans['type'] = "decision_agent_methods"
return ans
decision_agent_methods = _build_decision_agent_methods_dict() # this is the main data structure that is needed by gameplay
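# A minimal sketch (not part of the original agent) of how a custom agent might
# reuse the default method table above while wrapping a single entry. Only the
# dictionary keys are fixed by the contract documented in
# _build_decision_agent_methods_dict; the wrapper keeps a generic *args/**kwargs
# signature because the exact argument lists are whatever the simulator passes
# to the default implementations.
def _build_custom_agent_methods_dict():
    methods = dict(_build_decision_agent_methods_dict())
    default_buy = methods['make_buy_property_decision']

    def logged_buy_decision(*args, **kwargs):
        # delegate to the default decision function and log its outcome
        decision = default_buy(*args, **kwargs)
        logger.debug('custom agent: buy decision was ' + str(decision))
        return decision

    methods['make_buy_property_decision'] = logged_buy_decision
    return methods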
|
PypiClean
|
/portapy-0.0.1.post2-py3-none-any.whl/portapy-js/node_modules/js-tokens/CHANGELOG.md
|
### Version 4.0.0 (2018-01-28) ###
- Added: Support for ES2018. The only change needed was recognizing the `s`
regex flag.
- Changed: _All_ tokens returned by the `matchToToken` function now have a
`closed` property. It is set to `undefined` for the tokens where “closed”
  doesn’t make sense. This means that all token objects have the same shape,
which might improve performance.
These are the breaking changes:
- `'/a/s'.match(jsTokens)` no longer returns `['/', 'a', '/', 's']`, but
`['/a/s']`. (There are of course other variations of this.)
- Code that relies on some token objects not having the `closed` property could
now behave differently.
### Version 3.0.2 (2017-06-28) ###
- No code changes. Just updates to the readme.
### Version 3.0.1 (2017-01-30) ###
- Fixed: ES2015 unicode escapes with more than 6 hex digits are now matched
correctly.
### Version 3.0.0 (2017-01-11) ###
This release contains one breaking change that should [improve performance in
V8][v8-perf]:
> So how can you, as a JavaScript developer, ensure that your RegExps are fast?
> If you are not interested in hooking into RegExp internals, make sure that
> neither the RegExp instance, nor its prototype is modified in order to get the
> best performance:
>
> ```js
> var re = /./g;
> re.exec(''); // Fast path.
> re.new_property = 'slow';
> ```
This module used to export a single regex, with `.matchToToken` bolted
on, just like in the above example. This release changes the exports of
the module to avoid this issue.
Before:
```js
import jsTokens from "js-tokens"
// or:
var jsTokens = require("js-tokens")
var matchToToken = jsTokens.matchToToken
```
After:
```js
import jsTokens, {matchToToken} from "js-tokens"
// or:
var jsTokens = require("js-tokens").default
var matchToToken = require("js-tokens").matchToToken
```
[v8-perf]: http://v8project.blogspot.se/2017/01/speeding-up-v8-regular-expressions.html
### Version 2.0.0 (2016-06-19) ###
- Added: Support for ES2016. In other words, support for the `**` exponentiation
operator.
These are the breaking changes:
- `'**'.match(jsTokens)` no longer returns `['*', '*']`, but `['**']`.
- `'**='.match(jsTokens)` no longer returns `['*', '*=']`, but `['**=']`.
### Version 1.0.3 (2016-03-27) ###
- Improved: Made the regex ever so slightly smaller.
- Updated: The readme.
### Version 1.0.2 (2015-10-18) ###
- Improved: Limited npm package contents for a smaller download. Thanks to
@zertosh!
### Version 1.0.1 (2015-06-20) ###
- Fixed: Declared an undeclared variable.
### Version 1.0.0 (2015-02-26) ###
- Changed: Merged the 'operator' and 'punctuation' types into 'punctuator'. That
type is now equivalent to the Punctuator token in the ECMAScript
specification. (Backwards-incompatible change.)
- Fixed: A `-` followed by a number is now correctly matched as a punctuator
followed by a number. It used to be matched as just a number, but there is no
such thing as negative number literals. (Possibly backwards-incompatible
change.)
### Version 0.4.1 (2015-02-21) ###
- Added: Support for the regex `u` flag.
### Version 0.4.0 (2015-02-21) ###
- Improved: `jsTokens.matchToToken` performance.
- Added: Support for octal and binary number literals.
- Added: Support for template strings.
### Version 0.3.1 (2015-01-06) ###
- Fixed: Support for unicode spaces. They used to be allowed in names (which is
very confusing), and some unicode newlines were wrongly allowed in strings and
regexes.
### Version 0.3.0 (2014-12-19) ###
- Changed: The `jsTokens.names` array has been replaced with the
`jsTokens.matchToToken` function. The capturing groups of `jsTokens` are no
longer part of the public API; instead use said function. See this [gist] for
an example. (Backwards-incompatible change.)
- Changed: The empty string is now considered an “invalid” token, instead of an
“empty” token (its own group). (Backwards-incompatible change.)
- Removed: component support. (Backwards-incompatible change.)
[gist]: https://gist.github.com/lydell/be49dbf80c382c473004
### Version 0.2.0 (2014-06-19) ###
- Changed: Match ES6 function arrows (`=>`) as an operator, instead of its own
category (“functionArrow”), for simplicity. (Backwards-incompatible change.)
- Added: ES6 splats (`...`) are now matched as an operator (instead of three
punctuations). (Backwards-incompatible change.)
### Version 0.1.0 (2014-03-08) ###
- Initial release.
|
PypiClean
|
/combi-1.1.4.tar.gz/combi-1.1.4/docs/theme/static/doctools.js
|
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir", "dirxml",
"group", "groupEnd", "time", "timeEnd", "count", "trace", "profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {}
}
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
}
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
jQuery.getQueryParameters = function(s) {
if (typeof s == 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
}
/**
* small function to check if an array contains
* a given item.
*/
jQuery.contains = function(arr, item) {
for (var i = 0; i < arr.length; i++) {
if (arr[i] == item)
return true;
}
return false;
}
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node) {
if (node.nodeType == 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && !jQuery.className.has(node.parentNode, className)) {
var span = document.createElement("span");
span.className = className;
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this)
});
}
}
return this.each(function() {
highlight(this);
});
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initModIndex();
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
  // can safely be bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated == 'undefined')
return string;
return (typeof translated == 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated == 'undefined')
return (n == 1) ? singular : plural;
    return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlight');
});
}, 10);
$('<li class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></li>')
.appendTo($('.sidebar .this-page-menu'));
}
},
/**
* init the modindex toggle buttons
*/
initModIndex : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
console.log($('tr.cg-' + idnum).toggle());
if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_MODINDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('.sidebar .this-page-menu li.highlight-link').fadeOut(300);
$('span.highlight').removeClass('highlight');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this == '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
});
|
PypiClean
|
/mindspore_gpu-1.10.0-cp39-cp39-manylinux1_x86_64.whl/mindspore/_akg/akg/topi/x86/conv2d_int8.py
|
"""Conv2D int8 schedule on x86"""
import re
import tvm
from tvm import autotvm
from tvm.autotvm.task import get_config
from tvm.autotvm.task.topi_integration import deserialize_args
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from .. import generic, tag
from ..generic import conv2d as conv2d_generic
from ..util import get_const_tuple
from ..nn.conv2d import conv2d_NCHWc_int8
from .. import nn
from . import conv2d_avx_1x1, conv2d_avx_common
def _get_default_config_int8(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False,
layout='NCHW'):
"""
Get default schedule config for the workload
"""
assert not is_depthwise, "Depthwise Int8 not supported"
wkl = _get_conv2d_workload(data, kernel, strides, padding, out_dtype, layout)
is_kernel_1x1 = wkl.hkernel == 1 and wkl.wkernel == 1
if is_kernel_1x1:
conv2d_generic.fallback_schedule_cpu_1x1_int8(
cfg, wkl, int32_lanes=16, num_int8_elements=4)
else:
conv2d_generic.fallback_schedule_cpu_common_int8(
cfg, wkl, int32_lanes=16, num_int8_elements=4)
def _is_int8_hw_support(data_dtype, kernel_dtype):
"""
Checks to ensure that we can use Intel DLBoost instructions
1) The datatypes are correct.
2) LLVM version has support for the instructions.
3) Target is skylake and above.
"""
# 1) Check datatypes
is_dtype_support = data_dtype == 'uint8' and kernel_dtype == 'int8'
# 2) Check LLVM support
llvm_version = tvm.codegen.llvm_version_major()
is_llvm_support = llvm_version >= 8
# 3) Check target
mcpu = tvm.target.current_target().mcpu
is_target_support = False
if mcpu == 'skylake-avx512' or mcpu == 'cascadelake':
is_target_support = True
return is_dtype_support and is_llvm_support and is_target_support
def _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, layout):
"""Create schedule configuration from input arguments"""
dshape = get_const_tuple(data.shape)
kshape = get_const_tuple(kernel.shape)
pat = re.compile(r'NCHW.+(\d+)c')
if layout == 'NCHW':
n, ic, h, w = dshape
oc, _, kh, kw = kshape
elif layout == 'NHWC':
n, h, w, ic = dshape
kh, kw, oc, _ = kshape
elif pat.match(layout) is not None:
n, ic_chunk, h, w, ic_bn = dshape
target = tvm.target.current_target(allow_none=False)
oc_chunk, k_ic, kh, kw, k_ic_f, oc_bn, k_ic_s = kshape
ic = ic_chunk * ic_bn
assert ic == k_ic * k_ic_f * k_ic_s
oc = oc_chunk*oc_bn
else:
raise ValueError("Not support this layout {} with "
"schedule template.".format(layout))
is_kernel_1x1 = kh == 1 and kw == 1
ph, pw = padding if isinstance(padding, (tuple, list)) else (padding, padding)
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
oh = (h - kh + 2 * ph) // sh + 1
ow = (w - kw + 2 * pw) // sw + 1
# Create schedule config
cfg.define_split('tile_ic', ic, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)
cfg.define_split('tile_oc', oc, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
# Define template function for autotvm task
# We define schedule template in this function instead of
# declaration function since actual input arguments need
# to be altered by the schedule selected.
@autotvm.task.register("topi_x86_conv2d_NCHWc_int8")
def _topi_nn_conv2d_NCHWc_int8(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
if len(args) == 7:
data, kernel, strides, padding, dilation, origin_layout, dtype = args
else:
assert len(args) == 8
data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args
raw_data_shape = get_const_tuple(data.shape)
raw_kernel_shape = get_const_tuple(kernel.shape)
# get config here
cfg = get_config()
_create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, origin_layout)
# change shape with the value in config
ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1])
data_layout = "NCHW%dc" % ic_bn
out_layout = "NCHW%dc" % oc_bn
# Set up the new shape for data and kernel
new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn,
raw_data_shape[2], raw_data_shape[3], ic_bn)
n_elems = 4
new_kernel_shape = (raw_kernel_shape[0] // oc_bn,
raw_kernel_shape[1] // ic_bn,
raw_kernel_shape[2],
raw_kernel_shape[3],
ic_bn // n_elems,
oc_bn,
n_elems)
new_data = tvm.placeholder(new_data_shape, data.dtype)
new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype)
C = _declaration_conv_NCHWc_int8(cfg, new_data, new_kernel, strides, padding, dilation,
data_layout, out_layout, dtype)
s = _schedule_conv2d_NCHWc_int8(cfg, [C])
return s, [new_data, new_kernel, C]
@autotvm.register_topi_compute(conv2d_NCHWc_int8, 'cpu', 'direct')
def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides,
padding, dilation, layout, out_layout, out_dtype):
return nn.conv2d_NCHWc_int8_compute(data,
kernel,
strides,
padding,
dilation,
layout,
out_layout,
out_dtype)
@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, 'cpu', ['direct'])
def _schedule_conv2d_NCHWc_int8(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_NCHWc_int8' in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0] \
if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
else data_vec
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
target = tvm.target.current_target(allow_none=False)
# int8 conv kernel is 7-dim
_, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc_int8(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc_pack, 'cpu', ['direct'])
def schedule_conv2d_nhwc_pack(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 4: # schedule bias + bn + relu
n, h, w, c = op.axis
fused = s[op].fuse(n, h, w)
s[op].parallel(fused)
s[op].vectorize(c)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_nhwc_pack_int8' in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0] \
if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
else data_vec
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
if data.dtype == 'uint8':
kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)
else:
raise ValueError("Only support 1x1 kernel with "
"schedule_conv2d_nhwc_pack.")
else:
raise ValueError("Not support this data type {} with "
"schedule_conv2d_nhwc_pack. Only support int8".format(data.dtype))
scheduled_ops.append(op)
traverse(output_op)
return s
|
PypiClean
|
/spyder-terminal-1.2.2.tar.gz/spyder-terminal-1.2.2/spyder_terminal/server/static/components/regenerator-transform/src/replaceShorthandObjectMethod.js
|
import * as util from "./util";
// this function converts a shorthand object generator method into a normal
// (non-shorthand) object property which is a generator function expression. for
// example, this:
//
// var foo = {
// *bar(baz) { return 5; }
// }
//
// should be replaced with:
//
// var foo = {
// bar: function*(baz) { return 5; }
// }
//
// to do this, it clones the parameter array and the body of the object generator
// method into a new FunctionExpression.
//
// this method can be passed any Function AST node path, and it will return
// either:
// a) the path that was passed in (iff the path did not need to be replaced) or
// b) the path of the new FunctionExpression that was created as a replacement
// (iff the path did need to be replaced)
//
// In either case, though, the caller can count on the fact that the return value
// is a Function AST node path.
//
// If this function is called with an AST node path that is not a Function (or with an
// argument that isn't an AST node path), it will throw an error.
export default function replaceShorthandObjectMethod(path) {
const t = util.getTypes();
if (!path.node || !t.isFunction(path.node)) {
throw new Error("replaceShorthandObjectMethod can only be called on Function AST node paths.");
}
// this function only replaces shorthand object methods (called ObjectMethod
// in Babel-speak).
if (!t.isObjectMethod(path.node)) {
return path;
}
// this function only replaces generators.
if (!path.node.generator) {
return path;
}
const parameters = path.node.params.map(function (param) {
return t.cloneDeep(param);
})
const functionExpression = t.functionExpression(
null, // id
parameters, // params
t.cloneDeep(path.node.body), // body
path.node.generator,
path.node.async
);
util.replaceWithOrRemove(path,
t.objectProperty(
t.cloneDeep(path.node.key), // key
functionExpression, //value
path.node.computed, // computed
false // shorthand
)
);
// path now refers to the ObjectProperty AST node path, but we want to return a
// Function AST node path for the function expression we created. we know that
// the FunctionExpression we just created is the value of the ObjectProperty,
// so return the "value" path off of this path.
return path.get("value");
}
|
PypiClean
|
/daot-0.7.4.zip/daot-0.7.4/dao/builtins/quasiquote.py
|
from dao import builtin
from dao.builtin import Builtin, Function
from dao.term import CommandCall
from dao.solve import DaoSyntaxError, mycont
# quasiquote and backquote
## dao and t language
## (if (> i 1) i (+ i 1))
##`(if ,i>1: ,i; else ,i+1)
def evaluate_quasiquote_list_cont(solver, cont, exps):
@mycont(cont)
def quasi_cont(result, solver):
if len(exps)==0:
yield cont, result
else:
element0 = exps[0]
left_cont = evaluate_quasiquote_list_cont(solver, cont, exps[1:])
if element0==():
yield left_cont, result+((),)
return
if not isinstance(element0, tuple):
if element0==unquote or element0==unquote_slice:
raise DaoSyntaxError
else:
yield left_cont, result+(element0,)
return
elif len(element0)==2:
if element0[0]==unquote:
@mycont(quasi_cont)
def gather_cont(value, solver):
yield left_cont, result+(value,)
yield solver.cont(element0[1], gather_cont), True
return
elif element0[0]==unquote_slice:
@mycont(quasi_cont)
def gather_cont(value, solver):
yield left_cont, result+value
yield solver.cont(element0[1], gather_cont), True
return
elif element0[0]==unquote or element0[0]==unquote_slice:
raise DaoSyntaxError
@mycont(quasi_cont)
def gather_cont(value, solver):
yield left_cont, result+(value,)
yield evaluate_quasiquote_list_cont(solver, gather_cont, element0), ()
return quasi_cont
@builtin.macro('quasiquote')
def quasiquote(solver, cont, item):
if not isinstance(item, tuple) or item==():
yield cont, item
return
elif len(item)==2:
if item[0]==unquote:
yield solver.cont(item[1], cont), True
return
elif item[0]==unquote_slice:
raise DaoSyntaxError
elif item[0]==unquote or item[0]==unquote_slice:
raise DaoSyntaxError
yield evaluate_quasiquote_list_cont(solver, cont, item), ()
@builtin.macro('unquote')
def unquote(solver, cont, *args):
raise DaoSyntaxError
@builtin.macro('unquote_slice')
def unquote_slice(solver, cont, *args):
raise DaoSyntaxError
##Back when JAR first suggested making quasiquote standard, I transcribed
##my quasiquote implementation from the C-coded reader into Scheme-coded
##syntactic-extensions. I promised to send the code to David Bartley at
##TI and figured some of the rest of you might be interested as well.
##
##I believe that this gives different results from JAR's, because it can
##actually fold up explicit calls to "list" and "list*" (for better or for
##worse). It also insists that quasiquote, unquote, and unquote-splice
##forms be well-formed, rather than ignoring those that aren't. As with
##JAR's, nested quasiquotes work properly.
##
##Because quasiquote and company are expanded at compile time rather than
##read time, it is reasonable to write code that produces quasiquote forms.
##
##"list*" (Common Lisp's name) is the same as JAR's "cons*". The meaning
##of everything else should be obvious.
##
##(let ((check
## (lambda (x)
## (unless (and (pair? (cdr x)) (null? (cddr x)))
## (ferror (car x) "invalid form ~s" x)))))
## (define-macro! quasiquote (x)
## (recur f ((x x))
## (cond
## ((not (pair? x)) `',x)
## ((eq? (car x) 'quasiquote) (check x) (f (f (cadr x))))
## ((eq? (car x) 'unquote) (check x) (cadr x))
## ((eq? (car x) 'unquote-splice)
## (ferror 'unquote-splice "invalid context for ~s" x))
## ((and (pair? (car x)) (eq? (caar x) 'unquote-splice))
## (check (car x))
## (let ((d (f (cdr x))))
## (if (equal? d '(quote ()))
## (cadar x)
## `(append ,(cadar x) ,d))))
## (else
## (let ((a (f (car x))) (d (f (cdr x))))
## (if (pair? d)
## (if (eq? (car d) 'quote)
## (if (and (pair? a) (eq? (car a) 'quote))
## `'(,(cadr a) . ,(cadr d))
## (if (null? (cadr d))
## `(list ,a)
## `(list* ,a ,d)))
## (if (memq (car d) '(list list*))
## `(,(car d) ,a ,@(cdr d))
## `(list* ,a ,d)))
## `(list* ,a ,d))))))))
##
##(define-macro! unquote (x)
## (ferror 'unquote
## "unquote form ,~s not valid outside of quasiquote"
## x))
##
##(define-macro! unquote-splice (x)
## (ferror 'unquote
## "unquote-splice form ,@~s not valid outside of quasiquote"
## x))
##;; usage: (qq (x y (uq (+ 1 2)) (uq@ (list 1 2 3))))
## ;; ==> (x y 3 1 2 3)
##
## (define-macro (qq s-expr)
## (qq-eval s-expr))
##
##
## ;; Since qq is a macro you can't use (args) within.
## ;; Use qq-eval instead which is not a macro and thus
## ;; (args) will not capture the qq's (args).
##
## ;; usage: (qq-eval '(x y (uq (+ 1 2)) (uq@ (list 1 2 3))))
## ;; ==> (x y 3 1 2 3)
## (define (qq-eval s-expr , i)
## (if (list? s-expr)
## (begin
## (setq i 0)
## (while (< i (length s-expr))
## (let ((ss-expr (nth i s-expr)))
## (if (list? ss-expr)
## (cond
## ((= 'uq (first ss-expr))
## (nth-set i s-expr (eval (qq-eval (last ss-expr))))
## (inc 'i))
## ((= 'uq@ (first ss-expr))
## (let ((ss-exprs (eval (qq-eval (last ss-expr)))))
## (if (list? ss-exprs)
## (begin
## (pop s-expr i)
## (dotimes (j (length ss-exprs))
## (push (nth j ss-exprs) s-expr i)
## (inc 'i)))
## (begin
## (nth-set i s-expr ss-exprs)
## (inc 'i)))))
## (true
## (nth-set i s-expr (qq-eval ss-expr))
## (inc 'i)))
## (begin
## (inc 'i)
## s-expr))))
## s-expr)
## s-expr))
##
## ;; Abbreviation for lambda or fn
##
## (define-macro (\ )
## (eval (qq-eval '(lambda (uq (first (args))) (uq@ (rest (args)))))))
##
##
## ;; Abbreviation for define
##
## (define-macro (: _var-or-fn _value)
## (if (list? _var-or-fn)
## (eval (qq-eval '(define (uq _var-or-fn) (uq@ (rest (args))))))
## (eval (qq (set _var-or-fn (uq _value))))))
##
|
PypiClean
|
/python-appengine-1.9.15.tar.gz/python-appengine-1.9.15/google/appengine/api/blobstore/blobstore_service_pb.py
|
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
_extension_runtime = True
_ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
_extension_runtime = False
_ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
class BlobstoreServiceError(ProtocolBuffer.ProtocolMessage):
OK = 0
INTERNAL_ERROR = 1
URL_TOO_LONG = 2
PERMISSION_DENIED = 3
BLOB_NOT_FOUND = 4
DATA_INDEX_OUT_OF_RANGE = 5
BLOB_FETCH_SIZE_TOO_LARGE = 6
ARGUMENT_OUT_OF_RANGE = 8
INVALID_BLOB_KEY = 9
_ErrorCode_NAMES = {
0: "OK",
1: "INTERNAL_ERROR",
2: "URL_TOO_LONG",
3: "PERMISSION_DENIED",
4: "BLOB_NOT_FOUND",
5: "DATA_INDEX_OUT_OF_RANGE",
6: "BLOB_FETCH_SIZE_TOO_LARGE",
8: "ARGUMENT_OUT_OF_RANGE",
9: "INVALID_BLOB_KEY",
}
def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
ErrorCode_Name = classmethod(ErrorCode_Name)
def __init__(self, contents=None):
pass
if contents is not None: self.MergeFromString(contents)
def MergeFrom(self, x):
assert x is not self
def Equals(self, x):
if x is self: return 1
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
return n
def ByteSizePartial(self):
n = 0
return n
def Clear(self):
pass
def OutputUnchecked(self, out):
pass
def OutputPartial(self, out):
pass
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
}, 0)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
}, 0, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.BlobstoreServiceError'
class CreateUploadURLRequest(ProtocolBuffer.ProtocolMessage):
has_success_path_ = 0
success_path_ = ""
has_max_upload_size_bytes_ = 0
max_upload_size_bytes_ = 0
has_max_upload_size_per_blob_bytes_ = 0
max_upload_size_per_blob_bytes_ = 0
has_gs_bucket_name_ = 0
gs_bucket_name_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def success_path(self): return self.success_path_
def set_success_path(self, x):
self.has_success_path_ = 1
self.success_path_ = x
def clear_success_path(self):
if self.has_success_path_:
self.has_success_path_ = 0
self.success_path_ = ""
def has_success_path(self): return self.has_success_path_
def max_upload_size_bytes(self): return self.max_upload_size_bytes_
def set_max_upload_size_bytes(self, x):
self.has_max_upload_size_bytes_ = 1
self.max_upload_size_bytes_ = x
def clear_max_upload_size_bytes(self):
if self.has_max_upload_size_bytes_:
self.has_max_upload_size_bytes_ = 0
self.max_upload_size_bytes_ = 0
def has_max_upload_size_bytes(self): return self.has_max_upload_size_bytes_
def max_upload_size_per_blob_bytes(self): return self.max_upload_size_per_blob_bytes_
def set_max_upload_size_per_blob_bytes(self, x):
self.has_max_upload_size_per_blob_bytes_ = 1
self.max_upload_size_per_blob_bytes_ = x
def clear_max_upload_size_per_blob_bytes(self):
if self.has_max_upload_size_per_blob_bytes_:
self.has_max_upload_size_per_blob_bytes_ = 0
self.max_upload_size_per_blob_bytes_ = 0
def has_max_upload_size_per_blob_bytes(self): return self.has_max_upload_size_per_blob_bytes_
def gs_bucket_name(self): return self.gs_bucket_name_
def set_gs_bucket_name(self, x):
self.has_gs_bucket_name_ = 1
self.gs_bucket_name_ = x
def clear_gs_bucket_name(self):
if self.has_gs_bucket_name_:
self.has_gs_bucket_name_ = 0
self.gs_bucket_name_ = ""
def has_gs_bucket_name(self): return self.has_gs_bucket_name_
def MergeFrom(self, x):
assert x is not self
if (x.has_success_path()): self.set_success_path(x.success_path())
if (x.has_max_upload_size_bytes()): self.set_max_upload_size_bytes(x.max_upload_size_bytes())
if (x.has_max_upload_size_per_blob_bytes()): self.set_max_upload_size_per_blob_bytes(x.max_upload_size_per_blob_bytes())
if (x.has_gs_bucket_name()): self.set_gs_bucket_name(x.gs_bucket_name())
def Equals(self, x):
if x is self: return 1
if self.has_success_path_ != x.has_success_path_: return 0
if self.has_success_path_ and self.success_path_ != x.success_path_: return 0
if self.has_max_upload_size_bytes_ != x.has_max_upload_size_bytes_: return 0
if self.has_max_upload_size_bytes_ and self.max_upload_size_bytes_ != x.max_upload_size_bytes_: return 0
if self.has_max_upload_size_per_blob_bytes_ != x.has_max_upload_size_per_blob_bytes_: return 0
if self.has_max_upload_size_per_blob_bytes_ and self.max_upload_size_per_blob_bytes_ != x.max_upload_size_per_blob_bytes_: return 0
if self.has_gs_bucket_name_ != x.has_gs_bucket_name_: return 0
if self.has_gs_bucket_name_ and self.gs_bucket_name_ != x.gs_bucket_name_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_success_path_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: success_path not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.success_path_))
if (self.has_max_upload_size_bytes_): n += 1 + self.lengthVarInt64(self.max_upload_size_bytes_)
if (self.has_max_upload_size_per_blob_bytes_): n += 1 + self.lengthVarInt64(self.max_upload_size_per_blob_bytes_)
if (self.has_gs_bucket_name_): n += 1 + self.lengthString(len(self.gs_bucket_name_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_success_path_):
n += 1
n += self.lengthString(len(self.success_path_))
if (self.has_max_upload_size_bytes_): n += 1 + self.lengthVarInt64(self.max_upload_size_bytes_)
if (self.has_max_upload_size_per_blob_bytes_): n += 1 + self.lengthVarInt64(self.max_upload_size_per_blob_bytes_)
if (self.has_gs_bucket_name_): n += 1 + self.lengthString(len(self.gs_bucket_name_))
return n
def Clear(self):
self.clear_success_path()
self.clear_max_upload_size_bytes()
self.clear_max_upload_size_per_blob_bytes()
self.clear_gs_bucket_name()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.success_path_)
if (self.has_max_upload_size_bytes_):
out.putVarInt32(16)
out.putVarInt64(self.max_upload_size_bytes_)
if (self.has_max_upload_size_per_blob_bytes_):
out.putVarInt32(24)
out.putVarInt64(self.max_upload_size_per_blob_bytes_)
if (self.has_gs_bucket_name_):
out.putVarInt32(34)
out.putPrefixedString(self.gs_bucket_name_)
def OutputPartial(self, out):
if (self.has_success_path_):
out.putVarInt32(10)
out.putPrefixedString(self.success_path_)
if (self.has_max_upload_size_bytes_):
out.putVarInt32(16)
out.putVarInt64(self.max_upload_size_bytes_)
if (self.has_max_upload_size_per_blob_bytes_):
out.putVarInt32(24)
out.putVarInt64(self.max_upload_size_per_blob_bytes_)
if (self.has_gs_bucket_name_):
out.putVarInt32(34)
out.putPrefixedString(self.gs_bucket_name_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_success_path(d.getPrefixedString())
continue
if tt == 16:
self.set_max_upload_size_bytes(d.getVarInt64())
continue
if tt == 24:
self.set_max_upload_size_per_blob_bytes(d.getVarInt64())
continue
if tt == 34:
self.set_gs_bucket_name(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_success_path_: res+=prefix+("success_path: %s\n" % self.DebugFormatString(self.success_path_))
if self.has_max_upload_size_bytes_: res+=prefix+("max_upload_size_bytes: %s\n" % self.DebugFormatInt64(self.max_upload_size_bytes_))
if self.has_max_upload_size_per_blob_bytes_: res+=prefix+("max_upload_size_per_blob_bytes: %s\n" % self.DebugFormatInt64(self.max_upload_size_per_blob_bytes_))
if self.has_gs_bucket_name_: res+=prefix+("gs_bucket_name: %s\n" % self.DebugFormatString(self.gs_bucket_name_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
ksuccess_path = 1
kmax_upload_size_bytes = 2
kmax_upload_size_per_blob_bytes = 3
kgs_bucket_name = 4
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "success_path",
2: "max_upload_size_bytes",
3: "max_upload_size_per_blob_bytes",
4: "gs_bucket_name",
}, 4)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
4: ProtocolBuffer.Encoder.STRING,
}, 4, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CreateUploadURLRequest'
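# Usage sketch (not part of the generated module): shows the field accessors
# generated above. The success_path value '/upload_done' and the 10 MB limit
# are made-up illustrations, not values used by the service.
def _example_create_upload_url_request():
  req = CreateUploadURLRequest()
  req.set_success_path('/upload_done')             # the only required field
  req.set_max_upload_size_bytes(10 * 1024 * 1024)  # optional overall size cap
  assert req.IsInitialized()                       # required fields are present
  clone = CreateUploadURLRequest()
  clone.MergeFrom(req)                             # field-by-field copy
  assert clone.Equals(req)
  return req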
class CreateUploadURLResponse(ProtocolBuffer.ProtocolMessage):
has_url_ = 0
url_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def url(self): return self.url_
def set_url(self, x):
self.has_url_ = 1
self.url_ = x
def clear_url(self):
if self.has_url_:
self.has_url_ = 0
self.url_ = ""
def has_url(self): return self.has_url_
def MergeFrom(self, x):
assert x is not self
if (x.has_url()): self.set_url(x.url())
def Equals(self, x):
if x is self: return 1
if self.has_url_ != x.has_url_: return 0
if self.has_url_ and self.url_ != x.url_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_url_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: url not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.url_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_url_):
n += 1
n += self.lengthString(len(self.url_))
return n
def Clear(self):
self.clear_url()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.url_)
def OutputPartial(self, out):
if (self.has_url_):
out.putVarInt32(10)
out.putPrefixedString(self.url_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_url(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_url_: res+=prefix+("url: %s\n" % self.DebugFormatString(self.url_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kurl = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "url",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CreateUploadURLResponse'
class DeleteBlobRequest(ProtocolBuffer.ProtocolMessage):
has_token_ = 0
token_ = ""
def __init__(self, contents=None):
self.blob_key_ = []
if contents is not None: self.MergeFromString(contents)
def blob_key_size(self): return len(self.blob_key_)
def blob_key_list(self): return self.blob_key_
def blob_key(self, i):
return self.blob_key_[i]
def set_blob_key(self, i, x):
self.blob_key_[i] = x
def add_blob_key(self, x):
self.blob_key_.append(x)
def clear_blob_key(self):
self.blob_key_ = []
def token(self): return self.token_
def set_token(self, x):
self.has_token_ = 1
self.token_ = x
def clear_token(self):
if self.has_token_:
self.has_token_ = 0
self.token_ = ""
def has_token(self): return self.has_token_
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.blob_key_size()): self.add_blob_key(x.blob_key(i))
if (x.has_token()): self.set_token(x.token())
def Equals(self, x):
if x is self: return 1
if len(self.blob_key_) != len(x.blob_key_): return 0
for e1, e2 in zip(self.blob_key_, x.blob_key_):
if e1 != e2: return 0
if self.has_token_ != x.has_token_: return 0
if self.has_token_ and self.token_ != x.token_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.blob_key_)
for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
if (self.has_token_): n += 1 + self.lengthString(len(self.token_))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.blob_key_)
for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
if (self.has_token_): n += 1 + self.lengthString(len(self.token_))
return n
def Clear(self):
self.clear_blob_key()
self.clear_token()
def OutputUnchecked(self, out):
for i in xrange(len(self.blob_key_)):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_[i])
if (self.has_token_):
out.putVarInt32(18)
out.putPrefixedString(self.token_)
def OutputPartial(self, out):
for i in xrange(len(self.blob_key_)):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_[i])
if (self.has_token_):
out.putVarInt32(18)
out.putPrefixedString(self.token_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_blob_key(d.getPrefixedString())
continue
if tt == 18:
self.set_token(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.blob_key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("blob_key%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
if self.has_token_: res+=prefix+("token: %s\n" % self.DebugFormatString(self.token_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
ktoken = 2
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
2: "token",
}, 2)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
}, 2, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DeleteBlobRequest'
class FetchDataRequest(ProtocolBuffer.ProtocolMessage):
has_blob_key_ = 0
blob_key_ = ""
has_start_index_ = 0
start_index_ = 0
has_end_index_ = 0
end_index_ = 0
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def blob_key(self): return self.blob_key_
def set_blob_key(self, x):
self.has_blob_key_ = 1
self.blob_key_ = x
def clear_blob_key(self):
if self.has_blob_key_:
self.has_blob_key_ = 0
self.blob_key_ = ""
def has_blob_key(self): return self.has_blob_key_
def start_index(self): return self.start_index_
def set_start_index(self, x):
self.has_start_index_ = 1
self.start_index_ = x
def clear_start_index(self):
if self.has_start_index_:
self.has_start_index_ = 0
self.start_index_ = 0
def has_start_index(self): return self.has_start_index_
def end_index(self): return self.end_index_
def set_end_index(self, x):
self.has_end_index_ = 1
self.end_index_ = x
def clear_end_index(self):
if self.has_end_index_:
self.has_end_index_ = 0
self.end_index_ = 0
def has_end_index(self): return self.has_end_index_
def MergeFrom(self, x):
assert x is not self
if (x.has_blob_key()): self.set_blob_key(x.blob_key())
if (x.has_start_index()): self.set_start_index(x.start_index())
if (x.has_end_index()): self.set_end_index(x.end_index())
def Equals(self, x):
if x is self: return 1
if self.has_blob_key_ != x.has_blob_key_: return 0
if self.has_blob_key_ and self.blob_key_ != x.blob_key_: return 0
if self.has_start_index_ != x.has_start_index_: return 0
if self.has_start_index_ and self.start_index_ != x.start_index_: return 0
if self.has_end_index_ != x.has_end_index_: return 0
if self.has_end_index_ and self.end_index_ != x.end_index_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_blob_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: blob_key not set.')
if (not self.has_start_index_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: start_index not set.')
if (not self.has_end_index_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: end_index not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.blob_key_))
n += self.lengthVarInt64(self.start_index_)
n += self.lengthVarInt64(self.end_index_)
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_blob_key_):
n += 1
n += self.lengthString(len(self.blob_key_))
if (self.has_start_index_):
n += 1
n += self.lengthVarInt64(self.start_index_)
if (self.has_end_index_):
n += 1
n += self.lengthVarInt64(self.end_index_)
return n
def Clear(self):
self.clear_blob_key()
self.clear_start_index()
self.clear_end_index()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
out.putVarInt32(16)
out.putVarInt64(self.start_index_)
out.putVarInt32(24)
out.putVarInt64(self.end_index_)
def OutputPartial(self, out):
if (self.has_blob_key_):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
if (self.has_start_index_):
out.putVarInt32(16)
out.putVarInt64(self.start_index_)
if (self.has_end_index_):
out.putVarInt32(24)
out.putVarInt64(self.end_index_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_blob_key(d.getPrefixedString())
continue
if tt == 16:
self.set_start_index(d.getVarInt64())
continue
if tt == 24:
self.set_end_index(d.getVarInt64())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_blob_key_: res+=prefix+("blob_key: %s\n" % self.DebugFormatString(self.blob_key_))
if self.has_start_index_: res+=prefix+("start_index: %s\n" % self.DebugFormatInt64(self.start_index_))
if self.has_end_index_: res+=prefix+("end_index: %s\n" % self.DebugFormatInt64(self.end_index_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
kstart_index = 2
kend_index = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
2: "start_index",
3: "end_index",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.NUMERIC,
3: ProtocolBuffer.Encoder.NUMERIC,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FetchDataRequest'
class FetchDataResponse(ProtocolBuffer.ProtocolMessage):
has_data_ = 0
data_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def data(self): return self.data_
def set_data(self, x):
self.has_data_ = 1
self.data_ = x
def clear_data(self):
if self.has_data_:
self.has_data_ = 0
self.data_ = ""
def has_data(self): return self.has_data_
def MergeFrom(self, x):
assert x is not self
if (x.has_data()): self.set_data(x.data())
def Equals(self, x):
if x is self: return 1
if self.has_data_ != x.has_data_: return 0
if self.has_data_ and self.data_ != x.data_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_data_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: data not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.data_))
return n + 2
def ByteSizePartial(self):
n = 0
if (self.has_data_):
n += 2
n += self.lengthString(len(self.data_))
return n
def Clear(self):
self.clear_data()
def OutputUnchecked(self, out):
out.putVarInt32(8002)
out.putPrefixedString(self.data_)
def OutputPartial(self, out):
if (self.has_data_):
out.putVarInt32(8002)
out.putPrefixedString(self.data_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 8002:
self.set_data(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_data_: res+=prefix+("data: %s\n" % self.DebugFormatString(self.data_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdata = 1000
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1000: "data",
}, 1000)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1000: ProtocolBuffer.Encoder.STRING,
}, 1000, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.FetchDataResponse'
class CloneBlobRequest(ProtocolBuffer.ProtocolMessage):
has_blob_key_ = 0
blob_key_ = ""
has_mime_type_ = 0
mime_type_ = ""
has_target_app_id_ = 0
target_app_id_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def blob_key(self): return self.blob_key_
def set_blob_key(self, x):
self.has_blob_key_ = 1
self.blob_key_ = x
def clear_blob_key(self):
if self.has_blob_key_:
self.has_blob_key_ = 0
self.blob_key_ = ""
def has_blob_key(self): return self.has_blob_key_
def mime_type(self): return self.mime_type_
def set_mime_type(self, x):
self.has_mime_type_ = 1
self.mime_type_ = x
def clear_mime_type(self):
if self.has_mime_type_:
self.has_mime_type_ = 0
self.mime_type_ = ""
def has_mime_type(self): return self.has_mime_type_
def target_app_id(self): return self.target_app_id_
def set_target_app_id(self, x):
self.has_target_app_id_ = 1
self.target_app_id_ = x
def clear_target_app_id(self):
if self.has_target_app_id_:
self.has_target_app_id_ = 0
self.target_app_id_ = ""
def has_target_app_id(self): return self.has_target_app_id_
def MergeFrom(self, x):
assert x is not self
if (x.has_blob_key()): self.set_blob_key(x.blob_key())
if (x.has_mime_type()): self.set_mime_type(x.mime_type())
if (x.has_target_app_id()): self.set_target_app_id(x.target_app_id())
def Equals(self, x):
if x is self: return 1
if self.has_blob_key_ != x.has_blob_key_: return 0
if self.has_blob_key_ and self.blob_key_ != x.blob_key_: return 0
if self.has_mime_type_ != x.has_mime_type_: return 0
if self.has_mime_type_ and self.mime_type_ != x.mime_type_: return 0
if self.has_target_app_id_ != x.has_target_app_id_: return 0
if self.has_target_app_id_ and self.target_app_id_ != x.target_app_id_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_blob_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: blob_key not set.')
if (not self.has_mime_type_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: mime_type not set.')
if (not self.has_target_app_id_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: target_app_id not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.blob_key_))
n += self.lengthString(len(self.mime_type_))
n += self.lengthString(len(self.target_app_id_))
return n + 3
def ByteSizePartial(self):
n = 0
if (self.has_blob_key_):
n += 1
n += self.lengthString(len(self.blob_key_))
if (self.has_mime_type_):
n += 1
n += self.lengthString(len(self.mime_type_))
if (self.has_target_app_id_):
n += 1
n += self.lengthString(len(self.target_app_id_))
return n
def Clear(self):
self.clear_blob_key()
self.clear_mime_type()
self.clear_target_app_id()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
out.putVarInt32(18)
out.putPrefixedString(self.mime_type_)
out.putVarInt32(26)
out.putPrefixedString(self.target_app_id_)
def OutputPartial(self, out):
if (self.has_blob_key_):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
if (self.has_mime_type_):
out.putVarInt32(18)
out.putPrefixedString(self.mime_type_)
if (self.has_target_app_id_):
out.putVarInt32(26)
out.putPrefixedString(self.target_app_id_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_blob_key(d.getPrefixedString())
continue
if tt == 18:
self.set_mime_type(d.getPrefixedString())
continue
if tt == 26:
self.set_target_app_id(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_blob_key_: res+=prefix+("blob_key: %s\n" % self.DebugFormatString(self.blob_key_))
if self.has_mime_type_: res+=prefix+("mime_type: %s\n" % self.DebugFormatString(self.mime_type_))
if self.has_target_app_id_: res+=prefix+("target_app_id: %s\n" % self.DebugFormatString(self.target_app_id_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
kmime_type = 2
ktarget_app_id = 3
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
2: "mime_type",
3: "target_app_id",
}, 3)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
2: ProtocolBuffer.Encoder.STRING,
3: ProtocolBuffer.Encoder.STRING,
}, 3, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CloneBlobRequest'
class CloneBlobResponse(ProtocolBuffer.ProtocolMessage):
has_blob_key_ = 0
blob_key_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def blob_key(self): return self.blob_key_
def set_blob_key(self, x):
self.has_blob_key_ = 1
self.blob_key_ = x
def clear_blob_key(self):
if self.has_blob_key_:
self.has_blob_key_ = 0
self.blob_key_ = ""
def has_blob_key(self): return self.has_blob_key_
def MergeFrom(self, x):
assert x is not self
if (x.has_blob_key()): self.set_blob_key(x.blob_key())
def Equals(self, x):
if x is self: return 1
if self.has_blob_key_ != x.has_blob_key_: return 0
if self.has_blob_key_ and self.blob_key_ != x.blob_key_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_blob_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: blob_key not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.blob_key_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_blob_key_):
n += 1
n += self.lengthString(len(self.blob_key_))
return n
def Clear(self):
self.clear_blob_key()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
def OutputPartial(self, out):
if (self.has_blob_key_):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_blob_key(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_blob_key_: res+=prefix+("blob_key: %s\n" % self.DebugFormatString(self.blob_key_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CloneBlobResponse'
class DecodeBlobKeyRequest(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.blob_key_ = []
if contents is not None: self.MergeFromString(contents)
def blob_key_size(self): return len(self.blob_key_)
def blob_key_list(self): return self.blob_key_
def blob_key(self, i):
return self.blob_key_[i]
def set_blob_key(self, i, x):
self.blob_key_[i] = x
def add_blob_key(self, x):
self.blob_key_.append(x)
def clear_blob_key(self):
self.blob_key_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.blob_key_size()): self.add_blob_key(x.blob_key(i))
def Equals(self, x):
if x is self: return 1
if len(self.blob_key_) != len(x.blob_key_): return 0
for e1, e2 in zip(self.blob_key_, x.blob_key_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.blob_key_)
for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.blob_key_)
for i in xrange(len(self.blob_key_)): n += self.lengthString(len(self.blob_key_[i]))
return n
def Clear(self):
self.clear_blob_key()
def OutputUnchecked(self, out):
for i in xrange(len(self.blob_key_)):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_[i])
def OutputPartial(self, out):
for i in xrange(len(self.blob_key_)):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_blob_key(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.blob_key_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("blob_key%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DecodeBlobKeyRequest'
class DecodeBlobKeyResponse(ProtocolBuffer.ProtocolMessage):
def __init__(self, contents=None):
self.decoded_ = []
if contents is not None: self.MergeFromString(contents)
def decoded_size(self): return len(self.decoded_)
def decoded_list(self): return self.decoded_
def decoded(self, i):
return self.decoded_[i]
def set_decoded(self, i, x):
self.decoded_[i] = x
def add_decoded(self, x):
self.decoded_.append(x)
def clear_decoded(self):
self.decoded_ = []
def MergeFrom(self, x):
assert x is not self
for i in xrange(x.decoded_size()): self.add_decoded(x.decoded(i))
def Equals(self, x):
if x is self: return 1
if len(self.decoded_) != len(x.decoded_): return 0
for e1, e2 in zip(self.decoded_, x.decoded_):
if e1 != e2: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
return initialized
def ByteSize(self):
n = 0
n += 1 * len(self.decoded_)
for i in xrange(len(self.decoded_)): n += self.lengthString(len(self.decoded_[i]))
return n
def ByteSizePartial(self):
n = 0
n += 1 * len(self.decoded_)
for i in xrange(len(self.decoded_)): n += self.lengthString(len(self.decoded_[i]))
return n
def Clear(self):
self.clear_decoded()
def OutputUnchecked(self, out):
for i in xrange(len(self.decoded_)):
out.putVarInt32(10)
out.putPrefixedString(self.decoded_[i])
def OutputPartial(self, out):
for i in xrange(len(self.decoded_)):
out.putVarInt32(10)
out.putPrefixedString(self.decoded_[i])
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.add_decoded(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
cnt=0
for e in self.decoded_:
elm=""
if printElemNumber: elm="(%d)" % cnt
res+=prefix+("decoded%s: %s\n" % (elm, self.DebugFormatString(e)))
cnt+=1
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kdecoded = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "decoded",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.DecodeBlobKeyResponse'
class CreateEncodedGoogleStorageKeyRequest(ProtocolBuffer.ProtocolMessage):
has_filename_ = 0
filename_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def filename(self): return self.filename_
def set_filename(self, x):
self.has_filename_ = 1
self.filename_ = x
def clear_filename(self):
if self.has_filename_:
self.has_filename_ = 0
self.filename_ = ""
def has_filename(self): return self.has_filename_
def MergeFrom(self, x):
assert x is not self
if (x.has_filename()): self.set_filename(x.filename())
def Equals(self, x):
if x is self: return 1
if self.has_filename_ != x.has_filename_: return 0
if self.has_filename_ and self.filename_ != x.filename_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_filename_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: filename not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.filename_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_filename_):
n += 1
n += self.lengthString(len(self.filename_))
return n
def Clear(self):
self.clear_filename()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.filename_)
def OutputPartial(self, out):
if (self.has_filename_):
out.putVarInt32(10)
out.putPrefixedString(self.filename_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_filename(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_filename_: res+=prefix+("filename: %s\n" % self.DebugFormatString(self.filename_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kfilename = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "filename",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CreateEncodedGoogleStorageKeyRequest'
class CreateEncodedGoogleStorageKeyResponse(ProtocolBuffer.ProtocolMessage):
has_blob_key_ = 0
blob_key_ = ""
def __init__(self, contents=None):
if contents is not None: self.MergeFromString(contents)
def blob_key(self): return self.blob_key_
def set_blob_key(self, x):
self.has_blob_key_ = 1
self.blob_key_ = x
def clear_blob_key(self):
if self.has_blob_key_:
self.has_blob_key_ = 0
self.blob_key_ = ""
def has_blob_key(self): return self.has_blob_key_
def MergeFrom(self, x):
assert x is not self
if (x.has_blob_key()): self.set_blob_key(x.blob_key())
def Equals(self, x):
if x is self: return 1
if self.has_blob_key_ != x.has_blob_key_: return 0
if self.has_blob_key_ and self.blob_key_ != x.blob_key_: return 0
return 1
def IsInitialized(self, debug_strs=None):
initialized = 1
if (not self.has_blob_key_):
initialized = 0
if debug_strs is not None:
debug_strs.append('Required field: blob_key not set.')
return initialized
def ByteSize(self):
n = 0
n += self.lengthString(len(self.blob_key_))
return n + 1
def ByteSizePartial(self):
n = 0
if (self.has_blob_key_):
n += 1
n += self.lengthString(len(self.blob_key_))
return n
def Clear(self):
self.clear_blob_key()
def OutputUnchecked(self, out):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
def OutputPartial(self, out):
if (self.has_blob_key_):
out.putVarInt32(10)
out.putPrefixedString(self.blob_key_)
def TryMerge(self, d):
while d.avail() > 0:
tt = d.getVarInt32()
if tt == 10:
self.set_blob_key(d.getPrefixedString())
continue
if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
res=""
if self.has_blob_key_: res+=prefix+("blob_key: %s\n" % self.DebugFormatString(self.blob_key_))
return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])
kblob_key = 1
_TEXT = _BuildTagLookupTable({
0: "ErrorCode",
1: "blob_key",
}, 1)
_TYPES = _BuildTagLookupTable({
0: ProtocolBuffer.Encoder.NUMERIC,
1: ProtocolBuffer.Encoder.STRING,
}, 1, ProtocolBuffer.Encoder.MAX_TYPE)
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.CreateEncodedGoogleStorageKeyResponse'
if _extension_runtime:
pass
__all__ = ['BlobstoreServiceError','CreateUploadURLRequest','CreateUploadURLResponse','DeleteBlobRequest','FetchDataRequest','FetchDataResponse','CloneBlobRequest','CloneBlobResponse','DecodeBlobKeyRequest','DecodeBlobKeyResponse','CreateEncodedGoogleStorageKeyRequest','CreateEncodedGoogleStorageKeyResponse']
/bpy36-1.0.0-py3-none-any.whl/bpy2/2.79/scripts/addons/mesh_bsurfaces.py
bl_info = {
"name": "Bsurfaces GPL Edition",
"author": "Eclectiel",
"version": (1, 5, 1),
"blender": (2, 76, 0),
"location": "View3D > EditMode > ToolShelf",
"description": "Modeling and retopology tool",
"wiki_url": "https://wiki.blender.org/index.php/Dev:Ref/Release_Notes/2.64/Bsurfaces_1.5",
"category": "Mesh",
}
import bpy
import bmesh
import operator
from mathutils import Vector
from mathutils.geometry import (
intersect_line_line,
intersect_point_line,
)
from math import (
degrees,
pi,
sqrt,
)
from bpy.props import (
BoolProperty,
FloatProperty,
IntProperty,
StringProperty,
PointerProperty,
)
from bpy.types import (
Operator,
Panel,
PropertyGroup,
AddonPreferences,
)
class VIEW3D_PT_tools_SURFSK_mesh(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = 'Tools'
bl_context = "mesh_edit"
bl_label = "Bsurfaces"
@classmethod
def poll(cls, context):
return context.active_object
def draw(self, context):
layout = self.layout
scn = context.scene.bsurfaces
col = layout.column(align=True)
row = layout.row()
row.separator()
col.operator("gpencil.surfsk_add_surface", text="Add Surface")
col.operator("gpencil.surfsk_edit_strokes", text="Edit Strokes")
col.prop(scn, "SURFSK_cyclic_cross")
col.prop(scn, "SURFSK_cyclic_follow")
col.prop(scn, "SURFSK_loops_on_strokes")
col.prop(scn, "SURFSK_automatic_join")
col.prop(scn, "SURFSK_keep_strokes")
class VIEW3D_PT_tools_SURFSK_curve(Panel):
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_context = "curve_edit"
bl_category = 'Tools'
bl_label = "Bsurfaces"
@classmethod
def poll(cls, context):
return context.active_object
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
row = layout.row()
row.separator()
col.operator("curve.surfsk_first_points", text="Set First Points")
col.operator("curve.switch_direction", text="Switch Direction")
col.operator("curve.surfsk_reorder_splines", text="Reorder Splines")
# Returns the type of strokes used
def get_strokes_type(main_object):
strokes_type = ""
strokes_num = 0
# Check if they are grease pencil
try:
# Get the active grease pencil layer
strokes_num = len(main_object.grease_pencil.layers.active.active_frame.strokes)
if strokes_num > 0:
strokes_type = "GP_STROKES"
except:
pass
# Check if they are curves, if there are no grease pencil strokes
if strokes_type == "":
if len(bpy.context.selected_objects) == 2:
for ob in bpy.context.selected_objects:
if ob != bpy.context.scene.objects.active and ob.type == "CURVE":
strokes_type = "EXTERNAL_CURVE"
strokes_num = len(ob.data.splines)
# Check if there is any non-bezier spline
for i in range(len(ob.data.splines)):
if ob.data.splines[i].type != "BEZIER":
strokes_type = "CURVE_WITH_NON_BEZIER_SPLINES"
break
elif ob != bpy.context.scene.objects.active and ob.type != "CURVE":
strokes_type = "EXTERNAL_NO_CURVE"
elif len(bpy.context.selected_objects) > 2:
strokes_type = "MORE_THAN_ONE_EXTERNAL"
# Check if there is a single stroke without any selection in the object
if strokes_num == 1 and main_object.data.total_vert_sel == 0:
if strokes_type == "EXTERNAL_CURVE":
strokes_type = "SINGLE_CURVE_STROKE_NO_SELECTION"
elif strokes_type == "GP_STROKES":
strokes_type = "SINGLE_GP_STROKE_NO_SELECTION"
if strokes_num == 0 and main_object.data.total_vert_sel > 0:
strokes_type = "SELECTION_ALONE"
if strokes_type == "":
strokes_type = "NO_STROKES"
return strokes_type
# Surface generator operator
class GPENCIL_OT_SURFSK_add_surface(Operator):
bl_idname = "gpencil.surfsk_add_surface"
bl_label = "Bsurfaces add surface"
bl_description = "Generates surfaces from grease pencil strokes, bezier curves or loose edges"
bl_options = {'REGISTER', 'UNDO'}
edges_U = IntProperty(
name="Cross",
description="Number of face-loops crossing the strokes",
default=1,
min=1,
max=200
)
edges_V = IntProperty(
name="Follow",
description="Number of face-loops following the strokes",
default=1,
min=1,
max=200
)
cyclic_cross = BoolProperty(
name="Cyclic Cross",
description="Make cyclic the face-loops crossing the strokes",
default=False
)
cyclic_follow = BoolProperty(
name="Cyclic Follow",
description="Make cyclic the face-loops following the strokes",
default=False
)
loops_on_strokes = BoolProperty(
name="Loops on strokes",
description="Make the loops match the paths of the strokes",
default=False
)
automatic_join = BoolProperty(
name="Automatic join",
description="Join automatically vertices of either surfaces generated "
"by crosshatching, or from the borders of closed shapes",
default=False
)
join_stretch_factor = FloatProperty(
name="Stretch",
description="Amount of stretching or shrinking allowed for "
"edges when joining vertices automatically",
default=1,
min=0,
max=3,
subtype='FACTOR'
)
def draw(self, context):
layout = self.layout
col = layout.column(align=True)
row = layout.row()
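# Nothing is drawn while the strokes are only used to fill faces; the edge counts and
# cyclic toggles apply to non-crosshatch input and are hidden whenever an existing
# selection already determines them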
if not self.is_fill_faces:
row.separator()
if not self.is_crosshatch:
if not self.selection_U_exists:
col.prop(self, "edges_U")
row.separator()
if not self.selection_V_exists:
col.prop(self, "edges_V")
row.separator()
row.separator()
if not self.selection_U_exists:
if not (
(self.selection_V_exists and not self.selection_V_is_closed) or
(self.selection_V2_exists and not self.selection_V2_is_closed)
):
col.prop(self, "cyclic_cross")
if not self.selection_V_exists:
if not (
(self.selection_U_exists and not self.selection_U_is_closed) or
(self.selection_U2_exists and not self.selection_U2_is_closed)
):
col.prop(self, "cyclic_follow")
col.prop(self, "loops_on_strokes")
col.prop(self, "automatic_join")
if self.automatic_join:
row.separator()
col.separator()
row.separator()
col.prop(self, "join_stretch_factor")
# Get an ordered list of a chain of vertices
def get_ordered_verts(self, ob, all_selected_edges_idx, all_selected_verts_idx,
first_vert_idx, middle_vertex_idx, closing_vert_idx):
# Order selected vertices.
verts_ordered = []
if closing_vert_idx is not None:
verts_ordered.append(ob.data.vertices[closing_vert_idx])
verts_ordered.append(ob.data.vertices[first_vert_idx])
prev_v = first_vert_idx
prev_ed = None
finish_while = False
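# Walk the chain edge by edge: find a selected edge that continues from the last
# ordered vert, append its other vert, and stop once no edge extends the chain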
while True:
edges_non_matched = 0
for i in all_selected_edges_idx:
if ob.data.edges[i] != prev_ed and ob.data.edges[i].vertices[0] == prev_v and \
ob.data.edges[i].vertices[1] in all_selected_verts_idx:
verts_ordered.append(ob.data.vertices[ob.data.edges[i].vertices[1]])
prev_v = ob.data.edges[i].vertices[1]
prev_ed = ob.data.edges[i]
elif ob.data.edges[i] != prev_ed and ob.data.edges[i].vertices[1] == prev_v and \
ob.data.edges[i].vertices[0] in all_selected_verts_idx:
verts_ordered.append(ob.data.vertices[ob.data.edges[i].vertices[0]])
prev_v = ob.data.edges[i].vertices[0]
prev_ed = ob.data.edges[i]
else:
edges_non_matched += 1
if edges_non_matched == len(all_selected_edges_idx):
finish_while = True
if finish_while:
break
if closing_vert_idx is not None:
verts_ordered.append(ob.data.vertices[closing_vert_idx])
if middle_vertex_idx is not None:
verts_ordered.append(ob.data.vertices[middle_vertex_idx])
verts_ordered.reverse()
return tuple(verts_ordered)
# Calculates length of a chain of points.
def get_chain_length(self, object, verts_ordered):
matrix = object.matrix_world
edges_lengths = []
edges_lengths_sum = 0
for i in range(0, len(verts_ordered)):
if i == 0:
prev_v_co = matrix * verts_ordered[i].co
else:
v_co = matrix * verts_ordered[i].co
v_difs = [prev_v_co[0] - v_co[0], prev_v_co[1] - v_co[1], prev_v_co[2] - v_co[2]]
edge_length = abs(sqrt(v_difs[0] * v_difs[0] + v_difs[1] * v_difs[1] + v_difs[2] * v_difs[2]))
edges_lengths.append(edge_length)
edges_lengths_sum += edge_length
prev_v_co = v_co
return edges_lengths, edges_lengths_sum
# Calculates the proportion of the edges of a chain of edges, relative to the full chain length.
def get_edges_proportions(self, edges_lengths, edges_lengths_sum, use_boundaries, fixed_edges_num):
edges_proportions = []
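# With use_boundaries the proportions follow the actual edge lengths of the chain;
# otherwise the chain is split into fixed_edges_num equal parts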
if use_boundaries:
verts_count = 1
for l in edges_lengths:
edges_proportions.append(l / edges_lengths_sum)
verts_count += 1
else:
verts_count = 1
for n in range(0, fixed_edges_num):
edges_proportions.append(1 / fixed_edges_num)
verts_count += 1
return edges_proportions
# Calculates the angle between two pairs of points in space
def orientation_difference(self, points_A_co, points_B_co):
# each parameter should be a list with two elements,
# and each element should be an x,y,z coordinate
vec_A = points_A_co[0] - points_A_co[1]
vec_B = points_B_co[0] - points_B_co[1]
angle = vec_A.angle(vec_B)
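# Fold angles above 90 degrees so that opposite directions count as the same orientation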
if angle > 0.5 * pi:
angle = abs(angle - pi)
return angle
# Calculate which vert of the verts_idx list is the nearest one
# to the point_co coordinates, and the distance to it
def shortest_distance(self, object, point_co, verts_idx):
matrix = object.matrix_world
for i in range(0, len(verts_idx)):
dist = (point_co - matrix * object.data.vertices[verts_idx[i]].co).length
if i == 0:
prev_dist = dist
nearest_vert_idx = verts_idx[i]
shortest_dist = dist
if dist < prev_dist:
prev_dist = dist
nearest_vert_idx = verts_idx[i]
shortest_dist = dist
return nearest_vert_idx, shortest_dist
# Returns the index of the opposite vert tip in a chain, given a vert tip index
# as parameter, and a multidimensional list with all pairs of tips
def opposite_tip(self, vert_tip_idx, all_chains_tips_idx):
opposite_vert_tip_idx = None
for i in range(0, len(all_chains_tips_idx)):
if vert_tip_idx == all_chains_tips_idx[i][0]:
opposite_vert_tip_idx = all_chains_tips_idx[i][1]
if vert_tip_idx == all_chains_tips_idx[i][1]:
opposite_vert_tip_idx = all_chains_tips_idx[i][0]
return opposite_vert_tip_idx
# Simplifies a spline and returns the new points coordinates
def simplify_spline(self, spline_coords, segments_num):
simplified_spline = []
points_between_segments = round(len(spline_coords) / segments_num)
simplified_spline.append(spline_coords[0])
for i in range(1, segments_num):
simplified_spline.append(spline_coords[i * points_between_segments])
simplified_spline.append(spline_coords[len(spline_coords) - 1])
return simplified_spline
# Cleans up the scene and restores it to the state it was in at the beginning,
# in case the script is interrupted in the middle of the execution
def cleanup_on_interruption(self):
# If the original strokes curve comes from conversion
# from grease pencil and wasn't made by hand, delete it
if not self.using_external_curves:
try:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.original_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.original_curve.name]
bpy.ops.object.delete()
except:
pass
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
else:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.original_curve.name].select = True
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Returns a list with the coords of the points distributed over the splines
# passed to this method according to the proportions parameter
def distribute_pts(self, surface_splines, proportions):
# Calculate the length of each final surface spline
surface_splines_lengths = []
surface_splines_parsed = []
for sp_idx in range(0, len(surface_splines)):
# Calculate spline length
surface_splines_lengths.append(0)
for i in range(0, len(surface_splines[sp_idx].bezier_points)):
if i == 0:
prev_p = surface_splines[sp_idx].bezier_points[i]
else:
p = surface_splines[sp_idx].bezier_points[i]
edge_length = (prev_p.co - p.co).length
surface_splines_lengths[sp_idx] += edge_length
prev_p = p
# Calculate vertex positions with appropriate edge proportions, and ordered, for each spline
for sp_idx in range(0, len(surface_splines)):
surface_splines_parsed.append([])
surface_splines_parsed[sp_idx].append(surface_splines[sp_idx].bezier_points[0].co)
prev_p_co = surface_splines[sp_idx].bezier_points[0].co
p_idx = 0
for prop_idx in range(len(proportions) - 1):
target_length = surface_splines_lengths[sp_idx] * proportions[prop_idx]
partial_segment_length = 0
finish_while = False
while True:
# otherwise p_idx would be used as an out-of-range index below and crash
if p_idx < len(surface_splines[sp_idx].bezier_points):
p_co = surface_splines[sp_idx].bezier_points[p_idx].co
new_dist = (prev_p_co - p_co).length
# The length the partial segment would have if the next
# point were included, while still shorter than the target length
potential_segment_length = partial_segment_length + new_dist
# If the potential is still shorter, keep adding
if potential_segment_length < target_length:
partial_segment_length = potential_segment_length
p_idx += 1
prev_p_co = p_co
# If the potential is longer than the target, calculate the target
# (a point between the last two points), and assign
elif potential_segment_length > target_length:
remaining_dist = target_length - partial_segment_length
vec = p_co - prev_p_co
vec.normalize()
intermediate_co = prev_p_co + (vec * remaining_dist)
surface_splines_parsed[sp_idx].append(intermediate_co)
partial_segment_length += remaining_dist
prev_p_co = intermediate_co
finish_while = True
# If the potential is equal to the target, assign
elif potential_segment_length == target_length:
surface_splines_parsed[sp_idx].append(p_co)
prev_p_co = p_co
finish_while = True
if finish_while:
break
# last point of the spline
surface_splines_parsed[sp_idx].append(
surface_splines[sp_idx].bezier_points[len(surface_splines[sp_idx].bezier_points) - 1].co
)
return surface_splines_parsed
# Counts the number of faces that belong to each edge
def edge_face_count(self, ob):
ed_keys_count_dict = {}
for face in ob.data.polygons:
for ed_keys in face.edge_keys:
if ed_keys not in ed_keys_count_dict:
ed_keys_count_dict[ed_keys] = 1
else:
ed_keys_count_dict[ed_keys] += 1
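# Map the per-key face counts back onto edge indices (an edge key may store its
# two vert indices in either order)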
edge_face_count = []
for i in range(len(ob.data.edges)):
edge_face_count.append(0)
for i in range(len(ob.data.edges)):
ed = ob.data.edges[i]
v1 = ed.vertices[0]
v2 = ed.vertices[1]
if (v1, v2) in ed_keys_count_dict:
edge_face_count[i] = ed_keys_count_dict[(v1, v2)]
elif (v2, v1) in ed_keys_count_dict:
edge_face_count[i] = ed_keys_count_dict[(v2, v1)]
return edge_face_count
# Fills with faces all the selected vertices which form empty triangles or quads
def fill_with_faces(self, object):
all_selected_verts_count = self.main_object_selected_verts_count
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='OBJECT')
# Calculate average length of selected edges
all_selected_verts = []
original_sel_edges_count = 0
for ed in object.data.edges:
if object.data.vertices[ed.vertices[0]].select and object.data.vertices[ed.vertices[1]].select:
coords = []
coords.append(object.data.vertices[ed.vertices[0]].co)
coords.append(object.data.vertices[ed.vertices[1]].co)
original_sel_edges_count += 1
if not ed.vertices[0] in all_selected_verts:
all_selected_verts.append(ed.vertices[0])
if not ed.vertices[1] in all_selected_verts:
all_selected_verts.append(ed.vertices[1])
tuple(all_selected_verts)
# Check if there is any edge selected. If not, interrupt the script
if original_sel_edges_count == 0 and all_selected_verts_count > 0:
return 0
# Get all edges connected to selected verts
all_edges_around_sel_verts = []
edges_connected_to_sel_verts = {}
verts_connected_to_every_vert = {}
for ed_idx in range(len(object.data.edges)):
ed = object.data.edges[ed_idx]
include_edge = False
if ed.vertices[0] in all_selected_verts:
if not ed.vertices[0] in edges_connected_to_sel_verts:
edges_connected_to_sel_verts[ed.vertices[0]] = []
edges_connected_to_sel_verts[ed.vertices[0]].append(ed_idx)
include_edge = True
if ed.vertices[1] in all_selected_verts:
if not ed.vertices[1] in edges_connected_to_sel_verts:
edges_connected_to_sel_verts[ed.vertices[1]] = []
edges_connected_to_sel_verts[ed.vertices[1]].append(ed_idx)
include_edge = True
if include_edge is True:
all_edges_around_sel_verts.append(ed_idx)
# Get all connected verts to each vert
if not ed.vertices[0] in verts_connected_to_every_vert:
verts_connected_to_every_vert[ed.vertices[0]] = []
if not ed.vertices[1] in verts_connected_to_every_vert:
verts_connected_to_every_vert[ed.vertices[1]] = []
verts_connected_to_every_vert[ed.vertices[0]].append(ed.vertices[1])
verts_connected_to_every_vert[ed.vertices[1]].append(ed.vertices[0])
# Get all verts connected to faces
all_verts_part_of_faces = []
all_edges_faces_count = []
all_edges_faces_count += self.edge_face_count(object)
# Get only the selected edges that have faces attached.
count_faces_of_edges_around_sel_verts = {}
selected_verts_with_faces = []
for ed_idx in all_edges_around_sel_verts:
count_faces_of_edges_around_sel_verts[ed_idx] = all_edges_faces_count[ed_idx]
if all_edges_faces_count[ed_idx] > 0:
ed = object.data.edges[ed_idx]
if not ed.vertices[0] in selected_verts_with_faces:
selected_verts_with_faces.append(ed.vertices[0])
if not ed.vertices[1] in selected_verts_with_faces:
selected_verts_with_faces.append(ed.vertices[1])
all_verts_part_of_faces.append(ed.vertices[0])
all_verts_part_of_faces.append(ed.vertices[1])
tuple(selected_verts_with_faces)
# Discard unneeded verts from calculations
participating_verts = []
movable_verts = []
for v_idx in all_selected_verts:
vert_has_edges_with_one_face = False
# Check if the current vert has at least one edge connected to only one face
for ed_idx in edges_connected_to_sel_verts[v_idx]:
if count_faces_of_edges_around_sel_verts[ed_idx] == 1:
vert_has_edges_with_one_face = True
# If the vert has two or fewer connected edges and is not part of any face,
# or the vert is part of a face and at least one of
# its connected edges has only one face attached to it
if (len(edges_connected_to_sel_verts[v_idx]) == 2 and
v_idx not in all_verts_part_of_faces) or \
len(edges_connected_to_sel_verts[v_idx]) == 1 or \
(v_idx in all_verts_part_of_faces and
vert_has_edges_with_one_face):
participating_verts.append(v_idx)
if v_idx not in all_verts_part_of_faces:
movable_verts.append(v_idx)
# Remove from the movable verts list those that are part of closed geometry (i.e. triangles, quads)
for mv_idx in movable_verts:
freeze_vert = False
mv_connected_verts = verts_connected_to_every_vert[mv_idx]
for actual_v_idx in all_selected_verts:
count_shared_neighbors = 0
checked_verts = []
for mv_conn_v_idx in mv_connected_verts:
if mv_idx != actual_v_idx:
if mv_conn_v_idx in verts_connected_to_every_vert[actual_v_idx] and \
mv_conn_v_idx not in checked_verts:
count_shared_neighbors += 1
checked_verts.append(mv_conn_v_idx)
if actual_v_idx in mv_connected_verts:
freeze_vert = True
break
if count_shared_neighbors == 2:
freeze_vert = True
break
if freeze_vert:
break
if freeze_vert:
movable_verts.remove(mv_idx)
# Calculate merge distance for participating verts
shortest_edge_length = None
for ed in object.data.edges:
if ed.vertices[0] in movable_verts and ed.vertices[1] in movable_verts:
v1 = object.data.vertices[ed.vertices[0]]
v2 = object.data.vertices[ed.vertices[1]]
length = (v1.co - v2.co).length
if shortest_edge_length is None:
shortest_edge_length = length
else:
if length < shortest_edge_length:
shortest_edge_length = length
if shortest_edge_length is not None:
edges_merge_distance = shortest_edge_length * 0.5
else:
edges_merge_distance = 0
# Group the verts that are near enough; they will be merged later
remaining_verts = []
remaining_verts += participating_verts
for v1_idx in participating_verts:
if v1_idx in remaining_verts and v1_idx in movable_verts:
verts_to_merge = []
coords_verts_to_merge = {}
verts_to_merge.append(v1_idx)
v1_co = object.data.vertices[v1_idx].co
coords_verts_to_merge[v1_idx] = (v1_co[0], v1_co[1], v1_co[2])
for v2_idx in remaining_verts:
if v1_idx != v2_idx:
v2_co = object.data.vertices[v2_idx].co
dist = (v1_co - v2_co).length
if dist <= edges_merge_distance: # Add the verts which are near enough
verts_to_merge.append(v2_idx)
coords_verts_to_merge[v2_idx] = (v2_co[0], v2_co[1], v2_co[2])
for vm_idx in verts_to_merge:
remaining_verts.remove(vm_idx)
if len(verts_to_merge) > 1:
# Calculate middle point of the verts to merge.
sum_x_co = 0
sum_y_co = 0
sum_z_co = 0
movable_verts_to_merge_count = 0
for i in range(len(verts_to_merge)):
if verts_to_merge[i] in movable_verts:
v_co = object.data.vertices[verts_to_merge[i]].co
sum_x_co += v_co[0]
sum_y_co += v_co[1]
sum_z_co += v_co[2]
movable_verts_to_merge_count += 1
middle_point_co = [
sum_x_co / movable_verts_to_merge_count,
sum_y_co / movable_verts_to_merge_count,
sum_z_co / movable_verts_to_merge_count
]
# Check if any vert to be merged is not movable
shortest_dist = None
are_verts_not_movable = False
verts_not_movable = []
for v_merge_idx in verts_to_merge:
if v_merge_idx in participating_verts and v_merge_idx not in movable_verts:
are_verts_not_movable = True
verts_not_movable.append(v_merge_idx)
if are_verts_not_movable:
# Get the vert connected to faces, that is nearest to
# the middle point of the movable verts
shortest_dist = None
for vcf_idx in verts_not_movable:
dist = abs((object.data.vertices[vcf_idx].co -
Vector(middle_point_co)).length)
if shortest_dist is None:
shortest_dist = dist
nearest_vert_idx = vcf_idx
else:
if dist < shortest_dist:
shortest_dist = dist
nearest_vert_idx = vcf_idx
coords = object.data.vertices[nearest_vert_idx].co
target_point_co = [coords[0], coords[1], coords[2]]
else:
target_point_co = middle_point_co
# Move verts to merge to the middle position
for v_merge_idx in verts_to_merge:
if v_merge_idx in movable_verts: # Only move the verts that are not part of faces
object.data.vertices[v_merge_idx].co[0] = target_point_co[0]
object.data.vertices[v_merge_idx].co[1] = target_point_co[1]
object.data.vertices[v_merge_idx].co[2] = target_point_co[2]
# Perform "Remove Doubles" to weld all the disconnected verts
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='EDIT')
bpy.ops.mesh.remove_doubles(threshold=0.0001)
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='OBJECT')
# Get all the definitive selected edges, after welding
selected_edges = []
edges_per_vert = {} # Edges connected to each selected vertex
for ed in object.data.edges:
if object.data.vertices[ed.vertices[0]].select and object.data.vertices[ed.vertices[1]].select:
selected_edges.append(ed.index)
# Save all the edges that belong to each vertex.
if not ed.vertices[0] in edges_per_vert:
edges_per_vert[ed.vertices[0]] = []
if not ed.vertices[1] in edges_per_vert:
edges_per_vert[ed.vertices[1]] = []
edges_per_vert[ed.vertices[0]].append(ed.index)
edges_per_vert[ed.vertices[1]].append(ed.index)
# Check if all the edges connected to each vert have two faces attached to them.
# To discard them later and make calculations faster
a = []
a += self.edge_face_count(object)
tuple(a)
verts_surrounded_by_faces = {}
for v_idx in edges_per_vert:
edges = edges_per_vert[v_idx]
edges_with_two_faces_count = 0
for ed_idx in edges_per_vert[v_idx]:
if a[ed_idx] == 2:
edges_with_two_faces_count += 1
if edges_with_two_faces_count == len(edges_per_vert[v_idx]):
verts_surrounded_by_faces[v_idx] = True
else:
verts_surrounded_by_faces[v_idx] = False
# Get all the selected vertices
selected_verts_idx = []
for v in object.data.vertices:
if v.select:
selected_verts_idx.append(v.index)
# Get all the faces of the object
all_object_faces_verts_idx = []
for face in object.data.polygons:
face_verts = []
face_verts.append(face.vertices[0])
face_verts.append(face.vertices[1])
face_verts.append(face.vertices[2])
if len(face.vertices) == 4:
face_verts.append(face.vertices[3])
all_object_faces_verts_idx.append(face_verts)
# Deselect all vertices
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='EDIT')
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='OBJECT')
# Make a dictionary with the verts related to each vert
related_key_verts = {}
for ed_idx in selected_edges:
ed = object.data.edges[ed_idx]
if not verts_surrounded_by_faces[ed.vertices[0]]:
if not ed.vertices[0] in related_key_verts:
related_key_verts[ed.vertices[0]] = []
if not ed.vertices[1] in related_key_verts[ed.vertices[0]]:
related_key_verts[ed.vertices[0]].append(ed.vertices[1])
if not verts_surrounded_by_faces[ed.vertices[1]]:
if not ed.vertices[1] in related_key_verts:
related_key_verts[ed.vertices[1]] = []
if not ed.vertices[0] in related_key_verts[ed.vertices[1]]:
related_key_verts[ed.vertices[1]].append(ed.vertices[0])
# Get groups of verts forming each face
faces_verts_idx = []
for v1 in related_key_verts: # verts-1 ....
for v2 in related_key_verts: # verts-2
if v1 != v2:
related_verts_in_common = []
v2_in_rel_v1 = False
v1_in_rel_v2 = False
for rel_v1 in related_key_verts[v1]:
# Check if related verts of verts-1 are related verts of verts-2
if rel_v1 in related_key_verts[v2]:
related_verts_in_common.append(rel_v1)
if v2 in related_key_verts[v1]:
v2_in_rel_v1 = True
if v1 in related_key_verts[v2]:
v1_in_rel_v2 = True
repeated_face = False
# If two verts have two related verts in common, they form a quad
if len(related_verts_in_common) == 2:
# Check if the face is already saved
all_faces_to_check_idx = faces_verts_idx + all_object_faces_verts_idx
for f_verts in all_faces_to_check_idx:
repeated_verts = 0
if len(f_verts) == 4:
if v1 in f_verts:
repeated_verts += 1
if v2 in f_verts:
repeated_verts += 1
if related_verts_in_common[0] in f_verts:
repeated_verts += 1
if related_verts_in_common[1] in f_verts:
repeated_verts += 1
if repeated_verts == len(f_verts):
repeated_face = True
break
if not repeated_face:
faces_verts_idx.append(
[v1, related_verts_in_common[0], v2, related_verts_in_common[1]]
)
# If two verts have one related vert in common and
# they are related to each other, they form a triangle
elif v2_in_rel_v1 and v1_in_rel_v2 and len(related_verts_in_common) == 1:
# Check if the face is already saved.
all_faces_to_check_idx = faces_verts_idx + all_object_faces_verts_idx
for f_verts in all_faces_to_check_idx:
repeated_verts = 0
if len(f_verts) == 3:
if v1 in f_verts:
repeated_verts += 1
if v2 in f_verts:
repeated_verts += 1
if related_verts_in_common[0] in f_verts:
repeated_verts += 1
if repeated_verts == len(f_verts):
repeated_face = True
break
if not repeated_face:
faces_verts_idx.append([v1, related_verts_in_common[0], v2])
# Keep only the faces that don't overlap by ignoring quads
# that overlap with two adjacent triangles
faces_to_not_include_idx = [] # Indices of faces_verts_idx to eliminate
all_faces_to_check_idx = faces_verts_idx + all_object_faces_verts_idx
for i in range(len(faces_verts_idx)):
for t in range(len(all_faces_to_check_idx)):
if i != t:
verts_in_common = 0
if len(faces_verts_idx[i]) == 4 and len(all_faces_to_check_idx[t]) == 3:
for v_idx in all_faces_to_check_idx[t]:
if v_idx in faces_verts_idx[i]:
verts_in_common += 1
# If it doesn't have all its vertices repeated in the other face
if verts_in_common == 3:
if i not in faces_to_not_include_idx:
faces_to_not_include_idx.append(i)
# Build faces discarding the ones in faces_to_not_include
me = object.data
bm = bmesh.new()
bm.from_mesh(me)
num_faces_created = 0
for i in range(len(faces_verts_idx)):
if i not in faces_to_not_include_idx:
bm.faces.new([bm.verts[v] for v in faces_verts_idx[i]])
num_faces_created += 1
bm.to_mesh(me)
bm.free()
for v_idx in selected_verts_idx:
self.main_object.data.vertices[v_idx].select = True
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='EDIT')
bpy.ops.mesh.normals_make_consistent(inside=False)
bpy.ops.object.mode_set('INVOKE_REGION_WIN', mode='OBJECT')
return num_faces_created
# Crosshatch skinning
def crosshatch_surface_invoke(self, ob_original_splines):
self.is_crosshatch = False
self.crosshatch_merge_distance = 0
objects_to_delete = [] # duplicated strokes to be deleted.
# If the main object uses modifiers, deactivate them temporarily until the surface is joined
# (without this, merging the surface verts with the main object doesn't work well)
self.modifiers_prev_viewport_state = []
if len(self.main_object.modifiers) > 0:
for m_idx in range(len(self.main_object.modifiers)):
self.modifiers_prev_viewport_state.append(
self.main_object.modifiers[m_idx].show_viewport
)
self.main_object.modifiers[m_idx].show_viewport = False
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_original_splines.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_original_splines.name]
if len(ob_original_splines.data.splines) >= 2:
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
ob_splines = bpy.context.object
ob_splines.name = "SURFSKIO_NE_STR"
# Get an estimated merge distance (measure the distances from the first point of the
# first spline to the tips of every other spline and keep the shortest one)
first_point_dist_sum = 0
first_dist = 0
second_dist = 0
coords_first_pt = ob_splines.data.splines[0].bezier_points[0].co
for i in range(len(ob_splines.data.splines)):
sp = ob_splines.data.splines[i]
if coords_first_pt != sp.bezier_points[0].co:
first_dist = (coords_first_pt - sp.bezier_points[0].co).length
if coords_first_pt != sp.bezier_points[len(sp.bezier_points) - 1].co:
second_dist = (coords_first_pt - sp.bezier_points[len(sp.bezier_points) - 1].co).length
first_point_dist_sum += first_dist + second_dist
if i == 0:
if first_dist != 0:
shortest_dist = first_dist
elif second_dist != 0:
shortest_dist = second_dist
if shortest_dist > first_dist and first_dist != 0:
shortest_dist = first_dist
if shortest_dist > second_dist and second_dist != 0:
shortest_dist = second_dist
self.crosshatch_merge_distance = shortest_dist / 20
# Recalculation of merge distance
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
ob_calc_merge_dist = bpy.context.object
ob_calc_merge_dist.name = "SURFSKIO_CALC_TMP"
objects_to_delete.append(ob_calc_merge_dist)
# Smooth out strokes a little to improve crosshatch detection
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='SELECT')
for i in range(4):
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Convert curves into mesh
ob_calc_merge_dist.data.resolution_u = 12
bpy.ops.object.convert(target='MESH', keep_original=False)
# Find "intersection-nodes"
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='SELECT')
bpy.ops.mesh.remove_doubles('INVOKE_REGION_WIN',
threshold=self.crosshatch_merge_distance)
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Remove verts with less than three edges
verts_edges_count = {}
for ed in ob_calc_merge_dist.data.edges:
v = ed.vertices
if v[0] not in verts_edges_count:
verts_edges_count[v[0]] = 0
if v[1] not in verts_edges_count:
verts_edges_count[v[1]] = 0
verts_edges_count[v[0]] += 1
verts_edges_count[v[1]] += 1
nodes_verts_coords = []
for v_idx in verts_edges_count:
v = ob_calc_merge_dist.data.vertices[v_idx]
if verts_edges_count[v_idx] < 3:
v.select = True
# Remove them
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.delete('INVOKE_REGION_WIN', type='VERT')
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='SELECT')
# Remove doubles to discard very near verts from calculations of distance
bpy.ops.mesh.remove_doubles(
'INVOKE_REGION_WIN',
threshold=self.crosshatch_merge_distance * 4.0
)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Get all coords of the resulting nodes
nodes_verts_coords = [(v.co[0], v.co[1], v.co[2]) for
v in ob_calc_merge_dist.data.vertices]
# Check if the strokes are a crosshatch
if len(nodes_verts_coords) >= 3:
self.is_crosshatch = True
shortest_dist = None
for co_1 in nodes_verts_coords:
for co_2 in nodes_verts_coords:
if co_1 != co_2:
dist = (Vector(co_1) - Vector(co_2)).length
if shortest_dist is not None:
if dist < shortest_dist:
shortest_dist = dist
else:
shortest_dist = dist
self.crosshatch_merge_distance = shortest_dist / 3
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_splines.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_splines.name]
# Deselect all points
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Smooth splines in a localized way, to eliminate "saw-tooth"
# like shapes when there are many points
for sp in ob_splines.data.splines:
angle_sum = 0
angle_limit = 2 # Degrees
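# Select the points of short, high-curvature runs (accumulated turning angle above the
# limit, with the points within the merge distance) so only they get smoothed below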
for t in range(len(sp.bezier_points)):
# On each iteration the "next two points" after the current one
# are checked, so stop early enough not to go out of range
if t <= len(sp.bezier_points) - 3:
p1 = sp.bezier_points[t]
p2 = sp.bezier_points[t + 1]
p3 = sp.bezier_points[t + 2]
vec_1 = p1.co - p2.co
vec_2 = p2.co - p3.co
if p2.co != p1.co and p2.co != p3.co:
angle = vec_1.angle(vec_2)
angle_sum += degrees(angle)
if angle_sum >= angle_limit: # If sum of angles is greater than the limit
if (p1.co - p2.co).length <= self.crosshatch_merge_distance:
p1.select_control_point = True
p1.select_left_handle = True
p1.select_right_handle = True
p2.select_control_point = True
p2.select_left_handle = True
p2.select_right_handle = True
if (p1.co - p2.co).length <= self.crosshatch_merge_distance:
p3.select_control_point = True
p3.select_left_handle = True
p3.select_right_handle = True
angle_sum = 0
sp.bezier_points[0].select_control_point = False
sp.bezier_points[0].select_left_handle = False
sp.bezier_points[0].select_right_handle = False
sp.bezier_points[len(sp.bezier_points) - 1].select_control_point = False
sp.bezier_points[len(sp.bezier_points) - 1].select_left_handle = False
sp.bezier_points[len(sp.bezier_points) - 1].select_right_handle = False
# Smooth out strokes a little to improve crosshatch detection
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
for i in range(15):
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Simplify the splines
for sp in ob_splines.data.splines:
angle_sum = 0
sp.bezier_points[0].select_control_point = True
sp.bezier_points[0].select_left_handle = True
sp.bezier_points[0].select_right_handle = True
sp.bezier_points[len(sp.bezier_points) - 1].select_control_point = True
sp.bezier_points[len(sp.bezier_points) - 1].select_left_handle = True
sp.bezier_points[len(sp.bezier_points) - 1].select_right_handle = True
angle_limit = 15 # Degrees
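# Keep the spline tips plus every point where the accumulated turning angle exceeds
# the limit; all other points are deleted below (selection inverted, then deleted)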
for t in range(len(sp.bezier_points)):
# On each iteration the "next two points" after the current one
# are checked, so stop early enough not to go out of range
if t <= len(sp.bezier_points) - 3:
p1 = sp.bezier_points[t]
p2 = sp.bezier_points[t + 1]
p3 = sp.bezier_points[t + 2]
vec_1 = p1.co - p2.co
vec_2 = p2.co - p3.co
if p2.co != p1.co and p2.co != p3.co:
angle = vec_1.angle(vec_2)
angle_sum += degrees(angle)
# If sum of angles is greater than the limit
if angle_sum >= angle_limit:
p1.select_control_point = True
p1.select_left_handle = True
p1.select_right_handle = True
p2.select_control_point = True
p2.select_left_handle = True
p2.select_right_handle = True
p3.select_control_point = True
p3.select_left_handle = True
p3.select_right_handle = True
angle_sum = 0
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all(action='INVERT')
bpy.ops.curve.delete(type='VERT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
objects_to_delete.append(ob_splines)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Check if the strokes are a crosshatch
if self.is_crosshatch:
all_points_coords = []
for i in range(len(ob_splines.data.splines)):
all_points_coords.append([])
all_points_coords[i] = [Vector((x, y, z)) for
x, y, z in [bp.co for
bp in ob_splines.data.splines[i].bezier_points]]
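# For every pair of segments belonging to different splines, find the closest points
# between their lines; when those points lie within ~1.5x the merge distance of each
# other and (almost) inside their segments, record them as intersection nodes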
all_intersections = []
checked_splines = []
for i in range(len(all_points_coords)):
for t in range(len(all_points_coords[i]) - 1):
bp1_co = all_points_coords[i][t]
bp2_co = all_points_coords[i][t + 1]
for i2 in range(len(all_points_coords)):
if i != i2 and i2 not in checked_splines:
for t2 in range(len(all_points_coords[i2]) - 1):
bp3_co = all_points_coords[i2][t2]
bp4_co = all_points_coords[i2][t2 + 1]
intersec_coords = intersect_line_line(
bp1_co, bp2_co, bp3_co, bp4_co
)
if intersec_coords is not None:
dist = (intersec_coords[0] - intersec_coords[1]).length
if dist <= self.crosshatch_merge_distance * 1.5:
temp_co, percent1 = intersect_point_line(
intersec_coords[0], bp1_co, bp2_co
)
if (percent1 >= -0.02 and percent1 <= 1.02):
temp_co, percent2 = intersect_point_line(
intersec_coords[1], bp3_co, bp4_co
)
if (percent2 >= -0.02 and percent2 <= 1.02):
# Format: spline index, first point index from
# corresponding segment, percentage from first point of
# actual segment, coords of intersection point
all_intersections.append(
(i, t, percent1,
ob_splines.matrix_world * intersec_coords[0])
)
all_intersections.append(
(i2, t2, percent2,
ob_splines.matrix_world * intersec_coords[1])
)
checked_splines.append(i)
# Sort list by spline, then by corresponding first point index of segment,
# and then by percentage from first point of segment: elements 0 and 1 respectively
all_intersections.sort(key=operator.itemgetter(0, 1, 2))
self.crosshatch_strokes_coords = {}
for i in range(len(all_intersections)):
if not all_intersections[i][0] in self.crosshatch_strokes_coords:
self.crosshatch_strokes_coords[all_intersections[i][0]] = []
self.crosshatch_strokes_coords[all_intersections[i][0]].append(
all_intersections[i][3]
) # Save intersection coords
else:
self.is_crosshatch = False
# Delete all duplicates
for o in objects_to_delete:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[o.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[o.name]
bpy.ops.object.delete()
# If the main object has modifiers, turn their "viewport view status" to
# what it was before the forced deactivation above
if len(self.main_object.modifiers) > 0:
for m_idx in range(len(self.main_object.modifiers)):
self.main_object.modifiers[m_idx].show_viewport = self.modifiers_prev_viewport_state[m_idx]
return
# Part of the Crosshatch process that is repeated when the operator is tweaked
def crosshatch_surface_execute(self):
# If the main object uses modifiers, deactivate them temporarily until the surface is joined
# (without this, merging the surface verts with the main object doesn't work well)
self.modifiers_prev_viewport_state = []
if len(self.main_object.modifiers) > 0:
for m_idx in range(len(self.main_object.modifiers)):
self.modifiers_prev_viewport_state.append(self.main_object.modifiers[m_idx].show_viewport)
self.main_object.modifiers[m_idx].show_viewport = False
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
me_name = "SURFSKIO_STK_TMP"
me = bpy.data.meshes.new(me_name)
all_verts_coords = []
all_edges = []
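# Rebuild the intersection strokes as a temporary edge mesh: consecutive intersection
# points of each crosshatch stroke become vertices joined by edges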
for st_idx in self.crosshatch_strokes_coords:
for co_idx in range(len(self.crosshatch_strokes_coords[st_idx])):
coords = self.crosshatch_strokes_coords[st_idx][co_idx]
all_verts_coords.append(coords)
if co_idx > 0:
all_edges.append((len(all_verts_coords) - 2, len(all_verts_coords) - 1))
me.from_pydata(all_verts_coords, all_edges, [])
me.update()
ob = bpy.data.objects.new(me_name, me)
ob.data = me
bpy.context.scene.objects.link(ob)
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob.name]
# Snap each vert and its nearest neighbor together at their middle position
verts = ob.data.vertices
checked_verts = []
for i in range(len(verts)):
shortest_dist = None
if i not in checked_verts:
for t in range(len(verts)):
if i != t and t not in checked_verts:
dist = (verts[i].co - verts[t].co).length
if shortest_dist is not None:
if dist < shortest_dist:
shortest_dist = dist
nearest_vert = t
else:
shortest_dist = dist
nearest_vert = t
middle_location = (verts[i].co + verts[nearest_vert].co) / 2
verts[i].co = middle_location
verts[nearest_vert].co = middle_location
checked_verts.append(i)
checked_verts.append(nearest_vert)
# Calculate average length between all the generated edges
ob = bpy.context.object
lengths_sum = 0
for ed in ob.data.edges:
v1 = ob.data.vertices[ed.vertices[0]]
v2 = ob.data.vertices[ed.vertices[1]]
lengths_sum += (v1.co - v2.co).length
edges_count = len(ob.data.edges)
# Guard against division by zero when the mesh has no edges
average_edge_length = lengths_sum / edges_count if edges_count != 0 else 0.0001
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='SELECT')
bpy.ops.mesh.remove_doubles('INVOKE_REGION_WIN',
threshold=average_edge_length / 15.0)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
final_points_ob = bpy.context.scene.objects.active
# Make a dictionary with the verts related to each vert
related_key_verts = {}
for ed in final_points_ob.data.edges:
if not ed.vertices[0] in related_key_verts:
related_key_verts[ed.vertices[0]] = []
if not ed.vertices[1] in related_key_verts:
related_key_verts[ed.vertices[1]] = []
if not ed.vertices[1] in related_key_verts[ed.vertices[0]]:
related_key_verts[ed.vertices[0]].append(ed.vertices[1])
if not ed.vertices[0] in related_key_verts[ed.vertices[1]]:
related_key_verts[ed.vertices[1]].append(ed.vertices[0])
# Get groups of verts forming each face
faces_verts_idx = []
for v1 in related_key_verts: # verts-1 ....
for v2 in related_key_verts: # verts-2
if v1 != v2:
related_verts_in_common = []
v2_in_rel_v1 = False
v1_in_rel_v2 = False
for rel_v1 in related_key_verts[v1]:
# Check if related verts of verts-1 are related verts of verts-2
if rel_v1 in related_key_verts[v2]:
related_verts_in_common.append(rel_v1)
if v2 in related_key_verts[v1]:
v2_in_rel_v1 = True
if v1 in related_key_verts[v2]:
v1_in_rel_v2 = True
repeated_face = False
# If two verts have two related verts in common, they form a quad
if len(related_verts_in_common) == 2:
# Check if the face is already saved
for f_verts in faces_verts_idx:
repeated_verts = 0
if len(f_verts) == 4:
if v1 in f_verts:
repeated_verts += 1
if v2 in f_verts:
repeated_verts += 1
if related_verts_in_common[0] in f_verts:
repeated_verts += 1
if related_verts_in_common[1] in f_verts:
repeated_verts += 1
if repeated_verts == len(f_verts):
repeated_face = True
break
if not repeated_face:
faces_verts_idx.append([v1, related_verts_in_common[0],
v2, related_verts_in_common[1]])
# If two verts have one related vert in common and they are
# related to each other, they form a triangle
elif v2_in_rel_v1 and v1_in_rel_v2 and len(related_verts_in_common) == 1:
# Check if the face is already saved.
for f_verts in faces_verts_idx:
repeated_verts = 0
if len(f_verts) == 3:
if v1 in f_verts:
repeated_verts += 1
if v2 in f_verts:
repeated_verts += 1
if related_verts_in_common[0] in f_verts:
repeated_verts += 1
if repeated_verts == len(f_verts):
repeated_face = True
break
if not repeated_face:
faces_verts_idx.append([v1, related_verts_in_common[0], v2])
# Keep only the faces that don't overlap by ignoring
# quads that overlap with two adjacent triangles
faces_to_not_include_idx = [] # Indices of faces_verts_idx to eliminate
for i in range(len(faces_verts_idx)):
for t in range(len(faces_verts_idx)):
if i != t:
verts_in_common = 0
if len(faces_verts_idx[i]) == 4 and len(faces_verts_idx[t]) == 3:
for v_idx in faces_verts_idx[t]:
if v_idx in faces_verts_idx[i]:
verts_in_common += 1
# If it doesn't have all its vertices repeated in the other face
if verts_in_common == 3:
if i not in faces_to_not_include_idx:
faces_to_not_include_idx.append(i)
# Build surface
all_surface_verts_co = []
verts_idx_translation = {}
for i in range(len(final_points_ob.data.vertices)):
coords = final_points_ob.data.vertices[i].co
all_surface_verts_co.append([coords[0], coords[1], coords[2]])
# Verts of each face.
all_surface_faces = []
for i in range(len(faces_verts_idx)):
if i not in faces_to_not_include_idx:
face = []
for v_idx in faces_verts_idx[i]:
face.append(v_idx)
all_surface_faces.append(face)
# Build the mesh
surf_me_name = "SURFSKIO_surface"
me_surf = bpy.data.meshes.new(surf_me_name)
me_surf.from_pydata(all_surface_verts_co, [], all_surface_faces)
me_surf.update()
ob_surface = bpy.data.objects.new(surf_me_name, me_surf)
bpy.context.scene.objects.link(ob_surface)
# Delete the temporary final-points object
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[final_points_ob.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[final_points_ob.name]
bpy.ops.object.delete()
# Delete isolated verts if there are any
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_surface.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_surface.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_face_by_sides(type='NOTEQUAL')
bpy.ops.mesh.delete()
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Join crosshatch results with original mesh
# Calculate a distance to merge the verts of the crosshatch surface to the main object
edges_length_sum = 0
for ed in ob_surface.data.edges:
edges_length_sum += (
ob_surface.data.vertices[ed.vertices[0]].co -
ob_surface.data.vertices[ed.vertices[1]].co
).length
if len(ob_surface.data.edges) > 0:
average_surface_edges_length = edges_length_sum / len(ob_surface.data.edges)
else:
average_surface_edges_length = 0.0001
# Make dictionary with all the verts connected to each vert, on the new surface object.
surface_connected_verts = {}
for ed in ob_surface.data.edges:
if not ed.vertices[0] in surface_connected_verts:
surface_connected_verts[ed.vertices[0]] = []
surface_connected_verts[ed.vertices[0]].append(ed.vertices[1])
if ed.vertices[1] not in surface_connected_verts:
surface_connected_verts[ed.vertices[1]] = []
surface_connected_verts[ed.vertices[1]].append(ed.vertices[0])
# Duplicate the new surface object, and use shrinkwrap to
# later find the nearest verts on the main object
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
final_ob_duplicate = bpy.context.scene.objects.active
bpy.ops.object.modifier_add('INVOKE_REGION_WIN', type='SHRINKWRAP')
final_ob_duplicate.modifiers["Shrinkwrap"].wrap_method = "NEAREST_VERTEX"
final_ob_duplicate.modifiers["Shrinkwrap"].target = self.main_object
bpy.ops.object.modifier_apply('INVOKE_REGION_WIN', apply_as='DATA', modifier='Shrinkwrap')
# Make list with verts of original mesh as index and coords as value
main_object_verts_coords = []
for v in self.main_object.data.vertices:
coords = self.main_object.matrix_world * v.co
# To avoid problems when taking "-0.00" as a different value as "0.00"
for c in range(len(coords)):
if "%.3f" % coords[c] == "-0.00":
coords[c] = 0
main_object_verts_coords.append(["%.3f" % coords[0], "%.3f" % coords[1], "%.3f" % coords[2]])
tuple(main_object_verts_coords)
# Determine which verts will be merged, snap them to the nearest verts
# of the original mesh, and select them
crosshatch_verts_to_merge = []
if self.automatic_join:
for i in range(len(ob_surface.data.vertices)):
# Calculate the distance from each of the connected verts to the current vert,
# and compare it with the distance they would have if joined.
# If they don't change much, that vert can be joined
merge_actual_vert = True
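# Verts connected to four or more edges are interior crosshatch nodes and are never
# snapped to the main object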
if len(surface_connected_verts[i]) < 4:
for c_v_idx in surface_connected_verts[i]:
points_original = []
points_original.append(ob_surface.data.vertices[c_v_idx].co)
points_original.append(ob_surface.data.vertices[i].co)
points_target = []
points_target.append(ob_surface.data.vertices[c_v_idx].co)
points_target.append(final_ob_duplicate.data.vertices[i].co)
vec_A = points_original[0] - points_original[1]
vec_B = points_target[0] - points_target[1]
dist_A = (points_original[0] - points_original[1]).length
dist_B = (points_target[0] - points_target[1]).length
if not (
points_original[0] == points_original[1] or
points_target[0] == points_target[1]
): # Only compute the angle when neither vector has zero length
angle = vec_A.angle(vec_B) / pi
else:
angle = 0
# Set a range of acceptable variation in the connected edges
if dist_B > dist_A * 1.7 * self.join_stretch_factor or \
dist_B < dist_A / 2 / self.join_stretch_factor or \
angle >= 0.15 * self.join_stretch_factor:
merge_actual_vert = False
break
else:
merge_actual_vert = False
if merge_actual_vert:
coords = final_ob_duplicate.data.vertices[i].co
# To avoid problems when taking "-0.000" as a different value as "0.00"
for c in range(len(coords)):
if "%.3f" % coords[c] == "-0.00":
coords[c] = 0
comparison_coords = ["%.3f" % coords[0], "%.3f" % coords[1], "%.3f" % coords[2]]
if comparison_coords in main_object_verts_coords:
# Get the index of the vert with those coords in the main object
main_object_related_vert_idx = main_object_verts_coords.index(comparison_coords)
if self.main_object.data.vertices[main_object_related_vert_idx].select is True or \
self.main_object_selected_verts_count == 0:
ob_surface.data.vertices[i].co = final_ob_duplicate.data.vertices[i].co
ob_surface.data.vertices[i].select = True
crosshatch_verts_to_merge.append(i)
# Make sure the vert in the main object is selected,
# in case it wasn't selected and the "join crosshatch" option is active
self.main_object.data.vertices[main_object_related_vert_idx].select = True
# Delete duplicated object
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[final_ob_duplicate.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[final_ob_duplicate.name]
bpy.ops.object.delete()
# Join crosshatched surface and main object
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_surface.name].select = True
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.join('INVOKE_REGION_WIN')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Perform Remove doubles to merge verts
if not (self.automatic_join is False and self.main_object_selected_verts_count == 0):
bpy.ops.mesh.remove_doubles(threshold=0.0001)
bpy.ops.mesh.select_all(action='DESELECT')
# If the main object has modifiers, restore their viewport visibility
# to what it was before the forced deactivation above
if len(self.main_object.modifiers) > 0:
for m_idx in range(len(self.main_object.modifiers)):
self.main_object.modifiers[m_idx].show_viewport = self.modifiers_prev_viewport_state[m_idx]
return {'FINISHED'}
def rectangular_surface(self):
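# Overview (derived from the code below): classify the current edge selection
# (SINGLE, TWO_CONNECTED, TWO_NOT_CONNECTED or NO_SELECTION), order the verts of
# each selected chain, distribute points along the sketched strokes according to
# the chains' edge proportions, optionally make loops cyclic or auto-join their
# tips, and finally build the new mesh with from_pydata() and join it into the
# main object, merging coincident verts with Remove Doubles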
# Selected edges
all_selected_edges_idx = []
all_selected_verts = []
all_verts_idx = []
for ed in self.main_object.data.edges:
if ed.select:
all_selected_edges_idx.append(ed.index)
# Selected vertices
if not ed.vertices[0] in all_selected_verts:
all_selected_verts.append(self.main_object.data.vertices[ed.vertices[0]])
if not ed.vertices[1] in all_selected_verts:
all_selected_verts.append(self.main_object.data.vertices[ed.vertices[1]])
# All verts (both from each edge) to determine later
# which are at the tips (those not repeated twice)
all_verts_idx.append(ed.vertices[0])
all_verts_idx.append(ed.vertices[1])
# Identify the tips and "middle-vertex" that separates U from V, if there is one
all_chains_tips_idx = []
for v_idx in all_verts_idx:
if all_verts_idx.count(v_idx) < 2:
all_chains_tips_idx.append(v_idx)
edges_connected_to_tips = []
for ed in self.main_object.data.edges:
if (ed.vertices[0] in all_chains_tips_idx or ed.vertices[1] in all_chains_tips_idx) and \
not (ed.vertices[0] in all_verts_idx and ed.vertices[1] in all_verts_idx):
edges_connected_to_tips.append(ed)
# Check closed selections
# List with groups of three verts, where the first element of each group is
# the unselected vert of a closed selection and the other two elements are the
# selected neighbor verts (it will be useful to determine which selection chain
# the unselected vert belongs to, and to determine the "middle-vertex")
single_unselected_verts_and_neighbors = []
# To identify a "closed" selection (a selection that is a closed chain except
# for one vertex), find the vertex that the edges connected to the tips have
# in common. If such a vertex exists, it is either the unselected vert that
# closes the selection or a "middle-vertex"
single_unselected_verts = []
for ed in edges_connected_to_tips:
for ed_b in edges_connected_to_tips:
if ed != ed_b:
if ed.vertices[0] == ed_b.vertices[0] and \
not self.main_object.data.vertices[ed.vertices[0]].select and \
ed.vertices[0] not in single_unselected_verts:
# The second element is one of the tips of the selected
# vertices of the closed selection
single_unselected_verts_and_neighbors.append(
[ed.vertices[0], ed.vertices[1], ed_b.vertices[1]]
)
single_unselected_verts.append(ed.vertices[0])
break
elif ed.vertices[0] == ed_b.vertices[1] and \
not self.main_object.data.vertices[ed.vertices[0]].select and \
ed.vertices[0] not in single_unselected_verts:
single_unselected_verts_and_neighbors.append(
[ed.vertices[0], ed.vertices[1], ed_b.vertices[0]]
)
single_unselected_verts.append(ed.vertices[0])
break
elif ed.vertices[1] == ed_b.vertices[0] and \
not self.main_object.data.vertices[ed.vertices[1]].select and \
ed.vertices[1] not in single_unselected_verts:
single_unselected_verts_and_neighbors.append(
[ed.vertices[1], ed.vertices[0], ed_b.vertices[1]]
)
single_unselected_verts.append(ed.vertices[1])
break
elif ed.vertices[1] == ed_b.vertices[1] and \
not self.main_object.data.vertices[ed.vertices[1]].select and \
ed.vertices[1] not in single_unselected_verts:
single_unselected_verts_and_neighbors.append(
[ed.vertices[1], ed.vertices[0], ed_b.vertices[0]]
)
single_unselected_verts.append(ed.vertices[1])
break
middle_vertex_idx = None
tips_to_discard_idx = []
# Check if there is a "middle-vertex", and get its index
for i in range(0, len(single_unselected_verts_and_neighbors)):
actual_chain_verts = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, single_unselected_verts_and_neighbors[i][1],
None, None
)
if single_unselected_verts_and_neighbors[i][2] != \
actual_chain_verts[len(actual_chain_verts) - 1].index:
middle_vertex_idx = single_unselected_verts_and_neighbors[i][0]
tips_to_discard_idx.append(single_unselected_verts_and_neighbors[i][1])
tips_to_discard_idx.append(single_unselected_verts_and_neighbors[i][2])
# List with pairs of verts that belong to the tips of each selection chain (row)
verts_tips_same_chain_idx = []
if len(all_chains_tips_idx) >= 2:
checked_v = []
for i in range(0, len(all_chains_tips_idx)):
if all_chains_tips_idx[i] not in checked_v:
v_chain = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, all_chains_tips_idx[i],
middle_vertex_idx, None
)
verts_tips_same_chain_idx.append([v_chain[0].index, v_chain[len(v_chain) - 1].index])
checked_v.append(v_chain[0].index)
checked_v.append(v_chain[len(v_chain) - 1].index)
# Selection tips (vertices).
verts_tips_parsed_idx = []
if len(all_chains_tips_idx) >= 2:
for spec_v_idx in all_chains_tips_idx:
if (spec_v_idx not in tips_to_discard_idx):
verts_tips_parsed_idx.append(spec_v_idx)
# Identify the type of selection made by the user
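# Selection types assigned below:
#   TWO_CONNECTED     - two chains sharing one unselected "middle-vertex" (4 tips)
#   SINGLE            - a single open chain (2 tips)
#   TWO_NOT_CONNECTED - two separate chains (4 tips, no middle-vertex)
#   NO_SELECTION      - nothing usable selected (0 tips) and more than one stroke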
if middle_vertex_idx is not None:
# If there are 4 tips (two selection chains), and
# there is only one single unselected vert (the middle vert)
if len(all_chains_tips_idx) == 4 and len(single_unselected_verts_and_neighbors) == 1:
selection_type = "TWO_CONNECTED"
else:
# The type of the selection was not identified, the script stops.
self.report({'WARNING'}, "The selection isn't valid.")
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.cleanup_on_interruption()
self.stopping_errors = True
return{'CANCELLED'}
else:
if len(all_chains_tips_idx) == 2: # If there are 2 tips
selection_type = "SINGLE"
elif len(all_chains_tips_idx) == 4: # If there are 4 tips
selection_type = "TWO_NOT_CONNECTED"
elif len(all_chains_tips_idx) == 0:
if len(self.main_splines.data.splines) > 1:
selection_type = "NO_SELECTION"
else:
# If the selection was not identified and there is only one stroke,
# there's no possibility to build a surface, so the script is interrupted
self.report({'WARNING'}, "The selection isn't valid.")
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.cleanup_on_interruption()
self.stopping_errors = True
return{'CANCELLED'}
else:
# The type of the selection was not identified, the script stops
self.report({'WARNING'}, "The selection isn't valid.")
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.cleanup_on_interruption()
self.stopping_errors = True
return{'CANCELLED'}
# If the selection type is TWO_NOT_CONNECTED and there is only one stroke, stop the script
if selection_type == "TWO_NOT_CONNECTED" and len(self.main_splines.data.splines) == 1:
self.report({'WARNING'},
"At least two strokes are needed when there are two not connected selections")
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.cleanup_on_interruption()
self.stopping_errors = True
return{'CANCELLED'}
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_splines.name].select = True
bpy.context.scene.objects.active = bpy.context.scene.objects[self.main_splines.name]
# Enter editmode for the new curve (converted from grease pencil strokes), to smooth it out
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.curve.smooth('INVOKE_REGION_WIN')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.selection_U_exists = False
self.selection_U2_exists = False
self.selection_V_exists = False
self.selection_V2_exists = False
self.selection_U_is_closed = False
self.selection_U2_is_closed = False
self.selection_V_is_closed = False
self.selection_V2_is_closed = False
# Define what vertices are at the tips of each selection and are not the middle-vertex
if selection_type == "TWO_CONNECTED":
self.selection_U_exists = True
self.selection_V_exists = True
closing_vert_U_idx = None
closing_vert_V_idx = None
closing_vert_U2_idx = None
closing_vert_V2_idx = None
# Determine which selection is Selection-U and which is Selection-V
points_A = []
points_B = []
points_first_stroke_tips = []
points_A.append(
self.main_object.matrix_world * self.main_object.data.vertices[verts_tips_parsed_idx[0]].co
)
points_A.append(
self.main_object.matrix_world * self.main_object.data.vertices[middle_vertex_idx].co
)
points_B.append(
self.main_object.matrix_world * self.main_object.data.vertices[verts_tips_parsed_idx[1]].co
)
points_B.append(
self.main_object.matrix_world * self.main_object.data.vertices[middle_vertex_idx].co
)
points_first_stroke_tips.append(
self.main_splines.data.splines[0].bezier_points[0].co
)
points_first_stroke_tips.append(
self.main_splines.data.splines[0].bezier_points[
len(self.main_splines.data.splines[0].bezier_points) - 1
].co
)
angle_A = self.orientation_difference(points_A, points_first_stroke_tips)
angle_B = self.orientation_difference(points_B, points_first_stroke_tips)
if angle_A < angle_B:
first_vert_U_idx = verts_tips_parsed_idx[0]
first_vert_V_idx = verts_tips_parsed_idx[1]
else:
first_vert_U_idx = verts_tips_parsed_idx[1]
first_vert_V_idx = verts_tips_parsed_idx[0]
elif selection_type == "SINGLE" or selection_type == "TWO_NOT_CONNECTED":
first_sketched_point_first_stroke_co = self.main_splines.data.splines[0].bezier_points[0].co
last_sketched_point_first_stroke_co = \
self.main_splines.data.splines[0].bezier_points[
len(self.main_splines.data.splines[0].bezier_points) - 1
].co
first_sketched_point_last_stroke_co = \
self.main_splines.data.splines[
len(self.main_splines.data.splines) - 1
].bezier_points[0].co
if len(self.main_splines.data.splines) > 1:
first_sketched_point_second_stroke_co = self.main_splines.data.splines[1].bezier_points[0].co
last_sketched_point_second_stroke_co = \
self.main_splines.data.splines[1].bezier_points[
len(self.main_splines.data.splines[1].bezier_points) - 1
].co
single_unselected_neighbors = [] # Only the neighbors of the single unselected verts
for verts_neig_idx in single_unselected_verts_and_neighbors:
single_unselected_neighbors.append(verts_neig_idx[1])
single_unselected_neighbors.append(verts_neig_idx[2])
all_chains_tips_and_middle_vert = []
for v_idx in all_chains_tips_idx:
if v_idx not in single_unselected_neighbors:
all_chains_tips_and_middle_vert.append(v_idx)
all_chains_tips_and_middle_vert += single_unselected_verts
all_participating_verts = all_chains_tips_and_middle_vert + all_verts_idx
# The tip of the selected vertices nearest to the first point of the first sketched stroke
nearest_tip_to_first_st_first_pt_idx, shortest_distance_to_first_stroke = \
self.shortest_distance(
self.main_object,
first_sketched_point_first_stroke_co,
all_chains_tips_and_middle_vert
)
# If the nearest tip is not from a closed selection, get the opposite tip vertex index
if nearest_tip_to_first_st_first_pt_idx not in single_unselected_verts or \
nearest_tip_to_first_st_first_pt_idx == middle_vertex_idx:
nearest_tip_to_first_st_first_pt_opposite_idx = \
self.opposite_tip(
nearest_tip_to_first_st_first_pt_idx,
verts_tips_same_chain_idx
)
# The tip of the selected vertices nearest to the last point of the first sketched stroke
nearest_tip_to_first_st_last_pt_idx, temp_dist = \
self.shortest_distance(
self.main_object,
last_sketched_point_first_stroke_co,
all_chains_tips_and_middle_vert
)
# The tip of the selected vertices nearest to the first point of the last sketched stroke
nearest_tip_to_last_st_first_pt_idx, shortest_distance_to_last_stroke = \
self.shortest_distance(
self.main_object,
first_sketched_point_last_stroke_co,
all_chains_tips_and_middle_vert
)
if len(self.main_splines.data.splines) > 1:
# The selected vertex nearest to the first point of the second sketched stroke
# (This will be useful to determine the direction of the closed
# selection V when extruding along strokes)
nearest_vert_to_second_st_first_pt_idx, temp_dist = \
self.shortest_distance(
self.main_object,
first_sketched_point_second_stroke_co,
all_verts_idx
)
# The selected vertex nearest to the last point of the second sketched stroke
# (This will be useful to determine the direction of the closed
# selection V2 when extruding along strokes)
nearest_vert_to_second_st_last_pt_idx, temp_dist = \
self.shortest_distance(
self.main_object,
last_sketched_point_second_stroke_co,
all_verts_idx
)
# Determine if the single selection will be treated as U or as V
edges_sum = 0
for i in all_selected_edges_idx:
edges_sum += (
(self.main_object.matrix_world *
self.main_object.data.vertices[self.main_object.data.edges[i].vertices[0]].co) -
(self.main_object.matrix_world *
self.main_object.data.vertices[self.main_object.data.edges[i].vertices[1]].co)
).length
average_edge_length = edges_sum / len(all_selected_edges_idx)
# Get shortest distance from the first point of the last stroke to any participating vertex
temp_idx, shortest_distance_to_last_stroke = \
self.shortest_distance(
self.main_object,
first_sketched_point_last_stroke_co,
all_participating_verts
)
# If the first stroke starts close enough to the selection, and the last stroke
# also starts near the selection, interpret things as an "extrude along strokes"
# instead of an "extrude through strokes"
if shortest_distance_to_first_stroke < average_edge_length / 4 and \
shortest_distance_to_last_stroke < average_edge_length and \
len(self.main_splines.data.splines) > 1:
self.selection_U_exists = False
self.selection_V_exists = True
# If the first selection is not closed
if nearest_tip_to_first_st_first_pt_idx not in single_unselected_verts or \
nearest_tip_to_first_st_first_pt_idx == middle_vertex_idx:
self.selection_V_is_closed = False
first_neighbor_V_idx = None
closing_vert_U_idx = None
closing_vert_U2_idx = None
closing_vert_V_idx = None
closing_vert_V2_idx = None
first_vert_V_idx = nearest_tip_to_first_st_first_pt_idx
if selection_type == "TWO_NOT_CONNECTED":
self.selection_V2_exists = True
first_vert_V2_idx = nearest_tip_to_first_st_last_pt_idx
else:
self.selection_V_is_closed = True
closing_vert_V_idx = nearest_tip_to_first_st_first_pt_idx
# Get the neighbors of the first (unselected) vert of the closed selection V.
vert_neighbors = []
for verts in single_unselected_verts_and_neighbors:
if verts[0] == nearest_tip_to_first_st_first_pt_idx:
vert_neighbors.append(verts[1])
vert_neighbors.append(verts[2])
break
verts_V = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, vert_neighbors[0], middle_vertex_idx, None
)
for i in range(0, len(verts_V)):
if verts_V[i].index == nearest_vert_to_second_st_first_pt_idx:
# If the vertex nearest to the first point of the second stroke
# is in the first half of the selected verts
if i >= len(verts_V) / 2:
first_vert_V_idx = vert_neighbors[1]
break
else:
first_vert_V_idx = vert_neighbors[0]
break
if selection_type == "TWO_NOT_CONNECTED":
self.selection_V2_exists = True
# If the second selection is not closed
if nearest_tip_to_first_st_last_pt_idx not in single_unselected_verts or \
nearest_tip_to_first_st_last_pt_idx == middle_vertex_idx:
self.selection_V2_is_closed = False
first_neighbor_V2_idx = None
closing_vert_V2_idx = None
first_vert_V2_idx = nearest_tip_to_first_st_last_pt_idx
else:
self.selection_V2_is_closed = True
closing_vert_V2_idx = nearest_tip_to_first_st_last_pt_idx
# Get the neighbors of the first (unselected) vert of the closed selection V2
vert_neighbors = []
for verts in single_unselected_verts_and_neighbors:
if verts[0] == nearest_tip_to_first_st_last_pt_idx:
vert_neighbors.append(verts[1])
vert_neighbors.append(verts[2])
break
verts_V2 = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, vert_neighbors[0], middle_vertex_idx, None
)
for i in range(0, len(verts_V2)):
if verts_V2[i].index == nearest_vert_to_second_st_last_pt_idx:
# If the vertex nearest to the first point of the second stroke
# is in the first half of the selected verts
if i >= len(verts_V2) / 2:
first_vert_V2_idx = vert_neighbors[1]
break
else:
first_vert_V2_idx = vert_neighbors[0]
break
else:
self.selection_V2_exists = False
else:
self.selection_U_exists = True
self.selection_V_exists = False
# If the first selection is not closed
if nearest_tip_to_first_st_first_pt_idx not in single_unselected_verts or \
nearest_tip_to_first_st_first_pt_idx == middle_vertex_idx:
self.selection_U_is_closed = False
first_neighbor_U_idx = None
closing_vert_U_idx = None
points_tips = []
points_tips.append(
self.main_object.matrix_world *
self.main_object.data.vertices[nearest_tip_to_first_st_first_pt_idx].co
)
points_tips.append(
self.main_object.matrix_world *
self.main_object.data.vertices[nearest_tip_to_first_st_first_pt_opposite_idx].co
)
points_first_stroke_tips = []
points_first_stroke_tips.append(self.main_splines.data.splines[0].bezier_points[0].co)
points_first_stroke_tips.append(
self.main_splines.data.splines[0].bezier_points[
len(self.main_splines.data.splines[0].bezier_points) - 1
].co
)
vec_A = points_tips[0] - points_tips[1]
vec_B = points_first_stroke_tips[0] - points_first_stroke_tips[1]
# Compare the direction of the selection and the first
# grease pencil stroke to determine which is the "first" vertex of the selection
if vec_A.dot(vec_B) < 0:
first_vert_U_idx = nearest_tip_to_first_st_first_pt_opposite_idx
else:
first_vert_U_idx = nearest_tip_to_first_st_first_pt_idx
else:
self.selection_U_is_closed = True
closing_vert_U_idx = nearest_tip_to_first_st_first_pt_idx
# Get the neighbors of the first (unselected) vert of the closed selection U
vert_neighbors = []
for verts in single_unselected_verts_and_neighbors:
if verts[0] == nearest_tip_to_first_st_first_pt_idx:
vert_neighbors.append(verts[1])
vert_neighbors.append(verts[2])
break
points_first_and_neighbor = []
points_first_and_neighbor.append(
self.main_object.matrix_world *
self.main_object.data.vertices[nearest_tip_to_first_st_first_pt_idx].co
)
points_first_and_neighbor.append(
self.main_object.matrix_world *
self.main_object.data.vertices[vert_neighbors[0]].co
)
points_first_stroke_tips = []
points_first_stroke_tips.append(self.main_splines.data.splines[0].bezier_points[0].co)
points_first_stroke_tips.append(self.main_splines.data.splines[0].bezier_points[1].co)
vec_A = points_first_and_neighbor[0] - points_first_and_neighbor[1]
vec_B = points_first_stroke_tips[0] - points_first_stroke_tips[1]
# Compare the direction of the selection and the first grease pencil stroke to
# determine which is the vertex neighbor to the first vertex (unselected) of
# the closed selection. This will determine the direction of the closed selection
if vec_A.dot(vec_B) < 0:
first_vert_U_idx = vert_neighbors[1]
else:
first_vert_U_idx = vert_neighbors[0]
if selection_type == "TWO_NOT_CONNECTED":
self.selection_U2_exists = True
# If the second selection is not closed
if nearest_tip_to_last_st_first_pt_idx not in single_unselected_verts or \
nearest_tip_to_last_st_first_pt_idx == middle_vertex_idx:
self.selection_U2_is_closed = False
first_neighbor_U2_idx = None
closing_vert_U2_idx = None
first_vert_U2_idx = nearest_tip_to_last_st_first_pt_idx
else:
self.selection_U2_is_closed = True
closing_vert_U2_idx = nearest_tip_to_last_st_first_pt_idx
# Get the neighbors of the first (unselected) vert of the closed selection U2
vert_neighbors = []
for verts in single_unselected_verts_and_neighbors:
if verts[0] == nearest_tip_to_last_st_first_pt_idx:
vert_neighbors.append(verts[1])
vert_neighbors.append(verts[2])
break
points_first_and_neighbor = []
points_first_and_neighbor.append(
self.main_object.matrix_world *
self.main_object.data.vertices[nearest_tip_to_last_st_first_pt_idx].co
)
points_first_and_neighbor.append(
self.main_object.matrix_world *
self.main_object.data.vertices[vert_neighbors[0]].co
)
points_last_stroke_tips = []
points_last_stroke_tips.append(
self.main_splines.data.splines[
len(self.main_splines.data.splines) - 1
].bezier_points[0].co
)
points_last_stroke_tips.append(
self.main_splines.data.splines[
len(self.main_splines.data.splines) - 1
].bezier_points[1].co
)
vec_A = points_first_and_neighbor[0] - points_first_and_neighbor[1]
vec_B = points_last_stroke_tips[0] - points_last_stroke_tips[1]
# Compare the direction of the selection and the last grease pencil stroke to
# determine which is the vertex neighbor to the first vertex (unselected) of
# the closed selection. This will determine the direction of the closed selection
if vec_A.dot(vec_B) < 0:
first_vert_U2_idx = vert_neighbors[1]
else:
first_vert_U2_idx = vert_neighbors[0]
else:
self.selection_U2_exists = False
elif selection_type == "NO_SELECTION":
self.selection_U_exists = False
self.selection_V_exists = False
# Get an ordered list of the vertices of Selection-U
verts_ordered_U = []
if self.selection_U_exists:
verts_ordered_U = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, first_vert_U_idx,
middle_vertex_idx, closing_vert_U_idx
)
verts_ordered_U_indices = [x.index for x in verts_ordered_U]
# Get an ordered list of the vertices of Selection-U2
verts_ordered_U2 = []
if self.selection_U2_exists:
verts_ordered_U2 = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, first_vert_U2_idx,
middle_vertex_idx, closing_vert_U2_idx
)
verts_ordered_U2_indices = [x.index for x in verts_ordered_U2]
# Get an ordered list of the vertices of Selection-V
verts_ordered_V = []
if self.selection_V_exists:
verts_ordered_V = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, first_vert_V_idx,
middle_vertex_idx, closing_vert_V_idx
)
verts_ordered_V_indices = [x.index for x in verts_ordered_V]
# Get an ordered list of the vertices of Selection-V2
verts_ordered_V2 = []
if self.selection_V2_exists:
verts_ordered_V2 = self.get_ordered_verts(
self.main_object, all_selected_edges_idx,
all_verts_idx, first_vert_V2_idx,
middle_vertex_idx, closing_vert_V2_idx
)
verts_ordered_V2_indices = [x.index for x in verts_ordered_V2]
# When there are two not-connected selections, check that both have the same
# number of verts. If not, terminate the script
if ((self.selection_U2_exists and len(verts_ordered_U) != len(verts_ordered_U2)) or
(self.selection_V2_exists and len(verts_ordered_V) != len(verts_ordered_V2))):
# Display a warning
self.report({'WARNING'}, "Both selections must have the same number of edges")
self.cleanup_on_interruption()
self.stopping_errors = True
return{'CANCELLED'}
# Calculate edges U proportions
# Sum selected edges U lengths
edges_lengths_U = []
edges_lengths_sum_U = 0
if self.selection_U_exists:
edges_lengths_U, edges_lengths_sum_U = self.get_chain_length(
self.main_object,
verts_ordered_U
)
if self.selection_U2_exists:
edges_lengths_U2, edges_lengths_sum_U2 = self.get_chain_length(
self.main_object,
verts_ordered_U2
)
# Sum selected edges V lengths
edges_lengths_V = []
edges_lengths_sum_V = 0
if self.selection_V_exists:
edges_lengths_V, edges_lengths_sum_V = self.get_chain_length(
self.main_object,
verts_ordered_V
)
if self.selection_V2_exists:
edges_lengths_V2, edges_lengths_sum_V2 = self.get_chain_length(
self.main_object,
verts_ordered_V2
)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.subdivide('INVOKE_REGION_WIN',
number_cuts=bpy.context.scene.bsurfaces.SURFSK_precision)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Proportions U
edges_proportions_U = []
edges_proportions_U = self.get_edges_proportions(
edges_lengths_U, edges_lengths_sum_U,
self.selection_U_exists, self.edges_U
)
verts_count_U = len(edges_proportions_U) + 1
if self.selection_U2_exists:
edges_proportions_U2 = []
edges_proportions_U2 = self.get_edges_proportions(
edges_lengths_U2, edges_lengths_sum_U2,
self.selection_U2_exists, self.edges_V
)
verts_count_U2 = len(edges_proportions_U2) + 1
# Proportions V
edges_proportions_V = []
edges_proportions_V = self.get_edges_proportions(
edges_lengths_V, edges_lengths_sum_V,
self.selection_V_exists, self.edges_V
)
verts_count_V = len(edges_proportions_V) + 1
if self.selection_V2_exists:
edges_proportions_V2 = []
edges_proportions_V2 = self.get_edges_proportions(
edges_lengths_V2, edges_lengths_sum_V2,
self.selection_V2_exists, self.edges_V
)
verts_count_V2 = len(edges_proportions_V2) + 1
# Cyclic Follow: simplify sketched curves, make them Cyclic, and complete
# the actual sketched curves with a "closing segment"
if self.cyclic_follow and not self.selection_V_exists and not \
((self.selection_U_exists and not self.selection_U_is_closed) or
(self.selection_U2_exists and not self.selection_U2_is_closed)):
simplified_spline_coords = []
simplified_curve = []
ob_simplified_curve = []
splines_first_v_co = []
for i in range(len(self.main_splines.data.splines)):
# Create a curve object for the actual spline "cyclic extension"
simplified_curve.append(bpy.data.curves.new('SURFSKIO_simpl_crv', 'CURVE'))
ob_simplified_curve.append(bpy.data.objects.new('SURFSKIO_simpl_crv', simplified_curve[i]))
bpy.context.scene.objects.link(ob_simplified_curve[i])
simplified_curve[i].dimensions = "3D"
spline_coords = []
for bp in self.main_splines.data.splines[i].bezier_points:
spline_coords.append(bp.co)
# Simplification
simplified_spline_coords.append(self.simplify_spline(spline_coords, 5))
# Get the coordinates of the first vert of the actual spline
splines_first_v_co.append(simplified_spline_coords[i][0])
# Generate the spline
spline = simplified_curve[i].splines.new('BEZIER')
# add one point fewer, because the spline already has one point when created
spline.bezier_points.add(len(simplified_spline_coords[i]) - 1)
for p in range(0, len(simplified_spline_coords[i])):
spline.bezier_points[p].co = simplified_spline_coords[i][p]
spline.use_cyclic_u = True
spline_bp_count = len(spline.bezier_points)
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_simplified_curve[i].name].select = True
bpy.context.scene.objects.active = bpy.context.scene.objects[ob_simplified_curve[i].name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='SELECT')
bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type='AUTOMATIC')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Select the "closing segment", and subdivide it
ob_simplified_curve[i].data.splines[0].bezier_points[0].select_control_point = True
ob_simplified_curve[i].data.splines[0].bezier_points[0].select_left_handle = True
ob_simplified_curve[i].data.splines[0].bezier_points[0].select_right_handle = True
ob_simplified_curve[i].data.splines[0].bezier_points[spline_bp_count - 1].select_control_point = True
ob_simplified_curve[i].data.splines[0].bezier_points[spline_bp_count - 1].select_left_handle = True
ob_simplified_curve[i].data.splines[0].bezier_points[spline_bp_count - 1].select_right_handle = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
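# The number of cuts scales with how long the "closing segment" is compared to
# the average grease pencil segment, so its point density ends up similar to the
# rest of the stroke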
segments = int(sqrt(
(ob_simplified_curve[i].data.splines[0].bezier_points[0].co -
ob_simplified_curve[i].data.splines[0].bezier_points[spline_bp_count - 1].co).length /
self.average_gp_segment_length
))
for t in range(2):
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=segments)
# Delete the other vertices and make it non-cyclic to
# keep only the needed verts of the "closing segment"
bpy.ops.curve.select_all(action='INVERT')
bpy.ops.curve.delete(type='VERT')
ob_simplified_curve[i].data.splines[0].use_cyclic_u = False
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Add the points of the "closing segment" to the original curve from grease pencil stroke
first_new_index = len(self.main_splines.data.splines[i].bezier_points)
self.main_splines.data.splines[i].bezier_points.add(
len(ob_simplified_curve[i].data.splines[0].bezier_points) - 1
)
for t in range(1, len(ob_simplified_curve[i].data.splines[0].bezier_points)):
self.main_splines.data.splines[i].bezier_points[t - 1 + first_new_index].co = \
ob_simplified_curve[i].data.splines[0].bezier_points[t].co
# Delete the temporal curve
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_simplified_curve[i].name].select = True
bpy.context.scene.objects.active = bpy.context.scene.objects[ob_simplified_curve[i].name]
bpy.ops.object.delete()
# Get the coords of the points distributed along the sketched strokes,
# with proportions-U of the first selection
pts_on_strokes_with_proportions_U = self.distribute_pts(
self.main_splines.data.splines,
edges_proportions_U
)
sketched_splines_parsed = []
if self.selection_U2_exists:
# Initialize the multidimensional list with the proportions of all the segments
proportions_loops_crossing_strokes = []
for i in range(len(pts_on_strokes_with_proportions_U)):
proportions_loops_crossing_strokes.append([])
for t in range(len(pts_on_strokes_with_proportions_U[0])):
proportions_loops_crossing_strokes[i].append(None)
# Calculate the proportions of each segment of the loops-U from pts_on_strokes_with_proportions_U
for lp in range(len(pts_on_strokes_with_proportions_U[0])):
loop_segments_lengths = []
for st in range(len(pts_on_strokes_with_proportions_U)):
# When on the first stroke, add the segment from the selection to the first stroke
if st == 0:
loop_segments_lengths.append(
((self.main_object.matrix_world * verts_ordered_U[lp].co) -
pts_on_strokes_with_proportions_U[0][lp]).length
)
# For all strokes except for the last, calculate the distance
# from the actual stroke to the next
if st != len(pts_on_strokes_with_proportions_U) - 1:
loop_segments_lengths.append(
(pts_on_strokes_with_proportions_U[st][lp] -
pts_on_strokes_with_proportions_U[st + 1][lp]).length
)
# When on the last stroke, add the segments
# from the last stroke to the second selection
if st == len(pts_on_strokes_with_proportions_U) - 1:
loop_segments_lengths.append(
(pts_on_strokes_with_proportions_U[st][lp] -
(self.main_object.matrix_world * verts_ordered_U2[lp].co)).length
)
# Calculate full loop length
loop_seg_lengths_sum = 0
for i in range(len(loop_segments_lengths)):
loop_seg_lengths_sum += loop_segments_lengths[i]
# Fill the multidimensional list with the proportions of all the segments
for st in range(len(pts_on_strokes_with_proportions_U)):
proportions_loops_crossing_strokes[st][lp] = \
loop_segments_lengths[st] / loop_seg_lengths_sum
# Calculate proportions for each stroke
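# Each stroke gets its own set of edge proportions: the proportions are blended
# from those of selection U towards those of selection U2, weighted by how far
# along the crossing loops the stroke lies (the accumulated segment proportions)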
for st in range(len(pts_on_strokes_with_proportions_U)):
actual_stroke_spline = []
# Needs to be a list for the "distribute_pts" method
actual_stroke_spline.append(self.main_splines.data.splines[st])
# Calculate the proportions for the actual stroke.
actual_edges_proportions_U = []
for i in range(len(edges_proportions_U)):
proportions_sum = 0
# Sum the proportions of this loop up to the actual.
for t in range(0, st + 1):
proportions_sum += proportions_loops_crossing_strokes[t][i]
# i + 1, because proportions_loops_crossing_strokes refers to loops,
# and the proportions refer to edges, so we start at the element 1
# of proportions_loops_crossing_strokes instead of element 0
actual_edges_proportions_U.append(
edges_proportions_U[i] -
((edges_proportions_U[i] - edges_proportions_U2[i]) * proportions_sum)
)
points_actual_spline = self.distribute_pts(actual_stroke_spline, actual_edges_proportions_U)
sketched_splines_parsed.append(points_actual_spline[0])
else:
sketched_splines_parsed = pts_on_strokes_with_proportions_U
# If the selection type is "TWO_NOT_CONNECTED" replace the
# points of the last spline with the points in the "target" selection
if selection_type == "TWO_NOT_CONNECTED":
if self.selection_U2_exists:
for i in range(0, len(sketched_splines_parsed[len(sketched_splines_parsed) - 1])):
sketched_splines_parsed[len(sketched_splines_parsed) - 1][i] = \
self.main_object.matrix_world * verts_ordered_U2[i].co
# Create temporary curves along the "control-points" found
# on the sketched curves and the mesh selection
mesh_ctrl_pts_name = "SURFSKIO_ctrl_pts"
me = bpy.data.meshes.new(mesh_ctrl_pts_name)
ob_ctrl_pts = bpy.data.objects.new(mesh_ctrl_pts_name, me)
ob_ctrl_pts.data = me
bpy.context.scene.objects.link(ob_ctrl_pts)
cyclic_loops_U = []
first_verts = []
second_verts = []
last_verts = []
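# Each iteration builds one chain of control points: the vert of selection U (if
# any), then one point per stroke, connected by edges. These chains become the
# "loops-U" splines after the mesh is converted to a curve further below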
for i in range(0, verts_count_U):
vert_num_in_spline = 1
if self.selection_U_exists:
ob_ctrl_pts.data.vertices.add(1)
last_v = ob_ctrl_pts.data.vertices[len(ob_ctrl_pts.data.vertices) - 1]
last_v.co = self.main_object.matrix_world * verts_ordered_U[i].co
vert_num_in_spline += 1
for t in range(0, len(sketched_splines_parsed)):
ob_ctrl_pts.data.vertices.add(1)
v = ob_ctrl_pts.data.vertices[len(ob_ctrl_pts.data.vertices) - 1]
v.co = sketched_splines_parsed[t][i]
if vert_num_in_spline > 1:
ob_ctrl_pts.data.edges.add(1)
ob_ctrl_pts.data.edges[len(ob_ctrl_pts.data.edges) - 1].vertices[0] = \
len(ob_ctrl_pts.data.vertices) - 2
ob_ctrl_pts.data.edges[len(ob_ctrl_pts.data.edges) - 1].vertices[1] = \
len(ob_ctrl_pts.data.vertices) - 1
if t == 0:
first_verts.append(v.index)
if t == 1:
second_verts.append(v.index)
if t == len(sketched_splines_parsed) - 1:
last_verts.append(v.index)
last_v = v
vert_num_in_spline += 1
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_ctrl_pts.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_ctrl_pts.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Determine which loops-U will be "Cyclic"
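# A loop-U is made cyclic when moving its first control point to the midpoint
# between the first and last points barely changes the first segment's length
# and direction (within the join_stretch_factor tolerances), or when
# "Cyclic Cross" is active and no open U/V selection prevents it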
for i in range(0, len(first_verts)):
# Automatic Join only applies when Cyclic Cross is off
# (and there are at least three strokes)
if self.automatic_join and not self.cyclic_cross and \
selection_type != "TWO_CONNECTED" and len(self.main_splines.data.splines) >= 3:
v = ob_ctrl_pts.data.vertices
first_point_co = v[first_verts[i]].co
second_point_co = v[second_verts[i]].co
last_point_co = v[last_verts[i]].co
# Coordinates of the midpoint between the first and last verts
verts_center_co = [
(first_point_co[0] + last_point_co[0]) / 2,
(first_point_co[1] + last_point_co[1]) / 2,
(first_point_co[2] + last_point_co[2]) / 2
]
vec_A = second_point_co - first_point_co
vec_B = second_point_co - Vector(verts_center_co)
# Calculate the length of the first segment of the loop,
# and the length it would have after moving the first vert
# to the middle position between first and last
length_original = (second_point_co - first_point_co).length
length_target = (second_point_co - Vector(verts_center_co)).length
angle = vec_A.angle(vec_B) / pi
# If the target length doesn't stretch too much, and
# the angle doesn't change too much either
if length_target <= length_original * 1.03 * self.join_stretch_factor and \
angle <= 0.008 * self.join_stretch_factor and not self.selection_U_exists:
cyclic_loops_U.append(True)
# Move the first vert to the center coordinates
ob_ctrl_pts.data.vertices[first_verts[i]].co = verts_center_co
# Select the last verts from Cyclic loops, for later deletion all at once
v[last_verts[i]].select = True
else:
cyclic_loops_U.append(False)
else:
# If "Cyclic Cross" is active then "all" crossing curves become cyclic
if self.cyclic_cross and not self.selection_U_exists and not \
((self.selection_V_exists and not self.selection_V_is_closed) or
(self.selection_V2_exists and not self.selection_V2_is_closed)):
cyclic_loops_U.append(True)
else:
cyclic_loops_U.append(False)
# The cyclic_loops_U list needs to be reversed.
cyclic_loops_U.reverse()
# Delete the previously selected (last_)verts.
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.delete('INVOKE_REGION_WIN', type='VERT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Create curves from control points.
bpy.ops.object.convert('INVOKE_REGION_WIN', target='CURVE', keep_original=False)
ob_curves_surf = bpy.context.scene.objects.active
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.spline_type_set('INVOKE_REGION_WIN', type='BEZIER')
bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type='AUTOMATIC')
# Make Cyclic the splines designated as Cyclic.
for i in range(0, len(cyclic_loops_U)):
ob_curves_surf.data.splines[i].use_cyclic_u = cyclic_loops_U[i]
# Get the coords of all points on the first loop-U, for later comparison with its
# subdivided version, to know which points of the loops-U are crossed by the
# original strokes. The indices will be the same for the other loops-U
if self.loops_on_strokes:
coords_loops_U_control_points = []
for p in ob_ctrl_pts.data.splines[0].bezier_points:
coords_loops_U_control_points.append(["%.4f" % p.co[0], "%.4f" % p.co[1], "%.4f" % p.co[2]])
coords_loops_U_control_points = tuple(coords_loops_U_control_points)
# Calculate number of edges-V in case option "Loops on strokes" is active or inactive
if self.loops_on_strokes and not self.selection_V_exists:
edges_V_count = len(self.main_splines.data.splines) * self.edges_V
else:
edges_V_count = len(edges_proportions_V)
# The Follow precision will vary depending on the number of Follow face-loops
precision_multiplier = round(2 + (edges_V_count / 15))
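# e.g. with 30 edges-V this gives round(2 + 30 / 15) = 4, so the crossing curves
# get 4x the base precision in cuts below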
curve_cuts = bpy.context.scene.bsurfaces.SURFSK_precision * precision_multiplier
# Subdivide the curves
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=curve_cuts)
# The vert position shift introduced by the spline subdivision;
# used later to reorder the spline points
verts_position_shift = curve_cuts + 1
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Reorder the coordinates of each spline's points so the spline starts at the
# position its first point had before the curve was subdivided.
# Also make a new curve object per spline (to handle memory better later)
splines_U_objects = []
for i in range(len(ob_curves_surf.data.splines)):
spline_U_curve = bpy.data.curves.new('SURFSKIO_spline_U_' + str(i), 'CURVE')
ob_spline_U = bpy.data.objects.new('SURFSKIO_spline_U_' + str(i), spline_U_curve)
bpy.context.scene.objects.link(ob_spline_U)
spline_U_curve.dimensions = "3D"
# Add points to the spline in the new curve object
ob_spline_U.data.splines.new('BEZIER')
for t in range(len(ob_curves_surf.data.splines[i].bezier_points)):
if cyclic_loops_U[i] is True and not self.selection_U_exists: # If the loop is cyclic
if t + verts_position_shift <= len(ob_curves_surf.data.splines[i].bezier_points) - 1:
point_index = t + verts_position_shift
else:
point_index = t + verts_position_shift - len(ob_curves_surf.data.splines[i].bezier_points)
else:
point_index = t
# to avoid adding the first point since it's added when the spline is created
if t > 0:
ob_spline_U.data.splines[0].bezier_points.add(1)
ob_spline_U.data.splines[0].bezier_points[t].co = \
ob_curves_surf.data.splines[i].bezier_points[point_index].co
if cyclic_loops_U[i] is True and not self.selection_U_exists: # If the loop is cyclic
# Add a last point at the same location as the first one
ob_spline_U.data.splines[0].bezier_points.add(1)
ob_spline_U.data.splines[0].bezier_points[len(ob_spline_U.data.splines[0].bezier_points) - 1].co = \
ob_spline_U.data.splines[0].bezier_points[0].co
else:
ob_spline_U.data.splines[0].use_cyclic_u = False
splines_U_objects.append(ob_spline_U)
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_spline_U.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_spline_U.name]
# When option "Loops on strokes" is active each "Cross" loop will have
# its own proportions according to where the original strokes "touch" them
if self.loops_on_strokes:
# Get the indices of points where the original strokes "touch" loops-U
points_U_crossed_by_strokes = []
for i in range(len(splines_U_objects[0].data.splines[0].bezier_points)):
bp = splines_U_objects[0].data.splines[0].bezier_points[i]
if ["%.4f" % bp.co[0], "%.4f" % bp.co[1], "%.4f" % bp.co[2]] in coords_loops_U_control_points:
points_U_crossed_by_strokes.append(i)
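# As with the control-point coords above, 4-decimal formatted strings are compared
# instead of raw floats, so the control points survive the subdivision round-trip
# and can still be located on the subdivided splines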
# Make a dictionary with the number of the edge, in the selected chain V, corresponding to each stroke
edge_order_number_for_splines = {}
if self.selection_V_exists:
# For two-connected selections add a first hypothetical stroke at the beginning.
if selection_type == "TWO_CONNECTED":
edge_order_number_for_splines[0] = 0
for i in range(len(self.main_splines.data.splines)):
sp = self.main_splines.data.splines[i]
v_idx, dist_temp = self.shortest_distance(
self.main_object,
sp.bezier_points[0].co,
verts_ordered_V_indices
)
# Get the position (edges count) of the vert v_idx in the selected chain V
edge_idx_in_chain = verts_ordered_V_indices.index(v_idx)
# For two-connected selections the strokes come after the
# hypothetical stroke added above, so the index is offset by one per spline
if selection_type == "TWO_CONNECTED":
spline_number = i + 1
else:
spline_number = i
edge_order_number_for_splines[spline_number] = edge_idx_in_chain
# Get the first and last verts indices for later comparison
if i == 0:
first_v_idx = v_idx
elif i == len(self.main_splines.data.splines) - 1:
last_v_idx = v_idx
if self.selection_V_is_closed:
# If there is no last stroke on the last vertex (same as the first vertex),
# add a hypothetical spline at the last vert's order
if first_v_idx != last_v_idx:
edge_order_number_for_splines[(len(self.main_splines.data.splines) - 1) + 1] = \
len(verts_ordered_V_indices) - 1
else:
if self.cyclic_cross:
edge_order_number_for_splines[len(self.main_splines.data.splines) - 1] = \
len(verts_ordered_V_indices) - 2
edge_order_number_for_splines[(len(self.main_splines.data.splines) - 1) + 1] = \
len(verts_ordered_V_indices) - 1
else:
edge_order_number_for_splines[len(self.main_splines.data.splines) - 1] = \
len(verts_ordered_V_indices) - 1
# Get the coords of the points distributed along the
# "crossing curves", with appropriate proportions-V
surface_splines_parsed = []
for i in range(len(splines_U_objects)):
sp_ob = splines_U_objects[i]
# If "Loops on strokes" option is active, calculate the proportions for each loop-U
if self.loops_on_strokes:
# Segments distances from stroke to stroke
dist = 0
full_dist = 0
segments_distances = []
for t in range(len(sp_ob.data.splines[0].bezier_points)):
bp = sp_ob.data.splines[0].bezier_points[t]
if t == 0:
last_p = bp.co
else:
actual_p = bp.co
dist += (last_p - actual_p).length
if t in points_U_crossed_by_strokes:
segments_distances.append(dist)
full_dist += dist
dist = 0
last_p = actual_p
# Calculate Proportions.
used_edges_proportions_V = []
for t in range(len(segments_distances)):
if self.selection_V_exists:
if t == 0:
order_number_last_stroke = 0
segment_edges_length_V = 0
segment_edges_length_V2 = 0
for order in range(order_number_last_stroke, edge_order_number_for_splines[t + 1]):
segment_edges_length_V += edges_lengths_V[order]
if self.selection_V2_exists:
segment_edges_length_V2 += edges_lengths_V2[order]
for order in range(order_number_last_stroke, edge_order_number_for_splines[t + 1]):
# Calculate each "sub-segment" (the ones between each stroke) length
if self.selection_V2_exists:
proportion_sub_seg = (edges_lengths_V2[order] -
((edges_lengths_V2[order] - edges_lengths_V[order]) /
len(splines_U_objects) * i)) / (segment_edges_length_V2 -
(segment_edges_length_V2 - segment_edges_length_V) /
len(splines_U_objects) * i)
sub_seg_dist = segments_distances[t] * proportion_sub_seg
else:
proportion_sub_seg = edges_lengths_V[order] / segment_edges_length_V
sub_seg_dist = segments_distances[t] * proportion_sub_seg
used_edges_proportions_V.append(sub_seg_dist / full_dist)
order_number_last_stroke = edge_order_number_for_splines[t + 1]
else:
for c in range(self.edges_V):
# Calculate each "sub-segment" (the ones between each stroke) length
sub_seg_dist = segments_distances[t] / self.edges_V
used_edges_proportions_V.append(sub_seg_dist / full_dist)
actual_spline = self.distribute_pts(sp_ob.data.splines, used_edges_proportions_V)
surface_splines_parsed.append(actual_spline[0])
else:
if self.selection_V2_exists:
used_edges_proportions_V = []
for p in range(len(edges_proportions_V)):
used_edges_proportions_V.append(
edges_proportions_V2[p] -
((edges_proportions_V2[p] -
edges_proportions_V[p]) / len(splines_U_objects) * i)
)
else:
used_edges_proportions_V = edges_proportions_V
actual_spline = self.distribute_pts(sp_ob.data.splines, used_edges_proportions_V)
surface_splines_parsed.append(actual_spline[0])
# Set the verts of the first and last splines to the locations
# of the respective verts in the selections
if self.selection_V_exists:
for i in range(0, len(surface_splines_parsed[0])):
surface_splines_parsed[len(surface_splines_parsed) - 1][i] = \
self.main_object.matrix_world * verts_ordered_V[i].co
if selection_type == "TWO_NOT_CONNECTED":
if self.selection_V2_exists:
for i in range(0, len(surface_splines_parsed[0])):
surface_splines_parsed[0][i] = self.main_object.matrix_world * verts_ordered_V2[i].co
# When "Automatic join" option is active (and the selection type is not "TWO_CONNECTED"),
# merge the verts of the tips of the loops when they are "near enough"
if self.automatic_join and selection_type != "TWO_CONNECTED":
# Join the tips of "Follow" loops that are near enough and must be "closed"
if not self.selection_V_exists and len(edges_proportions_U) >= 3:
for i in range(len(surface_splines_parsed[0])):
sp = surface_splines_parsed
loop_segment_dist = (sp[0][i] - sp[1][i]).length
full_loop_dist = loop_segment_dist * self.edges_U
verts_middle_position_co = [
(sp[0][i][0] + sp[len(sp) - 1][i][0]) / 2,
(sp[0][i][1] + sp[len(sp) - 1][i][1]) / 2,
(sp[0][i][2] + sp[len(sp) - 1][i][2]) / 2
]
points_original = []
points_original.append(sp[1][i])
points_original.append(sp[0][i])
points_target = []
points_target.append(sp[1][i])
points_target.append(Vector(verts_middle_position_co))
vec_A = points_original[0] - points_original[1]
vec_B = points_target[0] - points_target[1]
# Guard against zero-length vectors before computing the angle (best-effort fix)
if vec_A.length != 0 and vec_B.length != 0:
angle = vec_A.angle(vec_B) / pi
edge_new_length = (Vector(verts_middle_position_co) - sp[1][i]).length
else:
angle = 0
edge_new_length = 0
# If after moving the verts to the middle point, the segment doesn't stretch too much
if edge_new_length <= loop_segment_dist * 1.5 * \
self.join_stretch_factor and angle < 0.25 * self.join_stretch_factor:
# Avoid joining when the actual loop must be merged with the original mesh
if not (self.selection_U_exists and i == 0) and \
not (self.selection_U2_exists and i == len(surface_splines_parsed[0]) - 1):
# Change the coords of both verts to the middle position
surface_splines_parsed[0][i] = verts_middle_position_co
surface_splines_parsed[len(surface_splines_parsed) - 1][i] = verts_middle_position_co
# Delete the object with control points and the object from the grease pencil conversion
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_ctrl_pts.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_ctrl_pts.name]
bpy.ops.object.delete()
for sp_ob in splines_U_objects:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[sp_ob.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[sp_ob.name]
bpy.ops.object.delete()
# Generate surface
# Get all verts coords
all_surface_verts_co = []
for i in range(0, len(surface_splines_parsed)):
# Get coords of all verts and make a list with them
for pt_co in surface_splines_parsed[i]:
all_surface_verts_co.append(pt_co)
# Define verts for each face
all_surface_faces = []
for i in range(0, len(all_surface_verts_co) - len(surface_splines_parsed[0])):
if ((i + 1) / len(surface_splines_parsed[0]) != int((i + 1) / len(surface_splines_parsed[0]))):
all_surface_faces.append(
[i + 1, i, i + len(surface_splines_parsed[0]),
i + len(surface_splines_parsed[0]) + 1]
)
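# The verts were added row by row (one row per parsed spline), so each quad
# connects verts i and i+1 of one row with the matching verts of the next row.
# The check above skips the last vert of every row: e.g. with rows of 4 verts,
# i = 3 is skipped because (3 + 1) / 4 is an integer, preventing faces that
# would wrap around to the next row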
# Build the mesh
surf_me_name = "SURFSKIO_surface"
me_surf = bpy.data.meshes.new(surf_me_name)
me_surf.from_pydata(all_surface_verts_co, [], all_surface_faces)
me_surf.update()
ob_surface = bpy.data.objects.new(surf_me_name, me_surf)
bpy.context.scene.objects.link(ob_surface)
# Select all the "unselected but participating" verts, from closed selection
# or double selections with middle-vertex, for later join with remove doubles
for v_idx in single_unselected_verts:
self.main_object.data.vertices[v_idx].select = True
# Join the new mesh to the main object
ob_surface.select = True
self.main_object.select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.join('INVOKE_REGION_WIN')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.mesh.remove_doubles('INVOKE_REGION_WIN', threshold=0.0001)
bpy.ops.mesh.normals_make_consistent('INVOKE_REGION_WIN', inside=False)
bpy.ops.mesh.select_all('INVOKE_REGION_WIN', action='DESELECT')
return{'FINISHED'}
def execute(self, context):
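# Overview (derived from the code below): rebuild the stroke splines from the
# saved coordinates, smooth their handles (Vector first, then Automatic), then
# run either crosshatch_surface_execute() or rectangular_surface() depending on
# what invoke() detected, and finally delete the temporary spline object and
# restore the global undo setting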
bpy.context.user_preferences.edit.use_global_undo = False
if not self.is_fill_faces:
bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode',
value='True, False, False')
# Build splines from the "last saved splines".
last_saved_curve = bpy.data.curves.new('SURFSKIO_last_crv', 'CURVE')
self.main_splines = bpy.data.objects.new('SURFSKIO_last_crv', last_saved_curve)
bpy.context.scene.objects.link(self.main_splines)
last_saved_curve.dimensions = "3D"
for sp in self.last_strokes_splines_coords:
spline = self.main_splines.data.splines.new('BEZIER')
# add one point fewer, because the spline already has one point when created
spline.bezier_points.add(len(sp) - 1)
for p in range(0, len(sp)):
spline.bezier_points[p].co = [sp[p][0], sp[p][1], sp[p][2]]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_splines.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_splines.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='SELECT')
# Important to set the handles to Vector first and then Automatic, otherwise the
# tips' handles get too big and distort the shrinkwrap results later
bpy.ops.curve.handle_type_set(type='VECTOR')
bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type='AUTOMATIC')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.main_splines.name = "SURFSKIO_temp_strokes"
if self.is_crosshatch:
strokes_for_crosshatch = True
strokes_for_rectangular_surface = False
else:
strokes_for_rectangular_surface = True
strokes_for_crosshatch = False
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
if strokes_for_rectangular_surface:
self.rectangular_surface()
elif strokes_for_crosshatch:
self.crosshatch_surface_execute()
# Delete main splines
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_splines.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_splines.name]
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.context.user_preferences.edit.use_global_undo = self.initial_global_undo_state
return{'FINISHED'}
def invoke(self, context, event):
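# Overview (derived from the code below): read the operator settings, convert the
# grease pencil strokes to a temporary curve (or duplicate an external curve),
# drop single-point splines, subdivide splines that have too few points, detect
# whether the strokes form a crosshatch, and store the stroke coordinates that
# execute() will use to build the surface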
self.initial_global_undo_state = bpy.context.user_preferences.edit.use_global_undo
self.main_object = bpy.context.scene.objects.active
self.main_object_selected_verts_count = int(self.main_object.data.total_vert_sel)
bpy.context.user_preferences.edit.use_global_undo = False
bpy.ops.wm.context_set_value(data_path='tool_settings.mesh_select_mode',
value='True, False, False')
# Toggle out of Edit Mode and back in, to make sure the current mesh selection is picked up
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bsurfaces_props = bpy.context.scene.bsurfaces
self.cyclic_cross = bsurfaces_props.SURFSK_cyclic_cross
self.cyclic_follow = bsurfaces_props.SURFSK_cyclic_follow
self.automatic_join = bsurfaces_props.SURFSK_automatic_join
self.loops_on_strokes = bsurfaces_props.SURFSK_loops_on_strokes
self.keep_strokes = bsurfaces_props.SURFSK_keep_strokes
self.edges_U = 5
if self.loops_on_strokes:
self.edges_V = 1
else:
self.edges_V = 5
self.is_fill_faces = False
self.stopping_errors = False
self.last_strokes_splines_coords = []
# Determine the type of the strokes
self.strokes_type = get_strokes_type(self.main_object)
# Check whether grease pencil strokes or curves will be used
# If there are strokes to be used
if self.strokes_type == "GP_STROKES" or self.strokes_type == "EXTERNAL_CURVE":
if self.strokes_type == "GP_STROKES":
# Convert grease pencil strokes to curve
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.gpencil.convert('INVOKE_REGION_WIN', type='CURVE', use_link_strokes=False)
# XXX gpencil.convert now keeps the original object as active/selected, *not* the newly created curve!
# XXX This is far from perfect, but should work in most cases...
# self.original_curve = bpy.context.object
for ob in bpy.context.selected_objects:
if ob != bpy.context.scene.objects.active and ob.name.startswith("GP_Layer"):
self.original_curve = ob
self.using_external_curves = False
elif self.strokes_type == "EXTERNAL_CURVE":
for ob in bpy.context.selected_objects:
if ob != bpy.context.scene.objects.active:
self.original_curve = ob
self.using_external_curves = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Make sure there are no objects left from erroneous
# executions of this operator, with the reserved names used here
for o in bpy.data.objects:
if o.name.find("SURFSKIO_") != -1:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[o.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[o.name]
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.original_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.original_curve.name]
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
self.temporary_curve = bpy.context.scene.objects.active
# Deselect all points of the curve
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Delete splines with only a single isolated point
for i in range(len(self.temporary_curve.data.splines)):
sp = self.temporary_curve.data.splines[i]
if len(sp.bezier_points) == 1:
sp.bezier_points[0].select_control_point = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.delete(type='VERT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.temporary_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.temporary_curve.name]
# Set a minimum number of points for crosshatch
minimum_points_num = 15
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Check that each curve has at least minimum_points_num points,
# which is a bit more than the face-loops limit.
# If not, subdivide to reach at least that number of points
for i in range(len(self.temporary_curve.data.splines)):
sp = self.temporary_curve.data.splines[i]
if len(sp.bezier_points) < minimum_points_num:
for bp in sp.bezier_points:
bp.select_control_point = True
if (len(sp.bezier_points) - 1) != 0:
# Formula to get the number of cuts that will bring a curve
# of N points close to "minimum_points_num" points
# when subdividing with this number of cuts
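# e.g. a 4-point spline with minimum_points_num = 15 gets
# int((15 - 4) / (4 - 1)) + 1 = 4 cuts, which yields 4 + 3 * 4 = 16 points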
subdivide_cuts = int(
(minimum_points_num - len(sp.bezier_points)) /
(len(sp.bezier_points) - 1)
) + 1
else:
subdivide_cuts = 0
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=subdivide_cuts)
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Detect if the strokes are a crosshatch and do it if it is
self.crosshatch_surface_invoke(self.temporary_curve)
if not self.is_crosshatch:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.temporary_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.temporary_curve.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Set a minimum number of points for rectangular surfaces
minimum_points_num = 60
# Check if the number of points of each curve has at least the number of points
# of minimum_points_num, which is a bit more than the face-loops limit.
# If not, subdivide to reach at least that number of points
for i in range(len(self.temporary_curve.data.splines)):
sp = self.temporary_curve.data.splines[i]
if len(sp.bezier_points) < minimum_points_num:
for bp in sp.bezier_points:
bp.select_control_point = True
if (len(sp.bezier_points) - 1) != 0:
# Formula to get the number of cuts that will make a curve of
# N number of points have near to "minimum_points_num" points,
# when subdividing with this number of cuts
subdivide_cuts = int(
(minimum_points_num - len(sp.bezier_points)) /
(len(sp.bezier_points) - 1)
) + 1
else:
subdivide_cuts = 0
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=subdivide_cuts)
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Save coordinates of the actual strokes (as the "last saved splines")
for sp_idx in range(len(self.temporary_curve.data.splines)):
self.last_strokes_splines_coords.append([])
for bp_idx in range(len(self.temporary_curve.data.splines[sp_idx].bezier_points)):
coords = self.temporary_curve.matrix_world * \
self.temporary_curve.data.splines[sp_idx].bezier_points[bp_idx].co
self.last_strokes_splines_coords[sp_idx].append([coords[0], coords[1], coords[2]])
# Check for cyclic splines, put the first and last points in the middle of their actual positions
for sp_idx in range(len(self.temporary_curve.data.splines)):
if self.temporary_curve.data.splines[sp_idx].use_cyclic_u is True:
first_p_co = self.last_strokes_splines_coords[sp_idx][0]
last_p_co = self.last_strokes_splines_coords[sp_idx][
len(self.last_strokes_splines_coords[sp_idx]) - 1
]
target_co = [
(first_p_co[0] + last_p_co[0]) / 2,
(first_p_co[1] + last_p_co[1]) / 2,
(first_p_co[2] + last_p_co[2]) / 2
]
self.last_strokes_splines_coords[sp_idx][0] = target_co
self.last_strokes_splines_coords[sp_idx][
len(self.last_strokes_splines_coords[sp_idx]) - 1
] = target_co
self.last_strokes_splines_coords = tuple(self.last_strokes_splines_coords)
# Estimation of the average length of the segments between
# each point of the grease pencil strokes.
# Will be useful to determine whether a curve should be made "Cyclic"
segments_lengths_sum = 0
segments_count = 0
random_spline = self.temporary_curve.data.splines[0].bezier_points
for i in range(0, len(random_spline)):
if i != 0 and len(random_spline) - 1 >= i:
segments_lengths_sum += (random_spline[i - 1].co - random_spline[i].co).length
segments_count += 1
self.average_gp_segment_length = segments_lengths_sum / segments_count
# Delete temporary strokes curve object
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.temporary_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.temporary_curve.name]
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.execute(context)
# Set again since "execute()" will turn it again to its initial value
bpy.context.user_preferences.edit.use_global_undo = False
# If "Keep strokes" option is not active, delete original strokes curve object
if (not self.stopping_errors and not self.keep_strokes) or self.is_crosshatch:
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.original_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.original_curve.name]
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Delete grease pencil strokes
if self.strokes_type == "GP_STROKES" and not self.stopping_errors:
bpy.ops.gpencil.active_frame_delete('INVOKE_REGION_WIN')
bpy.context.user_preferences.edit.use_global_undo = self.initial_global_undo_state
if not self.stopping_errors:
return {"FINISHED"}
else:
return{"CANCELLED"}
elif self.strokes_type == "SELECTION_ALONE":
self.is_fill_faces = True
created_faces_count = self.fill_with_faces(self.main_object)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.context.user_preferences.edit.use_global_undo = self.initial_global_undo_state
if created_faces_count == 0:
self.report({'WARNING'}, "There aren't any strokes attatched to the object")
return {"CANCELLED"}
else:
return {"FINISHED"}
bpy.context.user_preferences.edit.use_global_undo = self.initial_global_undo_state
if self.strokes_type == "EXTERNAL_NO_CURVE":
self.report({'WARNING'}, "The secondary object is not a Curve.")
return{"CANCELLED"}
elif self.strokes_type == "MORE_THAN_ONE_EXTERNAL":
self.report({'WARNING'}, "There shouldn't be more than one secondary object selected.")
return{"CANCELLED"}
elif self.strokes_type == "SINGLE_GP_STROKE_NO_SELECTION" or \
self.strokes_type == "SINGLE_CURVE_STROKE_NO_SELECTION":
self.report({'WARNING'}, "It's needed at least one stroke and one selection, or two strokes.")
return{"CANCELLED"}
elif self.strokes_type == "NO_STROKES":
self.report({'WARNING'}, "There aren't any strokes attatched to the object")
return{"CANCELLED"}
elif self.strokes_type == "CURVE_WITH_NON_BEZIER_SPLINES":
self.report({'WARNING'}, "All splines must be Bezier.")
return{"CANCELLED"}
else:
return{"CANCELLED"}
# Edit strokes operator
class GPENCIL_OT_SURFSK_edit_strokes(Operator):
bl_idname = "gpencil.surfsk_edit_strokes"
bl_label = "Bsurfaces edit strokes"
bl_description = "Edit the grease pencil strokes or curves used"
def execute(self, context):
# Determine the type of the strokes
self.strokes_type = get_strokes_type(self.main_object)
# Check if strokes are grease pencil strokes or a curves object
selected_objs = bpy.context.selected_objects
if self.strokes_type == "EXTERNAL_CURVE" or self.strokes_type == "SINGLE_CURVE_STROKE_NO_SELECTION":
for ob in selected_objs:
if ob != bpy.context.scene.objects.active:
curve_ob = ob
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[curve_ob.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[curve_ob.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
elif self.strokes_type == "GP_STROKES" or self.strokes_type == "SINGLE_GP_STROKE_NO_SELECTION":
# Convert grease pencil strokes to curve
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.gpencil.convert('INVOKE_REGION_WIN', type='CURVE', use_link_strokes=False)
for ob in bpy.context.selected_objects:
if ob != bpy.context.scene.objects.active and ob.name.startswith("GP_Layer"):
ob_gp_strokes = ob
# ob_gp_strokes = bpy.context.object
# Delete grease pencil strokes
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_object.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_object.name]
bpy.ops.gpencil.active_frame_delete('INVOKE_REGION_WIN')
# Clean up curves
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[ob_gp_strokes.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[ob_gp_strokes.name]
curve_crv = ob_gp_strokes.data
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.spline_type_set('INVOKE_REGION_WIN', type="BEZIER")
bpy.ops.curve.handle_type_set('INVOKE_REGION_WIN', type="AUTOMATIC")
bpy.data.curves[curve_crv.name].show_handles = False
bpy.data.curves[curve_crv.name].show_normal_face = False
elif self.strokes_type == "EXTERNAL_NO_CURVE":
self.report({'WARNING'}, "The secondary object is not a Curve.")
return{"CANCELLED"}
elif self.strokes_type == "MORE_THAN_ONE_EXTERNAL":
self.report({'WARNING'}, "There shouldn't be more than one secondary object selected.")
return{"CANCELLED"}
elif self.strokes_type == "NO_STROKES" or self.strokes_type == "SELECTION_ALONE":
self.report({'WARNING'}, "There aren't any strokes attatched to the object")
return{"CANCELLED"}
else:
return{"CANCELLED"}
def invoke(self, context, event):
self.main_object = bpy.context.object
self.execute(context)
return {"FINISHED"}
class CURVE_OT_SURFSK_reorder_splines(Operator):
bl_idname = "curve.surfsk_reorder_splines"
bl_label = "Bsurfaces reorder splines"
bl_description = "Defines the order of the splines by using grease pencil strokes"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
objects_to_delete = []
# Convert grease pencil strokes to curve.
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.gpencil.convert('INVOKE_REGION_WIN', type='CURVE', use_link_strokes=False)
for ob in bpy.context.selected_objects:
if ob != bpy.context.scene.objects.active and ob.name.startswith("GP_Layer"):
GP_strokes_curve = ob
# GP_strokes_curve = bpy.context.object
objects_to_delete.append(GP_strokes_curve)
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[GP_strokes_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[GP_strokes_curve.name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='SELECT')
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=100)
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
GP_strokes_mesh = bpy.context.object
objects_to_delete.append(GP_strokes_mesh)
GP_strokes_mesh.data.resolution_u = 1
bpy.ops.object.convert(target='MESH', keep_original=False)
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_curve.name]
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
curves_duplicate_1 = bpy.context.object
objects_to_delete.append(curves_duplicate_1)
minimum_points_num = 500
# Some iterations since the subdivision operator
# has a limit of 100 subdivisions per iteration
for x in range(round(minimum_points_num / 100)):
# Check if the number of points of each curve has at least the number of points
# of minimum_points_num. If not, subdivide to reach at least that number of points
for i in range(len(curves_duplicate_1.data.splines)):
sp = curves_duplicate_1.data.splines[i]
if len(sp.bezier_points) < minimum_points_num:
for bp in sp.bezier_points:
bp.select_control_point = True
if (len(sp.bezier_points) - 1) != 0:
# Formula to get the number of cuts that will make a curve of N
# number of points have near to "minimum_points_num" points,
# when subdividing with this number of cuts
subdivide_cuts = int(
(minimum_points_num - len(sp.bezier_points)) /
(len(sp.bezier_points) - 1)
) + 1
else:
subdivide_cuts = 0
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.subdivide('INVOKE_REGION_WIN', number_cuts=subdivide_cuts)
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.object.duplicate('INVOKE_REGION_WIN')
curves_duplicate_2 = bpy.context.object
objects_to_delete.append(curves_duplicate_2)
# Duplicate the duplicate and add Shrinkwrap to it, with the grease pencil strokes curve as target
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[curves_duplicate_2.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[curves_duplicate_2.name]
bpy.ops.object.modifier_add('INVOKE_REGION_WIN', type='SHRINKWRAP')
curves_duplicate_2.modifiers["Shrinkwrap"].wrap_method = "NEAREST_VERTEX"
curves_duplicate_2.modifiers["Shrinkwrap"].target = GP_strokes_mesh
bpy.ops.object.modifier_apply('INVOKE_REGION_WIN', apply_as='DATA', modifier='Shrinkwrap')
# Get the distance of each vert from its original position to its position with Shrinkwrap
nearest_points_coords = {}
for st_idx in range(len(curves_duplicate_1.data.splines)):
for bp_idx in range(len(curves_duplicate_1.data.splines[st_idx].bezier_points)):
bp_1_co = curves_duplicate_1.matrix_world * \
curves_duplicate_1.data.splines[st_idx].bezier_points[bp_idx].co
bp_2_co = curves_duplicate_2.matrix_world * \
curves_duplicate_2.data.splines[st_idx].bezier_points[bp_idx].co
if bp_idx == 0:
shortest_dist = (bp_1_co - bp_2_co).length
nearest_points_coords[st_idx] = ("%.4f" % bp_2_co[0],
"%.4f" % bp_2_co[1],
"%.4f" % bp_2_co[2])
dist = (bp_1_co - bp_2_co).length
if dist < shortest_dist:
nearest_points_coords[st_idx] = ("%.4f" % bp_2_co[0],
"%.4f" % bp_2_co[1],
"%.4f" % bp_2_co[2])
shortest_dist = dist
# Get all coords of GP strokes points, for comparison
GP_strokes_coords = []
for st_idx in range(len(GP_strokes_curve.data.splines)):
GP_strokes_coords.append(
[("%.4f" % x if "%.4f" % x != "-0.00" else "0.00",
"%.4f" % y if "%.4f" % y != "-0.00" else "0.00",
"%.4f" % z if "%.4f" % z != "-0.00" else "0.00") for
x, y, z in [bp.co for bp in GP_strokes_curve.data.splines[st_idx].bezier_points]]
)
# Check the point of the GP strokes with the same coords as
# the nearest points of the curves (with shrinkwrap)
# Dictionary with GP stroke index as index, and a list as value.
# The list has as index the point index of the GP stroke
# nearest to the spline, and as value the spline index
GP_connection_points = {}
for gp_st_idx in range(len(GP_strokes_coords)):
GPvert_spline_relationship = {}
for splines_st_idx in range(len(nearest_points_coords)):
if nearest_points_coords[splines_st_idx] in GP_strokes_coords[gp_st_idx]:
GPvert_spline_relationship[
GP_strokes_coords[gp_st_idx].index(nearest_points_coords[splines_st_idx])
] = splines_st_idx
GP_connection_points[gp_st_idx] = GPvert_spline_relationship
# Get the splines new order
splines_new_order = []
for i in GP_connection_points:
dict_keys = sorted(GP_connection_points[i].keys()) # Sort dictionaries by key
for k in dict_keys:
splines_new_order.append(GP_connection_points[i][k])
# Reorder
curve_original_name = self.main_curve.name
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[self.main_curve.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[self.main_curve.name]
self.main_curve.name = "SURFSKIO_CRV_ORD"
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
for sp_idx in range(len(self.main_curve.data.splines)):
self.main_curve.data.splines[0].bezier_points[0].select_control_point = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.separate('EXEC_REGION_WIN')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
# Get the names of the separated splines objects in the original order
splines_unordered = {}
for o in bpy.data.objects:
if o.name.find("SURFSKIO_CRV_ORD") != -1:
spline_order_string = o.name.partition(".")[2]
if spline_order_string != "" and int(spline_order_string) > 0:
spline_order_index = int(spline_order_string) - 1
splines_unordered[spline_order_index] = o.name
# Join all splines objects in final order
for order_idx in splines_new_order:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[splines_unordered[order_idx]].select = True
bpy.data.objects["SURFSKIO_CRV_ORD"].select = True
bpy.context.scene.objects.active = bpy.data.objects["SURFSKIO_CRV_ORD"]
bpy.ops.object.join('INVOKE_REGION_WIN')
# Go back to the original name of the curves object.
bpy.context.object.name = curve_original_name
# Delete all unused objects
for o in objects_to_delete:
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[o.name].select = True
bpy.context.scene.objects.active = bpy.data.objects[o.name]
bpy.ops.object.delete()
bpy.ops.object.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.data.objects[curve_original_name].select = True
bpy.context.scene.objects.active = bpy.data.objects[curve_original_name]
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.gpencil.active_frame_delete('INVOKE_REGION_WIN')
return {"FINISHED"}
def invoke(self, context, event):
self.main_curve = bpy.context.object
there_are_GP_strokes = False
try:
# Get the active grease pencil layer
strokes_num = len(self.main_curve.grease_pencil.layers.active.active_frame.strokes)
if strokes_num > 0:
there_are_GP_strokes = True
except Exception:
pass
if there_are_GP_strokes:
self.execute(context)
self.report({'INFO'}, "Splines have been reordered")
else:
self.report({'WARNING'}, "Draw grease pencil strokes to connect splines")
return {"FINISHED"}
class CURVE_OT_SURFSK_first_points(Operator):
bl_idname = "curve.surfsk_first_points"
bl_label = "Bsurfaces set first points"
bl_description = "Set the selected points as the first point of each spline"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
splines_to_invert = []
# Check non-cyclic splines to invert
for i in range(len(self.main_curve.data.splines)):
b_points = self.main_curve.data.splines[i].bezier_points
if i not in self.cyclic_splines: # Only for non-cyclic splines
if b_points[len(b_points) - 1].select_control_point:
splines_to_invert.append(i)
# Reorder points of cyclic splines, and set all handles to "Automatic"
# Check first selected point
cyclic_splines_new_first_pt = {}
for i in self.cyclic_splines:
sp = self.main_curve.data.splines[i]
for t in range(len(sp.bezier_points)):
bp = sp.bezier_points[t]
if bp.select_control_point or bp.select_right_handle or bp.select_left_handle:
cyclic_splines_new_first_pt[i] = t
break # To take only one if there are more
# Reorder
for spline_idx in cyclic_splines_new_first_pt:
sp = self.main_curve.data.splines[spline_idx]
spline_old_coords = []
for bp_old in sp.bezier_points:
coords = (bp_old.co[0], bp_old.co[1], bp_old.co[2])
left_handle_type = str(bp_old.handle_left_type)
left_handle_length = float(bp_old.handle_left.length)
left_handle_xyz = (
float(bp_old.handle_left.x),
float(bp_old.handle_left.y),
float(bp_old.handle_left.z)
)
right_handle_type = str(bp_old.handle_right_type)
right_handle_length = float(bp_old.handle_right.length)
right_handle_xyz = (
float(bp_old.handle_right.x),
float(bp_old.handle_right.y),
float(bp_old.handle_right.z)
)
spline_old_coords.append(
[coords, left_handle_type,
right_handle_type, left_handle_length,
right_handle_length, left_handle_xyz,
right_handle_xyz]
)
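# Rotate the saved coordinates so the chosen point becomes the spline's first point.
# Illustrative example: with 6 points and the new first point at index 2,
# point 0 takes the data of old point 3, and indices past the end wrap back to 0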
for t in range(len(sp.bezier_points)):
bp = sp.bezier_points
if t + cyclic_splines_new_first_pt[spline_idx] + 1 <= len(bp) - 1:
new_index = t + cyclic_splines_new_first_pt[spline_idx] + 1
else:
new_index = t + cyclic_splines_new_first_pt[spline_idx] + 1 - len(bp)
bp[t].co = Vector(spline_old_coords[new_index][0])
bp[t].handle_left.length = spline_old_coords[new_index][3]
bp[t].handle_right.length = spline_old_coords[new_index][4]
bp[t].handle_left_type = "FREE"
bp[t].handle_right_type = "FREE"
bp[t].handle_left.x = spline_old_coords[new_index][5][0]
bp[t].handle_left.y = spline_old_coords[new_index][5][1]
bp[t].handle_left.z = spline_old_coords[new_index][5][2]
bp[t].handle_right.x = spline_old_coords[new_index][6][0]
bp[t].handle_right.y = spline_old_coords[new_index][6][1]
bp[t].handle_right.z = spline_old_coords[new_index][6][2]
bp[t].handle_left_type = spline_old_coords[new_index][1]
bp[t].handle_right_type = spline_old_coords[new_index][2]
# Invert the non-cyclic splines designated above
for i in range(len(splines_to_invert)):
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
self.main_curve.data.splines[splines_to_invert[i]].bezier_points[0].select_control_point = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
bpy.ops.curve.switch_direction()
bpy.ops.curve.select_all('INVOKE_REGION_WIN', action='DESELECT')
# Keep selected the first vert of each spline
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
for i in range(len(self.main_curve.data.splines)):
if not self.main_curve.data.splines[i].use_cyclic_u:
bp = self.main_curve.data.splines[i].bezier_points[0]
else:
bp = self.main_curve.data.splines[i].bezier_points[
len(self.main_curve.data.splines[i].bezier_points) - 1
]
bp.select_control_point = True
bp.select_right_handle = True
bp.select_left_handle = True
bpy.ops.object.editmode_toggle('INVOKE_REGION_WIN')
return {'FINISHED'}
def invoke(self, context, event):
self.main_curve = bpy.context.object
# Check if all curves are Bezier, and detect which ones are cyclic
self.cyclic_splines = []
for i in range(len(self.main_curve.data.splines)):
if self.main_curve.data.splines[i].type != "BEZIER":
self.report({'WARNING'}, "All splines must be Bezier type")
return {'CANCELLED'}
else:
if self.main_curve.data.splines[i].use_cyclic_u:
self.cyclic_splines.append(i)
self.execute(context)
self.report({'INFO'}, "First points have been set")
return {'FINISHED'}
# Add-ons Preferences Update Panel
# Define Panel classes for updating
panels = (
VIEW3D_PT_tools_SURFSK_mesh,
VIEW3D_PT_tools_SURFSK_curve,
)
def update_panel(self, context):
message = "Bsurfaces GPL Edition: Updating Panel locations has failed"
try:
for panel in panels:
if "bl_rna" in panel.__dict__:
bpy.utils.unregister_class(panel)
for panel in panels:
panel.bl_category = context.user_preferences.addons[__name__].preferences.category
bpy.utils.register_class(panel)
except Exception as e:
print("\n[{}]\n{}\n\nError:\n{}".format(__name__, message, e))
pass
class BsurfPreferences(AddonPreferences):
# this must match the addon name, use '__package__'
# when defining this in a submodule of a python package.
bl_idname = __name__
category = StringProperty(
name="Tab Category",
description="Choose a name for the category of the panel",
default="Tools",
update=update_panel
)
def draw(self, context):
layout = self.layout
row = layout.row()
col = row.column()
col.label(text="Tab Category:")
col.prop(self, "category", text="")
# Properties
class BsurfacesProps(PropertyGroup):
SURFSK_cyclic_cross = BoolProperty(
name="Cyclic Cross",
description="Make cyclic the face-loops crossing the strokes",
default=False
)
SURFSK_cyclic_follow = BoolProperty(
name="Cyclic Follow",
description="Make cyclic the face-loops following the strokes",
default=False
)
SURFSK_keep_strokes = BoolProperty(
name="Keep strokes",
description="Keeps the sketched strokes or curves after adding the surface",
default=False
)
SURFSK_automatic_join = BoolProperty(
name="Automatic join",
description="Join automatically vertices of either surfaces "
"generated by crosshatching, or from the borders of closed shapes",
default=True
)
SURFSK_loops_on_strokes = BoolProperty(
name="Loops on strokes",
description="Make the loops match the paths of the strokes",
default=True
)
SURFSK_precision = IntProperty(
name="Precision",
description="Precision level of the surface calculation",
default=2,
min=1,
max=100
)
classes = (
VIEW3D_PT_tools_SURFSK_mesh,
VIEW3D_PT_tools_SURFSK_curve,
GPENCIL_OT_SURFSK_add_surface,
GPENCIL_OT_SURFSK_edit_strokes,
CURVE_OT_SURFSK_reorder_splines,
CURVE_OT_SURFSK_first_points,
BsurfPreferences,
BsurfacesProps,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.bsurfaces = PointerProperty(type=BsurfacesProps)
update_panel(None, bpy.context)
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
del bpy.types.Scene.bsurfaces
if __name__ == "__main__":
register()
|
PypiClean
|
/engines/veusz_engine.py
|
from .bot_engine import BotEngine
from .plotter_utils import get_line_color, get_line_type, get_marker_type
from dataclasses import dataclass, field
from typing import Any, List, Union
import numpy as np
import os, sys, re
try:
import veusz.embed as veusz
from veusz.embed import Embedded
except Exception as e:
Embedded = object
print(f"Veusz is not available: {e}")
try: from storer import Storer
except Exception: print("Storer is not available. Aborting..."); sys.exit(1)
class VeuszEngineError(Exception):
pass
@dataclass
class VeuszEngine(BotEngine):
internal_name : str = "[VeuszEngine]"
#
g : Embedded = None
title : str = field(default="Notitle")
pages_info : dict = field(default_factory=dict)
_xy : Any = None # flag for animation
#
showkey : bool = True
keyBorderHide : bool = True
keyFontSize : int = 14
plotLine : bool = True
#
xname : str = "x"
yname : str = "y"
xlog : bool = False
ylog : bool = False
ymin : str = "Auto"
ymax : str = "Auto"
xmin : str = "Auto"
xmax : str = "Auto"
#
transparency : float = 50.0
def __post_init__(self):
self.storer = Storer(exit_dump=False)
self.g = veusz.Embedded(name=self.title, hidden=self.hidden)
self.g.EnableToolbar()
self.init_pages()
def _init(self, page_name=""):
# creating initial values for plotting per page.
self.storer.put(what="xname", name=page_name+"/xname")
self.storer.put(what="yname", name=page_name+"/yname")
self.storer.put(what=False, name=page_name+"/xlog")
self.storer.put(what=False, name=page_name+"/ylog")
self.storer.put(what="Auto", name=page_name+"/xmin")
self.storer.put(what="Auto", name=page_name+"/xmax")
self.storer.put(what="Auto", name=page_name+"/ymin")
self.storer.put(what="Auto", name=page_name+"/ymax")
def init_pages(self):
if self.pages_info:
for page in self.pages_info:
self._init(page_name=page)
for prop in self.pages_info[page]:
self.storer.put(what=self.pages_info[page][prop] , name=page+"/"+prop)
else:
self._init(page_name="page1")
self.storer.put(what=self.xname , name="page1/xname")
self.storer.put(what=self.yname , name="page1/yname")
self.storer.put(what=self.xlog , name="page1/xlog")
self.storer.put(what=self.ylog , name="page1/ylog")
self.storer.put(what=self.xmin , name="page1/xmin")
self.storer.put(what=self.xmax , name="page1/xmax")
self.storer.put(what=self.ymax , name="page1/ymax")
self.storer.put(what=self.ymin , name="page1/ymin")
def get_page(self, name="page1"):
try:
self.page = self.g.Root[name]
_num_lines = self.storer.get(name=name+ "/_num_lines")
__num_lines = self.storer.get(name=name+"/__num_lines") # if save_previous_state is applied
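# Inferred from plot() below: _num_lines indexes the xy widgets/datasets added to
# this page, while __num_lines keeps increasing even when save_previous_state
# rewinds _num_lines, so generated dataset names stay unique between calls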
except KeyError:
self.page = self.g.Root.Add("page")
self.page.Rename(name)
__num_lines = 1; _num_lines = 1
self.storer.put(what=_num_lines, name=name+ "/_num_lines")
self.storer.put(what=__num_lines, name=name+ "/__num_lines")
self.page.width.val = '15cm'
self.page.height.val = '10cm'
try: self.graph = self.g.Root[name + '/graph1']
except Exception: self.graph = self.page.Add('graph')
try:
# key exist
self.key = self.g.Root[name + "/graph1/key1"]
except Exception:
if self.showkey:
self.graph.Add('key')
self.graph.key1.Border.hide.val = self.keyBorderHide
self.graph.key1.Text.size.val = f"{str(self.keyFontSize)}pt"
return _num_lines, __num_lines
def plot(
self,
x : List,
y : List,
key_name_f : str = "",
key_name : str = "",
markersize : str = "2.5pt",
plotLine : bool = True,
color_num : Union[str, int] = "auto",
marker_type : Union[str, int] = "auto",
line_type : Union[str, int] = "auto",
save_previous_state: bool = False,
animation : bool = False,
errorStyle : str = None,
internal_text : str = "",
page : str = "page1",
):
_num_lines, __num_lines = self.get_page(name=page)
if animation:
color_num = _num_lines
line_type = _num_lines
save_previous_state = True
xy = self._xy
if save_previous_state: _num_lines -= 1
if color_num == "auto": color_num = _num_lines
if line_type == "auto": line_type = _num_lines
if not animation:
x_dataname = self.xname + str(_num_lines) + str(save_previous_state) + str(__num_lines) + str(page)
y_dataname = self.yname + str(_num_lines) + str(save_previous_state) + str(__num_lines) + str(page)
else:
x_dataname = self.xname + str(_num_lines) + str(save_previous_state) + str(page)
y_dataname = self.yname + str(_num_lines) + str(save_previous_state) + str(page)
x_dataname += internal_text
y_dataname += internal_text
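# Illustrative example: with xname="x", _num_lines=1, save_previous_state=False,
# __num_lines=1, page="page1" and empty internal_text, x_dataname becomes "x1False1page1"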
if len(np.shape(x)) == 2:
x_arr = np.array(x)
x_data, x_data_err = x_arr[:,0], x_arr[:,1]
self.g.SetData(x_dataname, x_data, symerr=x_data_err)
else:
x_arr = np.array(x)
x_data = x_arr
self.g.SetData(x_dataname, x_data)
if len(np.shape(y)) == 2:
y_arr = np.array(y)
y_data, y_data_err = y_arr[:,0], y_arr[:,1]
self.g.SetData(y_dataname, y_data, symerr=y_data_err)
else:
y_arr = np.array(y)
y_data = y_arr
self.g.SetData(y_dataname, y_data)
# self.graph = self.g.Root[name + '/graph1']
if animation:
if not self._xy: self._xy = xy = self.g.Root[page + '/graph1'].Add('xy')
else: xy = self.g.Root[page + '/graph1'].Add('xy')
# nn.plotter_progress.g.Root.xyz_file.graph1.xy1.Clone(nn.plotter_progress.g.Root.xyz_file.graph1, 'xy7')
xy.xData.val = x_dataname
xy.yData.val = y_dataname
if marker_type != "auto": xy.marker.val = get_marker_type(marker_type)
else: xy.marker.val = get_marker_type(line_type)
if color_num % 2: xy.MarkerFill.color.val = get_line_color(color_num)
else: xy.MarkerFill.color.val = 'white'
xy.MarkerLine.color.val = get_line_color(color_num)
xy.markerSize.val = markersize
xy.PlotLine.width.val = '1pt'
xy.PlotLine.style.val = get_line_type(line_type)
xy.PlotLine.color.val = get_line_color(color_num)
xy.PlotLine.hide.val = not plotLine
if errorStyle:
xy.errorStyle.val = errorStyle
xy.FillBelow.color.val = get_line_color(color_num)
xy.FillBelow.transparency.val = self.transparency
xy.FillAbove.color.val = get_line_color(color_num)
xy.FillAbove.transparency.val = self.transparency
#ErrorBarLine/style
xy.ErrorBarLine.color.val = get_line_color(color_num)
xy.ErrorBarLine.style.val = get_line_type(line_type)
else:
xy.errorStyle.val = 'none'
xy.ErrorBarLine.width.val = '1pt'
xy.ErrorBarLine.color.val = get_line_color(color_num)
if self.showkey and key_name_f: xy.key.val = self.name_converter(key_name_f)
if self.showkey and key_name: xy.key.val = key_name
x_axis = self.graph.x
y_axis = self.graph.y
x_axis.label.val = self.storer.get(page+"/xname") # self.xname
y_axis.label.val = self.storer.get(page+"/yname") # self.yname
x_axis.log.val = self.storer.get(page+"/xlog") # self.xlog
y_axis.log.val = self.storer.get(page+"/ylog") # self.ylog
x_axis.min.val = self.storer.get(page+"/xmin") # self.xmin
x_axis.max.val = self.storer.get(page+"/xmax") # self.xmax
y_axis.min.val = self.storer.get(page+"/ymin") # self.ymin
y_axis.max.val = self.storer.get(page+"/ymax") # self.ymax
_num_lines += 1
__num_lines += 1
self.storer.put(_num_lines, name=page+ "/_num_lines")
self.storer.put(__num_lines, name=page+ "/__num_lines")
def export(self, filename:str = "output.pdf", extension:str = "pdf", color:bool = True, page:int = 0, dpi:int = 100, antialias:bool = True, quality:int = 85, backcolor:str = '#ffffff00', pdfdpi:int = 150, svgtextastext:bool = False):
if not filename or not extension:
print(f"{self.internal_name} You have to specify filename and extension!")
print(f"{self.internal_name} For example: filename='my_amazing_figure', extension='pdf'")
print(f"{self.internal_name} color=True, extension='pdf', quality='85', pdfdpi='150'")
print(f"{self.internal_name} Available extensions: [pdf]/[eps]/[ps]/[svg]/[jpg]/[jpeg]/[bmp]/[png]")
else: self.g.Export(filename, color=color, page=page, dpi=dpi, antialias=antialias, quality=quality, backcolor=backcolor, pdfdpi=pdfdpi, svgtextastext=svgtextastext)
def save(self, filename=None):
if not filename:
print(f"{self.internal_name} You have to specify filename! [Labels from Y and X will be added automatically]")
else:
if filename.find(".") != -1 or filename.find(":") or filename.find("\\") or filename.find("*") or filename.find("/") or filename.find("\\\\"):
print(f"{self.internal_name} I found forbidden symbols [.]/[:]...")
filename.replace(".", "").replace(":", "_").replace("\\\\","").replace("*", "").replace("/", "_").replace("\\", "")
# latex reduction
xname = self.xname.replace("\\italic", "").replace("{", "").replace("}","").replace("_", "").replace("^", "").replace("\\\\", "").replace("\\", "").replace("/", "_").replace("*", "")
yname = self.yname.replace("\\italic", "").replace("{", "").replace("}","").replace("_", "").replace("^", "").replace("\\\\", "").replace("\\", "").replace("/", "_").replace("*", "")
# space reduction
xname = xname.replace(" ", "")
yname = yname.replace(" ", "")
name4saving = filename+"_"+yname+"_"+xname
if not os.path.exists(name4saving+".vsz"): self.g.Save(name4saving+".vsz")
else:
print(f"{self.internal_name} The file exists!")
i = 0
while os.path.exists(name4saving+str(i)+".vsz"): i+=1
name4saving += str(i) + ".vsz"
self.g.Save(name4saving)
print(f"{self.internal_name} Saved! filename: {name4saving}")
|
PypiClean
|
/tencentcloud-sdk-python-3.0.973.tar.gz/tencentcloud-sdk-python-3.0.973/tencentcloud/irp/v20220324/models.py
|
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AuthorInfo(AbstractModel):
"""作者信息
"""
def __init__(self):
r"""
:param _Id: Author id
Note: this field may return null, indicating that no valid value could be obtained.
:type Id: str
:param _Name: Author name
Note: this field may return null, indicating that no valid value could be obtained.
:type Name: str
:param _SourceId: Author source
Note: this field may return null, indicating that no valid value could be obtained.
:type SourceId: int
:param _FollowType: Follow type: 1 - follow, 2 - unfollow
Note: this field may return null, indicating that no valid value could be obtained.
:type FollowType: int
:param _IconUrl: URL of the author's avatar icon
Note: this field may return null, indicating that no valid value could be obtained.
:type IconUrl: str
"""
self._Id = None
self._Name = None
self._SourceId = None
self._FollowType = None
self._IconUrl = None
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def SourceId(self):
return self._SourceId
@SourceId.setter
def SourceId(self, SourceId):
self._SourceId = SourceId
@property
def FollowType(self):
return self._FollowType
@FollowType.setter
def FollowType(self, FollowType):
self._FollowType = FollowType
@property
def IconUrl(self):
return self._IconUrl
@IconUrl.setter
def IconUrl(self, IconUrl):
self._IconUrl = IconUrl
def _deserialize(self, params):
self._Id = params.get("Id")
self._Name = params.get("Name")
self._SourceId = params.get("SourceId")
self._FollowType = params.get("FollowType")
self._IconUrl = params.get("IconUrl")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
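# drop the leading underscore so the private attribute "_Id" matches the wire key "Id"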
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DislikeInfo(AbstractModel):
"""不喜欢信息
"""
def __init__(self):
r"""
:param _Type: Disliked material category, matching a field name in the material upload protocol, e.g. authorId, keyword, topic
:type Type: str
:param _Value: Value of the field named by Type, e.g. a specific topic name or author id
:type Value: str
"""
self._Type = None
self._Value = None
@property
def Type(self):
return self._Type
@Type.setter
def Type(self, Type):
self._Type = Type
@property
def Value(self):
return self._Value
@Value.setter
def Value(self, Value):
self._Value = Value
def _deserialize(self, params):
self._Type = params.get("Type")
self._Value = params.get("Value")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DocBehavior(AbstractModel):
"""行为数据
"""
def __init__(self):
r"""
:param _ItemId: Unique content ID, e.g. 2824324234
:type ItemId: str
:param _BehaviorType: Behavior type
:type BehaviorType: int
:param _BehaviorValue: Behavior value
:type BehaviorValue: str
:param _BehaviorTimestamp: Behavior timestamp: second-level timestamp (defaults to the current time). Do not delay it too long; report as close to real time as possible, otherwise the accuracy of recommendation results will suffer.
:type BehaviorTimestamp: int
:param _SceneId: Scene id, obtained after creating a scene in the console.
:type SceneId: str
:param _UserIdList: User id list
:type UserIdList: list of UserIdInfo
:param _RecTraceId: Session id; fill in the RecTraceId returned in the recommendation results.<br>Note: if it differs from the traceId of the online recommendation request, behavior-feature attribution is affected, which degrades the recommendation algorithm.
:type RecTraceId: str
:param _Source: Algorithm source, used to distinguish which algorithm the behavior came from. One of **business, tencent, other**<br>● business: the business's own algorithm control group<br>● tencent: Tencent's algorithm<br>● other: other algorithms
:type Source: str
:param _ItemType: Material type
:type ItemType: int
:param _AppId: appId as shown on the WeChat Open Platform
:type AppId: str
:param _VideoPlayDuration: When reporting the video_over event, the user's total video playback time (actual playback only, seeking excluded; in seconds)
:type VideoPlayDuration: int
:param _ReferrerItemId: Source material content, used to mark behaviors produced on a given content page. For example, to record a click on recommended content B inside the detail page of content A, ReferrerItemId is content A and ItemId is content B
:type ReferrerItemId: str
:param _Country: Country, using the standard abbreviation, e.g. CN for China
:type Country: str
:param _Province: Province
:type Province: str
:param _City: City
:type City: str
:param _District: District/county
:type District: str
:param _IP: Client IP
:type IP: str
:param _Network: Client network type
:type Network: str
:param _Platform: Client platform, ios/android/h5
:type Platform: str
:param _AppVersion: Client app version
:type AppVersion: str
:param _OsVersion: Operating system version
:type OsVersion: str
:param _DeviceModel: Device model
:type DeviceModel: str
:param _Extension: JSON string used to extend the behavior data
:type Extension: str
"""
self._ItemId = None
self._BehaviorType = None
self._BehaviorValue = None
self._BehaviorTimestamp = None
self._SceneId = None
self._UserIdList = None
self._RecTraceId = None
self._Source = None
self._ItemType = None
self._AppId = None
self._VideoPlayDuration = None
self._ReferrerItemId = None
self._Country = None
self._Province = None
self._City = None
self._District = None
self._IP = None
self._Network = None
self._Platform = None
self._AppVersion = None
self._OsVersion = None
self._DeviceModel = None
self._Extension = None
@property
def ItemId(self):
return self._ItemId
@ItemId.setter
def ItemId(self, ItemId):
self._ItemId = ItemId
@property
def BehaviorType(self):
return self._BehaviorType
@BehaviorType.setter
def BehaviorType(self, BehaviorType):
self._BehaviorType = BehaviorType
@property
def BehaviorValue(self):
return self._BehaviorValue
@BehaviorValue.setter
def BehaviorValue(self, BehaviorValue):
self._BehaviorValue = BehaviorValue
@property
def BehaviorTimestamp(self):
return self._BehaviorTimestamp
@BehaviorTimestamp.setter
def BehaviorTimestamp(self, BehaviorTimestamp):
self._BehaviorTimestamp = BehaviorTimestamp
@property
def SceneId(self):
return self._SceneId
@SceneId.setter
def SceneId(self, SceneId):
self._SceneId = SceneId
@property
def UserIdList(self):
return self._UserIdList
@UserIdList.setter
def UserIdList(self, UserIdList):
self._UserIdList = UserIdList
@property
def RecTraceId(self):
return self._RecTraceId
@RecTraceId.setter
def RecTraceId(self, RecTraceId):
self._RecTraceId = RecTraceId
@property
def Source(self):
return self._Source
@Source.setter
def Source(self, Source):
self._Source = Source
@property
def ItemType(self):
return self._ItemType
@ItemType.setter
def ItemType(self, ItemType):
self._ItemType = ItemType
@property
def AppId(self):
return self._AppId
@AppId.setter
def AppId(self, AppId):
self._AppId = AppId
@property
def VideoPlayDuration(self):
return self._VideoPlayDuration
@VideoPlayDuration.setter
def VideoPlayDuration(self, VideoPlayDuration):
self._VideoPlayDuration = VideoPlayDuration
@property
def ReferrerItemId(self):
return self._ReferrerItemId
@ReferrerItemId.setter
def ReferrerItemId(self, ReferrerItemId):
self._ReferrerItemId = ReferrerItemId
@property
def Country(self):
return self._Country
@Country.setter
def Country(self, Country):
self._Country = Country
@property
def Province(self):
return self._Province
@Province.setter
def Province(self, Province):
self._Province = Province
@property
def City(self):
return self._City
@City.setter
def City(self, City):
self._City = City
@property
def District(self):
return self._District
@District.setter
def District(self, District):
self._District = District
@property
def IP(self):
return self._IP
@IP.setter
def IP(self, IP):
self._IP = IP
@property
def Network(self):
return self._Network
@Network.setter
def Network(self, Network):
self._Network = Network
@property
def Platform(self):
return self._Platform
@Platform.setter
def Platform(self, Platform):
self._Platform = Platform
@property
def AppVersion(self):
return self._AppVersion
@AppVersion.setter
def AppVersion(self, AppVersion):
self._AppVersion = AppVersion
@property
def OsVersion(self):
return self._OsVersion
@OsVersion.setter
def OsVersion(self, OsVersion):
self._OsVersion = OsVersion
@property
def DeviceModel(self):
return self._DeviceModel
@DeviceModel.setter
def DeviceModel(self, DeviceModel):
self._DeviceModel = DeviceModel
@property
def Extension(self):
return self._Extension
@Extension.setter
def Extension(self, Extension):
self._Extension = Extension
def _deserialize(self, params):
self._ItemId = params.get("ItemId")
self._BehaviorType = params.get("BehaviorType")
self._BehaviorValue = params.get("BehaviorValue")
self._BehaviorTimestamp = params.get("BehaviorTimestamp")
self._SceneId = params.get("SceneId")
if params.get("UserIdList") is not None:
self._UserIdList = []
for item in params.get("UserIdList"):
obj = UserIdInfo()
obj._deserialize(item)
self._UserIdList.append(obj)
self._RecTraceId = params.get("RecTraceId")
self._Source = params.get("Source")
self._ItemType = params.get("ItemType")
self._AppId = params.get("AppId")
self._VideoPlayDuration = params.get("VideoPlayDuration")
self._ReferrerItemId = params.get("ReferrerItemId")
self._Country = params.get("Country")
self._Province = params.get("Province")
self._City = params.get("City")
self._District = params.get("District")
self._IP = params.get("IP")
self._Network = params.get("Network")
self._Platform = params.get("Platform")
self._AppVersion = params.get("AppVersion")
self._OsVersion = params.get("OsVersion")
self._DeviceModel = params.get("DeviceModel")
self._Extension = params.get("Extension")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class DocItem(AbstractModel):
"""推荐物料信息
"""
def __init__(self):
r"""
:param _ItemId: Unique content id
:type ItemId: str
:param _ItemType: Content type
:type ItemType: int
:param _Status: Content status: 1 - online (listed), 2 - offline (delisted)
:type Status: int
:param _PublishTimestamp: Content creation time, second-level timestamp (1639624786), must be greater than 0
:type PublishTimestamp: int
:param _SourceId: Material source ID
:type SourceId: int
:param _Title: Title
:type Title: str
:param _Content: Content body
:type Content: str
:param _Author: Author
:type Author: str
:param _AuthorId: Author id
:type AuthorId: str
:param _Keyword: Tag keywords, multiple values separated by English semicolons
:type Keyword: str
:param _Desc: Material description: the recommendation system applies NLP techniques to the description to tokenize it and extract keywords, which are used as features of the news item.
:type Desc: str
:param _PicUrlList: Picture URLs
:type PicUrlList: list of str
:param _VideoUrlList: Video URLs
:type VideoUrlList: list of str
:param _VideoDuration: Video duration, in seconds
:type VideoDuration: int
:param _CategoryLevel: Number of category levels; e.g. fill in 3 for a three-level category, matching the data in the CategoryPath field
:type CategoryLevel: int
:param _CategoryPath: Category path; join level 1, level 2, level 3 and so on with English colons, e.g. Sports:Football:Barcelona
:type CategoryPath: str
:param _Country: Country, using the standard abbreviation, e.g. CN for China
:type Country: str
:param _Province: Province
:type Province: str
:param _City: City
:type City: str
:param _District: District/county
:type District: str
:param _ExpireTimestamp: Content expiration time, second-level timestamp (1639624786); if not set, defaults to one year after PublishTimestamp
:type ExpireTimestamp: int
:param _Topic: Topic the content belongs to
:type Topic: str
:param _AuthorFans: Author's follower count
:type AuthorFans: int
:param _AuthorLevel: Author rating
:type AuthorLevel: str
:param _CollectCnt: Cumulative number of times the content has been favorited
:type CollectCnt: int
:param _PraiseCnt: Cumulative number of likes on the content
:type PraiseCnt: int
:param _CommentCnt: Cumulative number of comments on the content
:type CommentCnt: int
:param _ShareCnt: Cumulative number of times the content has been shared
:type ShareCnt: int
:param _RewardCnt: Cumulative number of tips (rewards) on the content
:type RewardCnt: int
:param _Score: Content quality score, similar to Douban movie ratings, on a 100-point scale, e.g. 97; maximum 100, minimum 0, out-of-range values are rejected
:type Score: float
:param _PoolIdList: Content pool ids, used for per-pool recall; one piece of content can be assigned to one or more content pools. Pool id 0 is not recommended (0 means no pool distinction)
:type PoolIdList: list of str
:param _TagInfoList: Tags describing the user
:type TagInfoList: list of TagInfo
:param _Extension: JSON string used to extend the material data
:type Extension: str
"""
self._ItemId = None
self._ItemType = None
self._Status = None
self._PublishTimestamp = None
self._SourceId = None
self._Title = None
self._Content = None
self._Author = None
self._AuthorId = None
self._Keyword = None
self._Desc = None
self._PicUrlList = None
self._VideoUrlList = None
self._VideoDuration = None
self._CategoryLevel = None
self._CategoryPath = None
self._Country = None
self._Province = None
self._City = None
self._District = None
self._ExpireTimestamp = None
self._Topic = None
self._AuthorFans = None
self._AuthorLevel = None
self._CollectCnt = None
self._PraiseCnt = None
self._CommentCnt = None
self._ShareCnt = None
self._RewardCnt = None
self._Score = None
self._PoolIdList = None
self._TagInfoList = None
self._Extension = None
@property
def ItemId(self):
return self._ItemId
@ItemId.setter
def ItemId(self, ItemId):
self._ItemId = ItemId
@property
def ItemType(self):
return self._ItemType
@ItemType.setter
def ItemType(self, ItemType):
self._ItemType = ItemType
@property
def Status(self):
return self._Status
@Status.setter
def Status(self, Status):
self._Status = Status
@property
def PublishTimestamp(self):
return self._PublishTimestamp
@PublishTimestamp.setter
def PublishTimestamp(self, PublishTimestamp):
self._PublishTimestamp = PublishTimestamp
@property
def SourceId(self):
return self._SourceId
@SourceId.setter
def SourceId(self, SourceId):
self._SourceId = SourceId
@property
def Title(self):
return self._Title
@Title.setter
def Title(self, Title):
self._Title = Title
@property
def Content(self):
return self._Content
@Content.setter
def Content(self, Content):
self._Content = Content
@property
def Author(self):
return self._Author
@Author.setter
def Author(self, Author):
self._Author = Author
@property
def AuthorId(self):
return self._AuthorId
@AuthorId.setter
def AuthorId(self, AuthorId):
self._AuthorId = AuthorId
@property
def Keyword(self):
return self._Keyword
@Keyword.setter
def Keyword(self, Keyword):
self._Keyword = Keyword
@property
def Desc(self):
return self._Desc
@Desc.setter
def Desc(self, Desc):
self._Desc = Desc
@property
def PicUrlList(self):
return self._PicUrlList
@PicUrlList.setter
def PicUrlList(self, PicUrlList):
self._PicUrlList = PicUrlList
@property
def VideoUrlList(self):
return self._VideoUrlList
@VideoUrlList.setter
def VideoUrlList(self, VideoUrlList):
self._VideoUrlList = VideoUrlList
@property
def VideoDuration(self):
return self._VideoDuration
@VideoDuration.setter
def VideoDuration(self, VideoDuration):
self._VideoDuration = VideoDuration
@property
def CategoryLevel(self):
return self._CategoryLevel
@CategoryLevel.setter
def CategoryLevel(self, CategoryLevel):
self._CategoryLevel = CategoryLevel
@property
def CategoryPath(self):
return self._CategoryPath
@CategoryPath.setter
def CategoryPath(self, CategoryPath):
self._CategoryPath = CategoryPath
@property
def Country(self):
return self._Country
@Country.setter
def Country(self, Country):
self._Country = Country
@property
def Province(self):
return self._Province
@Province.setter
def Province(self, Province):
self._Province = Province
@property
def City(self):
return self._City
@City.setter
def City(self, City):
self._City = City
@property
def District(self):
return self._District
@District.setter
def District(self, District):
self._District = District
@property
def ExpireTimestamp(self):
return self._ExpireTimestamp
@ExpireTimestamp.setter
def ExpireTimestamp(self, ExpireTimestamp):
self._ExpireTimestamp = ExpireTimestamp
@property
def Topic(self):
return self._Topic
@Topic.setter
def Topic(self, Topic):
self._Topic = Topic
@property
def AuthorFans(self):
return self._AuthorFans
@AuthorFans.setter
def AuthorFans(self, AuthorFans):
self._AuthorFans = AuthorFans
@property
def AuthorLevel(self):
return self._AuthorLevel
@AuthorLevel.setter
def AuthorLevel(self, AuthorLevel):
self._AuthorLevel = AuthorLevel
@property
def CollectCnt(self):
return self._CollectCnt
@CollectCnt.setter
def CollectCnt(self, CollectCnt):
self._CollectCnt = CollectCnt
@property
def PraiseCnt(self):
return self._PraiseCnt
@PraiseCnt.setter
def PraiseCnt(self, PraiseCnt):
self._PraiseCnt = PraiseCnt
@property
def CommentCnt(self):
return self._CommentCnt
@CommentCnt.setter
def CommentCnt(self, CommentCnt):
self._CommentCnt = CommentCnt
@property
def ShareCnt(self):
return self._ShareCnt
@ShareCnt.setter
def ShareCnt(self, ShareCnt):
self._ShareCnt = ShareCnt
@property
def RewardCnt(self):
return self._RewardCnt
@RewardCnt.setter
def RewardCnt(self, RewardCnt):
self._RewardCnt = RewardCnt
@property
def Score(self):
return self._Score
@Score.setter
def Score(self, Score):
self._Score = Score
@property
def PoolIdList(self):
return self._PoolIdList
@PoolIdList.setter
def PoolIdList(self, PoolIdList):
self._PoolIdList = PoolIdList
@property
def TagInfoList(self):
return self._TagInfoList
@TagInfoList.setter
def TagInfoList(self, TagInfoList):
self._TagInfoList = TagInfoList
@property
def Extension(self):
return self._Extension
@Extension.setter
def Extension(self, Extension):
self._Extension = Extension
def _deserialize(self, params):
self._ItemId = params.get("ItemId")
self._ItemType = params.get("ItemType")
self._Status = params.get("Status")
self._PublishTimestamp = params.get("PublishTimestamp")
self._SourceId = params.get("SourceId")
self._Title = params.get("Title")
self._Content = params.get("Content")
self._Author = params.get("Author")
self._AuthorId = params.get("AuthorId")
self._Keyword = params.get("Keyword")
self._Desc = params.get("Desc")
self._PicUrlList = params.get("PicUrlList")
self._VideoUrlList = params.get("VideoUrlList")
self._VideoDuration = params.get("VideoDuration")
self._CategoryLevel = params.get("CategoryLevel")
self._CategoryPath = params.get("CategoryPath")
self._Country = params.get("Country")
self._Province = params.get("Province")
self._City = params.get("City")
self._District = params.get("District")
self._ExpireTimestamp = params.get("ExpireTimestamp")
self._Topic = params.get("Topic")
self._AuthorFans = params.get("AuthorFans")
self._AuthorLevel = params.get("AuthorLevel")
self._CollectCnt = params.get("CollectCnt")
self._PraiseCnt = params.get("PraiseCnt")
self._CommentCnt = params.get("CommentCnt")
self._ShareCnt = params.get("ShareCnt")
self._RewardCnt = params.get("RewardCnt")
self._Score = params.get("Score")
self._PoolIdList = params.get("PoolIdList")
if params.get("TagInfoList") is not None:
self._TagInfoList = []
for item in params.get("TagInfoList"):
obj = TagInfo()
obj._deserialize(item)
self._TagInfoList.append(obj)
self._Extension = params.get("Extension")
member_set = set(params.keys())
for name, value in vars(self).items():
property_name = name[1:]
if property_name in member_set:
member_set.remove(property_name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set))
class PortraitInfo(AbstractModel):
"""画像信息
"""
def __init__(self):
r"""
:param _UserIdList: User id list
:type UserIdList: list of UserIdInfo
:param _AppId: Required if "userIdType" is 10; the appId as shown on the WeChat Open Platform
:type AppId: str
:param _Age: User age, in the range 0-200
:type Age: int
:param _Gender: User gender: 0 - unknown, 1 - male, 2 - female
:type Gender: int
:param _Degree: User education: primary school, junior high school, senior high school, junior college, bachelor, master, doctor
:type Degree: str
:param _School: Full name of the school the user graduated from
:type School: str
:param _Occupation: User occupation; must be unique within the business
:type Occupation: str
:param _Industry: Industry the user belongs to; must be unique within the business
:type Industry: str
:param _ResidentCountry: Country of residence, using the standard abbreviation, e.g. CN for China
:type ResidentCountry: str
:param _ResidentProvince: Province of residence
:type ResidentProvince: str
:param _ResidentCity: City of residence
:type ResidentCity: str
:param _ResidentDistrict: District/county of residence
:type ResidentDistrict: str
:param _PhoneMd5: MD5 of the user's phone number
:type PhoneMd5: str
:param _PhoneImei: IMEI of the user's phone
:type PhoneImei: str
:param _Idfa: Device IDFA
:type Idfa: str
:param _RegisterTimestamp: User registration time, second-level timestamp (1639624786)
:type RegisterTimestamp: int
:param _MembershipLevel: User membership level
:type MembershipLevel: str
:param _LastLoginTimestamp: Time of the user's last login, second-level timestamp (1639624786)
:type LastLoginTimestamp: int
:param _LastLoginIp: IP of the user's last login
:type LastLoginIp: str
:param _LastModifyTimestamp: Timestamp of the last modification of the user information, second-level timestamp (1639624786)
:type LastModifyTimestamp: int
:param _TagInfoList: User tags
:type TagInfoList: list of TagInfo
:param _AuthorInfoList: List of authors the user follows
:type AuthorInfoList: list of AuthorInfo
:param _DislikeInfoList: User dislike list
:type DislikeInfoList: list of DislikeInfo
:param _Extension: JSON string used to extend the profile data
:type Extension: str
:param _Oaid: Device OAID
:type Oaid: str
:param _AndroidId: Device AndroidId
:type AndroidId: str
"""
self._UserIdList = None
self._AppId = None
self._Age = None
self._Gender = None
self._Degree = None
self._School = None
self._Occupation = None
self._Industry = None
self._ResidentCountry = None
self._ResidentProvince = None
self._ResidentCity = None
self._ResidentDistrict = None
self._PhoneMd5 = None
self._PhoneImei = None
self._Idfa = None
self._RegisterTimestamp = None
self._MembershipLevel = None
self._LastLoginTimestamp = None
self._LastLoginIp = None
self._LastModifyTimestamp = None
self._TagInfoList = None
self._AuthorInfoList = None
self._DislikeInfoList = None
self._Extension = None
self._Oaid = None
self._AndroidId = None
@property
def UserIdList(self):
return self._UserIdList
@UserIdList.setter
def UserIdList(self, UserIdList):
self._UserIdList = UserIdList
@property
def AppId(self):
return self._AppId
@AppId.setter
def AppId(self, AppId):
self._AppId = AppId
@property
def Age(self):
return self._Age
@Age.setter
def Age(self, Age):
self._Age = Age
@property
def Gender(self):
return self._Gender
@Gender.setter
def Gender(self, Gender):
self._Gender = Gender
@property
def Degree(self):
return self._Degree
@Degree.setter
def Degree(self, Degree):
self._Degree = Degree
@property
def School(self):
return self._School
@School.setter
def School(self, School):
self._School = School
@property
def Occupation(self):
return self._Occupation
@Occupation.setter
def Occupation(self, Occupation):
self._Occupation = Occupation
@property
def Industry(self):
return self._Industry
@Industry.setter
def Industry(self, Industry):
self._Industry = Industry
@property
def ResidentCountry(self):
return self._ResidentCountry
@ResidentCountry.setter
def ResidentCountry(self, ResidentCountry):
self._ResidentCountry = ResidentCountry
@property
def ResidentProvince(self):
return self._ResidentProvince
@ResidentProvince.setter
def ResidentProvince(self, ResidentProvince):
self._ResidentProvince = ResidentProvince
@property
def ResidentCity(self):
return self._ResidentCity
@ResidentCity.setter
def ResidentCity(self, ResidentCity):
self._ResidentCity = ResidentCity
@property
def ResidentDistrict(self):
return self._ResidentDistrict
@ResidentDistrict.setter
def ResidentDistrict(self, ResidentDistrict):
self._ResidentDistrict = ResidentDistrict
@property
def PhoneMd5(self):
return self._PhoneMd5
@PhoneMd5.setter
def PhoneMd5(self, PhoneMd5):
self._PhoneMd5 = PhoneMd5
@property
def PhoneImei(self):
return self._PhoneImei
@PhoneImei.setter
def PhoneImei(self, PhoneImei):
self._PhoneImei = PhoneImei
@property
def Idfa(self):
return self._Idfa
@Idfa.setter
def Idfa(self, Idfa):
self._Idfa = Idfa
@property
def RegisterTimestamp(self):
return self._RegisterTimestamp
@RegisterTimestamp.setter
def RegisterTimestamp(self, RegisterTimestamp):
self._RegisterTimestamp = RegisterTimestamp
@property
def MembershipLevel(self):
return self._MembershipLevel
@MembershipLevel.setter
def MembershipLevel(self, MembershipLevel):
self._MembershipLevel = MembershipLevel
@property
def LastLoginTimestamp(self):
return self._LastLoginTimestamp
@LastLoginTimestamp.setter
def LastLoginTimestamp(self, LastLoginTimestamp):
self._LastLoginTimestamp = LastLoginTimestamp
@property
def LastLoginIp(self):
return self._LastLoginIp
@LastLoginIp.setter
def LastLoginIp(self, LastLoginIp):
self._LastLoginIp = LastLoginIp
@property
def LastModifyTimestamp(self):
return self._LastModifyTimestamp
@LastModifyTimestamp.setter
def LastModifyTimestamp(self, LastModifyTimestamp):
self._LastModifyTimestamp = LastModifyTimestamp
@property
def TagInfoList(self):
return self._TagInfoList
@TagInfoList.setter
def TagInfoList(self, TagInfoList):
self._TagInfoList = TagInfoList
@property
def AuthorInfoList(self):
return self._AuthorInfoList
@AuthorInfoList.setter
def AuthorInfoList(self, AuthorInfoList):
self._AuthorInfoList = AuthorInfoList
@property
def DislikeInfoList(self):
return self._DislikeInfoList
@DislikeInfoList.setter
def DislikeInfoList(self, DislikeInfoList):
self._DislikeInfoList = DislikeInfoList
@property
def Extension(self):
return self._Extension
@Extension.setter
def Extension(self, Extension):
self._Extension = Extension
@property
def Oaid(self):
return self._Oaid
@Oaid.setter
def Oaid(self, Oaid):
self._Oaid = Oaid
@property
def AndroidId(self):
return self._AndroidId
@AndroidId.setter
def AndroidId(self, AndroidId):
self._AndroidId = AndroidId
def _deserialize(self, params):
if params.get("UserIdList") is not None:
self._UserIdList = []
for item in params.get("UserIdList"):
obj = UserIdInfo()
obj._deserialize(item)
self._UserIdList.append(obj)
self._AppId = params.get("AppId")
self._Age = params.get("Age")
self._Gender = params.get("Gender")
self._Degree = params.get("Degree")
self._School = params.get("School")
self._Occupation = params.get("Occupation")
self._Industry = params.get("Industry")
self._ResidentCountry = params.get("ResidentCountry")
self._ResidentProvince = params.get("ResidentProvince")
self._ResidentCity = params.get("ResidentCity")
self._ResidentDistrict = params.get("ResidentDistrict")
self._PhoneMd5 = params.get("PhoneMd5")
self._PhoneImei = params.get("PhoneImei")
self._Idfa = params.get("Idfa")
self._RegisterTimestamp = params.get("RegisterTimestamp")
self._MembershipLevel = params.get("MembershipLevel")
self._LastLoginTimestamp = params.get("LastLoginTimestamp")
self._LastLoginIp = params.get("LastLoginIp")
self._LastModifyTimestamp = params.get("LastModifyTimestamp")
if params.get("TagInfoList") is not None:
self._TagInfoList = []
for item in params.get("TagInfoList"):
obj = TagInfo()
obj._deserialize(item)
self._TagInfoList.append(obj)
if params.get("AuthorInfoList") is not None:
self._AuthorInfoList = []
for item in params.get("AuthorInfoList"):
obj = AuthorInfo()
obj._deserialize(item)
self._AuthorInfoList.append(obj)
if params.get("DislikeInfoList") is not None:
self._DislikeInfoList = []
for item in params.get("DislikeInfoList"):
obj = DislikeInfo()
obj._deserialize(item)
self._DislikeInfoList.append(obj)
self._Extension = params.get("Extension")
self._Oaid = params.get("Oaid")
self._AndroidId = params.get("AndroidId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class RecItemData(AbstractModel):
"""推荐内容信息
"""
def __init__(self):
r"""
        :param _ItemId: ID of the recommended content, i.e. the itemId used in user behavior reporting
        :type ItemId: str
        :param _ItemType: Material subtype, one of: 1 - image and text, 2 - long video (landscape), 3 - short video (landscape), 4 - novel, 5 - mini video (portrait), 6 - plain text
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ItemType: int
        :param _Weight: Weight of the recommended content, in the range [0,1000000]
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Weight: int
        :param _Score: Recommendation prediction score
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Score: float
        :param _Keyword: Keywords, multiple values separated by semicolons, consistent with the keyword uploaded with the material
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Keyword: str
"""
self._ItemId = None
self._ItemType = None
self._Weight = None
self._Score = None
self._Keyword = None
@property
def ItemId(self):
return self._ItemId
@ItemId.setter
def ItemId(self, ItemId):
self._ItemId = ItemId
@property
def ItemType(self):
return self._ItemType
@ItemType.setter
def ItemType(self, ItemType):
self._ItemType = ItemType
@property
def Weight(self):
return self._Weight
@Weight.setter
def Weight(self, Weight):
self._Weight = Weight
@property
def Score(self):
return self._Score
@Score.setter
def Score(self, Score):
self._Score = Score
@property
def Keyword(self):
return self._Keyword
@Keyword.setter
def Keyword(self, Keyword):
self._Keyword = Keyword
def _deserialize(self, params):
self._ItemId = params.get("ItemId")
self._ItemType = params.get("ItemType")
self._Weight = params.get("Weight")
self._Score = params.get("Score")
self._Keyword = params.get("Keyword")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class RecommendContentRequest(AbstractModel):
"""RecommendContent请求参数结构体
"""
def __init__(self):
r"""
        :param _Bid: Business ID
        :type Bid: str
        :param _SceneId: Scene ID: recommendation modules such as "Guess You Like" or "Popular Content" are each identified by a scene_id. It is obtained after creating the scene in the console and must match the ID used when reporting behavior data.
        :type SceneId: str
        :param _UserIdList: Array of unique user IDs; see the UserIdInfo structure for each element. If omitted, the API returns popular results.
        :type UserIdList: list of UserIdInfo
        :param _RecTraceId: Session ID: must be the same traceId that is filled in when behavior data is reported, so that behavior data can be attributed to the online recommendation request it came from. **Note: if it is not passed here, the response returns a globally unique ID, which the caller must pass through to the behavior log reporting API.**
        :type RecTraceId: str
        :param _ItemCnt: Number of recommendations: the result of material selection. Defaults to 50; at most 200 items can currently be returned. Returning more items hurts performance and easily leads to timeouts.
        :type ItemCnt: int
        :param _PoolId: Material pool ID, used to recall items under this pool_id; separate multiple IDs with a semicolon (;). **Note: the poolId here must correspond to the poolIdList used when reporting materials.**
        :type PoolId: str
        :param _CurrentItemId: Source material ID, i.e. the ID of the material the user is currently viewing, used to fetch related recommendations on the content detail page
        :type CurrentItemId: str
        :param _ResponseTimeout: Request response timeout in ms. Defaults to 300 ms; setting it too low hurts recommendation quality, and the minimum supported value is 250 ms.
        :type ResponseTimeout: int
        :param _ItemTypeRatio: Ratio of the different material types in the returned results. The ratio order must strictly follow (image and text, long video, short video, mini video). Only numbers in [0,100] are allowed, multiple values must be separated with an ASCII colon, the values must not add up to more than 100, and the number of ratios must not exceed the number of material types bound to the scene (image and text, long video, short video, mini video). **Examples:** for an image-and-text to short-video ratio of 40%:60%, pass 40:60; for 0%:100%, pass 0:100; for image and text at 20% with the remaining 80% randomly returned from long and short videos, pass 20:80 or simply 20.
        :type ItemTypeRatio: str
"""
self._Bid = None
self._SceneId = None
self._UserIdList = None
self._RecTraceId = None
self._ItemCnt = None
self._PoolId = None
self._CurrentItemId = None
self._ResponseTimeout = None
self._ItemTypeRatio = None
@property
def Bid(self):
return self._Bid
@Bid.setter
def Bid(self, Bid):
self._Bid = Bid
@property
def SceneId(self):
return self._SceneId
@SceneId.setter
def SceneId(self, SceneId):
self._SceneId = SceneId
@property
def UserIdList(self):
return self._UserIdList
@UserIdList.setter
def UserIdList(self, UserIdList):
self._UserIdList = UserIdList
@property
def RecTraceId(self):
return self._RecTraceId
@RecTraceId.setter
def RecTraceId(self, RecTraceId):
self._RecTraceId = RecTraceId
@property
def ItemCnt(self):
return self._ItemCnt
@ItemCnt.setter
def ItemCnt(self, ItemCnt):
self._ItemCnt = ItemCnt
@property
def PoolId(self):
return self._PoolId
@PoolId.setter
def PoolId(self, PoolId):
self._PoolId = PoolId
@property
def CurrentItemId(self):
return self._CurrentItemId
@CurrentItemId.setter
def CurrentItemId(self, CurrentItemId):
self._CurrentItemId = CurrentItemId
@property
def ResponseTimeout(self):
return self._ResponseTimeout
@ResponseTimeout.setter
def ResponseTimeout(self, ResponseTimeout):
self._ResponseTimeout = ResponseTimeout
@property
def ItemTypeRatio(self):
return self._ItemTypeRatio
@ItemTypeRatio.setter
def ItemTypeRatio(self, ItemTypeRatio):
self._ItemTypeRatio = ItemTypeRatio
def _deserialize(self, params):
self._Bid = params.get("Bid")
self._SceneId = params.get("SceneId")
if params.get("UserIdList") is not None:
self._UserIdList = []
for item in params.get("UserIdList"):
obj = UserIdInfo()
obj._deserialize(item)
self._UserIdList.append(obj)
self._RecTraceId = params.get("RecTraceId")
self._ItemCnt = params.get("ItemCnt")
self._PoolId = params.get("PoolId")
self._CurrentItemId = params.get("CurrentItemId")
self._ResponseTimeout = params.get("ResponseTimeout")
self._ItemTypeRatio = params.get("ItemTypeRatio")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
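# Illustrative sketch of filling in a request (not part of the generated SDK;
# the Bid and SceneId values are hypothetical placeholders):
#   req = RecommendContentRequest()
#   req.Bid = "your-business-id"
#   req.SceneId = "your-scene-id"
#   req.ItemCnt = 50              # default 50, at most 200 per the docstring
#   req.ItemTypeRatio = "40:60"   # e.g. a 40%:60% split, in the docstring's ratio format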
class RecommendContentResponse(AbstractModel):
"""RecommendContent返回参数结构体
"""
def __init__(self):
r"""
        :param _RecTraceId: Recommendation trace ID, used for behavior reporting. A different traceId is returned for every API call.
        :type RecTraceId: str
        :param _DataList: Identifies the specific material information
        :type DataList: list of RecItemData
        :param _RequestId: The unique request ID, which is returned for each request. The RequestId of the request is needed when locating a problem.
        :type RequestId: str
"""
self._RecTraceId = None
self._DataList = None
self._RequestId = None
@property
def RecTraceId(self):
return self._RecTraceId
@RecTraceId.setter
def RecTraceId(self, RecTraceId):
self._RecTraceId = RecTraceId
@property
def DataList(self):
return self._DataList
@DataList.setter
def DataList(self, DataList):
self._DataList = DataList
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RecTraceId = params.get("RecTraceId")
if params.get("DataList") is not None:
self._DataList = []
for item in params.get("DataList"):
obj = RecItemData()
obj._deserialize(item)
self._DataList.append(obj)
self._RequestId = params.get("RequestId")
class ReportActionRequest(AbstractModel):
"""ReportAction请求参数结构体
"""
def __init__(self):
r"""
        :param _Bid: Business ID
        :type Bid: str
        :param _DocBehaviorList: Array of reported behavior objects, at most 50 per request
        :type DocBehaviorList: list of DocBehavior
"""
self._Bid = None
self._DocBehaviorList = None
@property
def Bid(self):
return self._Bid
@Bid.setter
def Bid(self, Bid):
self._Bid = Bid
@property
def DocBehaviorList(self):
return self._DocBehaviorList
@DocBehaviorList.setter
def DocBehaviorList(self, DocBehaviorList):
self._DocBehaviorList = DocBehaviorList
def _deserialize(self, params):
self._Bid = params.get("Bid")
if params.get("DocBehaviorList") is not None:
self._DocBehaviorList = []
for item in params.get("DocBehaviorList"):
obj = DocBehavior()
obj._deserialize(item)
self._DocBehaviorList.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ReportActionResponse(AbstractModel):
"""ReportAction返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: The unique request ID, which is returned for each request. The RequestId of the request is needed when locating a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ReportMaterialRequest(AbstractModel):
"""ReportMaterial请求参数结构体
"""
def __init__(self):
r"""
        :param _Bid: Business ID
        :type Bid: str
        :param _DocItemList: Array of reported feed items, at most 50 per request
        :type DocItemList: list of DocItem
"""
self._Bid = None
self._DocItemList = None
@property
def Bid(self):
return self._Bid
@Bid.setter
def Bid(self, Bid):
self._Bid = Bid
@property
def DocItemList(self):
return self._DocItemList
@DocItemList.setter
def DocItemList(self, DocItemList):
self._DocItemList = DocItemList
def _deserialize(self, params):
self._Bid = params.get("Bid")
if params.get("DocItemList") is not None:
self._DocItemList = []
for item in params.get("DocItemList"):
obj = DocItem()
obj._deserialize(item)
self._DocItemList.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ReportMaterialResponse(AbstractModel):
"""ReportMaterial返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: The unique request ID, which is returned for each request. The RequestId of the request is needed when locating a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class ReportPortraitRequest(AbstractModel):
"""ReportPortrait请求参数结构体
"""
def __init__(self):
r"""
        :param _Bid: Business ID on the recommendation platform
        :type Bid: str
        :param _PortraitList: Array of reported user portraits, at most 50 per request
        :type PortraitList: list of PortraitInfo
"""
self._Bid = None
self._PortraitList = None
@property
def Bid(self):
return self._Bid
@Bid.setter
def Bid(self, Bid):
self._Bid = Bid
@property
def PortraitList(self):
return self._PortraitList
@PortraitList.setter
def PortraitList(self, PortraitList):
self._PortraitList = PortraitList
def _deserialize(self, params):
self._Bid = params.get("Bid")
if params.get("PortraitList") is not None:
self._PortraitList = []
for item in params.get("PortraitList"):
obj = PortraitInfo()
obj._deserialize(item)
self._PortraitList.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ReportPortraitResponse(AbstractModel):
"""ReportPortrait返回参数结构体
"""
def __init__(self):
r"""
        :param _RequestId: The unique request ID, which is returned for each request. The RequestId of the request is needed when locating a problem.
:type RequestId: str
"""
self._RequestId = None
@property
def RequestId(self):
return self._RequestId
@RequestId.setter
def RequestId(self, RequestId):
self._RequestId = RequestId
def _deserialize(self, params):
self._RequestId = params.get("RequestId")
class TagInfo(AbstractModel):
"""标题信息
"""
def __init__(self):
r"""
        :param _Id: Tag ID
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Id: str
        :param _Name: Tag name
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Name: str
        :param _Weight: Recommendation weight
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Weight: float
"""
self._Id = None
self._Name = None
self._Weight = None
@property
def Id(self):
return self._Id
@Id.setter
def Id(self, Id):
self._Id = Id
@property
def Name(self):
return self._Name
@Name.setter
def Name(self, Name):
self._Name = Name
@property
def Weight(self):
return self._Weight
@Weight.setter
def Weight(self, Weight):
self._Weight = Weight
def _deserialize(self, params):
self._Id = params.get("Id")
self._Name = params.get("Name")
self._Weight = params.get("Weight")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserIdInfo(AbstractModel):
"""用户信息
"""
def __init__(self):
r"""
        :param _UserIdType: User ID type:
        1 - qq
        2 - qq_md5: MD5 of the QQ number
        3 - imei: device IMEI (Android 10 and later no longer grant access to the IMEI and its mapping may be unavailable, so OAID is recommended for such devices)
        4 - imei_md5: MD5 of the IMEI
        5 - idfa: device identifier randomly assigned by Apple to the user's device
        6 - idfa_md5: MD5 of the IDFA
        7 - gdt_openid: openid generated by Tencent Ads (GDT)
        8 - oaid: a non-permanent device identifier introduced after Android 10
        9 - oaid_md5: MD5 of the OAID
        10 - wx_openid: WeChat openid
        11 - qq_openid: QQ openid
        12 - phone: phone number
        13 - phone_md5: MD5 of the phone number
        14 - phone_sha256: SHA256-hashed phone number
        15 - phone_sm3: phone number hashed with the national SM3 algorithm
        1000 - customer-defined ID
        :type UserIdType: int
        :param _UserId: User ID
        :type UserId: str
"""
self._UserIdType = None
self._UserId = None
@property
def UserIdType(self):
return self._UserIdType
@UserIdType.setter
def UserIdType(self, UserIdType):
self._UserIdType = UserIdType
@property
def UserId(self):
return self._UserId
@UserId.setter
def UserId(self, UserId):
self._UserId = UserId
def _deserialize(self, params):
self._UserIdType = params.get("UserIdType")
self._UserId = params.get("UserId")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            property_name = name[1:]
            if property_name in member_set:
                member_set.remove(property_name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
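# Illustrative sketch of filling in a UserIdInfo (not part of the generated SDK;
# the openid value is a hypothetical placeholder):
#   info = UserIdInfo()
#   info.UserIdType = 10          # 10 - wx_openid (WeChat openid), per the list above
#   info.UserId = "example-wx-openid"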
|
PypiClean
|
/django_xprez-0.2.3-py3-none-any.whl/xprez/static/xprez/admin/libs/handlebars/handlebars.runtime.min.js
|
!function(a,b){"object"==typeof exports&&"object"==typeof module?module.exports=b():"function"==typeof define&&define.amd?define([],b):"object"==typeof exports?exports.Handlebars=b():a.Handlebars=b()}(this,function(){return function(a){function b(d){if(c[d])return c[d].exports;var e=c[d]={exports:{},id:d,loaded:!1};return a[d].call(e.exports,e,e.exports,b),e.loaded=!0,e.exports}var c={};return b.m=a,b.c=c,b.p="",b(0)}([function(a,b,c){"use strict";function d(){var a=new h.HandlebarsEnvironment;return n.extend(a,h),a.SafeString=j["default"],a.Exception=l["default"],a.Utils=n,a.escapeExpression=n.escapeExpression,a.VM=p,a.template=function(b){return p.template(b,a)},a}var e=c(1)["default"],f=c(2)["default"];b.__esModule=!0;var g=c(3),h=e(g),i=c(17),j=f(i),k=c(5),l=f(k),m=c(4),n=e(m),o=c(18),p=e(o),q=c(19),r=f(q),s=d();s.create=d,r["default"](s),s["default"]=s,b["default"]=s,a.exports=b["default"]},function(a,b){"use strict";b["default"]=function(a){if(a&&a.__esModule)return a;var b={};if(null!=a)for(var c in a)Object.prototype.hasOwnProperty.call(a,c)&&(b[c]=a[c]);return b["default"]=a,b},b.__esModule=!0},function(a,b){"use strict";b["default"]=function(a){return a&&a.__esModule?a:{"default":a}},b.__esModule=!0},function(a,b,c){"use strict";function d(a,b,c){this.helpers=a||{},this.partials=b||{},this.decorators=c||{},i.registerDefaultHelpers(this),j.registerDefaultDecorators(this)}var e=c(2)["default"];b.__esModule=!0,b.HandlebarsEnvironment=d;var f=c(4),g=c(5),h=e(g),i=c(6),j=c(14),k=c(16),l=e(k),m="4.0.5";b.VERSION=m;var n=7;b.COMPILER_REVISION=n;var o={1:"<= 1.0.rc.2",2:"== 1.0.0-rc.3",3:"== 1.0.0-rc.4",4:"== 1.x.x",5:"== 2.0.0-alpha.x",6:">= 2.0.0-beta.1",7:">= 4.0.0"};b.REVISION_CHANGES=o;var p="[object Object]";d.prototype={constructor:d,logger:l["default"],log:l["default"].log,registerHelper:function(a,b){if(f.toString.call(a)===p){if(b)throw new h["default"]("Arg not supported with multiple helpers");f.extend(this.helpers,a)}else this.helpers[a]=b},unregisterHelper:function(a){delete this.helpers[a]},registerPartial:function(a,b){if(f.toString.call(a)===p)f.extend(this.partials,a);else{if("undefined"==typeof b)throw new h["default"]('Attempting to register a partial called "'+a+'" as undefined');this.partials[a]=b}},unregisterPartial:function(a){delete this.partials[a]},registerDecorator:function(a,b){if(f.toString.call(a)===p){if(b)throw new h["default"]("Arg not supported with multiple decorators");f.extend(this.decorators,a)}else this.decorators[a]=b},unregisterDecorator:function(a){delete this.decorators[a]}};var q=l["default"].log;b.log=q,b.createFrame=f.createFrame,b.logger=l["default"]},function(a,b){"use strict";function c(a){return k[a]}function d(a){for(var b=1;b<arguments.length;b++)for(var c in arguments[b])Object.prototype.hasOwnProperty.call(arguments[b],c)&&(a[c]=arguments[b][c]);return a}function e(a,b){for(var c=0,d=a.length;d>c;c++)if(a[c]===b)return c;return-1}function f(a){if("string"!=typeof a){if(a&&a.toHTML)return a.toHTML();if(null==a)return"";if(!a)return a+"";a=""+a}return m.test(a)?a.replace(l,c):a}function g(a){return a||0===a?p(a)&&0===a.length?!0:!1:!0}function h(a){var b=d({},a);return b._parent=a,b}function i(a,b){return a.path=b,a}function j(a,b){return(a?a+".":"")+b}b.__esModule=!0,b.extend=d,b.indexOf=e,b.escapeExpression=f,b.isEmpty=g,b.createFrame=h,b.blockParams=i,b.appendContextPath=j;var k={"&":"&","<":"<",">":">",'"':""","'":"'","`":"`","=":"="},l=/[&<>"'`=]/g,m=/[&<>"'`=]/,n=Object.prototype.toString;b.toString=n;var 
o=function(a){return"function"==typeof a};o(/x/)&&(b.isFunction=o=function(a){return"function"==typeof a&&"[object Function]"===n.call(a)}),b.isFunction=o;var p=Array.isArray||function(a){return a&&"object"==typeof a?"[object Array]"===n.call(a):!1};b.isArray=p},function(a,b){"use strict";function c(a,b){var e=b&&b.loc,f=void 0,g=void 0;e&&(f=e.start.line,g=e.start.column,a+=" - "+f+":"+g);for(var h=Error.prototype.constructor.call(this,a),i=0;i<d.length;i++)this[d[i]]=h[d[i]];Error.captureStackTrace&&Error.captureStackTrace(this,c),e&&(this.lineNumber=f,this.column=g)}b.__esModule=!0;var d=["description","fileName","lineNumber","message","name","number","stack"];c.prototype=new Error,b["default"]=c,a.exports=b["default"]},function(a,b,c){"use strict";function d(a){g["default"](a),i["default"](a),k["default"](a),m["default"](a),o["default"](a),q["default"](a),s["default"](a)}var e=c(2)["default"];b.__esModule=!0,b.registerDefaultHelpers=d;var f=c(7),g=e(f),h=c(8),i=e(h),j=c(9),k=e(j),l=c(10),m=e(l),n=c(11),o=e(n),p=c(12),q=e(p),r=c(13),s=e(r)},function(a,b,c){"use strict";b.__esModule=!0;var d=c(4);b["default"]=function(a){a.registerHelper("blockHelperMissing",function(b,c){var e=c.inverse,f=c.fn;if(b===!0)return f(this);if(b===!1||null==b)return e(this);if(d.isArray(b))return b.length>0?(c.ids&&(c.ids=[c.name]),a.helpers.each(b,c)):e(this);if(c.data&&c.ids){var g=d.createFrame(c.data);g.contextPath=d.appendContextPath(c.data.contextPath,c.name),c={data:g}}return f(b,c)})},a.exports=b["default"]},function(a,b,c){"use strict";var d=c(2)["default"];b.__esModule=!0;var e=c(4),f=c(5),g=d(f);b["default"]=function(a){a.registerHelper("each",function(a,b){function c(b,c,f){j&&(j.key=b,j.index=c,j.first=0===c,j.last=!!f,k&&(j.contextPath=k+b)),i+=d(a[b],{data:j,blockParams:e.blockParams([a[b],b],[k+b,null])})}if(!b)throw new g["default"]("Must pass iterator to #each");var d=b.fn,f=b.inverse,h=0,i="",j=void 0,k=void 0;if(b.data&&b.ids&&(k=e.appendContextPath(b.data.contextPath,b.ids[0])+"."),e.isFunction(a)&&(a=a.call(this)),b.data&&(j=e.createFrame(b.data)),a&&"object"==typeof a)if(e.isArray(a))for(var l=a.length;l>h;h++)h in a&&c(h,h,h===a.length-1);else{var m=void 0;for(var n in a)a.hasOwnProperty(n)&&(void 0!==m&&c(m,h-1),m=n,h++);void 0!==m&&c(m,h-1,!0)}return 0===h&&(i=f(this)),i})},a.exports=b["default"]},function(a,b,c){"use strict";var d=c(2)["default"];b.__esModule=!0;var e=c(5),f=d(e);b["default"]=function(a){a.registerHelper("helperMissing",function(){if(1!==arguments.length)throw new f["default"]('Missing helper: "'+arguments[arguments.length-1].name+'"')})},a.exports=b["default"]},function(a,b,c){"use strict";b.__esModule=!0;var d=c(4);b["default"]=function(a){a.registerHelper("if",function(a,b){return d.isFunction(a)&&(a=a.call(this)),!b.hash.includeZero&&!a||d.isEmpty(a)?b.inverse(this):b.fn(this)}),a.registerHelper("unless",function(b,c){return a.helpers["if"].call(this,b,{fn:c.inverse,inverse:c.fn,hash:c.hash})})},a.exports=b["default"]},function(a,b){"use strict";b.__esModule=!0,b["default"]=function(a){a.registerHelper("log",function(){for(var b=[void 0],c=arguments[arguments.length-1],d=0;d<arguments.length-1;d++)b.push(arguments[d]);var e=1;null!=c.hash.level?e=c.hash.level:c.data&&null!=c.data.level&&(e=c.data.level),b[0]=e,a.log.apply(a,b)})},a.exports=b["default"]},function(a,b){"use strict";b.__esModule=!0,b["default"]=function(a){a.registerHelper("lookup",function(a,b){return a&&a[b]})},a.exports=b["default"]},function(a,b,c){"use strict";b.__esModule=!0;var 
d=c(4);b["default"]=function(a){a.registerHelper("with",function(a,b){d.isFunction(a)&&(a=a.call(this));var c=b.fn;if(d.isEmpty(a))return b.inverse(this);var e=b.data;return b.data&&b.ids&&(e=d.createFrame(b.data),e.contextPath=d.appendContextPath(b.data.contextPath,b.ids[0])),c(a,{data:e,blockParams:d.blockParams([a],[e&&e.contextPath])})})},a.exports=b["default"]},function(a,b,c){"use strict";function d(a){g["default"](a)}var e=c(2)["default"];b.__esModule=!0,b.registerDefaultDecorators=d;var f=c(15),g=e(f)},function(a,b,c){"use strict";b.__esModule=!0;var d=c(4);b["default"]=function(a){a.registerDecorator("inline",function(a,b,c,e){var f=a;return b.partials||(b.partials={},f=function(e,f){var g=c.partials;c.partials=d.extend({},g,b.partials);var h=a(e,f);return c.partials=g,h}),b.partials[e.args[0]]=e.fn,f})},a.exports=b["default"]},function(a,b,c){"use strict";b.__esModule=!0;var d=c(4),e={methodMap:["debug","info","warn","error"],level:"info",lookupLevel:function(a){if("string"==typeof a){var b=d.indexOf(e.methodMap,a.toLowerCase());a=b>=0?b:parseInt(a,10)}return a},log:function(a){if(a=e.lookupLevel(a),"undefined"!=typeof console&&e.lookupLevel(e.level)<=a){var b=e.methodMap[a];console[b]||(b="log");for(var c=arguments.length,d=Array(c>1?c-1:0),f=1;c>f;f++)d[f-1]=arguments[f];console[b].apply(console,d)}}};b["default"]=e,a.exports=b["default"]},function(a,b){"use strict";function c(a){this.string=a}b.__esModule=!0,c.prototype.toString=c.prototype.toHTML=function(){return""+this.string},b["default"]=c,a.exports=b["default"]},function(a,b,c){"use strict";function d(a){var b=a&&a[0]||1,c=r.COMPILER_REVISION;if(b!==c){if(c>b){var d=r.REVISION_CHANGES[c],e=r.REVISION_CHANGES[b];throw new q["default"]("Template was precompiled with an older version of Handlebars than the current runtime. Please update your precompiler to a newer version ("+d+") or downgrade your runtime to an older version ("+e+").")}throw new q["default"]("Template was precompiled with a newer version of Handlebars than the current runtime. 
Please update your runtime to a newer version ("+a[1]+").")}}function e(a,b){function c(c,d,e){e.hash&&(d=o.extend({},d,e.hash),e.ids&&(e.ids[0]=!0)),c=b.VM.resolvePartial.call(this,c,d,e);var f=b.VM.invokePartial.call(this,c,d,e);if(null==f&&b.compile&&(e.partials[e.name]=b.compile(c,a.compilerOptions,b),f=e.partials[e.name](d,e)),null!=f){if(e.indent){for(var g=f.split("\n"),h=0,i=g.length;i>h&&(g[h]||h+1!==i);h++)g[h]=e.indent+g[h];f=g.join("\n")}return f}throw new q["default"]("The partial "+e.name+" could not be compiled when running in runtime-only mode")}function d(b){function c(b){return""+a.main(e,b,e.helpers,e.partials,g,i,h)}var f=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],g=f.data;d._setup(f),!f.partial&&a.useData&&(g=j(b,g));var h=void 0,i=a.useBlockParams?[]:void 0;return a.useDepths&&(h=f.depths?b!==f.depths[0]?[b].concat(f.depths):f.depths:[b]),(c=k(a.main,c,e,f.depths||[],g,i))(b,f)}if(!b)throw new q["default"]("No environment passed to template");if(!a||!a.main)throw new q["default"]("Unknown template object: "+typeof a);a.main.decorator=a.main_d,b.VM.checkRevision(a.compiler);var e={strict:function(a,b){if(!(b in a))throw new q["default"]('"'+b+'" not defined in '+a);return a[b]},lookup:function(a,b){for(var c=a.length,d=0;c>d;d++)if(a[d]&&null!=a[d][b])return a[d][b]},lambda:function(a,b){return"function"==typeof a?a.call(b):a},escapeExpression:o.escapeExpression,invokePartial:c,fn:function(b){var c=a[b];return c.decorator=a[b+"_d"],c},programs:[],program:function(a,b,c,d,e){var g=this.programs[a],h=this.fn(a);return b||e||d||c?g=f(this,a,h,b,c,d,e):g||(g=this.programs[a]=f(this,a,h)),g},data:function(a,b){for(;a&&b--;)a=a._parent;return a},merge:function(a,b){var c=a||b;return a&&b&&a!==b&&(c=o.extend({},b,a)),c},noop:b.VM.noop,compilerInfo:a.compiler};return d.isTop=!0,d._setup=function(c){c.partial?(e.helpers=c.helpers,e.partials=c.partials,e.decorators=c.decorators):(e.helpers=e.merge(c.helpers,b.helpers),a.usePartial&&(e.partials=e.merge(c.partials,b.partials)),(a.usePartial||a.useDecorators)&&(e.decorators=e.merge(c.decorators,b.decorators)))},d._child=function(b,c,d,g){if(a.useBlockParams&&!d)throw new q["default"]("must pass block params");if(a.useDepths&&!g)throw new q["default"]("must pass parent depths");return f(e,b,a[b],c,0,d,g)},d}function f(a,b,c,d,e,f,g){function h(b){var e=arguments.length<=1||void 0===arguments[1]?{}:arguments[1],h=g;return g&&b!==g[0]&&(h=[b].concat(g)),c(a,b,a.helpers,a.partials,e.data||d,f&&[e.blockParams].concat(f),h)}return h=k(c,h,a,g,d,f),h.program=b,h.depth=g?g.length:0,h.blockParams=e||0,h}function g(a,b,c){return a?a.call||c.name||(c.name=a,a=c.partials[a]):a="@partial-block"===c.name?c.data["partial-block"]:c.partials[c.name],a}function h(a,b,c){c.partial=!0,c.ids&&(c.data.contextPath=c.ids[0]||c.data.contextPath);var d=void 0;if(c.fn&&c.fn!==i&&(c.data=r.createFrame(c.data),d=c.data["partial-block"]=c.fn,d.partials&&(c.partials=o.extend({},c.partials,d.partials))),void 0===a&&d&&(a=d),void 0===a)throw new q["default"]("The partial "+c.name+" could not be found");return a instanceof Function?a(b,c):void 0}function i(){return""}function j(a,b){return b&&"root"in b||(b=b?r.createFrame(b):{},b.root=a),b}function k(a,b,c,d,e,f){if(a.decorator){var g={};b=a.decorator(b,g,c,d&&d[0],e,f,d),o.extend(b,g)}return b}var l=c(1)["default"],m=c(2)["default"];b.__esModule=!0,b.checkRevision=d,b.template=e,b.wrapProgram=f,b.resolvePartial=g,b.invokePartial=h,b.noop=i;var 
n=c(4),o=l(n),p=c(5),q=m(p),r=c(3)},function(a,b){(function(c){"use strict";b.__esModule=!0,b["default"]=function(a){var b="undefined"!=typeof c?c:window,d=b.Handlebars;a.noConflict=function(){return b.Handlebars===a&&(b.Handlebars=d),a}},a.exports=b["default"]}).call(b,function(){return this}())}])});
|
PypiClean
|
/iredis_bin-1.9.4-cp310-cp310-manylinux2010_x86_64.whl/iredis_bin-1.9.4.data/scripts/lib/curses/ascii.py
|
NUL = 0x00 # ^@
SOH = 0x01 # ^A
STX = 0x02 # ^B
ETX = 0x03 # ^C
EOT = 0x04 # ^D
ENQ = 0x05 # ^E
ACK = 0x06 # ^F
BEL = 0x07 # ^G
BS = 0x08 # ^H
TAB = 0x09 # ^I
HT = 0x09 # ^I
LF = 0x0a # ^J
NL = 0x0a # ^J
VT = 0x0b # ^K
FF = 0x0c # ^L
CR = 0x0d # ^M
SO = 0x0e # ^N
SI = 0x0f # ^O
DLE = 0x10 # ^P
DC1 = 0x11 # ^Q
DC2 = 0x12 # ^R
DC3 = 0x13 # ^S
DC4 = 0x14 # ^T
NAK = 0x15 # ^U
SYN = 0x16 # ^V
ETB = 0x17 # ^W
CAN = 0x18 # ^X
EM = 0x19 # ^Y
SUB = 0x1a # ^Z
ESC = 0x1b # ^[
FS = 0x1c # ^\
GS = 0x1d # ^]
RS = 0x1e # ^^
US = 0x1f # ^_
SP = 0x20 # space
DEL = 0x7f # delete
controlnames = [
"NUL", "SOH", "STX", "ETX", "EOT", "ENQ", "ACK", "BEL",
"BS", "HT", "LF", "VT", "FF", "CR", "SO", "SI",
"DLE", "DC1", "DC2", "DC3", "DC4", "NAK", "SYN", "ETB",
"CAN", "EM", "SUB", "ESC", "FS", "GS", "RS", "US",
"SP"
]
def _ctoi(c):
if type(c) == type(""):
return ord(c)
else:
return c
def isalnum(c): return isalpha(c) or isdigit(c)
def isalpha(c): return isupper(c) or islower(c)
def isascii(c): return 0 <= _ctoi(c) <= 127 # ?
def isblank(c): return _ctoi(c) in (9, 32)
def iscntrl(c): return 0 <= _ctoi(c) <= 31 or _ctoi(c) == 127
def isdigit(c): return 48 <= _ctoi(c) <= 57
def isgraph(c): return 33 <= _ctoi(c) <= 126
def islower(c): return 97 <= _ctoi(c) <= 122
def isprint(c): return 32 <= _ctoi(c) <= 126
def ispunct(c): return isgraph(c) and not isalnum(c)
def isspace(c): return _ctoi(c) in (9, 10, 11, 12, 13, 32)
def isupper(c): return 65 <= _ctoi(c) <= 90
def isxdigit(c): return isdigit(c) or \
(65 <= _ctoi(c) <= 70) or (97 <= _ctoi(c) <= 102)
def isctrl(c): return 0 <= _ctoi(c) < 32
def ismeta(c): return _ctoi(c) > 127
def ascii(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x7f)
else:
return _ctoi(c) & 0x7f
def ctrl(c):
if type(c) == type(""):
return chr(_ctoi(c) & 0x1f)
else:
return _ctoi(c) & 0x1f
def alt(c):
if type(c) == type(""):
return chr(_ctoi(c) | 0x80)
else:
return _ctoi(c) | 0x80
def unctrl(c):
bits = _ctoi(c)
if bits == 0x7f:
rep = "^?"
elif isprint(bits & 0x7f):
rep = chr(bits & 0x7f)
else:
rep = "^" + chr(((bits & 0x7f) | 0x20) + 0x20)
if bits & 0x80:
return "!" + rep
return rep
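# Illustrative examples of unctrl's mapping (comments added for clarity; they are
# not part of the original module):
#   unctrl('\x01') -> '^A'    control characters use caret notation
#   unctrl('\x7f') -> '^?'    DEL is special-cased
#   unctrl('\x81') -> '!^A'   the meta (0x80) bit is rendered as a leading '!'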
|
PypiClean
|
/block.bootstrap.pytorch-0.1.6.tar.gz/block.bootstrap.pytorch-0.1.6/block/models/metrics/compute_oe_accuracy.py
|
import argparse
import json
import random
import os
from os.path import join
import sys
from bootstrap.lib.logger import Logger
from block.external.VQA.PythonHelperTools.vqaTools.vqa import VQA
from block.external.VQA.PythonEvaluationTools.vqaEvaluation.vqaEval import VQAEval
def real_split_name(split):
if split in ['train', 'val']:
return split+'2014'
elif split == 'test':
return split+'2015'
elif split == 'testdev':
return 'test-dev2015'
else:
raise ValueError()
def main(dir_vqa, dir_exp, dir_rslt, epoch, split, cmd_line=True, logs_name="logs", rm=True):
real_split = real_split_name(split)
if cmd_line:
Logger(dir_exp, name=f'{logs_name}_{split}_oe')
diranno = join(dir_vqa, 'raw', 'annotations')
annFile = join(diranno, 'mscoco_%s_annotations.json' % (real_split))
quesFile = join(diranno, 'OpenEnded_mscoco_%s_questions.json' % (real_split))
vqa = VQA(annFile, quesFile)
taskType = 'OpenEnded'
dataType = 'mscoco'
dataSubType = real_split
resultType = 'model'
fileTypes = ['results', 'accuracy', 'evalQA', 'evalQuesType', 'evalAnsType']
[resFile, accuracyFile, evalQAFile, evalQuesTypeFile, evalAnsTypeFile] = \
['%s/%s_%s_%s_%s_%s.json' % (dir_rslt, taskType, dataType,
dataSubType, resultType, fileType) for fileType in fileTypes]
vqaRes = vqa.loadRes(resFile, quesFile)
vqaEval = VQAEval(vqa, vqaRes, n=2)
quesIds = [int(d['question_id']) for d in json.loads(open(resFile).read())]
# if split != 'train':
# annQuesIds = [int(d['question_id']) for d in json.loads(open(annFile).read())['annotations']]
# assert len(set(quesIds) - set(annQuesIds)) == 0, "Some questions in results are not in annotations"
# assert len(set(annQuesIds) - set(quesIds)) == 0, "Some questions in annotations are not in results"
vqaEval.evaluate(quesIds=quesIds)
mode = 'train' if 'train' in split else 'eval'
Logger().log_value(f'{mode}_epoch.epoch', epoch)
Logger().log_value(f'{mode}_epoch.overall', vqaEval.accuracy['overall'])
for key in vqaEval.accuracy['perQuestionType']:
rkey = key.replace(' ', '_')
Logger().log_value(f'{mode}_epoch.perQuestionType.{rkey}', vqaEval.accuracy['perQuestionType'][key])
for key in vqaEval.accuracy['perAnswerType']:
rkey = key.replace(' ', '_')
Logger().log_value(f'{mode}_epoch.perAnswerType.{rkey}', vqaEval.accuracy['perAnswerType'][key])
Logger().flush()
json.dump(vqaEval.accuracy, open(accuracyFile, 'w'))
if rm:
os.system('rm -rf '+dir_rslt)
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dir_vqa', type=str, default='/local/cadene/data/vqa')
parser.add_argument('--dir_exp', type=str, default='logs/16_12_13_20:39:55/')
parser.add_argument('--dir_rslt', type=str, default='logs/16_12_13_20:39:55/results/train/epoch,1')
parser.add_argument('--epoch', type=int, default=1)
parser.add_argument('--split', type=str, default='train')
parser.add_argument('--logs_name', type=str, default='logs')
parser.add_argument('--rm', type=int, default=1)
args = parser.parse_args()
main(args.dir_vqa, args.dir_exp, args.dir_rslt, args.epoch, args.split, logs_name=args.logs_name, rm=args.rm)
#json.dump(vqaEval.evalQA, open(evalQAFile, 'w'))
#json.dump(vqaEval.evalQuesType, open(evalQuesTypeFile, 'w'))
#json.dump(vqaEval.evalAnsType, open(evalAnsTypeFile, 'w'))
|
PypiClean
|
/Super_ML-0.0.3-py3-none-any.whl/auto_machine_learning/hyperparameter_optimization/hpo.py
|
from sklearn.model_selection import train_test_split
from auto_machine_learning.utils import get_model, get_features
from auto_machine_learning.hyperparameter_optimization.hpo_methods import *
#---------------------------------------------------------------------------------------------------------------------#
def get_trained_model(dataset, label, model_name, task, method_name='standard', max_evals=100, test_size=0.3, random_state=1):
'''
    Train the given model on the data using the chosen hyperparameter optimization (HPO) method and return the trained model.
    Parameters:
        dataset (dataframe) : data to be used for training the model
        label (string) : target column of the dataframe
        model_name (string) : name of the model to be trained on the data
        task (string) : type of task
        method_name (string) : name of the hyperparameter optimization method to be used while training
        max_evals (int) : maximum number of evaluations for the optimizer
        test_size (float) : fraction of the data to be used for testing
        random_state (int) : random state to be used
    Returns:
        model (model object) : the trained model on which HPO has been performed
'''
features = get_features(dataset, label)
X, Y = dataset[features], dataset[label]
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state = random_state)
model = get_model(model_name)
if method_name=='standard':
model=model()
trained_model = model.fit(X_train,Y_train)
print('Standard Model without HPO')
print('Model Trained')
return trained_model
elif method_name == 'grid_search':
trained_model = grid_search(model, X_train, Y_train)
elif method_name == 'random_search':
trained_model = random_search(model, X_train, Y_train)
# elif method_name == 'bayesian_gp':
# trained_model = bayesian_gp(model, X_train, Y_train)
elif method_name == 'bayesian_tpe':
trained_model = bayesian_tpe(model, X_train, X_test, Y_train, Y_test, task, max_evals=max_evals)
else:
        raise Exception('No hpo method named {}'.format(method_name))
print('Hyperparameters Optimized')
print('Model Trained')
return trained_model
#---------------------------------------------------------------------------------------------------------------------#
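# Illustrative usage sketch (not part of this module; the CSV path, target column
# and model name below are hypothetical placeholders, and the model name must be
# one accepted by auto_machine_learning.utils.get_model):
#   import pandas as pd
#   df = pd.read_csv('data.csv')
#   model = get_trained_model(df, label='target', model_name='LogisticRegression',
#                             task='classification', method_name='grid_search')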
|
PypiClean
|
/odooku_odoo_purchase-11.0.7-py35-none-any.whl/odoo/addons/purchase/models/stock.py
|
from odoo import api, fields, models, _
from odoo.exceptions import UserError
class StockPicking(models.Model):
_inherit = 'stock.picking'
purchase_id = fields.Many2one('purchase.order', related='move_lines.purchase_line_id.order_id',
string="Purchase Orders", readonly=True)
class StockMove(models.Model):
_inherit = 'stock.move'
purchase_line_id = fields.Many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', index=True, readonly=True)
created_purchase_line_id = fields.Many2one('purchase.order.line',
'Created Purchase Order Line', ondelete='set null', readonly=True, copy=False)
@api.model
def _prepare_merge_moves_distinct_fields(self):
distinct_fields = super(StockMove, self)._prepare_merge_moves_distinct_fields()
distinct_fields += ['purchase_line_id', 'created_purchase_line_id']
return distinct_fields
@api.model
def _prepare_merge_move_sort_method(self, move):
move.ensure_one()
keys_sorted = super(StockMove, self)._prepare_merge_move_sort_method(move)
keys_sorted += [move.purchase_line_id.id, move.created_purchase_line_id.id]
return keys_sorted
@api.multi
def _get_price_unit(self):
""" Returns the unit price for the move"""
self.ensure_one()
if self.purchase_line_id and self.product_id.id == self.purchase_line_id.product_id.id:
line = self.purchase_line_id
order = line.order_id
price_unit = line.price_unit
if line.taxes_id:
price_unit = line.taxes_id.with_context(round=False).compute_all(price_unit, currency=line.order_id.currency_id, quantity=1.0)['total_excluded']
if line.product_uom.id != line.product_id.uom_id.id:
price_unit *= line.product_uom.factor / line.product_id.uom_id.factor
if order.currency_id != order.company_id.currency_id:
price_unit = order.currency_id.compute(price_unit, order.company_id.currency_id, round=False)
return price_unit
return super(StockMove, self)._get_price_unit()
def _prepare_extra_move_vals(self, qty):
vals = super(StockMove, self)._prepare_extra_move_vals(qty)
vals['purchase_line_id'] = self.purchase_line_id.id
return vals
def _prepare_move_split_vals(self, uom_qty):
vals = super(StockMove, self)._prepare_move_split_vals(uom_qty)
vals['purchase_line_id'] = self.purchase_line_id.id
return vals
def _clean_merged(self):
super(StockMove, self)._clean_merged()
self.write({'created_purchase_line_id': False})
def _action_cancel(self):
for move in self:
if move.created_purchase_line_id:
try:
activity_type_id = self.env.ref('mail.mail_activity_data_todo').id
except ValueError:
activity_type_id = False
self.env['mail.activity'].create({
'activity_type_id': activity_type_id,
'note': _('A sale order that generated this purchase order has been deleted. Check if an action is needed.'),
'user_id': move.created_purchase_line_id.product_id.responsible_id.id,
'res_id': move.created_purchase_line_id.order_id.id,
'res_model_id': self.env.ref('purchase.model_purchase_order').id,
})
return super(StockMove, self)._action_cancel()
class StockWarehouse(models.Model):
_inherit = 'stock.warehouse'
buy_to_resupply = fields.Boolean('Purchase to resupply this warehouse', default=True,
help="When products are bought, they can be delivered to this warehouse")
buy_pull_id = fields.Many2one('procurement.rule', 'Buy rule')
@api.multi
def _get_buy_pull_rule(self):
try:
buy_route_id = self.env['ir.model.data'].get_object_reference('purchase', 'route_warehouse0_buy')[1]
except:
buy_route_id = self.env['stock.location.route'].search([('name', 'like', _('Buy'))])
buy_route_id = buy_route_id[0].id if buy_route_id else False
if not buy_route_id:
raise UserError(_("Can't find any generic Buy route."))
return {
'name': self._format_routename(_(' Buy')),
'location_id': self.in_type_id.default_location_dest_id.id,
'route_id': buy_route_id,
'action': 'buy',
'picking_type_id': self.in_type_id.id,
'warehouse_id': self.id,
'group_propagation_option': 'none',
}
@api.multi
def create_routes(self):
res = super(StockWarehouse, self).create_routes() # super applies ensure_one()
if self.buy_to_resupply:
buy_pull_vals = self._get_buy_pull_rule()
buy_pull = self.env['procurement.rule'].create(buy_pull_vals)
res['buy_pull_id'] = buy_pull.id
return res
@api.multi
def write(self, vals):
if 'buy_to_resupply' in vals:
if vals.get("buy_to_resupply"):
for warehouse in self:
if not warehouse.buy_pull_id:
buy_pull_vals = self._get_buy_pull_rule()
buy_pull = self.env['procurement.rule'].create(buy_pull_vals)
vals['buy_pull_id'] = buy_pull.id
else:
for warehouse in self:
if warehouse.buy_pull_id:
warehouse.buy_pull_id.unlink()
return super(StockWarehouse, self).write(vals)
@api.multi
def _get_all_routes(self):
routes = super(StockWarehouse, self).get_all_routes_for_wh()
routes |= self.filtered(lambda self: self.buy_to_resupply and self.buy_pull_id and self.buy_pull_id.route_id).mapped('buy_pull_id').mapped('route_id')
return routes
@api.multi
def _update_name_and_code(self, name=False, code=False):
res = super(StockWarehouse, self)._update_name_and_code(name, code)
warehouse = self[0]
#change the buy procurement rule name
if warehouse.buy_pull_id and name:
warehouse.buy_pull_id.write({'name': warehouse.buy_pull_id.name.replace(warehouse.name, name, 1)})
return res
@api.multi
def _update_routes(self):
res = super(StockWarehouse, self)._update_routes()
for warehouse in self:
if warehouse.in_type_id.default_location_dest_id != warehouse.buy_pull_id.location_id:
warehouse.buy_pull_id.write({'location_id': warehouse.in_type_id.default_location_dest_id.id})
return res
class ReturnPicking(models.TransientModel):
_inherit = "stock.return.picking"
def _prepare_move_default_values(self, return_line, new_picking):
vals = super(ReturnPicking, self)._prepare_move_default_values(return_line, new_picking)
vals['purchase_line_id'] = return_line.move_id.purchase_line_id.id
return vals
class Orderpoint(models.Model):
_inherit = "stock.warehouse.orderpoint"
def _quantity_in_progress(self):
res = super(Orderpoint, self)._quantity_in_progress()
for poline in self.env['purchase.order.line'].search([('state','in',('draft','sent','to approve')),('orderpoint_id','in',self.ids)]):
res[poline.orderpoint_id.id] += poline.product_uom._compute_quantity(poline.product_qty, poline.orderpoint_id.product_uom, round=False)
return res
def action_view_purchase(self):
""" This function returns an action that display existing
purchase orders of given orderpoint.
"""
action = self.env.ref('purchase.purchase_rfq')
result = action.read()[0]
        # Remove the context since the action basically displays RFQs and not POs.
result['context'] = {}
order_line_ids = self.env['purchase.order.line'].search([('orderpoint_id', '=', self.id)])
purchase_ids = order_line_ids.mapped('order_id')
result['domain'] = "[('id','in',%s)]" % (purchase_ids.ids)
return result
class PushedFlow(models.Model):
_inherit = "stock.location.path"
def _prepare_move_copy_values(self, move_to_copy, new_date):
res = super(PushedFlow, self)._prepare_move_copy_values(move_to_copy, new_date)
res['purchase_line_id'] = None
return res
|
PypiClean
|
/pytest-yapf3-0.7.0.tar.gz/pytest-yapf3-0.7.0/README.md
|
# pytest-yapf3
<!-- [](https://travis-ci.org/yanqd0/pytest-yapf3) -->
<!-- [](https://ci.appveyor.com/project/yanqd0/pytest-yapf3/branch/master) -->
<!-- [](https://codecov.io/gh/yanqd0/pytest-yapf3) -->
[](https://github.com/yanqd0/pytest-yapf3/actions/workflows/python-pytest.yml)
[](https://bestpractices.coreinfrastructure.org/projects/3446)
[](https://github.com/google/yapf)
Validate your Python file format with yapf.
This is a [pytest] plugin
which makes sure your Python files are formatted exactly as yapf expects;
otherwise running `pytest` will fail.
[pytest]:https://pytest.org/
## Install
[](https://pypi.org/project/pytest-yapf3/)
[](https://pypi.org/project/pytest-yapf3/)
[](https://pypi.org/project/pytest-yapf3/)
[](https://pypi.org/classifiers/)
[](https://pypi.org/project/pytest-yapf3/)
[](https://github.com/yanqd0/pytest-yapf3/blob/master/LICENSE)
```sh
pip install pytest-yapf3
```
Requires:
- Python 3.6 or above
- Pytest:
- `pytest>=5.4,<8` when `pytest-yapf3<0.7`
- `pytest>=7` when `pytest-yapf3>=0.7`
If you need to use this with Python 3.5 or a pytest older than 5.4, pin `'pytest-yapf3<0.6.0'`, for example:
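```sh
pip install 'pytest-yapf3<0.6.0'
```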
## Usage
Modify `setup.cfg` (or `pytest.ini`):
```ini
[tool:pytest]
addopts =
--yapf
--yapfdiff
yapf-ignore =
setup.py
src/package/auto_generated/**.py
```
Add `--yapf` to [pytest] configuration `addopts`.
If run with `pytest -m yapf`, only `yapf` is checked.
By default, only line summaries are displayed.
With the optional `--yapfdiff`, the full diff produced by `yapf -d` is displayed.
If not configured here, `pytest --yapfdiff` will also work.
An optional `yapf-ignore` is supported.
Each line specifies a glob pattern of files that should not be checked by `yapf`.
`pytest` caches the successful results of pytest-yapf3 and will not check files again if they have not changed.
Sometimes you may want to disable it.
There are 2 ways:
- `pytest -p no:cacheprovider`
This may break if some other pytest plugin does not support running without the cache provider.
- `rm -rf .pytest_cache/v/yapf`
This is ugly, but safe for any environment.
## Features and Todos
- [x] Basic support to validate `yapf`.
- [x] Fix the diff line count error and improve the performance.
- [x] Display `YAPF-check` as the error session name.
- [x] Display `YAPF` in `pytest --verbose`.
- [x] Add `yapf` as a marker to enable `pytest -m yapf`.
- [x] Support `yapf-ignore` to ignore specified files.
- [x] Skip running if a file is not changed.
- [x] 100% test coverage.
## Develop
Prepare the environment:
```sh
pipenv install --dev
pipenv shell
```
Run test:
```sh
pytest
```
## Badge
Like [black], if your project always passes the yapf checking provided by this project,
you can use an unofficial badge to show off.
[black]:https://github.com/psf/black#show-your-style
### Markdown (README.md)
```markdown
[](https://github.com/google/yapf)
```
### reStructuredText (README.rst)
```rst
.. image:: https://img.shields.io/badge/code%20style-yapf-blue
:target: https://github.com/google/yapf
:alt: Code style: yapf
```
## License
> The MIT License (MIT)
>
> Copyright (c) 2019~2022 Yan QiDong
This repository was forked from [pytest-yapf] in 2019, which has been [not maintained] since 2017.
Besides adding extra features, this fork adjusts the project structure
and enhances the code to pass linters such as flake8, pylint and, of course, yapf.
The `3` in `pytest-yapf3` means this package supports Python 3 only.
[pytest-yapf]:https://github.com/django-stars/pytest-yapf
[not maintained]:https://github.com/django-stars/pytest-yapf/issues/1
|
PypiClean
|
/encuentro-4.0.tar.gz/encuentro-4.0/external/youtube-dl/youtube_dl/extractor/mtv.py
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_str,
)
from ..utils import (
ExtractorError,
find_xpath_attr,
fix_xml_ampersands,
HEADRequest,
sanitized_Request,
unescapeHTML,
url_basename,
RegexNotFoundError,
)
def _media_xml_tag(tag):
return '{http://search.yahoo.com/mrss/}%s' % tag
class MTVServicesInfoExtractor(InfoExtractor):
_MOBILE_TEMPLATE = None
_LANG = None
@staticmethod
def _id_from_uri(uri):
return uri.split(':')[-1]
# This was originally implemented for ComedyCentral, but it also works here
@staticmethod
def _transform_rtmp_url(rtmp_video_url):
m = re.match(r'^rtmpe?://.*?/(?P<finalid>gsp\..+?/.*)$', rtmp_video_url)
if not m:
return rtmp_video_url
base = 'http://viacommtvstrmfs.fplive.net/'
return base + m.group('finalid')
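        # Illustrative example of this rewrite (the source host and path are
        # hypothetical, shown only to clarify what the regex above captures):
        #   rtmpe://cp12345.edgefcs.net/ondemand/gsp.comedystor/com/video.mp4
        #   -> http://viacommtvstrmfs.fplive.net/gsp.comedystor/com/video.mp4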
def _get_feed_url(self, uri):
return self._FEED_URL
def _get_thumbnail_url(self, uri, itemdoc):
search_path = '%s/%s' % (_media_xml_tag('group'), _media_xml_tag('thumbnail'))
thumb_node = itemdoc.find(search_path)
if thumb_node is None:
return None
else:
return thumb_node.attrib['url']
def _extract_mobile_video_formats(self, mtvn_id):
webpage_url = self._MOBILE_TEMPLATE % mtvn_id
req = sanitized_Request(webpage_url)
# Otherwise we get a webpage that would execute some javascript
req.add_header('User-Agent', 'curl/7')
webpage = self._download_webpage(req, mtvn_id,
'Downloading mobile page')
metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
req = HEADRequest(metrics_url)
response = self._request_webpage(req, mtvn_id, 'Resolving url')
url = response.geturl()
# Transform the url to get the best quality:
url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
return [{'url': url, 'ext': 'mp4'}]
def _extract_video_formats(self, mdoc, mtvn_id):
if re.match(r'.*/(error_country_block\.swf|geoblock\.mp4|copyright_error\.flv(?:\?geo\b.+?)?)$', mdoc.find('.//src').text) is not None:
if mtvn_id is not None and self._MOBILE_TEMPLATE is not None:
self.to_screen('The normal version is not available from your '
'country, trying with the mobile version')
return self._extract_mobile_video_formats(mtvn_id)
raise ExtractorError('This video is not available from your country.',
expected=True)
formats = []
for rendition in mdoc.findall('.//rendition'):
try:
_, _, ext = rendition.attrib['type'].partition('/')
rtmp_video_url = rendition.find('./src').text
if rtmp_video_url.endswith('siteunavail.png'):
continue
formats.append({
'ext': ext,
'url': self._transform_rtmp_url(rtmp_video_url),
'format_id': rendition.get('bitrate'),
'width': int(rendition.get('width')),
'height': int(rendition.get('height')),
})
except (KeyError, TypeError):
raise ExtractorError('Invalid rendition field.')
self._sort_formats(formats)
return formats
def _extract_subtitles(self, mdoc, mtvn_id):
subtitles = {}
for transcript in mdoc.findall('.//transcript'):
if transcript.get('kind') != 'captions':
continue
lang = transcript.get('srclang')
subtitles[lang] = [{
'url': compat_str(typographic.get('src')),
'ext': typographic.get('format')
} for typographic in transcript.findall('./typographic')]
return subtitles
def _get_video_info(self, itemdoc):
uri = itemdoc.find('guid').text
video_id = self._id_from_uri(uri)
self.report_extraction(video_id)
mediagen_url = itemdoc.find('%s/%s' % (_media_xml_tag('group'), _media_xml_tag('content'))).attrib['url']
# Remove the templates, like &device={device}
mediagen_url = re.sub(r'&[^=]*?={.*?}(?=(&|$))', '', mediagen_url)
if 'acceptMethods' not in mediagen_url:
mediagen_url += '&' if '?' in mediagen_url else '?'
mediagen_url += 'acceptMethods=fms'
mediagen_doc = self._download_xml(mediagen_url, video_id,
'Downloading video urls')
item = mediagen_doc.find('./video/item')
if item is not None and item.get('type') == 'text':
message = '%s returned error: ' % self.IE_NAME
if item.get('code') is not None:
message += '%s - ' % item.get('code')
message += item.text
raise ExtractorError(message, expected=True)
description_node = itemdoc.find('description')
if description_node is not None:
description = description_node.text.strip()
else:
description = None
title_el = None
if title_el is None:
title_el = find_xpath_attr(
itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:video_title')
if title_el is None:
title_el = itemdoc.find('.//{http://search.yahoo.com/mrss/}title')
if title_el is None:
title_el = itemdoc.find('.//title') or itemdoc.find('./title')
if title_el.text is None:
title_el = None
title = title_el.text
if title is None:
raise ExtractorError('Could not find video title')
title = title.strip()
# This a short id that's used in the webpage urls
mtvn_id = None
mtvn_id_node = find_xpath_attr(itemdoc, './/{http://search.yahoo.com/mrss/}category',
'scheme', 'urn:mtvn:id')
if mtvn_id_node is not None:
mtvn_id = mtvn_id_node.text
return {
'title': title,
'formats': self._extract_video_formats(mediagen_doc, mtvn_id),
'subtitles': self._extract_subtitles(mediagen_doc, mtvn_id),
'id': video_id,
'thumbnail': self._get_thumbnail_url(uri, itemdoc),
'description': description,
}
def _get_videos_info(self, uri):
video_id = self._id_from_uri(uri)
feed_url = self._get_feed_url(uri)
data = compat_urllib_parse.urlencode({'uri': uri})
info_url = feed_url + '?'
if self._LANG:
info_url += 'lang=%s&' % self._LANG
info_url += data
return self._get_videos_info_from_url(info_url, video_id)
def _get_videos_info_from_url(self, url, video_id):
idoc = self._download_xml(
url, video_id,
'Downloading info', transform_source=fix_xml_ampersands)
return self.playlist_result(
[self._get_video_info(item) for item in idoc.findall('.//item')])
def _real_extract(self, url):
title = url_basename(url)
webpage = self._download_webpage(url, title)
try:
# the url can be http://media.mtvnservices.com/fb/{mgid}.swf
# or http://media.mtvnservices.com/{mgid}
og_url = self._og_search_video_url(webpage)
mgid = url_basename(og_url)
if mgid.endswith('.swf'):
mgid = mgid[:-4]
except RegexNotFoundError:
mgid = None
if mgid is None or ':' not in mgid:
mgid = self._search_regex(
[r'data-mgid="(.*?)"', r'swfobject.embedSWF\(".*?(mgid:.*?)"'],
webpage, 'mgid', default=None)
if not mgid:
sm4_embed = self._html_search_meta(
'sm4:video:embed', webpage, 'sm4 embed', default='')
mgid = self._search_regex(
r'embed/(mgid:.+?)["\'&?/]', sm4_embed, 'mgid')
videos_info = self._get_videos_info(mgid)
return videos_info
class MTVServicesEmbeddedIE(MTVServicesInfoExtractor):
IE_NAME = 'mtvservices:embedded'
_VALID_URL = r'https?://media\.mtvnservices\.com/embed/(?P<mgid>.+?)(\?|/|$)'
_TEST = {
# From http://www.thewrap.com/peter-dinklage-sums-up-game-of-thrones-in-45-seconds-video/
'url': 'http://media.mtvnservices.com/embed/mgid:uma:video:mtv.com:1043906/cp~vid%3D1043906%26uri%3Dmgid%3Auma%3Avideo%3Amtv.com%3A1043906',
'md5': 'cb349b21a7897164cede95bd7bf3fbb9',
'info_dict': {
'id': '1043906',
'ext': 'mp4',
'title': 'Peter Dinklage Sums Up \'Game Of Thrones\' In 45 Seconds',
'description': '"Sexy sexy sexy, stabby stabby stabby, beautiful language," says Peter Dinklage as he tries summarizing "Game of Thrones" in under a minute.',
},
}
@staticmethod
def _extract_url(webpage):
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//media.mtvnservices.com/embed/.+?)\1', webpage)
if mobj:
return mobj.group('url')
def _get_feed_url(self, uri):
video_id = self._id_from_uri(uri)
site_id = uri.replace(video_id, '')
config_url = ('http://media.mtvnservices.com/pmt/e1/players/{0}/'
'context4/context5/config.xml'.format(site_id))
config_doc = self._download_xml(config_url, video_id)
feed_node = config_doc.find('.//feed')
feed_url = feed_node.text.strip().split('?')[0]
return feed_url
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
mgid = mobj.group('mgid')
return self._get_videos_info(mgid)
class MTVIE(MTVServicesInfoExtractor):
_VALID_URL = r'''(?x)^https?://
(?:(?:www\.)?mtv\.com/videos/.+?/(?P<videoid>[0-9]+)/[^/]+$|
m\.mtv\.com/videos/video\.rbml\?.*?id=(?P<mgid>[^&]+))'''
_FEED_URL = 'http://www.mtv.com/player/embed/AS3/rss/'
_TESTS = [
{
'url': 'http://www.mtv.com/videos/misc/853555/ours-vh1-storytellers.jhtml',
'md5': '850f3f143316b1e71fa56a4edfd6e0f8',
'info_dict': {
'id': '853555',
'ext': 'mp4',
'title': 'Taylor Swift - "Ours (VH1 Storytellers)"',
'description': 'Album: Taylor Swift performs "Ours" for VH1 Storytellers at Harvey Mudd College.',
},
},
]
def _get_thumbnail_url(self, uri, itemdoc):
return 'http://mtv.mtvnimages.com/uri/' + uri
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('videoid')
uri = mobj.groupdict().get('mgid')
if uri is None:
webpage = self._download_webpage(url, video_id)
# Some videos come from Vevo.com
m_vevo = re.search(
r'(?s)isVevoVideo = true;.*?vevoVideoId = "(.*?)";', webpage)
if m_vevo:
vevo_id = m_vevo.group(1)
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
uri = self._html_search_regex(r'/uri/(.*?)\?', webpage, 'uri')
return self._get_videos_info(uri)
class MTVIggyIE(MTVServicesInfoExtractor):
IE_NAME = 'mtviggy.com'
_VALID_URL = r'https?://www\.mtviggy\.com/videos/.+'
_TEST = {
'url': 'http://www.mtviggy.com/videos/arcade-fire-behind-the-scenes-at-the-biggest-music-experiment-yet/',
'info_dict': {
'id': '984696',
'ext': 'mp4',
'title': 'Arcade Fire: Behind the Scenes at the Biggest Music Experiment Yet',
}
}
_FEED_URL = 'http://all.mtvworldverticals.com/feed-xml/'
class MTVDEIE(MTVServicesInfoExtractor):
IE_NAME = 'mtv.de'
_VALID_URL = r'https?://(?:www\.)?mtv\.de/(?:artists|shows|news)/(?:[^/]+/)*(?P<id>\d+)-[^/#?]+/*(?:[#?].*)?$'
_TESTS = [{
'url': 'http://www.mtv.de/artists/10571-cro/videos/61131-traum',
'info_dict': {
'id': 'music_video-a50bc5f0b3aa4b3190aa',
'ext': 'mp4',
'title': 'MusicVideo_cro-traum',
'description': 'Cro - Traum',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# mediagen URL without query (e.g. http://videos.mtvnn.com/mediagen/e865da714c166d18d6f80893195fcb97)
'url': 'http://www.mtv.de/shows/933-teen-mom-2/staffeln/5353/folgen/63565-enthullungen',
'info_dict': {
'id': 'local_playlist-f5ae778b9832cc837189',
'ext': 'mp4',
'title': 'Episode_teen-mom-2_shows_season-5_episode-1_full-episode_part1',
},
'params': {
# rtmp download
'skip_download': True,
},
}, {
# single video in pagePlaylist with different id
'url': 'http://www.mtv.de/news/77491-mtv-movies-spotlight-pixels-teil-3',
'info_dict': {
'id': 'local_playlist-4e760566473c4c8c5344',
'ext': 'mp4',
'title': 'Article_mtv-movies-spotlight-pixels-teil-3_short-clips_part1',
'description': 'MTV Movies Supercut',
},
'params': {
# rtmp download
'skip_download': True,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
playlist = self._parse_json(
self._search_regex(
r'window\.pagePlaylist\s*=\s*(\[.+?\]);\n', webpage, 'page playlist'),
video_id)
# news pages contain single video in playlist with different id
if len(playlist) == 1:
return self._get_videos_info_from_url(playlist[0]['mrss'], video_id)
for item in playlist:
item_id = item.get('id')
if item_id and compat_str(item_id) == video_id:
return self._get_videos_info_from_url(item['mrss'], video_id)
|
PypiClean
|
/kipartman-0.6.2.tar.gz/kipartman-0.6.2/kipartbase/__main__.py
|
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
#sys.path.append(os.path.join(os.path.dirname(__file__), 'kipartbase'))
import connexion
from swagger_server.encoder import JSONEncoder
from os.path import expanduser
import argparse
home = expanduser("~")
def migrate():
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(["manage.py", "migrate"])
def serve(args=None):
app = connexion.App(__name__, specification_dir='./swagger_server/swagger/')
app.app.json_encoder = JSONEncoder
app.add_api('swagger.yaml', arguments={'title': 'Kipartman api specifications'})
# add a static file server
from flask import Flask, request, send_from_directory
@app.route('/file/<path:path>')
def send_js(path):
        return send_from_directory(os.path.join(os.environ['DATA_DIR'], 'storage'), path)
app.run(port=8200, debug=True)
def main(args=None):
"""The main routine."""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(description='Kipartbase, the kicad part manager backend')
parser.add_argument('-d', '--data', help='data dir (default: ~/.kipartman)')
args = parser.parse_args(args)
if args.data:
os.environ['DATA_DIR'] = args.data
else:
os.environ['DATA_DIR'] = os.getenv('KIPARTBASE_PATH', os.path.join(os.path.expanduser("~"), '.kipartman'))
    if not os.path.exists(os.environ['DATA_DIR']):
os.mkdir(os.environ['DATA_DIR'])
os.chdir(os.path.dirname(__file__))
# do django migrations
migrate()
# serve api
serve()
if __name__ == '__main__':
main()
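# Usage sketch (illustrative, not part of the original module): start the backend with a
# custom data directory, e.g.
#   python -m kipartbase --data /srv/kipartman
# which runs the Django migrations and then serves the API on port 8200.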
|
PypiClean
|
/FlexGet-3.9.6-py3-none-any.whl/flexget/components/notify/notifiers/cronitor.py
|
import socket
from loguru import logger
from requests.exceptions import RequestException
from flexget import plugin
from flexget.event import event
from flexget.plugin import PluginWarning
from flexget.utils.requests import Session as RequestSession
requests = RequestSession(max_retries=3)
plugin_name = "cronitor"
logger = logger.bind(name=plugin_name)
class Cronitor:
"""
Example::
cronitor: ABC123
Or:
cronitor:
monitor_code: ABC123
on_start: yes
on_abort: no
message: Ping
host: foo.bar
auth_key: secret
"""
base_url = "https://cronitor.link/{monitor_code}/{status}"
schema = {
"oneOf": [
{
"type": "object",
"properties": {
"monitor_code": {"type": "string"},
"on_start": {"type": "boolean"},
"on_abort": {"type": "boolean"},
"message": {"type": "string"},
"host": {"type": "string"},
"auth_key": {"type": "string"},
},
"required": ["monitor_code"],
"additionalProperties": False,
},
{"type": "string"},
]
}
@staticmethod
def prepare_config(config):
if isinstance(config, str):
config = {"monitor_code": config}
config.setdefault("on_start", True)
config.setdefault("on_abort", True)
config.setdefault("host", socket.gethostname())
return config
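    # Sketch of the normalization above: prepare_config("ABC123") returns
    # {"monitor_code": "ABC123", "on_start": True, "on_abort": True, "host": socket.gethostname()}.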
def _send_request(self, status, config, task_name):
url = self.base_url.format(monitor_code=config["monitor_code"], status=status)
message = config.get("message", f"{task_name} task {status}")
data = {"msg": message, "host": config["host"]}
if config.get("auth_key"):
data["auth_key"] = config["auth_key"]
try:
rsp = requests.get(url, params=data)
rsp.raise_for_status()
except RequestException as e:
raise PluginWarning(f"Could not report to cronitor: {e}")
def on_task_start(self, task, config):
config = self.prepare_config(config)
if not config["on_start"]:
return
self._send_request("run", config, task.name)
def on_task_abort(self, task, config):
config = self.prepare_config(config)
if not config["on_abort"]:
return
self._send_request("fail", config, task.name)
def on_task_exit(self, task, config):
config = self.prepare_config(config)
self._send_request("complete", config, task.name)
@event("plugin.register")
def register_plugin():
plugin.register(Cronitor, plugin_name, api_ver=2)
|
PypiClean
|
/django3-dashing-0.5.2.tar.gz/django3-dashing-0.5.2/dashing/static/dashing/libs/moment/locale/nl.js
|
(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['moment'], factory) :
factory(global.moment)
}(this, function (moment) { 'use strict';
var monthsShortWithDots = 'jan._feb._mrt._apr._mei_jun._jul._aug._sep._okt._nov._dec.'.split('_'),
monthsShortWithoutDots = 'jan_feb_mrt_apr_mei_jun_jul_aug_sep_okt_nov_dec'.split('_');
var nl = moment.defineLocale('nl', {
months : 'januari_februari_maart_april_mei_juni_juli_augustus_september_oktober_november_december'.split('_'),
monthsShort : function (m, format) {
if (/-MMM-/.test(format)) {
return monthsShortWithoutDots[m.month()];
} else {
return monthsShortWithDots[m.month()];
}
},
weekdays : 'zondag_maandag_dinsdag_woensdag_donderdag_vrijdag_zaterdag'.split('_'),
weekdaysShort : 'zo._ma._di._wo._do._vr._za.'.split('_'),
weekdaysMin : 'Zo_Ma_Di_Wo_Do_Vr_Za'.split('_'),
longDateFormat : {
LT : 'HH:mm',
LTS : 'LT:ss',
L : 'DD-MM-YYYY',
LL : 'D MMMM YYYY',
LLL : 'D MMMM YYYY LT',
LLLL : 'dddd D MMMM YYYY LT'
},
calendar : {
sameDay: '[vandaag om] LT',
nextDay: '[morgen om] LT',
nextWeek: 'dddd [om] LT',
lastDay: '[gisteren om] LT',
lastWeek: '[afgelopen] dddd [om] LT',
sameElse: 'L'
},
relativeTime : {
future : 'over %s',
past : '%s geleden',
s : 'een paar seconden',
m : 'één minuut',
mm : '%d minuten',
h : 'één uur',
hh : '%d uur',
d : 'één dag',
dd : '%d dagen',
M : 'één maand',
MM : '%d maanden',
y : 'één jaar',
yy : '%d jaar'
},
ordinalParse: /\d{1,2}(ste|de)/,
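        // Ordinal sketch: 1 -> '1ste', 2 -> '2de', 8 -> '8ste', 20 -> '20ste', 22 -> '22ste'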
ordinal : function (number) {
return number + ((number === 1 || number === 8 || number >= 20) ? 'ste' : 'de');
},
week : {
dow : 1, // Monday is the first day of the week.
doy : 4 // The week that contains Jan 4th is the first week of the year.
}
});
return nl;
}));
|
PypiClean
|
/templateproj-1.0.1.tar.gz/templateproj-1.0.1/README.md
|
<!--
* @Author: BDFD
* @Date: 2021-10-27 18:39:19
* @LastEditTime: 2021-10-27 21:22:31
* @LastEditors: Please set LastEditors
* @Description: In User Settings Edit
* @FilePath: \6.0-PyPI_Missing_Value_Table\README.md
-->
# Package Name
Package function description
## Installation
`pip install package-name`
## How to use it?
Function of the package
## License
Copyright © 2021 BDFD
This repository is licensed under the MIT license. See LICENSE for details.
### References
https://github.com/bdfd/6.0-PyPI_Template
|
PypiClean
|
/odoo_addon_l10n_ro_fiscal_validation-16.0.1.2.0-py3-none-any.whl/odoo/addons/l10n_ro_fiscal_validation/models/res_partner.py
|
import logging
import time
import requests
from odoo import api, fields, models
_logger = logging.getLogger(__name__)
CEDILLATRANS = bytes.maketrans(
"\u015f\u0163\u015e\u0162".encode("utf8"),
"\u0219\u021b\u0218\u021a".encode("utf8"),
)
headers = {
"User-Agent": "Mozilla/5.0 (compatible; MSIE 7.01; Windows NT 5.0)",
"Content-Type": "application/json;",
}
ANAF_BULK_URL = "https://webservicesp.anaf.ro/AsynchWebService/api/v8/ws/tva"
ANAF_CORR = "https://webservicesp.anaf.ro/AsynchWebService/api/v8/ws/tva?id=%s"
class ResPartner(models.Model):
_inherit = "res.partner"
@api.model
def update_l10n_ro_vat_subjected(self):
anaf_dict = []
check_date = fields.Date.to_string(fields.Date.today())
# Build list of vat numbers to be checked on ANAF
for partner in self:
anaf_dict.append(partner.l10n_ro_vat_number)
chunk = []
chunks = []
        # Send the VAT numbers to ANAF in chunks of at most 499 per request
max_no = 499
for position in range(0, len(anaf_dict), max_no):
chunk = anaf_dict[position : position + max_no]
chunks.append(chunk)
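        # e.g. 1200 VAT numbers are split into requests of 499, 499 and 202 entries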
for chunk in chunks:
anaf_ask = []
for item in chunk:
if item:
anaf_ask.append({"cui": int(item), "data": check_date})
try:
res = requests.post(
ANAF_BULK_URL, json=anaf_ask, headers=headers, timeout=30
)
if res.status_code == 200:
result = {}
try:
result = res.json()
except Exception:
_logger.warning("ANAF sync not working: %s" % res.content)
if result.get("correlationId"):
time.sleep(3)
resp = False
try:
resp = requests.get(
ANAF_CORR % result["correlationId"], timeout=30
)
except Exception as e:
_logger.warning("ANAF sync not working: %s" % e)
if resp and resp.status_code == 200:
resp = resp.json()
for result_partner in resp["found"] + resp["notfound"]:
vat = result_partner.get("date_generale").get("cui")
if vat:
partners = self.search(
[
("l10n_ro_vat_number", "=", vat),
("is_company", "=", True),
]
)
for partner in partners:
data = partner._Anaf_to_Odoo(result_partner)
partner.update(data)
except Exception:
_logger.warning("ANAF sync not working: %s" % res.content)
@api.model
def update_l10n_ro_vat_subjected_all(self):
partners = self.search(
[
("l10n_ro_vat_number", "!=", False),
("l10n_ro_vat_number", "!=", ""),
("country_id", "=", self.env.ref("base.ro").id),
("is_company", "=", True),
]
)
partners.update_l10n_ro_vat_subjected()
@api.model
def _update_l10n_ro_vat_subjected_all(self):
self.update_l10n_ro_vat_subjected_all()
|
PypiClean
|
/pd3f_flair-0.6.0.1-py3-none-any.whl/flair/datasets/sequence_labeling.py
|
import logging
import re
import os
from pathlib import Path
from typing import Union, Dict, List
import flair
from flair.data import Corpus, FlairDataset, Sentence, Token
from flair.datasets.base import find_train_dev_test_files
from flair.file_utils import cached_path
log = logging.getLogger("flair")
class ColumnCorpus(Corpus):
def __init__(
self,
data_folder: Union[str, Path],
column_format: Dict[int, str],
train_file=None,
test_file=None,
dev_file=None,
tag_to_bioes=None,
column_delimiter: str = r"\s+",
comment_symbol: str = None,
encoding: str = "utf-8",
document_separator_token: str = None,
skip_first_line: bool = False,
in_memory: bool = True,
):
"""
Instantiates a Corpus from CoNLL column-formatted task data such as CoNLL03 or CoNLL2000.
:param data_folder: base folder with the task data
:param column_format: a map specifying the column format
:param train_file: the name of the train file
:param test_file: the name of the test file
:param dev_file: the name of the dev file, if None, dev data is sampled from train
:param tag_to_bioes: whether to convert to BIOES tagging scheme
        :param column_delimiter: default is to split on any separator, but you can override it, for instance with "\t"
to split only on tabs
:param comment_symbol: if set, lines that begin with this symbol are treated as comments
:param document_separator_token: If provided, multiple sentences are read into one object. Provide the string token
that indicates that a new document begins
:param skip_first_line: set to True if your dataset has a header line
:param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
:return: a Corpus with annotated train, dev and test data
"""
# find train, dev and test files if not specified
dev_file, test_file, train_file = \
find_train_dev_test_files(data_folder, dev_file, test_file, train_file)
# get train data
train = ColumnDataset(
train_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
column_delimiter=column_delimiter,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
)
# read in test file if exists
test = ColumnDataset(
test_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
column_delimiter=column_delimiter,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
) if test_file is not None else None
# read in dev file if exists
dev = ColumnDataset(
dev_file,
column_format,
tag_to_bioes,
encoding=encoding,
comment_symbol=comment_symbol,
column_delimiter=column_delimiter,
in_memory=in_memory,
document_separator_token=document_separator_token,
skip_first_line=skip_first_line,
) if dev_file is not None else None
super(ColumnCorpus, self).__init__(train, dev, test, name=str(data_folder))
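# Usage sketch (illustrative, not part of the original file): load a CoNLL-style folder whose
# files have the token in column 0 and a NER tag in column 1 (folder and file names are assumptions):
#
#   corpus = ColumnCorpus(
#       "resources/tasks/my_ner", {0: "text", 1: "ner"},
#       train_file="train.txt", dev_file="dev.txt", test_file="test.txt",
#   )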
class ColumnDataset(FlairDataset):
# special key for space after
SPACE_AFTER_KEY = "space-after"
def __init__(
self,
path_to_column_file: Union[str, Path],
column_name_map: Dict[int, str],
tag_to_bioes: str = None,
column_delimiter: str = r"\s+",
comment_symbol: str = None,
in_memory: bool = True,
document_separator_token: str = None,
encoding: str = "utf-8",
skip_first_line: bool = False,
):
"""
Instantiates a column dataset (typically used for sequence labeling or word-level prediction).
:param path_to_column_file: path to the file with the column-formatted data
:param column_name_map: a map specifying the column format
:param tag_to_bioes: whether to convert to BIOES tagging scheme
        :param column_delimiter: default is to split on any separator, but you can override it, for instance with "\t"
to split only on tabs
:param comment_symbol: if set, lines that begin with this symbol are treated as comments
:param in_memory: If set to True, the dataset is kept in memory as Sentence objects, otherwise does disk reads
:param document_separator_token: If provided, multiple sentences are read into one object. Provide the string token
that indicates that a new document begins
:param skip_first_line: set to True if your dataset has a header line
"""
if type(path_to_column_file) is str:
path_to_column_file = Path(path_to_column_file)
assert path_to_column_file.exists()
self.path_to_column_file = path_to_column_file
self.tag_to_bioes = tag_to_bioes
self.column_name_map = column_name_map
self.column_delimiter = column_delimiter
self.comment_symbol = comment_symbol
self.document_separator_token = document_separator_token
# store either Sentence objects in memory, or only file offsets
self.in_memory = in_memory
if self.in_memory:
self.sentences: List[Sentence] = []
else:
self.indices: List[int] = []
self.total_sentence_count: int = 0
# most data sets have the token text in the first column, if not, pass 'text' as column
self.text_column: int = 0
for column in self.column_name_map:
if column_name_map[column] == "text":
self.text_column = column
# determine encoding of text file
self.encoding = encoding
sentence: Sentence = Sentence()
sentence_started: bool = False
with open(str(self.path_to_column_file), encoding=self.encoding) as f:
            # skip the first line if so selected
if skip_first_line:
f.readline()
line = f.readline()
position = 0
while line:
if self.comment_symbol is not None and line.startswith(comment_symbol):
line = f.readline()
continue
if self.__line_completes_sentence(line):
if sentence_started:
if self.in_memory:
if self.tag_to_bioes is not None:
sentence.convert_tag_scheme(
tag_type=self.tag_to_bioes, target_scheme="iobes"
)
self.sentences.append(sentence)
else:
self.indices.append(position)
position = f.tell()
self.total_sentence_count += 1
sentence: Sentence = Sentence()
sentence_started = False
elif self.in_memory:
token = self._parse_token(line)
if not line.isspace():
sentence.add_token(token)
sentence_started = True
elif not line.isspace():
sentence_started = True
line = f.readline()
if sentence_started:
if self.in_memory:
self.sentences.append(sentence)
else:
self.indices.append(position)
self.total_sentence_count += 1
def _parse_token(self, line: str) -> Token:
fields: List[str] = re.split(self.column_delimiter, line)
token = Token(fields[self.text_column])
for column in self.column_name_map:
if len(fields) > column:
if column != self.text_column and self.column_name_map[column] != self.SPACE_AFTER_KEY:
token.add_label(
self.column_name_map[column], fields[column]
)
if self.column_name_map[column] == self.SPACE_AFTER_KEY and fields[column] == '-':
token.whitespace_after = False
return token
def __line_completes_sentence(self, line: str) -> bool:
sentence_completed = line.isspace()
if self.document_separator_token:
sentence_completed = False
fields: List[str] = re.split(self.column_delimiter, line)
if len(fields) >= self.text_column:
if fields[self.text_column] == self.document_separator_token:
sentence_completed = True
return sentence_completed
def is_in_memory(self) -> bool:
return self.in_memory
def __len__(self):
return self.total_sentence_count
def __getitem__(self, index: int = 0) -> Sentence:
if self.in_memory:
sentence = self.sentences[index]
else:
with open(str(self.path_to_column_file), encoding=self.encoding) as file:
file.seek(self.indices[index])
line = file.readline()
sentence: Sentence = Sentence()
while line:
if self.comment_symbol is not None and line.startswith(
self.comment_symbol
):
line = file.readline()
continue
if self.__line_completes_sentence(line):
if len(sentence) > 0:
if self.tag_to_bioes is not None:
sentence.convert_tag_scheme(
tag_type=self.tag_to_bioes, target_scheme="iobes"
)
return sentence
else:
token = self._parse_token(line)
if not line.isspace():
sentence.add_token(token)
line = file.readline()
return sentence
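# Usage sketch (illustrative): read a single two-column file (token + NER tag) into memory:
#
#   dataset = ColumnDataset(Path("data/train.txt"), {0: "text", 1: "ner"})
#   print(len(dataset), dataset[0])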
class BIOFID(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "lemma", 2: "pos", 3: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
biofid_path = "https://raw.githubusercontent.com/texttechnologylab/BIOfid/master/BIOfid-Dataset-NER/"
cached_path(f"{biofid_path}train.conll", Path("datasets") / dataset_name)
cached_path(f"{biofid_path}dev.conll", Path("datasets") / dataset_name)
cached_path(f"{biofid_path}test.conll", Path("datasets") / dataset_name)
super(BIOFID, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class CONLL_03(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
document_as_sequence: bool = False,
):
"""
Initialize the CoNLL-03 corpus. This is only possible if you've manually downloaded it to your machine.
Obtain the corpus from https://www.clips.uantwerpen.be/conll2003/ner/ and put the eng.testa, .testb, .train
files in a folder called 'conll_03'. Then set the base_path parameter in the constructor to the path to the
parent directory where the conll_03 folder resides.
:param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03' folder) on your machine
:param tag_to_bioes: NER by default, need not be changed, but you could also select 'pos' or 'np' to predict
POS tags or chunks respectively
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "np", 3: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# check if data there
if not data_folder.exists():
log.warning("-" * 100)
log.warning(f'WARNING: CoNLL-03 dataset not found at "{data_folder}".')
log.warning(
'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
)
log.warning("-" * 100)
super(CONLL_03, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
in_memory=in_memory,
document_separator_token=None if not document_as_sequence else "-DOCSTART-",
)
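# Usage sketch (illustrative): with eng.train, eng.testa and eng.testb placed under
# <base_path>/conll_03 as described in the docstring, the corpus loads with:
#
#   corpus = CONLL_03(base_path="resources/tasks")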
class CONLL_03_GERMAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
document_as_sequence: bool = False,
):
"""
Initialize the CoNLL-03 corpus for German. This is only possible if you've manually downloaded it to your machine.
Obtain the corpus from https://www.clips.uantwerpen.be/conll2003/ner/ and put the respective files in a folder called
'conll_03_german'. Then set the base_path parameter in the constructor to the path to the parent directory where
the conll_03_german folder resides.
:param base_path: Path to the CoNLL-03 corpus (i.e. 'conll_03_german' folder) on your machine
:param tag_to_bioes: NER by default, need not be changed, but you could also select 'lemma', 'pos' or 'np' to predict
word lemmas, POS tags or chunks respectively
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "lemma", 2: "pos", 3: "np", 4: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# check if data there
if not data_folder.exists():
log.warning("-" * 100)
log.warning(f'WARNING: CoNLL-03 dataset not found at "{data_folder}".')
log.warning(
'Instructions for obtaining the data can be found here: https://www.clips.uantwerpen.be/conll2003/ner/"'
)
log.warning("-" * 100)
super(CONLL_03_GERMAN, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
in_memory=in_memory,
document_separator_token=None if not document_as_sequence else "-DOCSTART-",
)
class CONLL_03_DUTCH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
document_as_sequence: bool = False,
):
"""
Initialize the CoNLL-03 corpus for Dutch. The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: NER by default, need not be changed, but you could also select 'pos' to predict
POS tags instead
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
cached_path(f"{conll_02_path}ned.testa", Path("datasets") / dataset_name)
cached_path(f"{conll_02_path}ned.testb", Path("datasets") / dataset_name)
cached_path(f"{conll_02_path}ned.train", Path("datasets") / dataset_name)
super(CONLL_03_DUTCH, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
encoding="latin-1",
in_memory=in_memory,
document_separator_token=None if not document_as_sequence else "-DOCSTART-",
)
class TWITTER_NER(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
document_as_sequence: bool = False,
):
"""
Initialize a dataset called twitter_ner which can be found on the following page:
https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/ner.txt.
The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: NER by default, need not be changed
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: 'text', 1: 'ner'}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
twitter_ner_path = "https://raw.githubusercontent.com/aritter/twitter_nlp/master/data/annotated/"
cached_path(f"{twitter_ner_path}ner.txt", Path("datasets") / dataset_name)
super(TWITTER_NER, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
encoding="latin-1",
train_file="ner.txt",
in_memory=in_memory,
document_separator_token=None if not document_as_sequence else "-DOCSTART-",
)
class MIT_RESTAURANTS(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
document_as_sequence: bool = False,
):
"""
Initialize the experimental MIT Restaurant corpus available on https://groups.csail.mit.edu/sls/downloads/restaurant/.
The first time you call this constructor it will automatically download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: NER by default, need not be changed, but you could also select 'pos' to predict
POS tags instead
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
mit_restaurants_path = "https://megantosh.s3.eu-central-1.amazonaws.com/MITRestoCorpus/"
cached_path(f"{mit_restaurants_path}test.txt", Path("datasets") / dataset_name)
cached_path(f"{mit_restaurants_path}train.txt", Path("datasets") / dataset_name)
super(MIT_RESTAURANTS, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
encoding="latin-1",
in_memory=in_memory,
document_separator_token=None if not document_as_sequence else "-DOCSTART-",
)
def add_IOB_tags(data_file: Union[str, Path], encoding: str = "utf8", ner_column: int = 1):
"""
Function that adds IOB tags if only chunk names are provided (e.g. words are tagged PER instead
of B-PER or I-PER). Replaces '0' with 'O' as the no-chunk tag since ColumnCorpus expects
the letter 'O'. Additionally it removes lines with no tags in the data file and can also
be used if the data is only partially IOB tagged.
Parameters
----------
data_file : Union[str, Path]
Path to the data file.
encoding : str, optional
Encoding used in open function. The default is "utf8".
ner_column : int, optional
Specifies the ner-tagged column. The default is 1 (the second column).
"""
def add_I_prefix(current_line: List[str], ner: int, tag: str):
for i in range(0, len(current_line)):
if i == 0:
                f.write(current_line[i])
elif i == ner:
f.write(' I-' + tag)
else:
f.write(' ' + current_line[i])
f.write('\n')
with open(file=data_file, mode='r', encoding=encoding) as f:
lines = f.readlines()
with open(file=data_file, mode='w', encoding=encoding) as f:
        pred = 'O'  # remembers the NER tag of the preceding line
for line in lines:
line_list = line.split()
if len(line_list) > 2: # word with tags
ner_tag = line_list[ner_column]
if ner_tag in ['0', 'O']: # no chunk
for i in range(0,len(line_list)):
if i == 0:
f.write(line_list[i])
elif i == ner_column:
f.write(' O')
else:
f.write(' ' + line_list[i])
f.write('\n')
pred = 'O'
elif '-' not in ner_tag: # no IOB tags
if pred == 'O': # found a new chunk
add_I_prefix(line_list, ner_column, ner_tag)
pred = ner_tag
else: # found further part of chunk or new chunk directly after old chunk
add_I_prefix(line_list, ner_column, ner_tag)
pred = ner_tag
else: # line already has IOB tag (tag contains '-')
f.write(line)
pred = ner_tag.split('-')[1]
elif len(line_list) == 0: # empty line
f.write('\n')
pred = 'O'
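# Example (illustrative): with ner_column=1, the line "Paris LOC x y" becomes "Paris I-LOC x y",
# a '0' or 'O' tag is written as plain 'O', and lines that already carry IOB tags are kept as-is.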
def add_IOB2_tags(data_file: Union[str, Path], encoding: str = "utf8"):
"""
Function that adds IOB2 tags if only chunk names are provided (e.g. words are tagged PER instead
of B-PER or I-PER). Replaces '0' with 'O' as the no-chunk tag since ColumnCorpus expects
the letter 'O'. Additionally it removes lines with no tags in the data file and can also
be used if the data is only partially IOB tagged.
Parameters
----------
data_file : Union[str, Path]
Path to the data file.
encoding : str, optional
Encoding used in open function. The default is "utf8".
"""
with open(file=data_file, mode='r', encoding=encoding) as f:
lines = f.readlines()
with open(file=data_file, mode='w', encoding=encoding) as f:
        pred = 'O'  # remembers the tag of the preceding line
for line in lines:
line_list = line.split()
if len(line_list) == 2: # word with tag
word = line_list[0]
tag = line_list[1]
if tag in ['0', 'O']: # no chunk
f.write(word + ' O\n')
pred = 'O'
elif '-' not in tag: # no IOB tags
if pred == 'O': # found a new chunk
f.write(word + ' B-' + tag + '\n')
pred = tag
else: # found further part of chunk or new chunk directly after old chunk
if pred == tag:
f.write(word + ' I-' + tag + '\n')
else:
f.write(word + ' B-' + tag + '\n')
pred = tag
else: # line already has IOB tag (tag contains '-')
f.write(line)
pred = tag.split('-')[1]
elif len(line_list) == 0: # empty line
f.write('\n')
pred = 'O'
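# Example (illustrative): consecutive lines "New LOC" and "York LOC" become "New B-LOC" and
# "York I-LOC", while a following "City MISC" starts a new chunk as "City B-MISC".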
class CONLL_03_SPANISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
"""
Initialize the CoNLL-03 corpus for Spanish. The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: NER by default, should not be changed
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
conll_02_path = "https://www.clips.uantwerpen.be/conll2002/ner/data/"
cached_path(f"{conll_02_path}esp.testa", Path("datasets") / dataset_name)
cached_path(f"{conll_02_path}esp.testb", Path("datasets") / dataset_name)
cached_path(f"{conll_02_path}esp.train", Path("datasets") / dataset_name)
super(CONLL_03_SPANISH, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
encoding="latin-1",
in_memory=in_memory,
)
class CONLL_2000(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "np",
in_memory: bool = True,
):
"""
Initialize the CoNLL-2000 corpus for English chunking.
The first time you call this constructor it will automatically download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: 'np' by default, should not be changed, but you can set 'pos' instead to predict POS tags
:param in_memory: If True, keeps dataset in memory giving speedups in training.
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "np"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
conll_2000_path = "https://www.clips.uantwerpen.be/conll2000/chunking/"
data_file = Path(flair.cache_root) / "datasets" / dataset_name / "train.txt"
if not data_file.is_file():
cached_path(
f"{conll_2000_path}train.txt.gz", Path("datasets") / dataset_name
)
cached_path(
f"{conll_2000_path}test.txt.gz", Path("datasets") / dataset_name
)
import gzip, shutil
with gzip.open(
Path(flair.cache_root) / "datasets" / dataset_name / "train.txt.gz",
"rb",
) as f_in:
with open(
Path(flair.cache_root) / "datasets" / dataset_name / "train.txt",
"wb",
) as f_out:
shutil.copyfileobj(f_in, f_out)
with gzip.open(
Path(flair.cache_root) / "datasets" / dataset_name / "test.txt.gz", "rb"
) as f_in:
with open(
Path(flair.cache_root) / "datasets" / dataset_name / "test.txt",
"wb",
) as f_out:
shutil.copyfileobj(f_in, f_out)
super(CONLL_2000, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class DANE(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {1: 'text', 3: 'pos', 9: 'ner'}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
data_path = Path(flair.cache_root) / "datasets" / dataset_name
train_data_file = data_path / "ddt.train.conllu"
if not train_data_file.is_file():
temp_file = cached_path(
'https://danlp.alexandra.dk/304bd159d5de/datasets/ddt.zip',
Path("datasets") / dataset_name
)
from zipfile import ZipFile
with ZipFile(temp_file, 'r') as zip_file:
zip_file.extractall(path=data_path)
# Remove CoNLL-U meta information in the last column
for part in ['train', 'dev', 'test']:
lines = []
data_file = "ddt.{}.conllu".format(part)
with open(data_path / data_file, 'r') as file:
for line in file:
if line.startswith("#") or line == "\n":
                        lines.append(line)
                        continue
                    lines.append(line.replace("name=", "").replace("|SpaceAfter=No", ""))
with open(data_path / data_file, 'w') as file:
file.writelines(lines)
print(data_path / data_file)
super(DANE, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes,
in_memory=in_memory, comment_symbol="#"
)
class EUROPARL_NER_GERMAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
"""
Initialize the EUROPARL_NER_GERMAN corpus. The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param tag_to_bioes: 'ner' by default, should not be changed.
:param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: 'text', 1: 'lemma', 2: 'pos', 3: 'np', 4: 'ner'}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
europarl_ner_german_path = "https://nlpado.de/~sebastian/software/ner/"
cached_path(f"{europarl_ner_german_path}ep-96-04-15.conll", Path("datasets") / dataset_name)
cached_path(f"{europarl_ner_german_path}ep-96-04-16.conll", Path("datasets") / dataset_name)
add_IOB_tags(data_file=Path(data_folder / "ep-96-04-15.conll"), encoding="latin-1", ner_column=4)
add_IOB_tags(data_file=Path(data_folder / "ep-96-04-16.conll"), encoding="latin-1", ner_column=4)
super(EUROPARL_NER_GERMAN, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
encoding="latin-1",
in_memory=in_memory,
train_file='ep-96-04-16.conll',
test_file='ep-96-04-15.conll'
)
class GERMEVAL_14(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
"""
Initialize the GermEval NER corpus for German. This is only possible if you've manually downloaded it to your
machine. Obtain the corpus from https://sites.google.com/site/germeval2014ner/data and put it into some folder.
Then point the base_path parameter in the constructor to this folder
:param base_path: Path to the GermEval corpus on your machine
:param tag_to_bioes: 'ner' by default, should not be changed.
        :param in_memory: If True, keeps dataset in memory giving speedups in training.
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {1: "text", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# check if data there
if not data_folder.exists():
log.warning("-" * 100)
log.warning(f'WARNING: GermEval-14 dataset not found at "{data_folder}".')
log.warning(
'Instructions for obtaining the data can be found here: https://sites.google.com/site/germeval2014ner/data"'
)
log.warning("-" * 100)
super(GERMEVAL_14, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
comment_symbol="#",
in_memory=in_memory,
)
class INSPEC(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "keyword",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "keyword"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
inspec_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/Inspec"
cached_path(f"{inspec_path}/train.txt", Path("datasets") / dataset_name)
cached_path(f"{inspec_path}/test.txt", Path("datasets") / dataset_name)
if not "dev.txt" in os.listdir(data_folder):
cached_path(f"{inspec_path}/valid.txt", Path("datasets") / dataset_name)
# rename according to train - test - dev - convention
os.rename(data_folder / "valid.txt", data_folder / "dev.txt")
super(INSPEC, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class LER_GERMAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
"""
Initialize the LER_GERMAN (Legal Entity Recognition) corpus. The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param in_memory: If True, keeps dataset in memory giving speedups in training. Not recommended due to heavy RAM usage.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
ler_path = "https://raw.githubusercontent.com/elenanereiss/Legal-Entity-Recognition/master/data/"
cached_path(f"{ler_path}ler.conll", Path("datasets") / dataset_name)
super(LER_GERMAN, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
in_memory=in_memory,
train_file='ler.conll'
)
class NER_BASQUE(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
ner_basque_path = "http://ixa2.si.ehu.eus/eiec/"
data_path = Path(flair.cache_root) / "datasets" / dataset_name
data_file = data_path / "named_ent_eu.train"
if not data_file.is_file():
cached_path(
f"{ner_basque_path}/eiec_v1.0.tgz", Path("datasets") / dataset_name
)
import tarfile, shutil
with tarfile.open(
Path(flair.cache_root) / "datasets" / dataset_name / "eiec_v1.0.tgz",
"r:gz",
) as f_in:
corpus_files = (
"eiec_v1.0/named_ent_eu.train",
"eiec_v1.0/named_ent_eu.test",
)
for corpus_file in corpus_files:
f_in.extract(corpus_file, data_path)
shutil.move(f"{data_path}/{corpus_file}", data_path)
super(NER_BASQUE, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class NER_FINNISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
ner_finnish_path = "https://raw.githubusercontent.com/mpsilfve/finer-data/master/data/digitoday."
cached_path(f"{ner_finnish_path}2014.train.csv", Path("datasets") / dataset_name)
cached_path(f"{ner_finnish_path}2014.dev.csv", Path("datasets") / dataset_name)
cached_path(f"{ner_finnish_path}2015.test.csv", Path("datasets") / dataset_name)
_remove_lines_without_annotations(data_file=Path(data_folder / "digitoday.2015.test.csv"))
super(NER_FINNISH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory, skip_first_line=True
)
def _remove_lines_without_annotations(data_file: Union[str, Path] = None):
with open(data_file, 'r') as f:
lines = f.readlines()
with open(data_file, 'w') as f:
for line in lines:
if len(line.split()) != 1:
f.write(line)
class NER_SWEDISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
"""
Initialize the NER_SWEDISH corpus for Swedish. The first time you call this constructor it will automatically
download the dataset.
:param base_path: Default is None, meaning that corpus gets auto-downloaded and loaded. You can override this
to point to a different folder but typically this should not be necessary.
:param in_memory: If True, keeps dataset in memory giving speedups in training.
:param document_as_sequence: If True, all sentences of a document are read into a single Sentence object
"""
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
ner_spraakbanken_path = "https://raw.githubusercontent.com/klintan/swedish-ner-corpus/master/"
cached_path(f"{ner_spraakbanken_path}test_corpus.txt", Path("datasets") / dataset_name)
cached_path(f"{ner_spraakbanken_path}train_corpus.txt", Path("datasets") / dataset_name)
# data is not in IOB2 format. Thus we transform it to IOB2
add_IOB2_tags(data_file=Path(data_folder / "test_corpus.txt"))
add_IOB2_tags(data_file=Path(data_folder / "train_corpus.txt"))
super(NER_SWEDISH, self).__init__(
data_folder,
columns,
tag_to_bioes=tag_to_bioes,
in_memory=in_memory,
)
class SEMEVAL2017(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "keyword",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "keyword"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
semeval2017_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/SemEval-2017"
cached_path(f"{semeval2017_path}/train.txt", Path("datasets") / dataset_name)
cached_path(f"{semeval2017_path}/test.txt", Path("datasets") / dataset_name)
cached_path(f"{semeval2017_path}/dev.txt", Path("datasets") / dataset_name)
super(SEMEVAL2017, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class SEMEVAL2010(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "keyword",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "keyword"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
semeval2010_path = "https://raw.githubusercontent.com/midas-research/keyphrase-extraction-as-sequence-labeling-data/master/processed_semeval-2010"
cached_path(f"{semeval2010_path}/train.txt", Path("datasets") / dataset_name)
cached_path(f"{semeval2010_path}/test.txt", Path("datasets") / dataset_name)
super(SEMEVAL2010, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_ENGLISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("en", dataset_name)
super(WIKINER_ENGLISH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_GERMAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("de", dataset_name)
super(WIKINER_GERMAN, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_DUTCH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("nl", dataset_name)
super(WIKINER_DUTCH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_FRENCH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("fr", dataset_name)
super(WIKINER_FRENCH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_ITALIAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("it", dataset_name)
super(WIKINER_ITALIAN, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_SPANISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("es", dataset_name)
super(WIKINER_SPANISH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_PORTUGUESE(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("pt", dataset_name)
super(WIKINER_PORTUGUESE, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_POLISH(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("pl", dataset_name)
super(WIKINER_POLISH, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WIKINER_RUSSIAN(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = False,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "pos", 2: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
_download_wikiner("ru", dataset_name)
super(WIKINER_RUSSIAN, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class WNUT_17(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
tag_to_bioes: str = "ner",
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "ner"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
wnut_path = "https://noisy-text.github.io/2017/files/"
cached_path(f"{wnut_path}wnut17train.conll", Path("datasets") / dataset_name)
cached_path(f"{wnut_path}emerging.dev.conll", Path("datasets") / dataset_name)
cached_path(
f"{wnut_path}emerging.test.annotated", Path("datasets") / dataset_name
)
super(WNUT_17, self).__init__(
data_folder, columns, tag_to_bioes=tag_to_bioes, in_memory=in_memory
)
class BIOSCOPE(ColumnCorpus):
def __init__(
self,
base_path: Union[str, Path] = None,
in_memory: bool = True,
):
if type(base_path) == str:
base_path: Path = Path(base_path)
# column format
columns = {0: "text", 1: "tag"}
# this dataset name
dataset_name = self.__class__.__name__.lower()
# default dataset folder is the cache root
if not base_path:
base_path = Path(flair.cache_root) / "datasets"
data_folder = base_path / dataset_name
# download data if necessary
bioscope_path = "https://raw.githubusercontent.com/whoisjones/BioScopeSequenceLabelingData/master/sequence_labeled/"
cached_path(f"{bioscope_path}output.txt", Path("datasets") / dataset_name)
super(BIOSCOPE, self).__init__(
data_folder, columns, in_memory=in_memory, train_file="output.txt"
)
def _download_wikiner(language_code: str, dataset_name: str):
# download data if necessary
wikiner_path = (
"https://raw.githubusercontent.com/dice-group/FOX/master/input/Wikiner/"
)
lc = language_code
data_file = (
Path(flair.cache_root)
/ "datasets"
/ dataset_name
/ f"aij-wikiner-{lc}-wp3.train"
)
if not data_file.is_file():
cached_path(
f"{wikiner_path}aij-wikiner-{lc}-wp3.bz2", Path("datasets") / dataset_name
)
import bz2, shutil
# unpack and write out in CoNLL column-like format
bz_file = bz2.BZ2File(
Path(flair.cache_root)
/ "datasets"
/ dataset_name
/ f"aij-wikiner-{lc}-wp3.bz2",
"rb",
)
with bz_file as f, open(
Path(flair.cache_root)
/ "datasets"
/ dataset_name
/ f"aij-wikiner-{lc}-wp3.train",
"w",
encoding="utf-8"
) as out:
for line in f:
line = line.decode("utf-8")
words = line.split(" ")
for word in words:
out.write("\t".join(word.split("|")) + "\n")
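# Example (illustrative): a WikiNER line such as "Paris|NNP|I-LOC is|VBZ|O" is rewritten as
# one token per row, i.e. "Paris\tNNP\tI-LOC" followed by "is\tVBZ\tO".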
|
PypiClean
|
/pypcs-0.0.5.tar.gz/pypcs-0.0.5/pyPCS/_basicData.py
|
import math
# Decorator: print the running time of a function call
def _prt_func_time(func):
def f(*args, **kwargs):
from time import time
st = time()
_return = func(*args, **kwargs)
print("Time: \033[0;33m" + str(time() - st) + "pypcs\033[0m")
return _return
return f
# Decorator factory: print the elapsed time for every `times` calls of a function
def _prt_funcs_time(times):
def __prt_funcs_time(func):
st = 0
counter = 0
def f(*args, **kwargs):
from time import time
nonlocal counter, st
st = time() if counter == 0 else st
counter += 1
if counter == times:
print(time() - st)
counter = 0
return func(*args, **kwargs)
return f
return __prt_funcs_time
# Decorator: print how many times the function has been called
def _prt_func_run_num(func):
counter = 0
def f(*args, **kwargs):
nonlocal counter
counter += 1
print(counter)
return func(*args, **kwargs)
return f
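# Usage sketch (illustrative, not part of the original module): decorate a function to print
# its elapsed time on every call:
#
#   @_prt_func_time
#   def build_chord():  # hypothetical function name
#       ...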
value_note = {
0: 'C', 1: '#C', 2: 'D', 3: '#D', 4: 'E', 5: 'F', 6: '#F', 7: 'G', 8: '#G', 9: 'A', 10: '#A', 11: 'B',
    # the first row (0-11) is used for pitch-class sets and for looking up note names
    21: 'A0', 22: '#A0', 23: 'B0', 24: 'C1', 25: '#C1', 26: 'D1', 27: '#D1', 28: 'E1', 29: 'F1', 30: '#F1',
    31: 'G1', 32: '#G1', 33: 'A1', 34: '#A1', 35: 'B1', 36: 'C2', 37: '#C2', 38: 'D2', 39: '#D2', 40: 'E2',
    41: 'F2', 42: '#F2', 43: 'G2', 44: '#G2', 45: 'A2', 46: '#A2', 47: 'B2', 48: 'C3', 49: '#C3', 50: 'D3',
    51: '#D3', 52: 'E3', 53: 'F3', 54: '#F3', 55: 'G3', 56: '#G3', 57: 'A3', 58: '#A3', 59: 'B3', 60: 'C4',
    61: '#C4', 62: 'D4', 63: '#D4', 64: 'E4', 65: 'F4', 66: '#F4', 67: 'G4', 68: '#G4', 69: 'A4', 70: '#A4',
    71: 'B4', 72: 'C5', 73: '#C5', 74: 'D5', 75: '#D5', 76: 'E5', 77: 'F5', 78: '#F5', 79: 'G5', 80: '#G5',
    81: 'A5', 82: '#A5', 83: 'B5', 84: 'C6', 85: '#C6', 86: 'D6', 87: '#D6', 88: 'E6', 89: 'F6', 90: '#F6',
    91: 'G6', 92: '#G6', 93: 'A6', 94: '#A6', 95: 'B6', 96: 'C7', 97: '#C7', 98: 'D7', 99: '#D7', 100: 'E7',
    101: 'F7', 102: '#F7', 103: 'G7', 104: '#G7', 105: 'A7', 106: '#A7', 107: 'B7', 108: 'C8'
}
# The reverse mapping: note name -> value
note_value = {
'A0': 21, '#A0': 22, 'B0': 23, 'C1': 24, '#C1': 25, 'D1': 26, '#D1': 27, 'E1': 28, 'F1': 29, '#F1': 30,
'G1': 31, '#G1': 32, 'A1': 33, '#A1': 34, 'B1': 35, 'C2': 36, '#C2': 37, 'D2': 38, '#D2': 39, 'E2': 40,
'F2': 41, '#F2': 42, 'G2': 43, '#G2': 44, 'A2': 45, '#A2': 46, 'B2': 47, 'C3': 48, '#C3': 49, 'D3': 50,
'#D3': 51, 'E3': 52, 'F3': 53, '#F3': 54, 'G3': 55, '#G3': 56, 'A3': 57, '#A3': 58, 'B3': 59, 'C4': 60,
'#C4': 61, 'D4': 62, '#D4': 63, 'E4': 64, 'F4': 65, '#F4': 66, 'G4': 67, '#G4': 68, 'A4': 69, '#A4': 70,
'B4': 71, 'C5': 72, '#C5': 73, 'D5': 74, '#D5': 75, 'E5': 76, 'F5': 77, '#F5': 78, 'G5': 79, '#G5': 80,
'A5': 81, '#A5': 82, 'B5': 83, 'C6': 84, '#C6': 85, 'D6': 86, '#D6': 87, 'E6': 88, 'F6': 89, '#F6': 90,
    'G6': 91, '#G6': 92, 'A6': 93, '#A6': 94, 'B6': 95, 'C7': 96, '#C7': 97, 'D7': 98, '#D7': 99, 'E7': 100,
'F7': 101, '#F7': 102, 'G7': 103, '#G7': 104, 'A7': 105, '#A7': 106, 'B7': 107, 'C8': 108,
    # In case the input is lower-case
'a0': 21, '#a0': 22, 'b0': 23, 'c1': 24, '#c1': 25, 'd1': 26, '#d1': 27, 'e1': 28, 'f1': 29, '#f1': 30,
'g1': 31, '#g1': 32, 'a1': 33, '#a1': 34, 'b1': 35, 'c2': 36, '#c2': 37, 'd2': 38, '#d2': 39, 'e2': 40,
'f2': 41, '#f2': 42, 'g2': 43, '#g2': 44, 'a2': 45, '#a2': 46, 'b2': 47, 'c3': 48, '#c3': 49, 'd3': 50,
'#d3': 51, 'e3': 52, 'f3': 53, '#f3': 54, 'g3': 55, '#g3': 56, 'a3': 57, '#a3': 58, 'b3': 59, 'c4': 60,
'#c4': 61, 'd4': 62, '#d4': 63, 'e4': 64, 'f4': 65, '#f4': 66, 'g4': 67, '#g4': 68, 'a4': 69, '#a4': 70,
'b4': 71, 'c5': 72, '#c5': 73, 'd5': 74, '#d5': 75, 'e5': 76, 'f5': 77, '#f5': 78, 'g5': 79, '#g5': 80,
'a5': 81, '#a5': 82, 'b5': 83, 'c6': 84, '#c6': 85, 'd6': 86, '#d6': 87, 'e6': 88, 'f6': 89, '#f6': 90,
    'g6': 91, '#g6': 92, 'a6': 93, '#a6': 94, 'b6': 95, 'c7': 96, '#c7': 97, 'd7': 98, '#d7': 99, 'e7': 100,
'f7': 101, '#f7': 102, 'g7': 103, '#g7': 104, 'a7': 105, '#a7': 106, 'b7': 107, 'c8': 108,
    # If no octave/register is given:
'C': 60, '#C': 61, 'D': 62, '#D': 63, 'E': 64, 'F': 65,
'#F': 66, 'G': 67, '#G': 68, 'A': 69, '#A': 70, 'B': 71,
    # If there is neither an octave nor upper case
'c': 60, '#c': 61, 'd': 62, '#d': 63, 'e': 64, 'f': 65,
'#f': 66, 'g': 67, '#g': 68, 'a': 69, '#a': 70, 'b': 71
}
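# Quick illustration of the two lookup tables above:
#     value_note[60]   -> 'C4'
#     note_value['C4'] -> 60
#     note_value['c']  -> 60   (names without an octave default to octave 4)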
# note_names = list(note_value.keys())
notes_names = [
'A0', '#A0', 'B0', 'C1', '#C1', 'D1', '#D1', 'E1', 'F1', '#F1', 'G1', '#G1', 'A1', '#A1', 'B1',
'C2', '#C2', 'D2', '#D2', 'E2', 'F2', '#F2', 'G2', '#G2', 'A2', '#A2', 'B2',
'C3', '#C3', 'D3', '#D3', 'E3', 'F3', '#F3', 'G3', '#G3', 'A3', '#A3', 'B3',
'C4', '#C4', 'D4', '#D4', 'E4', 'F4', '#F4', 'G4', '#G4', 'A4', '#A4', 'B4',
'C5', '#C5', 'D5', '#D5', 'E5', 'F5', '#F5', 'G5', '#G5', 'A5', '#A5', 'B5',
'C6', '#C6', 'D6', '#D6', 'E6', 'F6', '#F6', 'G6', '#G6', 'A6', '#A6', 'B6',
'C7', '#C7', 'D7', '#D7', 'E7', 'F7', '#F7', 'G7', '#G7', 'A7', '#A7', 'B7', 'C8',
'a0', '#a0', 'b0', 'c1', '#c1', 'd1', '#d1', 'e1', 'f1', '#f1', 'g1', '#g1', 'a1', '#a1', 'b1',
'c2', '#c2', 'd2', '#d2', 'e2', 'f2', '#f2', 'g2', '#g2', 'a2', '#a2', 'b2',
'c3', '#c3', 'd3', '#d3', 'e3', 'f3', '#f3', 'g3', '#g3', 'a3', '#a3', 'b3',
'c4', '#c4', 'd4', '#d4', 'e4', 'f4', '#f4', 'g4', '#g4', 'a4', '#a4', 'b4',
'c5', '#c5', 'd5', '#d5', 'e5', 'f5', '#f5', 'g5', '#g5', 'a5', '#a5', 'b5',
'c6', '#c6', 'd6', '#d6', 'e6', 'f6', '#f6', 'g6', '#g6', 'a6', '#a6', 'b6',
'c7', '#c7', 'd7', '#d7', 'e7', 'f7', '#f7', 'g7', '#g7', 'a7', '#a7', 'b7', 'c8',
'C', '#C', 'D', '#D', 'E', 'F', '#F', 'G', '#G', 'A', '#A', 'B',
'c', '#c', 'd', '#d', 'e', 'f', '#f', 'g', '#g', 'a', '#a', 'b'
]
ChordsAttributes = {
}
chords_chroma_vector = {
#
    # Triads
    #
    # Major triads, index:
'C': [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0], # 12*0
'#C': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'D': [0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0],
'#D': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0],
'E': [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
'F': [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'#F': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0],
'G': [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1],
'bA': [1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
'A': [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
'bB': [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
'B': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
    # Minor triads
'Cm': [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0], # 12*1
'#Cm': [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
'Dm': [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'#Dm': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
'Em': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
'Fm': [1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'#Fm': [0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
'Gm': [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0],
'bAm': [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1],
'Am': [1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0],
'bBm': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
'Bm': [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
    # Diminished triads
'Cdim': [1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0], # 12*2
'#Cdim': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
'Ddim': [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'#Ddim': [0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
'Edim': [0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0],
'Fdim': [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1],
'#Fdim': [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0],
'Gdim': [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0],
'bAdim': [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'Adim': [1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
'bBdim': [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
'Bdim': [0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1],
    # Augmented triads
'C/E/#Gaug': [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0], # 12*3
'bDaug/F/A': [0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'D/#F/#Aaug': [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
'bD/G/Baug': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1],
    # Sus2 / sus4 chords
'Csus2/Gsus4': [1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0], # 4 + 12*3
'#Csus2/bAsus4': [0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
'Dsus2/Asus4': [0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0],
'#Dsus2/bBsus4': [0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
'Esus2/Bsus4': [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],
'Fsus2/Csus4': [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
'#Fsus2/#Csus4': [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'Gsus2/Dsus4': [0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],
'bAsus2/#Dsus4': [0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
'Asus2/Esus4': [0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1],
'bBsus2/Fsus4': [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0],
'Bsus2/#Fsus4': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
#
    # Seventh chords with omitted tones
    # Major seventh (or augmented major seventh) chords, fifth omitted
'CM7,-5': [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1], # 4 + 12*4
'#CM7,-5': [1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'DM7,-5': [0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'#DM7,-5': [0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0],
'EM7,-5': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0],
'FM7,-5': [0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0],
'#FM7,-5': [0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],
'GM7,-5': [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1],
'bAM7,-5': [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0],
'AM7,-5': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
'bBM7,-5': [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
'BM7,-5': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1],
    # Minor-major seventh chords, fifth omitted (is there such a thing as a diminished major seventh?)
'CmM7,-5': [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1], # 4 + 12*5
'#CmM7,-5': [1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
'DmM7,-5': [0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'#DmM7,-5': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0],
'EmM7,-5': [0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
'FmM7,-5': [0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0],
'#FmM7,-5': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0],
'GmM7,-5': [0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0],
'bAmM7,-5': [0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
'AmM7,-5': [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0],
'bBmM7,-5': [0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
'BmM7,-5': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1],
    # Dominant seventh (or augmented seventh) chords, fifth omitted
'C7,-5': [1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0], # 4 + 12*6
'#C7,-5': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
'D7,-5': [1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
'#D7,-5': [0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
'E7,-5': [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
'F7,-5': [0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0],
'#F7,-5': [0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
'G7,-5': [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1],
'bA7,-5': [1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'A7,-5': [0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0],
'bB7,-5': [0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0],
'B7,-5': [0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1],
    # Dominant seventh (or minor seventh) chords, third omitted
'C7,-3': [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0], # 4 + 12*7
'#C7,-3': [0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
'D7,-3': [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
'#D7,-3': [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0],
'E7,-3': [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
'F7,-3': [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
'#F7,-3': [0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
'G7,-3': [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
'bA7,-3': [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0],
'A7,-3': [0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0],
'bB7,-3': [0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],
'B7,-3': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1],
    # Minor seventh (or half-diminished seventh) chords, fifth omitted
'Cm7,-5': [1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0], # 4 + 12*8
'#Cm7,-5': [0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1],
'Dm7,-5': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
'#Dm7,-5': [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
'Em7,-5': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0],
'Fm7,-5': [0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
'#Fm7,-5': [0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'Gm7,-5': [0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0],
'bAm7,-5': [0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1],
'Am7,-5': [1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0],
'bBm7,-5': [0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0],
'Bm7,-5': [0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1],
    # Half-diminished seventh chords, third omitted
'Cm7-3': [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0], # 4 + 12*9
'#Cm7-3': [0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1],
'Dm7-3': [1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0],
'#Dm7-3': [0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0],
'Em7-3': [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
'Fm7-3': [0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
'#Fm7-3': [1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
'Gm7-3': [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
'bAm7-3': [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0],
'Am7-3': [0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0],
'bBm7-3': [0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
'Bm7-3': [0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1],
#
    # Seventh chords
    # Major seventh chords
'CM7': [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1], # 4 + 12*10
'#CM7': [1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'DM7': [0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0],
'#DM7': [0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0],
'EM7': [0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1],
'FM7': [1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0],
'#FM7': [0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],
'GM7': [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1],
'bAM7': [1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0],
'AM7': [0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
'bBM7': [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0],
'BM7': [0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1],
    # Augmented major seventh chords (half-augmented sevenths)
'Caug7': [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1], # 4 + 12*11
'#Caug7': [1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'Daug7': [0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
'#Daug7': [0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1],
'Eaug7': [1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0],
'Faug7': [0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0],
'#Faug7': [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0],
'Gaug7': [0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1],
'bAaug7': [1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
'Aaug7': [0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
'bBaug7': [0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0],
'Baug7': [0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1],
    # Minor-major seventh chords
'CmM7': [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], # 4 + 12*12
'#CmM7': [1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
'DmM7': [0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'#DmM7': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0],
'EmM7': [0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1],
'FmM7': [1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0],
'#FmM7': [0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0],
'GmM7': [0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0],
'bAmM7': [0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1],
'AmM7': [1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0],
'bBmM7': [0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
'BmM7': [0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
    # Dominant seventh chords
'C7': [1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0], # 4 + 12*13
'#C7': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1],
'D7': [1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0],
'#D7': [0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0],
'E7': [0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1],
'F7': [1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0],
'#F7': [0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
'G7': [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1],
'bA7': [1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0],
'A7': [0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0],
'bB7': [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0],
'B7': [0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1],
    # Minor seventh chords
'Cm7': [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0], # 4 + 12*14
'#Cm7': [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
'Dm7': [1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'#Dm7': [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
'Em7': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1],
'Fm7': [1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
'#Fm7': [0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'Gm7': [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0],
'bAm7': [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1],
'Am7': [1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0],
'bBm7': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],
'Bm7': [0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
    # Half-diminished seventh chords (leading-tone sevenths, formerly "diminished-minor sevenths")
'Cm7-5': [1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0], # 4 + 12*15
'#Cm7-5': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
'Dm7-5': [1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'#Dm7-5': [0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
'Em7-5': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0],
'Fm7-5': [0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
'#Fm7-5': [1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'Gm7-5': [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0],
'bAm7-5': [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1],
'Am7-5': [1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0],
'bBm7-5': [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
'Bm7-5': [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1],
    # Diminished seventh chords
'C/#D/#F/A dim7': [1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0], # 4 + 12*16
'#C/E/G/bB dim7': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0],
'D/F/bA/B dim7': [0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1],
#
    # Sixth chords and suspended seventh chords
    # Major sixth chords
'C6': [1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0], # 7 + 12*16
'#C6': [0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],
'D6': [0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
'#D6': [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0],
'E6': [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
'F6': [1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
'#F6': [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
'G6': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1],
'bA6': [1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
'A6': [0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'bB6': [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0],
'B6': [0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1],
    # Minor sixth chords
'Cm6': [1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0], # 7 + 12*17
'#Cm6': [0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0],
'Dm6': [0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1],
'#Dm6': [1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0],
'Em6': [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1],
'Fm6': [1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
'#Fm6': [0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0],
'Gm6': [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0],
'bAm6': [0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1],
'Am6': [1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'bBm6': [0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0],
'Bm6': [0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1],
    # Dominant seventh sus4 chords
'C7sus4': [1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0], # 7 + 12*18
'#C7sus4': [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1],
'D7sus4': [1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0],
'#D7sus4': [0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
'E7sus4': [0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1],
'F7sus4': [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0],
'#F7sus4': [0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1],
'G7sus4': [1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0],
'bA7sus4': [0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0],
'A7sus4': [0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0],
'bB7sus4': [0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0],
'B7sus4': [0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1],
#
    # Ninth chords with omitted tones:
    # Major ninth chords, seventh omitted (i.e. add9 triads)
'Cadd9': [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0], # 7 + 12*19
'#Cadd9': [0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0],
'Dadd9': [0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0],
'#Dadd9': [0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0],
'Eadd9': [0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1],
'Fadd9': [1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0],
'#Fadd9': [0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
'Gadd9': [0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1],
'bAadd9': [1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0],
'Aadd9': [0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1],
'bBadd9': [1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0],
'Badd9': [0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1],
    # Major ninth chords, third omitted (also the major triad on the fifth with an added fourth)
'CM9,-3': [1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1], # 7 + 12*20
'#CM9,-3': [1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0],
'DM9,-3': [0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0],
'#DM9,-3': [0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0],
'EM9,-3': [0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1],
'FM9,-3': [1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0],
'#FM9,-3': [0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0],
'GM9,-3': [0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0],
'bAM9,-3': [0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0],
'AM9,-3': [0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1],
'bBM9,-3': [1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
'BM9,-3': [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1],
    # Major ninth (minor ninth) chords, fifth omitted
'CM9,-5': [1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1], # 7 + 12*21
'#CM9,-5': [1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0],
'DM9,-5': [0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0],
'#DM9,-5': [0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0],
'EM9,-5': [0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0],
'FM9,-5': [0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0],
'#FM9,-5': [0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
'GM9,-5': [0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1],
'bAM9,-5': [1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0],
'AM9,-5': [0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1],
'bBM9,-5': [1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
'BM9,-5': [0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1],
# TODO: unfinished
}
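# A minimal sketch of one way this table could be used (an assumption about the
# intended usage, not part of the original module): collapse MIDI note numbers
# to a 12-dimensional pitch-class indicator vector and look for exact matches.
def _match_chord_sketch(midi_notes):
    chroma = [0] * 12
    for n in midi_notes:
        chroma[n % 12] = 1
    # return every chord name whose chroma vector matches exactly
    return [name for name, vec in chords_chroma_vector.items() if vec == chroma]
# e.g. _match_chord_sketch([60, 64, 67]) should include 'C' (a C major triad).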
# The circle-of-fifths value of each pitch class (number of fifths away from C):
note_cof_value = {0: 0, 1: -5, 2: 2, 3: -3, 4: 4, 5: -1, 6: 6, 7: 1, 8: -4, 9: 3, 10: -2, 11: 5}
# Circle-of-fifths angle mapping (in radians):
note_cof_angle = {
0: math.pi * 7 / 12, 1: math.pi * 17 / 12, 2: math.pi / 4, 3: math.pi * 13 / 12, 4: -math.pi / 12, 5: math.pi * 3 / 4,
6: -math.pi*5/12, 7: math.pi * 5/12, 8: math.pi*5/4, 9: math.pi/12, 10: math.pi*11/12, 11: -math.pi*1/4}
# Preset interval values used for tension calculation
note_tension = {0: 0, 1: 32, 2: 8, 3: 4, 4: 2, 5: 1, 6: 16, 7: 1, 8: 2, 9: 4, 10: 8, 11: 32}
# Overtone strengths
overtone_strength = {12: 0.85, 19: 0.825, 24: 0.8, 28: 0.75, 31: 0.7, 34: 0.6, 36: 0.6, 38: 0.5}
# Relation between interval (in semitones) and dissonance
interval_dissonance_t1 = {
0: 0,
1: 5.5, 2: 3.3, 3: 2, 4: 2.3, 5: 1.5, 6: 3, 7: 0.5, 8: 2.8, 9: 1, 10: 1.8, 11: 2.9, 12: 0,
13: 2.2, 14: 1.5, 15: 1, 16: 1.2, 17: 0.7, 18: 2, 19: 0, 20: 1.4, 21: 0.5, 22: 0.9, 23: 1.4, 24: 0,
25: 1, 26: 0, 27: 0.4, 28: 0, 29: 0.3, 30: 1, 31: 0, 32: 0.6, 33: 0.2, 34: 0.1, 35: 0.6, 36: 0,
37: 0.5, 38: 0, 39: 0.2, 40: 0, 41: 0.15, 42: 0.5, 43: 0, 44: 0.3, 45: 0.1, 46: 0, 47: 0.3, 48: 0
}
interval_dissonance_t2 = {
0: 0,
1: 5.5, 2: 3.3, 3: 2, 4: 2.3, 5: 1.5, 6: 3, 7: 0.5, 8: 2.8, 9: 1, 10: 1.8, 11: 2.9, 12: 0,
13: 3.2, 14: 2.1, 15: 1.3, 16: 1.4, 17: 1, 18: 2, 19: 0, 20: 1.8, 21: 0.7, 22: 1.2, 23: 2, 24: 0,
25: 1.6, 26: 1, 27: 0.6, 28: 0, 29: 0.7, 30: 1, 31: 0, 32: 0.9, 33: 0.35, 34: 0, 35: 1, 36: 0,
37: 0.5, 38: 0, 39: 0.2, 40: 0, 41: 0.15, 42: 0.5, 43: 0, 44: 0.3, 45: 0.1, 46: 0, 47: 0.3, 48: 0
}
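# Reading the two tables above (illustrative): the key is an interval in
# semitones (0-48, i.e. up to four octaves) and the value is its preset
# dissonance weight. For example interval_dissonance_t1[1] == 5.5 (minor
# second, very dissonant), interval_dissonance_t1[7] == 0.5 (perfect fifth),
# and unisons/octaves (0, 12, 24, ...) map to 0.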
|
PypiClean
|
/adversarial_robustness_toolbox-1.15.1-py3-none-any.whl/art/attacks/inference/attribute_inference/meminf_based.py
|
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import Optional, Union, List, TYPE_CHECKING
import numpy as np
from art.estimators.estimator import BaseEstimator
from art.estimators.classification.classifier import ClassifierMixin
from art.attacks.attack import AttributeInferenceAttack, MembershipInferenceAttack
from art.estimators.regression import RegressorMixin
from art.exceptions import EstimatorError
if TYPE_CHECKING:
from art.utils import CLASSIFIER_TYPE, REGRESSOR_TYPE
logger = logging.getLogger(__name__)
class AttributeInferenceMembership(AttributeInferenceAttack):
"""
    Implementation of an attribute inference attack that utilizes a membership inference attack.
The idea is to find the target feature value that causes the membership inference attack to classify the sample
as a member with the highest confidence.
"""
_estimator_requirements = (BaseEstimator, (ClassifierMixin, RegressorMixin))
def __init__(
self,
estimator: Union["CLASSIFIER_TYPE", "REGRESSOR_TYPE"],
membership_attack: MembershipInferenceAttack,
attack_feature: Union[int, slice] = 0,
):
"""
Create an AttributeInferenceMembership attack instance.
:param estimator: Target estimator.
:param membership_attack: The membership inference attack to use. Should be fit/calibrated in advance, and
should support returning probabilities. Should also support the target estimator.
:param attack_feature: The index of the feature to be attacked or a slice representing multiple indexes in
case of a one-hot encoded feature.
"""
super().__init__(estimator=estimator, attack_feature=attack_feature)
if not membership_attack.is_estimator_valid(estimator, estimator_requirements=self.estimator_requirements):
raise EstimatorError(membership_attack.__class__, membership_attack.estimator_requirements, estimator)
self.membership_attack = membership_attack
self._check_params()
def infer(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:
"""
Infer the attacked feature.
:param x: Input to attack. Includes all features except the attacked feature.
:param y: The labels expected by the membership attack.
:param values: Possible values for attacked feature. For a single column feature this should be a simple list
containing all possible values, in increasing order (the smallest value in the 0 index and so
on). For a multi-column feature (for example 1-hot encoded and then scaled), this should be a
list of lists, where each internal list represents a column (in increasing order) and the values
represent the possible values for that column (in increasing order).
:type values: list
:return: The inferred feature values.
"""
if self.estimator.input_shape is not None:
if isinstance(self.attack_feature, int) and self.estimator.input_shape[0] != x.shape[1] + 1:
raise ValueError("Number of features in x + 1 does not match input_shape of the estimator")
if "values" not in kwargs:
raise ValueError("Missing parameter `values`.")
values: Optional[List] = kwargs.get("values")
if not values:
raise ValueError("`values` cannot be None or empty")
if y is not None:
if y.shape[0] != x.shape[0]:
raise ValueError("Number of rows in x and y do not match")
# single index
if isinstance(self.attack_feature, int):
first = True
for value in values:
v_full = np.full((x.shape[0], 1), value).astype(x.dtype)
x_value = np.concatenate((x[:, : self.attack_feature], v_full), axis=1)
x_value = np.concatenate((x_value, x[:, self.attack_feature :]), axis=1)
predicted = self.membership_attack.infer(x_value, y, probabilities=True)
if first:
probabilities = predicted
first = False
else:
probabilities = np.hstack((probabilities, predicted))
# needs to be of type float so we can later replace back the actual values
value_indexes = np.argmax(probabilities, axis=1).astype(x.dtype)
pred_values = np.zeros_like(value_indexes)
for index, value in enumerate(values):
pred_values[value_indexes == index] = value
else: # 1-hot encoded feature. Can also be scaled.
first = True
# assumes that the second value is the "positive" value and that there can only be one positive column
for index, value in enumerate(values):
curr_value = np.zeros((x.shape[0], len(values)))
curr_value[:, index] = value[1]
for not_index, not_value in enumerate(values):
if not_index != index:
curr_value[:, not_index] = not_value[0]
x_value = np.concatenate((x[:, : self.attack_feature.start], curr_value), axis=1)
x_value = np.concatenate((x_value, x[:, self.attack_feature.start :]), axis=1)
predicted = self.membership_attack.infer(x_value, y, probabilities=True)
if first:
probabilities = predicted
else:
probabilities = np.hstack((probabilities, predicted))
first = False
value_indexes = np.argmax(probabilities, axis=1).astype(x.dtype)
pred_values = np.zeros_like(probabilities)
for index, value in enumerate(values):
curr_value = np.zeros(len(values))
curr_value[index] = value[1]
for not_index, not_value in enumerate(values):
if not_index != index:
curr_value[not_index] = not_value[0]
pred_values[value_indexes == index] = curr_value
return pred_values
def _check_params(self) -> None:
super()._check_params()
if not isinstance(self.membership_attack, MembershipInferenceAttack):
raise ValueError("membership_attack should be a sub-class of MembershipInferenceAttack")
|
PypiClean
|
/barbara_updater-1.2.1.tar.gz/barbara_updater-1.2.1/barbara.py
|
import smbus
import requests
from bs4 import BeautifulSoup
from bs4.element import Tag
import time #allows the sleep commands
from time import strftime
import datetime as dt
from datetime import datetime, timedelta
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)
from matplotlib import pyplot as plt
from matplotlib import rcParams
import matplotlib.dates as mdates
import pandas as pd
import json
import io
from io import BytesIO
from PIL import Image
import matplotlib.image as mpimg
import traceback
from PIL import Image, ImageDraw, ImageFont
import re
import imageio
from matplotlib.animation import FuncAnimation
import os
#from google.oauth2 import service_account
#from google.oauth2.credentials import Credentials
#from googleapiclient.discovery import build
#from google_auth_oauthlib.flow import InstalledAppFlow
#from googleapiclient.http import MediaIoBaseUpload
from math import radians, sin, cos, sqrt, atan2
from geopy.geocoders import Nominatim
from geopy.exc import GeocoderTimedOut
import urllib.parse
from geopy.exc import GeocoderUnavailable
import subprocess
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
import threading  # manages hang-ups in Selenium
import tkinter as tk
from tkinter import ttk, IntVar, Checkbutton
xs = []
ys = []
#global sonde_letter_identifier
global radar_identifier
global day
global hourmin_str
now = datetime.now()
current_year = float(now.strftime("%Y"))
def get_location():
try:
response = requests.get('http://ip-api.com/json')
data = response.json()
if data['status'] == 'success':
lat = data['lat']
lon = data['lon']
return float(lat), float(lon)
except requests.exceptions.RequestException:
pass
return None
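# For reference, the fields read above from the ip-api.com JSON response look
# roughly like {"status": "success", "lat": 42.36, "lon": -71.06, ...}
# (the coordinates shown are made-up placeholders).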
def get_aobs_site(latitude, longitude):
global baro_input
aobs_url = generate_aobs_url(latitude, longitude)
nearest_html = requests.get(aobs_url)
nearest_soup = BeautifulSoup(nearest_html.content, 'html.parser')
panel_title = nearest_soup.find('h2', class_='panel-title')
if panel_title:
aobs_site = panel_title.text.strip()
current_conditions = nearest_soup.find(id='current_conditions_detail')
if current_conditions and isinstance(current_conditions, Tag):
tds = current_conditions.find_all('td')
if len(tds) > 5 and tds[5].string is not None:
baro_input = tds[5].string.strip()
try:
baro_input = float(baro_input[:5])
return aobs_site
except ValueError:
print("This site doesn't have a barometric pressure reading we can use.")
print("Please choose an alternate site when given the chance.")
else:
print("The barometric reading at this site is not available for use.")
else:
print("Observation site not found.")
return None
def get_standard_radar_site_url(latitude, longitude):
global radar_site, radar_site_url
aobs_url = generate_aobs_url(latitude, longitude)
nws_html = requests.get(aobs_url)
nws_soup = BeautifulSoup(nws_html.content, 'html.parser')
radar_img = nws_soup.find('img', src=lambda src: src and 'radar.weather.gov/ridge/standard' in src)
if radar_img:
radar_src = radar_img['src']
radar_site_url = radar_src.split('"')[0]
radar_site = radar_src.split("standard/")[1][:4]
radar_site_url = radar_site_url.replace('_0.gif', '_loop.gif')
return radar_site_url
return "Standard Radar site URL not found"
def generate_aobs_url(latitude, longitude, aobs_site=''):
aobs_url = f"https://forecast.weather.gov/MapClick.php?lon={longitude}&lat={latitude}"
if aobs_site:
aobs_url += f"&site={aobs_site}"
return aobs_url
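# Example of the URL this helper builds (placeholder coordinates):
#     generate_aobs_url(42.36, -71.06)
#     -> "https://forecast.weather.gov/MapClick.php?lon=-71.06&lat=42.36"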
# station_list_url is the master list of radiosonde (IGRA) sites
station_list_url = "https://www1.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt"
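# The parsing below assumes the station list has whitespace-separated columns
# roughly of this layout (inferred from how the fields are indexed):
#     station id, latitude, longitude, elevation, state, town name...,
#     first year, last year, observation count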
def get_nearest_radiosonde_station(latitude, longitude):
response = requests.get(station_list_url)
station_data = response.text.splitlines()[2:] # Skip header lines
min_distance = float('inf')
nearest_station = None
for station in station_data:
station_info = station.split()
try:
station_lat = float(station_info[1])
station_lon = float(station_info[2])
sonde_town = " ".join(station_info[5:-3]) # Join town name with spaces
sonde_state = station_info[4]
station_year = station_info[-2] # Second column from the right
if station_year.isdigit() and int(station_year) in {current_year, current_year - 1}:
distance = calculate_distance(latitude, longitude, station_lat, station_lon)
if distance < min_distance:
min_distance = distance
nearest_station = sonde_town + ", " + sonde_state
except (ValueError, IndexError):
continue # Skip station if there are errors in extracting data
return nearest_station
def calculate_distance(latitude1, longitude1, latitude2, longitude2):
# Convert degrees to radians
lat1, lon1, lat2, lon2 = map(radians, [latitude1, longitude1, latitude2, longitude2])
# Haversine formula for distance calculation
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance = 6371 * c # Earth radius in kilometers
return distance
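# Sanity check for the Haversine helper above (hypothetical coordinates):
# Boston (42.36, -71.06) to New York City (40.71, -74.01) comes out at
# roughly 306 km, i.e. calculate_distance(42.36, -71.06, 40.71, -74.01) ~ 306.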
# Example usage
location = get_location()
if location:
latitude, longitude = location
aobs_site = get_aobs_site(latitude, longitude)
standard_radar_site_url = get_standard_radar_site_url(latitude, longitude)
if aobs_site:
print("Welcome to The Weather Observer!")
print("In order to begin, your new instrument needs to be calibrated,")
print("and you need to make choices about which weather to observe.")
print(" ")
print("The nearest NWS Observation site found is:", aobs_site)
print("This site will be used to calibrate the first barometric pressure reading.")
print("The current barometric pressure reading there is: {:.2f} inches".format(baro_input))
print(" ")
print("The nearest radar site is:", radar_site)
print("This site will be used to display local radar.")
nearest_radiosonde_station = get_nearest_radiosonde_station(latitude, longitude)
print(" ")
print("The nearest radiosonde site is:", nearest_radiosonde_station)
print("This site will be used to show a skew t log p diagram.")
print(" ")
else:
print("Default observation site not found.")
# Suppress the error message by redirecting standard error output to /dev/null
os.system("onboard 2>/dev/null &")
valid_choices = ['y', 'n']
try:
change_site = input("Do you want to change the site used for calibration? (y/n): ")
while change_site.lower() not in valid_choices:
print("Invalid input. Please enter 'y' or 'n'.")
change_site = input("Do you want to change the site used for calibration? (y/n): ")
except KeyboardInterrupt:
# Perform cleanup tasks or handle interruption during input prompt
print("\nKeyboardInterrupt occurred. Opening terminal window...")
# Open a new terminal window
subprocess.run(["lxterminal"])
if change_site.lower() == "y":
while True:
alternative_town = input("Please enter the town name: ")
alternative_state = input("Please enter the 2-letter ID for the state: ").upper()
try:
# Geocode the alternative town to get the latitude and longitude
geolocator = Nominatim(user_agent="geocoder_app")
location = geolocator.geocode(f"{alternative_town}, {alternative_state}", country_codes="us")
if location is not None:
alternative_latitude = location.latitude
alternative_longitude = location.longitude
# Generate the NWS URL for the alternative site
aobs_url = generate_aobs_url(alternative_latitude, alternative_longitude)
alternative_html = requests.get(aobs_url)
alternative_soup = BeautifulSoup(alternative_html.content, 'html.parser')
current_conditions_detail = alternative_soup.find(id='current_conditions_detail')
if current_conditions_detail is not None:
nearest_baro = current_conditions_detail.find_all('td')[5]
if nearest_baro is not None:
baro_string = nearest_baro.string.strip()
baro_match = re.search(r'\d+\.\d+', baro_string)
if baro_match:
baro_input = float(baro_match.group())
observation_site = alternative_soup.find("h2", class_="panel-title").text.strip()
print("Closest observation site to", alternative_town + ", " + alternative_state,
"is:", observation_site)
print("The barometric pressure reading there is: {:.2f} inches".format(baro_input))
confirm_site = input("Is this the observation site you want to use? (y/n): ")
if confirm_site.lower() == "y":
break # Exit the loop and continue with the selected observation site
else:
print("This site doesn't have a barometric pressure reading that can be used.")
else:
print("Failed to retrieve barometric pressure data for the alternative observation site.")
else:
print("Failed to retrieve latitude and longitude for the specified town and state.")
except GeocoderUnavailable:
print("Geocoding service is unavailable. Please try again later.")
else:
print("Using default calibration site.")
# Ask user to make display choices starting here
box_variables = [None] * 12
print(" ")
input("Press Enter when you're ready to make your display choices...")
class BinaryChoiceGUI:
def __init__(self, root):
self.root = root
self.root.title("Choose Displays")
self.root.geometry("1050x600")
# Create a custom style for the checkboxes
style = ttk.Style()
style.configure("Custom.TCheckbutton", font=("Helvetica", 14, "bold")) # Change font size here
self.choice_vars = []
self.choices = ['Barograph', 'National Radar', 'Local Radar', 'Lightning', 'GOES16 East Satellite',
'Local Satellite', 'National Surface Analysis', 'Local Station Plots', 'Radiosonde', '500mb Vorticity',
'GFS 1 Week', 'GFS 2 Week']
self.create_ui()
def create_ui(self):
instruction_text = "Please select your display choices:"
instructions_label = tk.Label(self.root, text=instruction_text, font=("Helvetica", 12, "bold"))
instructions_label.pack(pady=10)
self.column1_frame = tk.Frame(self.root)
self.column2_frame = tk.Frame(self.root)
self.column3_frame = tk.Frame(self.root)
self.v_spacing = 65
self.h_spacing = 65
for index in range(len(self.choices)):
var = IntVar(value=0)
self.choice_vars.append(var)
choice_check_button = ttk.Checkbutton(
self.column1_frame if index < 4 else (self.column2_frame if index < 8 else self.column3_frame),
text=self.choices[index], variable=var, onvalue=1, offvalue=0,
style="Custom.TCheckbutton"
)
choice_check_button.pack(side=tk.TOP, padx=10, pady=(5, self.v_spacing), anchor='w')
if index == 0:
var.set(1) # Set the variable to 1 (selected/checked)
choice_check_button.state(["disabled"]) # Disable the button
elif index in (5, 9, 10, 11): # Boxes 6, 10, 11, 12
var.set(2) # Set the variable to 2 (unchecked)
choice_check_button.state(["disabled"]) # Disable the button
self.column1_frame.pack(side=tk.LEFT, padx=(20, self.h_spacing))
self.column2_frame.pack(side=tk.LEFT, padx=(self.h_spacing, self.h_spacing))
self.column3_frame.pack(side=tk.LEFT, padx=(self.h_spacing, 20))
submit_frame = tk.Frame(self.root)
submit_frame.pack(side=tk.BOTTOM, padx=20, pady=10, anchor='se')
self.submit_button = tk.Button(submit_frame, text="Submit", command=self.submit_choices,
font=("Helvetica", 16, "bold"), padx=20, pady=10) # Change font size, padx, and pady here
self.submit_button.pack()
def submit_choices(self):
global box_variables
box_variables = [1 if var.get() == 1 else 2 for var in self.choice_vars]
self.root.destroy()
def main():
root = tk.Tk()
gui = BinaryChoiceGUI(root)
root.mainloop()
if __name__ == "__main__":
main()
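# After the window is closed, box_variables holds one entry per display in the
# same order as BinaryChoiceGUI.choices: 1 means the box was checked, 2 means
# it was not. For example, a run where only 'Barograph', 'National Radar' and
# 'Local Radar' were selected would leave
#     box_variables == [1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2]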
# Is this a user choice?
if box_variables[2] == 1:
print(" ")
radar_identifier = radar_site[-3:]
change_radar_site = input("Do you want to change the radar site? (y/n): ")
while change_radar_site.lower() not in ["y", "n"]:
print("Invalid input. Please enter 'y' or 'n'.")
change_radar_site = input("Do you want to change the radar site? (y/n): ")
if change_radar_site.lower() == "y":
confirmed = False
while not confirmed:
radar_identifier = input("Please enter the 3-letter identifier of the radar site: ").upper()
radar_url = f"https://radar.weather.gov/ridge/standard/K{radar_identifier}_loop.gif"
response = requests.head(radar_url)
if response.status_code == 200:
radar_site_url = radar_url
print("The local radar site has been updated to:", radar_identifier)
confirm_choice = input("Is this the radar site you want? (y/n): ")
while confirm_choice.lower() not in ["y", "n"]:
print("Invalid input. Please enter 'y' or 'n'.")
confirm_choice = input("Is this the radar site you want? (y/n): ")
if confirm_choice.lower() == "y":
confirmed = True
else:
print("Please choose another radar site.")
else:
print("Invalid radar site. Please choose another radar site.")
else:
pass
# using this website to keep track of radiosonde sites
# https://www1.ncdc.noaa.gov/pub/data/igra/igra2-station-list.txt
# will write to them and ask when master list is updated upon change of year
# Is this a user choice?
if box_variables[8] == 1:
print("")
global sonde_letter_identifier
sonde_letter_identifier = nearest_radiosonde_station
change_radiosonde_site = input("Do you want to change the radiosonde site? (y/n): ")
while change_radiosonde_site.lower() not in valid_choices:
print("Invalid input. Please enter 'y' or 'n'.")
change_radiosonde_site = input("Do you want to change the radiosonde site? (y/n): ")
if change_radiosonde_site.lower() == "y":
radiosonde_state = input("Please enter the 2-letter ID of the state: ").upper()
# Check if the entered value is a 2-letter state ID
while len(radiosonde_state) != 2 or not radiosonde_state.isalpha():
print("Invalid state ID. Please enter a 2-letter state ID.")
radiosonde_state = input("Please enter the 2-letter ID of the state: ").upper()
response = requests.get(station_list_url)
station_data = response.text.splitlines()[2:] # Skip header lines
active_radiosonde_sites = []
for station in station_data:
station_info = station.split()
sonde_state = station_info[4]
sonde_town = " ".join(station_info[5:-3]) # Join town name with spaces
station_year = station_info[-2] # Second column from the right
if station_year.isdigit() and int(station_year) in {current_year, current_year - 1}:
if sonde_state == radiosonde_state:
active_radiosonde_sites.append(sonde_town)
while not active_radiosonde_sites:
print("No active radiosonde sites found in", radiosonde_state)
radiosonde_state = input("Please enter another 2-letter ID of the state: ").upper()
# Check if the entered value is a 2-letter state ID
while len(radiosonde_state) != 2 or not radiosonde_state.isalpha():
print("Invalid state ID. Please enter a 2-letter state ID.")
radiosonde_state = input("Please enter the 2-letter ID of the state: ").upper()
active_radiosonde_sites = []
for station in station_data:
station_info = station.split()
sonde_state = station_info[4]
sonde_town = " ".join(station_info[5:-3]) # Join town name with spaces
station_year = station_info[-2] # Second column from the right
if station_year.isdigit() and int(station_year) in {current_year, current_year - 1}:
if sonde_state == radiosonde_state:
active_radiosonde_sites.append(sonde_town)
if active_radiosonde_sites:
print("Available Radiosonde Sites in", radiosonde_state + ":")
for site in active_radiosonde_sites:
print(site)
alternative_town = input("Please enter the town from the above list: ").upper()
selected_radiosonde_station = None
for station in station_data:
station_info = station.split()
sonde_state = station_info[4]
sonde_town = " ".join(station_info[5:-3]) # Join town name with spaces
if sonde_state == radiosonde_state and sonde_town == alternative_town:
selected_radiosonde_station = sonde_town + ", " + sonde_state
break
if selected_radiosonde_station is not None:
print("New radiosonde site:", selected_radiosonde_station)
while True:
try:
# Use geopy to get the latitude and longitude of the town
geolocator = Nominatim(user_agent="my_app")
location = geolocator.geocode(f"{sonde_town}, {sonde_state}, USA")
if location is None:
print("Location not found.")
else:
latitude = location.latitude
longitude = location.longitude
# Build the URL for the NWS office based on latitude and longitude
nws_url = f"https://forecast.weather.gov/MapClick.php?lat={latitude}&lon={longitude}"
try:
# Fetch the HTML content of the NWS office page
response = requests.get(nws_url)
response.raise_for_status()
# Parse the HTML content
soup = BeautifulSoup(response.content, "html.parser")
# Find the Local Forecast Office link and extract the 3-letter code
local_forecast_link = soup.find("a", id="localWFO")
if local_forecast_link:
local_forecast_url = local_forecast_link["href"]
# Extract the NWS 3-letter code from the Local Forecast Office URL
code_match = re.search(r"https://www.weather.gov/([A-Za-z]{3})/", local_forecast_url)
if code_match:
sonde_letter_identifier = code_match.group(1).upper() # Convert to uppercase
#print(f"NWS 3-Letter Code for {sonde_town}, {sonde_state}: {sonde_letter_identifier}")
else:
print("NWS 3-Letter Code not found in the Local Forecast Office URL.")
else:
print("Could not match site with its 3-letter code.")
except requests.RequestException as e:
print("Error occurred during API request:", str(e))
break
except requests.RequestException as e:
print("Error occurred during API request:", str(e))
else:
# Use default town and state to generate the sonde_letter_identifier
sonde_town, sonde_state = nearest_radiosonde_station.split(", ")
try:
# Use geopy to get the latitude and longitude of the town
geolocator = Nominatim(user_agent="my_app")
location = geolocator.geocode(f"{sonde_town}, {sonde_state}, USA")
if location is None:
print("Location not found.")
else:
latitude = location.latitude
longitude = location.longitude
# Build the URL for the NWS office based on latitude and longitude
nws_url = f"https://forecast.weather.gov/MapClick.php?lat={latitude}&lon={longitude}"
try:
# Fetch the HTML content of the NWS office page
response = requests.get(nws_url)
response.raise_for_status()
# Parse the HTML content
soup = BeautifulSoup(response.content, "html.parser")
# Find the Local Forecast Office link and extract the 3-letter code
local_forecast_link = soup.find("a", id="localWFO")
if local_forecast_link:
local_forecast_url = local_forecast_link["href"]
# Extract the NWS 3-letter code from the Local Forecast Office URL
code_match = re.search(r"https://www.weather.gov/([A-Za-z]{3})/", local_forecast_url)
if code_match:
sonde_letter_identifier = code_match.group(1).upper() # Convert to uppercase
print(f"NWS 3-Letter Code for {sonde_town}, {sonde_state}: {sonde_letter_identifier}")
else:
print("NWS 3-Letter Code not found in the Local Forecast Office URL.")
else:
print("Could not match site with its 3-letter code.")
except requests.RequestException as e:
print("Error occurred during API request:", str(e))
except requests.RequestException as e:
print("Error occurred during API request:", str(e))
valid_choices = ['y', 'n']
# Prompt the user to pick 3 observation sites
print(" ")
print("You will now choose 3 observation sites to be displayed at the top of the display.")
# Observation Site 1
confirmed_site_1 = False
while not confirmed_site_1:
print(" ")
# Prompt the user to choose between a buoy and a regular observation site
use_buoy = input("Do you want to choose a buoy as an observation site? (y/n): ").lower()
while use_buoy not in ['y', 'n']:
print("Please respond with 'y' or 'n'.")
use_buoy = input("Do you want to choose a buoy as an observation site? (y/n): ").lower()
if use_buoy == 'y':
alternative_town_1 = input("Enter the 5-character code for the buoy: ").upper()
# Build the URL using the buoy code
aobs_url = f"https://www.ndbc.noaa.gov/station_page.php?station={alternative_town_1}"
response = requests.get(aobs_url)
if response.status_code == 200:
confirmed_site_1 = True
else:
print("Not able to find data for that buoy. Please choose another site.")
else:
print(" ")
alternative_town_1 = input("Please enter the town name for Observation Site 1: ")
alternative_state_1 = input("Please enter the 2-letter ID for the state for Observation Site 1: ").upper()
try:
# Geocode the alternative town to get the latitude and longitude
geolocator = Nominatim(user_agent="geocoder_app")
location_1 = geolocator.geocode(f"{alternative_town_1}, {alternative_state_1}", country_codes="us")
if location_1 is not None:
alternative_latitude_1 = location_1.latitude
alternative_longitude_1 = location_1.longitude
# Generate the NWS URL for the alternative site
aobs_url = generate_aobs_url(alternative_latitude_1, alternative_longitude_1)
alternative_html = requests.get(aobs_url)
alternative_soup = BeautifulSoup(alternative_html.content, 'html.parser')
extended_forecast = alternative_soup.find("div", id="seven-day-forecast")
current_conditions = alternative_soup.find("div", id="current-conditions")
if extended_forecast is not None:
aobs_town = extended_forecast.find("h2", class_="panel-title").text.strip()
aobs_obs_site = current_conditions.find("h2", class_="panel-title").text.strip()
print("The nearest official observation to " + alternative_town_1.title(), "is " + aobs_obs_site)
confirm_observation_site_1 = input("Is this the observation site you want? (y/n): ")
if confirm_observation_site_1.lower() == "y":
confirmed_site_1 = True
else:
print("Please choose another observation site for Observation Site 1.")
else:
print("Failed to retrieve observation site for Observation Site 1.")
else:
print("Failed to retrieve latitude and longitude for the specified town and state for Observation Site 1.")
except GeocoderUnavailable:
print("Geocoding service is unavailable. Please try again later.")
# Observation Site 2
confirmed_site_2 = False
while not confirmed_site_2:
print(" ")
alternative_town_2 = input("Please enter the town name for Observation Site 2: ")
alternative_state_2 = input("Please enter the 2-letter ID for the state for Observation Site 2: ").upper()
try:
# Geocode the alternative town to get the latitude and longitude
geolocator = Nominatim(user_agent="geocoder_app")
location_2 = geolocator.geocode(f"{alternative_town_2}, {alternative_state_2}", country_codes="us")
if location_2 is not None:
alternative_latitude_2 = location_2.latitude
alternative_longitude_2 = location_2.longitude
# Generate the NWS URL for the alternative site
bobs_url = generate_aobs_url(alternative_latitude_2, alternative_longitude_2)
alternative_html = requests.get(bobs_url)
alternative_soup = BeautifulSoup(alternative_html.content, 'html.parser')
extended_forecast = alternative_soup.find("div", id="seven-day-forecast")
current_conditions = alternative_soup.find("div", id="current-conditions")
if extended_forecast is not None:
bobs_town = extended_forecast.find("h2", class_="panel-title").text.strip()
bobs_obs_site = current_conditions.find("h2", class_="panel-title").text.strip()
print("The nearest official observation to " + alternative_town_2.title(), "is " + bobs_obs_site)
confirm_observation_site_2 = input("Is this the observation site you want? (y/n): ")
if confirm_observation_site_2.lower() == "y":
confirmed_site_2 = True
else:
print("Please choose another observation site for Observation Site 2.")
else:
print("Failed to retrieve observation site for Observation Site 2.")
else:
print("Failed to retrieve latitude and longitude for the specified town and state for Observation Site 2.")
except GeocoderUnavailable:
print("Geocoding service is unavailable. Please try again later.")
# Observation Site 3
confirmed_site_3 = False
while not confirmed_site_3:
print(" ")
alternative_town_3 = input("Please enter the town name for Observation Site 3: ")
alternative_state_3 = input("Please enter the 2-letter ID for the state for Observation Site 3: ").upper()
try:
# Geocode the alternative town to get the latitude and longitude
geolocator = Nominatim(user_agent="geocoder_app")
location_3 = geolocator.geocode(f"{alternative_town_3}, {alternative_state_3}", country_codes="us")
if location_3 is not None:
alternative_latitude_3 = location_3.latitude
alternative_longitude_3 = location_3.longitude
# Generate the NWS URL for the alternative site
cobs_url = generate_aobs_url(alternative_latitude_3, alternative_longitude_3)
alternative_html = requests.get(cobs_url)
alternative_soup = BeautifulSoup(alternative_html.content, 'html.parser')
extended_forecast = alternative_soup.find("div", id="seven-day-forecast")
current_conditions = alternative_soup.find("div", id="current-conditions")
if extended_forecast is not None:
cobs_town = extended_forecast.find("h2", class_="panel-title").text.strip()
cobs_obs_site = current_conditions.find("h2", class_="panel-title").text.strip()
print("The nearest official observation to " + alternative_town_3.title(), "is " + cobs_obs_site)
confirm_observation_site_3 = input("Is this the observation site you want? (y/n): ")
if confirm_observation_site_3.lower() == "y":
confirmed_site_3 = True
else:
print("Please choose another observation site for Observation Site 3.")
else:
print("Failed to retrieve observation site for Observation Site 3.")
else:
print("Failed to retrieve latitude and longitude for the specified town and state for Observation Site 3.")
except GeocoderUnavailable:
print("Geocoding service is unavailable. Please try again later.")
# Is this a user choice?
if box_variables[3] == 1:
# Determine center of lightning map
lightning_geolocator = Nominatim(user_agent="lightning_map")
while True:
print(" ")
print("The lightning detection map is about 850 miles wide")
lightning_town = input("Name the city/town to center the lightning detection map: ")
lightning_state = input("Enter the two-letter state ID for that town: ")
# Combine town and state into a search query
lightning_query = f"{lightning_town}, {lightning_state}"
# Use geocoder to get coordinates
lightning_location = lightning_geolocator.geocode(lightning_query)
if lightning_location:
lightning_lat = lightning_location.latitude
lightning_lon = lightning_location.longitude
break
else:
print("Location not found.")
# Is this a user choice?
if box_variables[7] == 1:
# Determine center of sfc model map
sfc_model_geolocator = Nominatim(user_agent="sfc_model_map")
while True:
print(" ")
sfc_model_town = input("Name the city/town to center the map with station model plots: ")
sfc_model_state = input("Enter the two-letter state ID for that town: ")
        # Combine town and state into a search query
sfc_model_query = f"{sfc_model_town}, {sfc_model_state}"
        # Use geocoder to get coordinates
sfc_model_location = sfc_model_geolocator.geocode(sfc_model_query)
if sfc_model_location:
sfc_model_lat = sfc_model_location.latitude
sfc_model_lon = sfc_model_location.longitude
break
else:
print("Location not found")
# Finish setting up graphics display parameters
rcParams['figure.figsize'] = 12,6
# Create a figure for plotting
light_blue = (0.8, 0.9, 1.0)
fig = plt.figure(facecolor=light_blue)
ax = fig.add_subplot(1, 1, 1)
bx = fig.add_subplot(1, 1, 1, label="unique_label")
# Shut off the Thonny navigation toolbar
if fig.canvas.toolbar:
fig.canvas.toolbar.pack_forget()
plt.axis('off')
# This function is called periodically from FuncAnimation
def animate(i, xs, ys):
global correction_factor
global cycle_counter, frame_index #added while trying to display looping radar
# Get I2C bus
bus = smbus.SMBus(1)
# HP203B address, 0x77(118)
# Send OSR and channel setting command, 0x44(68)
bus.write_byte(0x77, 0x44 | 0x00)
time.sleep(0.5)
# HP203B address, 0x77(118)
# Read data back from 0x10(16), 6 bytes
# cTemp MSB, cTemp CSB, cTemp LSB, pressure MSB, pressure CSB, pressure LSB
data = bus.read_i2c_block_data(0x77, 0x10, 6)
# Convert the data to 20-bits
# Correct for 160 feet above sea level
# cpressure is pressure corrected for elevation
cTemp = (((data[0] & 0x0F) * 65536) + (data[1] * 256) + data[2]) / 100.00
fTemp = (cTemp * 1.8) + 32
pressure = (((data[3] & 0x0F) * 65536) + (data[4] * 256) + data[5]) / 100.00
cpressure = (pressure * 1.0058)
inHg = (cpressure * .029529)
#print (inHg)
if i < 1:
correction_factor = (baro_input/inHg)
inHg = correction_factor * inHg
#print (baro_input, correction_factor, inHg)
# HP203B address, 0x77(118)
# Send OSR and channel setting command, 0x44(68)
bus.write_byte(0x77, 0x44 | 0x01)
time.sleep(0.5)
    # HP203B address, 0x77(119)
# Read data back from 0x31(49), 3 bytes
# altitude MSB, altitude CSB, altitude LSB
data = bus.read_i2c_block_data(0x77, 0x31, 3)
# Convert the data to 20-bits
altitude = (((data[0] & 0x0F) * 65536) + (data[1] * 256) + data[2]) / 100.00
if i > 1:
# Specify the file path to your credentials JSON file
#credentials_path = '/home/pi/Downloads/credentials.json'
# Define the scopes for accessing Google Drive
#scopes = ['https://www.googleapis.com/auth/drive.file']
# Load the saved credentials from the token file
#credentials = Credentials.from_authorized_user_file('token.json', scopes)
# Create the drive_service object
#drive_service = build('drive', 'v3', credentials=credentials)
# Save the image using plt.savefig()
plt.savefig('baro_trace.png')
# Upload the image to Google Drive
#file_metadata = {'name': 'baro_trace.png'}
#media_body = MediaIoBaseUpload(open('baro_trace.png', 'rb'), mimetype='image/png')
#upload_request = drive_service.files().create(body=file_metadata, media_body=media_body)
#upload_response = upload_request.execute()
#file_id = upload_response['id']
#permission = drive_service.permissions().create(fileId=file_id, body={'role': 'reader', 'type': 'anyone'}).execute()
#image_download_url = f"https://drive.google.com/uc?id={file_id}"
#print(f"Image URL: {image_download_url}")
# Read the saved image file
#with open('baro_trace.png', 'rb') as image_file:
#image_data = image_file.read()
# Get the file ID of the previous image (assuming you have stored it previously)
#previous_image_file_id = '1G3d1WDyUcFmEdD3re8oKi45tM8MP7oma'
# Update the existing file with the latest image data
#media_body = MediaIoBaseUpload(io.BytesIO(image_data), mimetype='image/png')
#update_request = drive_service.files().update(fileId=previous_image_file_id, media_body=media_body)
#update_response = update_request.execute()
#print("Image updated successfully in Google Drive.")
ax.clear()
bx.clear()
now = datetime.now() # current date and time
day = now.strftime("%A")
hourmin_str = now.strftime("%H:%M")
# Adjust margins
fig.subplots_adjust(left=0.125, right=0.90, bottom=0, top=0.88)
ax.text(0, 1.09, "The",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(0, 1.05, "Weather",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(0, 1.01, "Observer",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.11, 1.01, f'Last Updated\n{now.strftime("%A")}\n{now.strftime("%I:%M %P")}',
transform=ax.transAxes,
fontweight='light', fontstyle='italic', horizontalalignment='left', fontsize=6)
try:
global atemp, awtemp, awind, btemp, bwind, ctemp, cwind
if aobs_url.startswith("https://www.ndbc.noaa.gov/"):
try:
buoy_code = "Buoy: " + alternative_town_1
ax.text(.2, 1.1, str(buoy_code),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.07, str(atemp),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.04, str(awtemp),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.01, str(awind),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
except Exception as e:
print("2nd print of buoy data", e)
pass
else:
ax.text(.20, 1.09, alternative_town_1.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.20, 1.05, atemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.20, 1.01, awind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print( "a obs error:", e)
pass
try:
ax.text(.50, 1.09, alternative_town_2.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.50, 1.05, btemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.50, 1.01, bwind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print("b Obs error:", e)
pass
try:
ax.text(.80, 1.09, alternative_town_3.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.80, 1.05, ctemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.80, 1.01, cwind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print("c obs error:", e)
pass
# Only runs if the user enabled this option
if box_variables[1] == 1:
# Display the national composite radar image in the subplot
try:
# Scrape and save the regional composite radar image
radar_url = 'https://radar.weather.gov/ridge/standard/CONUS_0.gif'
radar_response = requests.get(radar_url)
radar_content = radar_response.content
radar_image = Image.open(BytesIO(radar_content))
radar_image.save('radar.png', 'PNG')
if radar_response.status_code == 200:
radar_image = Image.open('radar.png')
bx.imshow(radar_image)
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(7)
else:
pass
except Exception as e:
print("Scrape, save and Display regional radar", e)
pass
# Only runs if the user enabled this option
if box_variables[2] == 1:
# Scrape, Save and Display local radar loop in the subplot
try:
global radar_identifier
radar_loop_url = f"https://radar.weather.gov/ridge/standard/K{radar_identifier}_loop.gif"
# Scrape and save the radar GIF
radar_loop_response = requests.get(radar_loop_url)
if radar_loop_response.status_code == 200:
with open('radar_loop.gif', 'wb') as f:
f.write(radar_loop_response.content)
# Open the radar GIF and extract frames
radar_loop_image = Image.open('radar_loop.gif')
radar_frames = []
try:
while True:
radar_frames.append(radar_loop_image.copy())
radar_loop_image.seek(len(radar_frames)) # Move to the next frame
except EOFError:
pass
# Display the frames in a loop, cycling 1 time
num_cycles = 1
plt.ion() # Turn on interactive mode
# Pre-load the frames into memory before starting the loop
preloaded_frames = [radar_frame.copy() for radar_frame in radar_frames]
for cycle in range(num_cycles):
for radar_frame in preloaded_frames:
bx.imshow(radar_frame)
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(0.01) # Pause for a short duration between frames
except Exception as e:
print("Scrape, Save and Display local radar", e)
pass
# Only runs if the user enabled this option
if box_variables[3] == 1:
#Use Selenium to get lightning data
# URL of the website to capture
lightning_url = (
"https://www.lightningmaps.org/?lang=en#m=oss;t=1;s=200;o=0;b=0.00;ts=0;d=2;dl=2;dc=0;y=" +
str(lightning_lat) + ";x=" + str(lightning_lon) + ";z=6;"
)
# Configure Chrome options for headless mode
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
# Use the system-installed ChromeDriver executable
driver = webdriver.Chrome(service=Service("chromedriver"), options=chrome_options)
# Navigate to the URL
driver.get(lightning_url)
try:
# Wait for the "Got it!" button to be clickable
wait = WebDriverWait(driver, 30)
got_it_button = wait.until(EC.element_to_be_clickable((By.XPATH, "//a[@class='cc-btn cc-dismiss']")))
# Click the "Got it!" button
got_it_button.click()
time.sleep(5)
# Capture a screenshot of the entire page
lightning_screenshot = driver.get_screenshot_as_png()
# Close the WebDriver
driver.quit()
# Display the screenshot using PIL
lightning_screenshot_image = Image.open(io.BytesIO(lightning_screenshot))
lightning_screenshot_crop = lightning_screenshot_image.crop((0, 0, lightning_screenshot_image.width, lightning_screenshot_image.height - 90))
bx.imshow(lightning_screenshot_crop, aspect='equal')
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(7)
except TimeoutError:
print("Selenium & Display lightning image: Timeout occurred (30 seconds). Exiting current attempt.")
except Exception as e:
print("Selenium & Display lightning image:", e)
# Only runs if the user enabled this option
if box_variables[4] == 1:
# Scrape, Save and Display the national satellite image in the subplot
try:
satellite_url = 'https://cdn.star.nesdis.noaa.gov/GOES16/ABI/CONUS/GEOCOLOR/1250x750.jpg'
satellite_response = requests.get(satellite_url)
satellite_content = satellite_response.content
satellite_image = Image.open(BytesIO(satellite_content))
satellite_image.save('satellite.png', 'PNG')
if satellite_response.status_code == 200:
satellite_image = Image.open('satellite.png')
bx.imshow(satellite_image, aspect='equal')
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(7)
else:
pass
except Exception as e:
print("Scrape, Save and Display satellite image", e)
pass
# Only runs if the user enabled this option
if box_variables[6] == 1:
# Scrape, Save and Display the national surface analysis in the subplot
try:
sfc_url = 'https://www.wpc.ncep.noaa.gov/basicwx/92fndfd.gif'
sfc_response = requests.get(sfc_url)
sfc_content = sfc_response.content
sfc_image = Image.open(BytesIO(sfc_content))
sfc_image.save('sfc.png', 'PNG')
if sfc_response.status_code == 200:
sfc_image = Image.open('sfc.png')
bx.imshow(sfc_image)
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(7)
else:
pass
except Exception as e:
print("Scrape, Save and Display sfc analysis", e)
pass
# Only runs if the user enabled this option
if box_variables[7] == 1:
#Build, take, and display snapshot of local station models
timeout_seconds = 30
try:
global station_model_url
# URL of the website to capture map of station model
#station_model_url = "http://www.wrh.noaa.gov/map/?&zoom=9&scroll_zoom=false¢er=43.7568782054261,-70.02367715840926&boundaries=false,false,false,false,false,false,false,false,false,false,false&tab=observation&obs=true&obs_type=weather&elements=temp,dew,wind,gust,slp&temp_filter=-80,130&gust_filter=0,150&rh_filter=0,100&elev_filter=-300,14000&precip_filter=0.01,30&obs_popup=false&fontsize=4&obs_density=60&obs_provider=ALL"
base_url = "http://www.wrh.noaa.gov/map/?&zoom=9&scroll_zoom=false"
other_params = "&boundaries=false,false,false,false,false,false,false,false,false,false,false&tab=observation&obs=true&obs_type=weather&elements=temp,dew,wind,gust,slp&temp_filter=-80,130&gust_filter=0,150&rh_filter=0,100&elev_filter=-300,14000&precip_filter=0.01,30&obs_popup=false&fontsize=4&obs_density=60&obs_provider=ALL"
lat_lon_params = "¢er=" + str(sfc_model_lat) + "," + str(sfc_model_lon)
station_model_url = base_url + lat_lon_params + other_params
# Configure Chrome options for headless mode
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--disable-gpu")
# Set the desired aspect ratio
desired_aspect_ratio = 1.8 # Width should be 1.8x the height
# Calculate the browser window size to achieve the desired aspect ratio
desired_width = 1200 # Adjust this value as needed
desired_height = int(desired_width / desired_aspect_ratio)
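# e.g. desired_width = 1200 with aspect ratio 1.8 gives desired_height = int(1200 / 1.8) = 666 px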
# Set the browser window size
chrome_options.add_argument(f"--window-size={desired_width},{desired_height}")
# Use the system-installed ChromeDriver executable
driver = webdriver.Chrome(service=Service("chromedriver"), options=chrome_options)
# Navigate to the URL
driver.get(station_model_url)
# Find and wait for the close button to be clickable, then click it
close_button_locator = (By.CSS_SELECTOR, "a.panel-close")
wait = WebDriverWait(driver, timeout_seconds)
wait.until(EC.element_to_be_clickable(close_button_locator)).click()
time.sleep(10)
# Capture a screenshot of the entire page
station_model_screenshot = driver.get_screenshot_as_png()
# Close the WebDriver
driver.quit()
# Display the screenshot using PIL
station_model_screenshot_image = Image.open(io.BytesIO(station_model_screenshot))
station_model_screenshot_crop = station_model_screenshot_image.crop((42, 0, station_model_screenshot_image.width, station_model_screenshot_image.height))
bx.imshow(station_model_screenshot_crop, aspect='equal')
ax.axis('off')
bx.axis('off')
plt.draw()
plt.pause(7)
except Exception as e:
print("Selenium Station models on sfc plot", e)
pass
# Only runs if the user enabled this option
if box_variables[8] == 1:
# Scrape, Save and Display the GYX sounding in the subplot
try:
# Get current UTC time and date
scrape_now = datetime.utcnow()
if scrape_now.hour >= 1 and scrape_now.hour < 13:
# Use 00z for current UTC date
date_str = scrape_now.strftime("%y%m%d00")
hour_str = "00Z"
else:
# Use 12z for current UTC date
hour_str = "12Z"
date_str = scrape_now.strftime("%y%m%d12")
if scrape_now.hour < 1:
# Use previous UTC date for 00z images
scrape_now -= timedelta(days=1)
date_str = scrape_now.strftime("%y%m%d12")
month_str = scrape_now.strftime("%b").capitalize()
day_str = str(scrape_now.day)
# Construct image URL
sound_url = f"https://www.spc.noaa.gov/exper/soundings/{date_str}_OBS/{sonde_letter_identifier}.gif"
# Send a GET request to the image URL to get the image content
sound_response = requests.get(sound_url)
# Save the image using Pillow
sound_img = Image.open(BytesIO(sound_response.content))
# Crop the top 250 pixels from the image
crop_box = (0, 250, sound_img.width, sound_img.height)
sound_img = sound_img.crop(crop_box)
sound_img.save('sound.png', 'PNG')
# Pause for 2 seconds; this time still counts toward how long the barometer trace is displayed
plt.pause(2)
if sound_response.status_code == 200:
sound_img = Image.open('sound.png')
# Calculate the aspect ratio of the image
sound_img = sound_img.convert('RGBA')
aspect_ratio = sound_img.width / sound_img.height
# Set the displayed image size as a fraction of the axes (width 0.83, full height)
display_width = 0.83
display_height = 1
# Calculate the extent of the displayed image
display_extent = [0, display_width, 0, display_height / aspect_ratio]
# Create a new image with a white background
sound_img_with_white_bg = Image.new('RGBA', (int(sound_img.width), int(sound_img.height)), (255, 255, 255, 255))
sound_img_with_white_bg.paste(sound_img, (0, 0), sound_img)
sound_img_with_white_bg.save('sound_img.png', 'PNG')
# Display the image with the adjusted extent
ax.axis('off')
bx.axis('off')
bx.imshow(sound_img_with_white_bg, extent=display_extent)
# Add the text to the subplot
bx.text(0.28, 0.89, f'{month_str} {day_str} {hour_str}\n{sonde_town} {sonde_state}', ha='left', va='center', fontweight='bold', transform=bx.transAxes)
plt.draw()
plt.pause(13)
else:
pass
except Exception as e:
print("Scrape, Save and Display sounding", e)
pass
bx.clear()
bx.axis('off')
# Set custom margins
fig.subplots_adjust(left=0.125, right=0.9, bottom=0.11, top=0.88)
else:
pass
if ".ndbc." in aobs_url:
try:
#Scrape for buoy data
aurl = aobs_url
ahtml = requests.get(aurl)# requests instance
time.sleep(5)
asoup = BeautifulSoup(ahtml.text,'html.parser')
awd = asoup.find(class_="dataTable").find_all('td')[0]
awd = awd.string.split()[0]
aws = asoup.find(class_="dataTable").find_all('td')[1]
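# Unit conversion (assumption): the scraped wind speed appears to be in knots; 1 knot is about 1.15078 mph, hence the factor applied to speed and gust below.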
aws = float(aws.string) * 1.15078
aws = round(aws)
aws = " at {} mph".format(aws)
awg = asoup.find(class_="dataTable").find_all('td')[2]
awg = round(float(awg.string) * 1.15078)
awg = " G{}".format(awg)
awind = awd + aws + awg
awt = asoup.find(class_="dataTable")
awt = awt.find_all('td')[10]
awt = awt.string
if not "-" in awt:
awtemp = "Water Temp: " + str(round(float(awt.string))) + chr(176)
else:
awtemp = "Water Temp: -"
pass
aat = asoup.find(class_="dataTable")
aat = aat.find_all('td')[9]
atemp = "Air Temp: " + str(round(float(aat.string))) + chr(176)
except Exception as e:
print("Scrape buoy data", e)
pass
else:
#scrape for land aobs
aurl = aobs_url
try:
# Send a GET request to the website and store the response in a variable
ahtml = requests.get(aurl)
# Parse the HTML content of the website using BeautifulSoup
asoup = BeautifulSoup(ahtml.content, 'html.parser')
# Find the current temperature, wind direction, and wind speed
atemp = asoup.find('p', class_='myforecast-current-lrg').text
atemp = atemp[:-1]
awind = asoup.find(id='current_conditions_detail')('td')[3]
awind = awind.string
except Exception as e:
print("Scrape PWM data", e)
pass
#scrape for bobs
burl = bobs_url
try:
# Send a GET request to the website and store the response in a variable
bhtml = requests.get(burl)
# Parse the HTML content of the website using BeautifulSoup
bsoup = BeautifulSoup(bhtml.content, 'html.parser')
# Find the current temperature, wind direction, and wind speed
btemp = bsoup.find('p', class_='myforecast-current-lrg').text
btemp = btemp[:-1]
bwind = bsoup.find(id='current_conditions_detail')('td')[3]
bwind = bwind.string
except Exception as e:
print("Scrape station b data", e)
pass
# scrape for cobs
curl = cobs_url
try:
# Send a GET request to the website and store the response in a variable
chtml = requests.get(curl)
# Parse the HTML content of the website using BeautifulSoup
csoup = BeautifulSoup(chtml.content, 'html.parser')
# Find the current temperature, wind direction, and wind speed
ctemp = csoup.find('p', class_='myforecast-current-lrg').text
ctemp = ctemp[:-1]
cwind = csoup.find(id='current_conditions_detail')('td')[3]
cwind = cwind.string
except Exception as e:
print("Scrape buoy data", e)
pass
# Get time stamp
now = datetime.now() # current date and time
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
time_str = now.strftime("%H:%M:%S")
hourmin_str = now.strftime("%H:%M")
hms = now.strftime("%H:%M:%S")
day = now.strftime("%A")
date_time = now.strftime("%m/%d/%Y, %H:%M:%S")
date_time = pd.to_datetime(date_time) #allows us to label x-axis
now = datetime.now() # current date and time
time_delta = dt.timedelta(minutes=4200)
start_time = now - time_delta
#sec = now.strftime("%S")
# Set axis limits and labels
ax.set_xlim(start_time, now)
dtext=date_time
#Build xs and ys arrays
xs.append(date_time)
ys.append(inHg)
#Limit x and y lists to the most recent 4200 observations
xs = xs[-4200:] #Adjust this negative number to set how many obs are plotted in one window
ys = ys[-4200:] #At a rate of 1 plot/min, 24 hours of data would be 1440 points
#Draw x and y lists
ax.clear()
ax.plot(xs, ys, 'r-')
ax.text(0, 1.09, "The",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(0, 1.05, "Weather",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(0, 1.01, "Observer",
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.11, 1.01, f'Last Updated\n{now.strftime("%A")}\n{now.strftime("%I:%M %P")}',
transform=ax.transAxes,
fontweight='light', fontstyle='italic', horizontalalignment='left', fontsize=7)
if ".ndbc." in aobs_url:
try:
buoy_code = "Buoy: " + alternative_town_1
ax.text(.2, 1.1, str(buoy_code),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.07, str(atemp),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.04, str(awtemp),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
ax.text(.2, 1.01, str(awind),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=9)
except Exception as e:
print("2nd print of buoy data", e)
pass
else:
try:
ax.text(.20, 1.09, alternative_town_1.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.20, 1.05, atemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.20, 1.01, awind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print("2nd aobs error:", e)
pass
try:
ax.text(.50, 1.09, alternative_town_2.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.50, 1.05, btemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.50, 1.01, bwind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print("2nd bobs error:", e)
pass
try:
ax.text(.80, 1.09, alternative_town_3.title(),
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.80, 1.05, ctemp,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
ax.text(.80, 1.01, cwind,
transform=ax.transAxes,
fontweight='bold', horizontalalignment='left', fontsize=12)
except Exception as e:
print("2nd cobs error:", e)
pass
#set up background colors
gold = 30.75
yellow = 30.35
white = 30.00
gainsboro = 29.65
darkgrey = 29.25
ax.axhline(gold, color='gold', lw=77, alpha=.5)
ax.axhline(yellow, color='yellow', lw=46, alpha=.2)
ax.axhline(white, color='white', lw=40, alpha=.2)
ax.axhline(gainsboro, color='gainsboro', lw=46, alpha=.5)
ax.axhline(darkgrey, color='darkgrey', lw=77, alpha=.5)
#Lines on minor ticks
for t in np.arange(29, 31, 0.05):
ax.axhline(t, color='black', lw=.5, alpha=.2)
for u in np.arange(29, 31, 0.25):
ax.axhline(u, color='black', lw=.7)
ax.tick_params(axis='x', direction='inout', length=5, width=1, color='black')
ax.set_ylim(29, 31)
ax.plot(xs, ys, 'r-')
plt.grid(True, color='.01',) #Draws default horiz and vert grid lines
plt.ylabel("Inches of Mercury")
#plt.title("Barometric Pressure")
ax.yaxis.set_minor_locator(AutoMinorLocator(5)) #Puts small ticks between labeled ticks
ax.yaxis.set_major_formatter(FormatStrFormatter('%2.2f'))
# disable removing overlapping locations
ax.xaxis.remove_overlapping_locs = False
print(i)
ax.xaxis.set(
major_locator=mdates.HourLocator((0,4,8,12,16,20)),
major_formatter=mdates.DateFormatter('%-I%P'),
minor_locator=mdates.DayLocator(),
minor_formatter=mdates.DateFormatter("\n%a,%-m/%-d"),
)
ax.set_xlim(dt.datetime.now() - dt.timedelta(minutes=4200), dt.datetime.now())
#this line seems responsible for vertical lines
ax.grid(which='major', axis='both', linestyle='-', linewidth=1, color='black', alpha=1, zorder=10)
plt.show(block=False)
#command to close Onboard keyboard
os.system("pkill onboard")
try:
# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=3000, save_count=len(xs))
ani.save('animation.gif', writer='pillow')
except AttributeError:
pass
except IndexError:
pass
|
PypiClean
|
/mapfish.plugin.client-0.2.tar.gz/mapfish.plugin.client-0.2/mapfishpluginclient/template/+package+/public/lib/openlayers/lib/OpenLayers/Handler/RegularPolygon.js
|
* @requires OpenLayers/Handler/Drag.js
*/
/**
* Class: OpenLayers.Handler.RegularPolygon
* Handler to draw a regular polygon on the map. Polygon is displayed on mouse
* down, moves or is modified on mouse move, and is finished on mouse up.
* The handler triggers callbacks for 'done' and 'cancel'. Create a new
* instance with the <OpenLayers.Handler.RegularPolygon> constructor.
*
* Inherits from:
* - <OpenLayers.Handler>
*/
OpenLayers.Handler.RegularPolygon = OpenLayers.Class(OpenLayers.Handler.Drag, {
/**
* APIProperty: sides
* {Integer} Number of sides for the regular polygon. Needs to be greater
* than 2. Defaults to 4.
*/
sides: 4,
/**
* APIProperty: radius
* {Float} Optional radius in map units of the regular polygon. If this is
* set to some non-zero value, a polygon with a fixed radius will be
* drawn and dragged with mouse movements. If this property is not
* set, dragging changes the radius of the polygon. Set to null by
* default.
*/
radius: null,
/**
* APIProperty: snapAngle
* {Float} If set to a non-zero value, the handler will snap the polygon
* rotation to multiples of the snapAngle. Value is an angle measured
* in degrees counterclockwise from the positive x-axis.
*/
snapAngle: null,
/**
* APIProperty: snapToggle
* {String} If set, snapToggle is checked on mouse events and will set
* the snap mode to the opposite of what it currently is. To disallow
* toggling between snap and non-snap mode, set freehandToggle to
* null. Acceptable toggle values are 'shiftKey', 'ctrlKey', and
* 'altKey'. Snap mode is only possible if this.snapAngle is set to a
* non-zero value.
*/
snapToggle: 'shiftKey',
/**
* APIProperty: persist
* {Boolean} Leave the feature rendered until clear is called. Default
* is false. If set to true, the feature remains rendered until
* clear is called, typically by deactivating the handler or starting
* another drawing.
*/
persist: false,
/**
* APIProperty: irregular
* {Boolean} Draw an irregular polygon instead of a regular polygon.
* Default is false. If true, the initial mouse down will represent
* one corner of the polygon bounds and with each mouse movement, the
* polygon will be stretched so the opposite corner of its bounds
* follows the mouse position. This property takes precedence over
* the radius property. If set to true, the radius property will
* be ignored.
*/
irregular: false,
/**
* Property: angle
* {Float} The angle from the origin (mouse down) to the current mouse
* position, in radians. This is measured counterclockwise from the
* positive x-axis.
*/
angle: null,
/**
* Property: fixedRadius
* {Boolean} The polygon has a fixed radius. True if a radius is set before
* drawing begins. False otherwise.
*/
fixedRadius: false,
/**
* Property: feature
* {<OpenLayers.Feature.Vector>} The currently drawn polygon feature
*/
feature: null,
/**
* Property: layer
* {<OpenLayers.Layer.Vector>} The temporary drawing layer
*/
layer: null,
/**
* Property: origin
* {<OpenLayers.Geometry.Point>} Location of the first mouse down
*/
origin: null,
/**
* Constructor: OpenLayers.Handler.RegularPolygon
* Create a new regular polygon handler.
*
* Parameters:
* control - {<OpenLayers.Control>} The control that owns this handler
* callbacks - {Object} An object with properties whose values are
* functions. The various callbacks are described below.
* options - {Object} An object with properties to be set on the handler.
* If the options.sides property is not specified, the number of sides
* will default to 4.
*
* Named callbacks:
* create - Called when a sketch is first created. Callback called with
* the creation point geometry and sketch feature.
* done - Called when the sketch drawing is finished. The callback will
* receive a single argument, the sketch geometry.
* cancel - Called when the handler is deactivated while drawing. The
* cancel callback will receive a geometry.
*/
initialize: function(control, callbacks, options) {
this.style = OpenLayers.Util.extend(OpenLayers.Feature.Vector.style['default'], {});
OpenLayers.Handler.prototype.initialize.apply(this,
[control, callbacks, options]);
this.options = (options) ? options : new Object();
},
/**
* APIMethod: setOptions
*
* Parameters:
* newOptions - {Object}
*/
setOptions: function (newOptions) {
OpenLayers.Util.extend(this.options, newOptions);
OpenLayers.Util.extend(this, newOptions);
},
/**
* APIMethod: activate
* Turn on the handler.
*
* Return:
* {Boolean} The handler was successfully activated
*/
activate: function() {
var activated = false;
if(OpenLayers.Handler.prototype.activate.apply(this, arguments)) {
// create temporary vector layer for rendering geometry sketch
var options = {
displayInLayerSwitcher: false,
// indicate that the temp vector layer will never be out of range
// without this, resolution properties must be specified at the
// map-level for this temporary layer to init its resolutions
// correctly
calculateInRange: OpenLayers.Function.True
};
this.layer = new OpenLayers.Layer.Vector(this.CLASS_NAME, options);
this.map.addLayer(this.layer);
activated = true;
}
return activated;
},
/**
* APIMethod: deactivate
* Turn off the handler.
*
* Return:
* {Boolean} The handler was successfully deactivated
*/
deactivate: function() {
var deactivated = false;
if(OpenLayers.Handler.Drag.prototype.deactivate.apply(this, arguments)) {
// call the cancel callback if mid-drawing
if(this.dragging) {
this.cancel();
}
// If a layer's map property is set to null, it means that the
// layer isn't added to the map. Since we ourselves added the layer
// to the map in activate(), we can assume that if this.layer.map
// is null the layer has been destroyed (as a result of
// map.destroy(), for example).
if (this.layer.map != null) {
this.layer.destroy(false);
if (this.feature) {
this.feature.destroy();
}
}
this.layer = null;
this.feature = null;
deactivated = true;
}
return deactivated;
},
/**
* Method: down
* Start drawing a new feature
*
* Parameters:
* evt - {Event} The drag start event
*/
down: function(evt) {
this.fixedRadius = !!(this.radius);
var maploc = this.map.getLonLatFromPixel(evt.xy);
this.origin = new OpenLayers.Geometry.Point(maploc.lon, maploc.lat);
// create the new polygon
if(!this.fixedRadius || this.irregular) {
// smallest radius should not be less than one pixel in map units
// VML doesn't behave well with smaller
this.radius = this.map.getResolution();
}
if(this.persist) {
this.clear();
}
this.feature = new OpenLayers.Feature.Vector();
this.createGeometry();
this.callback("create", [this.origin, this.feature]);
this.layer.addFeatures([this.feature], {silent: true});
this.layer.drawFeature(this.feature, this.style);
},
/**
* Method: move
* Respond to drag move events
*
* Parameters:
* evt - {Event} The move event
*/
move: function(evt) {
var maploc = this.map.getLonLatFromPixel(evt.xy);
var point = new OpenLayers.Geometry.Point(maploc.lon, maploc.lat);
if(this.irregular) {
var ry = Math.sqrt(2) * Math.abs(point.y - this.origin.y) / 2;
this.radius = Math.max(this.map.getResolution() / 2, ry);
} else if(this.fixedRadius) {
this.origin = point;
} else {
this.calculateAngle(point, evt);
this.radius = Math.max(this.map.getResolution() / 2,
point.distanceTo(this.origin));
}
this.modifyGeometry();
if(this.irregular) {
var dx = point.x - this.origin.x;
var dy = point.y - this.origin.y;
var ratio;
if(dy == 0) {
ratio = dx / (this.radius * Math.sqrt(2));
} else {
ratio = dx / dy;
}
this.feature.geometry.resize(1, this.origin, ratio);
this.feature.geometry.move(dx / 2, dy / 2);
}
this.layer.drawFeature(this.feature, this.style);
},
/**
* Method: up
* Finish drawing the feature
*
* Parameters:
* evt - {Event} The mouse up event
*/
up: function(evt) {
this.finalize();
// the mouseup method of superclass doesn't call the
// "done" callback if there's been no move between
// down and up
if (this.start == this.last) {
this.callback("done", [evt.xy]);
}
},
/**
* Method: out
* Finish drawing the feature.
*
* Parameters:
* evt - {Event} The mouse out event
*/
out: function(evt) {
this.finalize();
},
/**
* Method: createGeometry
* Create the new polygon geometry. This is called at the start of the
* drag and at any point during the drag if the number of sides
* changes.
*/
createGeometry: function() {
this.angle = Math.PI * ((1/this.sides) - (1/2));
if(this.snapAngle) {
this.angle += this.snapAngle * (Math.PI / 180);
}
this.feature.geometry = OpenLayers.Geometry.Polygon.createRegularPolygon(
this.origin, this.radius, this.sides, this.snapAngle
);
},
/**
* Method: modifyGeometry
* Modify the polygon geometry in place.
*/
modifyGeometry: function() {
var angle, dx, dy, point;
var ring = this.feature.geometry.components[0];
// if the number of sides ever changes, create a new geometry
if(ring.components.length != (this.sides + 1)) {
this.createGeometry();
ring = this.feature.geometry.components[0];
}
for(var i=0; i<this.sides; ++i) {
point = ring.components[i];
angle = this.angle + (i * 2 * Math.PI / this.sides);
point.x = this.origin.x + (this.radius * Math.cos(angle));
point.y = this.origin.y + (this.radius * Math.sin(angle));
point.clearBounds();
}
},
/**
* Method: calculateAngle
* Calculate the angle based on settings.
*
* Parameters:
* point - {<OpenLayers.Geometry.Point>}
* evt - {Event}
*/
calculateAngle: function(point, evt) {
var alpha = Math.atan2(point.y - this.origin.y,
point.x - this.origin.x);
if(this.snapAngle && (this.snapToggle && !evt[this.snapToggle])) {
var snapAngleRad = (Math.PI / 180) * this.snapAngle;
this.angle = Math.round(alpha / snapAngleRad) * snapAngleRad;
} else {
this.angle = alpha;
}
},
/**
* APIMethod: cancel
* Finish the geometry and call the "cancel" callback.
*/
cancel: function() {
// the polygon geometry gets cloned in the callback method
this.callback("cancel", null);
this.finalize();
},
/**
* Method: finalize
* Finish the geometry and call the "done" callback.
*/
finalize: function() {
this.origin = null;
this.radius = this.options.radius;
},
/**
* APIMethod: clear
* Clear any rendered features on the temporary layer. This is called
* when the handler is deactivated, canceled, or done (unless persist
* is true).
*/
clear: function() {
this.layer.renderer.clear();
this.layer.destroyFeatures();
},
/**
* Method: callback
* Trigger the control's named callback with the given arguments
*
* Parameters:
* name - {String} The key for the callback that is one of the properties
* of the handler's callbacks object.
* args - {Array} An array of arguments with which to call the callback
* (defined by the control).
*/
callback: function (name, args) {
// override the callback method to always send the polygon geometry
if (this.callbacks[name]) {
this.callbacks[name].apply(this.control,
[this.feature.geometry.clone()]);
}
// since sketch features are added to the temporary layer
// they must be cleared here if done or cancel
if(!this.persist && (name == "done" || name == "cancel")) {
this.clear();
}
},
CLASS_NAME: "OpenLayers.Handler.RegularPolygon"
});
|
PypiClean
|
/joulescope_ui-1.0.29.tar.gz/joulescope_ui-1.0.29/joulescope_ui/widgets/sidebar/sidebar_widget.py
|
from PySide6 import QtCore, QtGui, QtWidgets
from joulescope_ui import N_, register, tooltip_format, pubsub_singleton
from joulescope_ui.styles import styled_widget, color_as_qcolor
from joulescope_ui.widgets import DeviceControlWidget
from joulescope_ui.widgets import MemoryWidget
from joulescope_ui.widgets import HelpWidget
from joulescope_ui.widgets import HamburgerWidget
from joulescope_ui.widgets.flyout import FlyoutWidget
_DEVICE_TOOLTIP = tooltip_format(
N_('Device control'),
N_("""\
Click to show the device control widget which displays
the connected devices and their settings. Use this
widget to open and close devices and configure their
operation.\
"""))
_MEMORY_TOOLTIP = tooltip_format(
N_('Memory buffer settings'),
N_("""\
Streaming signal sample data is stored in your host
computer's RAM. Click this button to show the
memory management widget which allows you to
configure the memory used by this Joulescope UI instance.\
"""))
_SETTINGS_TOOLTIP = tooltip_format(
N_('Settings'),
N_("""\
Click to show the settings which allows you
to change the global, default, and individual
instance settings for devices and widgets.
Default changes may not affect existing instances,
and may only apply to future instances.\
"""))
_HELP_TOOLTIP = tooltip_format(
N_('Get help'),
N_("""\
Click to display help options.\
"""))
_MISC_TOOLTIP = tooltip_format(
N_('Additional settings and actions'),
N_("""\
Click to display additional settings and actions.\
"""))
@register
@styled_widget(N_('sidebar'))
class SideBar(QtWidgets.QWidget):
# Note: does NOT implement widget CAPABILITY, since not instantiable by user or available as a dock widget.
SETTINGS = {
'flyout_width': {
'dtype': 'int',
'brief': N_('The flyout width in pixels.'),
'default': 300,
},
}
def __init__(self, parent):
self._parent = parent
super().__init__(parent)
self.setObjectName('side_bar_icons')
size_policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
size_policy.setHeightForWidth(True)
self.setSizePolicy(size_policy)
self._style_cache = None
self._selected_area = None
self._selected_area_brush = QtGui.QBrush
self._buttons_by_name = {}
self._buttons_by_flyout_idx = {}
self._buttons_blink = []
self._buttons_flyout = []
self._flyout: FlyoutWidget = None
self._layout = QtWidgets.QVBoxLayout()
self._layout.setSpacing(6)
self._layout.setContentsMargins(3, 3, 3, 3)
self.setLayout(self._layout)
self._add_blink_button('target_power', 'target_power')
self._add_blink_button('signal_play', 'signal_stream_enable')
b = self._add_blink_button('signal_record', 'signal_stream_record')
b.toggled.connect(self._on_signal_stream_record_toggled)
self._add_blink_button('statistics_play', 'statistics_stream_enable')
b = self._add_blink_button('statistics_record', 'statistics_stream_record')
b.toggled.connect(self._on_statistics_stream_record_toggled)
self._add_button('device', _DEVICE_TOOLTIP)
self._add_button('memory', _MEMORY_TOOLTIP)
b = self._add_button('settings', _SETTINGS_TOOLTIP)
b.clicked.connect(self._on_settings_pressed)
self._spacer = QtWidgets.QSpacerItem(10, 0,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self._layout.addItem(self._spacer)
self._add_button('help', _HELP_TOOLTIP)
self._add_button('misc', _MISC_TOOLTIP)
self.mousePressEvent = self._on_mousePressEvent
pubsub_singleton.subscribe('registry/ui/events/blink_slow', self._on_blink, ['pub', 'retain'])
def register(self):
pubsub = pubsub_singleton
pubsub.register(self, 'sidebar:0', parent='ui')
self._flyout = FlyoutWidget(self._parent, self)
pubsub.register(self._flyout, 'flyout:0', parent='sidebar:0')
# Create the device control flyout widget for the sidebar
d = DeviceControlWidget()
pubsub.register(d, 'device_control_widget:flyout', parent='flyout:0')
self.widget_set('device', d)
# Create the memory flyout widget for the sidebar
m = MemoryWidget()
pubsub.register(m, 'memory_widget:flyout', parent='flyout:0')
self.widget_set('memory', m)
# Create the help flyout widget for the sidebar
m = HelpWidget(self._flyout)
pubsub.register(m, 'help_widget:flyout', parent='flyout:0')
self.widget_set('help', m)
# Create the hamburger flyout widget for the sidebar
m = HamburgerWidget()
pubsub.register(m, 'hamburger_widget:flyout', parent='flyout:0')
self.widget_set('misc', m)
def _on_mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
self.on_cmd_show(-1)
event.accept()
def _on_signal_stream_record_toggled(self, checked):
if bool(checked):
pubsub_singleton.publish('registry/SignalRecord/actions/!start_request', None)
else:
pubsub_singleton.publish('registry/SignalRecord/actions/!stop', None)
def _on_statistics_stream_record_toggled(self, checked):
if bool(checked):
pubsub_singleton.publish('registry/StatisticsRecord/actions/!start_request', None)
else:
pubsub_singleton.publish('registry/StatisticsRecord/actions/!stop', None)
def _on_settings_pressed(self, checked):
pubsub_singleton.publish('registry/view/actions/!widget_open', {
'value': 'registry/settings',
'floating': True,
})
def _add_blink_button(self, name, app_setting):
topic = f'registry/app/settings/{app_setting}'
meta = pubsub_singleton.metadata(topic)
tooltip = tooltip_format(meta.brief, meta.detail)
button = self._add_button(name, tooltip)
button.setProperty('blink', False)
button.setCheckable(True)
self._buttons_blink.append(button)
def update_from_pubsub(value):
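# Sync the button state from pubsub without re-emitting toggled, so the update is not published back to the same topic (assumed intent of the blockSignals guard below).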
block_state = button.blockSignals(True)
button.setChecked(bool(value))
button.blockSignals(block_state)
pubsub_singleton.subscribe(topic, update_from_pubsub, ['pub', 'retain'])
button.toggled.connect(lambda checked: pubsub_singleton.publish(topic, bool(checked)))
return button
def widget_set(self, name, widget):
button = self._buttons_by_name[name]
button.setProperty('selected', False)
idx = self._flyout.addWidget(widget)
self._buttons_by_flyout_idx[idx] = button
button.clicked.connect(lambda: self.on_cmd_show(idx))
def _add_button(self, name, tooltip):
button = QtWidgets.QPushButton(self)
button.setObjectName(name)
button.setFlat(True)
button.setFixedSize(32, 32)
button.setToolTip(tooltip)
self._buttons_by_name[name] = button
self._layout.addWidget(button)
return button
def _on_blink(self, value):
for b in self._buttons_blink:
b.setProperty('blink', value)
b.style().unpolish(b)
b.style().polish(b)
def on_cmd_show(self, value):
value = self._flyout.on_cmd_show(value)
if value is not None and value >= 0:
button = self._buttons_by_flyout_idx[value]
self._selected_area = button.geometry()
else:
self._selected_area = None
self.update()
def paintEvent(self, event):
s = self._style
if s is None:
return
if self._selected_area is not None:
r = self.geometry()
x, w = r.x(), r.width()
r = self._selected_area
y, h = r.y() - 3, r.height() + 6
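# Extends the highlight 3 px above and below the selected button, presumably to span the 6 px layout spacing.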
painter = QtGui.QPainter(self)
painter.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
painter.fillRect(x, y, w, h, s['selected_background_brush'])
painter.fillRect(x + w - 1, y, 2, h, s['selected_side_brush'])
def resizeEvent(self, event: QtGui.QResizeEvent) -> None:
self._flyout.on_sidebar_geometry(self.geometry())
@property
def _style(self):
if self._style_cache is not None:
return self._style_cache
if self.style_obj is None:
self._style_cache = None
return None
v = self.style_obj['vars']
self._style_cache = {
'selected_background_brush': QtGui.QBrush(color_as_qcolor(v['sidebar.util_background'])),
'selected_side_brush': QtGui.QBrush(color_as_qcolor(v['sidebar.util_foreground'])),
}
return self._style_cache
|
PypiClean
|
/simple_plus-7.0b2.tar.gz/simple_plus-7.0b2/simplepro/static/admin/mdeditor/js/lib/codemirror/mode/cobol/cobol.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("cobol", function () {
var BUILTIN = "builtin", COMMENT = "comment", STRING = "string",
ATOM = "atom", NUMBER = "number", KEYWORD = "keyword", MODTAG = "header",
COBOLLINENUM = "def", PERIOD = "link";
function makeKeywords(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var atoms = makeKeywords("TRUE FALSE ZEROES ZEROS ZERO SPACES SPACE LOW-VALUE LOW-VALUES ");
var keywords = makeKeywords(
"ACCEPT ACCESS ACQUIRE ADD ADDRESS " +
"ADVANCING AFTER ALIAS ALL ALPHABET " +
"ALPHABETIC ALPHABETIC-LOWER ALPHABETIC-UPPER ALPHANUMERIC ALPHANUMERIC-EDITED " +
"ALSO ALTER ALTERNATE AND ANY " +
"ARE AREA AREAS ARITHMETIC ASCENDING " +
"ASSIGN AT ATTRIBUTE AUTHOR AUTO " +
"AUTO-SKIP AUTOMATIC B-AND B-EXOR B-LESS " +
"B-NOT B-OR BACKGROUND-COLOR BACKGROUND-COLOUR BEEP " +
"BEFORE BELL BINARY BIT BITS " +
"BLANK BLINK BLOCK BOOLEAN BOTTOM " +
"BY CALL CANCEL CD CF " +
"CH CHARACTER CHARACTERS CLASS CLOCK-UNITS " +
"CLOSE COBOL CODE CODE-SET COL " +
"COLLATING COLUMN COMMA COMMIT COMMITMENT " +
"COMMON COMMUNICATION COMP COMP-0 COMP-1 " +
"COMP-2 COMP-3 COMP-4 COMP-5 COMP-6 " +
"COMP-7 COMP-8 COMP-9 COMPUTATIONAL COMPUTATIONAL-0 " +
"COMPUTATIONAL-1 COMPUTATIONAL-2 COMPUTATIONAL-3 COMPUTATIONAL-4 COMPUTATIONAL-5 " +
"COMPUTATIONAL-6 COMPUTATIONAL-7 COMPUTATIONAL-8 COMPUTATIONAL-9 COMPUTE " +
"CONFIGURATION CONNECT CONSOLE CONTAINED CONTAINS " +
"CONTENT CONTINUE CONTROL CONTROL-AREA CONTROLS " +
"CONVERTING COPY CORR CORRESPONDING COUNT " +
"CRT CRT-UNDER CURRENCY CURRENT CURSOR " +
"DATA DATE DATE-COMPILED DATE-WRITTEN DAY " +
"DAY-OF-WEEK DB DB-ACCESS-CONTROL-KEY DB-DATA-NAME DB-EXCEPTION " +
"DB-FORMAT-NAME DB-RECORD-NAME DB-SET-NAME DB-STATUS DBCS " +
"DBCS-EDITED DE DEBUG-CONTENTS DEBUG-ITEM DEBUG-LINE " +
"DEBUG-NAME DEBUG-SUB-1 DEBUG-SUB-2 DEBUG-SUB-3 DEBUGGING " +
"DECIMAL-POINT DECLARATIVES DEFAULT DELETE DELIMITED " +
"DELIMITER DEPENDING DESCENDING DESCRIBED DESTINATION " +
"DETAIL DISABLE DISCONNECT DISPLAY DISPLAY-1 " +
"DISPLAY-2 DISPLAY-3 DISPLAY-4 DISPLAY-5 DISPLAY-6 " +
"DISPLAY-7 DISPLAY-8 DISPLAY-9 DIVIDE DIVISION " +
"DOWN DROP DUPLICATE DUPLICATES DYNAMIC " +
"EBCDIC EGI EJECT ELSE EMI " +
"EMPTY EMPTY-CHECK ENABLE END END. END-ACCEPT END-ACCEPT. " +
"END-ADD END-CALL END-COMPUTE END-DELETE END-DISPLAY " +
"END-DIVIDE END-EVALUATE END-IF END-INVOKE END-MULTIPLY " +
"END-OF-PAGE END-PERFORM END-READ END-RECEIVE END-RETURN " +
"END-REWRITE END-SEARCH END-START END-STRING END-SUBTRACT " +
"END-UNSTRING END-WRITE END-XML ENTER ENTRY " +
"ENVIRONMENT EOP EQUAL EQUALS ERASE " +
"ERROR ESI EVALUATE EVERY EXCEEDS " +
"EXCEPTION EXCLUSIVE EXIT EXTEND EXTERNAL " +
"EXTERNALLY-DESCRIBED-KEY FD FETCH FILE FILE-CONTROL " +
"FILE-STREAM FILES FILLER FINAL FIND " +
"FINISH FIRST FOOTING FOR FOREGROUND-COLOR " +
"FOREGROUND-COLOUR FORMAT FREE FROM FULL " +
"FUNCTION GENERATE GET GIVING GLOBAL " +
"GO GOBACK GREATER GROUP HEADING " +
"HIGH-VALUE HIGH-VALUES HIGHLIGHT I-O I-O-CONTROL " +
"ID IDENTIFICATION IF IN INDEX " +
"INDEX-1 INDEX-2 INDEX-3 INDEX-4 INDEX-5 " +
"INDEX-6 INDEX-7 INDEX-8 INDEX-9 INDEXED " +
"INDIC INDICATE INDICATOR INDICATORS INITIAL " +
"INITIALIZE INITIATE INPUT INPUT-OUTPUT INSPECT " +
"INSTALLATION INTO INVALID INVOKE IS " +
"JUST JUSTIFIED KANJI KEEP KEY " +
"LABEL LAST LD LEADING LEFT " +
"LEFT-JUSTIFY LENGTH LENGTH-CHECK LESS LIBRARY " +
"LIKE LIMIT LIMITS LINAGE LINAGE-COUNTER " +
"LINE LINE-COUNTER LINES LINKAGE LOCAL-STORAGE " +
"LOCALE LOCALLY LOCK " +
"MEMBER MEMORY MERGE MESSAGE METACLASS " +
"MODE MODIFIED MODIFY MODULES MOVE " +
"MULTIPLE MULTIPLY NATIONAL NATIVE NEGATIVE " +
"NEXT NO NO-ECHO NONE NOT " +
"NULL NULL-KEY-MAP NULL-MAP NULLS NUMBER " +
"NUMERIC NUMERIC-EDITED OBJECT OBJECT-COMPUTER OCCURS " +
"OF OFF OMITTED ON ONLY " +
"OPEN OPTIONAL OR ORDER ORGANIZATION " +
"OTHER OUTPUT OVERFLOW OWNER PACKED-DECIMAL " +
"PADDING PAGE PAGE-COUNTER PARSE PERFORM " +
"PF PH PIC PICTURE PLUS " +
"POINTER POSITION POSITIVE PREFIX PRESENT " +
"PRINTING PRIOR PROCEDURE PROCEDURE-POINTER PROCEDURES " +
"PROCEED PROCESS PROCESSING PROGRAM PROGRAM-ID " +
"PROMPT PROTECTED PURGE QUEUE QUOTE " +
"QUOTES RANDOM RD READ READY " +
"REALM RECEIVE RECONNECT RECORD RECORD-NAME " +
"RECORDS RECURSIVE REDEFINES REEL REFERENCE " +
"REFERENCE-MONITOR REFERENCES RELATION RELATIVE RELEASE " +
"REMAINDER REMOVAL RENAMES REPEATED REPLACE " +
"REPLACING REPORT REPORTING REPORTS REPOSITORY " +
"REQUIRED RERUN RESERVE RESET RETAINING " +
"RETRIEVAL RETURN RETURN-CODE RETURNING REVERSE-VIDEO " +
"REVERSED REWIND REWRITE RF RH " +
"RIGHT RIGHT-JUSTIFY ROLLBACK ROLLING ROUNDED " +
"RUN SAME SCREEN SD SEARCH " +
"SECTION SECURE SECURITY SEGMENT SEGMENT-LIMIT " +
"SELECT SEND SENTENCE SEPARATE SEQUENCE " +
"SEQUENTIAL SET SHARED SIGN SIZE " +
"SKIP1 SKIP2 SKIP3 SORT SORT-MERGE " +
"SORT-RETURN SOURCE SOURCE-COMPUTER SPACE-FILL " +
"SPECIAL-NAMES STANDARD STANDARD-1 STANDARD-2 " +
"START STARTING STATUS STOP STORE " +
"STRING SUB-QUEUE-1 SUB-QUEUE-2 SUB-QUEUE-3 SUB-SCHEMA " +
"SUBFILE SUBSTITUTE SUBTRACT SUM SUPPRESS " +
"SYMBOLIC SYNC SYNCHRONIZED SYSIN SYSOUT " +
"TABLE TALLYING TAPE TENANT TERMINAL " +
"TERMINATE TEST TEXT THAN THEN " +
"THROUGH THRU TIME TIMES TITLE " +
"TO TOP TRAILING TRAILING-SIGN TRANSACTION " +
"TYPE TYPEDEF UNDERLINE UNEQUAL UNIT " +
"UNSTRING UNTIL UP UPDATE UPON " +
"USAGE USAGE-MODE USE USING VALID " +
"VALIDATE VALUE VALUES VARYING VLR " +
"WAIT WHEN WHEN-COMPILED WITH WITHIN " +
"WORDS WORKING-STORAGE WRITE XML XML-CODE " +
"XML-EVENT XML-NTEXT XML-TEXT ZERO ZERO-FILL " );
var builtins = makeKeywords("- * ** / + < <= = > >= ");
var tests = {
digit: /\d/,
digit_or_colon: /[\d:]/,
hex: /[0-9a-f]/i,
sign: /[+-]/,
exponent: /e/i,
keyword_char: /[^\s\(\[\;\)\]]/,
symbol: /[\w*+\-]/
};
function isNumber(ch, stream){
// hex
if ( ch === '0' && stream.eat(/x/i) ) {
stream.eatWhile(tests.hex);
return true;
}
// leading sign
if ( ( ch == '+' || ch == '-' ) && ( tests.digit.test(stream.peek()) ) ) {
stream.eat(tests.sign);
ch = stream.next();
}
if ( tests.digit.test(ch) ) {
stream.eat(ch);
stream.eatWhile(tests.digit);
if ( '.' == stream.peek()) {
stream.eat('.');
stream.eatWhile(tests.digit);
}
if ( stream.eat(tests.exponent) ) {
stream.eat(tests.sign);
stream.eatWhile(tests.digit);
}
return true;
}
return false;
}
return {
startState: function () {
return {
indentStack: null,
indentation: 0,
mode: false
};
},
token: function (stream, state) {
if (state.indentStack == null && stream.sol()) {
// update indentation, but only if indentStack is empty
state.indentation = 6 ; //stream.indentation();
}
// skip spaces
if (stream.eatSpace()) {
return null;
}
var returnType = null;
switch(state.mode){
case "string": // multi-line string parsing mode
var next = false;
while ((next = stream.next()) != null) {
if (next == "\"" || next == "\'") {
state.mode = false;
break;
}
}
returnType = STRING; // continue on in string mode
break;
default: // default parsing mode
var ch = stream.next();
var col = stream.column();
if (col >= 0 && col <= 5) {
returnType = COBOLLINENUM;
} else if (col >= 72 && col <= 79) {
stream.skipToEnd();
returnType = MODTAG;
} else if (ch == "*" && col == 6) { // comment
stream.skipToEnd(); // rest of the line is a comment
returnType = COMMENT;
} else if (ch == "\"" || ch == "\'") {
state.mode = "string";
returnType = STRING;
} else if (ch == "'" && !( tests.digit_or_colon.test(stream.peek()) )) {
returnType = ATOM;
} else if (ch == ".") {
returnType = PERIOD;
} else if (isNumber(ch,stream)){
returnType = NUMBER;
} else {
if (stream.current().match(tests.symbol)) {
while (col < 71) {
if (stream.eat(tests.symbol) === undefined) {
break;
} else {
col++;
}
}
}
if (keywords && keywords.propertyIsEnumerable(stream.current().toUpperCase())) {
returnType = KEYWORD;
} else if (builtins && builtins.propertyIsEnumerable(stream.current().toUpperCase())) {
returnType = BUILTIN;
} else if (atoms && atoms.propertyIsEnumerable(stream.current().toUpperCase())) {
returnType = ATOM;
} else returnType = null;
}
}
return returnType;
},
indent: function (state) {
if (state.indentStack == null) return state.indentation;
return state.indentStack.indent;
}
};
});
CodeMirror.defineMIME("text/x-cobol", "cobol");
});
|
PypiClean
|
/teramesh-hardware-tester-0.9.0.tar.gz/teramesh-hardware-tester-0.9.0/teramesh_hardware_tester/idrc_esp_tools/nvs_partition_gen.py
|
import argparse
import array
import binascii
import codecs
import datetime
import distutils.dir_util
import os
import random
import struct
import sys
import zlib
from builtins import bytes, int, range
from io import open
from itertools import zip_longest
VERSION1_PRINT = 'V1 - Multipage Blob Support Disabled'
VERSION2_PRINT = 'V2 - Multipage Blob Support Enabled'
def reverse_hexbytes(addr_tmp):
addr = []
reversed_bytes = ''
for i in range(0, len(addr_tmp), 2):
addr.append(addr_tmp[i:i + 2])
reversed_bytes = ''.join(reversed(addr))
return reversed_bytes
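# Example (illustrative): reverse_hexbytes('aabbcc') returns 'ccbbaa', i.e. the byte order of the hex string is reversed.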
""" Class for standard NVS page structure """
class Page(object):
PAGE_PARAMS = {
'max_size': 4096,
'max_old_blob_size': 1984,
'max_new_blob_size': 4000,
'max_entries': 126
}
# Item type codes
U8 = 0x01
I8 = 0x11
U16 = 0x02
I16 = 0x12
U32 = 0x04
I32 = 0x14
U64 = 0x08
I64 = 0x18
SZ = 0x21
BLOB = 0x41
BLOB_DATA = 0x42
BLOB_IDX = 0x48
# Few Page constants
HEADER_SIZE = 32
BITMAPARRAY_OFFSET = 32
BITMAPARRAY_SIZE_IN_BYTES = 32
FIRST_ENTRY_OFFSET = 64
SINGLE_ENTRY_SIZE = 32
CHUNK_ANY = 0xFF
ACTIVE = 0xFFFFFFFE
FULL = 0xFFFFFFFC
VERSION1 = 0xFF
VERSION2 = 0xFE
def __init__(self, page_num, version, is_rsrv_page=False):
self.entry_num = 0
self.bitmap_array = array.array('B')
self.version = version
self.page_buf = bytearray(b'\xff') * Page.PAGE_PARAMS['max_size']
if not is_rsrv_page:
self.bitmap_array = self.create_bitmap_array()
self.set_header(page_num, version)
def set_header(self, page_num, version):
# set page state to active
page_header = bytearray(b'\xff') * 32
page_state_active_seq = Page.ACTIVE
struct.pack_into('<I', page_header, 0, page_state_active_seq)
# set page sequence number
struct.pack_into('<I', page_header, 4, page_num)
# set version
if version == Page.VERSION2:
page_header[8] = Page.VERSION2
elif version == Page.VERSION1:
page_header[8] = Page.VERSION1
# set header's CRC
crc_data = bytes(page_header[4:28])
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', page_header, 28, crc & 0xFFFFFFFF)
self.page_buf[0:len(page_header)] = page_header
def create_bitmap_array(self):
bitarray = array.array('B')
charsize = 32 # bitmaparray has 256 bits, hence 32 bytes
fill = 255 # Fill all 8 bits with 1's
bitarray.extend((fill,) * charsize)
return bitarray
def write_bitmaparray(self):
bitnum = self.entry_num * 2
byte_idx = bitnum // 8 # Find byte index in the array
bit_offset = bitnum & 7 # Find bit offset in given byte index
mask = ~(1 << bit_offset)
self.bitmap_array[byte_idx] &= mask
start_idx = Page.BITMAPARRAY_OFFSET
end_idx = Page.BITMAPARRAY_OFFSET + Page.BITMAPARRAY_SIZE_IN_BYTES
self.page_buf[start_idx:end_idx] = self.bitmap_array
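# Each entry uses 2 bits of the bitmap; e.g. entry_num = 5 gives bitnum = 10, byte_idx = 1, bit_offset = 2, so bit 2 of bitmap byte 1 is cleared.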
def write_entry_to_buf(self, data, entrycount,nvs_obj):
data_offset = Page.FIRST_ENTRY_OFFSET + (Page.SINGLE_ENTRY_SIZE * self.entry_num)
start_idx = data_offset
end_idx = data_offset + len(data)
self.page_buf[start_idx:end_idx] = data
# Set bitmap array for entries in current page
for i in range(0, entrycount):
self.write_bitmaparray()
self.entry_num += 1
def set_crc_header(self, entry_struct):
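# The entry-header CRC32 is computed over bytes 0-3 and 8-31, skipping the 4-byte CRC field itself (bytes 4-7).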
crc_data = bytearray(b'28')
crc_data[0:4] = entry_struct[0:4]
crc_data[4:28] = entry_struct[8:32]
crc_data = bytes(crc_data)
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
return entry_struct
def write_varlen_binary_data(self, entry_struct, ns_index, key, data, data_size, total_entry_count, encoding, nvs_obj):
chunk_start = 0
chunk_count = 0
chunk_index = Page.CHUNK_ANY
offset = 0
remaining_size = data_size
tailroom = None
while True:
chunk_size = 0
# Get the size available in current page
tailroom = (Page.PAGE_PARAMS['max_entries'] - self.entry_num - 1) * Page.SINGLE_ENTRY_SIZE
assert tailroom >= 0, 'Page overflow!!'
# Split the binary data into two and store a chunk of available size onto curr page
if tailroom < remaining_size:
chunk_size = tailroom
else:
chunk_size = remaining_size
remaining_size = remaining_size - chunk_size
# Change type of data to BLOB_DATA
entry_struct[1] = Page.BLOB_DATA
# Calculate no. of entries data chunk will require
datachunk_rounded_size = (chunk_size + 31) & ~31
datachunk_entry_count = datachunk_rounded_size // 32
datachunk_total_entry_count = datachunk_entry_count + 1 # +1 for the entry header
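# Example (illustrative): chunk_size = 100 rounds up to 128 bytes, i.e. 4 data entries plus 1 header entry.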
# Set Span
entry_struct[2] = datachunk_total_entry_count
# Update the chunkIndex
chunk_index = chunk_start + chunk_count
entry_struct[3] = chunk_index
# Set data chunk
data_chunk = data[offset:offset + chunk_size]
# Compute CRC of data chunk
struct.pack_into('<H', entry_struct, 24, chunk_size)
if type(data) != bytes:
data_chunk = bytes(data_chunk, encoding='utf8')
crc = zlib.crc32(data_chunk, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write entry header
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
# write actual data
self.write_entry_to_buf(data_chunk, datachunk_entry_count,nvs_obj)
chunk_count = chunk_count + 1
if remaining_size or (tailroom - chunk_size) < Page.SINGLE_ENTRY_SIZE:
nvs_obj.create_new_page()
self = nvs_obj.cur_page
offset = offset + chunk_size
# All chunks are stored, now store the index
if not remaining_size:
# Initialise data field to 0xff
data_array = bytearray(b'\xff') * 8
entry_struct[24:32] = data_array
# change type of data to BLOB_IDX
entry_struct[1] = Page.BLOB_IDX
# Set Span
entry_struct[2] = 1
# Update the chunkIndex
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
struct.pack_into('<I', entry_struct, 24, data_size)
entry_struct[28] = chunk_count
entry_struct[29] = chunk_start
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write last entry
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
break
return entry_struct
def write_single_page_entry(self, entry_struct, data, datalen, data_entry_count, nvs_obj):
# compute CRC of data
struct.pack_into('<H', entry_struct, 24, datalen)
if type(data) != bytes:
data = bytes(data, encoding='utf8')
crc = zlib.crc32(data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
# compute crc of entry header
entry_struct = self.set_crc_header(entry_struct)
# write entry header
self.write_entry_to_buf(entry_struct, 1, nvs_obj)
# write actual data
self.write_entry_to_buf(data, data_entry_count, nvs_obj)
"""
Low-level function to write variable length data into page buffer. Data should be formatted
according to encoding specified.
"""
def write_varlen_data(self, key, data, encoding, ns_index,nvs_obj):
# Set size of data
datalen = len(data)
if datalen > Page.PAGE_PARAMS['max_old_blob_size']:
if self.version == Page.VERSION1:
raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.'
% (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key))
else:
if encoding == 'string':
raise InputError(' Input File: Size (%d) exceeds max allowed length `%s` bytes for key `%s`.'
% (datalen, Page.PAGE_PARAMS['max_old_blob_size'], key))
# Calculate no. of entries data will require
rounded_size = (datalen + 31) & ~31
data_entry_count = rounded_size // 32
total_entry_count = data_entry_count + 1 # +1 for the entry header
# Check if page is already full and new page is needed to be created right away
if self.entry_num >= Page.PAGE_PARAMS['max_entries']:
raise PageFullError()
elif (self.entry_num + total_entry_count) >= Page.PAGE_PARAMS['max_entries']:
if not (self.version == Page.VERSION2 and encoding in ['hex2bin', 'binary', 'base64']):
raise PageFullError()
# Entry header
entry_struct = bytearray(b'\xff') * 32
# Set Namespace Index
entry_struct[0] = ns_index
# Set Span
if self.version == Page.VERSION2:
if encoding == 'string':
entry_struct[2] = data_entry_count + 1
# Set Chunk Index
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
else:
entry_struct[2] = data_entry_count + 1
# set key
key_array = b'\x00' * 16
entry_struct[8:24] = key_array
entry_struct[8:8 + len(key)] = key.encode()
# set Type
if encoding == 'string':
entry_struct[1] = Page.SZ
elif encoding in ['hex2bin', 'binary', 'base64']:
entry_struct[1] = Page.BLOB
if self.version == Page.VERSION2 and (encoding in ['hex2bin', 'binary', 'base64']):
entry_struct = self.write_varlen_binary_data(entry_struct,ns_index,key,data,
datalen,total_entry_count, encoding, nvs_obj)
else:
self.write_single_page_entry(entry_struct, data, datalen, data_entry_count, nvs_obj)
""" Low-level function to write data of primitive type into page buffer. """
def write_primitive_data(self, key, data, encoding, ns_index,nvs_obj):
# Check if entry exceeds max number of entries allowed per page
if self.entry_num >= Page.PAGE_PARAMS['max_entries']:
raise PageFullError()
entry_struct = bytearray(b'\xff') * 32
entry_struct[0] = ns_index # namespace index
entry_struct[2] = 0x01 # Span
chunk_index = Page.CHUNK_ANY
entry_struct[3] = chunk_index
# write key
key_array = b'\x00' * 16
entry_struct[8:24] = key_array
entry_struct[8:8 + len(key)] = key.encode()
if encoding == 'u8':
entry_struct[1] = Page.U8
struct.pack_into('<B', entry_struct, 24, data)
elif encoding == 'i8':
entry_struct[1] = Page.I8
struct.pack_into('<b', entry_struct, 24, data)
elif encoding == 'u16':
entry_struct[1] = Page.U16
struct.pack_into('<H', entry_struct, 24, data)
elif encoding == 'i16':
entry_struct[1] = Page.I16
struct.pack_into('<h', entry_struct, 24, data)
elif encoding == 'u32':
entry_struct[1] = Page.U32
struct.pack_into('<I', entry_struct, 24, data)
elif encoding == 'i32':
entry_struct[1] = Page.I32
struct.pack_into('<i', entry_struct, 24, data)
elif encoding == 'u64':
entry_struct[1] = Page.U64
struct.pack_into('<Q', entry_struct, 24, data)
elif encoding == 'i64':
entry_struct[1] = Page.I64
struct.pack_into('<q', entry_struct, 24, data)
# Compute CRC
crc_data = bytearray(b'28')
crc_data[0:4] = entry_struct[0:4]
crc_data[4:28] = entry_struct[8:32]
crc_data = bytes(crc_data)
crc = zlib.crc32(crc_data, 0xFFFFFFFF)
struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
# write to file
self.write_entry_to_buf(entry_struct, 1,nvs_obj)
""" Get page buffer data of a given page """
def get_data(self):
return self.page_buf
"""
NVS class encapsulates all NVS specific operations to create a binary with given key-value pairs.
Binary can later be flashed onto device via a flashing utility.
"""
class NVS(object):
def __init__(self, fout, input_size, version, encrypt=False, key_input=None):
self.size = input_size
self.encrypt = encrypt
self.encr_key = None
self.namespace_idx = 0
self.page_num = -1
self.pages = []
self.version = version
self.fout = fout
if self.encrypt:
self.encr_key = key_input
self.cur_page = self.create_new_page(version)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None and exc_value is None:
# Create pages for remaining available size
while True:
try:
self.create_new_page()
except InsufficientSizeError:
self.size = None
# Creating the last reserved page
self.create_new_page(is_rsrv_page=True)
break
result = self.get_binary_data()
self.fout.write(result)
def create_new_page(self, version=None, is_rsrv_page=False):
# Set previous page state to FULL before creating new page
if self.pages:
curr_page_state = struct.unpack('<I', self.cur_page.page_buf[0:4])[0]
if curr_page_state == Page.ACTIVE:
page_state_full_seq = Page.FULL
struct.pack_into('<I', self.cur_page.page_buf, 0, page_state_full_seq)
# Set version for NVS binary generated
version = self.version
# Update available size as each page is created
if self.size == 0:
raise InsufficientSizeError('Error: Size parameter is less than the size of data in the csv. Please increase the size.')
if not is_rsrv_page:
self.size = self.size - Page.PAGE_PARAMS['max_size']
self.page_num += 1
# Set version for each page and page header
new_page = Page(self.page_num, version, is_rsrv_page)
self.pages.append(new_page)
self.cur_page = new_page
return new_page
"""
Write namespace entry and subsequently increase namespace count so that all upcoming entries
will be mapped to a new namespace.
"""
def write_namespace(self, key):
self.namespace_idx += 1
try:
self.cur_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_primitive_data(key, self.namespace_idx, 'u8', 0,self)
"""
    Write a key-value pair. The function accepts the value as an ASCII string and converts
    it into the appropriate format before calling the Page class's functions to write the entry in NVS format.
    It handles PageFullError by creating a new page and re-invoking itself on that page.
    The re-invocation does not need its own try-except guard since no entry can span multiple pages.
"""
def write_entry(self, key, value, encoding):
if encoding == 'hex2bin':
value = value.strip()
if len(value) % 2 != 0:
raise InputError('%s: Invalid data length. Should be multiple of 2.' % key)
value = binascii.a2b_hex(value)
if encoding == 'base64':
value = binascii.a2b_base64(value)
if encoding == 'string':
if type(value) == bytes:
value = value.decode()
value += '\0'
encoding = encoding.lower()
varlen_encodings = ['string', 'binary', 'hex2bin', 'base64']
primitive_encodings = ['u8', 'i8', 'u16', 'i16', 'u32', 'i32', 'u64', 'i64']
if encoding in varlen_encodings:
try:
self.cur_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_varlen_data(key, value, encoding, self.namespace_idx,self)
elif encoding in primitive_encodings:
try:
self.cur_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
except PageFullError:
new_page = self.create_new_page()
new_page.write_primitive_data(key, int(value), encoding, self.namespace_idx,self)
else:
raise InputError('%s: Unsupported encoding' % encoding)
""" Return accumulated data of all pages """
def get_binary_data(self):
data = bytearray()
for page in self.pages:
data += page.get_data()
return data
class PageFullError(RuntimeError):
"""
Represents error when current page doesn't have sufficient entries left
to accommodate current request
"""
def __init__(self):
super(PageFullError, self).__init__()
class InputError(RuntimeError):
"""
Represents error on the input
"""
def __init__(self, e):
print('\nError:')
super(InputError, self).__init__(e)
class InsufficientSizeError(RuntimeError):
"""
    Represents an error raised when the given NVS partition size is insufficient
    to accommodate the data in the given CSV file
"""
def __init__(self, e):
super(InsufficientSizeError, self).__init__(e)
def nvs_open(result_obj, input_size, version=None, is_encrypt=False, key=None):
""" Wrapper to create and NVS class object. This object can later be used to set key-value pairs
:param result_obj: File/Stream object to dump resultant binary. If data is to be dumped into memory, one way is to use BytesIO object
:param input_size: Size of Partition
:return: NVS class instance
"""
return NVS(result_obj, input_size, version, encrypt=is_encrypt, key_input=key)
def write_entry(nvs_instance, key, datatype, encoding, value):
""" Wrapper to set key-value pair in NVS format
:param nvs_instance: Instance of an NVS class returned by nvs_open()
:param key: Key of the data
:param datatype: Data type. Valid values are "file", "data" and "namespace"
:param encoding: Data encoding. Valid values are "u8", "i8", "u16", "i16", "u32", "i32", "u64", "i64", "string", "binary", "hex2bin" and "base64"
:param value: Data value in ascii encoded string format for "data" datatype and filepath for "file" datatype
:return: None
"""
if datatype == 'file':
abs_file_path = value
if os.path.isabs(value) is False:
script_dir = os.getcwd()
abs_file_path = os.path.join(script_dir, value)
with open(abs_file_path, 'rb') as f:
value = f.read()
if datatype == 'namespace':
nvs_instance.write_namespace(key)
else:
nvs_instance.write_entry(key, value, encoding)
def nvs_close(nvs_instance):
""" Wrapper to finish writing to NVS and write data to file/stream object provided to nvs_open method
:param nvs_instance: Instance of NVS class returned by nvs_open()
:return: None
"""
nvs_instance.__exit__(None, None, None)
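# A minimal usage sketch of the wrapper API above (hypothetical key names and
# partition size; not part of the original generator): build a small NVS image
# entirely in memory.
def example_generate_in_memory():
    import io
    out = io.BytesIO()
    # 0x6000-byte partition; check_size() reserves one 4 KB page internally.
    nvs = nvs_open(out, check_size('0x6000'), version=Page.VERSION2)
    write_entry(nvs, 'storage', 'namespace', '', '')         # open a namespace
    write_entry(nvs, 'greeting', 'data', 'string', 'hello')  # string value
    write_entry(nvs, 'retries', 'data', 'u8', '3')           # 8-bit integer
    nvs_close(nvs)
    return out.getvalue()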
def check_size(size):
'''
Checks for input partition size
:param size: Input partition size
'''
try:
# Set size
input_size = int(size, 0)
if input_size % 4096 != 0:
sys.exit('Size of partition must be multiple of 4096')
        # Update size, as one 4 KB page needs to be reserved
input_size = input_size - Page.PAGE_PARAMS['max_size']
if input_size < (2 * Page.PAGE_PARAMS['max_size']):
sys.exit('Minimum NVS partition size needed is 0x3000 bytes.')
return input_size
except Exception as e:
print(e)
sys.exit(0)
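# Example: check_size('0x6000') returns 0x5000, since one 4 KB page is always
# reserved; any partition smaller than 0x3000 bytes is rejected.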
def set_target_filepath(outdir, filepath):
'''
Set target file path: <outdir>/<filepath>
:param outdir: Target output dir to store files
:param filepath: Path of target file
'''
bin_ext = '.bin'
# Expand if tilde(~) provided in path
outdir = os.path.expanduser(outdir)
if filepath:
key_file_name, ext = os.path.splitext(filepath)
if not ext:
filepath = key_file_name + bin_ext
elif bin_ext not in ext:
sys.exit('Error: `%s`. Only `%s` extension allowed.' % (filepath, bin_ext))
# Create dir if does not exist
if not (os.path.isdir(outdir)):
distutils.dir_util.mkpath(outdir)
filedir, filename = os.path.split(filepath)
filedir = os.path.join(outdir,filedir,'')
if filedir and not os.path.isdir(filedir):
distutils.dir_util.mkpath(filedir)
if os.path.isabs(filepath):
if not outdir == os.getcwd():
print('\nWarning: `%s` \n\t==> absolute path given so outdir is ignored for this file.' % filepath)
# Set to empty as outdir is ignored here
outdir = ''
# Set full path - outdir + filename
filepath = os.path.join(outdir, '') + filepath
return outdir, filepath
def generate(args, is_encr_enabled=False, encr_key=None):
'''
Generate NVS Partition
:param args: Command line arguments given
:param is_encr_enabled: Encryption enabled/disabled
:param encr_key: Key to encrypt NVS partition
'''
is_dir_new = False
bin_ext = '.bin'
input_size = check_size(args.size)
if args.version == 1:
args.version = Page.VERSION1
elif args.version == 2:
args.version = Page.VERSION2
    # Check if the output file has a .bin extension
filename, ext = os.path.splitext(args.output)
if bin_ext not in ext:
sys.exit('Error: `%s`. Only `.bin` extension allowed.' % args.output)
args.outdir, args.output = set_target_filepath(args.outdir, args.output)
with open(args.input, 'rt', encoding='utf8') as input_file,\
open(args.output, 'wb') as output_file,\
nvs_open(output_file, input_size, args.version, is_encrypt=is_encr_enabled, key=encr_key) as nvs_obj:
if nvs_obj.version == Page.VERSION1:
version_set = VERSION1_PRINT
else:
version_set = VERSION2_PRINT
print('\nCreating NVS binary with version:', version_set)
line = input_file.readline().strip()
# Comments are skipped
while line.startswith('#'):
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
header = line.split(',')
while True:
line = input_file.readline().strip()
if not isinstance(line, str):
line = line.encode('utf-8')
value = line.split(',')
if len(value) == 1 and '' in value:
break
data = dict(zip_longest(header, value))
try:
# Check key length
if len(data['key']) > 15:
raise InputError('Length of key `{}` should be <= 15 characters.'.format(data['key']))
write_entry(nvs_obj, data['key'], data['type'], data['encoding'], data['value'])
except InputError as e:
print(e)
filedir, filename = os.path.split(args.output)
if filename:
print('\nWarning: NVS binary not created...')
os.remove(args.output)
if is_dir_new and not filedir == os.getcwd():
print('\nWarning: Output dir not created...')
os.rmdir(filedir)
sys.exit(-2)
print('\nCreated NVS binary: ===>', args.output)
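# Sketch of the CSV layout the generate() loop above expects (a header row
# first; a 'namespace' row opens a namespace for the key-value rows that
# follow). The column names come from the parsing above; the sample values
# are hypothetical:
#   key,type,encoding,value
#   storage,namespace,,
#   greeting,data,string,hello
#   retries,data,u8,3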
def main():
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers()
parser_gen = subparser.add_parser('generate',
help='Generate NVS partition',
formatter_class=argparse.RawTextHelpFormatter)
parser_gen.set_defaults(func=generate)
parser_gen.add_argument('input',
default=None,
help='Path to CSV file to parse')
parser_gen.add_argument('output',
default=None,
help='Path to output NVS binary file')
parser_gen.add_argument('size',
default=None,
help='Size of NVS partition in bytes\
\n(must be multiple of 4096)')
parser_gen.add_argument('--version',
choices=[1,2],
default=2,
type=int,
help='''Set multipage blob version.\
\nVersion 1 - Multipage blob support disabled.\
\nVersion 2 - Multipage blob support enabled.\
\nDefault: Version 2''')
parser_gen.add_argument('--outdir',
default=os.getcwd(),
help='Output directory to store files created\
\n(Default: current directory)')
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
main()
|
PypiClean
|
/bio_gopher-1.0.3-py3-none-any.whl/gopher/saliency_embed.py
|
import logomaker
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import subprocess
import tensorflow as tf
import umap.umap_ as umap
from gopher import utils
from tensorflow import keras
def select(embeddings, lower_lim_1=None,
upper_lim_1=None, lower_lim_2=None,
upper_lim_2=None, idr=''):
'''
    This function selects embedding points of a UMAP for downstream tasks
:param embeddings: UMAP 2D embeddings in pandas dataframe
:param lower_lim_1: X axis lower lim
:param upper_lim_1: X axis upper lim
:param lower_lim_2: Y axis lower lim
:param upper_lim_2: Y axis upper lim
    :param idr: 'y' keeps only IDR peaks, 'n' keeps only non-IDR peaks; any other value keeps all points
:return: mask filter
'''
mask = np.zeros((embeddings['UMAP 1'].shape[0])) + 1
if lower_lim_1:
mask *= (embeddings['UMAP 1'] > lower_lim_1).values
if upper_lim_1:
mask *= (embeddings['UMAP 1'] < upper_lim_1).values
if lower_lim_2:
mask *= (embeddings['UMAP 2'] > lower_lim_2).values
if upper_lim_2:
mask *= (embeddings['UMAP 2'] < upper_lim_2).values
if idr == 'y':
print('Choosing only IDR')
mask *= (embeddings['IDR'] == True).values
if idr == 'n':
print('Choosing only non IDR')
mask *= (embeddings['IDR'] != True).values
return mask.astype(bool)
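# A minimal usage sketch for select() (hypothetical embedding dataframe, not
# part of the original module): keep only IDR points inside a rectangular
# UMAP region.
def example_select_idr_region():
    rng = np.random.default_rng(0)
    df = pd.DataFrame({'UMAP 1': rng.normal(size=100),
                       'UMAP 2': rng.normal(size=100),
                       'IDR': rng.random(100) > 0.5})
    mask = select(df, lower_lim_1=-1, upper_lim_1=1,
                  lower_lim_2=-1, upper_lim_2=1, idr='y')
    return df[mask]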
def get_cell_line_overlaps(file_prefix, bedfile1, bedfile2, fraction_overlap=0.5):
"""
This function filters overlapping bed ranges and returns start coordinates of points that have idr overlaps. Useful for "annotating" whole chromosome chunks as idr or non-idr
:param file_prefix: output csv file prefix
:param bedfile1: first bedfile (the ranges of which will be annotated)
:param bedfile2: second bedfile that contains the idr regions
:param fraction_overlap: minimum fraction of overlap needed to call a sequence idr
:return: vector of starting positions of idr sequences in the test set
"""
cmd = 'bedtools intersect -f {} -wa -a {} -b {} | uniq > {}_IDR.bed'.format(fraction_overlap, bedfile1, bedfile2,
file_prefix)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
_ = process.communicate()
out_filename = '{}_IDR.bed'.format(file_prefix)
df = pd.read_csv(out_filename, sep='\t', header=None)
idr_starts = df.iloc[:, 1].values
os.remove(out_filename)
return idr_starts
def label_idr_peaks(C, cell_line, bedfile1, bedfile2, fraction_overlap=0.5):
"""
Function to classify each coordinate as idr or non-idr
:param C: iterable of coordinates in the format saved in the tfr datasets
:param cell_line: cell line to select the corresponding IDR peaks
:return: list of boolean values indicating if peak is present at that coordinate
"""
idr_class = []
file_prefix = 'cell_line_{}'.format(cell_line)
idr_starts = get_cell_line_overlaps(file_prefix, bedfile1, bedfile2, fraction_overlap=fraction_overlap)
idr_class.append([True if int(str(c).strip('\'b').split('_')[1]) in idr_starts else False for c in C])
idr_class = [item for sublist in idr_class for item in sublist]
return idr_class
def get_embeddings(input_features):
"""
    This function computes UMAP embeddings of the input features and returns them as a pandas dataframe
:param input_features: intermediate representations
:return: pandas dataframe of embeddings
"""
reducer = umap.UMAP(random_state=28)
embedding = reducer.fit_transform(input_features)
df = pd.DataFrame({'UMAP 1': embedding[:, 1], 'UMAP 2': embedding[:, 0]})
print('Finished embedding in UMAP')
return df
def tomtom_upstream(model_path, filter_layer, seq, output_path, threshold=0.5, pad=3):
model = utils.read_model(model_path, True)[0]
max_filter, counter = filter_max_align_batch(seq, model, layer=filter_layer)
clip_filter = clip_filters(max_filter, threshold=threshold, pad=pad)
    meme_generate(clip_filter, output_file=output_path + '.txt')
def meme_generate(W, output_file='meme.txt', prefix='filter'):
"""generate a meme file for a set of filters, W ∈ (N,L,A)"""
# background frequency
nt_freqs = [1. / 4 for i in range(4)]
# open file for writing
f = open(output_file, 'w')
# print intro material
f.write('MEME version 4\n')
f.write('\n')
f.write('ALPHABET= ACGT\n')
f.write('\n')
f.write('Background letter frequencies:\n')
f.write('A %.4f C %.4f G %.4f T %.4f \n' % tuple(nt_freqs))
f.write('\n')
for j, pwm in enumerate(W):
L, A = pwm.shape
f.write('MOTIF %s%d \n' % (prefix, j))
f.write('letter-probability matrix: alength= 4 w= %d nsites= %d \n' % (L, L))
for i in range(L):
f.write('%.4f %.4f %.4f %.4f \n' % tuple(pwm[i, :]))
f.write('\n')
f.close()
def clip_filters(W, threshold=0.5, pad=3):
"""clip uninformative parts of conv filters"""
W_clipped = []
for w in W:
L, A = w.shape
entropy = np.log2(4) + np.sum(w * np.log2(w + 1e-7), axis=1)
index = np.where(entropy > threshold)[0]
if index.any():
start = np.maximum(np.min(index) - pad, 0)
end = np.minimum(np.max(index) + pad + 1, L)
W_clipped.append(w[start:end, :])
else:
W_clipped.append(w)
return W_clipped
def filter_max_align_batch(X, model, layer=3, window=24, threshold=0.5, batch_size=1024, max_align=1e4, verbose=1):
"""get alignment of filter activations for visualization"""
if verbose:
print("Calculating filter PPM based on activation-based alignments")
N, L, A = X.element_spec.shape
num_filters = model.layers[layer].output.shape[2]
# Set the left and right window sizes
window_left = int(window / 2)
window_right = window - window_left
# get feature maps of 1st convolutional layer after activation
intermediate = keras.Model(inputs=model.inputs, outputs=model.layers[layer].output)
# dataset = tf.data.Dataset.from_tensor_slices(X)
# batches = X.batch(batch_size)
batches = X
# loop over batches to capture MAX activation
if verbose:
print(' Calculating MAX activation')
MAX = np.zeros(num_filters)
for x in batches:
# get feature map for mini-batch
fmap = intermediate.predict(x)
# loop over each filter to find "active" positions
for f in range(num_filters):
MAX[f] = np.maximum(MAX[f], np.max(fmap[:, :, f]))
# loop over each filter to find "active" positions
W = []
counts = []
for f in range(num_filters):
if verbose:
print(" processing %d out of %d filters" % (f + 1, num_filters))
status = 0
# compile sub-model to get feature map
intermediate = keras.Model(inputs=model.inputs, outputs=model.layers[layer].output[:, :, f])
# loop over each batch
# dataset = tf.data.Dataset.from_tensor_slices(X)
seq_align_sum = np.zeros((window, A)) # running sum
counter = 0 # counts the number of sequences in alignment
status = 1 # monitors whether depth of alignment has reached max_align
for x in X:
if status:
# get feature map for a batch sequences
fmaps = intermediate.predict(x)
# Find regions above threshold
for data_index, fmap in enumerate(fmaps):
if status:
pos_index = np.where(fmap > MAX[f] * threshold)[0]
# Make a sequence alignment centered about each activation (above threshold)
for i in range(len(pos_index)):
if status:
# Determine position of window about each filter activation
start_window = pos_index[i] - window_left
end_window = pos_index[i] + window_right
# Check to make sure positions are valid
if (start_window > 0) & (end_window < L):
seq_align_sum += x[data_index, start_window:end_window, :].numpy()
counter += 1
if counter > max_align:
status = 0
else:
break
else:
break
else:
if verbose:
print(" alignment has reached max depth for all filters")
break
# calculate position probability matrix of filter
if verbose:
print(" %d sub-sequences above threshold" % (counter))
if counter > 0:
W.append(seq_align_sum / counter)
else:
W.append(np.ones((window, A)) / A)
counts.append(counter)
return np.array(W), np.array(counts)
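# Sketch of the intended call (hypothetical tf.data pipeline; not part of the
# original module): X is a batched dataset of one-hot sequences, and the result
# is a position probability matrix per first-layer filter plus the number of
# aligned sub-sequences per filter.
#   ppms, counts = filter_max_align_batch(test_dataset, model, layer=3, window=24)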
class Explainer():
"""wrapper class for attribution maps"""
def __init__(self, model, class_index=None, func=tf.math.reduce_mean, binary=False):
self.model = model
self.class_index = class_index
self.func = func
self.binary = binary
def saliency_maps(self, X, batch_size=128):
return function_batch(X, saliency_map, batch_size, model=self.model,
class_index=self.class_index, func=self.func,
binary=self.binary)
def function_batch(X, fun, batch_size=128, **kwargs):
""" run a function in batches """
dataset = tf.data.Dataset.from_tensor_slices(X)
outputs = []
for x in dataset.batch(batch_size):
f = fun(x, **kwargs)
outputs.append(f)
return np.concatenate(outputs, axis=0)
def grad_times_input_to_df(x, grad, alphabet='ACGT'):
"""generate pandas dataframe for saliency plot
based on grad x inputs """
x_index = np.argmax(np.squeeze(x), axis=1)
grad = np.squeeze(grad)
L, A = grad.shape
seq = ''
saliency = np.zeros((L))
for i in range(L):
seq += alphabet[x_index[i]]
saliency[i] = grad[i,x_index[i]]
# create saliency matrix
saliency_df = logomaker.saliency_to_matrix(seq=seq, values=saliency)
return saliency_df
@tf.function
def saliency_map(X, model, class_index=None, func=tf.math.reduce_mean, binary=False):
"""fast function to generate saliency maps"""
if not tf.is_tensor(X):
X = tf.Variable(X)
with tf.GradientTape() as tape:
tape.watch(X)
if binary:
outputs = model(X)[:, class_index]
else:
outputs = tf.math.reduce_mean(model(X)[:, :, class_index], axis=1)
return tape.gradient(outputs, X)
def plot_mean_coverages(data_and_labels, ax):
"""
Plot average coverage and std as a shade
:param data_and_labels: iterable of pairs of data points and label
:param ax: figure axis
:return:
"""
for i, (data, label, p) in enumerate(data_and_labels):
x = np.arange(data.shape[1])
est = np.mean(data, axis=0)
sd = np.std(data, axis=0)
cis = (est - sd, est + sd)
ax.fill_between(x, cis[0], cis[1], alpha=0.08, color=p)
ax.plot(x, est, p, label=label)
def plot_attribution_map(saliency_df, ax=None, figsize=(20,1)):
"""plot an attribution map using logomaker"""
logomaker.Logo(saliency_df, figsize=figsize, ax=ax)
if ax is None:
ax = plt.gca()
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.yaxis.set_ticks_position('none')
ax.xaxis.set_ticks_position('none')
plt.xticks([])
plt.yticks([])
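# Typical workflow sketch for the attribution utilities above (hypothetical
# model and data; not part of the original module):
#   explainer = Explainer(model, class_index=0)          # wrap a trained Keras model
#   scores = explainer.saliency_maps(X, batch_size=64)   # X: one-hot array (N, L, 4)
#   df = grad_times_input_to_df(X[:1], scores[:1])       # grad x input for one sequence
#   plot_attribution_map(df)                             # draw the saliency logo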
def plot_saliency_logos_oneplot(saliency_scores, X_sample, window=20,
titles=[],
filename=None):
"""
Function for plotting and saving saliency maps
:param saliency_scores: pre-computed saliency scores
:param X_sample: input sequences
:param window: window around peak saliency to plot
:param titles: title of each subplot
:param filename: filepath where the svg will be saved
:return: None
"""
N, L, A = X_sample.shape
fig, axs = plt.subplots(N, 1, figsize=[20, 2 * N])
for i in range(N):
ax = axs[i]
x_sample = np.expand_dims(X_sample[i], axis=0)
scores = np.expand_dims(saliency_scores[i], axis=0)
# find window to plot saliency maps (about max saliency value)
index = np.argmax(np.max(np.abs(scores), axis=2), axis=1)[0]
if index - window < 0:
start = 0
end = window * 2 + 1
elif index + window > L:
start = L - window * 2 - 1
end = L
else:
start = index - window
end = index + window
saliency_df = grad_times_input_to_df(x_sample[:, start:end, :], scores[:, start:end, :])
plot_attribution_map(saliency_df, ax, figsize=(20, 1))
if len(titles):
ax.set_title(titles[i])
plt.tight_layout()
if filename:
assert not os.path.isfile(filename), 'File exists!'
plt.savefig(filename, format='svg')
#------------------------------------------------------------------------------
def smoothgrad(x, model, num_samples=50, mean=0.0, stddev=0.1,
class_index=None, func=tf.math.reduce_mean):
_,L,A = x.shape
x_noise = tf.tile(x, (num_samples,1,1)) + tf.random.normal((num_samples,L,A), mean, stddev)
grad = saliency_map(x_noise, model, class_index=class_index, func=func)
return tf.reduce_mean(grad, axis=0, keepdims=True)
#------------------------------------------------------------------------------
def integrated_grad(x, model, baseline, num_steps=25,
class_index=None, func=tf.math.reduce_mean):
def integral_approximation(gradients):
# riemann_trapezoidal
grads = (gradients[:-1] + gradients[1:]) / tf.constant(2.0)
integrated_gradients = tf.math.reduce_mean(grads, axis=0)
return integrated_gradients
def interpolate_data(baseline, x, steps):
steps_x = steps[:, tf.newaxis, tf.newaxis]
delta = x - baseline
x = baseline + steps_x * delta
return x
steps = tf.linspace(start=0.0, stop=1.0, num=num_steps+1)
x_interp = interpolate_data(baseline, x, steps)
grad = saliency_map(x_interp, model, class_index=class_index, func=func)
avg_grad = integral_approximation(grad)
avg_grad= np.expand_dims(avg_grad, axis=0)
return avg_grad
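# A common baseline choice (sketch, not prescribed by the module): an all-zeros
# tensor of the same shape as x, e.g.
#   avg_grad = integrated_grad(x, model, baseline=tf.zeros_like(x))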
#------------------------------------------------------------------------------
def expected_integrated_grad(x, model, baselines, num_steps=25,
class_index=None, func=tf.math.reduce_mean):
"""average integrated gradients across different backgrounds"""
grads = []
for baseline in baselines:
grads.append(integrated_grad(x, model, baseline, num_steps=num_steps,
class_index=class_index, func=tf.math.reduce_mean))
return np.mean(np.array(grads), axis=0)
#------------------------------------------------------------------------------
def mutagenesis(x, model, class_index=None):
""" in silico mutagenesis analysis for a given sequence"""
def generate_mutagenesis(x):
_,L,A = x.shape
x_mut = []
for l in range(L):
for a in range(A):
x_new = np.copy(x)
x_new[0,l,:] = 0
x_new[0,l,a] = 1
x_mut.append(x_new)
return np.concatenate(x_mut, axis=0)
def reconstruct_map(predictions):
_,L,A = x.shape
mut_score = np.zeros((1,L,A))
k = 0
for l in range(L):
for a in range(A):
mut_score[0,l,a] = predictions[k]
k += 1
return mut_score
def get_score(x, model, class_index):
score = model.predict(x)
        if class_index is None:
score = np.sqrt(np.sum(score**2, axis=-1, keepdims=True))
else:
score = score[:,class_index]
return score
# generate mutagenized sequences
x_mut = generate_mutagenesis(x)
# get baseline wildtype score
wt_score = get_score(x, model, class_index)
predictions = get_score(x_mut, model, class_index)
    # reshape mutagenesis predictions
mut_score = reconstruct_map(predictions)
return mut_score - wt_score
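# Example sketch (hypothetical Keras model and one-hot input x of shape (1, L, 4)):
#   mut_map = mutagenesis(x, model, class_index=0)
# mut_map has shape (1, L, 4); entry [0, l, a] is the change in the selected
# output when position l is mutated to base a.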
|
PypiClean
|
/message_server_xBarbarian-0.6.7-py3-none-any.whl/server/server/main_window.py
|
from PyQt5.QtWidgets import QMainWindow, QAction, qApp, QApplication, QLabel, QTableView
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QTimer
from server.stat_window import StatWindow
from server.config_window import ConfigWindow
from server.add_user import RegisterUser
from server.remove_user import DelUserDialog
class MainWindow(QMainWindow):
    '''Main server window class.'''
def __init__(self, database, server, config):
        # Call the parent class constructor
super().__init__()
        # Server database
self.database = database
self.server_thread = server
self.config = config
        # Exit action
self.exitAction = QAction('Выход', self)
self.exitAction.setShortcut('Ctrl+Q')
self.exitAction.triggered.connect(qApp.quit)
        # Button to refresh the client list
self.refresh_button = QAction('Обновить список', self)
        # Server settings button
self.config_btn = QAction('Настройки сервера', self)
        # User registration button
self.register_btn = QAction('Регистрация пользователя', self)
        # User removal button
self.remove_btn = QAction('Удаление пользователя', self)
        # Button to show the client message history
self.show_history_button = QAction('История клиентов', self)
        # Status bar
self.statusBar()
self.statusBar().showMessage('Server Working')
        # Toolbar
self.toolbar = self.addToolBar('MainBar')
self.toolbar.addAction(self.exitAction)
self.toolbar.addAction(self.refresh_button)
self.toolbar.addAction(self.show_history_button)
self.toolbar.addAction(self.config_btn)
self.toolbar.addAction(self.register_btn)
self.toolbar.addAction(self.remove_btn)
        # Main window geometry settings.
        # Dynamic resizing is not handled here and there was little time to
        # learn it, so the window size is fixed.
self.setFixedSize(800, 600)
self.setWindowTitle('Messaging Server alpha release')
        # Label indicating that the list of connected clients follows below
self.label = QLabel('Список подключённых клиентов:', self)
self.label.setFixedSize(240, 15)
self.label.move(10, 25)
        # Table with the list of connected clients.
self.active_clients_table = QTableView(self)
self.active_clients_table.move(10, 45)
self.active_clients_table.setFixedSize(780, 400)
        # Timer that refreshes the client list once per second
self.timer = QTimer()
self.timer.timeout.connect(self.create_users_model)
self.timer.start(1000)
        # Connect the buttons to their handlers
self.refresh_button.triggered.connect(self.create_users_model)
self.show_history_button.triggered.connect(self.show_statistics)
self.config_btn.triggered.connect(self.server_config)
self.register_btn.triggered.connect(self.reg_user)
self.remove_btn.triggered.connect(self.rem_user)
        # Finally, show the window.
self.show()
def create_users_model(self):
        '''Method that populates the table of active users.'''
list_users = self.database.active_users_list()
list = QStandardItemModel()
list.setHorizontalHeaderLabels(
['Имя Клиента', 'IP Адрес', 'Порт', 'Время подключения'])
for row in list_users:
user, ip, port, time = row
user = QStandardItem(user)
user.setEditable(False)
ip = QStandardItem(ip)
ip.setEditable(False)
port = QStandardItem(str(port))
port.setEditable(False)
            # Strip microseconds from the time string, since that much
            # precision is not needed.
time = QStandardItem(str(time.replace(microsecond=0)))
time.setEditable(False)
list.appendRow([user, ip, port, time])
self.active_clients_table.setModel(list)
self.active_clients_table.resizeColumnsToContents()
self.active_clients_table.resizeRowsToContents()
def show_statistics(self):
        '''Method that creates the client statistics window.'''
global stat_window
stat_window = StatWindow(self.database)
stat_window.show()
def server_config(self):
        '''Method that creates the server settings window.'''
global config_window
        # Create the window and load the current settings into it
config_window = ConfigWindow(self.config)
def reg_user(self):
        '''Method that creates the user registration window.'''
global reg_window
reg_window = RegisterUser(self.database, self.server_thread)
reg_window.show()
def rem_user(self):
        '''Method that creates the user removal window.'''
global rem_window
rem_window = DelUserDialog(self.database, self.server_thread)
rem_window.show()
|
PypiClean
|
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/distutils.ext/obidistutils/serenity/pip/_vendor/distlib/metadata.py
|
from __future__ import unicode_literals
import codecs
from email import message_from_file
import json
import logging
import re
from . import DistlibException, __version__
from .compat import StringIO, string_types, text_type
from .markers import interpret
from .util import extract_by_key, get_extras
from .version import get_scheme, PEP426_VERSION_RE
logger = logging.getLogger(__name__)
class MetadataMissingError(DistlibException):
"""A required metadata is missing"""
class MetadataConflictError(DistlibException):
"""Attempt to read or write metadata fields that are conflictual."""
class MetadataUnrecognizedVersionError(DistlibException):
"""Unknown metadata version number."""
class MetadataInvalidError(DistlibException):
"""A metadata value is invalid"""
# public API of this module
__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
# Encoding used for the PKG-INFO files
PKG_INFO_ENCODING = 'utf-8'
# preferred version. Hopefully will be changed
# to 1.2 once PEP 345 is supported everywhere
PKG_INFO_PREFERRED_VERSION = '1.1'
_LINE_PREFIX = re.compile('\n \|')
_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License')
_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'License', 'Classifier', 'Download-URL', 'Obsoletes',
'Provides', 'Requires')
_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
'Download-URL')
_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External')
_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
'Obsoletes-Dist', 'Requires-External', 'Maintainer',
'Maintainer-email', 'Project-URL')
_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
'Supported-Platform', 'Summary', 'Description',
'Keywords', 'Home-page', 'Author', 'Author-email',
'Maintainer', 'Maintainer-email', 'License',
'Classifier', 'Download-URL', 'Obsoletes-Dist',
'Project-URL', 'Provides-Dist', 'Requires-Dist',
'Requires-Python', 'Requires-External', 'Private-Version',
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
'Provides-Extra')
_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
'Setup-Requires-Dist', 'Extension')
_ALL_FIELDS = set()
_ALL_FIELDS.update(_241_FIELDS)
_ALL_FIELDS.update(_314_FIELDS)
_ALL_FIELDS.update(_345_FIELDS)
_ALL_FIELDS.update(_426_FIELDS)
EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
def _version2fieldlist(version):
if version == '1.0':
return _241_FIELDS
elif version == '1.1':
return _314_FIELDS
elif version == '1.2':
return _345_FIELDS
elif version == '2.0':
return _426_FIELDS
raise MetadataUnrecognizedVersionError(version)
def _best_version(fields):
"""Detect the best version depending on the fields used."""
def _has_marker(keys, markers):
for marker in markers:
if marker in keys:
return True
return False
keys = []
for key, value in fields.items():
if value in ([], 'UNKNOWN', None):
continue
keys.append(key)
possible_versions = ['1.0', '1.1', '1.2', '2.0']
# first let's try to see if a field is not part of one of the version
for key in keys:
if key not in _241_FIELDS and '1.0' in possible_versions:
possible_versions.remove('1.0')
if key not in _314_FIELDS and '1.1' in possible_versions:
possible_versions.remove('1.1')
if key not in _345_FIELDS and '1.2' in possible_versions:
possible_versions.remove('1.2')
if key not in _426_FIELDS and '2.0' in possible_versions:
possible_versions.remove('2.0')
# possible_version contains qualified versions
if len(possible_versions) == 1:
return possible_versions[0] # found !
elif len(possible_versions) == 0:
raise MetadataConflictError('Unknown metadata set')
# let's see if one unique marker is found
is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
if int(is_1_1) + int(is_1_2) + int(is_2_0) > 1:
raise MetadataConflictError('You used incompatible 1.1/1.2/2.0 fields')
# we have the choice, 1.0, or 1.2, or 2.0
# - 1.0 has a broken Summary field but works with all tools
# - 1.1 is to avoid
# - 1.2 fixes Summary but has little adoption
# - 2.0 adds more features and is very new
if not is_1_1 and not is_1_2 and not is_2_0:
# we couldn't find any specific marker
if PKG_INFO_PREFERRED_VERSION in possible_versions:
return PKG_INFO_PREFERRED_VERSION
if is_1_1:
return '1.1'
if is_1_2:
return '1.2'
return '2.0'
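# Worked example (sketch): fields containing only Name/Version/Summary resolve
# to the preferred version '1.1'; adding Requires-Dist forces '1.2', and
# Provides-Extra forces '2.0'.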
_ATTR2FIELD = {
'metadata_version': 'Metadata-Version',
'name': 'Name',
'version': 'Version',
'platform': 'Platform',
'supported_platform': 'Supported-Platform',
'summary': 'Summary',
'description': 'Description',
'keywords': 'Keywords',
'home_page': 'Home-page',
'author': 'Author',
'author_email': 'Author-email',
'maintainer': 'Maintainer',
'maintainer_email': 'Maintainer-email',
'license': 'License',
'classifier': 'Classifier',
'download_url': 'Download-URL',
'obsoletes_dist': 'Obsoletes-Dist',
'provides_dist': 'Provides-Dist',
'requires_dist': 'Requires-Dist',
'setup_requires_dist': 'Setup-Requires-Dist',
'requires_python': 'Requires-Python',
'requires_external': 'Requires-External',
'requires': 'Requires',
'provides': 'Provides',
'obsoletes': 'Obsoletes',
'project_url': 'Project-URL',
'private_version': 'Private-Version',
'obsoleted_by': 'Obsoleted-By',
'extension': 'Extension',
'provides_extra': 'Provides-Extra',
}
_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
_VERSIONS_FIELDS = ('Requires-Python',)
_VERSION_FIELDS = ('Version',)
_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
'Requires', 'Provides', 'Obsoletes-Dist',
'Provides-Dist', 'Requires-Dist', 'Requires-External',
'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
'Provides-Extra', 'Extension')
_LISTTUPLEFIELDS = ('Project-URL',)
_ELEMENTSFIELD = ('Keywords',)
_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
_MISSING = object()
_FILESAFE = re.compile('[^A-Za-z0-9.]+')
def _get_name_and_version(name, version, for_filename=False):
"""Return the distribution name with version.
If for_filename is true, return a filename-escaped form."""
if for_filename:
# For both name and version any runs of non-alphanumeric or '.'
# characters are replaced with a single '-'. Additionally any
# spaces in the version string become '.'
name = _FILESAFE.sub('-', name)
version = _FILESAFE.sub('-', version.replace(' ', '.'))
return '%s-%s' % (name, version)
class LegacyMetadata(object):
"""The legacy metadata of a release.
Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can
instantiate the class with one of these arguments (or none):
- *path*, the path to a metadata file
- *fileobj* give a file-like object with metadata as content
- *mapping* is a dict-like object
- *scheme* is a version scheme name
"""
# TODO document the mapping API and UNKNOWN default key
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._fields = {}
self.requires_files = []
self._dependencies = None
self.scheme = scheme
if path is not None:
self.read(path)
elif fileobj is not None:
self.read_file(fileobj)
elif mapping is not None:
self.update(mapping)
self.set_metadata_version()
def set_metadata_version(self):
self._fields['Metadata-Version'] = _best_version(self._fields)
def _write_field(self, fileobj, name, value):
fileobj.write('%s: %s\n' % (name, value))
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, value):
return self.set(name, value)
def __delitem__(self, name):
field_name = self._convert_name(name)
try:
del self._fields[field_name]
except KeyError:
raise KeyError(name)
def __contains__(self, name):
return (name in self._fields or
self._convert_name(name) in self._fields)
def _convert_name(self, name):
if name in _ALL_FIELDS:
return name
name = name.replace('-', '_').lower()
return _ATTR2FIELD.get(name, name)
def _default_value(self, name):
if name in _LISTFIELDS or name in _ELEMENTSFIELD:
return []
return 'UNKNOWN'
def _remove_line_prefix(self, value):
return _LINE_PREFIX.sub('\n', value)
def __getattr__(self, name):
if name in _ATTR2FIELD:
return self[name]
raise AttributeError(name)
#
# Public API
#
# dependencies = property(_get_dependencies, _set_dependencies)
def get_fullname(self, filesafe=False):
"""Return the distribution name with version.
If filesafe is true, return a filename-escaped form."""
return _get_name_and_version(self['Name'], self['Version'], filesafe)
def is_field(self, name):
"""return True if name is a valid metadata key"""
name = self._convert_name(name)
return name in _ALL_FIELDS
def is_multi_field(self, name):
name = self._convert_name(name)
return name in _LISTFIELDS
def read(self, filepath):
"""Read the metadata values from a file path."""
fp = codecs.open(filepath, 'r', encoding='utf-8')
try:
self.read_file(fp)
finally:
fp.close()
def read_file(self, fileob):
"""Read the metadata values from a file object."""
msg = message_from_file(fileob)
self._fields['Metadata-Version'] = msg['metadata-version']
# When reading, get all the fields we can
for field in _ALL_FIELDS:
if field not in msg:
continue
if field in _LISTFIELDS:
# we can have multiple lines
values = msg.get_all(field)
if field in _LISTTUPLEFIELDS and values is not None:
values = [tuple(value.split(',')) for value in values]
self.set(field, values)
else:
# single line
value = msg[field]
if value is not None and value != 'UNKNOWN':
self.set(field, value)
self.set_metadata_version()
def write(self, filepath, skip_unknown=False):
"""Write the metadata fields to filepath."""
fp = codecs.open(filepath, 'w', encoding='utf-8')
try:
self.write_file(fp, skip_unknown)
finally:
fp.close()
def write_file(self, fileobject, skip_unknown=False):
"""Write the PKG-INFO format data to a file object."""
self.set_metadata_version()
for field in _version2fieldlist(self['Metadata-Version']):
values = self.get(field)
if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
continue
if field in _ELEMENTSFIELD:
self._write_field(fileobject, field, ','.join(values))
continue
if field not in _LISTFIELDS:
if field == 'Description':
values = values.replace('\n', '\n |')
values = [values]
if field in _LISTTUPLEFIELDS:
values = [','.join(value) for value in values]
for value in values:
self._write_field(fileobject, field, value)
def update(self, other=None, **kwargs):
"""Set metadata values from the given iterable `other` and kwargs.
Behavior is like `dict.update`: If `other` has a ``keys`` method,
they are looped over and ``self[key]`` is assigned ``other[key]``.
Else, ``other`` is an iterable of ``(key, value)`` iterables.
Keys that don't match a metadata field or that have an empty value are
dropped.
"""
def _set(key, value):
if key in _ATTR2FIELD and value:
self.set(self._convert_name(key), value)
if not other:
# other is None or empty container
pass
elif hasattr(other, 'keys'):
for k in other.keys():
_set(k, other[k])
else:
for k, v in other:
_set(k, v)
if kwargs:
for k, v in kwargs.items():
_set(k, v)
def set(self, name, value):
"""Control then set a metadata field."""
name = self._convert_name(name)
if ((name in _ELEMENTSFIELD or name == 'Platform') and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [v.strip() for v in value.split(',')]
else:
value = []
elif (name in _LISTFIELDS and
not isinstance(value, (list, tuple))):
if isinstance(value, string_types):
value = [value]
else:
value = []
if logger.isEnabledFor(logging.WARNING):
project_name = self['Name']
scheme = get_scheme(self.scheme)
if name in _PREDICATE_FIELDS and value is not None:
for v in value:
# check that the values are valid
if not scheme.is_valid_matcher(v.split(';')[0]):
logger.warning(
'%r: %r is not valid (field %r)',
project_name, v, name)
# FIXME this rejects UNKNOWN, is that right?
elif name in _VERSIONS_FIELDS and value is not None:
if not scheme.is_valid_constraint_list(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
elif name in _VERSION_FIELDS and value is not None:
if not scheme.is_valid_version(value):
logger.warning('%r: %r is not a valid version (field %r)',
project_name, value, name)
if name in _UNICODEFIELDS:
if name == 'Description':
value = self._remove_line_prefix(value)
self._fields[name] = value
def get(self, name, default=_MISSING):
"""Get a metadata field."""
name = self._convert_name(name)
if name not in self._fields:
if default is _MISSING:
default = self._default_value(name)
return default
if name in _UNICODEFIELDS:
value = self._fields[name]
return value
elif name in _LISTFIELDS:
value = self._fields[name]
if value is None:
return []
res = []
for val in value:
if name not in _LISTTUPLEFIELDS:
res.append(val)
else:
# That's for Project-URL
res.append((val[0], val[1]))
return res
elif name in _ELEMENTSFIELD:
value = self._fields[name]
if isinstance(value, string_types):
return value.split(',')
return self._fields[name]
def check(self, strict=False):
"""Check if the metadata is compliant. If strict is True then raise if
no Name or Version are provided"""
self.set_metadata_version()
# XXX should check the versions (if the file was loaded)
missing, warnings = [], []
for attr in ('Name', 'Version'): # required by PEP 345
if attr not in self:
missing.append(attr)
if strict and missing != []:
msg = 'missing required metadata: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for attr in ('Home-page', 'Author'):
if attr not in self:
missing.append(attr)
# checking metadata 1.2 (XXX needs to check 1.1, 1.0)
if self['Metadata-Version'] != '1.2':
return missing, warnings
scheme = get_scheme(self.scheme)
def are_valid_constraints(value):
for v in value:
if not scheme.is_valid_matcher(v.split(';')[0]):
return False
return True
for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
(_VERSIONS_FIELDS,
scheme.is_valid_constraint_list),
(_VERSION_FIELDS,
scheme.is_valid_version)):
for field in fields:
value = self.get(field, None)
if value is not None and not controller(value):
warnings.append('Wrong value for %r: %s' % (field, value))
return missing, warnings
def todict(self, skip_missing=False):
"""Return fields as a dict.
Field names will be converted to use the underscore-lowercase style
instead of hyphen-mixed case (i.e. home_page instead of Home-page).
"""
self.set_metadata_version()
mapping_1_0 = (
('metadata_version', 'Metadata-Version'),
('name', 'Name'),
('version', 'Version'),
('summary', 'Summary'),
('home_page', 'Home-page'),
('author', 'Author'),
('author_email', 'Author-email'),
('license', 'License'),
('description', 'Description'),
('keywords', 'Keywords'),
('platform', 'Platform'),
('classifier', 'Classifier'),
('download_url', 'Download-URL'),
)
data = {}
for key, field_name in mapping_1_0:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
if self['Metadata-Version'] == '1.2':
mapping_1_2 = (
('requires_dist', 'Requires-Dist'),
('requires_python', 'Requires-Python'),
('requires_external', 'Requires-External'),
('provides_dist', 'Provides-Dist'),
('obsoletes_dist', 'Obsoletes-Dist'),
('project_url', 'Project-URL'),
('maintainer', 'Maintainer'),
('maintainer_email', 'Maintainer-email'),
)
for key, field_name in mapping_1_2:
if not skip_missing or field_name in self._fields:
if key != 'project_url':
data[key] = self[field_name]
else:
data[key] = [','.join(u) for u in self[field_name]]
elif self['Metadata-Version'] == '1.1':
mapping_1_1 = (
('provides', 'Provides'),
('requires', 'Requires'),
('obsoletes', 'Obsoletes'),
)
for key, field_name in mapping_1_1:
if not skip_missing or field_name in self._fields:
data[key] = self[field_name]
return data
def add_requirements(self, requirements):
if self['Metadata-Version'] == '1.1':
# we can't have 1.1 metadata *and* Setuptools requires
for field in ('Obsoletes', 'Requires', 'Provides'):
if field in self:
del self[field]
self['Requires-Dist'] += requirements
# Mapping API
# TODO could add iter* variants
def keys(self):
return list(_version2fieldlist(self['Metadata-Version']))
def __iter__(self):
for key in self.keys():
yield key
def values(self):
return [self[key] for key in self.keys()]
def items(self):
return [(key, self[key]) for key in self.keys()]
def __repr__(self):
return '<%s %s %s>' % (self.__class__.__name__, self.name,
self.version)
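# A minimal usage sketch (hypothetical values; not part of distlib itself):
#   md = LegacyMetadata(mapping={'name': 'example-pkg', 'version': '1.0',
#                                'summary': 'An example package'})
#   md.check(strict=True)   # raises MetadataMissingError if Name/Version are absent
#   md.write('PKG-INFO')    # emit key-value metadata to a file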
METADATA_FILENAME = 'pydist.json'
class Metadata(object):
"""
The metadata of a release. This implementation uses 2.0 (JSON)
metadata where possible. If not possible, it wraps a LegacyMetadata
instance which handles the key-value metadata format.
"""
METADATA_VERSION_MATCHER = re.compile('^\d+(\.\d+)*$')
NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
VERSION_MATCHER = PEP426_VERSION_RE
SUMMARY_MATCHER = re.compile('.{1,2047}')
METADATA_VERSION = '2.0'
GENERATOR = 'distlib (%s)' % __version__
MANDATORY_KEYS = {
'name': (),
'version': (),
'summary': ('legacy',),
}
INDEX_KEYS = ('name version license summary description author '
'author_email keywords platform home_page classifiers '
'download_url')
DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
'dev_requires provides meta_requires obsoleted_by '
'supports_environments')
SYNTAX_VALIDATORS = {
'metadata_version': (METADATA_VERSION_MATCHER, ()),
'name': (NAME_MATCHER, ('legacy',)),
'version': (VERSION_MATCHER, ('legacy',)),
'summary': (SUMMARY_MATCHER, ('legacy',)),
}
__slots__ = ('_legacy', '_data', 'scheme')
def __init__(self, path=None, fileobj=None, mapping=None,
scheme='default'):
if [path, fileobj, mapping].count(None) < 2:
raise TypeError('path, fileobj and mapping are exclusive')
self._legacy = None
self._data = None
self.scheme = scheme
#import pdb; pdb.set_trace()
if mapping is not None:
try:
self._validate_mapping(mapping, scheme)
self._data = mapping
except MetadataUnrecognizedVersionError:
self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
self.validate()
else:
data = None
if path:
with open(path, 'rb') as f:
data = f.read()
elif fileobj:
data = fileobj.read()
if data is None:
# Initialised with no args - to be added
self._data = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
else:
if not isinstance(data, text_type):
data = data.decode('utf-8')
try:
self._data = json.loads(data)
self._validate_mapping(self._data, scheme)
except ValueError:
# Note: MetadataUnrecognizedVersionError does not
# inherit from ValueError (it's a DistlibException,
# which should not inherit from ValueError).
# The ValueError comes from the json.load - if that
# succeeds and we get a validation error, we want
# that to propagate
self._legacy = LegacyMetadata(fileobj=StringIO(data),
scheme=scheme)
self.validate()
common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
none_list = (None, list)
none_dict = (None, dict)
mapped_keys = {
'run_requires': ('Requires-Dist', list),
'build_requires': ('Setup-Requires-Dist', list),
'dev_requires': none_list,
'test_requires': none_list,
'meta_requires': none_list,
'extras': ('Provides-Extra', list),
'modules': none_list,
'namespaces': none_list,
'exports': none_dict,
'commands': none_dict,
'classifiers': ('Classifier', list),
'source_url': ('Download-URL', None),
'metadata_version': ('Metadata-Version', None),
}
del none_list, none_dict
def __getattribute__(self, key):
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, maker = mapped[key]
if self._legacy:
if lk is None:
result = None if maker is None else maker()
else:
result = self._legacy.get(lk)
else:
value = None if maker is None else maker()
result = self._data.get(key, value)
elif key not in common:
result = object.__getattribute__(self, key)
elif self._legacy:
result = self._legacy.get(key)
else:
result = self._data.get(key)
return result
def _validate_value(self, key, value, scheme=None):
if key in self.SYNTAX_VALIDATORS:
pattern, exclusions = self.SYNTAX_VALIDATORS[key]
if (scheme or self.scheme) not in exclusions:
m = pattern.match(value)
if not m:
raise MetadataInvalidError('%r is an invalid value for '
'the %r property' % (value,
key))
def __setattr__(self, key, value):
self._validate_value(key, value)
common = object.__getattribute__(self, 'common_keys')
mapped = object.__getattribute__(self, 'mapped_keys')
if key in mapped:
lk, _ = mapped[key]
if self._legacy:
if lk is None:
raise NotImplementedError
self._legacy[lk] = value
else:
self._data[key] = value
elif key not in common:
object.__setattr__(self, key, value)
else:
if key == 'keywords':
if isinstance(value, string_types):
value = value.strip()
if value:
value = value.split()
else:
value = []
if self._legacy:
self._legacy[key] = value
else:
self._data[key] = value
@property
def name_and_version(self):
return _get_name_and_version(self.name, self.version, True)
@property
def provides(self):
if self._legacy:
result = self._legacy['Provides-Dist']
else:
result = self._data.setdefault('provides', [])
s = '%s (%s)' % (self.name, self.version)
if s not in result:
result.append(s)
return result
@provides.setter
def provides(self, value):
if self._legacy:
self._legacy['Provides-Dist'] = value
else:
self._data['provides'] = value
def get_requirements(self, reqts, extras=None, env=None):
"""
Base method to get dependencies, given a set of extras
to satisfy and an optional environment context.
:param reqts: A list of sometimes-wanted dependencies,
perhaps dependent on extras and environment.
:param extras: A list of optional components being requested.
:param env: An optional environment for marker evaluation.
"""
if self._legacy:
result = reqts
else:
result = []
extras = get_extras(extras or [], self.extras)
for d in reqts:
if 'extra' not in d and 'environment' not in d:
# unconditional
include = True
else:
if 'extra' not in d:
# Not extra-dependent - only environment-dependent
include = True
else:
include = d.get('extra') in extras
if include:
# Not excluded because of extras, check environment
marker = d.get('environment')
if marker:
include = interpret(marker, env)
if include:
result.extend(d['requires'])
for key in ('build', 'dev', 'test'):
e = ':%s:' % key
if e in extras:
extras.remove(e)
# A recursive call, but it should terminate since 'test'
# has been removed from the extras
reqts = self._data.get('%s_requires' % key, [])
result.extend(self.get_requirements(reqts, extras=extras,
env=env))
return result
@property
def dictionary(self):
if self._legacy:
return self._from_legacy()
return self._data
@property
def dependencies(self):
if self._legacy:
raise NotImplementedError
else:
return extract_by_key(self._data, self.DEPENDENCY_KEYS)
@dependencies.setter
def dependencies(self, value):
if self._legacy:
raise NotImplementedError
else:
self._data.update(value)
def _validate_mapping(self, mapping, scheme):
if mapping.get('metadata_version') != self.METADATA_VERSION:
raise MetadataUnrecognizedVersionError()
missing = []
for key, exclusions in self.MANDATORY_KEYS.items():
if key not in mapping:
if scheme not in exclusions:
missing.append(key)
if missing:
msg = 'Missing metadata items: %s' % ', '.join(missing)
raise MetadataMissingError(msg)
for k, v in mapping.items():
self._validate_value(k, v, scheme)
def validate(self):
if self._legacy:
missing, warnings = self._legacy.check(True)
if missing or warnings:
logger.warning('Metadata: missing: %s, warnings: %s',
missing, warnings)
else:
self._validate_mapping(self._data, self.scheme)
def todict(self):
if self._legacy:
return self._legacy.todict(True)
else:
result = extract_by_key(self._data, self.INDEX_KEYS)
return result
def _from_legacy(self):
assert self._legacy and not self._data
result = {
'metadata_version': self.METADATA_VERSION,
'generator': self.GENERATOR,
}
lmd = self._legacy.todict(True) # skip missing ones
for k in ('name', 'version', 'license', 'summary', 'description',
'classifier'):
if k in lmd:
if k == 'classifier':
nk = 'classifiers'
else:
nk = k
result[nk] = lmd[k]
kw = lmd.get('Keywords', [])
if kw == ['']:
kw = []
result['keywords'] = kw
keys = (('requires_dist', 'run_requires'),
('setup_requires_dist', 'build_requires'))
for ok, nk in keys:
if ok in lmd and lmd[ok]:
result[nk] = [{'requires': lmd[ok]}]
result['provides'] = self.provides
author = {}
maintainer = {}
return result
LEGACY_MAPPING = {
'name': 'Name',
'version': 'Version',
'license': 'License',
'summary': 'Summary',
'description': 'Description',
'classifiers': 'Classifier',
}
def _to_legacy(self):
def process_entries(entries):
reqts = set()
for e in entries:
extra = e.get('extra')
env = e.get('environment')
rlist = e['requires']
for r in rlist:
if not env and not extra:
reqts.add(r)
else:
marker = ''
if extra:
marker = 'extra == "%s"' % extra
if env:
if marker:
marker = '(%s) and %s' % (env, marker)
else:
marker = env
reqts.add(';'.join((r, marker)))
return reqts
assert self._data and not self._legacy
result = LegacyMetadata()
nmd = self._data
for nk, ok in self.LEGACY_MAPPING.items():
if nk in nmd:
result[ok] = nmd[nk]
r1 = process_entries(self.run_requires + self.meta_requires)
r2 = process_entries(self.build_requires + self.dev_requires)
if self.extras:
result['Provides-Extra'] = sorted(self.extras)
result['Requires-Dist'] = sorted(r1)
result['Setup-Requires-Dist'] = sorted(r2)
# TODO: other fields such as contacts
return result
def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
if [path, fileobj].count(None) != 1:
raise ValueError('Exactly one of path and fileobj is needed')
self.validate()
if legacy:
if self._legacy:
legacy_md = self._legacy
else:
legacy_md = self._to_legacy()
if path:
legacy_md.write(path, skip_unknown=skip_unknown)
else:
legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
else:
if self._legacy:
d = self._from_legacy()
else:
d = self._data
if fileobj:
json.dump(d, fileobj, ensure_ascii=True, indent=2,
sort_keys=True)
else:
with codecs.open(path, 'w', 'utf-8') as f:
json.dump(d, f, ensure_ascii=True, indent=2,
sort_keys=True)
def add_requirements(self, requirements):
if self._legacy:
self._legacy.add_requirements(requirements)
else:
run_requires = self._data.setdefault('run_requires', [])
always = None
for entry in run_requires:
if 'environment' not in entry and 'extra' not in entry:
always = entry
break
if always is None:
always = { 'requires': requirements }
run_requires.insert(0, always)
else:
rset = set(always['requires']) | set(requirements)
always['requires'] = sorted(rset)
def __repr__(self):
name = self.name or '(no name)'
version = self.version or 'no version'
return '<%s %s %s (%s)>' % (self.__class__.__name__,
self.metadata_version, name, version)
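# A minimal usage sketch (hypothetical file names; not part of distlib itself):
#   md = Metadata(path='pydist.json')           # 2.0 JSON metadata
#   legacy = Metadata(path='PKG-INFO')          # non-JSON input falls back to LegacyMetadata
#   md.write(path='PKG-INFO.out', legacy=True)  # re-emit in the key-value format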
|
PypiClean
|
/clawpack-5.9.0.tar.gz/clawpack-5.9.0/geoclaw/examples/tsunami/chile2010/setplot_speeds.py
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from clawpack.geoclaw import topotools
from six.moves import range
try:
TG32412 = np.loadtxt('32412_notide.txt')
except:
print("*** Could not load DART data file")
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
from clawpack.visclaw import colormaps, geoplot
from numpy import linspace
plotdata.clearfigures() # clear any old figures,axes,items data
# To plot gauge locations on pcolor or contour plot, use this as
# an afteraxis function:
def addgauges(current_data):
from clawpack.visclaw import gaugetools
gaugetools.plot_gauge_locations(current_data.plotdata, \
gaugenos='all', format_string='ko', add_labels=True)
# ========================================================================
# Water helper functions
# ========================================================================
def b(cd):
return cd.q[3,:,:] - cd.q[0,:,:]
def extract_eta(h,eta,DRY_TOL=10**-3):
        index = np.nonzero((np.abs(h) < DRY_TOL) + np.isnan(h))
eta[index[0],index[1]] = np.nan
return eta
def extract_velocity(h,hu,DRY_TOL=10**-8):
u = np.zeros(hu.shape)
        index = np.nonzero((np.abs(h) > DRY_TOL) * ~np.isnan(h))
u[index[0],index[1]] = hu[index[0],index[1]] / h[index[0],index[1]]
return u
def eta(cd):
return extract_eta(cd.q[0,:,:],cd.q[3,:,:])
def water_u(cd):
return extract_velocity(cd.q[0,:,:],cd.q[1,:,:])
def water_v(cd):
return extract_velocity(cd.q[0,:,:],cd.q[2,:,:])
def water_speed(current_data):
u = water_u(current_data)
v = water_v(current_data)
return np.sqrt(u**2+v**2)
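    # In GeoClaw q = [h, hu, hv, eta]; water_speed returns sqrt((hu/h)^2 + (hv/h)^2)
    # on wet cells (|h| > DRY_TOL) and 0 elsewhere.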
#-----------------------------------------
# Figure for surface
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
def fixup(current_data):
import pylab
addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Surface at %4.2f hours' % t, fontsize=20)
pylab.xticks(fontsize=15)
pylab.yticks(fontsize=15)
plotaxes.afteraxes = fixup
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = -0.2
plotitem.pcolor_cmax = 0.2
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 1
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
plotitem.patchedges_show = 1
plotaxes.xlimits = [-120,-60]
plotaxes.ylimits = [-60,0]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = linspace(-3000,-3000,1)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [1,0,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figure for velocities
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Speeds', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('speeds')
plotaxes.title = 'Speeds'
plotaxes.scaled = True
def fixup(current_data):
import pylab
addgauges(current_data)
t = current_data.t
t = t / 3600. # hours
pylab.title('Speeds at %4.2f hours' % t, fontsize=20)
pylab.xticks(fontsize=15)
pylab.yticks(fontsize=15)
plotaxes.afteraxes = fixup
# Speed
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = water_speed
plotitem.pcolor_cmap = plt.get_cmap('PuBu')
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 0.01
plotitem.add_colorbar = True
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 1
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = 100.0
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [1,1,0]
plotitem.patchedges_show = 1
plotaxes.xlimits = [-120,-60]
plotaxes.ylimits = [-60,0]
#-----------------------------------------
# Figures for gauges
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300, \
type='each_gauge')
plotfigure.clf_each_gauge = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = 'auto'
plotaxes.title = 'Surface'
# Plot surface as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'
# Plot topo as green curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.show = False
def gaugetopo(current_data):
q = current_data.q
h = q[0,:]
eta = q[3,:]
topo = eta - h
return topo
# plotitem.plot_var = gaugetopo
# plotitem.plotstyle = 'g-'
def add_zeroline(current_data):
from pylab import plot, legend, xticks, floor, axis, xlabel
t = current_data.t
gaugeno = current_data.gaugeno
if gaugeno == 32412:
try:
plot(TG32412[:,0], TG32412[:,1], 'r')
legend(['GeoClaw','Obs'], loc='lower right')
except: pass
axis((0,t.max(),-0.3,0.3))
plot(t, 0*t, 'k')
n = int(floor(t.max()/3600.) + 2)
xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])
xlabel('time (hours)')
plotaxes.afteraxes = add_zeroline
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
return plotdata
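# --- Hedged usage sketch (not part of the original example; entry point and output
# --- directories are the usual visclaw defaults, assumed here) ---
# from clawpack.visclaw.plotclaw import plotclaw
# plotclaw(outdir="_output", plotdir="_plots", setplot="setplot_speeds.py")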
|
PypiClean
|
/py-pure-client-1.38.0.tar.gz/py-pure-client-1.38.0/pypureclient/flashblade/FB_2_2/models/policy_member_with_remote_get_response.py
|
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_2 import models
class PolicyMemberWithRemoteGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[PolicyMemberWithRemote]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.PolicyMemberWithRemote]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[PolicyMemberWithRemote]): A list of members for policies.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PolicyMemberWithRemoteGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PolicyMemberWithRemoteGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PolicyMemberWithRemoteGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
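# --- Hedged usage sketch (not part of the generated client; field values are illustrative) ---
if __name__ == "__main__":
    resp = PolicyMemberWithRemoteGetResponse(total_item_count=0, items=[])
    # Only attributes that were explicitly set are serialized.
    print(resp.to_dict())  # {'total_item_count': 0, 'items': []}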
|
PypiClean
|
/taskcc-alipay-sdk-python-3.3.398.tar.gz/taskcc-alipay-sdk-python-3.3.398/alipay/aop/api/request/AntMerchantExpandItemDeleteRequest.py
|
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AntMerchantExpandItemDeleteModel import AntMerchantExpandItemDeleteModel
class AntMerchantExpandItemDeleteRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AntMerchantExpandItemDeleteModel):
self._biz_content = value
else:
self._biz_content = AntMerchantExpandItemDeleteModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'ant.merchant.expand.item.delete'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
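# --- Hedged usage sketch (not part of the SDK; assumes an empty biz model is acceptable
# --- for illustration) ---
if __name__ == "__main__":
    request = AntMerchantExpandItemDeleteRequest(biz_model=AntMerchantExpandItemDeleteModel())
    request.notify_url = "https://example.com/notify"
    # get_params() serializes the biz model into the P_BIZ_CONTENT parameter.
    print(request.get_params())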
|
PypiClean
|
/spacy_wrap-1.4.4-py3-none-any.whl/spacy_wrap/layers/clf_transformer_model.py
|
import copy
from typing import Callable, Type, Union
from spacy_transformers.align import get_alignment
from spacy_transformers.data_classes import HFObjects, WordpieceBatch
from spacy_transformers.layers.hf_wrapper import HFWrapper
from spacy_transformers.layers.transformer_model import (
_convert_transformer_inputs,
_convert_transformer_outputs,
forward,
huggingface_from_pretrained,
huggingface_tokenize,
set_pytorch_transformer,
)
from spacy_transformers.truncate import truncate_oversize_splits
from thinc.api import Model
from transformers import (
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
def init(model: Model, X=None, Y=None):
if model.attrs["has_transformer"]:
return
name = model.attrs["name"]
tok_cfg = model._init_tokenizer_config
trf_cfg = model._init_transformer_config
hf_model = huggingface_from_pretrained(
name,
tok_cfg,
trf_cfg,
model_cls=model.layers[0].shims[0].model_cls,
)
model.attrs["set_transformer"](model, hf_model)
tokenizer = model.tokenizer
# Call the model with a batch of inputs to infer the width
if X:
# If we're dealing with actual texts, do the work to setup the wordpieces
# batch properly
docs = X
get_spans = model.attrs["get_spans"]
nested_spans = get_spans(docs)
flat_spans = []
for doc_spans in nested_spans:
flat_spans.extend(doc_spans)
token_data = huggingface_tokenize(tokenizer, [span.text for span in flat_spans])
wordpieces = WordpieceBatch.from_batch_encoding(token_data)
align = get_alignment(
flat_spans,
wordpieces.strings,
tokenizer.all_special_tokens,
)
wordpieces, align = truncate_oversize_splits(
wordpieces,
align,
tokenizer.model_max_length,
)
else:
texts = ["hello world", "foo bar"]
token_data = huggingface_tokenize(tokenizer, texts)
wordpieces = WordpieceBatch.from_batch_encoding(token_data)
model.layers[0].initialize(X=wordpieces)
class ClassificationTransformerModel(Model):
"""This is a variation of the TransformerModel from spacy-transformers with
some utility regarding listeners removed."""
def __init__(
self,
name: str,
get_spans: Callable,
model_cls: Union[
Type[AutoModelForTokenClassification],
Type[AutoModelForSequenceClassification],
],
tokenizer_config: dict = {},
transformer_config: dict = {},
mixed_precision: bool = False,
grad_scaler_config: dict = {},
):
"""get_spans (Callable[[List[Doc]], List[Span]]):
A function to extract spans from the batch of Doc objects. This
is used to manage long documents, by cutting them into smaller
sequences before running the transformer. The spans are allowed
to overlap, and you can also omit sections of the Doc if
they are not relevant. tokenizer_config (dict): Settings to
pass to the transformers tokenizer. transformer_config (dict):
Settings to pass to the transformers forward pass.
"""
hf_model = HFObjects(None, None, None, tokenizer_config, transformer_config)
wrapper = HFWrapper(
hf_model,
convert_inputs=_convert_transformer_inputs,
convert_outputs=_convert_transformer_outputs,
mixed_precision=mixed_precision,
grad_scaler_config=grad_scaler_config,
model_cls=model_cls,
)
super().__init__(
"clf_transformer",
forward,
init=init,
layers=[wrapper],
dims={"nO": None},
attrs={
"get_spans": get_spans,
"name": name,
"set_transformer": set_pytorch_transformer,
"has_transformer": False,
"flush_cache_chance": 0.0,
},
)
@property
def tokenizer(self):
return self.layers[0].shims[0]._hfmodel.tokenizer
@property
def transformer(self):
return self.layers[0].shims[0]._hfmodel.transformer
@property
def _init_tokenizer_config(self):
return self.layers[0].shims[0]._hfmodel._init_tokenizer_config
@property
def _init_transformer_config(self):
return self.layers[0].shims[0]._hfmodel._init_transformer_config
def copy(self):
"""Create a copy of the model, its attributes, and its parameters.
Any child layers will also be deep-copied. The copy will receive
a distinct `model.id` value.
"""
copied = ClassificationTransformerModel(
    self.attrs["name"],
    self.attrs["get_spans"],
    # model_cls is a required constructor argument; reuse the class stored on the wrapped shim
    model_cls=self.layers[0].shims[0].model_cls,
)
params = {}
for name in self.param_names:
params[name] = self.get_param(name) if self.has_param(name) else None
copied.params = copy.deepcopy(params)
copied.dims = copy.deepcopy(self._dims)
copied.layers[0] = copy.deepcopy(self.layers[0])
for name in self.grad_names:
copied.set_grad(name, self.get_grad(name).copy())
return copied
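# --- Hedged construction sketch (not part of the package; the HF model name is an
# --- assumption and the span getter is a trivial whole-doc getter) ---
if __name__ == "__main__":
    model = ClassificationTransformerModel(
        name="distilbert-base-uncased",
        get_spans=lambda docs: [[doc[:]] for doc in docs],
        model_cls=AutoModelForSequenceClassification,
    )
    # The pretrained weights are only loaded when `init` runs on a batch of Docs.
    print(model.attrs["has_transformer"])  # False until initialized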
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/me/messages/item/mark_as_junk/mark_as_junk_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import mark_as_junk_post_request_body
from .....models import message
from .....models.o_data_errors import o_data_error
class MarkAsJunkRequestBuilder():
"""
Provides operations to call the markAsJunk method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new MarkAsJunkRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/me/messages/{message%2Did}/markAsJunk"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def post(self,body: Optional[mark_as_junk_post_request_body.MarkAsJunkPostRequestBody] = None, request_configuration: Optional[MarkAsJunkRequestBuilderPostRequestConfiguration] = None) -> Optional[message.Message]:
"""
Invoke action markAsJunk
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: Optional[message.Message]
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from .....models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
from .....models import message
return await self.request_adapter.send_async(request_info, message.Message, error_mapping)
def to_post_request_information(self,body: Optional[mark_as_junk_post_request_body.MarkAsJunkPostRequestBody] = None, request_configuration: Optional[MarkAsJunkRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Invoke action markAsJunk
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
request_info.headers["Accept"] = ["application/json"]
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@dataclass
class MarkAsJunkRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
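# --- Hedged usage sketch (illustrative only; the adapter wiring and message id are assumptions) ---
# builder = MarkAsJunkRequestBuilder(request_adapter, {"message%2Did": "AAMkAG..."})
# body = mark_as_junk_post_request_body.MarkAsJunkPostRequestBody()
# message = await builder.post(body)   # POSTs to /me/messages/{message-id}/markAsJunk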
|
PypiClean
|
/kinstabot-0.117.5-py3-none-any.whl/instabot/api/api_photo.py
|
from __future__ import unicode_literals
import imghdr
import os
import shutil
import struct
import json
import time
import random
from uuid import uuid4
from . import config
def download_photo(self, media_id, filename, media=False, folder="photos"):
if not media:
self.media_info(media_id)
if not self.last_json.get("items"):
return True
media = self.last_json["items"][0]
if media["media_type"] == 2:
return True
elif media["media_type"] == 1:
filename = (
"{username}_{media_id}.jpg".format(
username=media["user"]["username"], media_id=media_id
)
if not filename
else "{fname}.jpg".format(fname=filename)
)
images = media["image_versions2"]["candidates"]
fname = os.path.join(folder, filename)
if os.path.exists(fname):
self.logger.info("File already esists, skipping...")
return os.path.abspath(fname)
response = self.session.get(images[0]["url"], stream=True)
if response.status_code == 200:
with open(fname, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
return os.path.abspath(fname)
else:
success = False
video_included = False
for index in range(len(media["carousel_media"])):
if media["carousel_media"][index]["media_type"] != 1:
video_included = True
continue
filename_i = (
"{username}_{media_id}_{i}.jpg".format(
username=media["user"]["username"], media_id=media_id, i=index
)
if not filename
else "{fname}_{i}.jpg".format(fname=filename, i=index)
)
images = media["carousel_media"][index]["image_versions2"]["candidates"]
fname = os.path.join(folder, filename_i)
if os.path.exists(fname):
return os.path.abspath(fname)
response = self.session.get(images[0]["url"], stream=True)
if response.status_code == 200:
success = True
with open(fname, "wb") as f:
response.raw.decode_content = True
shutil.copyfileobj(response.raw, f)
if success:
return os.path.abspath(fname)
elif video_included:
return True
def compatible_aspect_ratio(size):
min_ratio, max_ratio = 4.0 / 5.0, 90.0 / 47.0
width, height = size
ratio = width * 1.0 / height * 1.0
print("FOUND: w:{w} h:{h} r:{r}".format(w=width, h=height, r=ratio))
return min_ratio <= ratio <= max_ratio
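# Hedged illustration (not part of the original module):
#   compatible_aspect_ratio((1080, 1350))  # True  -- 4:5 portrait, exactly the lower bound
#   compatible_aspect_ratio((1080, 1920))  # False -- 9:16 story is too tall for feed uploads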
def configure_photo(self, upload_id, photo, caption="", user_tags=None, is_sidecar=False):
width, height = get_image_size(photo)
data = {
"media_folder": "Instagram",
"source_type": 4,
"caption": caption,
"upload_id": upload_id,
"device": self.device_settings,
"edits": {
"crop_original_size": [width * 1.0, height * 1.0],
"crop_center": [0.0, 0.0],
"crop_zoom": 1.0,
},
"extra": {"source_width": width, "source_height": height},
}
if user_tags:
data['usertags'] = user_tags
if is_sidecar:
return data
data = self.json_data(data)
return self.send_request("media/configure/?", data)
def upload_photo(
self,
photo,
caption=None,
upload_id=None,
from_video=False,
force_resize=False,
options={},
user_tags=None,
is_sidecar=False
):
"""Upload photo to Instagram
@param photo Path to photo file (String)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, then generate
automatically
@param from_video A flag that signals whether the photo is loaded from
the video or by itself
(Boolean, DEPRECATED: not used)
@param force_resize Force photo resize (Boolean)
@param options Object with difference options, e.g.
configure_timeout, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@param user_tags Tag other users (List)
user_tags = [
    {"user_id": user_id, "x": x, "y": y}
]
@param is_sidecar An album element (Boolean)
@return Object with state of uploading to Instagram (or False), Dict for is_sidecar
"""
if user_tags is None:
usertags = None
else:
tags = {'in': [{'user_id': user['user_id'], 'position': [user['x'], user['y']]} for user in user_tags]}
usertags = json.dumps(tags, separators=(',', ':'))
options = dict({"configure_timeout": 15, "rename": True}, **(options or {}))
if upload_id is None:
upload_id = int(time.time() * 1000)
if not photo:
return False
if not compatible_aspect_ratio(get_image_size(photo)):
self.logger.error("Photo does not have a compatible photo aspect ratio.")
if force_resize:
photo = resize_image(photo)
else:
return False
waterfall_id = str(uuid4())
# upload_name example: '1576102477530_0_7823256191'
# upload_name example: 'fb_uploader_1585807380927'
upload_name = "fb_uploader_{upload_id}".format(upload_id=upload_id)
rupload_params = {
"retry_context": '{"num_step_auto_retry":0,"num_reupload":0,"num_step_manual_retry":0}',
"media_type": "1",
"xsharing_user_ids": "[]",
"upload_id": upload_id,
"image_compression": json.dumps(
{"lib_name": "moz", "lib_version": "3.1.m", "quality": "80"}
),
}
if is_sidecar:
rupload_params["is_sidecar"] = "1"
with open(photo, "rb") as f:
    photo_data = f.read()
photo_len = str(len(photo_data))
self.session.headers.update(
{
"Accept-Encoding": "gzip",
"X-Instagram-Rupload-Params": json.dumps(rupload_params),
"X_FB_PHOTO_WATERFALL_ID": waterfall_id,
"X-Entity-Type": "image/jpeg",
"Offset": "0",
"X-Entity-Name": upload_name,
"X-Entity-Length": photo_len,
"Content-Type": "application/octet-stream",
"Content-Length": photo_len,
"Accept-Encoding": "gzip",
}
)
response = self.session.post(
"https://{domain}/rupload_igphoto/{name}".format(
domain=config.API_DOMAIN, name=upload_name
),
data=photo_data,
)
if response.status_code != 200:
self.logger.error(
"Photo Upload failed with the following response: {}".format(response)
)
return False
# update the upload id
upload_id = int(response.json()['upload_id'])
if from_video:
# Not configure when from_video is True
return True
# CONFIGURE
configure_timeout = options.get("configure_timeout")
for attempt in range(4):
if configure_timeout:
time.sleep(configure_timeout)
if is_sidecar:
configuration = self.configure_photo(upload_id, photo, caption, usertags, is_sidecar=True)
if options.get("rename"):
os.rename(photo, "{fname}.REMOVE_ME".format(fname=photo))
return configuration
elif self.configure_photo(upload_id, photo, caption, usertags, is_sidecar=False):
media = self.last_json.get("media")
self.expose()
if options.get("rename"):
os.rename(photo, "{fname}.REMOVE_ME".format(fname=photo))
return media
return False
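# --- Hedged usage sketch (illustrative; assumes the usual instabot wiring where these
# --- functions are bound as methods of the API/Bot objects) ---
# from instabot import Bot
# bot = Bot()
# bot.login(username="...", password="...")
# bot.api.upload_photo("photo.jpg", caption="hello world", force_resize=True)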
def upload_album(
self,
photos,
caption=None,
upload_id=None,
from_video=False,
force_resize=False,
options={},
user_tags=None
):
"""Upload album to Instagram
@param photos List of paths to photo files (List of strings)
@param caption Media description (String)
@param upload_id Unique upload_id (String). When None, then generate
automatically
@param from_video A flag that signals whether the photo is loaded from
the video or by itself
(Boolean, DEPRECATED: not used)
@param force_resize Force photo resize (Boolean)
@param options Object with difference options, e.g.
configure_timeout, rename (Dict)
Designed to reduce the number of function arguments!
This is the simplest request object.
@param user_tags Tag other users (List); same format as for upload_photo
@return Boolean
"""
if not photos:
return False
photo_metas = []
for photo in photos:
result = self.upload_photo(photo, caption, None, from_video, force_resize, options, user_tags, is_sidecar=True)
if not result:
self.logger.error("Could not upload photo {photo} for the album!".format(photo=photo))
return False
photo_metas.append(result)
if upload_id is None:
upload_id = int(time.time() * 1000)
data = self.json_data({
"caption": caption,
"client_sidecar_id": upload_id,
"children_metadata": photo_metas
})
return self.send_request("media/configure_sidecar/?", post=data)
def get_image_size(fname):
with open(fname, "rb") as fhandle:
head = fhandle.read(24)
if len(head) != 24:
raise RuntimeError("Invalid Header")
if imghdr.what(fname) == "png":
check = struct.unpack(">i", head[4:8])[0]
if check != 0x0D0A1A0A:
raise RuntimeError("PNG: Invalid check")
width, height = struct.unpack(">ii", head[16:24])
elif imghdr.what(fname) == "gif":
width, height = struct.unpack("<HH", head[6:10])
elif imghdr.what(fname) == "jpeg":
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xC0 <= ftype <= 0xCF:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xFF:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack(">H", fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack(">HH", fhandle.read(4))
else:
raise RuntimeError("Unsupported format")
return width, height
def resize_image(fname):
from math import ceil
try:
from PIL import Image, ExifTags
except ImportError as e:
print("ERROR: {err}".format(err=e))
print(
"Required module `PIL` not installed\n"
"Install with `pip install Pillow` and retry"
)
return False
print("Analizing `{fname}`".format(fname=fname))
h_lim = {"w": 90.0, "h": 47.0}
v_lim = {"w": 4.0, "h": 5.0}
img = Image.open(fname)
(w, h) = img.size
deg = 0
try:
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == "Orientation":
break
exif = dict(img._getexif().items())
o = exif[orientation]
if o == 3:
deg = 180
if o == 6:
deg = 270
if o == 8:
deg = 90
if deg != 0:
print("Rotating by {d} degrees".format(d=deg))
img = img.rotate(deg, expand=True)
(w, h) = img.size
except (AttributeError, KeyError, IndexError) as e:
print("No exif info found (ERR: {err})".format(err=e))
pass
img = img.convert("RGBA")
ratio = w * 1.0 / h * 1.0
print("FOUND w:{w}, h:{h}, ratio={r}".format(w=w, h=h, r=ratio))
if w > h:
print("Horizontal image")
if ratio > (h_lim["w"] / h_lim["h"]):
print("Cropping image")
cut = int(ceil((w - h * h_lim["w"] / h_lim["h"]) / 2))
left = cut
right = w - cut
top = 0
bottom = h
img = img.crop((left, top, right, bottom))
(w, h) = img.size
if w > 1080:
print("Resizing image")
nw = 1080
nh = int(ceil(1080.0 * h / w))
img = img.resize((nw, nh), Image.ANTIALIAS)
elif w < h:
print("Vertical image")
if ratio < (v_lim["w"] / v_lim["h"]):
print("Cropping image")
cut = int(ceil((h - w * v_lim["h"] / v_lim["w"]) / 2))
left = 0
right = w
top = cut
bottom = h - cut
img = img.crop((left, top, right, bottom))
(w, h) = img.size
if h > 1080:
print("Resizing image")
nw = int(ceil(1080.0 * w / h))
nh = 1080
img = img.resize((nw, nh), Image.ANTIALIAS)
else:
print("Square image")
if w > 1080:
print("Resizing image")
img = img.resize((1080, 1080), Image.ANTIALIAS)
(w, h) = img.size
new_fname = "{fname}.CONVERTED.jpg".format(fname=fname)
print("Saving new image w:{w} h:{h} to `{f}`".format(w=w, h=h, f=new_fname))
new = Image.new("RGB", img.size, (255, 255, 255))
new.paste(img, (0, 0, w, h), img)
new.save(new_fname, quality=95)
return new_fname
def stories_shaper(fname):
"""
Find out the size of the uploaded image. Processing is not needed if the
image is already 1080x1920 pixels. Otherwise, the image height should be
1920 pixels. Substrate formation: Crop the image under 1080x1920 pixels
and apply a Gaussian Blur filter. Centering the image depending on its
aspect ratio and paste it onto the substrate. Save the image.
"""
try:
from PIL import Image, ImageFilter
except ImportError as e:
print("ERROR: {err}".format(err=e))
print(
"Required module `PIL` not installed\n"
"Install with `pip install Pillow` and retry"
)
return False
img = Image.open(fname)
if (img.size[0], img.size[1]) == (1080, 1920):
print("Image is already 1080x1920. Just converting image.")
new_fname = "{fname}.STORIES.jpg".format(fname=fname)
new = Image.new("RGB", (img.size[0], img.size[1]), (255, 255, 255))
new.paste(img, (0, 0, img.size[0], img.size[1]))
new.save(new_fname)
return new_fname
else:
min_width = 1080
min_height = 1920
if img.size[1] != 1920:
height_percent = min_height / float(img.size[1])
width_size = int(float(img.size[0]) * float(height_percent))
img = img.resize((width_size, min_height), Image.ANTIALIAS)
else:
pass
if img.size[0] < 1080:
width_percent = min_width / float(img.size[0])
height_size = int(float(img.size[1]) * float(width_percent))
img_bg = img.resize((min_width, height_size), Image.ANTIALIAS)
else:
pass
img_bg = img.crop(
(
int((img.size[0] - 1080) / 2),
int((img.size[1] - 1920) / 2),
int(1080 + ((img.size[0] - 1080) / 2)),
int(1920 + ((img.size[1] - 1920) / 2)),
)
).filter(ImageFilter.GaussianBlur(100))
if img.size[1] > img.size[0]:
height_percent = min_height / float(img.size[1])
width_size = int(float(img.size[0]) * float(height_percent))
img = img.resize((width_size, min_height), Image.ANTIALIAS)
if img.size[0] > 1080:
width_percent = min_width / float(img.size[0])
height_size = int(float(img.size[1]) * float(width_percent))
img = img.resize((min_width, height_size), Image.ANTIALIAS)
img_bg.paste(
img, (int(540 - img.size[0] / 2), int(960 - img.size[1] / 2))
)
else:
img_bg.paste(img, (int(540 - img.size[0] / 2), 0))
else:
width_percent = min_width / float(img.size[0])
height_size = int(float(img.size[1]) * float(width_percent))
img = img.resize((min_width, height_size), Image.ANTIALIAS)
img_bg.paste(img, (int(540 - img.size[0] / 2), int(960 - img.size[1] / 2)))
new_fname = "{fname}.STORIES.jpg".format(fname=fname)
print(
"Saving new image w:{w} h:{h} to `{f}`".format(
w=img_bg.size[0], h=img_bg.size[1], f=new_fname
)
)
new = Image.new("RGB", (img_bg.size[0], img_bg.size[1]), (255, 255, 255))
new.paste(img_bg, (0, 0, img_bg.size[0], img_bg.size[1]))
new.save(new_fname)
return new_fname
|
PypiClean
|
/columbia-discord-bot-0.2.1.tar.gz/columbia-discord-bot-0.2.1/docs/_build/html/_static/pygments/lexers/fortran.py
|
import re
from pygments.lexer import RegexLexer, bygroups, include, words, using, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['FortranLexer', 'FortranFixedLexer']
class FortranLexer(RegexLexer):
"""
Lexer for FORTRAN 90 code.
.. versionadded:: 0.10
"""
name = 'Fortran'
url = 'https://fortran-lang.org/'
aliases = ['fortran', 'f90']
filenames = ['*.f03', '*.f90', '*.F03', '*.F90']
mimetypes = ['text/x-fortran']
flags = re.IGNORECASE | re.MULTILINE
# Data Types: INTEGER, REAL, COMPLEX, LOGICAL, CHARACTER and DOUBLE PRECISION
# Operators: **, *, +, -, /, <, >, <=, >=, ==, /=
# Logical (?): NOT, AND, OR, EQV, NEQV
# Builtins:
# http://gcc.gnu.org/onlinedocs/gcc-3.4.6/g77/Table-of-Intrinsic-Functions.html
tokens = {
'root': [
(r'^#.*\n', Comment.Preproc),
(r'!.*\n', Comment),
include('strings'),
include('core'),
(r'[a-z][\w$]*', Name),
include('nums'),
(r'[\s]+', Text.Whitespace),
],
'core': [
# Statements
(r'\b(DO)(\s+)(CONCURRENT)\b', bygroups(Keyword, Text.Whitespace, Keyword)),
(r'\b(GO)(\s*)(TO)\b', bygroups(Keyword, Text.Whitespace, Keyword)),
(words((
'ABSTRACT', 'ACCEPT', 'ALL', 'ALLSTOP', 'ALLOCATABLE', 'ALLOCATE',
'ARRAY', 'ASSIGN', 'ASSOCIATE', 'ASYNCHRONOUS', 'BACKSPACE', 'BIND',
'BLOCK', 'BLOCKDATA', 'BYTE', 'CALL', 'CASE', 'CLASS', 'CLOSE',
'CODIMENSION', 'COMMON', 'CONTIGUOUS', 'CONTAINS',
'CONTINUE', 'CRITICAL', 'CYCLE', 'DATA', 'DEALLOCATE', 'DECODE',
'DEFERRED', 'DIMENSION', 'DO', 'ELEMENTAL', 'ELSE', 'ENCODE', 'END',
'ENDASSOCIATE', 'ENDBLOCK', 'ENDDO', 'ENDENUM', 'ENDFORALL',
'ENDFUNCTION', 'ENDIF', 'ENDINTERFACE', 'ENDMODULE', 'ENDPROGRAM',
'ENDSELECT', 'ENDSUBMODULE', 'ENDSUBROUTINE', 'ENDTYPE', 'ENDWHERE',
'ENTRY', 'ENUM', 'ENUMERATOR', 'EQUIVALENCE', 'ERROR STOP', 'EXIT',
'EXTENDS', 'EXTERNAL', 'EXTRINSIC', 'FILE', 'FINAL', 'FORALL', 'FORMAT',
'FUNCTION', 'GENERIC', 'IF', 'IMAGES', 'IMPLICIT',
'IMPORT', 'IMPURE', 'INCLUDE', 'INQUIRE', 'INTENT', 'INTERFACE',
'INTRINSIC', 'IS', 'LOCK', 'MEMORY', 'MODULE', 'NAMELIST', 'NULLIFY',
'NONE', 'NON_INTRINSIC', 'NON_OVERRIDABLE', 'NOPASS', 'ONLY', 'OPEN',
'OPTIONAL', 'OPTIONS', 'PARAMETER', 'PASS', 'PAUSE', 'POINTER', 'PRINT',
'PRIVATE', 'PROGRAM', 'PROCEDURE', 'PROTECTED', 'PUBLIC', 'PURE', 'READ',
'RECURSIVE', 'RESULT', 'RETURN', 'REWIND', 'SAVE', 'SELECT', 'SEQUENCE',
'STOP', 'SUBMODULE', 'SUBROUTINE', 'SYNC', 'SYNCALL', 'SYNCIMAGES',
'SYNCMEMORY', 'TARGET', 'THEN', 'TYPE', 'UNLOCK', 'USE', 'VALUE',
'VOLATILE', 'WHERE', 'WRITE', 'WHILE'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'CHARACTER', 'COMPLEX', 'DOUBLE PRECISION', 'DOUBLE COMPLEX', 'INTEGER',
'LOGICAL', 'REAL', 'C_INT', 'C_SHORT', 'C_LONG', 'C_LONG_LONG',
'C_SIGNED_CHAR', 'C_SIZE_T', 'C_INT8_T', 'C_INT16_T', 'C_INT32_T',
'C_INT64_T', 'C_INT_LEAST8_T', 'C_INT_LEAST16_T', 'C_INT_LEAST32_T',
'C_INT_LEAST64_T', 'C_INT_FAST8_T', 'C_INT_FAST16_T', 'C_INT_FAST32_T',
'C_INT_FAST64_T', 'C_INTMAX_T', 'C_INTPTR_T', 'C_FLOAT', 'C_DOUBLE',
'C_LONG_DOUBLE', 'C_FLOAT_COMPLEX', 'C_DOUBLE_COMPLEX',
'C_LONG_DOUBLE_COMPLEX', 'C_BOOL', 'C_CHAR', 'C_PTR', 'C_FUNPTR'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'(\*\*|\*|\+|-|\/|<|>|<=|>=|==|\/=|=)', Operator),
(r'(::)', Keyword.Declaration),
(r'[()\[\],:&%;.]', Punctuation),
# Intrinsics
(words((
'Abort', 'Abs', 'Access', 'AChar', 'ACos', 'ACosH', 'AdjustL',
'AdjustR', 'AImag', 'AInt', 'Alarm', 'All', 'Allocated', 'ALog',
'AMax', 'AMin', 'AMod', 'And', 'ANInt', 'Any', 'ASin', 'ASinH',
'Associated', 'ATan', 'ATanH', 'Atomic_Define', 'Atomic_Ref',
'BesJ', 'BesJN', 'Bessel_J0', 'Bessel_J1', 'Bessel_JN', 'Bessel_Y0',
'Bessel_Y1', 'Bessel_YN', 'BesY', 'BesYN', 'BGE', 'BGT', 'BLE',
'BLT', 'Bit_Size', 'BTest', 'CAbs', 'CCos', 'Ceiling', 'CExp',
'Char', 'ChDir', 'ChMod', 'CLog', 'Cmplx', 'Command_Argument_Count',
'Complex', 'Conjg', 'Cos', 'CosH', 'Count', 'CPU_Time', 'CShift',
'CSin', 'CSqRt', 'CTime', 'C_Loc', 'C_Associated',
'C_Null_Ptr', 'C_Null_Funptr', 'C_F_Pointer', 'C_F_ProcPointer',
'C_Null_Char', 'C_Alert', 'C_Backspace', 'C_Form_Feed', 'C_FunLoc',
'C_Sizeof', 'C_New_Line', 'C_Carriage_Return',
'C_Horizontal_Tab', 'C_Vertical_Tab', 'DAbs', 'DACos', 'DASin',
'DATan', 'Date_and_Time', 'DbesJ', 'DbesJN', 'DbesY',
'DbesYN', 'Dble', 'DCos', 'DCosH', 'DDiM', 'DErF',
'DErFC', 'DExp', 'Digits', 'DiM', 'DInt', 'DLog', 'DMax',
'DMin', 'DMod', 'DNInt', 'Dot_Product', 'DProd', 'DSign', 'DSinH',
'DShiftL', 'DShiftR', 'DSin', 'DSqRt', 'DTanH', 'DTan', 'DTime',
'EOShift', 'Epsilon', 'ErF', 'ErFC', 'ErFC_Scaled', 'ETime',
'Execute_Command_Line', 'Exit', 'Exp', 'Exponent', 'Extends_Type_Of',
'FDate', 'FGet', 'FGetC', 'FindLoc', 'Float', 'Floor', 'Flush',
'FNum', 'FPutC', 'FPut', 'Fraction', 'FSeek', 'FStat', 'FTell',
'Gamma', 'GError', 'GetArg', 'Get_Command', 'Get_Command_Argument',
'Get_Environment_Variable', 'GetCWD', 'GetEnv', 'GetGId', 'GetLog',
'GetPId', 'GetUId', 'GMTime', 'HostNm', 'Huge', 'Hypot', 'IAbs',
'IAChar', 'IAll', 'IAnd', 'IAny', 'IArgC', 'IBClr', 'IBits',
'IBSet', 'IChar', 'IDate', 'IDiM', 'IDInt', 'IDNInt', 'IEOr',
'IErrNo', 'IFix', 'Imag', 'ImagPart', 'Image_Index', 'Index',
'Int', 'IOr', 'IParity', 'IRand', 'IsaTty', 'IShft', 'IShftC',
'ISign', 'Iso_C_Binding', 'Is_Contiguous', 'Is_Iostat_End',
'Is_Iostat_Eor', 'ITime', 'Kill', 'Kind', 'LBound', 'LCoBound',
'Len', 'Len_Trim', 'LGe', 'LGt', 'Link', 'LLe', 'LLt', 'LnBlnk',
'Loc', 'Log', 'Log_Gamma', 'Logical', 'Long', 'LShift', 'LStat',
'LTime', 'MaskL', 'MaskR', 'MatMul', 'Max', 'MaxExponent',
'MaxLoc', 'MaxVal', 'MClock', 'Merge', 'Merge_Bits', 'Move_Alloc',
'Min', 'MinExponent', 'MinLoc', 'MinVal', 'Mod', 'Modulo', 'MvBits',
'Nearest', 'New_Line', 'NInt', 'Norm2', 'Not', 'Null', 'Num_Images',
'Or', 'Pack', 'Parity', 'PError', 'Precision', 'Present', 'Product',
'Radix', 'Rand', 'Random_Number', 'Random_Seed', 'Range', 'Real',
'RealPart', 'Rename', 'Repeat', 'Reshape', 'RRSpacing', 'RShift',
'Same_Type_As', 'Scale', 'Scan', 'Second', 'Selected_Char_Kind',
'Selected_Int_Kind', 'Selected_Real_Kind', 'Set_Exponent', 'Shape',
'ShiftA', 'ShiftL', 'ShiftR', 'Short', 'Sign', 'Signal', 'SinH',
'Sin', 'Sleep', 'Sngl', 'Spacing', 'Spread', 'SqRt', 'SRand',
'Stat', 'Storage_Size', 'Sum', 'SymLnk', 'System', 'System_Clock',
'Tan', 'TanH', 'Time', 'This_Image', 'Tiny', 'TrailZ', 'Transfer',
'Transpose', 'Trim', 'TtyNam', 'UBound', 'UCoBound', 'UMask',
'Unlink', 'Unpack', 'Verify', 'XOr', 'ZAbs', 'ZCos', 'ZExp',
'ZLog', 'ZSin', 'ZSqRt'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
# Booleans
(r'\.(true|false)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|eqv|neqv)\.', Operator.Word),
],
'strings': [
(r'"(\\[0-7]+|\\[^0-7]|[^"\\])*"', String.Double),
(r"'(\\[0-7]+|\\[^0-7]|[^'\\])*'", String.Single),
],
'nums': [
(r'\d+(?![.e])(_([1-9]|[a-z]\w*))?', Number.Integer),
(r'[+-]?\d*\.\d+([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
(r'[+-]?\d+\.\d*([ed][-+]?\d+)?(_([1-9]|[a-z]\w*))?', Number.Float),
(r'[+-]?\d+(\.\d*)?[ed][-+]?\d+(_([1-9]|[a-z]\w*))?', Number.Float),
],
}
class FortranFixedLexer(RegexLexer):
"""
Lexer for fixed format Fortran.
.. versionadded:: 2.1
"""
name = 'FortranFixed'
aliases = ['fortranfixed']
filenames = ['*.f', '*.F']
flags = re.IGNORECASE
def _lex_fortran(self, match, ctx=None):
"""Lex a line just as free form fortran without line break."""
lexer = FortranLexer()
text = match.group(0) + "\n"
for index, token, value in lexer.get_tokens_unprocessed(text):
value = value.replace('\n', '')
if value != '':
yield index, token, value
tokens = {
'root': [
(r'[C*].*\n', Comment),
(r'#.*\n', Comment.Preproc),
(r' {0,4}!.*\n', Comment),
(r'(.{5})', Name.Label, 'cont-char'),
(r'.*\n', using(FortranLexer)),
],
'cont-char': [
(' ', Text, 'code'),
('0', Comment, 'code'),
('.', Generic.Strong, 'code'),
],
'code': [
(r'(.{66})(.*)(\n)',
bygroups(_lex_fortran, Comment, Text.Whitespace), 'root'),
(r'(.*)(\n)', bygroups(_lex_fortran, Text.Whitespace), 'root'),
default('root'),
]
}
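# --- Hedged usage sketch (standard pygments API; the sample program is made up) ---
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = "program hello\n  print *, 'Hello, world'\nend program hello\n"
    print(highlight(sample, FortranLexer(), TerminalFormatter()))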
|
PypiClean
|
/pyqtgraph-for-dubble-bubble-2016.11.2.tar.gz/pyqtgraph-for-dubble-bubble-2016.11.2/pyqtgraph/graphicsItems/ErrorBarItem.py
|
from ..Qt import QtGui, QtCore
from .GraphicsObject import GraphicsObject
from .. import getConfigOption
from .. import functions as fn
__all__ = ['ErrorBarItem']
class ErrorBarItem(GraphicsObject):
def __init__(self, **opts):
"""
All keyword arguments are passed to setData().
"""
GraphicsObject.__init__(self)
self.opts = dict(
x=None,
y=None,
height=None,
width=None,
top=None,
bottom=None,
left=None,
right=None,
beam=None,
pen=None
)
self.setData(**opts)
def setData(self, **opts):
"""
Update the data in the item. All arguments are optional.
Valid keyword options are:
x, y, height, width, top, bottom, left, right, beam, pen
* x and y must be numpy arrays specifying the coordinates of data points.
* height, width, top, bottom, left, right, and beam may be numpy arrays,
single values, or None to disable. All values should be positive.
* top, bottom, left, and right specify the lengths of bars extending
in each direction.
* If height is specified, it overrides top and bottom.
* If width is specified, it overrides left and right.
* beam specifies the width of the beam at the end of each bar.
* pen may be any single argument accepted by pg.mkPen().
This method was added in version 0.9.9. For prior versions, use setOpts.
"""
self.opts.update(opts)
self.path = None
self.update()
self.prepareGeometryChange()
self.informViewBoundsChanged()
def setOpts(self, **opts):
# for backward compatibility
self.setData(**opts)
def drawPath(self):
p = QtGui.QPainterPath()
x, y = self.opts['x'], self.opts['y']
if x is None or y is None:
return
beam = self.opts['beam']
height, top, bottom = self.opts['height'], self.opts['top'], self.opts['bottom']
if height is not None or top is not None or bottom is not None:
## draw vertical error bars
if height is not None:
y1 = y - height/2.
y2 = y + height/2.
else:
if bottom is None:
y1 = y
else:
y1 = y - bottom
if top is None:
y2 = y
else:
y2 = y + top
for i in range(len(x)):
p.moveTo(x[i], y1[i])
p.lineTo(x[i], y2[i])
if beam is not None and beam > 0:
x1 = x - beam/2.
x2 = x + beam/2.
if height is not None or top is not None:
for i in range(len(x)):
p.moveTo(x1[i], y2[i])
p.lineTo(x2[i], y2[i])
if height is not None or bottom is not None:
for i in range(len(x)):
p.moveTo(x1[i], y1[i])
p.lineTo(x2[i], y1[i])
width, right, left = self.opts['width'], self.opts['right'], self.opts['left']
if width is not None or right is not None or left is not None:
## draw horizontal error bars
if width is not None:
x1 = x - width/2.
x2 = x + width/2.
else:
if left is None:
x1 = x
else:
x1 = x - left
if right is None:
x2 = x
else:
x2 = x + right
for i in range(len(x)):
p.moveTo(x1[i], y[i])
p.lineTo(x2[i], y[i])
if beam is not None and beam > 0:
y1 = y - beam/2.
y2 = y + beam/2.
if width is not None or right is not None:
for i in range(len(x)):
p.moveTo(x2[i], y1[i])
p.lineTo(x2[i], y2[i])
if width is not None or left is not None:
for i in range(len(x)):
p.moveTo(x1[i], y1[i])
p.lineTo(x1[i], y2[i])
self.path = p
self.prepareGeometryChange()
def paint(self, p, *args):
if self.path is None:
self.drawPath()
pen = self.opts['pen']
if pen is None:
pen = getConfigOption('foreground')
p.setPen(fn.mkPen(pen))
p.drawPath(self.path)
def boundingRect(self):
if self.path is None:
self.drawPath()
return self.path.boundingRect()
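# --- Hedged usage sketch (illustrative; assumes a running Qt application) ---
# import numpy as np
# import pyqtgraph as pg
# x = np.arange(10); y = np.sin(x)
# err = ErrorBarItem(x=x, y=y, height=0.2 * np.ones(10), beam=0.3)
# plot = pg.plot(x, y, symbol='o')
# plot.addItem(err)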
|
PypiClean
|
/troposphere_mate-0.0.14.tar.gz/troposphere_mate-0.0.14/troposphere_mate/core/orch_core.py
|
try:
from typing import List, Tuple
except ImportError:
pass
from collections import OrderedDict
DEPLOY_NOW_SIGNAL_TIER_NAME = "deploy"
DEPLOY_NOW_SIGNAL_TIER_ENV = "now"
def extract_all_env_tag(plan):
"""
:type plan: List[Tuple[str, str]]
:param plan: [(can_id1, env_tag1), (can_id2, env_tag2), ...]
:rtype: List[str]
"""
env_tag_dict = OrderedDict()
for tier_name, tier_env in plan:
if tier_name.lower() == DEPLOY_NOW_SIGNAL_TIER_NAME and tier_env.lower() == DEPLOY_NOW_SIGNAL_TIER_ENV:
pass
else:
env_tag_dict[tier_env] = None
return list(env_tag_dict)
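# Hedged example (tier names are illustrative):
#   extract_all_env_tag([("web", "dev"), ("deploy", "now"), ("db", "prod")])
#   # -> ["dev", "prod"]   (the deploy-now signal rows carry no environment tag)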
def resolve_pipeline(plan):
"""
Convert the item-to-deploy pipeline syntax into a list of execution plans.
:type plan: List[Tuple[str, str]]
:param plan: [(can_id1, env_tag1), (can_id2, env_tag2), ...]
:rtype: List[Tuple[List[str], str]]
"""
pipeline_change_set = list()
job = ([], None)
previous_env = None
for tier_name, tier_env in plan:
if tier_name.lower() == DEPLOY_NOW_SIGNAL_TIER_NAME and tier_env.lower() == DEPLOY_NOW_SIGNAL_TIER_ENV:
if job != pipeline_change_set[-1]:
pipeline_change_set.append(job)
job = (list(job[0]),
job[1]) # new job just do a deep copy of the old one, since it encounters a deploy now signal
continue
if tier_env != previous_env:
# if it is not empty, then it could be a item right after a deploy now signal
if len(pipeline_change_set):
if job != pipeline_change_set[-1]:
pipeline_change_set.append(job)
else: # if it is empty, then it is the first job, just append it
pipeline_change_set.append(job)
previous_env = tier_env
job = ([tier_name, ], tier_env)
else:
job[0].append(tier_name)
if job != pipeline_change_set[-1]:
pipeline_change_set.append(job)
pipeline_change_set = pipeline_change_set[1:]
dct = dict()
pipeline = list()
for tier_list, tier_env in pipeline_change_set:
if tier_env in dct:
for tier_name in tier_list:
if tier_name not in dct[tier_env]:
dct[tier_env].append(tier_name)
else:
dct[tier_env] = tier_list
pipeline.append((list(dct[tier_env]), tier_env))
return pipeline
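# Hedged example (tier names are illustrative); a "deploy now" row flushes the tiers
# collected so far into their own execution step:
#   resolve_pipeline([("web", "dev"), ("deploy", "now"), ("db", "dev")])
#   # -> [(["web"], "dev"), (["web", "db"], "dev")]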
class ResourceFilter(object):
"""
Construct a Resource Filter Class to decide if a specific AWS Resource
should be ignored or not.
1. Explicit Deny
2. Explicit Allow
3. Default Deny
"""
def __init__(self,
ignored_stack_id_list,
allowed_stack_id_list):
self.ignored_stack_id_list = ignored_stack_id_list
self.allowed_stack_id_list = allowed_stack_id_list
def filter(self, resource, template):
"""
Check if we want to keep this resource in the cloudformation.
If ``True``, we keep it. if ``False`` we call
``Template.remove_resource(resource)`` to remove it,
:type resource: AWSObject
:type template: Template
:rtype: bool
"""
# if resource.
if resource.resource_type == "AWS::CloudFormation::Stack":
if resource.title in self.allowed_stack_id_list:
return True
else:
return False
else:
return True
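# Hedged example (stack titles are illustrative): with allowed_stack_id_list=["WebTier"],
# filter() keeps a nested-stack resource titled "WebTier", drops any other
# "AWS::CloudFormation::Stack" resource, and keeps every non-stack resource; note that
# only allowed_stack_id_list is consulted by filter() as written.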
|
PypiClean
|
/theseus-ai-nightly-2023.4.21.tar.gz/theseus-ai-nightly-2023.4.21/theseus/optimizer/sparse_linearization.py
|
from typing import List, Optional
import numpy as np
import torch
from theseus.core import Objective
from theseus.utils.sparse_matrix_utils import sparse_mv, sparse_mtv
from .linear_system import SparseStructure
from .linearization import Linearization
from .variable_ordering import VariableOrdering
class SparseLinearization(Linearization):
def __init__(
self,
objective: Objective,
ordering: Optional[VariableOrdering] = None,
**kwargs,
):
super().__init__(objective, ordering)
# we prepare the indices for At as csc matrix (or A as csr, same thing)
# for similarity with dense_linearization code we build A as csr, then
# actually we have At as csc and can feed it to `cholesky_AAt` routine
# we want a unique set of rowPtr/colInd indices for A for all batches
# we also save pointers to the data block, so that we can later quickly
# write the data blocks
A_col_ind: List[int] = []
A_row_ptr: List[int] = [0]
# ptr to data block (stride = sum of variable.dim()
cost_function_block_pointers = []
cost_function_row_block_starts = [] # where data start for this row block
cost_function_stride = [] # total jacobian cols
for _, cost_function in enumerate(self.objective._get_jacobians_iter()):
num_rows = cost_function.dim()
col_slices_indices = []
for var_idx_in_cost_function, variable in enumerate(
cost_function.optim_vars
):
var_idx_in_order = self.ordering.index_of(
cost_function.optim_var_at(var_idx_in_cost_function).name
)
var_start_col = self.var_start_cols[var_idx_in_order]
num_cols = variable.dof()
col_slice = slice(var_start_col, var_start_col + num_cols)
col_slices_indices.append((col_slice, var_idx_in_cost_function))
# sort according to how they will be written inside A
col_slices_indices.sort()
sorted_block_sizes = [(s.stop - s.start) for s, _ in col_slices_indices]
sorted_block_pointers = np.cumsum([0] + sorted_block_sizes)[:-1]
sorted_indices = np.array([i for _, i in col_slices_indices])
block_pointers: np.ndarray = np.ndarray(
(len(col_slices_indices),), dtype=int
)
block_pointers[sorted_indices] = sorted_block_pointers
cost_function_block_pointers.append(block_pointers)
cost_function_row_block_starts.append(len(A_col_ind))
col_ind = [c for s, _ in col_slices_indices for c in range(s.start, s.stop)]
cost_function_stride.append(len(col_ind))
for _ in range(num_rows):
A_col_ind += col_ind
A_row_ptr.append(len(A_col_ind))
# not batched, these data are the same across batches
self.cost_function_block_pointers = cost_function_block_pointers
self.cost_function_row_block_starts: np.ndarray = np.array(
cost_function_row_block_starts, dtype=int
)
self.cost_function_stride: np.ndarray = np.array(
cost_function_stride, dtype=int
)
self.A_row_ptr: np.ndarray = np.array(A_row_ptr, dtype=int)
self.A_col_ind: np.ndarray = np.array(A_col_ind, dtype=int)
# batched data
self.A_val: torch.Tensor = None
self.b: torch.Tensor = None
# computed lazily by self._atb_impl() and reset to None by
# self._linearize_jacobian_impl()
self._Atb: torch.Tensor = None
# computed lazily by self.diagonal_scaling() and reset to None by
# self._linearize_jacobian_impl()
self._AtA_diag: torch.Tensor = None
# If true, it signals to linear solvers that any computation resulting from
# matrix (At * A) must be detached from the compute graph
self.detached_hessian = False
def _linearize_jacobian_impl(self):
self.detached_hessian = False
self._Atb = None
self._AtA_diag = None
# those will be fully overwritten, no need to zero:
self.A_val = torch.empty(
size=(self.objective.batch_size, len(self.A_col_ind)),
device=self.objective.device,
dtype=self.objective.dtype,
)
self.b = torch.empty(
size=(self.objective.batch_size, self.num_rows),
device=self.objective.device,
dtype=self.objective.dtype,
)
err_row_idx = 0
for f_idx, cost_function in enumerate(self.objective._get_jacobians_iter()):
jacobians, error = cost_function.weighted_jacobians_error()
num_rows = cost_function.dim()
row_slice = slice(err_row_idx, err_row_idx + num_rows)
# we will view the blocks of rows inside `A_val` as `num_rows` x `stride` matrix
block_start = self.cost_function_row_block_starts[f_idx]
stride = self.cost_function_stride[f_idx]
block = self.A_val[:, block_start : block_start + stride * num_rows].view(
-1, num_rows, stride
)
block_pointers = self.cost_function_block_pointers[f_idx]
for var_idx_in_cost_function, var_jacobian in enumerate(jacobians):
# the proper block is written, using the precomputed index in `block_pointers`
num_cols = var_jacobian.shape[2]
pointer = block_pointers[var_idx_in_cost_function]
block[:, :, pointer : pointer + num_cols] = var_jacobian
self.b[:, row_slice] = -error
err_row_idx += cost_function.dim()
def structure(self):
return SparseStructure(
self.A_col_ind,
self.A_row_ptr,
self.num_rows,
self.num_cols,
dtype=np.float64 if self.objective.dtype == torch.double else np.float32,
)
def _linearize_hessian_impl(self, _detach_hessian: bool = False):
# Some of our sparse solvers don't require explicitly computing the
# hessian approximation, so we only compute the jacobian here and let each
# solver handle this as needed
self._linearize_jacobian_impl()
self.detached_hessian = _detach_hessian
def _ata_impl(self) -> torch.Tensor:
raise NotImplementedError("AtA is not yet implemented for SparseLinearization.")
def _atb_impl(self) -> torch.Tensor:
if self._Atb is None:
A_row_ptr = torch.tensor(self.A_row_ptr, dtype=torch.int32).to(
self.objective.device
)
A_col_ind = A_row_ptr.new_tensor(self.A_col_ind)
# unsqueeze at the end for consistency with DenseLinearization
self._Atb = sparse_mtv(
self.num_cols,
A_row_ptr,
A_col_ind,
self.A_val.double(),
self.b.double(),
).unsqueeze(2)
return self._Atb.to(dtype=self.A_val.dtype)
def Av(self, v: torch.Tensor) -> torch.Tensor:
A_row_ptr = torch.tensor(self.A_row_ptr, dtype=torch.int32).to(
self.objective.device
)
A_col_ind = A_row_ptr.new_tensor(self.A_col_ind)
return sparse_mv(
self.num_cols, A_row_ptr, A_col_ind, self.A_val.double(), v.double()
).to(v.dtype)
def diagonal_scaling(self, v: torch.Tensor) -> torch.Tensor:
assert v.ndim == 2
assert v.shape[1] == self.num_cols
if self._AtA_diag is None:
A_val = self.A_val
self._AtA_diag = torch.zeros(
    A_val.shape[0], self.num_cols, device=A_val.device, dtype=A_val.dtype
)
for row in range(self.num_rows):
start = self.A_row_ptr[row]
end = self.A_row_ptr[row + 1]
columns = self.A_col_ind[start:end]
self._AtA_diag[:, columns] += A_val[:, start:end] ** 2
return self._AtA_diag * v
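# --- Hedged illustration (generic CSR layout, not tied to a particular Objective;
# --- assumes scipy is available) ---
if __name__ == "__main__":
    import scipy.sparse as sp
    # A tiny 2x3 matrix stored with the same (row_ptr, col_ind, val) layout used above:
    # row i occupies val[row_ptr[i]:row_ptr[i + 1]] at columns col_ind[row_ptr[i]:row_ptr[i + 1]].
    row_ptr = np.array([0, 2, 3])
    col_ind = np.array([0, 2, 1])
    val = np.array([1.0, 2.0, 3.0])
    print(sp.csr_matrix((val, col_ind, row_ptr), shape=(2, 3)).toarray())
    # [[1. 0. 2.]
    #  [0. 3. 0.]]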
|
PypiClean
|
/django-celery-beat-yywing2-2.0.0.tar.gz/django-celery-beat-yywing2-2.0.0/docs/copyright.rst
|
Copyright
=========
*django-celery-beat User Manual*
by Ask Solem
.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
Copyright |copy| 2016, Ask Solem
All rights reserved. This material may be copied or distributed only
subject to the terms and conditions set forth in the `Creative Commons
Attribution-ShareAlike 4.0 International
<http://creativecommons.org/licenses/by-sa/4.0/legalcode>`_ license.
You may share and adapt the material, even for commercial purposes, but
you must give the original author credit.
If you alter, transform, or build upon this
work, you may distribute the resulting work only under the same license or
a license compatible to this one.
.. note::
While the django-celery-beat *documentation* is offered under the
Creative Commons *Attribution-ShareAlike 4.0 International* license
the django-celery-beat *software* is offered under the
`BSD License (3 Clause) <http://www.opensource.org/licenses/BSD-3-Clause>`_
|
PypiClean
|
/dsin100daysv29-6.0.1.tar.gz/dsin100daysv29-6.0.1/notebook/static/components/MathJax/jax/output/HTML-CSS/fonts/STIX-Web/Size5/Regular/Main.js
|
MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.STIXMathJax_Size5={directory:"Size5/Regular",family:"STIXMathJax_Size5",testString:"\u00A0\u02C6\u02C7\u02DC\u02F7\u0302\u0303\u0305\u030C\u0330\u0332\u0338\u203E\u20D0\u20D1",32:[0,0,250,0,0],160:[0,0,250,0,0],710:[816,-572,2328,0,2328],711:[816,-572,2328,0,2328],732:[780,-617,2328,0,2328],759:[-117,280,2328,0,2328],770:[816,-572,2328,0,2328],771:[780,-617,2328,0,2328],773:[820,-770,3000,0,3000],780:[816,-572,2328,0,2328],816:[-117,280,2328,0,2328],818:[-127,177,3000,0,3000],824:[960,454,0,-561,-123],8254:[820,-770,3000,0,3000],8400:[749,-584,3000,0,3000],8401:[749,-584,3000,0,3000],8406:[735,-482,3000,0,3000],8407:[735,-482,3000,0,3000],8428:[-123,288,3000,0,3000],8429:[-123,288,3000,0,3000],8430:[-26,279,3000,0,3000],8431:[-26,279,3000,0,3000],9140:[766,-544,3237,90,3147],9141:[139,83,3237,90,3147],9180:[80,189,3237,0,3237],9181:[842,-573,3237,0,3237],9182:[181,90,3238,0,3238],9183:[844,-573,3238,0,3238],9184:[66,212,3164,0,3164],9185:[842,-564,3164,0,3164],57344:[705,300,450,50,400],57345:[705,305,450,50,174],57346:[700,305,450,50,400],57347:[705,300,450,50,400],57348:[705,305,450,276,400],57349:[700,305,450,50,400],57350:[687,318,450,50,415],57351:[687,323,450,50,150],57352:[682,323,450,50,415],57353:[687,318,450,35,400],57354:[687,323,450,300,400],57355:[682,323,450,35,400],57356:[705,300,640,260,600],57357:[705,305,640,260,380],57358:[705,305,640,40,380],57359:[700,305,640,260,600],57360:[705,300,640,40,380],57361:[705,305,640,260,600],57362:[700,305,640,40,380],57363:[820,-770,1000,0,1000],57364:[-127,177,1000,0,1000],57365:[749,-584,870,0,871],57366:[634,-584,480,-10,490],57367:[749,-584,871,0,871],57368:[735,-482,871,0,872],57369:[736,-482,871,0,872],57370:[-127,177,480,-10,490],57371:[-123,288,871,0,871],57372:[-123,288,871,0,871],57373:[-26,279,871,0,872],57374:[-25,279,871,0,872],57375:[386,-120,315,0,315],57376:[405,-101,686,210,476],57377:[486,-20,315,0,315],57378:[1855,0,1184,112,895],57379:[635,0,1184,829,895],57380:[626,0,1184,829,1211],57381:[2140,0,1184,112,895],57382:[2135,0,1184,112,895],57383:[955,-554,1820,-25,1830],57384:[955,-820,633,-1,634],57385:[955,-554,1820,-10,1845],57386:[140,261,1820,-25,1830],57387:[-126,261,633,-1,634],57388:[140,261,1820,-10,1845],57389:[955,-342,1820,-25,1830],57390:[955,-342,1820,-10,1845],57391:[352,261,1820,-25,1830],57392:[352,261,1820,-10,1845],57393:[955,-512,897,-25,908],57394:[1218,-820,1844,-10,1854],57395:[955,-512,897,-11,922],57396:[182,261,897,-25,908],57397:[-126,524,1844,-10,1854],57398:[182,261,897,-11,922],57399:[405,-101,1033,229,805],57400:[405,-101,926,230,696],57401:[541,35,315,0,315],57402:[700,301,600,35,566],57403:[700,301,600,35,566],57404:[1066,79,688,294,574],57405:[610,25,688,294,394],57406:[1086,59,688,115,394]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"STIXMathJax_Size5"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Size5/Regular/Main.js"]);
|
PypiClean
|
/smartautomatic_server_frontend-20220907.2-py3-none-any.whl/sas_frontend/frontend_es5/5074c137.js
|
"use strict";(self.webpackChunksmartautomatic_server_frontend=self.webpackChunksmartautomatic_server_frontend||[]).push([[68353],{18601:function(e,t,n){n.d(t,{qN:function(){return u.q},Wg:function(){return v}});var r,o,i=n(87480),a=n(33310),u=n(78220);function c(e){return c="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},c(e)}function s(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function l(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}function f(e,t,n){return f="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(e,t,n){var r=function(e,t){for(;!Object.prototype.hasOwnProperty.call(e,t)&&null!==(e=m(e)););return e}(e,t);if(r){var o=Object.getOwnPropertyDescriptor(r,t);return o.get?o.get.call(n):o.value}},f(e,t,n||e)}function d(e,t){return d=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},d(e,t)}function p(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=m(e);if(t){var o=m(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return h(this,n)}}function h(e,t){if(t&&("object"===c(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(e)}function m(e){return m=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},m(e)}var y=null!==(o=null===(r=window.ShadyDOM)||void 0===r?void 0:r.inUse)&&void 0!==o&&o,v=function(e){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&d(e,t)}(i,e);var t,n,r,o=p(i);function i(){var e;return s(this,i),(e=o.apply(this,arguments)).disabled=!1,e.containingForm=null,e.formDataListener=function(t){e.disabled||e.setFormData(t.formData)},e}return t=i,n=[{key:"findFormElement",value:function(){if(!this.shadowRoot||y)return null;for(var e=this.getRootNode().querySelectorAll("form"),t=0,n=Array.from(e);t<n.length;t++){var r=n[t];if(r.contains(this))return r}return null}},{key:"connectedCallback",value:function(){var e;f(m(i.prototype),"connectedCallback",this).call(this),this.containingForm=this.findFormElement(),null===(e=this.containingForm)||void 0===e||e.addEventListener("formdata",this.formDataListener)}},{key:"disconnectedCallback",value:function(){var e;f(m(i.prototype),"disconnectedCallback",this).call(this),null===(e=this.containingForm)||void 0===e||e.removeEventListener("formdata",this.formDataListener),this.containingForm=null}},{key:"click",value:function(){this.formElement&&!this.disabled&&(this.formElement.focus(),this.formElement.click())}},{key:"firstUpdated",value:function(){var e=this;f(m(i.prototype),"firstUpdated",this).call(this),this.shadowRoot&&this.mdcRoot.addEventListener("change",(function(t){e.dispatchEvent(new 
Event("change",t))}))}}],n&&l(t.prototype,n),r&&l(t,r),i}(u.H);v.shadowRootOptions={mode:"open",delegatesFocus:!0},(0,i.__decorate)([(0,a.Cb)({type:Boolean})],v.prototype,"disabled",void 0)},14114:function(e,t,n){n.d(t,{P:function(){return r}});var r=function(e){return function(t,n){if(t.constructor._observers){if(!t.constructor.hasOwnProperty("_observers")){var r=t.constructor._observers;t.constructor._observers=new Map,r.forEach((function(e,n){return t.constructor._observers.set(n,e)}))}}else{t.constructor._observers=new Map;var o=t.updated;t.updated=function(e){var t=this;o.call(this,e),e.forEach((function(e,n){var r=t.constructor._observers.get(n);void 0!==r&&r.call(t,t[n],e)}))}}t.constructor._observers.set(n,e)}}},22814:function(e,t,n){function r(e,t,n,r,o,i,a){try{var u=e[i](a),c=u.value}catch(s){return void n(s)}u.done?t(c):Promise.resolve(c).then(r,o)}function o(e){return function(){var t=this,n=arguments;return new Promise((function(o,i){var a=e.apply(t,n);function u(e){r(a,o,i,u,c,"next",e)}function c(e){r(a,o,i,u,c,"throw",e)}u(void 0)}))}}n.d(t,{iI:function(){return i},W2:function(){return a},TZ:function(){return u}});"".concat(location.protocol,"//").concat(location.host);var i=function(e,t){return e.callWS({type:"auth/sign_path",path:t})},a=function(){var e=o(regeneratorRuntime.mark((function e(t,n,r,o){return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.abrupt("return",t.callWS({type:"config/auth_provider/smartautomatic/create",user_id:n,username:r,password:o}));case 1:case"end":return e.stop()}}),e)})));return function(t,n,r,o){return e.apply(this,arguments)}}(),u=function(){var e=o(regeneratorRuntime.mark((function e(t,n,r){return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:return e.abrupt("return",t.callWS({type:"config/auth_provider/smartautomatic/admin_change_password",user_id:n,password:r}));case 1:case"end":return e.stop()}}),e)})));return function(t,n,r){return e.apply(this,arguments)}}()},9893:function(e,t,n){n.d(t,{Qo:function(){return r},kb:function(){return i},cs:function(){return a}});var r="custom:",o=window;"customCards"in o||(o.customCards=[]);var i=o.customCards,a=function(e){return i.find((function(t){return t.type===e}))}},51444:function(e,t,n){n.d(t,{_:function(){return i}});var r=n(47181),o=function(){return Promise.all([n.e(29563),n.e(98985),n.e(85084),n.e(3555),n.e(34821),n.e(72420)]).then(n.bind(n,72420))},i=function(e){(0,r.B)(e,"show-dialog",{dialogTag:"ha-voice-command-dialog",dialogImport:o,dialogParams:{}})}},27849:function(e,t,n){n(39841);var r,o=n(50856);n(28426);function i(e){return i="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},i(e)}function a(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function u(e,t){for(var n=0;n<t.length;n++){var r=t[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}function c(e,t){return c=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},c(e,t)}function s(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=f(e);if(t){var 
o=f(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return l(this,n)}}function l(e,t){if(t&&("object"===i(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return function(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}(e)}function f(e){return f=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},f(e)}var d=function(e){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&c(e,t)}(f,customElements.get("app-header-layout"));var t,n,i,l=s(f);function f(){return a(this,f),l.apply(this,arguments)}return t=f,i=[{key:"template",get:function(){return(0,o.d)(r||(e=['\n <style>\n :host {\n display: block;\n /**\n * Force app-header-layout to have its own stacking context so that its parent can\n * control the stacking of it relative to other elements (e.g. app-drawer-layout).\n * This could be done using `isolation: isolate`, but that\'s not well supported\n * across browsers.\n */\n position: relative;\n z-index: 0;\n }\n\n #wrapper ::slotted([slot="header"]) {\n @apply --layout-fixed-top;\n z-index: 1;\n }\n\n #wrapper.initializing ::slotted([slot="header"]) {\n position: relative;\n }\n\n :host([has-scrolling-region]) {\n height: 100%;\n }\n\n :host([has-scrolling-region]) #wrapper ::slotted([slot="header"]) {\n position: absolute;\n }\n\n :host([has-scrolling-region])\n #wrapper.initializing\n ::slotted([slot="header"]) {\n position: relative;\n }\n\n :host([has-scrolling-region]) #wrapper #contentContainer {\n @apply --layout-fit;\n overflow-y: auto;\n -webkit-overflow-scrolling: touch;\n }\n\n :host([has-scrolling-region]) #wrapper.initializing #contentContainer {\n position: relative;\n }\n\n #contentContainer {\n /* Create a stacking context here so that all children appear below the header. */\n position: relative;\n z-index: 0;\n /* Using \'transform\' will cause \'position: fixed\' elements to behave like\n \'position: absolute\' relative to this element. */\n transform: translate(0);\n margin-left: env(safe-area-inset-left);\n margin-right: env(safe-area-inset-right);\n }\n\n @media print {\n :host([has-scrolling-region]) #wrapper #contentContainer {\n overflow-y: visible;\n }\n }\n </style>\n\n <div id="wrapper" class="initializing">\n <slot id="headerSlot" name="header"></slot>\n\n <div id="contentContainer"><slot></slot></div>\n <slot id="fab" name="fab"></slot>\n </div>\n '],(t=['\n <style>\n :host {\n display: block;\n /**\n * Force app-header-layout to have its own stacking context so that its parent can\n * control the stacking of it relative to other elements (e.g. 
app-drawer-layout).\n * This could be done using \\`isolation: isolate\\`, but that\'s not well supported\n * across browsers.\n */\n position: relative;\n z-index: 0;\n }\n\n #wrapper ::slotted([slot="header"]) {\n @apply --layout-fixed-top;\n z-index: 1;\n }\n\n #wrapper.initializing ::slotted([slot="header"]) {\n position: relative;\n }\n\n :host([has-scrolling-region]) {\n height: 100%;\n }\n\n :host([has-scrolling-region]) #wrapper ::slotted([slot="header"]) {\n position: absolute;\n }\n\n :host([has-scrolling-region])\n #wrapper.initializing\n ::slotted([slot="header"]) {\n position: relative;\n }\n\n :host([has-scrolling-region]) #wrapper #contentContainer {\n @apply --layout-fit;\n overflow-y: auto;\n -webkit-overflow-scrolling: touch;\n }\n\n :host([has-scrolling-region]) #wrapper.initializing #contentContainer {\n position: relative;\n }\n\n #contentContainer {\n /* Create a stacking context here so that all children appear below the header. */\n position: relative;\n z-index: 0;\n /* Using \'transform\' will cause \'position: fixed\' elements to behave like\n \'position: absolute\' relative to this element. */\n transform: translate(0);\n margin-left: env(safe-area-inset-left);\n margin-right: env(safe-area-inset-right);\n }\n\n @media print {\n :host([has-scrolling-region]) #wrapper #contentContainer {\n overflow-y: visible;\n }\n }\n </style>\n\n <div id="wrapper" class="initializing">\n <slot id="headerSlot" name="header"></slot>\n\n <div id="contentContainer"><slot></slot></div>\n <slot id="fab" name="fab"></slot>\n </div>\n '])||(t=e.slice(0)),r=Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))));var e,t}}],(n=null)&&u(t.prototype,n),i&&u(t,i),f}();customElements.define("ha-app-layout",d)},51153:function(e,t,n){n.d(t,{l$:function(){return a},Z6:function(){return u},Do:function(){return c}});n(10175),n(80251),n(99471),n(14888),n(69377),n(95035),n(6169),n(41043),n(57464),n(24617),n(82778);var r=n(7778),o=new Set(["entity","entities","button","entity-button","glance","grid","light","sensor","thermostat","weather-forecast"]),i={"alarm-panel":function(){return Promise.all([n.e(29563),n.e(98985),n.e(3555),n.e(77639)]).then(n.bind(n,77639))},area:function(){return Promise.all([n.e(73826),n.e(97282),n.e(95795)]).then(n.bind(n,95795))},calendar:function(){return Promise.resolve().then(n.bind(n,80251))},conditional:function(){return n.e(68857).then(n.bind(n,68857))},"empty-state":function(){return n.e(67284).then(n.bind(n,67284))},"energy-compare":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(61046)]).then(n.bind(n,61046))},"energy-carbon-consumed-gauge":function(){return Promise.all([n.e(54444),n.e(80985),n.e(73826),n.e(55424),n.e(49915),n.e(43283),n.e(19490)]).then(n.bind(n,19490))},"energy-date-selection":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(23754),n.e(55139)]).then(n.bind(n,10346))},"energy-devices-graph":function(){return Promise.all([n.e(80985),n.e(5287),n.e(73826),n.e(55424),n.e(62336),n.e(94576)]).then(n.bind(n,94576))},"energy-distribution":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(9928)]).then(n.bind(n,9928))},"energy-gas-graph":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(62336),n.e(41305)]).then(n.bind(n,41305))},"energy-grid-neutrality-gauge":function(){return Promise.all([n.e(54444),n.e(80985),n.e(73826),n.e(55424),n.e(49915),n.e(32176)]).then(n.bind(n,32176))},"energy-solar-consumed-gauge":function(){return 
Promise.all([n.e(54444),n.e(80985),n.e(73826),n.e(55424),n.e(49915),n.e(43283),n.e(85930)]).then(n.bind(n,85930))},"energy-solar-graph":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(62336),n.e(70310)]).then(n.bind(n,70310))},"energy-sources-table":function(){return Promise.all([n.e(80985),n.e(40521),n.e(73826),n.e(55424),n.e(62336),n.e(17595),n.e(16938)]).then(n.bind(n,16938))},"energy-usage-graph":function(){return Promise.all([n.e(80985),n.e(73826),n.e(55424),n.e(62336),n.e(9897)]).then(n.bind(n,9897))},"entity-filter":function(){return n.e(33688).then(n.bind(n,33688))},error:function(){return Promise.all([n.e(77426),n.e(55796)]).then(n.bind(n,55796))},gauge:function(){return Promise.all([n.e(49915),n.e(43283)]).then(n.bind(n,43283))},"history-graph":function(){return Promise.all([n.e(9874),n.e(62336),n.e(70731),n.e(38026)]).then(n.bind(n,38026))},"horizontal-stack":function(){return n.e(89173).then(n.bind(n,89173))},humidifier:function(){return n.e(68558).then(n.bind(n,68558))},iframe:function(){return n.e(95018).then(n.bind(n,95018))},logbook:function(){return Promise.all([n.e(9874),n.e(99528),n.e(40967),n.e(90851)]).then(n.bind(n,8436))},map:function(){return Promise.all([n.e(23956),n.e(60076)]).then(n.bind(n,60076))},markdown:function(){return Promise.all([n.e(4940),n.e(26607)]).then(n.bind(n,51282))},"media-control":function(){return Promise.all([n.e(62744),n.e(67794),n.e(11866)]).then(n.bind(n,11866))},"picture-elements":function(){return Promise.all([n.e(98762),n.e(97282),n.e(6315),n.e(99810),n.e(70128)]).then(n.bind(n,83358))},"picture-entity":function(){return Promise.all([n.e(97282),n.e(41500)]).then(n.bind(n,41500))},"picture-glance":function(){return Promise.all([n.e(97282),n.e(66621)]).then(n.bind(n,66621))},picture:function(){return n.e(45338).then(n.bind(n,45338))},"plant-status":function(){return n.e(48723).then(n.bind(n,48723))},"safe-mode":function(){return Promise.all([n.e(29563),n.e(79071),n.e(24103),n.e(88278),n.e(6294),n.e(86630),n.e(98124),n.e(94066),n.e(24503)]).then(n.bind(n,24503))},"shopping-list":function(){return Promise.all([n.e(29563),n.e(98985),n.e(41985),n.e(3555),n.e(73826),n.e(43376)]).then(n.bind(n,43376))},starting:function(){return n.e(47873).then(n.bind(n,47873))},"statistics-graph":function(){return Promise.all([n.e(62336),n.e(17595),n.e(95396)]).then(n.bind(n,95396))},"vertical-stack":function(){return n.e(26136).then(n.bind(n,26136))}},a=function(e){return(0,r.Xm)("card",e,o,i,void 0,void 0)},u=function(e){return(0,r.Tw)("card",e,o,i,void 0,void 0)},c=function(e){return(0,r.ED)(e,"card",o,i)}},7778:function(e,t,n){n.d(t,{Pc:function(){return u},N2:function(){return c},Tw:function(){return d},Xm:function(){return p},ED:function(){return h}});var r=n(47181),o=n(9893);function i(e,t,n,r,o,i,a){try{var u=e[i](a),c=u.value}catch(s){return void n(s)}u.done?t(c):Promise.resolve(c).then(r,o)}function a(e){return a="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},a(e)}var u=function(e){var t=document.createElement("hui-error-card");return customElements.get("hui-error-card")?t.setConfig(e):(Promise.all([n.e(77426),n.e(55796)]).then(n.bind(n,55796)),customElements.whenDefined("hui-error-card").then((function(){customElements.upgrade(t),t.setConfig(e)}))),t},c=function(e,t){return{type:"error",error:e,origConfig:t}},s=function(e,t){var n=document.createElement(e);return 
n.setConfig(t),n},l=function(e,t){return u(c(e,t))},f=function(e){return e.startsWith(o.Qo)?e.substr(o.Qo.length):void 0},d=function(e,t,n,r,o,i){try{return p(e,t,n,r,o,i)}catch(a){return console.error(e,t.type,a),l(a.message,t)}},p=function(e,t,n,o,i,u){if(!t||"object"!==a(t))throw new Error("Config is not an object");if(!(t.type||u||i&&"entity"in t))throw new Error("No card type configured");var c,d=t.type?f(t.type):void 0;if(d)return function(e,t){if(customElements.get(e))return s(e,t);var n=l("Custom element doesn't exist: ".concat(e,"."),t);if(!e.includes("-"))return n;n.style.display="None";var o=window.setTimeout((function(){n.style.display=""}),2e3);return customElements.whenDefined(e).then((function(){clearTimeout(o),(0,r.B)(n,"ll-rebuild")})),n}(d,t);if(i&&!t.type&&t.entity){var p=t.entity.split(".",1)[0];c="".concat(i[p]||i._domain_not_found,"-entity")}else c=t.type||u;if(void 0===c)throw new Error("No type specified");var h="hui-".concat(c,"-").concat(e);if(o&&c in o)return o[c](),function(e,t){if(customElements.get(e))return s(e,t);var n=document.createElement(e);return customElements.whenDefined(e).then((function(){try{customElements.upgrade(n),n.setConfig(t)}catch(e){(0,r.B)(n,"ll-rebuild")}})),n}(h,t);if(n&&n.has(c))return s(h,t);throw new Error("Unknown type encountered: ".concat(c))},h=function(){var e,t=(e=regeneratorRuntime.mark((function e(t,n,r,o){var i,a,u,c;return regeneratorRuntime.wrap((function(e){for(;;)switch(e.prev=e.next){case 0:if(!(i=f(t))){e.next=8;break}if(!(a=customElements.get(i))){e.next=5;break}return e.abrupt("return",a);case 5:if(i.includes("-")){e.next=7;break}throw new Error("Custom element not found: ".concat(i));case 7:return e.abrupt("return",new Promise((function(e,t){setTimeout((function(){return t(new Error("Custom element not found: ".concat(i)))}),2e3),customElements.whenDefined(i).then((function(){return e(customElements.get(i))}))})));case 8:if(u="hui-".concat(t,"-").concat(n),c=customElements.get(u),!r||!r.has(t)){e.next=12;break}return e.abrupt("return",c);case 12:if(!o||!(t in o)){e.next=14;break}return e.abrupt("return",c||o[t]().then((function(){return customElements.get(u)})));case 14:throw new Error("Unknown type: ".concat(t));case 15:case"end":return e.stop()}}),e)})),function(){var t=this,n=arguments;return new Promise((function(r,o){var a=e.apply(t,n);function u(e){i(a,r,o,u,c,"next",e)}function c(e){i(a,r,o,u,c,"throw",e)}u(void 0)}))});return function(e,n,r,o){return t.apply(this,arguments)}}()},89026:function(e,t,n){n.d(t,{t:function(){return i},Q:function(){return a}});var r=n(7778),o={picture:function(){return n.e(69130).then(n.bind(n,69130))},buttons:function(){return Promise.all([n.e(42109),n.e(32587)]).then(n.bind(n,32587))},graph:function(){return n.e(25773).then(n.bind(n,25773))}},i=function(e){return(0,r.Tw)("header-footer",e,void 0,o,void 0,void 0)},a=function(e){return(0,r.ED)(e,"header-footer",void 0,o)}},37482:function(e,t,n){n.d(t,{m:function(){return u},T:function(){return c}});n(12141),n(31479),n(23266),n(65716),n(97600),n(83896),n(45340),n(56427),n(23658);var r=n(7778),o=new Set(["media-player-entity","scene-entity","script-entity","sensor-entity","text-entity","toggle-entity","button","call-service"]),i={"button-entity":function(){return n.e(85611).then(n.bind(n,85611))},"climate-entity":function(){return n.e(35642).then(n.bind(n,35642))},"cover-entity":function(){return Promise.all([n.e(69448),n.e(16755)]).then(n.bind(n,16755))},"group-entity":function(){return 
n.e(81534).then(n.bind(n,81534))},"input-button-entity":function(){return n.e(83968).then(n.bind(n,83968))},"humidifier-entity":function(){return n.e(41102).then(n.bind(n,41102))},"input-datetime-entity":function(){return Promise.all([n.e(29563),n.e(98985),n.e(79071),n.e(24103),n.e(88278),n.e(6294),n.e(3555),n.e(86630),n.e(12545),n.e(88101)]).then(n.bind(n,22350))},"input-number-entity":function(){return Promise.all([n.e(29563),n.e(98985),n.e(3555),n.e(12335)]).then(n.bind(n,12335))},"input-select-entity":function(){return Promise.all([n.e(29563),n.e(79071),n.e(24103),n.e(88278),n.e(6294),n.e(86630),n.e(25675)]).then(n.bind(n,25675))},"input-text-entity":function(){return Promise.all([n.e(29563),n.e(98985),n.e(3555),n.e(73943)]).then(n.bind(n,73943))},"lock-entity":function(){return n.e(61596).then(n.bind(n,61596))},"number-entity":function(){return Promise.all([n.e(29563),n.e(98985),n.e(3555),n.e(66778)]).then(n.bind(n,66778))},"select-entity":function(){return Promise.all([n.e(29563),n.e(79071),n.e(24103),n.e(88278),n.e(6294),n.e(86630),n.e(35994)]).then(n.bind(n,35994))},"timer-entity":function(){return n.e(31203).then(n.bind(n,31203))},conditional:function(){return n.e(97749).then(n.bind(n,97749))},"weather-entity":function(){return n.e(71850).then(n.bind(n,71850))},divider:function(){return n.e(41930).then(n.bind(n,41930))},section:function(){return n.e(94832).then(n.bind(n,94832))},weblink:function(){return n.e(44689).then(n.bind(n,44689))},cast:function(){return n.e(25840).then(n.bind(n,25840))},buttons:function(){return Promise.all([n.e(42109),n.e(82137)]).then(n.bind(n,82137))},attribute:function(){return Promise.resolve().then(n.bind(n,45340))},text:function(){return n.e(63459).then(n.bind(n,63459))}},a={_domain_not_found:"text",alert:"toggle",automation:"toggle",button:"button",climate:"climate",cover:"cover",fan:"toggle",group:"group",humidifier:"humidifier",input_boolean:"toggle",input_button:"input-button",input_number:"input-number",input_select:"input-select",input_text:"input-text",light:"toggle",lock:"lock",media_player:"media-player",number:"number",remote:"toggle",scene:"scene",script:"script",select:"select",sensor:"sensor",siren:"toggle",switch:"toggle",timer:"timer",vacuum:"toggle",water_heater:"climate",input_datetime:"input-datetime",weather:"weather"},u=function(e){return(0,r.Tw)("row",e,o,i,a,void 0)},c=function(e){return(0,r.ED)(e,"row",o,i)}},44295:function(e,t,n){n.r(t);n(53268),n(12730);var r,o,i,a=n(37500),u=n(33310),c=n(14516),s=n(7323),l=(n(10983),n(48932),n(51444)),f=(n(27849),n(11654)),d=n(51153);function p(e){return p="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},p(e)}function h(e,t){return t||(t=e.slice(0)),Object.freeze(Object.defineProperties(e,{raw:{value:Object.freeze(t)}}))}function m(e,t){if(!(e instanceof t))throw new TypeError("Cannot call a class as a function")}function y(e,t){return y=Object.setPrototypeOf||function(e,t){return e.__proto__=t,e},y(e,t)}function v(e){var t=function(){if("undefined"==typeof Reflect||!Reflect.construct)return!1;if(Reflect.construct.sham)return!1;if("function"==typeof Proxy)return!0;try{return Boolean.prototype.valueOf.call(Reflect.construct(Boolean,[],(function(){}))),!0}catch(e){return!1}}();return function(){var n,r=A(e);if(t){var o=A(this).constructor;n=Reflect.construct(r,arguments,o)}else n=r.apply(this,arguments);return b(this,n)}}function 
b(e,t){if(t&&("object"===p(t)||"function"==typeof t))return t;if(void 0!==t)throw new TypeError("Derived constructors may only return object or undefined");return g(e)}function g(e){if(void 0===e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return e}function w(){w=function(){return e};var e={elementsDefinitionOrder:[["method"],["field"]],initializeInstanceElements:function(e,t){["method","field"].forEach((function(n){t.forEach((function(t){t.kind===n&&"own"===t.placement&&this.defineClassElement(e,t)}),this)}),this)},initializeClassElements:function(e,t){var n=e.prototype;["method","field"].forEach((function(r){t.forEach((function(t){var o=t.placement;if(t.kind===r&&("static"===o||"prototype"===o)){var i="static"===o?e:n;this.defineClassElement(i,t)}}),this)}),this)},defineClassElement:function(e,t){var n=t.descriptor;if("field"===t.kind){var r=t.initializer;n={enumerable:n.enumerable,writable:n.writable,configurable:n.configurable,value:void 0===r?void 0:r.call(e)}}Object.defineProperty(e,t.key,n)},decorateClass:function(e,t){var n=[],r=[],o={static:[],prototype:[],own:[]};if(e.forEach((function(e){this.addElementPlacement(e,o)}),this),e.forEach((function(e){if(!P(e))return n.push(e);var t=this.decorateElement(e,o);n.push(t.element),n.push.apply(n,t.extras),r.push.apply(r,t.finishers)}),this),!t)return{elements:n,finishers:r};var i=this.decorateConstructor(n,t);return r.push.apply(r,i.finishers),i.finishers=r,i},addElementPlacement:function(e,t,n){var r=t[e.placement];if(!n&&-1!==r.indexOf(e.key))throw new TypeError("Duplicated element ("+e.key+")");r.push(e.key)},decorateElement:function(e,t){for(var n=[],r=[],o=e.decorators,i=o.length-1;i>=0;i--){var a=t[e.placement];a.splice(a.indexOf(e.key),1);var u=this.fromElementDescriptor(e),c=this.toElementFinisherExtras((0,o[i])(u)||u);e=c.element,this.addElementPlacement(e,t),c.finisher&&r.push(c.finisher);var s=c.extras;if(s){for(var l=0;l<s.length;l++)this.addElementPlacement(s[l],t);n.push.apply(n,s)}}return{element:e,finishers:r,extras:n}},decorateConstructor:function(e,t){for(var n=[],r=t.length-1;r>=0;r--){var o=this.fromClassDescriptor(e),i=this.toClassDescriptor((0,t[r])(o)||o);if(void 0!==i.finisher&&n.push(i.finisher),void 0!==i.elements){e=i.elements;for(var a=0;a<e.length-1;a++)for(var u=a+1;u<e.length;u++)if(e[a].key===e[u].key&&e[a].placement===e[u].placement)throw new TypeError("Duplicated element ("+e[a].key+")")}}return{elements:e,finishers:n}},fromElementDescriptor:function(e){var t={kind:e.kind,key:e.key,placement:e.placement,descriptor:e.descriptor};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),"field"===e.kind&&(t.initializer=e.initializer),t},toElementDescriptors:function(e){var t;if(void 0!==e)return(t=e,function(e){if(Array.isArray(e))return e}(t)||function(e){if("undefined"!=typeof Symbol&&null!=e[Symbol.iterator]||null!=e["@@iterator"])return Array.from(e)}(t)||function(e,t){if(e){if("string"==typeof e)return C(e,t);var n=Object.prototype.toString.call(e).slice(8,-1);return"Object"===n&&e.constructor&&(n=e.constructor.name),"Map"===n||"Set"===n?Array.from(e):"Arguments"===n||/^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(n)?C(e,t):void 0}}(t)||function(){throw new TypeError("Invalid attempt to destructure non-iterable instance.\nIn order to be iterable, non-array objects must have a [Symbol.iterator]() method.")}()).map((function(e){var t=this.toElementDescriptor(e);return this.disallowProperty(e,"finisher","An element 
descriptor"),this.disallowProperty(e,"extras","An element descriptor"),t}),this)},toElementDescriptor:function(e){var t=String(e.kind);if("method"!==t&&"field"!==t)throw new TypeError('An element descriptor\'s .kind property must be either "method" or "field", but a decorator created an element descriptor with .kind "'+t+'"');var n=O(e.key),r=String(e.placement);if("static"!==r&&"prototype"!==r&&"own"!==r)throw new TypeError('An element descriptor\'s .placement property must be one of "static", "prototype" or "own", but a decorator created an element descriptor with .placement "'+r+'"');var o=e.descriptor;this.disallowProperty(e,"elements","An element descriptor");var i={kind:t,key:n,placement:r,descriptor:Object.assign({},o)};return"field"!==t?this.disallowProperty(e,"initializer","A method descriptor"):(this.disallowProperty(o,"get","The property descriptor of a field descriptor"),this.disallowProperty(o,"set","The property descriptor of a field descriptor"),this.disallowProperty(o,"value","The property descriptor of a field descriptor"),i.initializer=e.initializer),i},toElementFinisherExtras:function(e){return{element:this.toElementDescriptor(e),finisher:x(e,"finisher"),extras:this.toElementDescriptors(e.extras)}},fromClassDescriptor:function(e){var t={kind:"class",elements:e.map(this.fromElementDescriptor,this)};return Object.defineProperty(t,Symbol.toStringTag,{value:"Descriptor",configurable:!0}),t},toClassDescriptor:function(e){var t=String(e.kind);if("class"!==t)throw new TypeError('A class descriptor\'s .kind property must be "class", but a decorator created a class descriptor with .kind "'+t+'"');this.disallowProperty(e,"key","A class descriptor"),this.disallowProperty(e,"placement","A class descriptor"),this.disallowProperty(e,"descriptor","A class descriptor"),this.disallowProperty(e,"initializer","A class descriptor"),this.disallowProperty(e,"extras","A class descriptor");var n=x(e,"finisher");return{elements:this.toElementDescriptors(e.elements),finisher:n}},runClassFinishers:function(e,t){for(var n=0;n<t.length;n++){var r=(0,t[n])(e);if(void 0!==r){if("function"!=typeof r)throw new TypeError("Finishers must return a constructor.");e=r}}return e},disallowProperty:function(e,t,n){if(void 0!==e[t])throw new TypeError(n+" can't have a ."+t+" property.")}};return e}function k(e){var t,n=O(e.key);"method"===e.kind?t={value:e.value,writable:!0,configurable:!0,enumerable:!1}:"get"===e.kind?t={get:e.value,configurable:!0,enumerable:!1}:"set"===e.kind?t={set:e.value,configurable:!0,enumerable:!1}:"field"===e.kind&&(t={configurable:!0,writable:!0,enumerable:!0});var r={kind:"field"===e.kind?"field":"method",key:n,placement:e.static?"static":"field"===e.kind?"own":"prototype",descriptor:t};return e.decorators&&(r.decorators=e.decorators),"field"===e.kind&&(r.initializer=e.value),r}function E(e,t){void 0!==e.descriptor.get?t.descriptor.get=e.descriptor.get:t.descriptor.set=e.descriptor.set}function P(e){return e.decorators&&e.decorators.length}function _(e){return void 0!==e&&!(void 0===e.value&&void 0===e.writable)}function x(e,t){var n=e[t];if(void 0!==n&&"function"!=typeof n)throw new TypeError("Expected '"+t+"' to be a function");return n}function O(e){var t=function(e,t){if("object"!==p(e)||null===e)return e;var n=e[Symbol.toPrimitive];if(void 0!==n){var r=n.call(e,t||"default");if("object"!==p(r))return r;throw new TypeError("@@toPrimitive must return a primitive value.")}return("string"===t?String:Number)(e)}(e,"string");return"symbol"===p(t)?t:String(t)}function 
C(e,t){(null==t||t>e.length)&&(t=e.length);for(var n=0,r=new Array(t);n<t;n++)r[n]=e[n];return r}function S(e,t,n){return S="undefined"!=typeof Reflect&&Reflect.get?Reflect.get:function(e,t,n){var r=function(e,t){for(;!Object.prototype.hasOwnProperty.call(e,t)&&null!==(e=A(e)););return e}(e,t);if(r){var o=Object.getOwnPropertyDescriptor(r,t);return o.get?o.get.call(n):o.value}},S(e,t,n||e)}function A(e){return A=Object.setPrototypeOf?Object.getPrototypeOf:function(e){return e.__proto__||Object.getPrototypeOf(e)},A(e)}!function(e,t,n,r){var o=w();if(r)for(var i=0;i<r.length;i++)o=r[i](o);var a=t((function(e){o.initializeInstanceElements(e,u.elements)}),n),u=o.decorateClass(function(e){for(var t=[],n=function(e){return"method"===e.kind&&e.key===i.key&&e.placement===i.placement},r=0;r<e.length;r++){var o,i=e[r];if("method"===i.kind&&(o=t.find(n)))if(_(i.descriptor)||_(o.descriptor)){if(P(i)||P(o))throw new ReferenceError("Duplicated methods ("+i.key+") can't be decorated.");o.descriptor=i.descriptor}else{if(P(i)){if(P(o))throw new ReferenceError("Decorators can't be placed on different accessors with for the same property ("+i.key+").");o.decorators=i.decorators}E(i,o)}else t.push(i)}return t}(a.d.map(k)),e);o.initializeClassElements(a.F,u.elements),o.runClassFinishers(a.F,u.finishers)}([(0,u.Mo)("ha-panel-shopping-list")],(function(e,t){var n=function(t){!function(e,t){if("function"!=typeof t&&null!==t)throw new TypeError("Super expression must either be null or a function");e.prototype=Object.create(t&&t.prototype,{constructor:{value:e,writable:!0,configurable:!0}}),t&&y(e,t)}(r,t);var n=v(r);function r(){var t;m(this,r);for(var o=arguments.length,i=new Array(o),a=0;a<o;a++)i[a]=arguments[a];return t=n.call.apply(n,[this].concat(i)),e(g(t)),t}return r}(t);return{F:n,d:[{kind:"field",decorators:[(0,u.Cb)({attribute:!1})],key:"hass",value:void 0},{kind:"field",decorators:[(0,u.Cb)({type:Boolean,reflect:!0})],key:"narrow",value:void 0},{kind:"field",decorators:[(0,u.SB)()],key:"_card",value:void 0},{kind:"field",key:"_conversation",value:function(){var e=this;return(0,c.Z)((function(t){return(0,s.p)(e.hass,"conversation")}))}},{kind:"method",key:"firstUpdated",value:function(e){S(A(n.prototype),"firstUpdated",this).call(this,e),this._card=(0,d.Z6)({type:"shopping-list"}),this._card.hass=this.hass}},{kind:"method",key:"updated",value:function(e){S(A(n.prototype),"updated",this).call(this,e),e.has("hass")&&(this._card.hass=this.hass)}},{kind:"method",key:"render",value:function(){return(0,a.dy)(r||(r=h(['\n <ha-app-layout>\n <app-header fixed slot="header">\n <app-toolbar>\n <ha-menu-button\n .hass=',"\n .narrow=","\n ></ha-menu-button>\n <div main-title>","</div>\n ",'\n </app-toolbar>\n </app-header>\n <div id="columns">\n <div class="column">',"</div>\n </div>\n </ha-app-layout>\n "])),this.hass,this.narrow,this.hass.localize("panel.shopping_list"),this._conversation(this.hass.config.components)?(0,a.dy)(o||(o=h(["\n <ha-icon-button\n .label=","\n .path=","\n @click=","\n ></ha-icon-button>\n "])),this.hass.localize("ui.panel.shopping_list.start_conversation"),"M12,2A3,3 0 0,1 15,5V11A3,3 0 0,1 12,14A3,3 0 0,1 9,11V5A3,3 0 0,1 12,2M19,11C19,14.53 16.39,17.44 13,17.93V21H11V17.93C7.61,17.44 5,14.53 5,11H7A5,5 0 0,0 12,16A5,5 0 0,0 17,11H19Z",this._showVoiceCommandDialog):"",this._card)}},{kind:"method",key:"_showVoiceCommandDialog",value:function(){(0,l._)(this)}},{kind:"get",static:!0,key:"styles",value:function(){return[f.Qx,(0,a.iv)(i||(i=h(["\n :host {\n display: block;\n height: 100%;\n 
}\n app-header {\n --mdc-theme-primary: var(--app-header-text-color);\n }\n :host([narrow]) app-toolbar mwc-button {\n width: 65px;\n }\n .heading {\n overflow: hidden;\n white-space: nowrap;\n margin-top: 4px;\n }\n #columns {\n display: flex;\n flex-direction: row;\n justify-content: center;\n margin-left: 4px;\n margin-right: 4px;\n }\n .column {\n flex: 1 0 0;\n max-width: 500px;\n min-width: 0;\n }\n "])))]}}]}}),a.oi)},81563:function(e,t,n){function r(e){return r="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e},r(e)}n.d(t,{E_:function(){return m},i9:function(){return p},_Y:function(){return s},pt:function(){return i},OR:function(){return u},hN:function(){return a},ws:function(){return h},fk:function(){return l},hl:function(){return d}});var o=n(15304).Al.H,i=function(e){return null===e||"object"!=r(e)&&"function"!=typeof e},a=function(e,t){var n,r;return void 0===t?void 0!==(null===(n=e)||void 0===n?void 0:n._$litType$):(null===(r=e)||void 0===r?void 0:r._$litType$)===t},u=function(e){return void 0===e.strings},c=function(){return document.createComment("")},s=function(e,t,n){var r,i=e._$AA.parentNode,a=void 0===t?e._$AB:t._$AA;if(void 0===n){var u=i.insertBefore(c(),a),s=i.insertBefore(c(),a);n=new o(u,s,e,e.options)}else{var l,f=n._$AB.nextSibling,d=n._$AM,p=d!==e;if(p)null===(r=n._$AQ)||void 0===r||r.call(n,e),n._$AM=e,void 0!==n._$AP&&(l=e._$AU)!==d._$AU&&n._$AP(l);if(f!==a||p)for(var h=n._$AA;h!==f;){var m=h.nextSibling;i.insertBefore(h,a),h=m}}return n},l=function(e,t){var n=arguments.length>2&&void 0!==arguments[2]?arguments[2]:e;return e._$AI(t,n),e},f={},d=function(e){var t=arguments.length>1&&void 0!==arguments[1]?arguments[1]:f;return e._$AH=t},p=function(e){return e._$AH},h=function(e){var t;null===(t=e._$AP)||void 0===t||t.call(e,!1,!0);for(var n=e._$AA,r=e._$AB.nextSibling;n!==r;){var o=n.nextSibling;n.remove(),n=o}},m=function(e){e._$AR()}},57835:function(e,t,n){n.d(t,{Xe:function(){return r.Xe},pX:function(){return r.pX},XM:function(){return r.XM}});var r=n(38941)}}]);
|
PypiClean
|
/hpvsim-1.2.3.tar.gz/hpvsim-1.2.3/docs/modules.rst
|
API reference
=============
.. automodule:: hpvsim
:members:
:undoc-members:
:show-inheritance:
Subpackages
-----------
.. toctree::
:maxdepth: 4
hpvsim.data
Submodules
----------
.. toctree::
:maxdepth: 4
hpvsim.analysis
hpvsim.base
hpvsim.calibration
hpvsim.defaults
hpvsim.hiv
hpvsim.immunity
hpvsim.interventions
hpvsim.misc
hpvsim.parameters
hpvsim.people
hpvsim.plotting
hpvsim.population
hpvsim.run
hpvsim.settings
hpvsim.sim
hpvsim.utils
hpvsim.version
|
PypiClean
|
/qs-qrcode-1.2.tar.gz/qs-qrcode-1.2/qsqrcode/constant.py
|
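# Constant tables for QR code generation (values follow the QR code specification, ISO/IEC 18004).
# alignment_location: row/column centre coordinates of the alignment patterns, indexed by
# symbol version; the two leading None entries cover index 0 (unused) and version 1, which
# has no alignment patterns.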
alignment_location = [
None, None, (6, 18), (6, 22), (6, 26), (6, 30), (6, 34), (6, 22, 38), (6, 24, 42), (6, 26, 46), (6, 28, 50),
(6, 30, 54), (6, 32, 58), (6, 34, 62), (6, 26, 46, 66), (6, 26, 48, 70), (6, 26, 50, 74), (6, 30, 54, 78),
(6, 30, 56, 82), (6, 30, 58, 86), (6, 34, 62, 90), (6, 28, 50, 72, 94), (6, 26, 50, 74, 98), (6, 30, 54, 78, 102),
(6, 28, 54, 80, 106), (6, 32, 58, 84, 110), (6, 30, 58, 86, 114), (6, 34, 62, 90, 118), (6, 26, 50, 74, 98, 122),
(6, 30, 54, 78, 102, 126), (6, 26, 52, 78, 104, 130), (6, 30, 56, 82, 108, 134), (6, 34, 60, 86, 112, 138),
(6, 30, 58, 86, 114, 142), (6, 34, 62, 90, 118, 146), (6, 30, 54, 78, 102, 126, 150),
(6, 24, 50, 76, 102, 128, 154), (6, 28, 54, 80, 106, 132, 158), (6, 32, 58, 84, 110, 136, 162),
(6, 26, 54, 82, 110, 138, 166), (6, 30, 58, 86, 114, 142, 170)
]
level_map = {'L': 0, 'M': 1, 'Q': 2, 'H': 3}
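# format_info_str: the 15-bit format information strings (error-correction bits + mask bits,
# BCH-encoded and masked), indexed as [error-correction level per level_map][mask pattern 0-7].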
format_info_str = [
['111011111000100', '111001011110011', '111110110101010', '111100010011101', '110011000101111', '110001100011000',
'110110001000001', '110100101110110'],
['101010000010010', '101000100100101', '101111001111100', '101101101001011', '100010111111001', '100000011001110',
'100111110010111', '100101010100000'],
['011010101011111', '011000001101000', '011111100110001', '011101000000110', '010010010110100', '010000110000011',
'010111011011010', '010101111101101'],
['001011010001001', '001001110111110', '001110011100111', '001100111010000', '000011101100010', '000001001010101',
'000110100001100', '000100000111011']
]
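# version_info_str: the 18-bit version information strings for symbol versions 7-40;
# versions below 7 carry no version information block, hence the leading None entries.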
version_info_str = [
None, None, None, None, None, None, None, '000111110010010100', '001000010110111100', '001001101010011001',
'001010010011010011', '001011101111110110', '001100011101100010', '001101100001000111', '001110011000001101',
'001111100100101000', '010000101101111000', '010001010001011101', '010010101000010111', '010011010100110010',
'010100100110100110', '010101011010000011', '010110100011001001', '010111011111101100', '011000111011000100',
'011001000111100001', '011010111110101011', '011011000010001110', '011100110000011010', '011101001100111111',
'011110110101110101', '011111001001010000', '100000100111010101', '100001011011110000', '100010100010111010',
'100011011110011111', '100100101100001011', '100101010000101110', '100110101001100100', '100111010101000001',
'101000110001101001'
]
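# character_amount: maximum number of encodable characters per error-correction level,
# indexed by version - 1; each tuple is the (numeric, alphanumeric, byte, kanji) capacity.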
character_amount = {
'L': [(41, 25, 17, 10), (77, 47, 32, 20), (127, 77, 53, 32), (187, 114, 78, 48), (255, 154, 106, 65),
(322, 195, 134, 82), (370, 224, 154, 95), (461, 279, 192, 118), (552, 335, 230, 141), (652, 395, 271, 167),
(772, 468, 321, 198), (883, 535, 367, 226), (1022, 619, 425, 262), (1101, 667, 458, 282),
(1250, 758, 520, 320), (1408, 854, 586, 361), (1548, 938, 644, 397), (1725, 1046, 718, 442),
(1903, 1153, 792, 488), (2061, 1249, 858, 528), (2232, 1352, 929, 572), (2409, 1460, 1003, 618),
(2620, 1588, 1091, 672), (2812, 1704, 1171, 721), (3057, 1853, 1273, 784), (3283, 1990, 1367, 842),
(3517, 2132, 1465, 902), (3669, 2223, 1528, 940), (3909, 2369, 1628, 1002), (4158, 2520, 1732, 1066),
(4417, 2677, 1840, 1132), (4686, 2840, 1952, 1201), (4965, 3009, 2068, 1273), (5253, 3183, 2188, 1347),
(5529, 3351, 2303, 1417), (5836, 3537, 2431, 1496), (6153, 3729, 2563, 1577), (6479, 3927, 2699, 1661),
(6743, 4087, 2809, 1729), (7089, 4296, 2953, 1817)],
'M': [(34, 20, 14, 8), (63, 38, 26, 16), (101, 61, 42, 26), (149, 90, 62, 38), (202, 122, 84, 52),
(255, 154, 106, 65), (293, 178, 122, 75), (365, 221, 152, 93), (432, 262, 180, 111), (513, 311, 213, 131),
(604, 366, 251, 155), (691, 419, 287, 177), (796, 483, 331, 204), (871, 528, 362, 223), (991, 600, 412, 254),
(1082, 656, 450, 277), (1212, 734, 504, 310), (1346, 816, 560, 345), (1500, 909, 624, 384),
(1600, 970, 666, 410), (1708, 1035, 711, 438), (1872, 1134, 779, 480), (2059, 1248, 857, 528),
(2188, 1326, 911, 561), (2395, 1451, 997, 614), (2544, 1542, 1059, 652), (2701, 1637, 1125, 692),
(2857, 1732, 1190, 732), (3035, 1839, 1264, 778), (3289, 1994, 1370, 843), (3486, 2113, 1452, 894),
(3693, 2238, 1538, 947), (3909, 2369, 1628, 1002), (4134, 2506, 1722, 1060), (4343, 2632, 1809, 1113),
(4588, 2780, 1911, 1176), (4775, 2894, 1989, 1224), (5039, 3054, 2099, 1292), (5313, 3220, 2213, 1362),
(5596, 3391, 2331, 1435)],
'Q': [(27, 16, 11, 7), (48, 29, 20, 12), (77, 47, 32, 20), (111, 67, 46, 28), (144, 87, 60, 37), (178, 108, 74, 45),
(207, 125, 86, 53), (259, 157, 108, 66), (312, 189, 130, 80), (364, 221, 151, 93), (427, 259, 177, 109),
(489, 296, 203, 125), (580, 352, 241, 149), (621, 376, 258, 159), (703, 426, 292, 180), (775, 470, 322, 198),
(876, 531, 364, 224), (948, 574, 394, 243), (1063, 644, 442, 272), (1159, 702, 482, 297),
(1224, 742, 509, 314), (1358, 823, 565, 348), (1468, 890, 611, 376), (1588, 963, 661, 407),
(1718, 1041, 715, 440), (1804, 1094, 751, 462), (1933, 1172, 805, 496), (2085, 1263, 868, 534),
(2181, 1322, 908, 559), (2358, 1429, 982, 604), (2473, 1499, 1030, 634), (2670, 1618, 1112, 684),
(2805, 1700, 1168, 719), (2949, 1787, 1228, 756), (3081, 1867, 1283, 790), (3244, 1966, 1351, 832),
(3417, 2071, 1423, 876), (3599, 2181, 1499, 923), (3791, 2298, 1579, 972), (3993, 2420, 1663, 1024)],
'H': [(17, 10, 7, 4), (34, 20, 14, 8), (58, 35, 24, 15), (82, 50, 34, 21), (106, 64, 44, 27), (139, 84, 58, 36),
(154, 93, 64, 39), (202, 122, 84, 52), (235, 143, 98, 60), (288, 174, 119, 74), (331, 200, 137, 85),
(374, 227, 155, 96), (427, 259, 177, 109), (468, 283, 194, 120), (530, 321, 220, 136), (602, 365, 250, 154),
(674, 408, 280, 173), (746, 452, 310, 191), (813, 493, 338, 208), (919, 557, 382, 235), (969, 587, 403, 248),
(1056, 640, 439, 270), (1108, 672, 461, 284), (1228, 744, 511, 315), (1286, 779, 535, 330),
(1425, 864, 593, 365), (1501, 910, 625, 385), (1581, 958, 658, 405), (1677, 1016, 698, 430),
(1782, 1080, 742, 457), (1897, 1150, 790, 486), (2022, 1226, 842, 518), (2157, 1307, 898, 553),
(2301, 1394, 958, 590), (2361, 1431, 983, 605), (2524, 1530, 1051, 647), (2625, 1591, 1093, 673),
(2735, 1658, 1139, 701), (2927, 1774, 1219, 750), (3057, 1852, 1273, 784)]
}
mode_map = {'numeric': 0, 'alphanumeric': 1, 'byte': 2, 'kanji': 3}
mode_indicator_map = {'numeric': '0001', 'alphanumeric': '0010', 'byte': '0100', 'kanji': '1000'}
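# character_count_indicator_map: bit length of the character count field, indexed by version;
# each tuple is (numeric, alphanumeric, byte, kanji).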
character_count_indicator_map = [None, (10, 9, 8, 8), (10, 9, 8, 8), (10, 9, 8, 8), (10, 9, 8, 8),
(10, 9, 8, 8), (10, 9, 8, 8), (10, 9, 8, 8), (10, 9, 8, 8), (10, 9, 8, 8),
(12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10),
(12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10),
(12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10),
(12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10), (12, 11, 16, 10),
(12, 11, 16, 10), (14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12),
(14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12),
(14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12),
(14, 13, 16, 12), (14, 13, 16, 12), (14, 13, 16, 12)]
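# each_version_required_bytes: number of data codewords per version, indexed by version
# (index 0 unused); each entry is [L, M, Q, H].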
each_version_required_bytes = [
None, [19, 16, 13, 9], [34, 28, 22, 16], [55, 44, 34, 26], [80, 64, 48, 36], [108, 86, 62, 46], [136, 108, 76, 60],
[156, 124, 88, 66], [194, 154, 110, 86], [232, 182, 132, 100], [274, 216, 154, 122], [324, 254, 180, 140],
[370, 290, 206, 158], [428, 334, 244, 180], [461, 365, 261, 197], [523, 415, 295, 223], [589, 453, 325, 253],
[647, 507, 367, 283], [721, 563, 397, 313], [795, 627, 445, 341], [861, 669, 485, 385], [932, 714, 512, 406],
[1006, 782, 568, 442], [1094, 860, 614, 464], [1174, 914, 664, 514], [1276, 1000, 718, 538], [1370, 1062, 754, 596],
[1468, 1128, 808, 628], [1531, 1193, 871, 661], [1631, 1267, 911, 701], [1735, 1373, 985, 745],
[1843, 1455, 1033, 793], [1955, 1541, 1115, 845], [2071, 1631, 1171, 901], [2191, 1725, 1231, 961],
[2306, 1812, 1286, 986], [2434, 1914, 1354, 1054], [2566, 1992, 1426, 1096], [2702, 2102, 1502, 1142],
[2812, 2216, 1582, 1222], [2956, 2334, 1666, 1276]
]
num_list = '0123456789'
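# ecc_num_version_level_map: number of error-correction codewords per block, indexed by
# version (index 0 unused); each tuple is (L, M, Q, H).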
ecc_num_version_level_map = [
None, (7, 10, 13, 17), (10, 16, 22, 28), (15, 26, 18, 22), (20, 18, 26, 16), (26, 24, 18, 22), (18, 16, 24, 28),
(20, 18, 18, 26), (24, 22, 22, 26), (30, 22, 20, 24), (18, 26, 24, 28), (20, 30, 28, 24), (24, 22, 26, 28),
(26, 22, 24, 22), (30, 24, 20, 24), (22, 24, 30, 24), (24, 28, 24, 30), (28, 28, 28, 28), (30, 26, 28, 28),
(28, 26, 26, 26), (28, 26, 30, 28), (28, 26, 28, 30), (28, 28, 30, 24), (30, 28, 30, 30), (30, 28, 30, 30),
(26, 28, 30, 30), (28, 28, 28, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30),
(30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30),
(30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30), (30, 28, 30, 30)
]
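# Block structure per version: for each error-correction level (L, M, Q, H) a tuple of
# (group-1 block count, data codewords per group-1 block,
#  group-2 block count, data codewords per group-2 block).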
num_of_error_correction_blocks_2_error_correction_per_blocks = [
None,
[(1, 19, 0, 0), (1, 16, 0, 0), (1, 13, 0, 0), (1, 9, 0, 0)],
[(1, 34, 0, 0), (1, 28, 0, 0), (1, 22, 0, 0), (1, 16, 0, 0)],
[(1, 55, 0, 0), (1, 44, 0, 0), (2, 17, 0, 0), (2, 13, 0, 0)],
[(1, 80, 0, 0), (2, 32, 0, 0), (2, 24, 0, 0), (4, 9, 0, 0)],
[(1, 108, 0, 0), (2, 43, 0, 0), (2, 15, 2, 16), (2, 11, 2, 12)],
[(2, 68, 0, 0), (4, 27, 0, 0), (4, 19, 0, 0), (4, 15, 0, 0)],
[(2, 78, 0, 0), (4, 31, 0, 0), (2, 14, 4, 15), (4, 13, 1, 14)],
[(2, 97, 0, 0), (2, 38, 2, 39), (4, 18, 2, 19), (4, 14, 2, 15)],
[(2, 116, 0, 0), (3, 36, 2, 37), (4, 16, 4, 17), (4, 12, 4, 13)],
[(2, 68, 2, 69), (4, 43, 1, 44), (6, 19, 2, 20), (6, 15, 2, 16)],
[(4, 81, 0, 0), (1, 50, 4, 51), (4, 22, 4, 23), (3, 12, 8, 13)],
[(2, 92, 2, 93), (6, 36, 2, 37), (4, 20, 6, 21), (7, 14, 4, 15)],
[(4, 107, 0, 0), (8, 37, 1, 38), (8, 20, 4, 21), (12, 11, 4, 12)],
[(3, 115, 1, 116), (4, 40, 5, 41), (11, 16, 5, 17), (11, 12, 5, 13)],
[(5, 87, 1, 88), (5, 41, 5, 42), (5, 24, 7, 25), (11, 12, 7, 13)],
[(5, 98, 1, 99), (7, 45, 3, 46), (15, 19, 2, 20), (3, 15, 13, 16)],
[(1, 107, 5, 108), (10, 46, 1, 47), (1, 22, 15, 23), (2, 14, 17, 15)],
[(5, 120, 1, 121), (9, 43, 4, 44), (17, 22, 1, 23), (2, 14, 19, 15)],
[(3, 113, 4, 114), (3, 44, 11, 45), (17, 21, 4, 22), (9, 13, 16, 14)],
[(3, 107, 5, 108), (3, 41, 13, 42), (15, 24, 5, 25), (15, 15, 10, 16)],
[(4, 116, 4, 117), (17, 42, 0, 0), (17, 22, 6, 23), (19, 16, 6, 17)],
[(2, 111, 7, 112), (17, 46, 0, 0), (7, 24, 16, 25), (34, 13, 0, 0)],
[(4, 121, 5, 122), (4, 47, 14, 48), (11, 24, 14, 25), (16, 15, 14, 16)],
[(6, 117, 4, 118), (6, 45, 14, 46), (11, 24, 16, 25), (30, 16, 2, 17)],
[(8, 106, 4, 107), (8, 47, 13, 48), (7, 24, 22, 25), (22, 15, 13, 16)],
[(10, 114, 2, 115), (19, 46, 4, 47), (28, 22, 6, 23), (33, 16, 4, 17)],
[(8, 122, 4, 123), (22, 45, 3, 46), (8, 23, 26, 24), (12, 15, 28, 16)],
[(3, 117, 10, 118), (3, 45, 23, 46), (4, 24, 31, 25), (11, 15, 31, 16)],
[(7, 116, 7, 117), (21, 45, 7, 46), (1, 23, 37, 24), (19, 15, 26, 16)],
[(5, 115, 10, 116), (19, 47, 10, 48), (15, 24, 25, 25), (23, 15, 25, 16)],
[(13, 115, 3, 116), (2, 46, 29, 47), (42, 24, 1, 25), (23, 15, 28, 16)],
[(17, 115, 0, 0), (10, 46, 23, 47), (10, 24, 35, 25), (19, 15, 35, 16)],
[(17, 115, 1, 116), (14, 46, 21, 47), (29, 24, 19, 25), (11, 15, 46, 16)],
[(13, 115, 6, 116), (14, 46, 23, 47), (44, 24, 7, 25), (59, 16, 1, 17)],
[(12, 121, 7, 122), (12, 47, 26, 48), (39, 24, 14, 25), (22, 15, 41, 16)],
[(6, 121, 14, 122), (6, 47, 34, 48), (46, 24, 10, 25), (2, 15, 64, 16)],
[(17, 122, 4, 123), (29, 46, 14, 47), (49, 24, 10, 25), (24, 15, 46, 16)],
[(4, 122, 18, 123), (13, 46, 32, 47), (48, 24, 14, 25), (42, 15, 32, 16)],
[(20, 117, 4, 118), (40, 47, 7, 48), (43, 24, 22, 25), (10, 15, 67, 16)],
[(19, 118, 6, 119), (18, 47, 31, 48), (34, 24, 34, 25), (20, 15, 61, 16)]
]
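# remainder_bits: number of remainder (padding) bits appended to the final bit stream,
# indexed by version.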
remainder_bits = (None, 0, 7, 7, 7, 7, 7, 0, 0, 0, 0,
0, 0, 0, 3, 3, 3, 3, 3, 3, 3,
4, 4, 4, 4, 4, 4, 4, 3, 3, 3,
3, 3, 3, 3, 0, 0, 0, 0, 0, 0)
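# img_mode_2_color_map: [light, dark] pixel values used when rendering, keyed by Pillow
# image mode (the first value is assumed to be the background, the second the dark modules).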
img_mode_2_color_map = {'1': [1, 0], 'L': [255, 0], 'RGB': [(255, 255, 255), (0, 0, 0)],
'RGBA': [(255, 255, 255, 255), (0, 0, 0, 255)]}
|
PypiClean
|
/panddas-0.2.14.tar.gz/panddas-0.2.14/lib-python/bamboo/stats/ospina.py
|
from scipy.optimize import fsolve
import numpy
import time
def variable_sigma_d_log_likelihood(est_sigma, est_mu, obs_vals, obs_error):
"""Calculate the value of the differentiated log likelihood for the values of mu, sigma"""
term1 = (obs_vals - est_mu)**2 / ((est_sigma**2 + obs_error**2)**2)
term2 = 1 / (est_sigma**2 + obs_error**2)
return numpy.sum(term1) - numpy.sum(term2)
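# Sketch of the model assumed by the estimating equation above: each observation x_i is
# treated as drawn from N(est_mu, est_sigma**2 + obs_error_i**2), giving the log-likelihood
#   logL = -0.5 * sum_i[ log(2*pi*(est_sigma**2 + obs_error_i**2))
#                        + (x_i - est_mu)**2 / (est_sigma**2 + obs_error_i**2) ]
# Differentiating with respect to est_sigma**2 and setting the result to zero yields
#   sum_i (x_i - est_mu)**2 / (est_sigma**2 + obs_error_i**2)**2
#       - sum_i 1 / (est_sigma**2 + obs_error_i**2) = 0,
# i.e. term1 - term2 = 0, which fsolve solves for est_sigma in the function below.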
def estimate_true_underlying_sd(obs_vals, obs_error, est_mu=None, est_sigma=1e-16, try_number=1):
    """Given a set of observations `obs_vals` with estimated errors `obs_error`, estimate the sd of the underlying distribution (the sample mean is used for the mean unless `est_mu` is given)"""
if try_number > 1:
#print 'ITERATION {!s} - EST SIGMA: {!s}'.format(try_number, est_sigma)
if try_number > 10:
raise Exception('TOO MANY ITERATIONS IN OSPINA FUNCTION')
obs_vals = numpy.array(obs_vals)
obs_error = numpy.array(obs_error)
if not est_mu:
# Calculate the mean of the sample - this is a good estimator of the true mean
est_mu = numpy.mean(obs_vals)
# Estimate the sigma of the underlying distribution
answer = abs(fsolve(func=variable_sigma_d_log_likelihood, x0=est_sigma, args=(est_mu, obs_vals, obs_error)))[0]
if answer > 2e40:
est_sigma = est_sigma/1000
answer = estimate_true_underlying_sd(obs_vals=obs_vals, obs_error=obs_error, est_mu=est_mu, est_sigma=est_sigma, try_number=try_number+1)
return answer
if __name__ == '__main__':
# True values we are trying to estimate
true_mean = 1
true_sd = 0.11
# Number of observations
num_obs = 200
# Number of times to run
num_cycles = 10000
guesses = []
start_t = time.time()
    for attempt in range(num_cycles):
# print('============================>')
# Sample the original distribution
true_vals = true_mean + true_sd*numpy.random.randn(num_obs)
# print('MEAN OF TRUE VALS: {!s} ({!s})'.format(round(numpy.mean(true_vals),3), true_mean))
# print(' STD OF TRUE VALS: {!s} ({!s})'.format(round(numpy.std(true_vals),3), true_sd))
# Create a random selection of sigmas for the different observations
obs_error = numpy.abs(0.45 + 0.01*numpy.random.randn(num_obs))
# print('MEAN OF OBS ERROR: {!s} ({!s})'.format(round(numpy.mean(obs_error),3), '2ish'))
# print(' STD OF OBS ERROR: {!s} ({!s})'.format(round(numpy.std(obs_error),3), '1ish'))
# Noise to be added to the true observations
obs_noise = numpy.random.randn(num_obs)
# print('MEAN OF OBS NOISE: {!s} ({!s})'.format(round(numpy.mean(obs_noise),3), 0))
# print(' STD OF OBS NOISE: {!s} ({!s})'.format(round(numpy.std(obs_noise),3), 1))
# Create fake data!
obs_vals = true_vals + obs_error * obs_noise
# print(' MEAN OF OBS VALS: {!s}'.format(round(numpy.mean(obs_vals),3)))
# print(' STD OF OBS VALS: {!s}'.format(round(numpy.std(obs_vals),3)))
out = estimate_true_underlying_sd(obs_vals=obs_vals, obs_error=obs_error, est_sigma=0.1)
print(' ESTIMATED VALUES: {!s}'.format(round(out,5)))
guesses.append(out)
print('============================>')
end_t = time.time()
print(' TIME TAKEN TOTAL: {!s} (Seconds)'.format(int(end_t - start_t)))
print('TIME TAKEN PER CYCLE: {!s} (Millionths)'.format(int(1000000*(end_t-start_t)/num_cycles)))
print('============================>')
print('MEAN OF ESTIMATES: {!s}'.format(round(numpy.mean(guesses),5)))
print(' STD OF ESTIMATES: {!s}'.format(round(numpy.std(guesses),5)))
|
PypiClean
|
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/iotsitewise/update-asset-property.rst
|
**Example 1: To update an asset property's alias**
The following ``update-asset-property`` example updates a wind turbine asset's power property alias. ::
aws iotsitewise update-asset-property \
--asset-id a1b2c3d4-5678-90ab-cdef-33333EXAMPLE \
--property-id a1b2c3d4-5678-90ab-cdef-55555EXAMPLE \
--property-alias "/examplecorp/windfarm/1/turbine/1/power" \
--property-notification-state DISABLED
This command produces no output.
For more information, see `Mapping industrial data streams to asset properties <https://docs.aws.amazon.com/iot-sitewise/latest/userguide/connect-data-streams.html>`__ in the *AWS IoT SiteWise User Guide*.
**Example 2: To enable asset property notifications**
The following ``update-asset-property`` example enables asset property update notifications for a wind turbine asset's power property. Property value updates are published to the MQTT topic ``$aws/sitewise/asset-models/<assetModelId>/assets/<assetId>/properties/<propertyId>``, where each ID is replaced by the property, asset, and model ID of the asset property. ::
aws iotsitewise update-asset-property \
--asset-id a1b2c3d4-5678-90ab-cdef-33333EXAMPLE \
--property-id a1b2c3d4-5678-90ab-cdef-66666EXAMPLE \
--property-notification-state ENABLED \
--property-alias "/examplecorp/windfarm/1/turbine/1/power"
This command produces no output.
For more information, see `Interacting with other services <https://docs.aws.amazon.com/iot-sitewise/latest/userguide/interact-with-other-services.html>`__ in the *AWS IoT SiteWise User Guide*.
|
PypiClean
|
/skytime-0.16.1-py3-none-any.whl/build/lib/build/lib/build/lib/sktime/param_est/base.py
|
__author__ = ["fkiraly"]
__all__ = ["BaseParamFitter"]
from warnings import warn
from sktime.base import BaseEstimator
from sktime.datatypes import (
VectorizedDF,
check_is_scitype,
convert_to,
scitype_to_mtype,
update_data,
)
from sktime.utils.sklearn import is_sklearn_transformer
from sktime.utils.validation._dependencies import _check_estimator_deps
def _coerce_to_list(obj):
"""Return [obj] if obj is not a list, otherwise obj."""
if not isinstance(obj, list):
return [obj]
else:
return obj
class BaseParamFitter(BaseEstimator):
"""Base parameter fitting estimator class.
The base parameter fitter specifies the methods and method
    signatures that all parameter fitters have to implement.
    Specific implementations of these methods are deferred to concrete instances.
"""
# default tag values - these typically make the "safest" assumption
_tags = {
"X_inner_mtype": "pd.DataFrame", # which types do _fit/_predict, support for X?
"scitype:X": "Series", # which X scitypes are supported natively?
"capability:missing_values": False, # can estimator handle missing data?
"capability:multivariate": False, # can estimator handle multivariate data?
"python_version": None, # PEP 440 python version specifier to limit versions
"python_dependencies": None, # string or str list of pkg soft dependencies
}
def __init__(self):
self._is_fitted = False
self._X = None
super(BaseParamFitter, self).__init__()
_check_estimator_deps(self)
def __rmul__(self, other):
"""Magic * method, return concatenated ParamFitterPipeline, trafos on left.
        Overloaded multiplication operation for parameter fitters. Implemented for `other`
being a transformer, otherwise returns `NotImplemented`.
Parameters
----------
other: `sktime` transformer, must inherit from BaseTransformer
otherwise, `NotImplemented` is returned
Returns
-------
BaseParamFitter object, concatenation of `other` (first) with `self` (last).
"""
from sktime.param_est.compose import ParamFitterPipeline
from sktime.transformations.base import BaseTransformer
from sktime.transformations.compose import TransformerPipeline
from sktime.transformations.series.adapt import TabularToSeriesAdaptor
# behaviour is implemented only if other inherits from BaseTransformer
# in that case, distinctions arise from whether self or other is a pipeline
# todo: this can probably be simplified further with "zero length" pipelines
if isinstance(other, BaseTransformer):
            # ParamFitterPipeline already has the dunder method defined
if isinstance(self, ParamFitterPipeline):
return other * self
# if other is a TransformerPipeline but self is not, first unwrap it
elif isinstance(other, TransformerPipeline):
return ParamFitterPipeline(param_est=self, transformers=other.steps)
            # if neither self nor other are a pipeline, construct a ParamFitterPipeline
else:
return ParamFitterPipeline(param_est=self, transformers=[other])
elif is_sklearn_transformer(other):
return TabularToSeriesAdaptor(other) * self
else:
return NotImplemented
def fit(self, X):
"""Fit estimator and estimate parameters.
State change:
Changes state to "fitted".
Writes to self:
Sets self._is_fitted flag to True.
Writes `X` to self._X.
Sets fitted model attributes ending in "_".
Parameters
----------
X : time series in sktime compatible data container format
            Time series to which to fit the estimator.
            X can be in one of the following formats:
Series scitype: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Panel scitype: pd.DataFrame with 2-level row MultiIndex,
3D np.ndarray, list of Series pd.DataFrame, or nested pd.DataFrame
Hierarchical scitype: pd.DataFrame with 3 or more level row MultiIndex
For further details:
on usage, see forecasting tutorial examples/01_forecasting.ipynb
on specification of formats, examples/AA_datatypes_and_datasets.ipynb
Returns
-------
self : Reference to self.
"""
# check X is not None
assert X is not None, "X cannot be None, but found None"
# if fit is called, estimator is reset, including fitted state
self.reset()
        # check and convert X
X_inner = self._check_X(X=X)
# set internal X to the new X
self._update_X(X_inner)
# checks and conversions complete, pass to inner fit
#####################################################
self._fit(X=X_inner)
# this should happen last
self._is_fitted = True
return self
def update(self, X):
"""Update fitted parameters on more data.
If no estimator-specific update method has been implemented,
default fall-back is fitting to all observed data so far
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_".
Pointers to seen data, self._X
self._is_fitted
model attributes ending in "_".
Writes to self:
Update self._X with `X`, by appending rows.
Updates fitted model attributes ending in "_".
Parameters
----------
X : time series in sktime compatible data container format
            Time series with which to update the estimator.
            X can be in one of the following formats, must be same scitype as in fit:
Series scitype: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Panel scitype: pd.DataFrame with 2-level row MultiIndex,
3D np.ndarray, list of Series pd.DataFrame, or nested pd.DataFrame
Hierarchical scitype: pd.DataFrame with 3 or more level row MultiIndex
For further details:
on usage, see forecasting tutorial examples/01_forecasting.ipynb
on specification of formats, examples/AA_datatypes_and_datasets.ipynb
Returns
-------
self : reference to self
"""
self.check_is_fitted()
if X is None or (hasattr(X, "__len__") and len(X) == 0):
warn("empty y passed to update, no update was carried out")
return self
        # input checks and minor coercions on X
X_inner = self._check_X(X=X)
# update internal X with the new X
self._update_X(X_inner)
# checks and conversions complete, pass to inner update
self._update(X=X_inner)
return self
def _check_X(self, X=None):
"""Check and coerce X for fit/update functions.
Parameters
----------
X : time series in sktime compatible data container format
            Time series to check and coerce, as passed to fit or update.
            X can be in one of the following formats, must be same scitype as in fit:
Series scitype: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Panel scitype: pd.DataFrame with 2-level row MultiIndex,
3D np.ndarray, list of Series pd.DataFrame, or nested pd.DataFrame
Hierarchical scitype: pd.DataFrame with 3 or more level row MultiIndex
For further details:
on usage, see forecasting tutorial examples/01_forecasting.ipynb
on specification of formats, examples/AA_datatypes_and_datasets.ipynb
Returns
-------
X_inner : Series, Panel, or Hierarchical object
compatible with self.get_tag("X_inner_mtype") format
Case 1: self.get_tag("X_inner_mtype") supports scitype of X, then
converted/coerced version of X, mtype determined by "X_inner_mtype" tag
Case 2: None if X was None
Raises
------
TypeError if X is not one of the permissible Series mtypes
TypeError if X is of a different scitype as self.get_tag("scitype:X")
"""
if X is None:
return None
X_inner_mtype = _coerce_to_list(self.get_tag("X_inner_mtype"))
# X_inner_scitype = mtype_to_scitype(X_inner_mtype, return_unique=True)
ALLOWED_SCITYPES = _coerce_to_list(self.get_tag("scitype:X"))
FORBIDDEN_MTYPES = ["numpyflat", "pd-wide"]
for scitype in ALLOWED_SCITYPES:
mtypes = set(scitype_to_mtype(scitype))
mtypes = list(mtypes.difference(FORBIDDEN_MTYPES))
mtypes_msg = f'"For {scitype} scitype: {mtypes}. '
# checking X
X_valid, _, X_metadata = check_is_scitype(
X, scitype=ALLOWED_SCITYPES, return_metadata=True, var_name="X"
)
msg = (
"X must be in an sktime compatible format, "
f"of scitypes {ALLOWED_SCITYPES}, "
"for instance a pandas.DataFrame with sktime compatible time indices, "
"or with MultiIndex and last(-1) level an sktime compatible time index."
" See data format tutorial examples/AA_datatypes_and_datasets.ipynb,"
"If you think X is already in an sktime supported input format, "
"run sktime.datatypes.check_raise(X, mtype) to diagnose the error, "
"where mtype is the string of the type specification you want for y. "
"Possible mtype specification strings are as follows. "
)
if not X_valid:
raise TypeError(msg + mtypes_msg)
X_scitype = X_metadata["scitype"]
# end checking X
# converts X, converts None to None if X is None
X_inner = convert_to(
X,
to_type=X_inner_mtype,
as_scitype=X_scitype,
)
return X_inner
def _update_X(self, X):
"""Update internal memory of seen training data.
Accesses in self:
_X : only if exists, then assumed same type as X and same cols
these assumptions should be guaranteed by calls
Writes to self:
_X : same type as X - new rows from X are added to current _X
if _X does not exist, stores X as _X
_X is guaranteed to be one of mtypes:
pd.DataFrame, pd.Series, np.ndarray, pd-multiindex, numpy3D,
pd_multiindex_hier
Parameters
----------
X : time series in sktime compatible data container format
            New time series to add to the internally stored self._X.
            X can be in one of the following formats, must be same scitype as in fit:
Series scitype: pd.Series, pd.DataFrame, or np.ndarray (1D or 2D)
Panel scitype: pd.DataFrame with 2-level row MultiIndex,
3D np.ndarray, list of Series pd.DataFrame, or nested pd.DataFrame
Hierarchical scitype: pd.DataFrame with 3 or more level row MultiIndex
For further details:
on usage, see forecasting tutorial examples/01_forecasting.ipynb
on specification of formats, examples/AA_datatypes_and_datasets.ipynb
"""
if X is not None:
# unwrap X if VectorizedDF
if isinstance(X, VectorizedDF):
X = X.X_multiindex
# if _X does not exist yet, initialize it with X
if not hasattr(self, "_X") or self._X is None or not self.is_fitted:
self._X = X
else:
self._X = update_data(self._X, X)
def _fit(self, X):
"""Fit estimator and estimate parameters.
private _fit containing the core logic, called from fit
Writes to self:
Sets fitted model attributes ending in "_".
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
Time series to which to fit the estimator.
Returns
-------
self : reference to self
"""
raise NotImplementedError("abstract method")
def _update(self, X):
"""Update fitted parameters on more data.
private _update containing the core logic, called from update
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_"
Writes to self:
Sets fitted model attributes ending in "_"
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
Time series with which to update the estimator.
Returns
-------
self : reference to self
"""
# default to re-fitting if update is not implemented
warn(
f"NotImplementedWarning: {self.__class__.__name__} "
f"does not have a custom `update` method implemented. "
f"{self.__class__.__name__} will be refit each time "
f"`update` is called."
)
# refit with updated data, not only passed data
self.fit(X=self._X)
# todo: should probably be self._fit, not self.fit
# but looping to self.fit for now to avoid interface break
return self
def _get_fitted_params(self):
"""Get fitted parameters.
private _get_fitted_params, called from get_fitted_params
State required:
Requires state to be "fitted".
Returns
-------
fitted_params : dict
"""
# default retrieves all self attributes ending in "_"
# and returns them with keys that have the "_" removed
fitted_params = [attr for attr in dir(self) if attr.endswith("_")]
fitted_params = [x for x in fitted_params if not x.startswith("_")]
fitted_param_dict = {p[:-1]: getattr(self, p) for p in fitted_params}
return fitted_param_dict
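# --- illustrative usage sketch (not part of the original sktime module) ------
# A minimal concrete parameter fitter only needs to implement `_fit`; input
# checking, conversion, and the refit-based `update` fall-back are inherited
# from the base class above. The class and data below are hypothetical and
# only meant as an example.
if __name__ == "__main__":
    import pandas as pd
    class _MeanFitter(BaseParamFitter):
        """Toy fitter estimating the mean of a univariate series."""
        def _fit(self, X):
            # X arrives as a pd.DataFrame, per the default "X_inner_mtype" tag
            self.mean_ = float(X.iloc[:, 0].mean())
            return self
    fitter = _MeanFitter().fit(pd.DataFrame({"y": [1.0, 2.0, 3.0]}))
    print(fitter.mean_)  # 2.0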
|
PypiClean
|
/orbit_nrel-1.0.8-py3-none-any.whl/ORBIT/phases/install/oss_install/floating.py
|
__author__ = "Jake Nunemaker"
__copyright__ = "Copyright 2021, National Renewable Energy Laboratory"
__maintainer__ = "Jake Nunemaker"
__email__ = "[email protected]"
from marmot import Agent, process, le
from marmot._exceptions import AgentNotRegistered
from ORBIT.core import WetStorage
from ORBIT.core.logic import position_onsite
from ORBIT.phases.install import InstallPhase
from ORBIT.phases.install.mooring_install.mooring import (
install_mooring_line,
install_mooring_anchor,
perform_mooring_site_survey,
)
class FloatingSubstationInstallation(InstallPhase):
"""
Offshore Substation (OSS) installation process using the quayside assembly
and tow-out processes.
"""
phase = "Offshore Substation Installation"
capex_category = "Offshore Substation"
#:
expected_config = {
"num_substations": "int",
"oss_install_vessel": "str",
"site": {"distance": "km", "depth": "m"},
"offshore_substation_topside": {
"unit_cost": "USD",
"attach_time": "int | float (optional, default: 24)",
},
"offshore_substation_substructure": {
"type": "Floating",
"takt_time": "int | float (optional, default: 0)",
"unit_cost": "USD",
"mooring_cost": "USD",
"towing_speed": "int | float (optional, default: 6 km/h)",
},
}
def __init__(self, config, weather=None, **kwargs):
"""
        Creates an instance of FloatingSubstationInstallation.
Parameters
----------
config : dict
Simulation specific configuration.
weather : np.ndarray
Weather profile at site.
"""
super().__init__(weather, **kwargs)
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self.initialize_port()
self.setup_simulation(**kwargs)
def setup_simulation(self, **kwargs):
"""
Initializes required objects for simulation.
        - Initializes production of substation substructures at quayside
        - Initializes the substation installation (towing) vessel
"""
self.distance = self.config["site"]["distance"]
self.num_substations = self.config["num_substations"]
self.initialize_substructure_production()
self.initialize_installation_vessel()
@property
def system_capex(self):
"""Returns total procurement cost of the substation substructures,
topsides and mooring."""
topside = self.config["offshore_substation_topside"]["unit_cost"]
substructure = self.config["offshore_substation_substructure"][
"unit_cost"
]
mooring = self.config["offshore_substation_substructure"][
"mooring_cost"
]
return self.num_substations * (topside + substructure + mooring)
def initialize_substructure_production(self):
"""
Initializes the production of the floating substation substructures at
quayside.
"""
self.wet_storage = WetStorage(self.env, float("inf"))
takt_time = self.config["offshore_substation_substructure"].get(
"takt_time", 0
)
attach_time = self.config["offshore_substation_topside"].get(
"attach_time", 24
)
to_assemble = [1] * self.num_substations
self.assembly_line = SubstationAssemblyLine(
to_assemble, takt_time, attach_time, self.wet_storage, 1
)
self.env.register(self.assembly_line)
self.assembly_line.start()
def initialize_installation_vessel(self):
"""Initialize the floating substation installation vessel."""
support = self.config["oss_install_vessel"]
vessel = self.initialize_vessel(
"Floating Substation Installation Vessel", support
)
self.env.register(vessel)
vessel.initialize(mobilize=False)
self.support_vessel = vessel
depth = self.config["site"]["depth"]
towing_speed = self.config["offshore_substation_substructure"].get(
"towing_speed", 6
)
install_floating_substations(
self.support_vessel,
self.wet_storage,
self.distance,
towing_speed,
depth,
self.num_substations,
)
@property
def detailed_output(self):
return {}
@process
def install_floating_substations(
vessel, feed, distance, towing_speed, depth, number
):
"""
Process steps that installation vessel at site performs to install floating
substations.
Parameters
----------
vessel : Agent
Performing agent.
    feed : simpy.Resource
Wet storage for completed assemblies.
distance : int | float
Distance from port to site.
towing_speed : int | float
        Speed at which the completed assembly can be towed to site (km/h).
depth : int | float
Site depth (m).
number : int
Number of substations to install.
"""
travel_time = distance / towing_speed
for _ in range(number):
start = vessel.env.now
yield feed.get()
delay = vessel.env.now - start
if delay > 0:
vessel.submit_action_log(
"Delay: Waiting on Completed Assembly", delay
)
yield vessel.task(
"Tow Substation to Site",
travel_time,
constraints=vessel.operational_limits,
)
yield position_onsite(vessel)
yield vessel.task_wrapper(
"Ballast to Operational Draft",
6,
constraints={"windspeed": le(15), "waveheight": le(2.5)},
)
        for _ in range(3):
yield perform_mooring_site_survey(vessel)
yield install_mooring_anchor(vessel, depth, "Suction Pile")
yield install_mooring_line(vessel, depth)
yield vessel.task_wrapper(
"Connect Mooring Lines",
22,
suspendable=True,
constraints={"windspeed": le(15), "waveheight": le(2.5)},
)
yield vessel.task_wrapper(
"Check Mooring Lines",
12,
suspendable=True,
constraints={"windspeed": le(15), "waveheight": le(2.5)},
)
yield vessel.transit(distance)
class SubstationAssemblyLine(Agent):
"""Substation Assembly Line Class."""
def __init__(self, assigned, takt_time, attach_time, target, num):
"""
        Creates an instance of `SubstationAssemblyLine`.
Parameters
----------
assigned : list
List of assigned tasks. Can be shared with other assembly lines.
takt_time : int | float
Hours required to produce one substructure.
attach_time : int | float
Hours required to attach a topside to the substructure.
target : simpy.Store
Target storage.
num : int
Assembly line number designation.
"""
super().__init__(f"Substation Assembly Line {num}")
self.assigned = assigned
self.takt_time = takt_time
self.attach_time = attach_time
self.target = target
def submit_action_log(self, action, duration, **kwargs):
"""
Submits a log representing a completed `action` performed over time
`duration`.
This method overwrites the default `submit_action_log` in
`marmot.Agent`, adding operation cost to every submitted log within
ORBIT.
Parameters
----------
action : str
Performed action.
duration : int | float
Duration of action.
Raises
------
AgentNotRegistered
"""
if self.env is None:
raise AgentNotRegistered(self)
else:
payload = {
**kwargs,
"agent": str(self),
"action": action,
"duration": float(duration),
"cost": 0,
}
self.env._submit_log(payload, level="ACTION")
@process
def assemble_substructure(self):
"""
Simulation process for assembling a substructure.
"""
yield self.task("Substation Substructure Assembly", self.takt_time)
yield self.task("Attach Topside", self.attach_time)
substation = FloatingSubstation()
start = self.env.now
yield self.target.put(substation)
delay = self.env.now - start
if delay > 0:
self.submit_action_log("Delay: No Wet Storage Available", delay)
@process
def start(self):
"""
Trigger the assembly line to run. Will attempt to pull a task from
self.assigned and timeout for the assembly time. Shuts down after
self.assigned is empty.
"""
while True:
try:
_ = self.assigned.pop(0)
yield self.assemble_substructure()
except IndexError:
break
class FloatingSubstation:
"""Floating Substructure Class."""
def __init__(self):
"""Creates an instance of `Substructure`."""
pass
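# --- illustrative configuration sketch (not part of the original module) -----
# The dictionary below mirrors `expected_config` defined above; the vessel name
# and cost figures are hypothetical placeholders, not values shipped with ORBIT.
if __name__ == "__main__":
    example_config = {
        "num_substations": 1,
        "oss_install_vessel": "example_towing_vessel",
        "site": {"distance": 40, "depth": 600},
        "offshore_substation_topside": {"unit_cost": 100e6, "attach_time": 24},
        "offshore_substation_substructure": {
            "type": "Floating",
            "takt_time": 0,
            "unit_cost": 50e6,
            "mooring_cost": 10e6,
            "towing_speed": 6,
        },
    }
    # FloatingSubstationInstallation(example_config).run() would then simulate
    # the quayside assembly, tow-out, and mooring hook-up steps defined above.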
|
PypiClean
|
/epicyon-1.3.0-py3-none-any.whl/manualapprove.py
|
__filename__ = "manualapprove.py"
__author__ = "Bob Mottram"
__license__ = "AGPL3+"
__version__ = "1.2.0"
__maintainer__ = "Bob Mottram"
__email__ = "[email protected]"
__status__ = "Production"
import os
from follow import followedAccountAccepts
from follow import followedAccountRejects
from follow import removeFromFollowRequests
from utils import loadJson
def manualDenyFollowRequest(session, baseDir: str,
httpPrefix: str,
nickname: str, domain: str, port: int,
denyHandle: str,
federationList: [],
sendThreads: [], postLog: [],
cachedWebfingers: {}, personCache: {},
debug: bool,
projectVersion: str) -> None:
"""Manually deny a follow request
"""
handle = nickname + '@' + domain
accountsDir = baseDir + '/accounts/' + handle
# has this handle already been rejected?
rejectedFollowsFilename = accountsDir + '/followrejects.txt'
if os.path.isfile(rejectedFollowsFilename):
if denyHandle in open(rejectedFollowsFilename).read():
removeFromFollowRequests(baseDir, nickname, domain,
denyHandle, debug)
print(denyHandle + ' has already been rejected as a follower of ' +
nickname)
return
removeFromFollowRequests(baseDir, nickname, domain, denyHandle, debug)
# Store rejected follows
    with open(rejectedFollowsFilename, "a+") as rejectsFile:
        rejectsFile.write(denyHandle + '\n')
denyNickname = denyHandle.split('@')[0]
denyDomain = \
denyHandle.split('@')[1].replace('\n', '').replace('\r', '')
denyPort = port
if ':' in denyDomain:
denyPort = denyDomain.split(':')[1]
denyDomain = denyDomain.split(':')[0]
followedAccountRejects(session, baseDir, httpPrefix,
nickname, domain, port,
denyNickname, denyDomain, denyPort,
federationList,
sendThreads, postLog,
cachedWebfingers, personCache,
debug, projectVersion)
print('Follow request from ' + denyHandle + ' was denied.')
def _approveFollowerHandle(accountDir: str, approveHandle: str) -> None:
""" Record manually approved handles so that if they unfollow and then
re-follow later then they don't need to be manually approved again
"""
approvedFilename = accountDir + '/approved.txt'
if os.path.isfile(approvedFilename):
if approveHandle not in open(approvedFilename).read():
approvedFile = open(approvedFilename, "a+")
approvedFile.write(approveHandle + '\n')
approvedFile.close()
else:
approvedFile = open(approvedFilename, "w+")
approvedFile.write(approveHandle + '\n')
approvedFile.close()
def manualApproveFollowRequest(session, baseDir: str,
httpPrefix: str,
nickname: str, domain: str, port: int,
approveHandle: str,
federationList: [],
sendThreads: [], postLog: [],
cachedWebfingers: {}, personCache: {},
debug: bool,
projectVersion: str) -> None:
"""Manually approve a follow request
"""
handle = nickname + '@' + domain
print('Manual follow accept: ' + handle +
' approving follow request from ' + approveHandle)
accountDir = baseDir + '/accounts/' + handle
approveFollowsFilename = accountDir + '/followrequests.txt'
if not os.path.isfile(approveFollowsFilename):
print('Manual follow accept: follow requests file ' +
approveFollowsFilename + ' not found')
return
# is the handle in the requests file?
approveFollowsStr = ''
with open(approveFollowsFilename, 'r') as fpFollowers:
approveFollowsStr = fpFollowers.read()
exists = False
approveHandleFull = approveHandle
if approveHandle in approveFollowsStr:
exists = True
elif '@' in approveHandle:
reqNick = approveHandle.split('@')[0]
reqDomain = approveHandle.split('@')[1].strip()
reqPrefix = httpPrefix + '://' + reqDomain
if reqPrefix + '/profile/' + reqNick in approveFollowsStr:
exists = True
approveHandleFull = reqPrefix + '/profile/' + reqNick
elif reqPrefix + '/channel/' + reqNick in approveFollowsStr:
exists = True
approveHandleFull = reqPrefix + '/channel/' + reqNick
elif reqPrefix + '/accounts/' + reqNick in approveFollowsStr:
exists = True
approveHandleFull = reqPrefix + '/accounts/' + reqNick
if not exists:
print('Manual follow accept: ' + approveHandleFull +
' not in requests file "' +
approveFollowsStr.replace('\n', ' ') +
'" ' + approveFollowsFilename)
return
approvefilenew = open(approveFollowsFilename + '.new', 'w+')
updateApprovedFollowers = False
followActivityfilename = None
with open(approveFollowsFilename, 'r') as approvefile:
for handleOfFollowRequester in approvefile:
# is this the approved follow?
if handleOfFollowRequester.startswith(approveHandleFull):
handleOfFollowRequester = \
handleOfFollowRequester.replace('\n', '').replace('\r', '')
port2 = port
if ':' in handleOfFollowRequester:
port2Str = handleOfFollowRequester.split(':')[1]
if port2Str.isdigit():
port2 = int(port2Str)
requestsDir = accountDir + '/requests'
followActivityfilename = \
requestsDir + '/' + handleOfFollowRequester + '.follow'
if os.path.isfile(followActivityfilename):
followJson = loadJson(followActivityfilename)
if followJson:
approveNickname = approveHandle.split('@')[0]
approveDomain = approveHandle.split('@')[1]
approveDomain = \
approveDomain.replace('\n', '').replace('\r', '')
approvePort = port2
if ':' in approveDomain:
approvePort = approveDomain.split(':')[1]
approveDomain = approveDomain.split(':')[0]
print('Manual follow accept: Sending Accept for ' +
handle + ' follow request from ' +
approveNickname + '@' + approveDomain)
followedAccountAccepts(session, baseDir, httpPrefix,
nickname, domain, port,
approveNickname, approveDomain,
approvePort,
followJson['actor'],
federationList,
followJson,
sendThreads, postLog,
cachedWebfingers, personCache,
debug, projectVersion, False)
updateApprovedFollowers = True
else:
# this isn't the approved follow so it will remain
# in the requests file
approvefilenew.write(handleOfFollowRequester)
approvefilenew.close()
followersFilename = accountDir + '/followers.txt'
if updateApprovedFollowers:
# update the followers
print('Manual follow accept: updating ' + followersFilename)
if os.path.isfile(followersFilename):
if approveHandleFull not in open(followersFilename).read():
try:
with open(followersFilename, 'r+') as followersFile:
content = followersFile.read()
if approveHandleFull + '\n' not in content:
followersFile.seek(0, 0)
followersFile.write(approveHandleFull + '\n' +
content)
except Exception as e:
print('WARN: Manual follow accept. ' +
'Failed to write entry to followers file ' + str(e))
else:
print('WARN: Manual follow accept: ' + approveHandleFull +
' already exists in ' + followersFilename)
else:
print('Manual follow accept: first follower accepted for ' +
handle + ' is ' + approveHandleFull)
        with open(followersFilename, "w+") as followersFile:
            followersFile.write(approveHandleFull + '\n')
# only update the follow requests file if the follow is confirmed to be
# in followers.txt
    if os.path.isfile(followersFilename) and \
            approveHandleFull in open(followersFilename).read():
# mark this handle as approved for following
_approveFollowerHandle(accountDir, approveHandle)
# update the follow requests with the handles not yet approved
os.rename(approveFollowsFilename + '.new', approveFollowsFilename)
# remove the .follow file
if followActivityfilename:
if os.path.isfile(followActivityfilename):
os.remove(followActivityfilename)
else:
os.remove(approveFollowsFilename + '.new')
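# --- illustrative call sketch (not part of the original module) --------------
# Both entry points expect epicyon's flat-file account layout, e.g.
#   <baseDir>/accounts/<nickname>@<domain>/followrequests.txt
# The handle and server-side arguments below are hypothetical; in practice they
# come from the running epicyon server.
#
#   manualApproveFollowRequest(session, baseDir, 'https',
#                              'alice', 'example.org', 443,
#                              '[email protected]',
#                              federationList, sendThreads, postLog,
#                              cachedWebfingers, personCache,
#                              False, projectVersion)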
|
PypiClean
|
/ceh_core_infa-0.3.tar.gz/ceh_core_infa-0.3/ceh_core_infa/libs/af_lib/operators/interface/check_resource_locking_interface.py
|
from .base_interface import BaseInterface
from .....libs.exceptions.exception import ResourceLockedException
from .....libs.clients.ceh_resource_client import CehResourse
class CheckResourceLockingInterfaceOld(BaseInterface):
def __init__(self, xcom_params, *args, **kwargs):
super().__init__(*args, **kwargs)
self.xcom_params = xcom_params
@staticmethod
def __check_if_locked(resource_cd):
cur_res = CehResourse.get_resourse(resource_cd=resource_cd)
if not cur_res.state:
return True
cur_locked = CehResourse.get_resource_state(
resource_cd=resource_cd
).is_locked
if cur_locked:
raise ResourceLockedException(
f'The resource {resource_cd} is locked.'
)
return cur_locked
def execute(self):
params = self.xcom_params.value.items()
cache = set()
for i, elem in params:
resource_cd = elem['tgt_resource_name']
if resource_cd in cache:
continue
else:
cache.add(resource_cd)
self.log.info(f'Checking if {resource_cd} is locked')
de = self.dynamic_executor(
ex_func=self.__check_if_locked,
op_kwargs={'resource_cd': resource_cd, },
timer=self.timer
)
de.executor()
self.log.info(
f'The check is over. The resource {resource_cd} is free.'
)
class CheckResourceLockingInterface(BaseInterface):
def __init__(self, resources, *args, **kwargs):
super().__init__(*args, **kwargs)
self.resources = resources
@staticmethod
def __check_if_locked(resource_cd):
cur_res = CehResourse.get_resourse(resource_cd=resource_cd)
if not cur_res.state:
return True
cur_locked = CehResourse.get_resource_state(
resource_cd=resource_cd
).is_locked
if cur_locked:
raise ResourceLockedException(
f'The resource {resource_cd} is locked.'
)
return cur_locked
def execute(self):
params = self.resources.items()
cache = set()
for i, elem in params:
resource_cd = elem['tgt_resource_name']
if resource_cd in cache:
continue
else:
cache.add(resource_cd)
self.log.info(f'Checking if {resource_cd} is locked')
de = self.dynamic_executor(
ex_func=self.__check_if_locked,
op_kwargs={'resource_cd': resource_cd, },
timer=self.timer
)
de.executor()
self.log.info(
f'The check is over. The resource {resource_cd} is free.'
)
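# --- illustrative input sketch (not part of the original module) -------------
# `resources` is expected to be a mapping whose values carry the target
# resource code under 'tgt_resource_name'; the keys and names below are made up.
#
#   resources = {
#       'task_1': {'tgt_resource_name': 'resource_a'},
#       'task_2': {'tgt_resource_name': 'resource_b'},
#   }
#   CheckResourceLockingInterface(resources, ...).execute()
#
# Each distinct resource code is checked once; a locked resource raises
# ResourceLockedException, a free one is logged as free.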
|
PypiClean
|
/pulumi_yandex_unofficial-0.1.8.tar.gz/pulumi_yandex_unofficial-0.1.8/pulumi_yandex_unofficial/get_alb_target_group.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
__all__ = [
'GetAlbTargetGroupResult',
'AwaitableGetAlbTargetGroupResult',
'get_alb_target_group',
'get_alb_target_group_output',
]
@pulumi.output_type
class GetAlbTargetGroupResult:
"""
A collection of values returned by getAlbTargetGroup.
"""
def __init__(__self__, created_at=None, description=None, folder_id=None, id=None, labels=None, name=None, target_group_id=None, targets=None):
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if folder_id and not isinstance(folder_id, str):
raise TypeError("Expected argument 'folder_id' to be a str")
pulumi.set(__self__, "folder_id", folder_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if target_group_id and not isinstance(target_group_id, str):
raise TypeError("Expected argument 'target_group_id' to be a str")
pulumi.set(__self__, "target_group_id", target_group_id)
if targets and not isinstance(targets, list):
raise TypeError("Expected argument 'targets' to be a list")
pulumi.set(__self__, "targets", targets)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
"""
Creation timestamp of this target group.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the target group.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="folderId")
def folder_id(self) -> str:
return pulumi.get(self, "folder_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Labels to assign to this target group.
* `target.0.ip_address` - IP address of the target.
* `target.0.subnet_id` - ID of the subnet that targets are connected to.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="targetGroupId")
def target_group_id(self) -> str:
return pulumi.get(self, "target_group_id")
@property
@pulumi.getter
def targets(self) -> Sequence['outputs.GetAlbTargetGroupTargetResult']:
return pulumi.get(self, "targets")
class AwaitableGetAlbTargetGroupResult(GetAlbTargetGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAlbTargetGroupResult(
created_at=self.created_at,
description=self.description,
folder_id=self.folder_id,
id=self.id,
labels=self.labels,
name=self.name,
target_group_id=self.target_group_id,
targets=self.targets)
def get_alb_target_group(description: Optional[str] = None,
folder_id: Optional[str] = None,
name: Optional[str] = None,
target_group_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAlbTargetGroupResult:
"""
Get information about a Yandex Application Load Balancer target group. For more information, see
[Yandex.Cloud Application Load Balancer](https://cloud.yandex.com/en/docs/application-load-balancer/quickstart).
```python
import pulumi
import pulumi_yandex as yandex
foo = yandex.get_alb_target_group(target_group_id="my-target-group-id")
```
This data source is used to define [Application Load Balancer Target Groups] that can be used by other resources.
:param str description: Description of the target group.
:param str folder_id: Folder that the resource belongs to. If value is omitted, the default provider folder is used.
    :param str name: Name of the Target Group.
:param str target_group_id: Target Group ID.
"""
__args__ = dict()
__args__['description'] = description
__args__['folderId'] = folder_id
__args__['name'] = name
__args__['targetGroupId'] = target_group_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('yandex:index/getAlbTargetGroup:getAlbTargetGroup', __args__, opts=opts, typ=GetAlbTargetGroupResult).value
return AwaitableGetAlbTargetGroupResult(
created_at=__ret__.created_at,
description=__ret__.description,
folder_id=__ret__.folder_id,
id=__ret__.id,
labels=__ret__.labels,
name=__ret__.name,
target_group_id=__ret__.target_group_id,
targets=__ret__.targets)
@_utilities.lift_output_func(get_alb_target_group)
def get_alb_target_group_output(description: Optional[pulumi.Input[Optional[str]]] = None,
folder_id: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[Optional[str]]] = None,
target_group_id: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAlbTargetGroupResult]:
"""
Get information about a Yandex Application Load Balancer target group. For more information, see
[Yandex.Cloud Application Load Balancer](https://cloud.yandex.com/en/docs/application-load-balancer/quickstart).
```python
import pulumi
import pulumi_yandex as yandex
foo = yandex.get_alb_target_group(target_group_id="my-target-group-id")
```
This data source is used to define [Application Load Balancer Target Groups] that can be used by other resources.
:param str description: Description of the target group.
:param str folder_id: Folder that the resource belongs to. If value is omitted, the default provider folder is used.
    :param str name: Name of the Target Group.
:param str target_group_id: Target Group ID.
"""
...
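# --- illustrative usage sketch (not part of the original module) -------------
# Inside a Pulumi program using this (unofficial) provider package, the lookup
# can also be chained from another resource's output; the names below are
# hypothetical.
#
#   import pulumi
#   import pulumi_yandex_unofficial as yandex
#
#   tg = yandex.get_alb_target_group_output(name="my-target-group")
#   pulumi.export("target_group_id", tg.target_group_id)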
|
PypiClean
|
/RsCMPX_Gprf-4.0.171.tar.gz/RsCMPX_Gprf-4.0.171/RsCMPX_Gprf/Implementations/Trigger/Gprf/Measurement/Power/ParameterSetList/Offset.py
|
from typing import List
from .......Internal.Core import Core
from .......Internal.CommandsGroup import CommandsGroup
from .......Internal import Conversions
from .......Internal.Types import DataType
from .......Internal.ArgSingleList import ArgSingleList
from .......Internal.ArgSingle import ArgSingle
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class OffsetCls:
"""Offset commands group definition. 2 total commands, 0 Subgroups, 2 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("offset", core, parent)
def set(self, index: int, trigger_offset: float) -> None:
"""SCPI: TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet \n
Snippet: driver.trigger.gprf.measurement.power.parameterSetList.offset.set(index = 1, trigger_offset = 1.0) \n
Defines a delay time relative to the trigger event for the parameter set <Index>. \n
:param index: No help available
:param trigger_offset: No help available
"""
param = ArgSingleList().compose_cmd_string(ArgSingle('index', index, DataType.Integer), ArgSingle('trigger_offset', trigger_offset, DataType.Float))
self._core.io.write(f'TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet {param}'.rstrip())
def get(self, index: int) -> float:
"""SCPI: TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet \n
Snippet: value: float = driver.trigger.gprf.measurement.power.parameterSetList.offset.get(index = 1) \n
Defines a delay time relative to the trigger event for the parameter set <Index>. \n
:param index: No help available
:return: trigger_offset: No help available"""
param = Conversions.decimal_value_to_str(index)
response = self._core.io.query_str(f'TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet? {param}')
return Conversions.str_to_float(response)
def get_all(self) -> List[float]:
"""SCPI: TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet:ALL \n
Snippet: value: List[float] = driver.trigger.gprf.measurement.power.parameterSetList.offset.get_all() \n
Defines a delay time relative to the trigger event for all parameter sets. \n
:return: trigger_offset: Comma-separated list of 32 offsets, for parameter set 0 to 31
"""
response = self._core.io.query_bin_or_ascii_float_list('TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet:ALL?')
return response
def set_all(self, trigger_offset: List[float]) -> None:
"""SCPI: TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet:ALL \n
Snippet: driver.trigger.gprf.measurement.power.parameterSetList.offset.set_all(trigger_offset = [1.1, 2.2, 3.3]) \n
Defines a delay time relative to the trigger event for all parameter sets. \n
:param trigger_offset: Comma-separated list of 32 offsets, for parameter set 0 to 31
"""
param = Conversions.list_to_csv_str(trigger_offset)
self._core.io.write(f'TRIGger:GPRF:MEASurement<Instance>:POWer:PSET:OFFSet:ALL {param}')
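# --- illustrative usage sketch (not part of the original module) -------------
# With an initialized RsCMPX_Gprf driver (connection details are hypothetical),
# the per-set and all-set commands map to the snippets documented above:
#
#   offs = driver.trigger.gprf.measurement.power.parameterSetList.offset
#   offs.set(index=0, trigger_offset=0.002)      # offset for parameter set 0
#   offs.set_all(trigger_offset=[0.0] * 32)      # offsets for sets 0..31
#   print(offs.get_all())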
|
PypiClean
|
/opendr-toolkit-pose-estimation-2.2.0.tar.gz/opendr-toolkit-pose-estimation-2.2.0/src/opendr/perception/pose_estimation/lightweight_open_pose/algorithm/scripts/prepare_train_labels.py
|
import json
import pickle
def prepare_annotations(annotations_per_image, images_info, net_input_size):
"""Prepare labels for training. For each annotated person calculates center
to perform crop around it during the training. Also converts data to the internal format.
:param annotations_per_image: all annotations for specified image id
:param images_info: auxiliary information about all images
:param net_input_size: network input size during training
:return: list of prepared annotations
"""
prepared_annotations = []
for _, annotations in annotations_per_image.items():
previous_centers = []
for annotation in annotations[0]:
if annotation['num_keypoints'] < 5 or annotation['area'] < 32 * 32:
continue
person_center = [annotation['bbox'][0] + annotation['bbox'][2] / 2,
annotation['bbox'][1] + annotation['bbox'][3] / 2]
is_close = False
for previous_center in previous_centers:
distance_to_previous = ((person_center[0] - previous_center[0]) ** 2 +
(person_center[1] - previous_center[1]) ** 2) ** 0.5
if distance_to_previous < previous_center[2] * 0.3:
is_close = True
break
if is_close:
continue
prepared_annotation = {
'img_paths': images_info[annotation['image_id']]['file_name'],
'img_width': images_info[annotation['image_id']]['width'],
'img_height': images_info[annotation['image_id']]['height'],
'objpos': person_center,
'image_id': annotation['image_id'],
'bbox': annotation['bbox'],
'segment_area': annotation['area'],
'scale_provided': annotation['bbox'][3] / net_input_size,
'num_keypoints': annotation['num_keypoints'],
'segmentations': annotations[1]
}
keypoints = []
for i in range(len(annotation['keypoints']) // 3):
keypoint = [annotation['keypoints'][i * 3], annotation['keypoints'][i * 3 + 1], 2]
if annotation['keypoints'][i * 3 + 2] == 1:
keypoint[2] = 0
elif annotation['keypoints'][i * 3 + 2] == 2:
keypoint[2] = 1
keypoints.append(keypoint)
prepared_annotation['keypoints'] = keypoints
prepared_other_annotations = []
for other_annotation in annotations[0]:
if other_annotation == annotation:
continue
prepared_other_annotation = {
'objpos': [other_annotation['bbox'][0] + other_annotation['bbox'][2] / 2,
other_annotation['bbox'][1] + other_annotation['bbox'][3] / 2],
'bbox': other_annotation['bbox'],
'segment_area': other_annotation['area'],
'scale_provided': other_annotation['bbox'][3] / net_input_size,
'num_keypoints': other_annotation['num_keypoints']
}
keypoints = []
for i in range(len(other_annotation['keypoints']) // 3):
keypoint = [other_annotation['keypoints'][i * 3], other_annotation['keypoints'][i * 3 + 1], 2]
if other_annotation['keypoints'][i * 3 + 2] == 1:
keypoint[2] = 0
elif other_annotation['keypoints'][i * 3 + 2] == 2:
keypoint[2] = 1
keypoints.append(keypoint)
prepared_other_annotation['keypoints'] = keypoints
prepared_other_annotations.append(prepared_other_annotation)
prepared_annotation['processed_other_annotations'] = prepared_other_annotations
prepared_annotations.append(prepared_annotation)
previous_centers.append((person_center[0], person_center[1], annotation['bbox'][2], annotation['bbox'][3]))
return prepared_annotations
def convert_annotations(labels, output_path="prepared_train_annotation.pkl", net_input_size=368):
"""
:param labels: path to json with keypoints train labels
:param output_path: name of output file with prepared keypoints annotation, defaults to
"prepared_train_annotation.pkl"
:param net_input_size: network input size, defaults to 368
"""
with open(labels, 'r') as f:
data = json.load(f)
annotations_per_image_mapping = {}
for annotation in data['annotations']:
if annotation['num_keypoints'] != 0 and not annotation['iscrowd']:
if annotation['image_id'] not in annotations_per_image_mapping:
annotations_per_image_mapping[annotation['image_id']] = [[], []]
annotations_per_image_mapping[annotation['image_id']][0].append(annotation)
crowd_segmentations_per_image_mapping = {}
for annotation in data['annotations']:
if annotation['iscrowd']:
if annotation['image_id'] not in crowd_segmentations_per_image_mapping:
crowd_segmentations_per_image_mapping[annotation['image_id']] = []
crowd_segmentations_per_image_mapping[annotation['image_id']].append(annotation['segmentation'])
for image_id, crowd_segmentations in crowd_segmentations_per_image_mapping.items():
if image_id in annotations_per_image_mapping:
annotations_per_image_mapping[image_id][1] = crowd_segmentations
images_info = {}
for image_info in data['images']:
images_info[image_info['id']] = image_info
prepared_annotations = prepare_annotations(annotations_per_image_mapping, images_info, net_input_size)
with open(output_path, 'wb') as f:
pickle.dump(prepared_annotations, f)
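# --- illustrative usage sketch (not part of the original module) -------------
# The paths below are hypothetical; the input is a COCO-style keypoints
# annotation file (e.g. person_keypoints_train2017.json).
if __name__ == "__main__":
    convert_annotations(
        "annotations/person_keypoints_train2017.json",
        output_path="prepared_train_annotation.pkl",
        net_input_size=368,
    )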
|
PypiClean
|
/streamlit-elements-0.1.0.tar.gz/streamlit-elements-0.1.0/streamlit_elements/frontend/build/_next/static/chunks/4814.79a05d5d2a98ae9d.js
|
"use strict";(self.webpackChunk_N_E=self.webpackChunk_N_E||[]).push([[4814],{544814:function(e,n,t){t.r(n),t.d(n,{InnerSwarmPlotCanvas:function(){return le},ResponsiveSwarmPlot:function(){return ie},ResponsiveSwarmPlotCanvas:function(){return ce},SwarmPlot:function(){return oe},SwarmPlotCanvas:function(){return se},SwarmPlotTooltip:function(){return Z},computeForces:function(){return T},computeNodes:function(){return G},computeOrdinalScale:function(){return R},computeValueScale:function(){return P},defaultProps:function(){return F},getSizeGenerator:function(){return A},renderCircleDefault:function(){return ue},useBorderWidth:function(){return Y},useForces:function(){return N},useNodeMouseHandlers:function(){return _},useOrdinalScale:function(){return O},useSwarmPlot:function(){return X},useSwarmPlotAnnotations:function(){return K},useSwarmPlotLayerContext:function(){return U},useValueScale:function(){return H}});var o=t(667294),i=t(543060),r=t(475370),a=t(34887),u=t(449742),l=t(785893),s=t(649446),c=t(43790),d=t(281763),f=t.n(d),v=t(968630),h=t.n(v),g=t(747037),p=t.n(g),m=t(227361),y=t.n(m),x=t(357603),M=t(66871),S=t(547148),w=t(627898),C=t(608691);function b(e){return e.x+e.vx}function k(e){return e.y+e.vy}function z(e){var n,t,o,i=(0,w.Z)(.1);function r(e){for(var i,r=0,a=n.length;r<a;++r)(i=n[r]).vx+=(o[r]-i.x)*t[r]*e}function a(){if(n){var r,a=n.length;for(t=new Array(a),o=new Array(a),r=0;r<a;++r)t[r]=isNaN(o[r]=+e(n[r],r,n))?0:+i(n[r],r,n)}}return"function"!==typeof e&&(e=(0,w.Z)(null==e?0:+e)),r.initialize=function(e){n=e,a()},r.strength=function(e){return arguments.length?(i="function"===typeof e?e:(0,w.Z)(+e),a(),r):i},r.x=function(n){return arguments.length?(e="function"===typeof n?n:(0,w.Z)(+n),a(),r):e},r}function W(e){var n,t,o,i=(0,w.Z)(.1);function r(e){for(var i,r=0,a=n.length;r<a;++r)(i=n[r]).vy+=(o[r]-i.y)*t[r]*e}function a(){if(n){var r,a=n.length;for(t=new Array(a),o=new Array(a),r=0;r<a;++r)t[r]=isNaN(o[r]=+e(n[r],r,n))?0:+i(n[r],r,n)}}return"function"!==typeof e&&(e=(0,w.Z)(null==e?0:+e)),r.initialize=function(e){n=e,a()},r.strength=function(e){return arguments.length?(i="function"===typeof e?e:(0,w.Z)(+e),a(),r):i},r.y=function(n){return arguments.length?(e="function"===typeof n?n:(0,w.Z)(+n),a(),r):e},r}var I=t(587503),E=t(75837),B=t(281472);function j(){return j=Object.assign||function(e){for(var n=1;n<arguments.length;n++){var t=arguments[n];for(var o in t)Object.prototype.hasOwnProperty.call(t,o)&&(e[o]=t[o])}return e},j.apply(this,arguments)}function L(e,n){if(null==e)return{};var t,o,i={},r=Object.keys(e);for(o=0;o<r.length;o++)t=r[o],n.indexOf(t)>=0||(i[t]=e[t]);return i}var V,Z=function(e){var n=e.id,t=e.formattedValue,o=e.color;return(0,l.jsx)(u._5,{id:n,value:t,enableChip:!0,color:o})},F={id:"id",value:"value",valueScale:{type:"linear",min:0,max:"auto"},groupBy:"group",size:6,spacing:2,layout:"vertical",gap:0,forceStrength:1,simulationIterations:120,colors:{scheme:"nivo"},colorBy:"group",borderWidth:0,borderColor:"rgba(0, 0, 0, 0)",layers:["grid","axes","circles","annotations","mesh"],enableGridX:!0,enableGridY:!0,axisTop:{},axisRight:{},axisBottom:{},axisLeft:{},isInteractive:!0,useMesh:!1,debugMesh:!1,tooltip:Z,animate:!0,motionConfig:"gentle",annotations:[],role:"img",pixelRatio:"undefined"!=typeof window&&null!=(V=window.devicePixelRatio)?V:1},R=function(e){var n=e.width,t=e.height,o=e.axis,i=e.groups,r=e.gap;if(!Array.isArray(i)||0===i.length)throw new Error("'groups' should be an array containing at least one item");var 
a,u=i.length;"x"===o?a=(t-r*(u-1))/u:"y"===o&&(a=(n-r*(u-1))/u);var l=i.map((function(e,n){return n*(a+r)+a/2}));return(0,x.Z)(l).domain(i)},P=function(e){var n=e.width,t=e.height,o=e.axis,i=e.getValue,r=e.scale,a=e.data.map(i);if("time"===r.type){var u=[{data:a.map((function(e){var n;return{data:(n={x:null,y:null},n[o]=e,n)}}))}],l=(0,E.OO)(u,o,r);return(0,E.ZN)(r,l,"x"===o?n:t,o)}var s=Math.min.apply(Math,a),c=Math.max.apply(Math,a);return(0,E.ZN)(r,{all:a,min:s,max:c},"x"===o?n:t,o)},A=function(e){if("function"==typeof e)return e;if(f()(e))return function(){return e};if(h()(e)){if(!p()(e.key))throw new Error("Size is invalid, key should be a string pointing to the property to use to determine node size");if(!Array.isArray(e.values)||2!==e.values.length)throw new Error("Size is invalid, values spec should be an array containing two values, min and max");if(!Array.isArray(e.sizes)||2!==e.sizes.length)throw new Error("Size is invalid, sizes spec should be an array containing two values, min and max");var n=(0,M.Z)().domain([e.values[0],e.values[1]]).range([e.sizes[0],e.sizes[1]]);return function(t){return n(y()(t,e.key))}}throw new Error("Size is invalid, it should be either a function, a number or an object")},T=function(e){var n,t,o=e.axis,i=e.valueScale,r=e.ordinalScale,a=e.spacing,u=e.forceStrength,l=function(e){var n,t,o,i=1,r=1;function a(){for(var e,a,l,s,c,d,f,v=n.length,h=0;h<r;++h)for(a=(0,S.Z)(n,b,k).visitAfter(u),e=0;e<v;++e)l=n[e],d=t[l.index],f=d*d,s=l.x+l.vx,c=l.y+l.vy,a.visit(g);function g(e,n,t,r,a){var u=e.data,v=e.r,h=d+v;if(!u)return n>s+h||r<s-h||t>c+h||a<c-h;if(u.index>l.index){var g=s-u.x-u.vx,p=c-u.y-u.vy,m=g*g+p*p;m<h*h&&(0===g&&(m+=(g=(0,C.Z)(o))*g),0===p&&(m+=(p=(0,C.Z)(o))*p),m=(h-(m=Math.sqrt(m)))/m*i,l.vx+=(g*=m)*(h=(v*=v)/(f+v)),l.vy+=(p*=m)*h,u.vx-=g*(h=1-h),u.vy-=p*h)}}}function u(e){if(e.data)return e.r=t[e.data.index];for(var n=e.r=0;n<4;++n)e[n]&&e[n].r>e.r&&(e.r=e[n].r)}function l(){if(n){var o,i,r=n.length;for(t=new Array(r),o=0;o<r;++o)i=n[o],t[i.index]=+e(i,o,n)}}return"function"!==typeof e&&(e=(0,w.Z)(null==e?1:+e)),a.initialize=function(e,t){n=e,o=t,l()},a.iterations=function(e){return arguments.length?(r=+e,a):r},a.strength=function(e){return arguments.length?(i=+e,a):i},a.radius=function(n){return arguments.length?(e="function"===typeof n?n:(0,w.Z)(+n),l(),a):e},a}((function(e){return e.size/2+a/2}));if("x"===o)n=z((function(e){return i(e.value)})).strength(u),t=W((function(e){return r(e.group)}));else{if("y"!==o)throw new Error("Invalid axis provided: "+o);n=z((function(e){return r(e.group)})),t=W((function(e){return i(e.value)})).strength(u)}return{x:n,y:t,collision:l}},G=function(e){var n,t,o=e.data,i=e.getId,r=e.layout,a=e.getValue,u=e.valueScale,l=e.getGroup,s=e.ordinalScale,c=e.getSize,d=e.forces,f=e.simulationIterations,v={horizontal:["x","y"],vertical:["y","x"]},h="time"===(t=e.valueScaleConfig).type&&"native"!==t.format?(0,E.KD)(t):function(e){return e},g=o.map((function(e){return{id:i(e),group:l(e),value:h(a(e)),size:c(e),data:j({},e)}})),p=(0,I.Z)(g).force("x",d.x).force("y",d.y).force("collide",d.collision).stop();return p.tick(f),(n={})[v[r][0]+"Scale"]=u,n[v[r][1]+"Scale"]=s,n.nodes=p.nodes(),n},H=function(e){var n=e.width,t=e.height,i=e.axis,r=e.getValue,a=e.scale,u=e.data;return(0,o.useMemo)((function(){return P({width:n,height:t,axis:i,getValue:r,scale:a,data:u})}),[n,t,i,r,a,u])},O=function(e){var n=e.width,t=e.height,i=e.axis,r=e.groups,a=e.gap;return(0,o.useMemo)((function(){return 
R({width:n,height:t,axis:i,groups:r,gap:a})}),[n,t,i,r,a])},N=function(e){var n=e.axis,t=e.valueScale,i=e.ordinalScale,r=e.spacing,a=e.forceStrength;return(0,o.useMemo)((function(){return T({axis:n,valueScale:t,ordinalScale:i,spacing:r,forceStrength:a})}),[n,t,i,r,a])},X=function(e){var n=e.data,t=e.width,r=e.height,a=e.id,u=e.value,l=e.valueFormat,c=e.valueScale,d=e.groups,f=e.groupBy,v=e.size,h=e.spacing,g=e.layout,p=e.gap,m=e.forceStrength,y=e.simulationIterations,x=e.colors,M=e.colorBy,S="horizontal"===g?"x":"y",w=(0,i.LR)(a),C=(0,i.LR)(u),b=(0,i.O_)(l),k=(0,i.LR)(f),z=function(e){return(0,o.useMemo)((function(){return A(e)}),[e])}(v),W=(0,i.LR)(M),I=(0,s.U)(x,W),E=H({width:t,height:r,axis:S,getValue:C,scale:c,data:n}),B=O({width:t,height:r,axis:S,groups:d,gap:p}),L=N({axis:S,valueScale:E,ordinalScale:B,spacing:h,forceStrength:m}),V=(0,o.useMemo)((function(){return G({data:n,getId:w,layout:g,getValue:C,valueScale:E,getGroup:k,ordinalScale:B,getSize:z,forces:L,simulationIterations:y,valueScaleConfig:c})}),[n,w,g,C,E,k,B,z,L,y,c]),Z=V.nodes,F=V.xScale,R=V.yScale;return{nodes:(0,o.useMemo)((function(){return Z.map((function(e){return j({},e,{formattedValue:b(e.value),color:I(e)})}))}),[Z,b,I]),xScale:F,yScale:R,getColor:I}},Y=function(e){return(0,o.useMemo)((function(){return"function"==typeof e?e:function(){return e}}),[e])},_=function(e){var n=e.isInteractive,t=e.onClick,i=e.onMouseEnter,r=e.onMouseLeave,a=e.onMouseMove,l=e.tooltip,s=(0,u.lL)(),c=s.showTooltipFromEvent,d=s.hideTooltip;return{onMouseEnter:(0,o.useCallback)((function(e,t){n&&(c(l(e),t),null==i||i(e,t))}),[n,i,c,l]),onMouseMove:(0,o.useCallback)((function(e,t){n&&(c(l(e),t),null==a||a(e,t))}),[n,a,c,l]),onMouseLeave:(0,o.useCallback)((function(e,t){n&&(d(),null==r||r(e,t))}),[n,d,r]),onClick:(0,o.useCallback)((function(e,o){n&&(null==t||t(e,o))}),[n,t])}},D=function(e){return{x:e.x,y:e.y}},q=function(e){return{size:e.size,width:e.size,height:e.size}},K=function(e,n){return(0,c.O2)({data:e,annotations:n,getPosition:D,getDimensions:q})},U=function(e){var n=e.nodes,t=e.xScale,i=e.yScale,r=e.innerWidth,a=e.innerHeight,u=e.outerWidth,l=e.outerHeight,s=e.margin;return(0,o.useMemo)((function(){return{nodes:n,xScale:t,yScale:i,innerWidth:r,innerHeight:a,outerWidth:u,outerHeight:l,margin:s}}),[n,t,i,r,a,u,l,s])},J=function(e){var n=e.nodes,t=e.borderWidth,r=e.borderColor,a=e.component,c=e.isInteractive,d=e.onMouseEnter,f=e.onMouseMove,v=e.onMouseLeave,h=e.onClick,g=e.tooltip,p=(0,u.lL)(),m=p.showTooltipFromEvent,y=p.hideTooltip,x=(0,o.useMemo)((function(){if(c)return function(e,n){m((0,o.createElement)(g,e),n),null==d||d(e,n)}}),[c,m,g,d]),M=(0,o.useMemo)((function(){if(c)return function(e,n){m((0,o.createElement)(g,e),n),null==f||f(e,n)}}),[c,m,g,f]),S=(0,o.useMemo)((function(){if(c)return function(e,n){y(),null==v||v(e,n)}}),[c,y,v]),w=(0,o.useMemo)((function(){if(c)return function(e,n){null==h||h(e,n)}}),[c,h]),C=(0,i.tf)(),b=C.animate,k=C.config,z=(0,i.Fg)(),W=Y(t),I=(0,s.Bf)(r,z),E=(0,o.useMemo)((function(){return e=I,{enter:function(n){return{x:n.x,y:n.y,radius:0,color:n.color,borderColor:e(n),opacity:0}},update:function(n){return{x:n.x,y:n.y,radius:n.size/2,color:n.color,borderColor:e(n),opacity:1}},leave:function(n){return{x:n.x,y:n.y,radius:0,color:n.color,borderColor:e(n),opacity:0}}};var e}),[I]),L=(0,B.useTransition)(n,{keys:function(e){return e.id},initial:E.update,from:E.enter,enter:E.update,update:E.update,leave:E.leave,config:k,immediate:!b});return(0,l.jsx)(l.Fragment,{children:L((function(e,n){return 
o.createElement(a,{key:n.id,node:n,style:j({},e,{radius:(t=e.radius,(0,B.to)([t],(function(e){return Math.max(0,e)}))),borderWidth:W(n)}),onMouseEnter:x,onMouseMove:M,onMouseLeave:S,onClick:w});var t}))})},Q=function(e){var n=e.node,t=e.style,o=e.onMouseEnter,i=e.onMouseMove,r=e.onMouseLeave,a=e.onClick;return(0,l.jsx)(B.animated.circle,{cx:t.x,cy:t.y,r:t.radius,fill:t.color,stroke:t.borderColor,strokeWidth:t.borderWidth,opacity:t.opacity,onMouseEnter:function(e){return null==o?void 0:o(n,e)},onMouseMove:function(e){return null==i?void 0:i(n,e)},onMouseLeave:function(e){return null==r?void 0:r(n,e)},onClick:function(e){return null==a?void 0:a(n,e)}},n.id)},$=function(e){var n=e.nodes,t=e.annotations,o=K(n,t);return(0,l.jsx)(l.Fragment,{children:o.map((function(e,n){return(0,l.jsx)(c.q6,j({},e),n)}))})},ee=["nodes"],ne=["theme","isInteractive","animate","motionConfig","renderWrapper"],te=function(e){var n=e.data,t=e.width,u=e.height,s=e.margin,c=e.id,d=void 0===c?F.id:c,f=e.value,v=void 0===f?F.value:f,h=e.valueScale,g=void 0===h?F.valueScale:h,p=e.valueFormat,m=e.groups,y=e.groupBy,x=void 0===y?F.groupBy:y,M=e.size,S=void 0===M?F.size:M,w=e.forceStrength,C=void 0===w?F.forceStrength:w,b=e.simulationIterations,k=void 0===b?F.simulationIterations:b,z=e.colors,W=void 0===z?F.colors:z,I=e.colorBy,E=void 0===I?F.colorBy:I,B=e.borderColor,j=void 0===B?F.borderColor:B,V=e.borderWidth,Z=void 0===V?F.borderWidth:V,R=e.layout,P=void 0===R?F.layout:R,A=e.spacing,T=void 0===A?F.spacing:A,G=e.gap,H=void 0===G?F.gap:G,O=e.layers,N=void 0===O?F.layers:O,Y=e.circleComponent,D=void 0===Y?Q:Y,q=e.useMesh,K=void 0===q?F.useMesh:q,ne=e.debugMesh,te=void 0===ne?F.debugMesh:ne,oe=e.enableGridX,ie=void 0===oe?F.enableGridX:oe,re=e.gridXValues,ae=e.enableGridY,ue=void 0===ae?F.enableGridY:ae,le=e.gridYValues,se=e.axisTop,ce=void 0===se?F.axisTop:se,de=e.axisRight,fe=void 0===de?F.axisRight:de,ve=e.axisBottom,he=void 0===ve?F.axisBottom:ve,ge=e.axisLeft,pe=void 0===ge?F.axisLeft:ge,me=e.isInteractive,ye=e.onMouseEnter,xe=e.onMouseMove,Me=e.onMouseLeave,Se=e.onClick,we=e.tooltip,Ce=void 0===we?F.tooltip:we,be=e.annotations,ke=void 0===be?F.annotations:be,ze=e.role,We=void 0===ze?F.role:ze,Ie=(0,i.Bs)(t,u,s),Ee=Ie.outerWidth,Be=Ie.outerHeight,je=Ie.margin,Le=Ie.innerWidth,Ve=Ie.innerHeight,Ze=X({width:Le,height:Ve,data:n,id:d,value:v,valueFormat:p,valueScale:g,groups:m,groupBy:x,size:S,spacing:T,layout:P,gap:H,colors:W,colorBy:E,forceStrength:C,simulationIterations:k}),Fe=Ze.nodes,Re=L(Ze,ee),Pe=Re.xScale,Ae=Re.yScale,Te=_({isInteractive:me,onClick:Se,onMouseEnter:ye,onMouseLeave:Me,onMouseMove:xe,tooltip:Ce}),Ge={grid:null,axes:null,circles:null,annotations:null,mesh:null};N.includes("grid")&&(Ge.grid=(0,l.jsx)(r.rj,{width:Le,height:Ve,xScale:ie?Pe:null,xValues:re,yScale:ue?Ae:null,yValues:le},"grid")),N.includes("axes")&&(Ge.axes=(0,l.jsx)(r.dk,{xScale:Pe,yScale:Ae,width:Le,height:Ve,top:null!=ce?ce:void 0,right:null!=fe?fe:void 0,bottom:null!=he?he:void 0,left:null!=pe?pe:void 0},"axes")),N.includes("circles")&&(Ge.circles=(0,l.jsx)(J,{nodes:Fe,borderWidth:Z,borderColor:j,isInteractive:me,tooltip:Ce,component:D,onMouseEnter:ye,onMouseMove:xe,onMouseLeave:Me,onClick:Se},"circles")),N.includes("annotations")&&(Ge.annotations=(0,l.jsx)($,{nodes:Fe,annotations:ke},"annotations")),me&&K&&(Ge.mesh=(0,l.jsx)(a.Mesh,{nodes:Fe,width:Le,height:Ve,onMouseEnter:Te.onMouseEnter,onMouseMove:Te.onMouseMove,onMouseLeave:Te.onMouseLeave,onClick:Te.onClick,debug:te},"mesh"));var 
He=U({nodes:Fe,xScale:Pe,yScale:Ae,innerWidth:Le,innerHeight:Ve,outerWidth:Ee,outerHeight:Be,margin:je});return(0,l.jsx)(i.tM,{width:Ee,height:Be,margin:je,role:We,children:N.map((function(e,n){return void 0!==Ge[e]?Ge[e]:"function"==typeof e?(0,l.jsx)(o.Fragment,{children:(0,o.createElement)(e,He)},n):null}))})},oe=function(e){var n=e.theme,t=e.isInteractive,o=void 0===t?F.isInteractive:t,r=e.animate,a=void 0===r?F.animate:r,u=e.motionConfig,s=void 0===u?F.motionConfig:u,c=e.renderWrapper,d=L(e,ne);return(0,l.jsx)(i.W2,{isInteractive:o,animate:a,motionConfig:s,theme:n,renderWrapper:c,children:(0,l.jsx)(te,j({isInteractive:o},d))})},ie=function(e){return(0,l.jsx)(i.d,{children:function(n){var t=n.width,o=n.height;return(0,l.jsx)(oe,j({width:t,height:o},e))}})},re=["nodes"],ae=["theme","isInteractive","animate","motionConfig","renderWrapper"],ue=function(e,n){var t=n.node,o=n.getBorderWidth,i=n.getBorderColor,r=o(t);r>0&&(e.strokeStyle=i(t),e.lineWidth=r),e.beginPath(),e.arc(t.x,t.y,t.size/2,0,2*Math.PI),e.fillStyle=t.color,e.fill(),r>0&&e.stroke()},le=function(e){var n=e.data,t=e.width,c=e.height,d=e.margin,v=e.id,h=void 0===v?F.id:v,g=e.value,p=void 0===g?F.value:g,m=e.valueFormat,y=e.valueScale,x=void 0===y?F.valueScale:y,M=e.groups,S=e.groupBy,w=void 0===S?F.groupBy:S,C=e.size,b=void 0===C?F.size:C,k=e.forceStrength,z=void 0===k?F.forceStrength:k,W=e.simulationIterations,I=void 0===W?F.simulationIterations:W,E=e.colors,B=void 0===E?F.colors:E,j=e.colorBy,V=void 0===j?F.colorBy:j,Z=e.borderColor,R=void 0===Z?F.borderColor:Z,P=e.layout,A=void 0===P?F.layout:P,T=e.spacing,G=void 0===T?F.spacing:T,H=e.gap,O=void 0===H?F.gap:H,N=e.layers,Y=void 0===N?F.layers:N,_=e.renderCircle,D=void 0===_?ue:_,q=e.debugMesh,K=void 0===q?F.debugMesh:q,U=e.enableGridX,J=e.gridXValues,Q=e.enableGridY,$=e.gridYValues,ee=e.axisTop,ne=void 0===ee?F.axisTop:ee,te=e.axisRight,oe=void 0===te?F.axisRight:te,ie=e.axisBottom,ae=void 0===ie?F.axisBottom:ie,le=e.axisLeft,se=void 0===le?F.axisLeft:le,ce=e.isInteractive,de=e.onMouseMove,fe=e.onClick,ve=e.tooltip,he=void 0===ve?F.tooltip:ve,ge=e.role,pe=void 0===ge?F.role:ge,me=e.pixelRatio,ye=void 0===me?F.pixelRatio:me,xe=(0,o.useRef)(null),Me=(0,i.Fg)(),Se=(0,o.useState)(null),we=Se[0],Ce=Se[1],be=(0,i.Bs)(t,c,d),ke=be.outerWidth,ze=be.outerHeight,We=be.margin,Ie=be.innerWidth,Ee=be.innerHeight,Be=X({width:Ie,height:Ee,data:n,id:h,value:p,valueFormat:m,valueScale:x,groups:M,groupBy:w,size:b,spacing:G,layout:A,gap:O,colors:B,colorBy:V,forceStrength:z,simulationIterations:I}),je=Be.nodes,Le=L(Be,re),Ve=Le.xScale,Ze=Le.yScale,Fe=(0,a.useVoronoiMesh)({points:je,width:Ie,height:Ee,debug:K}),Re=Fe.delaunay,Pe=Fe.voronoi,Ae=(0,s.Bf)(R,Me),Te=function(){return 1};(0,o.useEffect)((function(){if(xe.current){xe.current.width=ke*ye,xe.current.height=ze*ye;var 
e=xe.current.getContext("2d");e&&(e.scale(ye,ye),e.fillStyle=Me.background,e.fillRect(0,0,ke,ze),e.save(),e.translate(We.left,We.top),Y.forEach((function(n){"grid"===n&&f()(Me.grid.line.strokeWidth)&&Me.grid.line.strokeWidth>0&&(e.lineWidth=Me.grid.line.strokeWidth,e.strokeStyle=Me.grid.line.stroke,U&&(0,r.FA)(e,{width:Ie,height:Ee,scale:Ve,axis:"x",values:J}),Q&&(0,r.FA)(e,{width:Ie,height:Ee,scale:Ze,axis:"y",values:$})),"axes"===n&&(0,r.DZ)(e,{xScale:Ve,yScale:Ze,width:Ie,height:Ee,top:ne,right:oe,bottom:ae,left:se,theme:Me}),"circles"===n&&je.forEach((function(n){D(e,{node:n,getBorderWidth:Te,getBorderColor:Ae})})),"mesh"===n&&K&&Pe&&((0,a.renderVoronoiToCanvas)(e,Pe),we&&(0,a.renderVoronoiCellToCanvas)(e,Pe,we.index))})))}}),[xe,ke,ze,Ie,Ee,ye,We,Me,Y,Ve,Ze,U,J,Q,$,ne,oe,ae,se,Pe,K,we,je,D,Te,Ae]);var Ge=(0,o.useCallback)((function(e){if(!xe.current)return null;var n=(0,i.P6)(xe.current,e),t=n[0],o=n[1];if(!(0,i.zn)(We.left,We.top,Ie,Ee,t,o))return null;var r=Re.find(t-We.left,o-We.top);return je[r]}),[xe,We,Ie,Ee,Re,je]),He=(0,u.lL)(),Oe=He.showTooltipFromEvent,Ne=He.hideTooltip,Xe=(0,o.useCallback)((function(e,n){Oe((0,o.createElement)(he,e),n)}),[Oe,he]),Ye=(0,o.useCallback)((function(e){var n=Ge(e);Ce(n),n?(null==de||de(n,e),Xe(n,e)):Ne()}),[Ge,Ce,de,Xe,Ne]),_e=(0,o.useCallback)((function(){Ne(),Ce(null)}),[Ne,Ce]),De=(0,o.useCallback)((function(e){var n=Ge(e);n&&(null==fe||fe(n,e))}),[Ge,fe]);return(0,l.jsx)("canvas",{ref:xe,width:ke*ye,height:ze*ye,style:{width:ke,height:ze,cursor:ce?"auto":"normal"},role:pe,onMouseEnter:ce?Ye:void 0,onMouseMove:ce?Ye:void 0,onMouseLeave:ce?_e:void 0,onClick:ce?De:void 0})},se=function(e){var n=e.theme,t=e.isInteractive,o=void 0===t?F.isInteractive:t,r=e.animate,a=void 0===r?F.animate:r,u=e.motionConfig,s=void 0===u?F.motionConfig:u,c=e.renderWrapper,d=L(e,ae);return(0,l.jsx)(i.W2,{isInteractive:o,animate:a,motionConfig:s,theme:n,renderWrapper:c,children:(0,l.jsx)(le,j({isInteractive:o},d))})},ce=function(e){return(0,l.jsx)(i.d,{children:function(n){var t=n.width,o=n.height;return(0,l.jsx)(se,j({width:t,height:o},e))}})}}}]);
|
PypiClean
|
/safegate_pro-2021.7.6-py3-none-any.whl/homeassistant/components/mqtt/light/schema_template.py
|
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_STATE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.color as color_util
from .. import CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC, subscription
from ... import mqtt
from ..debug_info import log_messages
from ..mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity
from .schema import MQTT_LIGHT_SCHEMA_SCHEMA
from .schema_basic import MQTT_LIGHT_ATTRIBUTES_BLOCKED
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mqtt_template"
DEFAULT_NAME = "MQTT Template Light"
DEFAULT_OPTIMISTIC = False
CONF_BLUE_TEMPLATE = "blue_template"
CONF_BRIGHTNESS_TEMPLATE = "brightness_template"
CONF_COLOR_TEMP_TEMPLATE = "color_temp_template"
CONF_COMMAND_OFF_TEMPLATE = "command_off_template"
CONF_COMMAND_ON_TEMPLATE = "command_on_template"
CONF_EFFECT_LIST = "effect_list"
CONF_EFFECT_TEMPLATE = "effect_template"
CONF_GREEN_TEMPLATE = "green_template"
CONF_MAX_MIREDS = "max_mireds"
CONF_MIN_MIREDS = "min_mireds"
CONF_RED_TEMPLATE = "red_template"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
PLATFORM_SCHEMA_TEMPLATE = (
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_BLUE_TEMPLATE): cv.template,
vol.Optional(CONF_BRIGHTNESS_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_OFF_TEMPLATE): cv.template,
vol.Required(CONF_COMMAND_ON_TEMPLATE): cv.template,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EFFECT_TEMPLATE): cv.template,
vol.Optional(CONF_GREEN_TEMPLATE): cv.template,
vol.Optional(CONF_MAX_MIREDS): cv.positive_int,
vol.Optional(CONF_MIN_MIREDS): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_RED_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
}
)
.extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
.extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
)
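# Example YAML configuration sketch for this platform. The topic names and
# template strings below are illustrative assumptions only, not part of this
# module:
#
#   light:
#     - platform: mqtt
#       schema: template
#       name: "Office light"
#       command_topic: "office/light/set"
#       state_topic: "office/light/state"
#       command_on_template: "on,{{ brightness|d }}"
#       command_off_template: "off"
#       state_template: "{{ value.split(',')[0] }}"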
async def async_setup_entity_template(
hass, config, async_add_entities, config_entry, discovery_data
):
"""Set up a MQTT Template light."""
async_add_entities([MqttLightTemplate(hass, config, config_entry, discovery_data)])
class MqttLightTemplate(MqttEntity, LightEntity, RestoreEntity):
"""Representation of a MQTT Template light."""
_attributes_extra_blocked = MQTT_LIGHT_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize a MQTT Template light."""
self._state = False
self._topics = None
self._templates = None
self._optimistic = False
# features
self._brightness = None
self._color_temp = None
self._white_value = None
self._hs = None
self._effect = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA_TEMPLATE
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._topics = {
key: config.get(key) for key in (CONF_STATE_TOPIC, CONF_COMMAND_TOPIC)
}
self._templates = {
key: config.get(key)
for key in (
CONF_BLUE_TEMPLATE,
CONF_BRIGHTNESS_TEMPLATE,
CONF_COLOR_TEMP_TEMPLATE,
CONF_COMMAND_OFF_TEMPLATE,
CONF_COMMAND_ON_TEMPLATE,
CONF_EFFECT_TEMPLATE,
CONF_GREEN_TEMPLATE,
CONF_RED_TEMPLATE,
CONF_STATE_TEMPLATE,
CONF_WHITE_VALUE_TEMPLATE,
)
}
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = (
optimistic
or self._topics[CONF_STATE_TOPIC] is None
or self._templates[CONF_STATE_TEMPLATE] is None
)
async def _subscribe_topics(self): # noqa: C901
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
last_state = await self.async_get_last_state()
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new MQTT messages."""
state = self._templates[
CONF_STATE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
if state == STATE_ON:
self._state = True
elif state == STATE_OFF:
self._state = False
else:
_LOGGER.warning("Invalid state value received")
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
try:
self._brightness = int(
self._templates[
CONF_BRIGHTNESS_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid brightness value received")
if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
try:
self._color_temp = int(
self._templates[
CONF_COLOR_TEMP_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid color temperature value received")
if (
self._templates[CONF_RED_TEMPLATE] is not None
and self._templates[CONF_GREEN_TEMPLATE] is not None
and self._templates[CONF_BLUE_TEMPLATE] is not None
):
try:
red = int(
self._templates[
CONF_RED_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
green = int(
self._templates[
CONF_GREEN_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
blue = int(
self._templates[
CONF_BLUE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
self._hs = color_util.color_RGB_to_hs(red, green, blue)
except ValueError:
_LOGGER.warning("Invalid color value received")
if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
try:
self._white_value = int(
self._templates[
CONF_WHITE_VALUE_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
)
except ValueError:
_LOGGER.warning("Invalid white value received")
if self._templates[CONF_EFFECT_TEMPLATE] is not None:
effect = self._templates[
CONF_EFFECT_TEMPLATE
].async_render_with_possible_json_value(msg.payload)
if effect in self._config.get(CONF_EFFECT_LIST):
self._effect = effect
else:
_LOGGER.warning("Unsupported effect value received")
self.async_write_ha_state()
if self._topics[CONF_STATE_TOPIC] is not None:
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._topics[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
},
)
if self._optimistic and last_state:
self._state = last_state.state == STATE_ON
if last_state.attributes.get(ATTR_BRIGHTNESS):
self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
if last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
if last_state.attributes.get(ATTR_COLOR_TEMP):
self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
if last_state.attributes.get(ATTR_EFFECT):
self._effect = last_state.attributes.get(ATTR_EFFECT)
if last_state.attributes.get(ATTR_WHITE_VALUE):
self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._config.get(CONF_MIN_MIREDS, super().min_mireds)
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._config.get(CONF_MAX_MIREDS, super().max_mireds)
@property
def hs_color(self):
"""Return the hs color value [int, int]."""
return self._hs
@property
def white_value(self):
"""Return the white property."""
return self._white_value
@property
def is_on(self):
"""Return True if entity is on."""
return self._state
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return self._optimistic
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._config.get(CONF_EFFECT_LIST)
@property
def effect(self):
"""Return the current effect."""
return self._effect
async def async_turn_on(self, **kwargs):
"""Turn the entity on.
This method is a coroutine.
"""
values = {"state": True}
if self._optimistic:
self._state = True
if ATTR_BRIGHTNESS in kwargs:
values["brightness"] = int(kwargs[ATTR_BRIGHTNESS])
if self._optimistic:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs:
values["color_temp"] = int(kwargs[ATTR_COLOR_TEMP])
if self._optimistic:
self._color_temp = kwargs[ATTR_COLOR_TEMP]
if ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
# If there's a brightness topic set, we don't want to scale the RGB
# values given using the brightness.
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
brightness = 255
else:
brightness = kwargs.get(
ATTR_BRIGHTNESS,
self._brightness if self._brightness is not None else 255,
)
rgb = color_util.color_hsv_to_RGB(
hs_color[0], hs_color[1], brightness / 255 * 100
)
values["red"] = rgb[0]
values["green"] = rgb[1]
values["blue"] = rgb[2]
if self._optimistic:
self._hs = kwargs[ATTR_HS_COLOR]
if ATTR_WHITE_VALUE in kwargs:
values["white_value"] = int(kwargs[ATTR_WHITE_VALUE])
if self._optimistic:
self._white_value = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
values["effect"] = kwargs.get(ATTR_EFFECT)
if self._optimistic:
self._effect = kwargs[ATTR_EFFECT]
if ATTR_FLASH in kwargs:
values["flash"] = kwargs.get(ATTR_FLASH)
if ATTR_TRANSITION in kwargs:
values["transition"] = kwargs[ATTR_TRANSITION]
mqtt.async_publish(
self.hass,
self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_ON_TEMPLATE].async_render(
parse_result=False, **values
),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off.
This method is a coroutine.
"""
values = {"state": False}
if self._optimistic:
self._state = False
if ATTR_TRANSITION in kwargs:
values["transition"] = kwargs[ATTR_TRANSITION]
mqtt.async_publish(
self.hass,
self._topics[CONF_COMMAND_TOPIC],
self._templates[CONF_COMMAND_OFF_TEMPLATE].async_render(
parse_result=False, **values
),
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self.async_write_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
features = SUPPORT_FLASH | SUPPORT_TRANSITION
if self._templates[CONF_BRIGHTNESS_TEMPLATE] is not None:
features = features | SUPPORT_BRIGHTNESS
if (
self._templates[CONF_RED_TEMPLATE] is not None
and self._templates[CONF_GREEN_TEMPLATE] is not None
and self._templates[CONF_BLUE_TEMPLATE] is not None
):
features = features | SUPPORT_COLOR | SUPPORT_BRIGHTNESS
if self._config.get(CONF_EFFECT_LIST) is not None:
features = features | SUPPORT_EFFECT
if self._templates[CONF_COLOR_TEMP_TEMPLATE] is not None:
features = features | SUPPORT_COLOR_TEMP
if self._templates[CONF_WHITE_VALUE_TEMPLATE] is not None:
features = features | SUPPORT_WHITE_VALUE
return features
|
PypiClean
|
/PyQt-Fit-1.4.0.tar.gz/PyQt-Fit-1.4.0/docs/mod_plot_fit.rst
|
Module ``pyqt_fit.plot_fit``
============================
.. automodule:: pyqt_fit.plot_fit
Analyses of the residuals
-------------------------
.. autofunction:: fit_evaluation
.. autofunction:: residual_measures
Plotting the residuals
----------------------
.. autofunction:: plot_dist_residuals
.. autofunction:: plot_residuals
.. autofunction:: scaled_location_plot
.. autofunction:: qqplot
.. autofunction:: plot_residual_tests
General plotting
----------------
.. autofunction:: plot1d
Output to a file
----------------
.. autofunction:: write1d
Return types
------------
Most functions return a tuple. For easier access, these are named tuples, i.e.
tuples whose fields can also be accessed by name.
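A quick sketch of accessing the fitted results by name, assuming ``res`` is a
:py:class:`ResultStruct` instance returned by one of the analysis functions:

.. code-block:: python

    print(res.fct_desc)  # description of the fitted function
    print(res.popt)      # optimal parameters
    print(res.res)       # residuals computed with ``popt``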
.. class:: ResultStruct(...)
.. note::
This is a class created with :py:func:`pyqt_fit.utils.namedtuple`.
.. py:attribute:: fct
Fitted function (i.e. result of the fitted function)
.. py:attribute:: fct_desc
Description of the function being fitted
.. py:attribute:: param_names
Name of the parameters fitted
.. py:attribute:: xdata
Explaining variables used for fitting
.. py:attribute:: ydata
Dependent variables observed during experiment
.. py:attribute:: xname
Name of the explaining variables
.. py:attribute:: yname
        Name of the dependent variable
.. py:attribute:: res_name
Name of the residuals
.. py:attribute:: residuals
Function used to compute the residuals
.. py:attribute:: popt
Optimal parameters
.. py:attribute:: res
Residuals computed with the parameters ``popt``
.. py:attribute:: yopts
Evaluation of the optimized function on the observed points
.. py:attribute:: eval_points
Points on which the function has been interpolated (may be equal to xdata)
.. py:attribute:: interpolation
        Interpolated function on ``eval_points`` (may be equal to ``yopts``)
.. py:attribute:: sorted_yopts
        Evaluated function for each data point, sorted in increasing residual order
.. py:attribute:: scaled_res
Scaled residuals, ordered by increasing residuals
.. py:attribute:: normq
Expected values for the residuals, based on their quantile
.. py:attribute:: CI
List of confidence intervals evaluated (in percent)
.. py:attribute:: CIs
List of arrays giving the confidence intervals for the dependent variables and for the parameters.
.. py:attribute:: CIresults
Object returned by the confidence interval method
.. class:: ResidualMeasures(scaled_res, res_IX, prob, normq)
.. note::
This is a class created with :py:func:`pyqt_fit.utils.namedtuple`.
.. py:attribute:: scaled_res
Scaled residuals, sorted
.. py:attribute:: res_IX
Sorting indices for the residuals
.. py:attribute:: prob
Quantiles of the scaled residuals
.. py:attribute:: normq
Expected values of the quantiles for a normal distribution
.. class:: ResTestResult(res_figure, residuals, scaled_residuals, qqplot, dist_residuals)
.. note::
This is a class created with :py:func:`pyqt_fit.utils.namedtuple`.
.. py:attribute:: res_figure
Handle to the figure
.. py:attribute:: residuals
Handles created by :py:func:`plot_residuals`
.. py:attribute:: scaled_residuals
Handles created by :py:func:`scaled_location_plot`
.. py:attribute:: qqplot
Handles created by :py:func:`qqplot`
.. py:attribute:: dist_residuals
Handles created by :py:func:`plot_dist_residuals`
.. class:: Plot1dResult(figure, estimate, data, CIs, \*ResTestResult)
.. note::
        This is a class created with :py:func:`pyqt_fit.utils.namedtuple`. Also, it
        contains all the fields of :py:class:`ResTestResult` at the end of the
        tuple.
.. py:attribute:: figure
Handle to the figure with the data and fitted curve
.. py:attribute:: estimate
Handle to the fitted curve
.. py:attribute:: data
Handle to the data
.. py:attribute:: CIs
Handles to the confidence interval curves
|
PypiClean
|
/ReplayTables_andnp-5.6.0-py3-none-any.whl/ReplayTables/PER.py
|
import numpy as np
from dataclasses import dataclass
from typing import Optional
from ReplayTables.interface import EID, LaggedTimestep, Batch
from ReplayTables.ReplayBuffer import ReplayBuffer
from ReplayTables.sampling.PrioritySampler import PrioritySampler
@dataclass
class PERConfig:
new_priority_mode: str = 'max'
uniform_probability: float = 1e-3
priority_exponent: float = 0.5
max_decay: float = 1.
class PrioritizedReplay(ReplayBuffer):
def __init__(self, max_size: int, lag: int, rng: np.random.Generator, config: Optional[PERConfig] = None):
super().__init__(max_size, lag, rng)
self._c = config or PERConfig()
self._sampler: PrioritySampler = PrioritySampler(
self._c.uniform_probability,
max_size,
self._rng,
)
self._max_priority = 1e-16
def _on_add(self, transition: LaggedTimestep):
if transition.extra is not None and 'priority' in transition.extra:
priority = transition.extra['priority']
elif self._c.new_priority_mode == 'max':
priority = self._max_priority
elif self._c.new_priority_mode == 'mean':
total_priority = self._sampler.total_priority()
priority = total_priority / self.size()
if priority == 0:
priority = 1e-16
else:
raise NotImplementedError()
idx = self._idx_mapper.eid2idx(transition.eid)
self._sampler.replace(idx, transition, priority=priority)
def update_priorities(self, batch: Batch, priorities: np.ndarray):
idxs = self._idx_mapper.eids2idxs(batch.eid)
priorities = priorities ** self._c.priority_exponent
self._sampler.update(idxs, batch, priorities=priorities)
self._max_priority = max(
self._c.max_decay * self._max_priority,
priorities.max(),
)
def delete_sample(self, eid: EID):
idx = self._idx_mapper.eid2idx(eid)
self._sampler.mask_sample(idx)
|
PypiClean
|
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/AgWeatherWeeklyStats.py
|
import json
from alipay.aop.api.constant.ParamConstants import *
class AgWeatherWeeklyStats(object):
def __init__(self):
self._acc_precipitation = None
self._acc_temperature = None
self._create_date = None
self._update_date = None
@property
def acc_precipitation(self):
return self._acc_precipitation
@acc_precipitation.setter
def acc_precipitation(self, value):
self._acc_precipitation = value
@property
def acc_temperature(self):
return self._acc_temperature
@acc_temperature.setter
def acc_temperature(self, value):
self._acc_temperature = value
@property
def create_date(self):
return self._create_date
@create_date.setter
def create_date(self, value):
self._create_date = value
@property
def update_date(self):
return self._update_date
@update_date.setter
def update_date(self, value):
self._update_date = value
def to_alipay_dict(self):
params = dict()
if self.acc_precipitation:
if hasattr(self.acc_precipitation, 'to_alipay_dict'):
params['acc_precipitation'] = self.acc_precipitation.to_alipay_dict()
else:
params['acc_precipitation'] = self.acc_precipitation
if self.acc_temperature:
if hasattr(self.acc_temperature, 'to_alipay_dict'):
params['acc_temperature'] = self.acc_temperature.to_alipay_dict()
else:
params['acc_temperature'] = self.acc_temperature
if self.create_date:
if hasattr(self.create_date, 'to_alipay_dict'):
params['create_date'] = self.create_date.to_alipay_dict()
else:
params['create_date'] = self.create_date
if self.update_date:
if hasattr(self.update_date, 'to_alipay_dict'):
params['update_date'] = self.update_date.to_alipay_dict()
else:
params['update_date'] = self.update_date
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AgWeatherWeeklyStats()
if 'acc_precipitation' in d:
o.acc_precipitation = d['acc_precipitation']
if 'acc_temperature' in d:
o.acc_temperature = d['acc_temperature']
if 'create_date' in d:
o.create_date = d['create_date']
if 'update_date' in d:
o.update_date = d['update_date']
return o
|
PypiClean
|
/PyFunctional-1.4.3.tar.gz/PyFunctional-1.4.3/functional/io.py
|
import gzip
import lzma
import bz2
import io
import builtins
WRITE_MODE = "wt"
class ReusableFile(object):
"""
Class which emulates the builtin file except that calling iter() on it will return separate
iterators on different file handlers (which are automatically closed when iteration stops). This
    is useful for allowing a file object to be iterated over multiple times while keeping evaluation
    lazy.
"""
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="r",
buffering=-1,
encoding=None,
errors=None,
newline=None,
):
"""
Constructor arguments are passed directly to builtins.open
:param path: passed to open
:param delimiter: passed to open
:param mode: passed to open
:param buffering: passed to open
:param encoding: passed to open
:param errors: passed to open
:param newline: passed to open
:return: ReusableFile from the arguments
"""
self.path = path
self.delimiter = delimiter
self.mode = mode
self.buffering = buffering
self.encoding = encoding
self.errors = errors
self.newline = newline
def __iter__(self):
"""
Returns a new iterator over the file using the arguments from the constructor. Each call
to __iter__ returns a new iterator independent of all others
:return: iterator over file
"""
# pylint: disable=no-member
with builtins.open(
self.path,
mode=self.mode,
buffering=self.buffering,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
# pylint: disable=no-member
with builtins.open(
self.path,
mode=self.mode,
buffering=self.buffering,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
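# Usage sketch (assumes a plain-text file 'data.txt' exists on disk): the same
# ReusableFile can be iterated several times, each pass opening and closing its
# own file handle.
#
#     lines = ReusableFile('data.txt')
#     first_pass = list(lines)
#     second_pass = list(lines)  # works again; a fresh handle is opened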
class CompressedFile(ReusableFile):
magic_bytes = None
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(CompressedFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
self.compresslevel = compresslevel
@classmethod
def is_compressed(cls, data):
return data.startswith(cls.magic_bytes)
class GZFile(CompressedFile):
magic_bytes = b"\x1f\x8b\x08"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(GZFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
def __iter__(self):
if "t" in self.mode:
with gzip.GzipFile(self.path, compresslevel=self.compresslevel) as gz_file:
gz_file.read1 = gz_file.read
with io.TextIOWrapper(
gz_file,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
else:
with gzip.open(
self.path, mode=self.mode, compresslevel=self.compresslevel
) as file_content:
for line in file_content:
yield line
def read(self):
with gzip.GzipFile(self.path, compresslevel=self.compresslevel) as gz_file:
gz_file.read1 = gz_file.read
with io.TextIOWrapper(
gz_file,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
class BZ2File(CompressedFile):
magic_bytes = b"\x42\x5a\x68"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
):
super(BZ2File, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
def __iter__(self):
with bz2.open(
self.path,
mode=self.mode,
compresslevel=self.compresslevel,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
with bz2.open(
self.path,
mode=self.mode,
compresslevel=self.compresslevel,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
class XZFile(CompressedFile):
magic_bytes = b"\xfd\x37\x7a\x58\x5a\x00"
# pylint: disable=too-many-instance-attributes
def __init__(
self,
path,
delimiter=None,
mode="rt",
buffering=-1,
compresslevel=9,
encoding=None,
errors=None,
newline=None,
check=-1,
preset=None,
filters=None,
format=None,
):
super(XZFile, self).__init__(
path,
delimiter=delimiter,
mode=mode,
buffering=buffering,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
self.check = check
self.preset = preset
self.format = format
self.filters = filters
def __iter__(self):
with lzma.open(
self.path,
mode=self.mode,
format=self.format,
check=self.check,
preset=self.preset,
filters=self.filters,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
for line in file_content:
yield line
def read(self):
with lzma.open(
self.path,
mode=self.mode,
format=self.format,
check=self.check,
preset=self.preset,
filters=self.filters,
encoding=self.encoding,
errors=self.errors,
newline=self.newline,
) as file_content:
return file_content.read()
COMPRESSION_CLASSES = [GZFile, BZ2File, XZFile]
N_COMPRESSION_CHECK_BYTES = max(len(cls.magic_bytes) for cls in COMPRESSION_CLASSES)
def get_read_function(filename, disable_compression):
if disable_compression:
return ReusableFile
else:
with open(filename, "rb") as f:
start_bytes = f.read(N_COMPRESSION_CHECK_BYTES)
for cls in COMPRESSION_CLASSES:
if cls.is_compressed(start_bytes):
return cls
return ReusableFile
def universal_write_open(
path,
mode,
buffering=-1,
encoding=None,
errors=None,
newline=None,
compresslevel=9,
format=None,
check=-1,
preset=None,
filters=None,
compression=None,
):
# pylint: disable=unexpected-keyword-arg,no-member
if compression is None:
return builtins.open(
path,
mode=mode,
buffering=buffering,
encoding=encoding,
errors=errors,
newline=newline,
)
elif compression in ("gz", "gzip"):
return gzip.open(
path,
mode=mode,
compresslevel=compresslevel,
errors=errors,
newline=newline,
encoding=encoding,
)
elif compression in ("lzma", "xz"):
return lzma.open(
path,
mode=mode,
format=format,
check=check,
preset=preset,
filters=filters,
encoding=encoding,
errors=errors,
newline=newline,
)
elif compression == "bz2":
return bz2.open(
path,
mode=mode,
compresslevel=compresslevel,
encoding=encoding,
errors=errors,
newline=newline,
)
else:
raise ValueError(
"compression must be None, gz, gzip, lzma, or xz and was {0}".format(
compression
)
)
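# Usage sketch for universal_write_open (assumption: the caller closes the
# returned handle, e.g. via a context manager):
#
#     with universal_write_open('out.txt.gz', mode='wt', compression='gzip') as fh:
#         fh.write('hello\n')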
|
PypiClean
|
/bee-django-crm-1.4.62.tar.gz/bee-django-crm-1.4.62/bee_django_crm/urls.py
|
__author__ = 'bee'
from django.conf.urls import include, url
from . import views
urlpatterns = [
# ==Preuser==
url(r'^test/$', views.test, name='test'),
url(r'^migrate_to_fee/$', views.migrate_to_fee, name='migrate_to_fee'),
url(r'^$', views.PreuserList.as_view(), name='index'),
# url(r'^preuser/custom/reg/$', views.PreuserCustomReg.as_view(), name='preuser_custom_reg'),
url(r'^preuser/list/$', views.PreuserList.as_view(), name='preuser_list'),
url(r'^preuser/detail/(?P<pk>[0-9]+)$', views.PreuserDetail.as_view(), name='preuser_detail'),
url(r'^preuser/add/$', views.PreuserCreate.as_view(), name='preuser_add'),
url(r'^preuser/update/(?P<pk>[0-9]+)/$', views.update_preuser, name='preuser_update'),
url(r'^preuser/delete/(?P<pk>[0-9]+)/$', views.PreuserDelete.as_view(), name='preuser_delete'),
url(r'^preuser/reg/$', views.PreuserReg.as_view(), name='preuser_reg'),
url(r'^preuser/reg/code', views.PreuserRegCode.as_view(), name='preuser_reg_code'),
url(r'^preuser/reg_done/$', views.preuser_reg_done, name='preuser_reg_done'),
url(r'^referral/preuser/list/$', views.ReferralPreuserList.as_view(), name='referral_preuser_list'),
    # list of preusers referred by the current user
# api
url(r'^preuser/api/get_name/$', views.get_name_with_user, name='get_name_with_user'),
# =====reg code =====
url(r'^regcode/check/$', views.RegCodeCheck.as_view(), name='regcode_check'),
# ==Source==
url(r'^source/list/$', views.SourceList.as_view(), name='source_list'),
url(r'^source/detail/(?P<pk>[0-9]+)/$', views.SourceDetail.as_view(), name='source_detail'),
url(r'^source/update/(?P<pk>[0-9]+)/$', views.SourceUpdate.as_view(), name='source_update'),
    # QR code image URL
url(r'^qrcode/(?P<url>(.)+)$', views.qrcode_img, name='qrcode_img'),
url(r'^source/qrcode/(?P<qrcode_type>(.)+)/(?P<source_id>[0-9]+)/(?P<landing_id>[0-9]+)/$', views.source_qrcode,
name='source_qrcode'),
url(r'^source/add/$', views.SourceCreate.as_view(), name='source_add'),
# url(r'^source/update/(?P<pk>[0-9]+)/$', views.SourceUpdate.as_view(), name='source_update'),
# url(r'^source/delete/(?P<pk>[0-9]+)/$', views.SourceDelete.as_view(), name='source_delete'),
    # posters
url(r'^poster/detail/(?P<pk>[0-9]+)/$', views.PosterDetail.as_view(), name='poster_detail'),
url(r'^poster/create/(?P<source_id>[0-9]+)/$', views.PosterCreate.as_view(), name='poster_create'),
url(r'^poster/update/(?P<pk>[0-9]+)/$', views.PosterUpdate.as_view(), name='poster_update'),
url(r'^poster/delete/(?P<pk>[0-9]+)/$', views.PosterDelete.as_view(), name='poster_delete'),
    # user poster page
url(r'^user/poster/$', views.UserPosterTemplate.as_view(), name='user_poster'),
    # user poster image URL
url(r'^user/poster/image/(?P<poster_id>[0-9]+)/(?P<user_id>[0-9]+)/$', views.user_poster_image,
name='user_poster_image'),
# ==Preuser Track==
url(r'^preuser/track/add/(?P<preuser_id>[0-9]+)/$', views.PreuserTrackCreate.as_view(), name='preuser_track_add'),
# ==Application question==
url(r'^application/question/list$', views.ApplicationQuestionList.as_view(), name='application_question_list'),
url(r'^application/question/detail/(?P<pk>[0-9]+)$', views.ApplicationQuestionDetail.as_view(),
name='application_question_detail'),
url(r'^application/question/add/$', views.ApplicationQuestionCreate.as_view(), name='application_question_add'),
url(r'^application/question/update/(?P<pk>[0-9]+)/$', views.ApplicationQuestionUpdate.as_view(),
name='application_question_update'),
url(r'^application/question/delete/(?P<pk>[0-9]+)/$', views.ApplicationQuestionDelete.as_view(),
name='application_question_delete'),
# ==Application option==
url(r'^application/option/add/(?P<pk>[0-9]+)$', views.ApplicationOptionCreate.as_view(),
name='application_option_add'),
# ==Preuser Application==
url(r'^preuser/application/add/(?P<preuser_id>[0-9]+)/$', views.PreuserApplicationView.as_view(),
name='preuser_application_add'),
url(r'^preuser/application/update_preuser/(?P<pk>[0-9]+)$', views.PreuserApplicationUpdate.as_view(),
name='preuser_application_update_preuser'),
url(r'^preuser/application/done/$', views.preuser_application_done, name='preuser_application_done'),
# ==Contract==
url(r'^contract/list/$', views.ContractList.as_view(), name='contract_list'),
url(r'^contract/detail/(?P<pk>[0-9]+)/$', views.ContractDetail.as_view(), name='contract_detail'),
url(r'^contract/add/$', views.ContractCreate.as_view(), name='contract_add'),
url(r'^contract/update/(?P<pk>[0-9]+)/$', views.ContractUpdate.as_view(), name='contract_update'),
url(r'^contract/update/agreement/(?P<pk>[0-9]+)/$', views.ContractUpdateAgreement.as_view(),
name='contract_update_agreement'),
url(r'^contract/delete/(?P<pk>[0-9]+)/$', views.ContractDelete.as_view(), name='contract_delete'),
# ==Preuser Contract==
url(r'^preuser/contract/list/(?P<preuser_id>[0-9]+)$', views.PreuserContractList.as_view(),
name='preuser_contract_list'),
    # contract list for all students
# url(r'^preuser/contract/list/$', views.PreuserAllContractList.as_view(),
# name='preuser_all_contract_list'),
url(r'^preuser/contract/detail/(?P<pk>[0-9]+)$', views.PreuserContractDetail.as_view(),
name='preuser_contract_detail'),
url(r'^preuser/contract/agreement/(?P<preuser_contract_id>[0-9]+)$', views.PreuserContractAgreement.as_view(),
name='preuser_contract_agreement'),
url(r'^preuser/contract/add/(?P<preuser_id>[0-9]+)$', views.PreuserContractCreate.as_view(),
name='preuser_contract_add'),
url(r'^preuser/contract/update/(?P<pk>[0-9]+)$', views.PreuserContractUpdate.as_view(),
name='preuser_contract_update'),
url(r'^preuser/contract/delete/(?P<pk>[0-9]+)/$', views.PreuserContractDelete.as_view(),
name='preuser_contract_delete'),
    # fee payment
url(r'^preuser/fee/list/(?P<preuser_id>[0-9]+)$', views.PreuserFeeList.as_view(),
name='preuser_fee_list'),
url(r'^preuser/fee/detail/(?P<pk>[0-9]+)$', views.PreuserFeeDetail.as_view(), name='preuser_fee_detail'),
url(r'^preuser/fee/add/(?P<preuser_contract_id>[0-9]+)$', views.PreuserFeeCreate.as_view(),
name='preuser_fee_add'),
url(r'^preuser/fee/update/check/(?P<pk>[0-9]+)$', views.PreuserFeeUpdateCheck.as_view(),
name='preuser_fee_update_check'),
url(r'^preuser/fee/update/after/(?P<pk>[0-9]+)$', views.PreuserFeeUpdateAfter.as_view(),
name='preuser_fee_update_after'),
url(r'^preuser/fee/delete/(?P<pk>[0-9]+)/$', views.PreuserFeeDelete.as_view(),
name='preuser_fee_delete'),
    # course-card user creation
url(r'^code/create/$', views.CodeUserCreateTemplate.as_view(), name='code_user_create'),
    # ======= WeChat mini program ============
url(r'^wxapp/', include('bee_django_crm.wxapp_urls')),
url(r'^campaign/record/list$', views.CampaignRecordList.as_view(),
name='campaign_record_list'),
url(r'^campaign/record/detail/(?P<pk>[0-9]+)$', views.CampaignRecordDetail.as_view(),
name='campaign_record_detail'),
url(r'^campaign/record/update/(?P<pk>[0-9]+)$', views.CampaignRecordUpdate.as_view(),
name='campaign_record_update'),
    # front-end (user-facing) views
url(r'^custom/reward/detail/(?P<pk>[0-9]+)', views.CustomRewardDetail.as_view(),name='custom_reward_detail'),
url(r'^custom/campaign/record/list$', views.CustomCampaignRecordList.as_view(),
name='custom_campaign_record_list'),
]
|
PypiClean
|
/kiara_plugin.tabular-0.4.29.tar.gz/kiara_plugin.tabular-0.4.29/README.md
|
[](https://pypi.python.org/pypi/kiara_plugin.tabular/)
[](https://pypi.python.org/pypi/kiara_plugin.tabular/)
[](https://pypi.python.org/pypi/kiara_plugin.tabular/)
[](https://actions-badge.atrox.dev/DHARPA-Project/kiara_plugin.tabular/goto?ref=develop)
[](https://coveralls.io/github/DHARPA-Project/kiara_plugin.tabular?branch=develop)
[](https://github.com/ambv/black)
# [**kiara**](https://dharpa.org/kiara.documentation) plugin: (tabular)
kiara data-types and modules for working with tables and databases.
- Documentation: [https://DHARPA-Project.github.io/kiara_plugin.tabular](https://DHARPA-Project.github.io/kiara_plugin.tabular)
- Code: [https://github.com/DHARPA-Project/kiara_plugin.tabular](https://github.com/DHARPA-Project/kiara_plugin.tabular)
- `kiara`: [https://dharpa.org/kiara.documentation](https://dharpa.org/kiara.documentation)
## Description
TODO
## Development
### Requirements
- Python (version >= 3.8)
- pip, virtualenv
- git
- make (on Linux / Mac OS X -- optional)
### Prepare development environment
If you only want to work on the modules, and not the core *Kiara* codebase, follow the instructions below. Otherwise, please
check the notes on how to set up a *Kiara* development environment under (TODO).
#### Linux & Mac OS X (using make)
For *NIX-like operating systems, setting up a development environment is relatively easy:
```console
git clone https://github.com/DHARPA-Project/kiara_plugin.tabular.git
cd kiara_plugin.tabular
python3 -m venv .venv
source .venv/bin/activate
make init
```
#### Windows (or manual pip install)
It's impossible to lay out all the ways Python can be installed on a machine, and virtual- (or conda-)envs can be created, so I'll assume you know how to do this.
One simple way is to install the [Anaconda (individual edition)](https://docs.anaconda.com/anaconda/install/index.html), then use the Anaconda navigator to create a new environment, install the 'git' package in it (if your system does not already have it), and use the 'Open Terminal' option of that environment to start up a terminal that has that virtual-/conda-environment activated.
Once that is done, `cd` into a directory where you want this project folder to live, and do:
```console
# make sure your virtual env is activated!!!
git clone https://github.com/DHARPA-Project/kiara_plugin.tabular.git
cd kiara_plugin.tabular
pip install --extra-index-url https://pypi.fury.io/dharpa/ -U -e .[all_dev]
```
#### Try it out
After this is done, you should be able to run the included example module via:
```console
kiara run tabular_example text_1="xxx" text_2="yyy"
...
...
```
### Re-activate the development environment
The 'prepare' step from above only has to be done once. After that, to re-enable your virtual environment,
you'll need to navigate to the directory again (wherever that is, in your case), and run the ``source`` command from before again:
```console
cd path/to/kiara_plugin.tabular
source .venv/bin/activate # if it isn't activated already, for example by the Anaconda navigator
kiara --help # or whatever, point is, kiara should be available for you now,
```
### ``make`` targets (Linux & Mac OS X)
- ``init``: init development project (install project & dev dependencies into virtualenv, as well as pre-commit git hook)
- ``update-dependencies``: update development dependencies (mainly the core ``kiara`` package from git)
- ``flake``: run *flake8* tests
- ``mypy``: run mypy tests
- ``test``: run unit tests
- ``docs``: create static documentation pages (under ``build/site``)
- ``serve-docs``: serve documentation pages (incl. auto-reload) for getting direct feedback when working on documentation
- ``clean``: clean build directories
For details (and other, minor targets), check the ``Makefile``.
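For example, a quick pre-commit check could chain the lint, type and test targets (a sketch, assuming the development environment above is active):
```console
make flake
make mypy
make test
```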
### Running tests
``` console
> make test
# or
> make coverage
```
## Copyright & license
This project is MPL v2.0 licensed, for the license text please check the [LICENSE](/LICENSE) file in this repository.
|
PypiClean
|
/advanced_telegram_bot-0.2.1-py3-none-any.whl/advancedbot/components/models/documentlink.py
|
from typing import Union
import telegram as tg
class DocumentLink:
"""
Model representing link to a downloadable document
..........
    Attributes
----------
name: str, public
        name of document (with extension)
media_group_id: int, public
telegram id of group of files
mime_type: str, public
MIME-type of document
size: int, public
size of document (in bytes)
tg_document: telegram.Document, private
`python-telegram-bot` class object providing document info
tg_file: telegram.File, private
`python-telegram-bot` class object providing download ability
"""
def __init__(self,
tg_document: Union[tg.Document, tg.File],
media_group_id: int = None) -> None:
"""
Constructor.
.........
Arguments
---------
tg_document: Union[telegram.Document, telegram.File], required
`python-telegram-bot` class object providing document info
media_group_id: int, optional (default is None)
telegram id of group of files
"""
self.__tg_document: tg.Document = None
self.__tg_file: tg.File = None
self.name: str = None
self.mime_type: str = None
self.media_group_id: int = media_group_id
if type(tg_document) == tg.Document:
self.__tg_document = tg_document
self.__tg_file = self.__tg_document.get_file()
self.name = self.__tg_document.file_name
self.mime_type = self.__tg_document.mime_type
else:
self.__tg_file = tg_document
self.size: str = self.__tg_file.file_size
def download(self, directory: str, name: str = None) -> str:
"""
        Download the document to the given directory.
.........
Arguments
---------
directory: str, required
directory where to save downloaded file
name: str, optional (default is None)
name with which to save downloaded file
if None, self name is given
Returns
------
str
path where document has been downloaded
"""
if name is None:
name = self.name
if not directory.endswith('/'):
directory += '/'
download_path: str = directory + name
self.__tg_file.download(custom_path=download_path)
return download_path
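# Minimal usage sketch (hypothetical names: ``tg_doc`` is assumed to be a
# telegram.Document taken from an incoming update, and the target directory is
# assumed to exist):
#
#     link = DocumentLink(tg_doc)
#     path = link.download('downloads')                  # keeps the original file name
#     path = link.download('downloads', 'renamed.pdf')   # saves under a custom name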
|
PypiClean
|
/nest-on-square-wheels-3.0.2.tar.gz/nest-on-square-wheels-3.0.2/data/pynest/examples/urbanczik_synapse_example.py
|
import numpy as np
from matplotlib import pyplot as plt
import nest
def g_inh(amplitude, t_start, t_end):
"""
returns weights for the spike generator that drives the inhibitory
somatic conductance.
"""
return lambda t: np.piecewise(t, [(t >= t_start) & (t < t_end)],
[amplitude, 0.0])
def g_exc(amplitude, freq, offset, t_start, t_end):
"""
returns weights for the spike generator that drives the excitatory
somatic conductance.
"""
return lambda t: np.piecewise(t, [(t >= t_start) & (t < t_end)],
[lambda t: amplitude*np.sin(freq*t) + offset, 0.0])
def matching_potential(g_E, g_I, nrn_params):
"""
returns the matching potential as a function of the somatic conductances.
"""
E_E = nrn_params['soma']['E_ex']
E_I = nrn_params['soma']['E_in']
return (g_E*E_E + g_I*E_I) / (g_E + g_I)
def V_w_star(V_w, nrn_params):
"""
returns the dendritic prediction of the somatic membrane potential.
"""
g_D = nrn_params['g_sp']
g_L = nrn_params['soma']['g_L']
E_L = nrn_params['soma']['E_L']
return (g_L*E_L + g_D*V_w) / (g_L + g_D)
def phi(U, nrn_params):
"""
rate function of the soma
"""
phi_max = nrn_params['phi_max']
k = nrn_params['rate_slope']
beta = nrn_params['beta']
theta = nrn_params['theta']
return phi_max / (1.0 + k*np.exp(beta*(theta - U)))
def h(U, nrn_params):
"""
derivative of the rate function phi
"""
k = nrn_params['rate_slope']
beta = nrn_params['beta']
theta = nrn_params['theta']
return 15.0*beta / (1.0 + np.exp(-beta*(theta - U)) / k)
# simulation params
n_pattern_rep = 100 # number of repetitions of the spike pattern
pattern_duration = 200.0
t_start = 2.0*pattern_duration
t_end = n_pattern_rep*pattern_duration + t_start
simulation_time = t_end + 2.0*pattern_duration
n_rep_total = int(np.around(simulation_time / pattern_duration))
resolution = 0.1
nest.SetKernelStatus({'resolution': resolution})
# neuron parameters
nrn_model = 'pp_cond_exp_mc_urbanczik'
nrn_params = {
't_ref': 3.0, # refractory period
'g_sp': 600.0, # soma-to-dendritic coupling conductance
'soma': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # somatic leak conductance
'E_ex': 0.0, # resting potential for exc input
'E_in': -75.0, # resting potential for inh input
'tau_syn_ex': 3.0, # time constant of exc conductance
'tau_syn_in': 3.0, # time constant of inh conductance
},
'dendritic': {
'V_m': -70.0, # initial value of V_m
'C_m': 300.0, # capacitance of membrane
'E_L': -70.0, # resting potential
'g_L': 30.0, # dendritic leak conductance
'tau_syn_ex': 3.0, # time constant of exc input current
'tau_syn_in': 3.0, # time constant of inh input current
},
# parameters of rate function
'phi_max': 0.15, # max rate
'rate_slope': 0.5, # called 'k' in the paper
'beta': 1.0 / 3.0,
'theta': -55.0,
}
# synapse params
syns = nest.GetDefaults(nrn_model)['receptor_types']
init_w = 0.3*nrn_params['dendritic']['C_m']
syn_params = {
'synapse_model': 'urbanczik_synapse_wr',
'receptor_type': syns['dendritic_exc'],
'tau_Delta': 100.0, # time constant of low pass filtering of the weight change
'eta': 0.17, # learning rate
'weight': init_w,
'Wmax': 4.5*nrn_params['dendritic']['C_m'],
'delay': resolution,
}
"""
# in case you want to use the unitless quantities as in [1]:
# neuron params:
nrn_model = 'pp_cond_exp_mc_urbanczik'
nrn_params = {
't_ref': 3.0,
'g_sp': 2.0,
'soma': {
'V_m': 0.0,
'C_m': 1.0,
'E_L': 0.0,
'g_L': 0.1,
'E_ex': 14.0 / 3.0,
'E_in': -1.0 / 3.0,
'tau_syn_ex': 3.0,
'tau_syn_in': 3.0,
},
'dendritic': {
'V_m': 0.0,
'C_m': 1.0,
'E_L': 0.0,
'g_L': 0.1,
'tau_syn_ex': 3.0,
'tau_syn_in': 3.0,
},
# parameters of rate function
'phi_max': 0.15,
'rate_slope': 0.5,
'beta': 5.0,
'theta': 1.0,
}
# synapse params:
syns = nest.GetDefaults(nrn_model)['receptor_types']
init_w = 0.2*nrn_params['dendritic']['g_L']
syn_params = {
'synapse_model': 'urbanczik_synapse_wr',
'receptor_type': syns['dendritic_exc'],
'tau_Delta': 100.0,
'eta': 0.0003 / (15.0*15.0*nrn_params['dendritic']['C_m']),
'weight': init_w,
'Wmax': 3.0*nrn_params['dendritic']['g_L'],
'delay': resolution,
}
"""
# somatic input
ampl_exc = 0.016*nrn_params['dendritic']['C_m']
offset = 0.018*nrn_params['dendritic']['C_m']
ampl_inh = 0.06*nrn_params['dendritic']['C_m']
freq = 2.0 / pattern_duration
soma_exc_inp = g_exc(ampl_exc, 2.0*np.pi*freq, offset, t_start, t_end)
soma_inh_inp = g_inh(ampl_inh, t_start, t_end)
# dendritic input
# create spike pattern by recording the spikes of a simulation of n_pg
# poisson generators. The recorded spike times are then given to spike
# generators.
n_pg = 200 # number of poisson generators
p_rate = 10.0 # rate in Hz
pgs = nest.Create('poisson_generator', n=n_pg, params={'rate': p_rate})
prrt_nrns_pg = nest.Create('parrot_neuron', n_pg)
nest.Connect(pgs, prrt_nrns_pg, {'rule': 'one_to_one'})
sr = nest.Create('spike_recorder', n_pg)
nest.Connect(prrt_nrns_pg, sr, {'rule': 'one_to_one'})
nest.Simulate(pattern_duration)
t_srs = [ssr.get('events', 'times') for ssr in sr]
nest.ResetKernel()
nest.SetKernelStatus({'resolution': resolution})
"""
neuron and devices
"""
nrn = nest.Create(nrn_model, params=nrn_params)
# poisson generators are connected to parrot neurons which are
# connected to the mc neuron
prrt_nrns = nest.Create('parrot_neuron', n_pg)
# excitatory input to the soma
spike_times_soma_inp = np.arange(resolution, simulation_time, resolution)
sg_soma_exc = nest.Create('spike_generator',
params={'spike_times': spike_times_soma_inp,
'spike_weights': soma_exc_inp(spike_times_soma_inp)})
# inhibitory input to the soma
sg_soma_inh = nest.Create('spike_generator',
params={'spike_times': spike_times_soma_inp,
'spike_weights': soma_inh_inp(spike_times_soma_inp)})
# excitatory input to the dendrite
sg_prox = nest.Create('spike_generator', n=n_pg)
# for recording all parameters of the Urbanczik neuron
rqs = nest.GetDefaults(nrn_model)['recordables']
mm = nest.Create('multimeter', params={'record_from': rqs, 'interval': 0.1})
# for recoding the synaptic weights of the Urbanczik synapses
wr = nest.Create('weight_recorder')
# for recording the spiking of the soma
sr_soma = nest.Create('spike_recorder')
# create connections
nest.Connect(sg_prox, prrt_nrns, {'rule': 'one_to_one'})
nest.CopyModel('urbanczik_synapse', 'urbanczik_synapse_wr',
{'weight_recorder': wr[0]})
nest.Connect(prrt_nrns, nrn, syn_spec=syn_params)
nest.Connect(mm, nrn, syn_spec={'delay': 0.1})
nest.Connect(sg_soma_exc, nrn,
syn_spec={'receptor_type': syns['soma_exc'], 'weight': 10.0*resolution, 'delay': resolution})
nest.Connect(sg_soma_inh, nrn,
syn_spec={'receptor_type': syns['soma_inh'], 'weight': 10.0*resolution, 'delay': resolution})
nest.Connect(nrn, sr_soma)
# simulation divided into intervals of the pattern duration
for i in np.arange(n_rep_total):
# Set the spike times of the pattern for each spike generator
for (sg, t_sp) in zip(sg_prox, t_srs):
nest.SetStatus(
sg, {'spike_times': np.array(t_sp) + i*pattern_duration})
nest.Simulate(pattern_duration)
# read out devices
# multimeter
mm_events = mm.events
t = mm_events['times']
V_s = mm_events['V_m.s']
V_d = mm_events['V_m.p']
V_d_star = V_w_star(V_d, nrn_params)
g_in = mm_events['g_in.s']
g_ex = mm_events['g_ex.s']
I_ex = mm_events['I_ex.p']
I_in = mm_events['I_in.p']
U_M = matching_potential(g_ex, g_in, nrn_params)
# weight recorder
wr_events = wr.events
senders = wr_events['senders']
targets = wr_events['targets']
weights = wr_events['weights']
times = wr_events['times']
# spike recorder
spike_times_soma = sr_soma.get('events', 'times')
# plot results
fs = 22
lw = 2.5
fig1, (axA, axB, axC, axD) = plt.subplots(4, 1, sharex=True)
# membrane potentials and matching potential
axA.plot(t, V_s, lw=lw, label=r'$U$ (soma)', color='darkblue')
axA.plot(t, V_d, lw=lw, label=r'$V_W$ (dendrit)', color='deepskyblue')
axA.plot(t, V_d_star, lw=lw, label=r'$V_W^\ast$ (dendrit)', color='b', ls='--')
axA.plot(t, U_M, lw=lw, label=r'$U_M$ (soma)', color='r', ls='-')
axA.set_ylabel('membrane pot [mV]', fontsize=fs)
axA.legend(fontsize=fs)
# somatic conductances
axB.plot(t, g_in, lw=lw, label=r'$g_I$', color='r')
axB.plot(t, g_ex, lw=lw, label=r'$g_E$', color='coral')
axB.set_ylabel('somatic cond', fontsize=fs)
axB.legend(fontsize=fs)
# dendritic currents
axC.plot(t, I_ex, lw=lw, label=r'$I_ex$', color='r')
axC.plot(t, I_in, lw=lw, label=r'$I_in$', color='coral')
axC.set_ylabel('dend current', fontsize=fs)
axC.legend(fontsize=fs)
# rates
axD.plot(t, phi(V_s, nrn_params), lw=lw, label=r'$\phi(U)$', color='darkblue')
axD.plot(t, phi(V_d, nrn_params), lw=lw,
label=r'$\phi(V_W)$', color='deepskyblue')
axD.plot(t, phi(V_d_star, nrn_params), lw=lw,
label=r'$\phi(V_W^\ast)$', color='b', ls='--')
axD.plot(t, h(V_d_star, nrn_params), lw=lw,
label=r'$h(V_W^\ast)$', color='g', ls='--')
axD.plot(t, phi(V_s, nrn_params) - phi(V_d_star, nrn_params), lw=lw,
label=r'$\phi(U) - \phi(V_W^\ast)$', color='r', ls='-')
axD.plot(spike_times_soma, 0.0*np.ones(len(spike_times_soma)),
's', color='k', markersize=2)
axD.legend(fontsize=fs)
# synaptic weights
fig2, axA = plt.subplots(1, 1)
for i in np.arange(2, 200, 10):
index = np.intersect1d(np.where(senders == i), np.where(targets == 1))
if not len(index) == 0:
axA.step(times[index], weights[index], label='pg_{}'.format(i - 2),
lw=lw)
axA.set_title('Synaptic weights of Urbanczik synapses')
axA.set_xlabel('time [ms]', fontsize=fs)
axA.set_ylabel('weight', fontsize=fs)
axA.legend(fontsize=fs - 4)
plt.show()
|
PypiClean
|
/dronin-pyqtgraph-20160825.3.tar.gz/dronin-pyqtgraph-20160825.3/dronin_pyqtgraph/graphicsItems/PlotCurveItem.py
|
from ..Qt import QtGui, QtCore
try:
from ..Qt import QtOpenGL
HAVE_OPENGL = True
except:
HAVE_OPENGL = False
import numpy as np
from .GraphicsObject import GraphicsObject
from .. import functions as fn
from ..Point import Point
import struct, sys
from .. import getConfigOption
from .. import debug
__all__ = ['PlotCurveItem']
class PlotCurveItem(GraphicsObject):
"""
Class representing a single plot curve. Instances of this class are created
automatically as part of PlotDataItem; these rarely need to be instantiated
directly.
Features:
- Fast data update
- Fill under curve
- Mouse interaction
==================== ===============================================
**Signals:**
sigPlotChanged(self) Emitted when the data being plotted has changed
sigClicked(self) Emitted when the curve is clicked
==================== ===============================================
"""
sigPlotChanged = QtCore.Signal(object)
sigClicked = QtCore.Signal(object)
def __init__(self, *args, **kargs):
"""
Forwards all arguments to :func:`setData <pyqtgraph.PlotCurveItem.setData>`.
Some extra arguments are accepted as well:
============== =======================================================
**Arguments:**
parent The parent GraphicsObject (optional)
clickable If True, the item will emit sigClicked when it is
clicked on. Defaults to False.
============== =======================================================
"""
GraphicsObject.__init__(self, kargs.get('parent', None))
self.clear()
## this is disastrous for performance.
#self.setCacheMode(QtGui.QGraphicsItem.DeviceCoordinateCache)
self.metaData = {}
self.opts = {
'pen': fn.mkPen('w'),
'shadowPen': None,
'fillLevel': None,
'brush': None,
'stepMode': False,
'name': None,
'antialias': getConfigOption('antialias'),
'connect': 'all',
'mouseWidth': 8, # width of shape responding to mouse click
}
self.setClickable(kargs.get('clickable', False))
self.setData(*args, **kargs)
def implements(self, interface=None):
ints = ['plotData']
if interface is None:
return ints
return interface in ints
def name(self):
return self.opts.get('name', None)
def setClickable(self, s, width=None):
"""Sets whether the item responds to mouse clicks.
The *width* argument specifies the width in pixels orthogonal to the
curve that will respond to a mouse click.
"""
self.clickable = s
if width is not None:
self.opts['mouseWidth'] = width
self._mouseShape = None
self._boundingRect = None
def getData(self):
return self.xData, self.yData
def dataBounds(self, ax, frac=1.0, orthoRange=None):
## Need this to run as fast as possible.
## check cache first:
cache = self._boundsCache[ax]
if cache is not None and cache[0] == (frac, orthoRange):
return cache[1]
(x, y) = self.getData()
if x is None or len(x) == 0:
return (None, None)
if ax == 0:
d = x
d2 = y
elif ax == 1:
d = y
d2 = x
## If an orthogonal range is specified, mask the data now
if orthoRange is not None:
mask = (d2 >= orthoRange[0]) * (d2 <= orthoRange[1])
d = d[mask]
#d2 = d2[mask]
if len(d) == 0:
return (None, None)
## Get min/max (or percentiles) of the requested data range
if frac >= 1.0:
# include complete data range
# first try faster nanmin/max function, then cut out infs if needed.
b = (np.nanmin(d), np.nanmax(d))
if any(np.isinf(b)):
mask = np.isfinite(d)
d = d[mask]
b = (d.min(), d.max())
elif frac <= 0.0:
raise Exception("Value for parameter 'frac' must be > 0. (got %s)" % str(frac))
else:
# include a percentile of data range
mask = np.isfinite(d)
d = d[mask]
b = np.percentile(d, [50 * (1 - frac), 50 * (1 + frac)])
## adjust for fill level
if ax == 1 and self.opts['fillLevel'] is not None:
b = (min(b[0], self.opts['fillLevel']), max(b[1], self.opts['fillLevel']))
## Add pen width only if it is non-cosmetic.
pen = self.opts['pen']
spen = self.opts['shadowPen']
if not pen.isCosmetic():
b = (b[0] - pen.widthF()*0.7072, b[1] + pen.widthF()*0.7072)
if spen is not None and not spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:
b = (b[0] - spen.widthF()*0.7072, b[1] + spen.widthF()*0.7072)
self._boundsCache[ax] = [(frac, orthoRange), b]
return b
def pixelPadding(self):
pen = self.opts['pen']
spen = self.opts['shadowPen']
w = 0
if pen.isCosmetic():
w += pen.widthF()*0.7072
if spen is not None and spen.isCosmetic() and spen.style() != QtCore.Qt.NoPen:
w = max(w, spen.widthF()*0.7072)
if self.clickable:
w = max(w, self.opts['mouseWidth']//2 + 1)
return w
def boundingRect(self):
if self._boundingRect is None:
(xmn, xmx) = self.dataBounds(ax=0)
(ymn, ymx) = self.dataBounds(ax=1)
if xmn is None:
return QtCore.QRectF()
px = py = 0.0
pxPad = self.pixelPadding()
if pxPad > 0:
# determine length of pixel in local x, y directions
px, py = self.pixelVectors()
try:
px = 0 if px is None else px.length()
except OverflowError:
px = 0
try:
py = 0 if py is None else py.length()
except OverflowError:
py = 0
# return bounds expanded by pixel size
px *= pxPad
py *= pxPad
#px += self._maxSpotWidth * 0.5
#py += self._maxSpotWidth * 0.5
self._boundingRect = QtCore.QRectF(xmn-px, ymn-py, (2*px)+xmx-xmn, (2*py)+ymx-ymn)
return self._boundingRect
def viewTransformChanged(self):
self.invalidateBounds()
self.prepareGeometryChange()
#def boundingRect(self):
#if self._boundingRect is None:
#(x, y) = self.getData()
#if x is None or y is None or len(x) == 0 or len(y) == 0:
#return QtCore.QRectF()
#if self.opts['shadowPen'] is not None:
#lineWidth = (max(self.opts['pen'].width(), self.opts['shadowPen'].width()) + 1)
#else:
#lineWidth = (self.opts['pen'].width()+1)
#pixels = self.pixelVectors()
#if pixels == (None, None):
#pixels = [Point(0,0), Point(0,0)]
#xmin = x.min()
#xmax = x.max()
#ymin = y.min()
#ymax = y.max()
#if self.opts['fillLevel'] is not None:
#ymin = min(ymin, self.opts['fillLevel'])
#ymax = max(ymax, self.opts['fillLevel'])
#xmin -= pixels[0].x() * lineWidth
#xmax += pixels[0].x() * lineWidth
#ymin -= abs(pixels[1].y()) * lineWidth
#ymax += abs(pixels[1].y()) * lineWidth
#self._boundingRect = QtCore.QRectF(xmin, ymin, xmax-xmin, ymax-ymin)
#return self._boundingRect
def invalidateBounds(self):
self._boundingRect = None
self._boundsCache = [None, None]
def setPen(self, *args, **kargs):
"""Set the pen used to draw the curve."""
self.opts['pen'] = fn.mkPen(*args, **kargs)
self.invalidateBounds()
self.update()
def setShadowPen(self, *args, **kargs):
"""Set the shadow pen used to draw behind tyhe primary pen.
This pen must have a larger width than the primary
pen to be visible.
"""
self.opts['shadowPen'] = fn.mkPen(*args, **kargs)
self.invalidateBounds()
self.update()
def setBrush(self, *args, **kargs):
"""Set the brush used when filling the area under the curve"""
self.opts['brush'] = fn.mkBrush(*args, **kargs)
self.invalidateBounds()
self.update()
def setFillLevel(self, level):
"""Set the level filled to when filling under the curve"""
self.opts['fillLevel'] = level
self.fillPath = None
self.invalidateBounds()
self.update()
def setData(self, *args, **kargs):
"""
============== ========================================================
**Arguments:**
x, y (numpy arrays) Data to show
pen Pen to use when drawing. Any single argument accepted by
:func:`mkPen <pyqtgraph.mkPen>` is allowed.
shadowPen Pen for drawing behind the primary pen. Usually this
is used to emphasize the curve by providing a
high-contrast border. Any single argument accepted by
:func:`mkPen <pyqtgraph.mkPen>` is allowed.
fillLevel (float or None) Fill the area 'under' the curve to
*fillLevel*
brush QBrush to use when filling. Any single argument accepted
by :func:`mkBrush <pyqtgraph.mkBrush>` is allowed.
antialias (bool) Whether to use antialiasing when drawing. This
is disabled by default because it decreases performance.
stepMode If True, two orthogonal lines are drawn for each sample
as steps. This is commonly used when drawing histograms.
Note that in this case, len(x) == len(y) + 1
connect Argument specifying how vertexes should be connected
by line segments. Default is "all", indicating full
connection. "pairs" causes only even-numbered segments
to be drawn. "finite" causes segments to be omitted if
they are attached to nan or inf values. For any other
connectivity, specify an array of boolean values.
============== ========================================================
If non-keyword arguments are used, they will be interpreted as
setData(y) for a single argument and setData(x, y) for two
arguments.
"""
self.updateData(*args, **kargs)
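    # Usage sketch (assumption: ``curve`` is an existing PlotCurveItem and
    # ``x``/``y`` are equal-length 1D numpy arrays):
    #
    #     curve.setData(x, y, pen='r', fillLevel=0.0, brush=(50, 50, 200, 100))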
def updateData(self, *args, **kargs):
profiler = debug.Profiler()
if len(args) == 1:
kargs['y'] = args[0]
elif len(args) == 2:
kargs['x'] = args[0]
kargs['y'] = args[1]
if 'y' not in kargs or kargs['y'] is None:
kargs['y'] = np.array([])
if 'x' not in kargs or kargs['x'] is None:
kargs['x'] = np.arange(len(kargs['y']))
for k in ['x', 'y']:
data = kargs[k]
if isinstance(data, list):
data = np.array(data)
kargs[k] = data
if not isinstance(data, np.ndarray) or data.ndim > 1:
raise Exception("Plot data must be 1D ndarray.")
if 'complex' in str(data.dtype):
raise Exception("Can not plot complex data types.")
profiler("data checks")
#self.setCacheMode(QtGui.QGraphicsItem.NoCache) ## Disabling and re-enabling the cache works around a bug in Qt 4.6 causing the cached results to display incorrectly
## Test this bug with test_PlotWidget and zoom in on the animated plot
self.invalidateBounds()
self.prepareGeometryChange()
self.informViewBoundsChanged()
self.yData = kargs['y'].view(np.ndarray)
self.xData = kargs['x'].view(np.ndarray)
profiler('copy')
if 'stepMode' in kargs:
self.opts['stepMode'] = kargs['stepMode']
if self.opts['stepMode'] is True:
if len(self.xData) != len(self.yData)+1: ## allow difference of 1 for step mode plots
raise Exception("len(X) must be len(Y)+1 since stepMode=True (got %s and %s)" % (self.xData.shape, self.yData.shape))
else:
if self.xData.shape != self.yData.shape: ## X and Y must have matching shapes when stepMode is not enabled
raise Exception("X and Y arrays must be the same shape--got %s and %s." % (self.xData.shape, self.yData.shape))
self.path = None
self.fillPath = None
self._mouseShape = None
#self.xDisp = self.yDisp = None
if 'name' in kargs:
self.opts['name'] = kargs['name']
if 'connect' in kargs:
self.opts['connect'] = kargs['connect']
if 'pen' in kargs:
self.setPen(kargs['pen'])
if 'shadowPen' in kargs:
self.setShadowPen(kargs['shadowPen'])
if 'fillLevel' in kargs:
self.setFillLevel(kargs['fillLevel'])
if 'brush' in kargs:
self.setBrush(kargs['brush'])
if 'antialias' in kargs:
self.opts['antialias'] = kargs['antialias']
profiler('set')
self.update()
profiler('update')
self.sigPlotChanged.emit(self)
profiler('emit')
def generatePath(self, x, y):
if self.opts['stepMode']:
## each value in the x/y arrays generates 2 points.
x2 = np.empty((len(x),2), dtype=x.dtype)
x2[:] = x[:,np.newaxis]
if self.opts['fillLevel'] is None:
x = x2.reshape(x2.size)[1:-1]
y2 = np.empty((len(y),2), dtype=y.dtype)
y2[:] = y[:,np.newaxis]
y = y2.reshape(y2.size)
else:
## If we have a fill level, add two extra points at either end
x = x2.reshape(x2.size)
y2 = np.empty((len(y)+2,2), dtype=y.dtype)
y2[1:-1] = y[:,np.newaxis]
y = y2.reshape(y2.size)[1:-1]
y[0] = self.opts['fillLevel']
y[-1] = self.opts['fillLevel']
path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
return path
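## Editor's note: worked example of the stepMode layout handled above
## (illustrative only). With x = [0, 1, 2], y = [5, 7] and no fillLevel,
## each x is duplicated to [0, 0, 1, 1, 2, 2] and trimmed to [0, 1, 1, 2],
## while y becomes [5, 5, 7, 7]; the resulting vertices (0,5), (1,5), (1,7),
## (2,7) draw one horizontal step per sample, which is why stepMode requires
## len(x) == len(y) + 1.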
def getPath(self):
if self.path is None:
x,y = self.getData()
if x is None or len(x) == 0 or y is None or len(y) == 0:
self.path = QtGui.QPainterPath()
else:
self.path = self.generatePath(*self.getData())
self.fillPath = None
self._mouseShape = None
return self.path
@debug.warnOnException ## raising an exception here causes crash
def paint(self, p, opt, widget):
profiler = debug.Profiler()
if self.xData is None or len(self.xData) == 0:
return
if HAVE_OPENGL and getConfigOption('enableExperimental') and isinstance(widget, QtOpenGL.QGLWidget):
self.paintGL(p, opt, widget)
return
x = None
y = None
path = self.getPath()
profiler('generate path')
if self._exportOpts is not False:
aa = self._exportOpts.get('antialias', True)
else:
aa = self.opts['antialias']
p.setRenderHint(p.Antialiasing, aa)
if self.opts['brush'] is not None and self.opts['fillLevel'] is not None:
if self.fillPath is None:
if x is None:
x,y = self.getData()
p2 = QtGui.QPainterPath(self.path)
p2.lineTo(x[-1], self.opts['fillLevel'])
p2.lineTo(x[0], self.opts['fillLevel'])
p2.lineTo(x[0], y[0])
p2.closeSubpath()
self.fillPath = p2
profiler('generate fill path')
p.fillPath(self.fillPath, self.opts['brush'])
profiler('draw fill path')
sp = fn.mkPen(self.opts['shadowPen'])
cp = fn.mkPen(self.opts['pen'])
## Copy pens and apply alpha adjustment
#sp = QtGui.QPen(self.opts['shadowPen'])
#cp = QtGui.QPen(self.opts['pen'])
#for pen in [sp, cp]:
#if pen is None:
#continue
#c = pen.color()
#c.setAlpha(c.alpha() * self.opts['alphaHint'])
#pen.setColor(c)
##pen.setCosmetic(True)
if sp is not None and sp.style() != QtCore.Qt.NoPen:
p.setPen(sp)
p.drawPath(path)
p.setPen(cp)
p.drawPath(path)
profiler('drawPath')
#print "Render hints:", int(p.renderHints())
#p.setPen(QtGui.QPen(QtGui.QColor(255,0,0)))
#p.drawRect(self.boundingRect())
def paintGL(self, p, opt, widget):
p.beginNativePainting()
import OpenGL.GL as gl
## set clipping viewport
view = self.getViewBox()
if view is not None:
rect = view.mapRectToItem(self, view.boundingRect())
#gl.glViewport(int(rect.x()), int(rect.y()), int(rect.width()), int(rect.height()))
#gl.glTranslate(-rect.x(), -rect.y(), 0)
gl.glEnable(gl.GL_STENCIL_TEST)
gl.glColorMask(gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE, gl.GL_FALSE) # disable drawing to frame buffer
gl.glDepthMask(gl.GL_FALSE) # disable drawing to depth buffer
gl.glStencilFunc(gl.GL_NEVER, 1, 0xFF)
gl.glStencilOp(gl.GL_REPLACE, gl.GL_KEEP, gl.GL_KEEP)
## draw stencil pattern
gl.glStencilMask(0xFF)
gl.glClear(gl.GL_STENCIL_BUFFER_BIT)
gl.glBegin(gl.GL_TRIANGLES)
gl.glVertex2f(rect.x(), rect.y())
gl.glVertex2f(rect.x()+rect.width(), rect.y())
gl.glVertex2f(rect.x(), rect.y()+rect.height())
gl.glVertex2f(rect.x()+rect.width(), rect.y()+rect.height())
gl.glVertex2f(rect.x()+rect.width(), rect.y())
gl.glVertex2f(rect.x(), rect.y()+rect.height())
gl.glEnd()
gl.glColorMask(gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE, gl.GL_TRUE)
gl.glDepthMask(gl.GL_TRUE)
gl.glStencilMask(0x00)
gl.glStencilFunc(gl.GL_EQUAL, 1, 0xFF)
try:
x, y = self.getData()
pos = np.empty((len(x), 2))
pos[:,0] = x
pos[:,1] = y
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
try:
gl.glVertexPointerf(pos)
pen = fn.mkPen(self.opts['pen'])
color = pen.color()
gl.glColor4f(color.red()/255., color.green()/255., color.blue()/255., color.alpha()/255.)
width = pen.width()
if pen.isCosmetic() and width < 1:
width = 1
gl.glPointSize(width)
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glEnable(gl.GL_BLEND)
gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glDrawArrays(gl.GL_LINE_STRIP, 0, pos.size // pos.shape[-1])  # integer vertex count required under Python 3
finally:
gl.glDisableClientState(gl.GL_VERTEX_ARRAY)
finally:
p.endNativePainting()
def clear(self):
self.xData = None ## raw values
self.yData = None
self.xDisp = None ## display values (after log / fft)
self.yDisp = None
self.path = None
self.fillPath = None
self._mouseShape = None
self._mouseBounds = None
self._boundsCache = [None, None]
#del self.xData, self.yData, self.xDisp, self.yDisp, self.path
def mouseShape(self):
"""
Return a QPainterPath representing the clickable shape of the curve
"""
if self._mouseShape is None:
view = self.getViewBox()
if view is None:
return QtGui.QPainterPath()
stroker = QtGui.QPainterPathStroker()
path = self.getPath()
path = self.mapToItem(view, path)
stroker.setWidth(self.opts['mouseWidth'])
mousePath = stroker.createStroke(path)
self._mouseShape = self.mapFromItem(view, mousePath)
return self._mouseShape
def mouseClickEvent(self, ev):
if not self.clickable or ev.button() != QtCore.Qt.LeftButton:
return
if self.mouseShape().contains(ev.pos()):
ev.accept()
self.sigClicked.emit(self)
class ROIPlotItem(PlotCurveItem):
"""Plot curve that monitors an ROI and image for changes to automatically replot."""
def __init__(self, roi, data, img, axes=(0,1), xVals=None, color=None):
self.roi = roi
self.roiData = data
self.roiImg = img
self.axes = axes
self.xVals = xVals
PlotCurveItem.__init__(self, self.getRoiData(), x=self.xVals, color=color)
#roi.connect(roi, QtCore.SIGNAL('regionChanged'), self.roiChangedEvent)
roi.sigRegionChanged.connect(self.roiChangedEvent)
#self.roiChangedEvent()
def getRoiData(self):
d = self.roi.getArrayRegion(self.roiData, self.roiImg, axes=self.axes)
if d is None:
return
while d.ndim > 1:
d = d.mean(axis=1)
return d
def roiChangedEvent(self):
d = self.getRoiData()
self.updateData(d, self.xVals)
|
PypiClean
|
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/groups/item/calendar_view/item/instances/item/tentatively_accept/tentatively_accept_request_builder.py
|
from __future__ import annotations
from dataclasses import dataclass
from kiota_abstractions.get_path_parameters import get_path_parameters
from kiota_abstractions.method import Method
from kiota_abstractions.request_adapter import RequestAdapter
from kiota_abstractions.request_information import RequestInformation
from kiota_abstractions.request_option import RequestOption
from kiota_abstractions.response_handler import ResponseHandler
from kiota_abstractions.serialization import Parsable, ParsableFactory
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from . import tentatively_accept_post_request_body
from ........models.o_data_errors import o_data_error
class TentativelyAcceptRequestBuilder():
"""
Provides operations to call the tentativelyAccept method.
"""
def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None:
"""
Instantiates a new TentativelyAcceptRequestBuilder and sets the default values.
Args:
pathParameters: The raw url or the Url template parameters for the request.
requestAdapter: The request adapter to use to execute the requests.
"""
if path_parameters is None:
raise Exception("path_parameters cannot be undefined")
if request_adapter is None:
raise Exception("request_adapter cannot be undefined")
# Url template to use to build the URL for the current request builder
self.url_template: str = "{+baseurl}/groups/{group%2Did}/calendarView/{event%2Did}/instances/{event%2Did1}/tentativelyAccept"
url_tpl_params = get_path_parameters(path_parameters)
self.path_parameters = url_tpl_params
self.request_adapter = request_adapter
async def post(self,body: Optional[tentatively_accept_post_request_body.TentativelyAcceptPostRequestBody] = None, request_configuration: Optional[TentativelyAcceptRequestBuilderPostRequestConfiguration] = None) -> None:
"""
Invoke action tentativelyAccept
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = self.to_post_request_information(
body, request_configuration
)
from ........models.o_data_errors import o_data_error
error_mapping: Dict[str, ParsableFactory] = {
"4XX": o_data_error.ODataError,
"5XX": o_data_error.ODataError,
}
if not self.request_adapter:
raise Exception("Http core is null")
return await self.request_adapter.send_no_response_content_async(request_info, error_mapping)
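# Editor's note: hedged usage sketch for this generated builder. Direct
# instantiation and the identifiers below are assumptions for illustration;
# Kiota-based SDKs normally reach this builder through a service client.
# builder = TentativelyAcceptRequestBuilder(request_adapter, {
#     "group%2Did": group_id, "event%2Did": event_id, "event%2Did1": instance_id})
# body = tentatively_accept_post_request_body.TentativelyAcceptPostRequestBody()
# await builder.post(body)   # no response body on success; ODataError raised on 4XX/5XX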
def to_post_request_information(self,body: Optional[tentatively_accept_post_request_body.TentativelyAcceptPostRequestBody] = None, request_configuration: Optional[TentativelyAcceptRequestBuilderPostRequestConfiguration] = None) -> RequestInformation:
"""
Invoke action tentativelyAccept
Args:
body: The request body
requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options.
Returns: RequestInformation
"""
if body is None:
raise Exception("body cannot be undefined")
request_info = RequestInformation()
request_info.url_template = self.url_template
request_info.path_parameters = self.path_parameters
request_info.http_method = Method.POST
if request_configuration:
request_info.add_request_headers(request_configuration.headers)
request_info.add_request_options(request_configuration.options)
request_info.set_content_from_parsable(self.request_adapter, "application/json", body)
return request_info
@dataclass
class TentativelyAcceptRequestBuilderPostRequestConfiguration():
"""
Configuration for the request such as headers, query parameters, and middleware options.
"""
# Request headers
headers: Optional[Dict[str, Union[str, List[str]]]] = None
# Request options
options: Optional[List[RequestOption]] = None
|
PypiClean
|
/bob.bio.base-8.0.1b0.tar.gz/bob.bio.base-8.0.1b0/src/bob/bio/base/script/figure.py
|
import logging
import math
import click
import matplotlib.pyplot as mpl
from tabulate import tabulate
import bob.measure
import bob.measure.script.figure as measure_figure
from bob.measure import plot
LOGGER = logging.getLogger("bob.bio.base")
class Roc(measure_figure.Roc):
def __init__(self, ctx, scores, evaluation, func_load):
super(Roc, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get("x_label") or "FMR"
default_y_label = "1 - FNMR" if self._tpr else "FNMR"
self._y_label = ctx.meta.get("y_label") or default_y_label
class Det(measure_figure.Det):
def __init__(self, ctx, scores, evaluation, func_load):
super(Det, self).__init__(ctx, scores, evaluation, func_load)
self._x_label = ctx.meta.get("x_label") or "FMR (%)"
self._y_label = ctx.meta.get("y_label") or "FNMR (%)"
class Cmc(measure_figure.PlotBase):
"""Handles the plotting of Cmc"""
def __init__(self, ctx, scores, evaluation, func_load):
super(Cmc, self).__init__(ctx, scores, evaluation, func_load)
self._semilogx = ctx.meta.get("semilogx", True)
self._titles = self._titles or ["CMC dev.", "CMC eval."]
self._x_label = self._x_label or "Rank"
self._y_label = self._y_label or "Identification rate"
self._max_R = 0
def compute(self, idx, input_scores, input_names):
"""Plot CMC for dev and eval data using
:py:func:`bob.measure.plot.cmc`"""
mpl.figure(1)
if self._eval:
linestyle = "-" if not self._split else self._linestyles[idx]
LOGGER.info("CMC dev. curve using %s", input_names[0])
rank = plot.cmc(
input_scores[0],
logx=self._semilogx,
color=self._colors[idx],
linestyle=linestyle,
label=self._label("dev.", idx),
)
self._max_R = max(rank, self._max_R)
linestyle = "--"
if self._split:
mpl.figure(2)
linestyle = self._linestyles[idx]
LOGGER.info("CMC eval. curve using %s", input_names[1])
rank = plot.cmc(
input_scores[1],
logx=self._semilogx,
color=self._colors[idx],
linestyle=linestyle,
label=self._label("eval.", idx),
)
self._max_R = max(rank, self._max_R)
else:
LOGGER.info("CMC dev. curve using %s", input_names[0])
rank = plot.cmc(
input_scores[0],
logx=self._semilogx,
color=self._colors[idx],
linestyle=self._linestyles[idx],
label=self._label("dev.", idx),
)
self._max_R = max(rank, self._max_R)
class Dir(measure_figure.PlotBase):
"""Handles the plotting of DIR curve"""
def __init__(self, ctx, scores, evaluation, func_load):
super(Dir, self).__init__(ctx, scores, evaluation, func_load)
self._semilogx = ctx.meta.get("semilogx", True)
self._rank = ctx.meta.get("rank", 1)
self._titles = self._titles or ["DIR curve"] * 2
self._x_label = self._x_label or "False Positive Identification Rate"
self._y_label = self._y_label or "True Positive Identification Rate"
def compute(self, idx, input_scores, input_names):
"""Plot DIR for dev and eval data using
:py:func:`bob.measure.plot.detection_identification_curve`"""
mpl.figure(1)
if self._eval:
linestyle = "-" if not self._split else self._linestyles[idx]
LOGGER.info("DIR dev. curve using %s", input_names[0])
plot.detection_identification_curve(
input_scores[0],
rank=self._rank,
logx=self._semilogx,
color=self._colors[idx],
linestyle=linestyle,
label=self._label("dev", idx),
)
linestyle = "--"
if self._split:
mpl.figure(2)
linestyle = self._linestyles[idx]
LOGGER.info("DIR eval. curve using %s", input_names[1])
plot.detection_identification_curve(
input_scores[1],
rank=self._rank,
logx=self._semilogx,
color=self._colors[idx],
linestyle=linestyle,
label=self._label("eval", idx),
)
else:
LOGGER.info("DIR dev. curve using %s", input_names[0])
plot.detection_identification_curve(
input_scores[0],
rank=self._rank,
logx=self._semilogx,
color=self._colors[idx],
linestyle=self._linestyles[idx],
label=self._label("dev", idx),
)
if self._min_dig is not None:
mpl.xlim(xmin=math.pow(10, self._min_dig))
class Metrics(measure_figure.Metrics):
"""Compute metrics from score files"""
def __init__(
self,
ctx,
scores,
evaluation,
func_load,
names=(
"Failure to Acquire",
"False Match Rate",
"False Non Match Rate",
"False Accept Rate",
"False Reject Rate",
"Half Total Error Rate",
),
):
super(Metrics, self).__init__(ctx, scores, evaluation, func_load, names)
def init_process(self):
if self._criterion == "rr":
self._thres = (
[None] * self.n_systems if self._thres is None else self._thres
)
def compute(self, idx, input_scores, input_names):
"""Compute metrics for the given criteria"""
title = self._legends[idx] if self._legends is not None else None
headers = ["" or title, "Dev. %s" % input_names[0]]
if self._eval and input_scores[1] is not None:
headers.append("eval % s" % input_names[1])
if self._criterion == "rr":
rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])
dev_rr = "%.1f%%" % (100 * rr)
raws = [["RR", dev_rr]]
if self._eval and input_scores[1] is not None:
rr = bob.measure.recognition_rate(
input_scores[1], self._thres[idx]
)
eval_rr = "%.1f%%" % (100 * rr)
raws[0].append(eval_rr)
click.echo(
tabulate(raws, headers, self._tablefmt), file=self.log_file
)
elif self._criterion == "mindcf":
if "cost" in self._ctx.meta:
cost = self._ctx.meta.get("cost", 0.99)
threshold = (
bob.measure.min_weighted_error_rate_threshold(
input_scores[0][0], input_scores[0][1], cost
)
if self._thres is None
else self._thres[idx]
)
if self._thres is None:
click.echo(
"[minDCF - Cost:%f] Threshold on Development set `%s`: %e"
% (cost, input_names[0], threshold),
file=self.log_file,
)
else:
click.echo(
"[minDCF] User defined Threshold: %e" % threshold,
file=self.log_file,
)
# apply threshold to development set
far, frr = bob.measure.farfrr(
input_scores[0][0], input_scores[0][1], threshold
)
dev_far_str = "%.1f%%" % (100 * far)
dev_frr_str = "%.1f%%" % (100 * frr)
dev_mindcf_str = "%.1f%%" % (
(cost * far + (1 - cost) * frr) * 100.0
)
raws = [
["FAR", dev_far_str],
["FRR", dev_frr_str],
["minDCF", dev_mindcf_str],
]
if self._eval and input_scores[1] is not None:
# apply threshold to evaluation set
far, frr = bob.measure.farfrr(
input_scores[1][0], input_scores[1][1], threshold
)
eval_far_str = "%.1f%%" % (100 * far)
eval_frr_str = "%.1f%%" % (100 * frr)
eval_mindcf_str = "%.1f%%" % (
(cost * far + (1 - cost) * frr) * 100.0
)
raws[0].append(eval_far_str)
raws[1].append(eval_frr_str)
raws[2].append(eval_mindcf_str)
click.echo(
tabulate(raws, headers, self._tablefmt), file=self.log_file
)
elif self._criterion == "cllr":
cllr = bob.measure.calibration.cllr(
input_scores[0][0], input_scores[0][1]
)
min_cllr = bob.measure.calibration.min_cllr(
input_scores[0][0], input_scores[0][1]
)
dev_cllr_str = "%.1f%%" % cllr
dev_min_cllr_str = "%.1f%%" % min_cllr
raws = [["Cllr", dev_cllr_str], ["minCllr", dev_min_cllr_str]]
if self._eval and input_scores[1] is not None:
cllr = bob.measure.calibration.cllr(
input_scores[1][0], input_scores[1][1]
)
min_cllr = bob.measure.calibration.min_cllr(
input_scores[1][0], input_scores[1][1]
)
eval_cllr_str = "%.1f%%" % cllr
eval_min_cllr_str = "%.1f%%" % min_cllr
raws[0].append(eval_cllr_str)
raws[1].append(eval_min_cllr_str)
click.echo(
tabulate(raws, headers, self._tablefmt), file=self.log_file
)
else:
title = self._legends[idx] if self._legends is not None else None
all_metrics = self._get_all_metrics(idx, input_scores, input_names)
headers = [" " or title, "Development"]
rows = [
[self.names[0], all_metrics[0][0]],
[self.names[1], all_metrics[0][1]],
[self.names[2], all_metrics[0][2]],
[self.names[3], all_metrics[0][3]],
[self.names[4], all_metrics[0][4]],
[self.names[5], all_metrics[0][5]],
]
if self._eval:
# computes statistics for the eval set based on the threshold a
# priori
headers.append("Evaluation")
rows[0].append(all_metrics[1][0])
rows[1].append(all_metrics[1][1])
rows[2].append(all_metrics[1][2])
rows[3].append(all_metrics[1][3])
rows[4].append(all_metrics[1][4])
rows[5].append(all_metrics[1][5])
click.echo(
tabulate(rows, headers, self._tablefmt), file=self.log_file
)
class MultiMetrics(measure_figure.MultiMetrics):
"""Compute metrics from score files"""
def __init__(self, ctx, scores, evaluation, func_load):
super(MultiMetrics, self).__init__(
ctx,
scores,
evaluation,
func_load,
names=(
"Failure to Acquire",
"False Match Rate",
"False Non Match Rate",
"False Accept Rate",
"False Reject Rate",
"Half Total Error Rate",
),
)
class Hist(measure_figure.Hist):
"""Histograms for biometric scores"""
def _setup_hist(self, neg, pos):
self._title_base = "Biometric scores"
self._density_hist(pos[0], n=0, label="Genuines", alpha=0.9, color="C2")
self._density_hist(
neg[0], n=1, label="Zero-effort impostors", alpha=0.8, color="C0"
)
|
PypiClean
|
/collective.blog.portlets-1.6.zip/collective.blog.portlets-1.6/collective/blog/portlets/archive.py
|
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.i18nl10n import monthname_msgid
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from collective.blog.portlets import _
from collective.blog.portlets.utils import find_assignment_context
from plone.app.portlets.portlets import base
from plone.portlets.interfaces import IPortletDataProvider
from zope import schema
from zope.formlib import form
from zope.interface import implements
class IArchivePortlet(IPortletDataProvider):
"""A portlet
It inherits from IPortletDataProvider because for this portlet, the
data that is being rendered and the portlet assignment itself are the
same.
"""
header = schema.TextLine(title=_(u'Title of the portlet'),
description=_(u'The text that will be shown as the title of the portlet'),
required=False)
archive_view = schema.TextLine(title=_(u"Archive view"),
description=_(u"The name of the archive view"),
default=u'blog_view',
required=True)
reversed = schema.Bool(title=_(u"Reverse year and month list"),
description=_(u"When checked, the first shown year will be the current one, and then the previous ones. The same applies to the months"),
default=False,
)
depth = schema.Int(title=_(u"Sub-folder depth"),
description=_(u"To include blog posts in subfolders, set this to the depth of the subfolders. 0 means no subfolders"),
default=0,
min=0,
# Setting this to 1000000 or similar will slow down the site,
# so let's prevent silly values.
max=100,
)
class Assignment(base.Assignment):
"""Portlet assignment.
This is what is actually managed through the portlets UI and associated
with columns.
"""
implements(IArchivePortlet)
header = u'Monthly archive'
archive_view = u'blog_view'
reversed = False
depth = 0
def __init__(self, header=u'Monthly archive',
archive_view=u'blog_view',
reversed=False,
depth=0):
self.header = header
self.archive_view = archive_view
self.reversed = reversed
self.depth = depth
@property
def title(self):
"""This property is used to give the title of the portlet in the
"manage portlets" screen.
"""
return _("Monthly archive")
class Renderer(base.Renderer):
"""Portlet renderer.
This is registered in configure.zcml. The referenced page template is
rendered, and the implicit variable 'view' will refer to an instance
of this class. Other methods can be added and referenced in the template.
"""
render = ViewPageTemplateFile('archive.pt')
def update(self):
self._counts = {}
catalog = getToolByName(self.context, 'portal_catalog')
# Get the path of where the portlet is created. That's the blog.
assignment_context = find_assignment_context(self.data, self.context)
if assignment_context is None:
assignment_context = self.context
self.folder_path = '/'.join(assignment_context.getPhysicalPath())
self.folder_url = assignment_context.absolute_url()
# Find the blog types:
portal_properties = getToolByName(self.context, 'portal_properties', None)
site_properties = getattr(portal_properties, 'site_properties', None)
portal_types = site_properties.getProperty('blog_types', None)
if portal_types is None:
portal_types = ('Document', 'News Item', 'File')
# Because of ExtendedPathIndex being braindead it's tricky (read:
# impossible) to get all subobjects for all folders, without also
# getting the folder. So we set a specific depth.
brains = catalog(path={'query': self.folder_path, 'depth': self.data.depth + 1},
portal_type=portal_types)
if not brains:
return
# Count the number of posts per month:
allmonths = {}
for brain in brains:
effective = brain.effective
year = str(effective.year())
if year == '1000':
continue # No effective date == not published
month = str(effective.month())
count = allmonths.setdefault((year, month), 0)
allmonths[(year, month)] = count +1
for year, month in allmonths:
year = str(year)
month = str(month)
# Make sure there is a year in the _counts dict:
self._counts.setdefault(year, {})
# Add this month:
months = self._counts[year]
months[month] = allmonths[year, month]
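# Editor's note: illustrative shape of the mapping built above (not real data):
# self._counts == {'2011': {'3': 4, '11': 2}, '2012': {'1': 7}}
# i.e. year -> {month -> number of published posts}, later read by years(),
# months() and count() below.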
def years(self):
items = sorted(self._counts.keys())
if self.data.reversed:
return reversed(items)
return items
def months(self, year):
# sort as integers, return as strings
_months = sorted([int(m) for m in self._counts[year].keys()])
items = [str(m) for m in _months]
if self.data.reversed:
return reversed(items)
return items
def monthname(self, month):
return monthname_msgid(month)
def count(self, year, month):
return self._counts[year][month]
def archive_url(self, year, month):
return '%s/%s?year=%s&month=%s' % (self.folder_url,
self.data.archive_view,
year, month)
class AddForm(base.AddForm):
"""Portlet add form.
This is registered in configure.zcml. The form_fields variable tells
zope.formlib which fields to display. The create() method actually
constructs the assignment that is being added.
"""
form_fields = form.Fields(IArchivePortlet)
def create(self, data):
return Assignment(**data)
class EditForm(base.EditForm):
"""Portlet edit form.
This is registered with configure.zcml. The form_fields variable tells
zope.formlib which fields to display.
"""
form_fields = form.Fields(IArchivePortlet)
|
PypiClean
|
/hvl_ccb-0.14.1.tar.gz/hvl_ccb-0.14.1/hvl_ccb/dev/visa.py
|
import logging
import re
from datetime import datetime, timedelta
from time import sleep
from typing import Optional, Union
from bitstring import BitArray
from hvl_ccb.comm.visa import VisaCommunication, VisaCommunicationConfig
from hvl_ccb.configuration import configdataclass
from hvl_ccb.dev import SingleCommDevice
from hvl_ccb.utils.poller import Poller
from hvl_ccb.utils.typing import Number
logger = logging.getLogger(__name__)
@configdataclass
class _VisaDeviceConfigBase:
"""
Required VisaDeviceConfig keys, separated from the default ones to enable config
extension by inheritance with required keys.
"""
# NOTE: this class is unnecessary as there are no keys here; it's coded here only
# to illustrate a solution; for detailed explanations of the issue see:
# https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses/
pass
@configdataclass
class _VisaDeviceConfigDefaultsBase:
spoll_interval: Number = 0.5
"""
Seconds to wait between status polling.
"""
spoll_start_delay: Number = 2
"""
Seconds to delay the start of status polling.
"""
def clean_values(self):
if self.spoll_interval <= 0:
raise ValueError("Polling interval needs to be positive.")
if self.spoll_start_delay < 0:
raise ValueError("Polling start delay needs to be non-negative.")
@configdataclass
class VisaDeviceConfig(_VisaDeviceConfigDefaultsBase, _VisaDeviceConfigBase):
"""
Configdataclass for a VISA device.
"""
pass
class VisaDevice(SingleCommDevice):
"""
Device communicating over the VISA protocol using VisaCommunication.
"""
def __init__(
self,
com: Union[VisaCommunication, VisaCommunicationConfig, dict],
dev_config: Union[VisaDeviceConfig, dict, None] = None,
) -> None:
super().__init__(com, dev_config)
self._spoll_thread: Union[Poller, None] = None
self._notify_operation_complete: bool = False
@staticmethod
def default_com_cls() -> type[VisaCommunication]:
"""
Return the default communication protocol for this device type, which is
VisaCommunication.
:return: the VisaCommunication class
"""
return VisaCommunication
@staticmethod
def config_cls():
return VisaDeviceConfig
def get_identification(self) -> str:
"""
Queries `"*IDN?"` and returns the identification string of the connected device.
:return: the identification string of the connected device
"""
return self.com.query("*IDN?")
def start(self) -> None:
"""
Start the VisaDevice. Sets up the status poller and starts it.
:return:
"""
super().start()
self._spoll_thread = Poller(
polling_interval_sec=self.config.spoll_interval,
polling_delay_sec=self.config.spoll_start_delay,
spoll_handler=self.spoll_handler,
)
self._spoll_thread.start_polling()
def stop(self) -> None:
"""
Stop the VisaDevice. Stops the polling thread and closes the communication
protocol.
:return:
"""
if self._spoll_thread:
self._spoll_thread.stop_polling()
super().stop()
def spoll_handler(self):
"""
Reads the status byte and decodes it. The status byte STB is defined in
IEEE 488.2. It provides a rough overview of the instrument status.
:return:
"""
stb = self.com.spoll()
if stb:
bits = BitArray(length=8, int=stb)
bits.reverse()
if bits[0]:
# has no meaning, always zero
pass
if bits[1]:
# has no meaning, always zero
pass
if bits[2]:
# error queue contains new error
logger.debug(f"Error bit set in STB: {stb}")
self.get_error_queue()
if bits[3]:
# Questionable Status QUES summary bit
logger.debug(f"Questionable status bit set in STB: {stb}")
if bits[4]:
# Output buffer holds data (RTO 1024), MAV bit (Message available)
pass
if bits[5]:
# Event status byte ESB, summary of ESR register (RTO 1024)
logger.debug(f"Operation status bit set in STB: {stb}")
# read event status register
esr = int(self.com.query("*ESR?"))
esr_bits = BitArray(length=8, int=esr)
esr_bits.reverse()
if esr_bits[0]:
# Operation complete bit set. This bit is set on receipt of the
# command *OPC exactly when all previous commands have been
# executed.
logger.debug(f"Operation complete bit set in ESR: {esr}")
self._notify_operation_complete = True
if bits[6]:
# RQS/MSS bit (RTO 1024)
pass
if bits[7]:
# Operation Status OPER summary bit
pass
def wait_operation_complete(self, timeout: Optional[float] = None) -> bool:
"""
Waits for an operation complete event. Returns after the timeout [s] has expired
or the operation complete event has been caught.
:param timeout: Time in seconds to wait for the event; `None` for no timeout.
:return: True, if OPC event is caught, False if timeout expired
"""
# reset event bit
self._notify_operation_complete = False
# compute timeout
timeout_time = datetime.now() + timedelta(seconds=(timeout or 0))
# wait until event is caught
while not self._notify_operation_complete:
sleep(0.01)
if timeout is not None and datetime.now() > timeout_time:
break
# if event was caught, return true
if self._notify_operation_complete:
self._notify_operation_complete = False
return True
# if timeout expired, return false
return False
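# Editor's note: hedged usage sketch for the *OPC mechanism described above; the
# communication configuration is a placeholder, not part of this module.
# dev = VisaDevice(com=my_visa_communication_config)   # assumed config object
# dev.start()                                          # starts the status poller
# dev.com.write("*OPC")                                # request an OPC event from the instrument
# done = dev.wait_operation_complete(timeout=5.0)      # True if ESR bit 0 was seen in time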
def get_error_queue(self) -> str:
"""
Read out error queue and logs the error.
:return: Error string
"""
err_string = self.com.query("SYSTem:ERRor:ALL?")
for error in re.findall("[^,]+,[^,]+", err_string):
logger.error(f"VISA Error from Device: {error}")
return err_string
def reset(self) -> None:
"""
Send `"*RST"` and `"*CLS"` to the device. Typically sets a defined state.
"""
self.com.write("*RST", "*CLS")
|
PypiClean
|
/thrillington-0.0.2a1-py3-none-any.whl/ram/modules.py
|
from typing import NamedTuple
import numpy as np
import tensorflow as tf
class Glimpse(NamedTuple):
"""represents output of GlimpseSensor"""
rho: tf.Tensor
fixations: np.ndarray
class GlimpseSensor(tf.keras.Model):
"""glimpse sensor, returns retina-like representation rho
of a region of an image x, given a location l to 'fixate'.
"""
def __init__(self, g_w=8, k=3, s=2):
"""__init__ for GlimpseSensor
Parameters
----------
g_w : int
length of one side of square patches in glimpses extracted by glimpse sensor.
Default is 8.
k : int
number of patches that the retina encoding rho(x,l) extracts
at location l from image x. Default is 3.
s : int
scaling factor, controls size of successive patches. Default is 2.
"""
super(GlimpseSensor, self).__init__()
self.g_w = g_w
self.k = k
self.s = s
def glimpse(self, images, loc_normd):
"""take a "glimpse" of a batch of images.
Returns retina-like representation rho(img, loc)
consisting of patches from each image.
Parameters
----------
images : tf.Tensor
with shape (B, H, W, C). Minibatch of images.
loc_normd : tf.Tensor
with shape (B, 2). Location of retina "fixation",
in normalized co-ordinates where center of image is (0,0),
upper left corner is (-1,-1), and lower right corner is (1,1).
Returns
-------
Glimpse : NamedTuple
typed tuple, with following fields
rho : tf.Tensor
with shape (B, k, g_w, g_w, C);
retina-like representation of k patches of increasing size
and decreasing resolution, centered around location loc within
image img
fixations: np.ndarray
locations where glimpse sensor "fixates" converted from
normalized values in loc_normd to pixels in co-ordinate plane
of input images.
top_left_corners: np.ndarray
calculated top left corners of extracted glimpse 'patches'.
Useful for plotting the glimpses.
"""
batch_size, img_H, img_W, C = images.shape.as_list()
# convert image co-ordinates from normalized to co-ordinates within
# the specific size of the images
# first convert location to range from 0 to 1
# and then multiply by number of pixels - 1 (because of zero indexing)
loc_0 = ((loc_normd[:, 0] + 1) / 2) * (img_H - 1)
loc_0 = tf.cast(tf.round(loc_0), tf.int32)
loc_1 = ((loc_normd[:, 1] + 1) / 2) * (img_W - 1)
loc_1 = tf.cast(tf.round(loc_1), tf.int32)
fixations = tf.stack([loc_0, loc_1], axis=1)
rho = []
for ind in range(batch_size):
img = images[ind, :, :, :]
patches = []
for patch_num in range(self.k):
size = self.g_w * (self.s ** patch_num)
# pad image with zeros
# (in case patch at current location extends beyond edges of image)
img_padded = tf.image.pad_to_bounding_box(img,
offset_height=size,
offset_width=size,
target_height=(size * 2) + img_H,
target_width=(size * 2) + img_W)
# compute top left corner of patch.
# note we add 'size' to compensate for padding
patch_x = fixations[ind, 0] - (size // 2) + size
patch_y = fixations[ind, 1] - (size // 2) + size
patch = tf.slice(img_padded,
begin=tf.stack([patch_x, patch_y, 0]),
size=tf.stack([size, size, C])
)
if size == self.g_w:
# convert to float32 to be consistent with
# tensors output after resizing
patch = tf.cast(patch, dtype=tf.float32)
else:
# resize cropped image to (size x size)
patch = tf.image.resize_images(patch, size=(self.g_w, self.g_w))
patches.append(patch)
rho.append(patches)
rho = tf.stack(rho)
fixations = fixations.numpy()
return Glimpse(rho, fixations)
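# Editor's note: worked example of the patch-size progression described in the
# docstrings above (illustrative only). With the defaults g_w=8, k=3 and s=2,
# each glimpse extracts 8x8, 16x16 and 32x32 patches around the fixation and
# resizes the larger two back to 8x8, so rho has shape (batch, 3, 8, 8, channels).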
class GlimpseNetwork(tf.keras.Model):
"""Network that maps retina representation rho and
location loc into a hidden space; defines a trainable
bandwidth-limited sensor that produces the glimpse
representation g_t
Attributes
----------
self.forward : forward pass through network, accepts image and
location tensors and returns tensor of glimpse representations g_t
"""
def __init__(self, g_w=8, k=3, s=2, h_g_units=128, h_l_units=128, h_gt_units=256):
"""__init__ function for GlimpseNetwork
Parameters
----------
g_w : int
size of square patches extracted by glimpse sensor.
k : int
number of patches to extract per glimpse.
s : int
scaling factor that controls size of successive patches.
h_g_units : int
number of units in fully-connected layer for retina-like representation rho.
Default is 128.
h_l_units : int
number of units in fully-connected layer for location l.
Default is 128.
h_gt_units : int
number of units in fully-connected layer for output g_t. This must be equal
to the number of hidden units in the core network. Default is 256.
"""
super(GlimpseNetwork, self).__init__()
self.g_w = g_w
self.k = k
self.s = s
self.h_g_units = h_g_units
self.h_l_units = h_l_units
self.h_gt_units = h_gt_units
self.glimpse_sensor = GlimpseSensor(g_w=g_w, k=k, s=s)
self.theta_g_0 = tf.keras.layers.Dense(units=h_g_units, activation='relu')
self.theta_g_1 = tf.keras.layers.Dense(units=h_l_units, activation='relu')
self.linear_h_g = tf.keras.layers.Dense(units=h_g_units, activation='linear')
self.linear_h_l = tf.keras.layers.Dense(units=h_l_units, activation='linear')
self.theta_g_2 = tf.keras.layers.Dense(units=h_gt_units, activation='relu')
def forward(self, images, loc):
"""computes forward pass through GlimpseNetwork
Parameters
----------
images : tf.Tensor
with shape (batch size, height, width, channels). Minibatch of images.
loc : tf.Tensor
with shape (batch size, 2). Location of retina "fixation",
in normalized co-ordinates where center of image is (0,0),
upper left corner is (-1,-1), and lower right corner is (1,1).
Returns
-------
Glimpse : NamedTuple
typed tuple, with following fields
rho : tf.Tensor
with shape
(batch size, number of glimpses, glimpse size, glimpse size, channels);
Glimpse representation extracted by GlimpseSensor.
fixations: np.ndarray
locations where glimpse sensor "fixates" converted from
normalized values in loc_normd to pixels in co-ordinate plane
of input images.
top_left_corners: np.ndarray
calculated top left corners of extracted glimpse 'patches'.
Useful for plotting the glimpses.
g_t : tf.Tensor
glimpse representation, output by glimpse network
"""
glimpse = self.glimpse_sensor.glimpse(images, loc)
batch_size, k, g, _, channels = glimpse.rho.shape.as_list()
rho_vec = tf.reshape(glimpse.rho, shape=(batch_size, k * g * g * channels))
h_g = self.theta_g_0(rho_vec)
h_l = self.theta_g_1(loc)
g_t = self.theta_g_2(
self.linear_h_g(h_g) + self.linear_h_l(h_l)
)
return glimpse, g_t
class CoreNetwork(tf.keras.Model):
"""RNN that maintains an internal state which summarizes
information extracted from the history of past observations.
The external input to the network is the glimpse feature
vector g_t.
The output h_t = f_h(h_t_minus_1, g_t; theta_h) where theta_h
is parameterized by the layers listed below in Attributes.
Attributes
----------
self.linear_h_t_minus_1 : linear layer of units that accepts
hidden state from the last time step as an input
self.linear_g_t : linear layer of units that accepts the
glimpse feature vector g_t as an input
So h_t = f_h(h_t_minus_1, g_t) =
Rect(Linear(h_t_minus_1) + Linear(g_t))
"""
def __init__(self, hidden_size=256):
"""__init__ function for CoreNetwork.
Note that in [1]_ the network as implemented here is only
used for classification; an LSTM was used for dynamic
environments.
Parameters
----------
hidden_size : int
Number of units in hidden layers that maintain internal state.
Default is 256.
"""
super(CoreNetwork, self).__init__()
self.hidden_size = hidden_size
self.linear_h_t_minus_1 = tf.keras.layers.Dense(units=hidden_size)
self.linear_g_t = tf.keras.layers.Dense(units=hidden_size)
def forward(self, g_t, h_t_minus_1):
"""computes forward pass through CoreNetwork
Parameters
----------
g_t : tf.Tensor
glimpse feature vector
h_t_minus_1 : tf.Tensor
output of CoreNetwork from previous time step
Returns
-------
h_t : tf.Tensor
= f_h(h_t_minus_1, g_t) = Rect(Linear(h_t_minus_1) + Linear(g_t))
= tf.relu(self.linear_g_t(g_t) + self.linear_h_t_minus_1(h_t_minus_1))
"""
h_t = self.linear_g_t(g_t) + self.linear_h_t_minus_1(h_t_minus_1)
h_t = tf.nn.relu(h_t)
return h_t
class LocationNetwork(tf.keras.Model):
"""Uses internal state `h_t` of core network
to produce location coordinates `l_t` for the
next time step.
The location network is a fully-connected layer
that parameterizes a normal distribution with
mean mu and a constant standard deviation sigma
(specified by the user). Locations are drawn from
this distribution on each time step, by passing
the hidden state h_t through the location network l_t.
Attributes
----------
self.forward
"""
def __init__(self, loc_std, output_size=2):
"""__init__ for LocationNetwork
Parameters
----------
loc_std : float
standard deviation of normal distribution from which
location co-ordinates are drawn.
output_size : int
dimensionality of output of fully connected layer
in location network. Default is 2, i.e. x and y co-ordinates
of a location.
"""
super(LocationNetwork, self).__init__()
self.output_size = output_size
self.loc_std = loc_std
self.fc = tf.keras.layers.Dense(units=output_size, activation='tanh')
def forward(self, h_t):
"""forward pass through LocationNetwork.
Uses the location policy to compute l_t, the location to glimpse next,
given the internal state `h_t` of the core network.
Passes h_t through a fully connected layer with tanh activation
to clamp the output between [-1, 1]. This gives mu, the mean of a normal
distribution with standard deviation self.loc_std. The location
l_t is then drawn from this distribution and returned.
Parameters
----------
h_t : tf.Tensor
with shape (B, num_hidden). Output of core network.
Returns
-------
mu : tf.Tensor
with shape (B, 2). mu parameter for normal distributions
from which l_t will be drawn. Since l_t is a location with
x and y co-ordinates, there is one value of mu for each
distribution (the one that represents the x co-ordinate and
the one that represents the y co-ordinate).
l_t : tf.Tensor
with shape (B, 2)
"""
mu = self.fc(h_t)
y = tf.random_normal(mu.get_shape(), mean=mu, stddev=self.loc_std)
# run through tanh again to bound between -1 and 1
l_t = tf.tanh(y)
return mu, l_t
class ActionNetwork(tf.keras.Model):
"""Uses internal state `h_t` of CoreNetwork to
produce final output classification.
Feeds hidden state `h_t` through a fully-connected
layer followed by a softmax to yield the vector of
output probabilities over the possible classes.
"""
def __init__(self, num_actions):
super(ActionNetwork, self).__init__()
self.num_actions = num_actions
self.fc = tf.keras.layers.Dense(units=num_actions, activation='linear')
def forward(self, h_t):
"""forward pass through ActionNetwork
Returns
-------
a_t : tf.Tensor
"actions" to take, currently just classification
"""
a_t = self.fc(h_t)
return a_t
class BaselineNetwork(tf.keras.Model):
"""Provides estimate of (state) value that does not depend on
actions taken. Used during optimization as a baseline,
subtracted from return on each time step,
to reduce variance of the policy function gradient.
Attributes
----------
self.fc : tf.keras.layers.Dense
fully-connected layer with Rectified Linear activation
"""
def __init__(self, output_size=1):
"""__init__ for BaselineNetwork
Parameters
----------
output_size : int
Number of units in fully-connected layer of BaselineNetwork.
Should be a scalar, since it is the estimate of total return
from the current state. Default is 1.
"""
super(BaselineNetwork, self).__init__()
self.output_size = output_size
self.fc = tf.keras.layers.Dense(units=output_size, activation='relu')
def forward(self, h_t):
"""forward pass through BaselineNetwork.
Returns
-------
b_t : tf.Tensor
baseline network output with size (batch, self.output_size)
"""
b_t = self.fc(h_t)
return b_t
|
PypiClean
|
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/cometd/callbackPollTransport.js.uncompressed.js
|
define("dojox/cometd/callbackPollTransport", ["dijit","dojo","dojox","dojo/require!dojox/cometd/_base,dojox/cometd/longPollTransport,dojo/io/script"], function(dijit,dojo,dojox){
dojo.provide("dojox.cometd.callbackPollTransport");
dojo.require("dojox.cometd._base");
dojo.require("dojox.cometd.longPollTransport");
dojo.require("dojo.io.script");
dojox.cometd.callbackPollTransport = new function(){
this._connectionType = "callback-polling";
this._cometd = null;
this.check = function(types, version, xdomain){
// we handle x-domain!
return (dojo.indexOf(types, "callback-polling") >= 0);
}
this.tunnelInit = function(){
var message = {
channel: "/meta/connect",
clientId: this._cometd.clientId,
connectionType: this._connectionType,
id: "" + this._cometd.messageId++
};
message = this._cometd._extendOut(message);
this.openTunnelWith([message]);
}
this.tunnelCollapse = dojox.cometd.longPollTransport.tunnelCollapse;
this._connect = dojox.cometd.longPollTransport._connect;
this.deliver = dojox.cometd.longPollTransport.deliver;
this.openTunnelWith = function(content, url){
this._cometd._polling = true;
var script = {
load: dojo.hitch(this, function(data){
this._cometd._polling=false;
this._cometd.deliver(data);
this._cometd._backon();
this.tunnelCollapse();
}),
error: dojo.hitch(this, function(err){
this._cometd._polling = false;
this._cometd._publishMeta("connect",false);
this._cometd._backoff();
this.tunnelCollapse();
}),
url: (url || this._cometd.url),
content: { message: dojo.toJson(content) },
callbackParamName: "jsonp"
};
var connectTimeout = this._cometd._connectTimeout();
if(connectTimeout > 0){
script.timeout=connectTimeout;
}
dojo.io.script.get(script);
}
this.sendMessages = function(/*array*/ messages){
for(var i = 0; i < messages.length; i++){
messages[i].clientId = this._cometd.clientId;
messages[i].id = ""+this._cometd.messageId++;
messages[i]=this._cometd._extendOut(messages[i]);
}
var bindArgs = {
url: this._cometd.url || dojo.config["cometdRoot"],
load: dojo.hitch(this._cometd, "deliver"),
callbackParamName: "jsonp",
content: { message: dojo.toJson( messages ) },
error: dojo.hitch(this, function(err){
this._cometd._publishMeta("publish",false,{messages:messages});
}),
timeout: this._cometd.expectedNetworkDelay
};
return dojo.io.script.get(bindArgs);
}
this.startup = function(handshakeData){
if(this._cometd._connected){ return; }
this.tunnelInit();
}
// FIXME: what is this supposed to do? ;)
this.disconnect = dojox.cometd.longPollTransport.disconnect;
this.disconnect = function(){
var message = {
channel: "/meta/disconnect",
clientId: this._cometd.clientId,
id: "" + this._cometd.messageId++
};
message = this._cometd._extendOut(message);
dojo.io.script.get({
url: this._cometd.url || dojo.config["cometdRoot"],
callbackParamName: "jsonp",
content: { message: dojo.toJson([message]) }
});
}
this.cancelConnect = function(){}
}
dojox.cometd.connectionTypes.register("callback-polling", dojox.cometd.callbackPollTransport.check, dojox.cometd.callbackPollTransport);
});
|
PypiClean
|
/Dominate_Layui-2020.6.25.post1-py3-none-any.whl/Dominate_Layui/Core.py
|
import inspect
import os
import shutil
import dominate
import Dominate_Layui.static
from dominate.document import *
from dominate.tags import *
COUNTS = 0
def __getVarName(variable):
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
return [var_name for var_name, var_val in callers_local_vars if var_val is variable]
class HTMLDocument(document):
pass
def linkInit(doc:HTMLDocument) ->None:
"""To initialize layui, you need to pass in a document object"""
doc.head+=script(src="static/layui/layui.all.js")
doc.head+=script(src="static/layui/init.js")
doc.head+=link(href="static/layui/css/layui.css",rel="stylesheet")
doc.head+=link(href="static/layui/css/layui.mobile.css",rel="stylesheet")
def FuncBtnAdd(doc:HTMLDocument,title:str,color:str="primary",radius:str="null",fluid:str="none",size:str="nr",onClick:str=None):
"""color:primary,normal,None,warm,danger
radius:'null'/'radius'
fluid:'none'/'fluid'
size:lg,nr,sm,xs"""
l=[doc,title,color,radius,fluid,size,onClick]
front_attribute="layui-btn-"
attribute="layui-btn"
for i in l:
if type(i)==type(None):
l.remove(i)
if not onClick:
for i in l:
#print(__getVarName(i))
if __getVarName(i)[1]!="doc":
if __getVarName(i)[1]!="title":
if i=="null" or i=="none":
continue
else:
attribute+=" "
attribute+=front_attribute+str(i)
else:
continue
doc.body.add(button(title,Class=attribute))
else:
for i in l:
if __getVarName(i)[1]!="onClick":
if __getVarName(i)[1]!="doc":
if __getVarName(i)[1]!="title":
if i=="null" or i=="none":
continue
else:
attribute+=" "
attribute+=front_attribute+str(i)
else:
continue
else:
continue
doc.body.add(button(title,Class=attribute,onclick=onClick))
return
def LinkBtnAdd(doc:HTMLDocument,title:str,color:str="primary",radius:str="null",fluid:str="none",size:str="nr",href:str=None):
"""color:primary,normal,None,warm,danger
radius:'null'/'radius'
fluid:'none'/'fluid'
size:lg,nr,sm,xs"""
l=[doc,title,color,radius,fluid,size,href]
front_attribute="layui-btn-"
attribute="layui-btn"
for i in l:
if type(i)==type(None):
l.remove(i)
for i in l:
#print(__getVarName(i))
if __getVarName(i)[1]!="href":
if __getVarName(i)[1]!="doc":
if __getVarName(i)[1]!="title":
if i=="null" or i=="none":
continue
else:
attribute+=" "
attribute+=front_attribute+str(i)
else:
continue
link=a(href=href)
link.add(button(title,Class=attribute))
doc.body.add(link)
return
def htmlbr(doc:HTMLDocument):
'''Just a Tag<br>.'''
doc.add(br())
def render(doc:HTMLDocument,DocName:str):
"""Copy the static resources, Doc renders them
into strings and writes them to the specified file"""
__StaticGot()
file=open(DocName,"w",encoding="utf-8")
file.write(str(doc))
file.close()
return (doc,DocName,len(str(doc)))
def __copyFiles(sourceDir,targetDir):
global COUNTS
for f in os.listdir(sourceDir):
sourceF = os.path.join(sourceDir, f)
targetF = os.path.join(targetDir, f)
if os.path.isfile(sourceF):
if not os.path.exists(targetDir):
os.makedirs(targetDir)
COUNTS += 1
if not os.path.exists(targetF) or (os.path.exists(targetF) and (os.path.getsize(targetF) != os.path.getsize(sourceF))):
open(targetF, "wb").write(open(sourceF, "rb").read())
else:
if os.path.isdir(sourceF):
__copyFiles(sourceF, targetF)
COUNTS=0
def __StaticGot():
"""Copy all static resources to the current directory"""
path=os.path.dirname(Dominate_Layui.static.__file__)
__copyFiles(path,"static")
os.remove("static/__init__.py")
shutil.rmtree("static/__pycache__")
if "__main__"==__name__:
Test=HTMLDocument()
linkInit(Test)
FuncBtnAdd(Test,title="Hello World",radius="radius",color="danger",onClick=("AlertBox('HEYSS')"))
print(Test)
__StaticGot()
|
PypiClean
|
/pelican-jupyter-0.10.1.tar.gz/pelican-jupyter-0.10.1/pelican_jupyter/vendor/liquid_tags/mdx_liquid_tags.py
|
import itertools
import re
import warnings
import markdown
# Define some regular expressions
LIQUID_TAG = re.compile(r"\{%.*?%\}", re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r"(?:\s*)(\S+)(?:\s*)")
LT_CONFIG = {
"CODE_DIR": "code",
"NOTEBOOK_DIR": "notebooks",
"FLICKR_API_KEY": "flickr",
"GIPHY_API_KEY": "giphy",
}
LT_HELP = {
"CODE_DIR": "Code directory for include_code subplugin",
"NOTEBOOK_DIR": "Notebook directory for notebook subplugin",
"FLICKR_API_KEY": "Flickr key for accessing the API",
"GIPHY_API_KEY": "Giphy key for accessing the API",
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = "\n".join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub("", markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append("")
# reconstruct string
page = "".join(itertools.chain(*zip(LIQUID_TAG.split(page), liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key, value in LT_CONFIG.items():
self.config[key] = [value, LT_HELP[key]]
super(LiquidTags, self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key, value in LT_CONFIG.items():
config[key] = [config[key], LT_HELP[key]]
super(LiquidTags, self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
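# Editor's note: hedged sketch of registering a custom tag with the decorator
# above; the tag name and behaviour are illustrative, not part of this plugin.
# @LiquidTags.register('shout')
# def shout(preprocessor, tag, markup):
#     return markup.upper()   # turns {% shout hello %} into HELLO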
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add("mdincludes", _LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
|
PypiClean
|
/django-static-angular-v1.3.15.tar.gz/django-static-angular-v1.3.15/django_static_angular/static/static_angular/js/i18n/angular-locale_en-gm.js
|
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"ERANAMES": [
"Before Christ",
"Anno Domini"
],
"ERAS": [
"BC",
"AD"
],
"MONTH": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
],
"SHORTDAY": [
"Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat"
],
"SHORTMONTH": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
"fullDate": "EEEE, MMMM d, y",
"longDate": "MMMM d, y",
"medium": "MMM d, y h:mm:ss a",
"mediumDate": "MMM d, y",
"mediumTime": "h:mm:ss a",
"short": "M/d/yy h:mm a",
"shortDate": "M/d/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "GMD",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "en-gm",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
|
PypiClean
|
/ytd-0.0.1.tar.gz/ytd-0.0.1/detectron2/ObjectDetector.py
|
import cv2 as cv
import json
import time
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode
from detectron2 import model_zoo
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.modeling import build_model
import torch
import numpy as np
from PIL import Image
from com_ineuron_utils.utils import encodeImageIntoBase64
class Detector:
def __init__(self,filename, model_name):
# set model and test set
self.filename = filename
# obtain detectron2's default config
self.cfg = get_cfg()
# load values from a file
# self.cfg.merge_from_file("config.yml")
if model_name == 'faster_rcnn_R_50_C4':
print(f'model:faster_rcnn_R_50_C4')
self.model = 'faster_rcnn_R_50_C4_1x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/" + self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'faster_rcnn_R_50_FPN':
print(f'model:faster_rcnn_R_50_FPN')
self.model = 'faster_rcnn_R_50_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'faster_rcnn_X_101_32x8d_FPN':
print(f'model:faster_rcnn_X_101_32x8d_FPN')
self.model = 'faster_rcnn_X_101_32x8d_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'retinanet_R_50_FPN':
print(f'model:retinanet_R_50_FPN')
self.model = 'retinanet_R_50_FPN_3x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
elif model_name == 'fast_rcnn_R_50_FPN':
print(f'model:fast_rcnn_R_50_FPN')
self.model = 'fast_rcnn_R_50_FPN_1x.yaml'
self.cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/"+self.model))
self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/" + self.model)
else:
raise Exception('Unknown model')
# set device to cpu
self.cfg.MODEL.DEVICE = "cpu"
# get weights
# self.cfg.MODEL.WEIGHTS = "model_final_f10217.pkl"
# self.cfg.MODEL.WEIGHTS = "model_final.pth"
# set the testing threshold for this model
self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.50
# build model from weights
# self.cfg.MODEL.WEIGHTS = self.convert_model_for_inference()
# build model and convert for inference
def convert_model_for_inference(self):
# build model
model = build_model(self.cfg)
# save as checkpoint
torch.save(model.state_dict(), 'checkpoint.pth')
# return path to inference model
return 'checkpoint.pth'
def inference(self):
# self.ROI = ROI
predictor = DefaultPredictor(self.cfg)
im = cv.imread(self.filename)
t0 = time.time()
outputs = predictor(im)
t1 = time.time()
metadata = MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0])
# visualise
v = Visualizer(im[:, :, ::-1], metadata=metadata, scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
predicted_image = v.get_image()
im_rgb = cv.cvtColor(predicted_image, cv.COLOR_RGB2BGR)
cv.imwrite('output.jpg', im_rgb)
# imagekeeper = []
opencodedbase64 = encodeImageIntoBase64("output.jpg")
pred_bbox = outputs['instances'].pred_boxes.to('cpu')
pred_bbox = pred_bbox.tensor.detach().numpy().tolist()
pred_scores = outputs['instances'].scores.to('cpu').tolist()
pred_time = round((t1 - t0), 3)
print(pred_time)
print(pred_scores)
print(pred_bbox)
# imagekeeper.append({"image": opencodedbase64.decode('utf-8')})
result = {"pred_bbox": str(pred_bbox),
"pred_scores": str(pred_scores),
"pred_time": str(pred_time),
"image" : opencodedbase64.decode('utf-8')}
return result
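# Minimal usage sketch for the Detector above; "input.jpg" and the model name are
# placeholders, and detectron2 plus the COCO model-zoo weights must be available.
if __name__ == "__main__":
    detector = Detector(filename="input.jpg", model_name="faster_rcnn_R_50_FPN")
    result = detector.inference()
    # inference() returns the boxes, scores, timing and a base64-encoded image.
    print(result["pred_time"], result["pred_scores"])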
|
PypiClean
|
/mozuma-0.9.0.tar.gz/mozuma-0.9.0/README.md
|
# MoZuMa
MoZuMa is a model zoo for multimedia search applications. It provides an easy-to-use interface to run models for:
- **Text to image retrieval**: Rank images by their similarity to a text query.
- **Image similarity search**: Rank images by their similarity to a query image.
- **Image classification**: Add labels to images.
- **Face detection**: Detect and retrieve images with similar faces.
- **Object detection**: Detect and retrieve images with similar objects.
- **Video keyframes extraction**: Retrieve the important frames of a video.
Key-frames are used to apply all the other queries on videos.
- **Multilingual text search**: Rank similar sentences from a text query in multiple languages.
## Quick links
- [Documentation](https://mozuma.github.io/mozuma/)
- [Models](https://mozuma.github.io/mozuma/models/)
- [For developers](https://mozuma.github.io/mozuma/contributing/0-setup.md)
## Example gallery
See `docs/examples/` for a collection of ready-to-use notebooks.
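The notebooks in the gallery are the authoritative examples. As a rough, library-agnostic sketch of the ranking idea behind the similarity-search features listed above (this is **not** MoZuMa's API; the function and the toy embeddings are illustrative assumptions), images can be ordered by the cosine similarity of their embeddings to a query embedding:
```python
import numpy as np

def rank_by_similarity(query_embedding: np.ndarray, image_embeddings: np.ndarray) -> np.ndarray:
    """Return image indices sorted from most to least similar to the query."""
    # Normalise so that a dot product equals cosine similarity.
    q = query_embedding / np.linalg.norm(query_embedding)
    imgs = image_embeddings / np.linalg.norm(image_embeddings, axis=1, keepdims=True)
    return np.argsort(-(imgs @ q))

# Toy data: 4 images with 8-dimensional embeddings and one query embedding.
rng = np.random.default_rng(0)
print(rank_by_similarity(rng.normal(size=8), rng.normal(size=(4, 8))))
```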
## Citation
Please cite as:
```bibtex
@inproceedings{mozuma,
author = {Massonnet, St\'{e}phane and Romanelli, Marco and Lebret, R\'{e}mi and Poulsen, Niels and Aberer, Karl},
title = {MoZuMa: A Model Zoo for Multimedia Applications},
year = {2022},
isbn = {9781450392037},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3503161.3548542},
doi = {10.1145/3503161.3548542},
abstract = {Lots of machine learning models with applications in Multimedia Search are released as Open Source Software. However, integrating these models into an application is not always an easy task due to the lack of a consistent interface to run, train or distribute models. With MoZuMa, we aim at reducing this effort by providing a model zoo for image similarity, text-to-image retrieval, face recognition, object similarity search, video key-frames detection and multilingual text search implemented in a generic interface with a modular architecture. The code is released as Open Source Software at https://github.com/mozuma/mozuma.},
booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
pages = {7335–7338},
numpages = {4},
keywords = {multimedia search, vision and language, open source software},
location = {Lisboa, Portugal},
series = {MM '22}
}
```
|
PypiClean
|
/juju-vnfm-1.0.0b19.tar.gz/juju-vnfm-1.0.0b19/README.rst
|
Juju Virtual Network Function Manager
=====================================
This project is a Virtual Network Function Manager (VNFM) that enables
`Juju`_ to work as a VNFM in the `Open Baton`_ environment.
Requirements
------------
- Python 3.5.2+
- Juju 2.0+
Installation
------------
To install the Juju-VNFM, execute
.. code:: bash
pip install .
inside the project’s root directory. **Note** that you have to use pip3 if
your default Python interpreter is Python 2.
Usage
-----
After installing the Juju-VNFM you have to configure it. Create the
file */etc/openbaton/juju/conf.ini*, make sure that the current user has write permissions for it, and execute:
.. code:: bash
jujuvnfm configure
Then follow the instructions.
Afterwards you can start the Juju-VNFM with the command *jujuvnfm
start*. You can specify the number of threads started to handle NFVO
requests by passing a number with the -t option:
.. code:: bash
jujuvnfm -t 10 start
The default number of threads is five.
.. _Juju: https://jujucharms.com/
.. _Open Baton: https://openbaton.github.io/
|
PypiClean
|
/hxrsnd-0.3.1.tar.gz/hxrsnd-0.3.1/scripts.py
|
# Imports from the Python standard library go here
import logging
# Imports from the third-party modules go here
import numpy as np
from ophyd import Component as Cmp
from ophyd import Device, EpicsSignal
from ophyd.sim import hw
from ophyd.status import wait as status_wait
# Imports from the HXRSnD module go here
import snd_devices
# Imports from other SLAC modules go here
# Default logger
logger = logging.getLogger(__name__)
###############################################################################
# Good Design Practices #
###############################################################################
# # Replace all print() statements with logger.info() statements # #
###############################################################################
# The main reason for this is that the IPython shell will archive everything in
# the log files IF AND ONLY IF you use the logger methods, while still printing
# to the console. Better yet, use the various logger levels. To use the logger,
# simply make the following substitution:
# print("text") --> logger.info("text")
# That is all it takes: the message will now be archived in the info-level
# (HXRSnD/logs/info.log) and debug-level (HXRSnD/logs/debug.log) log files.
# # Leave Comments # #
###############################################################################
# This may not seem that important, but the purpose of this file is to
# temporarily hold scripts developed during beamtime so that we (PCDS) can
# later migrate them into the module. By leaving comments, you make it easier
# for everyone to understand what the code is doing.
###############################################################################
# Insert Code Below #
###############################################################################
hw = hw() # Fake hardware for testing
fake_motor = hw.motor
class NotepadScanStatus(Device):
istep = Cmp(EpicsSignal, ":ISTEP")
isscan = Cmp(EpicsSignal, ":ISSCAN")
nshots = Cmp(EpicsSignal, ":NSHOTS")
nsteps = Cmp(EpicsSignal, ":NSTEPS")
var0 = Cmp(EpicsSignal, ":SCANVAR00")
var1 = Cmp(EpicsSignal, ":SCANVAR01")
var2 = Cmp(EpicsSignal, ":SCANVAR02")
var0_max = Cmp(EpicsSignal, ":MAX00")
var1_max = Cmp(EpicsSignal, ":MAX01")
var2_max = Cmp(EpicsSignal, ":MAX02")
var0_min = Cmp(EpicsSignal, ":MIN00")
var1_min = Cmp(EpicsSignal, ":MIN01")
var2_min = Cmp(EpicsSignal, ":MIN02")
def clean_fields(self):
for sig_name in self.signal_names:
sig = getattr(self, sig_name)
val = sig.value
if isinstance(val, (int, float)):
sig.put(0)
elif isinstance(val, str):
sig.put('')
notepad_scan_status = NotepadScanStatus('XCS:SCAN', name='xcs_scan_status')
def ascan(motor, start, stop, num, events_per_point=360, record=False,
controls=None, **kwargs):
"""
Quick re-implementation of the old Python ascan for the transition
"""
daq = snd_devices.daq
events = events_per_point
status = notepad_scan_status
status.clean_fields()
if controls is None:
controls = {}
start_pos = motor.position
def get_controls(motor, extra_controls):
out_arr = {motor.name: motor}
out_arr.update(extra_controls)
return out_arr
try:
scan_controls = get_controls(motor, controls)
daq.configure(record=record, controls=scan_controls)
status.isscan.put(1)
status.nshots.put(events_per_point)
status.nsteps.put(num)
status.var0.put(motor.name)
status.var0_max.put(max((start, stop)))
status.var0_min.put(min((start, stop)))
for i, step in enumerate(np.linspace(start, stop, num)):
logger.info(f'Beginning step {step}')
try:
mstat = motor.set(step, verify_move=False, **kwargs)
except TypeError:
mstat = motor.set(step, **kwargs)
status.istep.put(i)
status_wait(mstat)
scan_controls = get_controls(motor, controls)
daq.begin(events=events, controls=scan_controls)
logger.info(f'Waiting for {events} events ...')
daq.wait()
finally:
logger.info('DONE!')
status.clean_fields()
daq.end_run()
daq.disconnect()
try:
motor.set(start_pos, verify_move=False, **kwargs)
except TypeError:
motor.set(start_pos, **kwargs)
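# Illustrative call only (placeholders; requires the beamline DAQ from snd_devices):
# scan the simulated motor defined above from 0 to 1 in 11 steps, recording
# nothing and taking 120 events per point.
#     ascan(fake_motor, 0, 1, 11, events_per_point=120, record=False)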
|
PypiClean
|
/diacamma_syndic-2.6.12.23071414-py3-none-any.whl/diacamma/condominium/static/diacamma.condominium/doc_fr/_static/sphinx_highlight.js
|
"use strict";
const SPHINX_HIGHLIGHT_ENABLED = true
/**
* highlight a given string on a node by wrapping it in
* span elements with the given class name.
*/
const _highlight = (node, addItems, text, className) => {
if (node.nodeType === Node.TEXT_NODE) {
const val = node.nodeValue;
const parent = node.parentNode;
const pos = val.toLowerCase().indexOf(text);
if (
pos >= 0 &&
!parent.classList.contains(className) &&
!parent.classList.contains("nohighlight")
) {
let span;
const closestNode = parent.closest("body, svg, foreignObject");
const isInSVG = closestNode && closestNode.matches("svg");
if (isInSVG) {
span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
} else {
span = document.createElement("span");
span.classList.add(className);
}
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
parent.insertBefore(
span,
parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling
)
);
node.nodeValue = val.substr(0, pos);
if (isInSVG) {
const rect = document.createElementNS(
"http://www.w3.org/2000/svg",
"rect"
);
const bbox = parent.getBBox();
rect.x.baseVal.value = bbox.x;
rect.y.baseVal.value = bbox.y;
rect.width.baseVal.value = bbox.width;
rect.height.baseVal.value = bbox.height;
rect.setAttribute("class", className);
addItems.push({ parent: parent, target: rect });
}
}
} else if (node.matches && !node.matches("button, select, textarea")) {
node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
};
const _highlightText = (thisNode, text, className) => {
let addItems = [];
_highlight(thisNode, addItems, text, className);
addItems.forEach((obj) =>
obj.parent.insertAdjacentElement("beforebegin", obj.target)
);
};
/**
* Small JavaScript module for the documentation.
*/
const SphinxHighlight = {
/**
* highlight the search words provided in localstorage in the text
*/
highlightSearchWords: () => {
if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
// get and clear terms from localstorage
const url = new URL(window.location);
const highlight =
localStorage.getItem("sphinx_highlight_terms")
|| url.searchParams.get("highlight")
|| "";
localStorage.removeItem("sphinx_highlight_terms")
url.searchParams.delete("highlight");
window.history.replaceState({}, "", url);
// get individual terms from highlight string
const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
if (terms.length === 0) return; // nothing to do
// There should never be more than one element matching "div.body"
const divBody = document.querySelectorAll("div.body");
const body = divBody.length ? divBody[0] : document.querySelector("body");
window.setTimeout(() => {
terms.forEach((term) => _highlightText(body, term, "highlighted"));
}, 10);
const searchBox = document.getElementById("searchbox");
if (searchBox === null) return;
searchBox.appendChild(
document
.createRange()
.createContextualFragment(
'<p class="highlight-link">' +
'<a href="javascript:SphinxHighlight.hideSearchWords()">' +
_("Hide Search Matches") +
"</a></p>"
)
);
},
/**
* helper function to hide the search marks again
*/
hideSearchWords: () => {
document
.querySelectorAll("#searchbox .highlight-link")
.forEach((el) => el.remove());
document
.querySelectorAll("span.highlighted")
.forEach((el) => el.classList.remove("highlighted"));
localStorage.removeItem("sphinx_highlight_terms")
},
initEscapeListener: () => {
// only install a listener if it is really needed
if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
document.addEventListener("keydown", (event) => {
// bail for input elements
if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
// bail with special keys
if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
SphinxHighlight.hideSearchWords();
event.preventDefault();
}
});
},
};
_ready(SphinxHighlight.highlightSearchWords);
_ready(SphinxHighlight.initEscapeListener);
|
PypiClean
|
/cctbx_base-2020.8-0_py38h167b89d-cp38-cp38m-manylinux2010_x86_64.whl/mmtbx/proq2.py
|
from __future__ import absolute_import, division, print_function
try:
    from HTMLParser import HTMLParser  # Python 2
except ImportError:  # Python 3
    from html.parser import HTMLParser
class ProQError(Exception):
"""
Module exception
"""
class JobFolderParseError(ProQError):
"""
Could not parse out job folder
"""
class JobFolderParser(HTMLParser):
JOB_FOLDER_TEXT = "The job folder is located here:"
def __init__(self):
HTMLParser.__init__( self )
self.job_folder = None
self.capture_next_anchor = False
def handle_starttag(self, tag, attrs):
if tag == "a" and self.capture_next_anchor:
self.job_folder = None
for ( key, value ) in attrs:
if key == "href":
self.job_folder = value
self.capture_next_anchor = False
def handle_data(self, data):
if data.find( self.JOB_FOLDER_TEXT ) != -1:
self.capture_next_anchor = True
def parse_submission_page(stream):
parser = JobFolderParser()
for line in stream:
parser.feed( line )
if parser.job_folder is not None:
return parser.job_folder
raise JobFolderParseError("Could not find job folder text")
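# For illustration (hypothetical markup): fed a response containing
#     <p>The job folder is located here: <a href="http://server/job/123">here</a></p>
# line by line, parse_submission_page() returns "http://server/job/123".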
class Job(object):
"""
Interface to ProQ2 server
"""
SERVERURL = "http://duffman.it.liu.se/ProQ2/index.php"
OUTPUTFILE = "input.pdb.orig.B"
def __init__(self, pdbstr, name):
from iotbx.pdb import download
from six.moves import urllib
data_for = {
"pdb": pdbstr,
"bfactorPDB": "Yes",
"name": name,
"nomail": "Yes",
"do": "Submit",
}
stream = download.openurl(
url = self.SERVERURL,
data = urllib.parse.urlencode( list(data_for.items()) ),
)
self.job_folder = parse_submission_page( stream = stream )
stream.close()
def __call__(self):
from iotbx.pdb import download
return download.openurl(
url = "%s/%s" % ( self.job_folder, self.OUTPUTFILE ),
)
if __name__ == "__main__":
import sys
if len( sys.argv ) == 2:
timeout = 600
elif len( sys.argv ) == 3:
try:
timeout = float( sys.argv[2] )
except ValueError as e:
print("Cannot interpret as number: %s (%s)" % ( sys.argv[2], e ))
else:
print("Usage: %s PDBFILE <timeout = 600>" % sys.argv[0])
sys.exit( 1 )
pdbstr = open( sys.argv[1] ).read()
sys.stdout.write( "Submitting job..." )
sys.stdout.flush()
try:
myjob = Job( pdbstr = pdbstr, name = "phenix.proq2" )
except JobFolderParseError as e:
sys.stdout.write( "failed\n" )
print("Unexpected response: cannot find job folder")
sys.exit( 1 )
sys.stdout.write( "done\n" )
print("Job folder:", myjob.job_folder)
sys.stdout.write( "Waiting for results" )
sys.stdout.flush()
from libtbx import progress
from iotbx.pdb import download
waiter = progress.complete_on_success( func = myjob, excspec = download.NotFound )
try:
progress.wait(
condition = waiter,
waittime = 2,
timeout = timeout,
callback = progress.streamprint( stream = sys.stdout, character = "." ),
)
except progress.TimeoutError as e:
sys.stdout.write( "%s\n" % e )
sys.exit( 1 )
assert hasattr( waiter, "result" )
result = waiter.result.read()
waiter.result.close()
sys.stdout.write( "done\n" )
import os.path
output = "proq2-%s" % os.path.basename( sys.argv[1] )
print("Output:", output)
with open( output, "w" ) as ofile:
ofile.write( result )
ofile.write( "\n" )
print("Done!")
|
PypiClean
|
/audioldm2-0.0.9.tar.gz/audioldm2-0.0.9/app.py
|
from huggingface_hub import hf_hub_download
import torch
import os
import gradio as gr
from audioldm2 import text_to_audio, build_model
from share_btn import community_icon_html, loading_icon_html, share_js
os.environ["TOKENIZERS_PARALLELISM"] = "true"
default_checkpoint="audioldm2-full"
audioldm = None
current_model_name = None
def text2audio(
text,
guidance_scale,
random_seed,
n_candidates,
model_name=default_checkpoint,
):
global audioldm, current_model_name
torch.set_float32_matmul_precision("high")
if audioldm is None or model_name != current_model_name:
audioldm = build_model(model_name=model_name)
current_model_name = model_name
audioldm = torch.compile(audioldm)
# print(text, length, guidance_scale)
waveform = text_to_audio(
latent_diffusion=audioldm,
text=text,
seed=random_seed,
duration=10,
guidance_scale=guidance_scale,
n_candidate_gen_per_text=int(n_candidates),
) # [bs, 1, samples]
waveform = [
gr.make_waveform((16000, wave[0]), bg_image="bg.png") for wave in waveform
]
# waveform = [(16000, np.random.randn(16000)), (16000, np.random.randn(16000))]
if len(waveform) == 1:
waveform = waveform[0]
return waveform
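# Illustrative call, mirroring the gr.Examples entries further below (downloads the
# AudioLDM2 checkpoint on first use, so it is not a lightweight test):
#     video = text2audio("A cat is meowing for attention.", guidance_scale=3.5,
#                        random_seed=45, n_candidates=3)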
css = """
a {
color: inherit;
text-decoration: underline;
}
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: #000000;
background: #000000;
}
input[type='range'] {
accent-color: #000000;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
#advanced-btn {
font-size: .7rem !important;
line-height: 19px;
margin-top: 12px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
margin-bottom: 20px;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.acknowledgments h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
#container-advanced-btns{
display: flex;
flex-wrap: wrap;
justify-content: space-between;
align-items: center;
}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
margin-top: 10px;
margin-left: auto;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;right:0;
}
#share-btn * {
all: unset;
}
#share-btn-container div:nth-child(-n+2){
width: auto !important;
min-height: 0px !important;
}
#share-btn-container .wrap {
display: none !important;
}
.gr-form{
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
gap: 0;
}
#generated_id{
min-height: 700px
}
#setting_id{
margin-bottom: 12px;
text-align: center;
font-weight: 900;
}
"""
iface = gr.Blocks(css=css)
with iface:
gr.HTML(
"""
<div style="text-align: center; max-width: 700px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<h1 style="font-weight: 900; margin-bottom: 7px; line-height: normal;">
AudioLDM 2: A General Framework for Audio, Music, and Speech Generation
</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
<a href="https://arxiv.org/abs/2301.12503">[Paper]</a> <a href="https://audioldm.github.io/">[Project page]</a>
</p>
</div>
"""
)
gr.HTML(
"""
<h1 style="font-weight: 900; margin-bottom: 7px;">
AudioLDM 2: A General Framework for Audio, Music, and Speech Generation
</h1>
<p>For faster inference without waiting in the queue, you may duplicate the space and upgrade to a GPU in the settings.
<br/>
<a href="https://huggingface.co/spaces/haoheliu/audioldm2-text2audio-text2music?duplicate=true">
<img style="margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a>
<p/>
"""
)
with gr.Group():
with gr.Box():
############# Input
textbox = gr.Textbox(
value="A forest of wind chimes singing a soothing melody in the breeze.",
max_lines=1,
label="Input your text here. Your text is important for the audio quality. Please ensure it is descriptive by using more adjectives.",
elem_id="prompt-in",
)
with gr.Accordion("Click to modify detailed configurations", open=False):
seed = gr.Number(
value=45,
label="Change this value (any integer number) will lead to a different generation result.",
)
# duration = gr.Slider(
# 10, 10, value=10, step=2.5, label="Duration (seconds)"
# )
guidance_scale = gr.Slider(
0,
6,
value=3.5,
step=0.5,
label="Guidance scale (Large => better quality and relavancy to text; Small => better diversity)",
)
n_candidates = gr.Slider(
1,
3,
value=3,
step=1,
label="Automatic quality control. This number control the number of candidates (e.g., generate three audios and choose the best to show you). A Larger value usually lead to better quality with heavier computation",
)
# model_name = gr.Dropdown(
# ["audioldm-m-text-ft", "audioldm-s-text-ft", "audioldm-m-full","audioldm-s-full-v2", "audioldm-s-full", "audioldm-l-full"], value="audioldm-m-full", label="Choose the model to use. audioldm-m-text-ft and audioldm-s-text-ft are recommanded. -s- means small, -m- means medium and -l- means large",
# )
############# Output
# outputs=gr.Audio(label="Output", type="numpy")
outputs = gr.Video(label="Output", elem_id="output-video")
# with gr.Group(elem_id="container-advanced-btns"):
# # advanced_button = gr.Button("Advanced options", elem_id="advanced-btn")
# with gr.Group(elem_id="share-btn-container"):
# community_icon = gr.HTML(community_icon_html, visible=False)
# loading_icon = gr.HTML(loading_icon_html, visible=False)
# share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
# outputs=[gr.Audio(label="Output", type="numpy"), gr.Audio(label="Output", type="numpy")]
btn = gr.Button("Submit").style(full_width=True)
with gr.Group(elem_id="share-btn-container", visible=False):
community_icon = gr.HTML(community_icon_html)
loading_icon = gr.HTML(loading_icon_html)
share_button = gr.Button("Share to community", elem_id="share-btn")
# btn.click(text2audio, inputs=[
# textbox, duration, guidance_scale, seed, n_candidates, model_name], outputs=[outputs])
btn.click(
text2audio,
inputs=[textbox, guidance_scale, seed, n_candidates],
outputs=[outputs],
)
share_button.click(None, [], [], _js=share_js)
gr.HTML(
"""
<div class="footer" style="text-align: center; max-width: 700px; margin: 0 auto;">
<p>Follow the latest update of AudioLDM on our<a href="https://github.com/haoheliu/AudioLDM" style="text-decoration: underline;" target="_blank"> Github repo</a>
</p>
<br>
<p>Model by <a href="https://twitter.com/LiuHaohe" style="text-decoration: underline;" target="_blank">Haohe Liu</a></p>
<br>
</div>
"""
)
gr.Examples(
[
[
"An excited crowd cheering at a sports game.",
3.5,
45,
3,
default_checkpoint,
],
[
"A cat is meowing for attention.",
3.5,
45,
3,
default_checkpoint,
],
[
"Birds singing sweetly in a blooming garden.",
3.5,
45,
3,
default_checkpoint,
],
[
"A modern synthesizer creating futuristic soundscapes.",
3.5,
45,
3,
default_checkpoint,
],
[
"The vibrant beat of Brazilian samba drums.",
3.5,
45,
3,
default_checkpoint,
],
],
fn=text2audio,
# inputs=[textbox, duration, guidance_scale, seed, n_candidates, model_name],
inputs=[textbox, guidance_scale, seed, n_candidates],
outputs=[outputs],
cache_examples=True,
)
gr.HTML(
"""
<div class="acknowledgements">
<p>Essential Tricks for Enhancing the Quality of Your Generated Audio</p>
<p>1. Try to use more adjectives to describe your sound. For example: "A man is speaking clearly and slowly in a large room" is better than "A man is speaking". This helps ensure AudioLDM understands what you want.</p>
<p>2. Try different random seeds, which can sometimes affect the generation quality significantly.</p>
<p>3. It's better to use general terms like 'man' or 'woman' instead of specific names for individuals or abstract objects that humans may not be familiar with, such as 'mummy'.</p>
</div>
"""
)
with gr.Accordion("Additional information", open=False):
gr.HTML(
"""
<div class="acknowledgments">
<p> We build the model with data from <a href="http://research.google.com/audioset/">AudioSet</a>, <a href="https://freesound.org/">Freesound</a> and <a href="https://sound-effects.bbcrewind.co.uk/">BBC Sound Effect library</a>. We share this demo based on the <a href="https://assets.publishing.service.gov.uk/government/uploads/system/uploads/attachment_data/file/375954/Research.pdf">UK copyright exception</a> of data for academic research. </p>
</div>
"""
)
# <p>This demo is strictly for research demo purpose only. For commercial use please <a href="[email protected]">contact us</a>.</p>
iface.queue(concurrency_count=3)
iface.launch(debug=True)
# iface.launch(debug=True, share=True)
|
PypiClean
|
/ECSctrl-0.4.0-py3-none-any.whl/ecsctrl/cli.py
|
import os
import re
import click
from ecsctrl.loader import VarsLoader
from .boto_client import BotoClient
from .service_updater import ServiceUpdater, TaskDefinitionServiceUpdater, WaitForUpdate
from .yaml_converter import (
JOB_DEFINITION,
SECRETS,
SERVICE,
TASK_DEFINITION,
yaml_file_to_dict,
)
def check_var(ctx, param, value):
for v in value:
if not re.match("^[^=]+=.*$", v):
raise click.BadParameter(
f"'{v}'. Variable has to be in format variable=value"
)
return value
# fmt: off
@click.group()
@click.option("--dry-run", is_flag=True, default=False, help="Do not call actual AWS API")
@click.pass_context
# fmt: on
def cli(ctx, dry_run):
ctx.ensure_object(dict)
ctx.obj["dry_run"] = dry_run
ctx.obj["boto_client"] = BotoClient("ecs", dry_run=dry_run)
@cli.group(name="task-definition")
@click.pass_context
def task_definition(ctx):
"""Task definition management."""
def common_options(fn):
# fmt: off
fn = click.option("--env-file", "-e", multiple=True, type=str, help="Path to env-style file with variables")(fn)
fn = click.option("--json-file", "-j", multiple=True, type=str, help="Path to json file with variable")(fn)
fn = click.option("--var", "-v", multiple=True, type=str, callback=check_var, help="Single variable in format name=value")(fn)
fn = click.option("--sys-env/--no-sys-env", is_flag=True, default=False, help="Uses system env as a source for template variables")(fn)
# fmt: on
return fn
def wait_options(wait_for, many=False):
def wrapper(fn):
s = "s" if many else ""
# fmt: off
fn = click.option("--wait", "-w", is_flag=True, help=f"Waits for service{s} to finish {wait_for}")(fn)
fn = click.option("--wait-timeout", default=600, type=int, help=f"Custom timeout in seconds (defaults to 600s)")(fn)
# fmt: on
return fn
return wrapper
# fmt: off
@task_definition.command()
@click.argument("spec-file", type=str)
@common_options
@click.option("--update-services-in-cluster", "-c", multiple=True, type=str, help="Updates all services deployed with this task in a particular cluster")
@wait_options(wait_for="update", many=True)
@click.pass_context
# fmt: on
def register(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
update_services_in_cluster,
wait,
wait_timeout,
):
"""Register task definition."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, TASK_DEFINITION)
task_family = spec.get("family", "N/A")
click.echo(f"🗂 Registering task definition {task_family}.")
response = ctx.obj["boto_client"].call("register_task_definition", **spec)
task_definition_arn = response["taskDefinition"]["taskDefinitionArn"]
click.echo(f"\t✅ done, task definition arn: {task_definition_arn}.")
if update_services_in_cluster and not ctx.obj["dry_run"]:
updated_services = {}
for cluster_name in update_services_in_cluster:
updater = TaskDefinitionServiceUpdater(
ctx.obj["boto_client"], task_definition_arn, cluster_name
)
updated_services_in_cluster = updater.update()
updated_services[cluster_name] = updated_services_in_cluster
if wait:
waiter = WaitForUpdate(ctx.obj["boto_client"], updated_services)
waiter.timeout = wait_timeout
waiter.wait_for_all()
@cli.group(name="batch-job-definition")
@click.pass_context
def batch_job_definition(ctx):
"""Batch job definition management."""
# fmt: off
@batch_job_definition.command()
@click.argument("spec-file", type=str)
@common_options
@click.pass_context
# fmt: on
def register(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
):
"""Register AWS Batch job definition."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, JOB_DEFINITION)
job_definition_name = spec.get("jobDefinitionName", "N/A")
click.echo(f"🗂 Registering batch job definition {job_definition_name}.")
client = BotoClient("batch", dry_run=ctx.obj["boto_client"].dry_run)
response = client.call("register_job_definition", **spec)
job_definition_arn = response["jobDefinitionArn"]
click.echo(f"\t✅ done, job definition arn: {job_definition_arn}.")
@cli.group(name="service")
@click.pass_context
def service(ctx):
"""Service management."""
@service.command()
@click.argument("spec-file", type=str)
@common_options
@wait_options(wait_for="creation")
@click.pass_context
def create(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
wait,
wait_timeout,
):
"""Create a new service."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, SERVICE)
service_name = spec.get("serviceName")
cluster_name = spec.get("cluster")
click.echo(f"🏸 Creating service {service_name}.")
response = ctx.obj["boto_client"].call("create_service", **spec)
service_arn = response["service"]["serviceArn"]
click.echo("\t✅ done.")
if wait:
waiter = WaitForUpdate(
ctx.obj["boto_client"],
{cluster_name: [(service_arn, service_name)]},
)
waiter.timeout = wait_timeout
waiter.wait_for_all()
@service.command()
@click.argument("spec-file", type=str)
@common_options
@wait_options(wait_for="update")
@click.pass_context
def update(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
wait,
wait_timeout,
):
"""Update an existing service."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, SERVICE)
service_name = spec.get("serviceName")
cluster_name = spec.get("cluster")
click.echo(f"🏸 Updating service {service_name}.")
updater = ServiceUpdater()
spec = updater.make_update_payload(spec)
response = ctx.obj["boto_client"].call("update_service", **spec)
service_arn = response["service"]["serviceArn"]
click.echo("\t✅ done.")
if wait:
waiter = WaitForUpdate(
ctx.obj["boto_client"],
{cluster_name: [(service_arn, service_name)]},
)
waiter.timeout = wait_timeout
waiter.wait_for_all()
@service.command("create-or-update")
@click.argument("spec-file", type=str)
@common_options
@wait_options(wait_for="update")
@click.pass_context
def create_or_update(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
wait,
wait_timeout,
):
"""Check if service exists and update it or create a new one."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, SERVICE)
service_name = spec.get("serviceName")
cluster_name = spec.get("cluster")
response = ctx.obj["boto_client"].call(
"describe_services",
cluster=spec["cluster"],
services=[service_name],
)
service_exists = len(response["services"]) > 0
if service_exists:
click.echo(f"🏸 Updating service {service_name}.")
updater = ServiceUpdater()
spec = updater.make_update_payload(spec)
response = ctx.obj["boto_client"].call("update_service", **spec)
click.echo("\t✅ done.")
else:
click.echo(f"🏸 Creating service {service_name}.")
response = ctx.obj["boto_client"].call("create_service", **spec)
click.echo("\t✅ done.")
service_arn = response["service"]["serviceArn"]
if wait:
waiter = WaitForUpdate(
ctx.obj["boto_client"],
{cluster_name: [(service_arn, service_name)]},
)
waiter.timeout = wait_timeout
waiter.wait_for_all()
@cli.group(name="secrets")
@click.pass_context
def secrets(ctx):
"""Secrets management."""
@secrets.command()
@click.argument("spec-file", type=str)
@common_options
@click.pass_context
def store(
ctx,
spec_file,
env_file,
json_file,
var,
sys_env,
):
"""Store secret is Parameter Store."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
spec = yaml_file_to_dict(spec_file, vars, SECRETS)
ssm = BotoClient("ssm", dry_run=ctx.obj["boto_client"].dry_run)
for secret_name, value in spec.items():
ssm_params = {
"Name": secret_name,
"Value": value,
"Type": "SecureString",
}
click.echo(f"🔑 Storing secret {secret_name}.")
response = ssm.call("put_parameter", **ssm_params)
click.echo(f"\t✅ done, parameter version: {response['Version']}")
@service.command()
@click.argument("task-definition-spec-file", type=str)
@click.argument("service-spec-file", type=str)
@common_options
@wait_options(wait_for="update")
@click.pass_context
def deploy(
ctx,
task_definition_spec_file,
service_spec_file,
env_file,
json_file,
var,
sys_env,
wait,
wait_timeout,
):
"""All-in-one - register task definition and create or update service."""
vars = VarsLoader(env_file, var, json_file, sys_env).load()
task_definition_spec = yaml_file_to_dict(
task_definition_spec_file, vars, TASK_DEFINITION
)
task_family = task_definition_spec.get("family", "N/A")
click.echo(f"🗂 Registering task definition {task_family}.")
response = ctx.obj["boto_client"].call(
"register_task_definition", **task_definition_spec
)
task_definition_arn = response["taskDefinition"]["taskDefinitionArn"]
click.echo(f"\t✅ done, task definition arn: {task_definition_arn}.")
service_spec = yaml_file_to_dict(service_spec_file, vars, SERVICE)
service_name = service_spec.get("serviceName")
cluster_name = service_spec.get("cluster")
service_spec["taskDefinition"] = task_definition_arn
response = ctx.obj["boto_client"].call(
"describe_services",
cluster=service_spec["cluster"],
services=[service_name],
)
service_exists = len(response["services"]) > 0
if service_exists:
click.echo(f"🏸 Updating service {service_name}.")
updater = ServiceUpdater()
service_spec = updater.make_update_payload(service_spec)
response = ctx.obj["boto_client"].call("update_service", **service_spec)
click.echo("\t✅ done.")
else:
click.echo(f"🏸 Creating service {service_name}.")
response = ctx.obj["boto_client"].call("create_service", **service_spec)
click.echo("\t✅ done.")
service_arn = response["service"]["serviceArn"]
if wait:
waiter = WaitForUpdate(
ctx.obj["boto_client"],
{cluster_name: [(service_arn, service_name)]},
)
waiter.timeout = wait_timeout
waiter.wait_for_all()
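# Illustrative invocation of the all-in-one deploy command (assuming the console
# script is installed as "ecsctrl"; file names and values are placeholders):
#     ecsctrl service deploy task-definition.yaml service.yaml -e prod.env --wait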
|
PypiClean
|
/avimigrationtools-30.1.1.tar.gz/avimigrationtools-30.1.1/avi/migrationtools/avi_converter.py
|
import random
import json
import logging
import os
import string
import yaml
import avi.migrationtools
from avi.migrationtools import avi_rest_lib
from avi.migrationtools.config_patch import ConfigPatch
from avi.migrationtools.vs_filter import filter_for_vs
from avi.migrationtools.avi_migration_utils import MigrationUtil
LOG = logging.getLogger(__name__)
sdk_version = getattr(avi.migrationtools, '__version__', None)
mg_util = MigrationUtil()
class AviConverter(object):
output_file_path = None
patch = None
vs_filter = None
controller_ip = None
user = None
password = None
tenant = None
prefix = None
skip_ref_object_list = ['cloud_ref', 'tenant_ref', 'se_group_ref']
def print_pip_and_controller_version(self):
pass
def convert(self):
pass
def process_for_utils(self, avi_config, skip_ref_objects=skip_ref_object_list):
"""
Check if patch args present then execute the config_patch.py with args
:param avi_config: converted avi object dict
:param skip_ref_objects: comma separated names of objects ref to be skipped
:return: avi_config
"""
if self.patch:
with open(self.patch) as f:
patches = yaml.load(f, Loader=yaml.Loader)
cp = ConfigPatch(avi_config, patches)
avi_config = cp.patch()
# Check if vs_filter args present then execute vs_filter.py with args
if self.vs_filter:
avi_config = filter_for_vs(avi_config, self.vs_filter, self.prefix, skip_ref_objects=skip_ref_objects)
return avi_config
def upload_config_to_controller(self, avi_config):
"""
Upload configuration to controller
:param avi_config: converted avi object dict
:return:
"""
print("Uploading Configuration to Controller...")
avi_rest_lib.upload_config_to_controller(
avi_config, self.controller_ip, self.user, self.password,
self.tenant, self.controller_version)
def download_gslb_config_form_controller(self):
""" Downloading gslb configuration from controller
and return the output json"""
return avi_rest_lib.download_gslb_from_controller(
self.controller_ip, self.user, self.password, self.password)
def write_output(self, avi_config, output_dir, report_name):
"""
write output file for conversion
:param avi_config: dict of converted avi object
:param output_dir: location for output file
:param report_name: name of file
:return: None
"""
report_path = output_dir + os.path.sep + report_name
print("Converted Output Location: %s" % \
(report_path))
with open(report_path, "w", encoding='utf-8') as text_file:
json.dump(avi_config, text_file, indent=4)
LOG.info('written avi config file %s %s output.json',
output_dir, os.path.sep)
def init_logger_path(self):
LOG.setLevel(logging.DEBUG)
print("Log File Location: %s" % self.output_file_path)
formatter = '[%(asctime)s] %(levelname)s [%(funcName)s:%(lineno)d] %(message)s'
logging.basicConfig(filename=os.path.join(self.output_file_path, 'converter.log'),
level=logging.DEBUG, format=formatter)
def trim_object_length(self, avi_config):
'''
Method for trimming object names when they exceed the maximum allowed length.
:param avi_config: the migrated Avi configuration
'''
list_with_max_280_char = ['VsVip', 'PoolGroup', 'Pool', 'NetworkSecurityPolicy', 'HTTPPolicySet']
for key in avi_config.keys():
if key in ['UnsupportedProfiles','OneConnect']:
pass
elif key in list_with_max_280_char:
self.trim_length_if_name_field_exceeds_max_char(avi_config[key], avi_config, key, 280)
else:
self.trim_length_if_name_field_exceeds_max_char(avi_config[key], avi_config, key, 256)
def trim_length_if_name_field_exceeds_max_char(self, obj_config_dict, avi_config, obj_type, max_char):
"""
Trim object length
Args:
obj_config_dict: passed object configuration
avi_config : passed migrated avi configuration
obj_type : passed object type
max_char : max allowed character for object length
"""
cp = ConfigPatch(avi_config, '')
for obj_config in obj_config_dict:
if len(obj_config['name']) > max_char:
random_str = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase
+ string.digits) for _ in range(3))
obj_config['description'] = obj_config['name']
new_obj_name = "%s-%s" % ((obj_config['name'])[:200],random_str)
old_obj_ref, new_obj_ref = self.get_old_and_new_obj_ref(
avi_config, obj_config, new_obj_name, obj_config['name'], obj_type)
cp.update_references(obj_type, old_obj_ref, new_obj_ref, avi_cfg=avi_config)
obj_config['name'] = "%s-%s" % ((obj_config['name'])[:200],random_str)
def get_old_and_new_obj_ref(self, avi_config, obj_config, new_name, old_name, obj_type):
'''
Method for getting the old and new object references
'''
cp = ConfigPatch(avi_config, '')
tenant = (cp.param_value_in_ref(obj_config.get('tenant_ref'), 'name')
if 'tenant_ref' in obj_config else '')
cloud = (cp.param_value_in_ref(obj_config.get('cloud_ref'), 'name')
if 'cloud_ref' in obj_config else '')
new_obj_ref = mg_util.get_object_ref(
new_name, obj_type.lower(), tenant, cloud_name=cloud)
old_obj_ref = mg_util.get_object_ref(old_name, obj_type.lower(), tenant, cloud_name=cloud)
return old_obj_ref, new_obj_ref
|
PypiClean
|
/Bluebook-0.0.1.tar.gz/Bluebook-0.0.1/devtest/application/config.py
|
import os
CWD = os.path.dirname(__file__)
class Config(object):
"""
Flask config: http://flask.pocoo.org/docs/0.10/config/
"""
SERVER_NAME = None
DEBUG = True
SECRET_KEY = "PLEASE CHANGE ME"
# ------
APP_NAME = ""
APP_VERSION = "0.0.1"
#
ADMIN_EMAIL = "[email protected]"
ADMIN_NAME = "Admin Test"
# ------- AWS CREDENTIALS ------------------------------------------------------
#: AWS Credentials
# For: S3, SES Mailer, flask s3
AWS_ACCESS_KEY_ID = "AKIAIJ66SVCZPD4F3P6A"
AWS_SECRET_ACCESS_KEY = "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"
AWS_S3_BUCKET_NAME = "yoredis"
# ------- DATABASES ------------------------------------------------------------
#: SQLAlchemy
#: format: engine://USERNAME:PASSWORD@HOST:PORT/DB_NAME
DATABASE_URI = "mysql+pymysql://root:[email protected]:3306/test_pylot" #"sqlite://///Users/mardochee.macxis/Projects/Python/flask-pilot/test/app.db"
#: REDIS
#: format: USERNAME:PASSWORD@HOST:PORT
REDIS_URI = None
# ------------------------------------------------------------------------------
# WEBASSETS
# Flask-Assets
# http://flask-assets.readthedocs.org/
ASSETS_DEBUG = False
FLASK_ASSETS_USE_S3 = False
# Flask-S3
# https://flask-s3.readthedocs.org/en/v0.1.4/
USE_S3 = False
S3_BUCKET_DOMAIN = ""
S3_BUCKET_NAME = AWS_S3_BUCKET_NAME
S3_USE_HTTPS = False
USE_S3_DEBUG = False
S3_ONLY_MODIFIED = False
# ------------------------------------------------------------------------------
#: SESSION
#: Flask-KVSession is used to save the user's session
#: Set the SESSION_URI to by using these examples below to set KVSession
#: To use local session, just set SESSION_URI to None
#:
#: Redis: redis://username:password@host:6379/db
#: S3: s3://username:password@host/bucket
#: Google Storage: google_storage://username:password@host/bucket
#: SQL: postgresql://username:password@host:3306/db
#: mysql+pysql://username:password@host:3306/db
#: sqlite://
#: Memcached: memcache://host:port
#:
SESSION_URI = None
# ------------------------------------------------------------------------------
#: CLOUDSTORAGE
#: Flask-CloudStorage is used to save uploads on S3, Google Storage,
#: Cloudfiles, Azure Blobs, and Local storage
#: When using local storage, they can be accessed via http://yoursite/files
#: CLOUDSTORAGE_PROVIDER:
# The provider to use. By default it's 'LOCAL'.
# You can use:
# LOCAL, S3, GOOGLE_STORAGE, AZURE_BLOBS, CLOUDFILES
CLOUDSTORAGE_PROVIDER = "LOCAL"
#: CLOUDSTORAGE_KEY
# The storage key. Leave it blank if PROVIDER is LOCAL
CLOUDSTORAGE_KEY = AWS_ACCESS_KEY_ID
#: CLOUDSTORAGE_SECRET
#: The storage secret key. Leave it blank if PROVIDER is LOCAL
CLOUDSTORAGE_SECRET = AWS_SECRET_ACCESS_KEY
#: CLOUDSTORAGE_CONTAINER
#: The Bucket name (for S3, Google storage, Azure, cloudfile)
#: or the directory name (LOCAL) to access
CLOUDSTORAGE_CONTAINER = "uploads"
#: CLOUDSTORAGE_ALLOWED_EXTENSIONS
#: List of extensions to allow
CLOUDSTORAGE_ALLOWED_EXTENSIONS = []
#: CLOUDSTORAGE_LOCAL_PATH
#: When PROVIDER is LOCAL, the directory path where CONTAINER exists
CLOUDSTORAGE_LOCAL_PATH = "%s/data" % CWD
#: CLOUDSTORAGE_LOCAL_URL
#: Url to access LOCAL file
CLOUDSTORAGE_SERVE_FILES = True
CLOUDSTORAGE_SERVE_FILES_URL = "files"
#: CLOUDSTORAGE_SERVE_FILES_URL_SECURE
#: Bool to serve files via http or https
CLOUDSTORAGE_SERVE_FILES_URL_SECURE = False
CLOUDSTORAGE_URI = "s3://{username}:{password}@s3.aws.amazon.com/{bucket}"\
.format(username=AWS_ACCESS_KEY_ID,
password=AWS_SECRET_ACCESS_KEY,
bucket=AWS_S3_BUCKET_NAME)
# ------------------------------------------------------------------------------
#: MAILER
#: To send mail using AWS SES or SMTP
#: You can send raw email, or templated email for convenience
#: MAILER_PROVIDER
#: The mailer provider SES or SMTP
MAILER_PROVIDER = "SMTP"
#: MAILER_SES_ACCESS_KEY
#: For SES The AWS_ACCESS_KEY_ID
MAILER_SES_ACCESS_KEY = AWS_ACCESS_KEY_ID
#: MAILER_SES_SECRET_KEY
#: For SES The AWS_SECRET_ACCESS_KEY
MAILER_SES_SECRET_KEY = AWS_SECRET_ACCESS_KEY
#: MAILER_SENDER - The sender of the email by default
#: For SES, this email must be authorized
MAILER_SENDER = "[email protected]"
#: MAILER_REPLY_TO
#: The email to reply to by default
MAILER_REPLY_TO = "[email protected]"
#: MAILER_TEMPLATE
#: a directory that contains the email template or a dict
MAILER_TEMPLATE = "%s/var/ses-mailer" % CWD
#: MAILER_TEMPLATE_CONTEXT
#: a dict of all context to pass to the email by default
MAILER_TEMPLATE_CONTEXT = {
"site_name": "MyTestSite.com",
"site_url": "http://mytestsite.com"
}
#: MAILER_SMTP_URI
#: The uri for the smtp connection. It will use Flask-Mail
#: format: smtp://USERNAME:PASSWORD@HOST:PORT
#: with sll -> smtp+ssl://USERNAME:PASSWORD@HOST:PORT
#: with ssl and tls -> smtp+ssl+tls://USERNAME:PASSWORD@HOST:PORT
MAILER_SMTP_URI = "smtp+ssl://{username}:{password}@{host}:{port}"\
.format(username="",
password="",
host="smtp.gmail.com",
port=465)
#: PAGINATION_PER_PAGE : Total entries to display per page
PAGINATION_PER_PAGE = 25
# ------------------------------------------------------------------------------
#: CACHE
#: Flask-Cache is used to caching
#: CACHE_TYPE
#: The type of cache to use
#: null, simple, redis, filesystem,
CACHE_TYPE = "simple"
#: CACHE_REDIS_URL
#: If CHACHE_TYPE is 'redis', set the redis uri
#: redis://username:password@host:port/db
CACHE_REDIS_URL = ""
#: CACHE_DIR
#: Directory to store cache if CACHE_TYPE is filesystem, it will
CACHE_DIR = ""
# ------------------------------------------------------------------------------
#: RECAPTCHA
#: Flask-Recaptcha
#: Register your application at https://www.google.com/recaptcha/admin
#: RECAPTCHA_SITE_KEY
RECAPTCHA_SITE_KEY = "6LchGgITAAAAAG-5mgaxR-5QFwtxt1OByvdOkQJV"
#: RECAPTCHA_SECRET_KEY
RECAPTCHA_SECRET_KEY = "6LchGgITAAAAAIHo1JDof2SFOaqD9YEFzwMb5w77"
# ------------------------------------------------------------------------------
#: GOOGLE ANALYTICS ID
GOOGLE_ANALYTICS_ID = ""
# ------------------------------------------------------------------------------
USER_ACCOUNT_ENABLE_EMAIL_LOGIN = True
USER_ACCOUNT_ENABLE_SIGNUP = True
USER_ACCOUNT_RESET_PASSWORD_METHOD = "TOKEN" # TOKEN | PASSWORD
USER_ACCOUNT_ENABLE_AUTH_LOGIN = True
USER_ACCOUNT_AUTH_CREDENTIALS = {
"Facebook": {
"consumer_key": "402239969943888",
"consumer_secret": ""
},
"Google": {
"consumer_key": "402239969943888",
"consumer_secret": ""
},
"Twitter": {
"consumer_key": "402239969943888",
"consumer_secret": ""
},
"WindowsLive": {
"consumer_key": "402239969943888",
"consumer_secret": ""
},
"UbuntuOne": {
"consumer_key": "402239969943888",
"consumer_secret": ""
}
}
# ----- LOGIN -----
LOGIN_RESET_PASSWORD_METHOD = "TOKEN" # PASSWORD | TOKEN
LOGIN_EMAIL_ENABLE = True
LOGIN_OAUTH_ENABLE = True
LOGIN_SIGNUP_ENABLE = True
LOGIN_OAUTH_CREDENTIALS = {
"FACEBOOK": {
"ENABLE": True,
"CLIENT_ID": ""
},
"GOOGLE": {
"ENABLE": True,
"CLIENT_ID": ""
},
"TWITTER": {
"ENABLE": False,
"CLIENT_ID": ""
}
}
# Maintenance
# Turn maintenance page ON and OFF
MAINTENANCE_ON = False
# Contact
# Email address for the contact page receipient
CONTACT_PAGE_EMAIL_RECIPIENT = "[email protected]"
COMPONENT_USER_ACCOUNT = {
"email_login": True,
"auth_login": True,
"auth": {
"Facebook": {
}
}
}
class Development(Config):
pass
class Production(Config):
SECRET_KEY = None
|
PypiClean
|
/sifflet_sdk-0.3.2-py3-none-any.whl/client/model/multi_metrics_graph_dto.py
|
import re # noqa: F401
import sys # noqa: F401
from client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from client.exceptions import ApiAttributeError
def lazy_import():
from client.model.multi_metrics_graph_dto_all_of import MultiMetricsGraphDtoAllOf
from client.model.rule_graph_dto import RuleGraphDto
from client.model.rule_graph_dto_graph_points_inner import RuleGraphDtoGraphPointsInner
globals()['MultiMetricsGraphDtoAllOf'] = MultiMetricsGraphDtoAllOf
globals()['RuleGraphDto'] = RuleGraphDto
globals()['RuleGraphDtoGraphPointsInner'] = RuleGraphDtoGraphPointsInner
class MultiMetricsGraphDto(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('empty_string_id',): {
'EDGE_165169_EMPTY_165169_STRING': "EDGE_165169_EMPTY_165169_STRING",
},
('graph_type',): {
'DUPLICATE': "DUPLICATE",
'COMPLETENESS': "COMPLETENESS",
'FRESHNESS': "FRESHNESS",
'NUMERICAL_TRANSFORMATION': "NUMERICAL_TRANSFORMATION",
'DISTRIBUTION': "DISTRIBUTION",
'MULTI_METRICS_GLOBAL_TIME_SERIES': "MULTI_METRICS_GLOBAL_TIME_SERIES",
'MULTI_METRICS_DIFFERENCE_TIME_SERIES': "MULTI_METRICS_DIFFERENCE_TIME_SERIES",
'NONE': "NONE",
},
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'empty_string_id': (str,), # noqa: E501
'graph_points': ([RuleGraphDtoGraphPointsInner],), # noqa: E501
'graph_type': (str,), # noqa: E501
'metric_names': ([str],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'empty_string_id': 'emptyStringId', # noqa: E501
'graph_points': 'graphPoints', # noqa: E501
'graph_type': 'graphType', # noqa: E501
'metric_names': 'metricNames', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""MultiMetricsGraphDto - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
empty_string_id (str): [optional] if omitted the server will use the default value of "EDGE_165169_EMPTY_165169_STRING" # noqa: E501
graph_points ([RuleGraphDtoGraphPointsInner]): [optional] # noqa: E501
graph_type (str): [optional] # noqa: E501
metric_names ([str]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""MultiMetricsGraphDto - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
empty_string_id (str): [optional] if omitted the server will use the default value of "EDGE_165169_EMPTY_165169_STRING" # noqa: E501
graph_points ([RuleGraphDtoGraphPointsInner]): [optional] # noqa: E501
graph_type (str): [optional] # noqa: E501
metric_names ([str]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
MultiMetricsGraphDtoAllOf,
RuleGraphDto,
],
'oneOf': [
],
}
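# Illustrative construction sketch (not generated code): the keyword arguments
# below come from the __init__ docstring above; the values are made up.
#
#     dto = MultiMetricsGraphDto(graph_type="LINE", metric_names=["cpu", "memory"])
#     print(dto.metric_names)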
/ContextPy3-1.0.tar.gz/ContextPy3-1.0/contextpy3.py
import sys
import threading
__all__ = ['Layer']
__all__ += ['active_layer', 'active_layers', 'inactive_layer', 'inactive_layers']
__all__ += ['proceed']
__all__ += ['before', 'after', 'around', 'base']
__all__ += ['global_activate_layer', 'global_deactivate_layer']
__version__ = "1.0"
# system-global layer stack
_BASELAYERS = (None,)
# thread-local layer stack
class TLS(threading.local):
def __init__(self):
super(threading.local, self).__init__() # pylint: disable=bad-super-call
self.context = None
self.active_layers = ()
_TLS = TLS()
class Layer(object):
def __init__(self, name=None):
self._name = name or hex(id(self))
def __str__(self):
return "<layer %s>" % (self._name)
def __repr__(self):
args = []
if self._name != hex(id(self)):
args.append('name="%s"' % self._name)
return "layer(%s)" % (", ".join(args))
class _LayerManager(object):
def __init__(self, layers):
self._layers = layers
self._old_layers = ()
def __enter__(self):
self._old_layers = _TLS.active_layers
_TLS.active_layers = tuple(self._get_active_layers()) # pylint: disable=no-member
def __exit__(self, exc_type, exc_value, exc_tb):
_TLS.active_layers = self._old_layers
class _LayerActivationManager(_LayerManager):
def _get_active_layers(self):
return [layer for layer in self._old_layers if layer not in self._layers] + self._layers
class _LayerDeactivationManager(_LayerManager):
def _get_active_layers(self):
return [layer for layer in self._old_layers if layer not in self._layers]
def active_layer(layer):
return _LayerActivationManager([layer])
def inactive_layer(layer):
return _LayerDeactivationManager([layer])
def active_layers(*layers):
return _LayerActivationManager(list(layers))
def inactive_layers(*layers):
return _LayerDeactivationManager(list(layers))
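# Illustrative usage sketch (layer name is an example): the managers above are
# ordinary context managers, so thread-local layer (de)activation nests naturally.
#
#     logging_layer = Layer("logging")
#     with active_layer(logging_layer):
#         ...  # methods registered for logging_layer are composed in here
#         with inactive_layer(logging_layer):
#             ...  # temporarily composed out again for this inner block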
class _advice(object):
def __init__(self, func, successor):
if func:
self._func = func
else:
self._func = None
self._successor = successor
def _invoke(self, context, args, kwargs):
if (context[0] is None) and (context[1] is None):
# Normal Python function no binding needed
return self._func(*args, **kwargs)
        # Kind of instance method, class method or static method (binding needed)
return self._func.__get__(context[0], context[1])(*args, **kwargs)
@classmethod
def createchain(cls, methods):
if not methods:
return _stop(None, None)
method, when = methods[0]
return when(method, cls.createchain(methods[1:]))
class _before(_advice):
def __call__(self, context, args, kwargs):
self._invoke(context, args, kwargs)
return self._successor(context, args, kwargs)
class _around(_advice):
def __call__(self, context, args, kwargs):
backup = _TLS.context
_TLS.context = context
context[2] = self._successor
result = self._invoke(context, args, kwargs)
_TLS.context = backup
return result
class _after(_advice):
def __call__(self, context, args, kwargs):
result = self._successor(context, args, kwargs)
kwargs_with_result = dict(__result__=result, **kwargs)
return self._invoke(context, args, kwargs_with_result)
class _stop(_advice):
def __call__(self, context, args, kwargs):
raise Exception(
"called proceed() in innermost function, this probably means that"
"you don't have a base method (`around` advice in None layer) or"
"the base method itself calls proceed()")
def proceed(*args, **kwargs):
context = _TLS.context
return context[2](context, args, kwargs)
def _true(*_):
return True
def merge_layers(tuple1, tuple2):
return tuple1 + tuple([layer for layer in tuple2 if layer not in tuple1])
class _layeredmethodinvocationproxy(object):
__slots__ = ("_inst", "_cls", "_descriptor")
def __init__(self, descriptor, inst, cls):
self._inst = inst
self._cls = cls
self._descriptor = descriptor
def __call__(self, *args, **kwargs):
layers = merge_layers(_BASELAYERS, _TLS.active_layers)
advice = (
self._descriptor.cache().get(layers)
or self._descriptor.cache_methods(layers))
context = [self._inst, self._cls, None]
result = advice(context, args, kwargs)
return result
class _layeredmethoddescriptor(object):
def __init__(self, methods):
self._methods = methods
self._cache = {}
def _clear_cache(self):
self._cache = {}
def cache(self):
return self._cache
    def cache_methods(self, active_layers):
        layers = list(reversed(active_layers))
        # For each active layer, get all methods and the when advice class related to this layer
        methods = sum([
            list(reversed(
                [(lmwgm[1], lmwgm[2])
                 for lmwgm in self._methods
                 if lmwgm[0] is currentlayer and lmwgm[3](active_layers)]
            )) for currentlayer in layers], [])
        self._cache[active_layers] = result = _advice.createchain(methods)
        return result
def set_methods(self, methods):
self._methods[:] = methods
self._clear_cache()
def get_methods(self):
return list(self._methods)
def register_method(self, method, when=_around, layer_=None, guard=_true, method_name=""):
assert isinstance(layer_, (Layer, type(None)))
assert issubclass(when, _advice)
self.methods = self.methods + [
(layer_, method, when, guard, method_name)]
methods = property(get_methods, set_methods)
def __get__(self, inst, cls=None):
return _layeredmethodinvocationproxy(self, inst, cls)
# Used only for functions (no binding or invocation proxy needed)
def __call__(self, *args, **kwargs):
layers = merge_layers(_BASELAYERS, _TLS.active_layers)
advice = self._cache.get(layers) or self.cache_methods(layers)
# 2x None to identify: do not bound this function
context = [None, None, None]
result = advice(context, args, kwargs)
return result
def createlayeredmethod(base_method, partial_method):
if base_method:
return _layeredmethoddescriptor([(None, base_method, _around, _true)] + partial_method)
return _layeredmethoddescriptor(partial_method)
# Needed for a hack to get the name of the class/static method object
class _dummyClass:
pass
def get_method_name(method):
if isinstance(method, (classmethod, staticmethod)):
        # Bind the method to a dummy class to retrieve the original name
return method.__get__(None, _dummyClass).__name__
return method.__name__
def __common(layer_, guard, when):
assert isinstance(layer_, (Layer, type(None))), \
"layer_ argument must be a layer instance or None"
assert callable(guard), "guard must be callable"
assert issubclass(when, _advice)
frame = sys._getframe(2).f_locals # pylint: disable=protected-access
def decorator(method):
method_name = get_method_name(method)
current_method = frame.get(method_name)
if issubclass(type(current_method), _layeredmethoddescriptor):
#Append the new method
current_method.register_method(method, when, layer_, guard, method_name)
else:
current_method = createlayeredmethod(current_method,
[(layer_, method, when, guard, method_name)])
return current_method
return decorator
def before(layer_=None, guard=_true):
return __common(layer_, guard, _before)
def around(layer_=None, guard=_true):
return __common(layer_, guard, _around)
def after(layer_=None, guard=_true):
return __common(layer_, guard, _after)
def base(method):
# look for the current entry in the __dict__ (class or module)
frame = sys._getframe(1).f_locals # pylint: disable=protected-access
method_name = get_method_name(method)
current_method = frame.get(method_name)
if issubclass(type(current_method), _layeredmethoddescriptor):
# add the first entry of the layered method with the base entry
current_method.methods = [(None, method, _around, _true)] + current_method.methods
return current_method
return method
before.when = _before
around.when = _around
after.when = _after
def global_activate_layer(layer):
global _BASELAYERS
if layer in _BASELAYERS:
raise ValueError("layer is already active")
_BASELAYERS += (layer,)
return _BASELAYERS
def global_deactivate_layer(layer):
global _BASELAYERS
old_layers = list(_BASELAYERS)
if layer not in old_layers:
raise ValueError("layer is not active")
i = old_layers.index(layer)
_BASELAYERS = tuple(old_layers[:i] + old_layers[i+1:])
return _BASELAYERS
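if __name__ == "__main__":
    # Minimal self-contained sketch of layered dispatch (the names below are made
    # up for illustration): the base method answers when no layer is active; the
    # around partial method wraps it via proceed() while `discount` is active.
    discount = Layer("discount")

    class Shop(object):
        @base
        def price(self, amount):
            return amount

        @around(discount)
        def price(self, amount):
            return proceed(amount) * 0.9

    shop = Shop()
    print(shop.price(100))       # base behaviour: 100
    with active_layer(discount):
        print(shop.price(100))   # layered behaviour: 90.0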
/scancode-toolkit-32.0.6.tar.gz/scancode-toolkit-32.0.6/src/cluecode/finder.py
import string
import re
import ipaddress
import urlpy
from commoncode.text import toascii
from cluecode import finder_data
from textcode import analysis
# Tracing flags
TRACE = False
TRACE_URL = False
TRACE_EMAIL = False
def logger_debug(*args):
pass
if TRACE or TRACE_URL or TRACE_EMAIL:
import logging
import sys
logger = logging.getLogger(__name__)
# logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
def logger_debug(*args):
return logger.debug(' '.join(isinstance(a, str) and a or repr(a) for a in args))
"""
Find patterns in text lines such as emails and URLs.
Optionally apply filters to pattern matches.
"""
def find(location, patterns):
"""
Yield match and matched lines for patterns found in file at location as a
tuple of (key, found text, text line). `patterns` is a list of tuples (key,
compiled regex).
Note: the location can be a list of lines for testing convenience.
"""
if TRACE:
from pprint import pformat
loc = pformat(location)
logger_debug('find(location=%(loc)r,\n patterns=%(patterns)r)' % locals())
for line_number, line in analysis.numbered_text_lines(location, demarkup=False):
for key, pattern in patterns:
for match in pattern.findall(line):
if TRACE:
logger_debug('find: yielding match: key=%(key)r, '
'match=%(match)r,\n line=%(line)r' % locals())
yield key, toascii(match), line, line_number
def unique_filter(matches):
"""
Iterate over matches and yield unique matches.
"""
uniques = set()
for key, match, line, line_number in matches:
if (key, match,) in uniques:
continue
uniques.add((key, match,))
yield key, match, line, line_number
def apply_filters(matches, *filters):
"""
Apply a sequence of `filters` to a `matches` iterable. Return a new filtered
matches iterable.
A filter must accept a single arg: an iterable of tuples of (key, match,
line, line_number) and must return an iterable of tuples of (key, match, line,
line_number).
"""
for filt in filters:
matches = filt(matches)
return matches
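# Illustrative sketch of the filter protocol documented above (the filter name is
# an example, not part of this module): any generator that consumes and yields
# (key, match, line, line_number) tuples can be chained through apply_filters().
#
#     def drop_ftp_filter(matches):
#         for key, match, line, line_number in matches:
#             if not match.lower().startswith('ftp://'):
#                 yield key, match, line, line_number
#
#     filtered = apply_filters(matches, drop_ftp_filter, unique_filter)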
def build_regex_filter(pattern):
"""
Return a filter function using regex pattern, filtering out matches
matching this regex. The pattern should be text, not a compiled re.
"""
def re_filt(matches):
if TRACE:
logger_debug('re_filt: pattern="{}"'.format(pattern))
for key, match, line, line_number in matches:
if matcher(match):
if TRACE:
logger_debug('re_filt: filtering match: "{}"'.format(match))
continue
yield key, match, line, line_number
matcher = re.compile(pattern, re.UNICODE | re.IGNORECASE).match
return re_filt
# A good reference page of email address regex is:
# http://fightingforalostcause.net/misc/2006/compare-email-regex.php email
# regex from http://www.regular-expressions.info/regexbuddy/email.html
def emails_regex():
return re.compile('\\b[A-Z0-9._%-]+@[A-Z0-9.-]+\\.[A-Z]{2,4}\\b', re.IGNORECASE)
def find_emails(location, unique=True):
"""
Yield an iterable of (email, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
"""
patterns = [('emails', emails_regex(),)]
matches = find(location, patterns)
if TRACE_EMAIL:
matches = list(matches)
for r in matches:
logger_debug('find_emails: match:', r)
filters = (junk_email_domains_filter, uninteresting_emails_filter)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, email, _line, line_number in matches:
yield email, line_number
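# Illustrative usage sketch: per the find() docstring, a list of lines can stand
# in for a file location, which makes find_emails() easy to exercise in tests
# (the address below is made up).
#
#     lines = ['Contact: build-team@apache.org\n']
#     for email, line_number in find_emails(lines):
#         print(line_number, email)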
def junk_email_domains_filter(matches):
"""
Given an iterable of email matches, return an iterable where email with
common uninteresting domains have been removed, such as local, non public
or example.com emails.
"""
for key, email, line, line_number in matches:
if is_good_email_domain(email):
yield key, email, line, line_number
else:
if TRACE:
logger_debug(f'junk_email_domains_filter: !is_good_host: {email!r}')
def is_good_email_domain(email):
"""
Return True if the domain of the ``email`` string is valid, False otherwise
such as for local, non public domains.
For example::
>>> is_good_email_domain("[email protected]")
True
>>> is_good_email_domain("[email protected]")
False
>>> is_good_email_domain("[email protected]")
False
"""
if not email:
return False
_dest, _, server = email.partition('@')
if not is_good_host(server):
return False
fake_url = f'http://{server}'
_host, domain = url_host_domain(fake_url)
if not is_good_host(domain):
return False
return True
def uninteresting_emails_filter(matches):
"""
Given an iterable of emails matches, return an iterable where common
uninteresting emails have been removed.
"""
for key, email, line, line_number in matches:
good_email = finder_data.classify_email(email)
if not good_email:
continue
yield key, email, line, line_number
# TODO: consider: http://www.regexguru.com/2008/11/detecting-urls-in-a-block-of-text/
# TODO: consider: http://blog.codinghorror.com/the-problem-with-urls/
schemes = 'https?|ftps?|sftp|rsync|ssh|svn|git|hg|https?\\+git|https?\\+svn|https?\\+hg'
url_body = '[^\\s<>\\[\\]"]'
def urls_regex():
# no space, no < >, no [ ] and no double quote
return re.compile('''
(
# URLs with schemes
(?:%(schemes)s)://%(url_body)s+
|
# common URLs prefix without schemes
(?:www|ftp)\\.%(url_body)s+
|
# git style [email protected]:christophercantu/pipeline.git
git\\@%(url_body)s+:%(url_body)s+\\.git
)''' % globals()
, re.UNICODE | re.VERBOSE | re.IGNORECASE)
INVALID_URLS_PATTERN = '((?:' + schemes + ')://([$%*/_])+)'
def find_urls(location, unique=True):
"""
Yield an iterable of (url, line_number) found in file at ``location``.
Only return unique items if ``unique`` is True.
`location` can be a list of strings for testing.
"""
patterns = [('urls', urls_regex(),)]
matches = find(location, patterns)
if TRACE:
matches = list(matches)
for m in matches:
logger_debug('url match:', m)
# the order of filters IS important
filters = (
verbatim_crlf_url_cleaner,
end_of_url_cleaner,
empty_urls_filter,
scheme_adder,
user_pass_cleaning_filter,
build_regex_filter(INVALID_URLS_PATTERN),
canonical_url_cleaner,
junk_url_hosts_filter,
junk_urls_filter,
)
if unique:
filters += (unique_filter,)
matches = apply_filters(matches, *filters)
for _key, url, _line, line_number in matches:
if TRACE_URL:
logger_debug('find_urls: line_number:', line_number, '_line:', repr(_line),
'type(url):', type(url), 'url:', repr(url))
yield str(url), line_number
EMPTY_URLS = set(['https', 'http', 'ftp', 'www', ])
def empty_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable without empty URLs.
"""
for key, match, line, line_number in matches:
junk = match.lower().strip(string.punctuation).strip()
if not junk or junk in EMPTY_URLS:
if TRACE:
logger_debug('empty_urls_filter: filtering match: %(match)r' % locals())
continue
yield key, match, line, line_number
def verbatim_crlf_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where literal end of
lines and carriage return characters that may show up as-is, un-encoded in
a URL have been removed.
"""
# FIXME: when is this possible and could happen?
for key, url, line, line_number in matches:
if not url.endswith('/'):
url = url.replace('\n', '')
url = url.replace('\r', '')
yield key, url, line, line_number
def end_of_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where junk characters
commonly found at the end of a URL are removed.
This is not entirely correct, but works practically.
"""
for key, url, line, line_number in matches:
if not url.endswith('/'):
            url = url.replace(u'&lt;', u'<')
            url = url.replace(u'&gt;', u'>')
            url = url.replace(u'&amp;', u'&')
url = url.rstrip(string.punctuation)
url = url.split(u'\\')[0]
url = url.split(u'<')[0]
url = url.split(u'>')[0]
url = url.split(u'(')[0]
url = url.split(u')')[0]
url = url.split(u'[')[0]
url = url.split(u']')[0]
url = url.split(u'"')[0]
url = url.split(u"'")[0]
yield key, url, line, line_number
non_standard_urls_prefix = ('git@',)
def is_filterable(url):
"""
Return True if a url is eligible for filtering. Certain URLs should not pass
through certain filters (such as a [email protected] style urls)
"""
return not url.startswith(non_standard_urls_prefix)
def scheme_adder(matches):
"""
Add a fake http:// scheme if there was none.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
match = add_fake_scheme(match)
yield key, match, line, line_number
def add_fake_scheme(url):
"""
Add a fake http:// scheme to URL if has none.
"""
if not has_scheme(url):
url = 'http://' + url.lstrip(':/').strip()
return url
def has_scheme(url):
"""
Return True if url has a scheme.
"""
return re.match('^(?:%(schemes)s)://.*' % globals(), url, re.UNICODE)
def user_pass_cleaning_filter(matches):
"""
Given an iterable of URL matches, return an iterable where user and
password are removed from the URLs host.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, _domain = url_host_domain(match)
if not host:
if TRACE:
logger_debug('user_pass_cleaning_filter: '
'filtering match(no host): %(match)r' % locals())
continue
if '@' in host:
# strips any user/pass
host = host.split(u'@')[-1]
yield key, match, line, line_number
DEFAULT_PORTS = {
'http': 80,
'https': 443
}
def canonical_url(uri):
"""
Return the canonical representation of a given URI.
This assumes the `uri` has a scheme.
* When a default port corresponding for the scheme is explicitly declared
(such as port 80 for http), the port will be removed from the output.
* Fragments '#' are not removed.
* Params and query string arguments are not reordered.
"""
try:
parsed = urlpy.parse(uri)
if not parsed:
return
if TRACE:
logger_debug('canonical_url: parsed:', parsed)
sanitized = parsed.sanitize()
if TRACE:
logger_debug('canonical_url: sanitized:', sanitized)
punycoded = sanitized.punycode()
if TRACE:
logger_debug('canonical_url: punycoded:', punycoded)
deport = punycoded.remove_default_port()
if TRACE:
logger_debug('canonical_url: deport:', deport)
        return str(deport)
except Exception as e:
if TRACE:
logger_debug('canonical_url: failed for:', uri, 'with:', repr(e))
# ignore it
pass
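# Illustrative usage sketch (exact output depends on urlpy's normalization rules):
#
#     canonical_url('HTTP://Example.ORG:80/path?x=1')
#     # host case and the default port are normalized per the docstring above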
def canonical_url_cleaner(matches):
"""
Given an iterable of URL matches, return an iterable where URLs have been
canonicalized.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
canonical = canonical_url(match)
if TRACE:
logger_debug('canonical_url_cleaner: '
'match=%(match)r, canonical=%(canonical)r' % locals())
match = canonical
if match:
yield key, match , line, line_number
IP_V4_RE = '^(\\d{1,3}\\.){0,3}\\d{1,3}$'
def is_ip_v4(s):
return re.compile(IP_V4_RE, re.UNICODE).match(s)
IP_V6_RE = (
'^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$'
'|'
'^([0-9a-f]{0,4}:){2,6}(\\d{1,3}\\.){0,3}\\d{1,3}$'
)
def is_ip_v6(s):
"""
    Return True if string s is an IP v6 address.
"""
return re.compile(IP_V6_RE, re.UNICODE).match(s)
def is_ip(s):
"""
    Return True if string s is an IP address.
"""
return is_ip_v4(s) or is_ip_v6(s)
def get_ip(s):
"""
    Return an ipaddress object if string s is a valid IP address, False otherwise.
"""
if not is_ip(s):
return False
try:
ip = ipaddress.ip_address(str(s))
return ip
except ValueError:
return False
def is_private_ip(ip):
"""
Return true if ip object is a private or local IP.
"""
if ip:
if isinstance(ip, ipaddress.IPv4Address):
private = (
ip.is_reserved
or ip.is_private
or ip.is_multicast
or ip.is_unspecified
or ip.is_loopback
or ip.is_link_local
)
else:
            private = (
ip.is_multicast
or ip.is_reserved
or ip.is_link_local
or ip.is_site_local
or ip.is_private
or ip.is_unspecified
or ip.is_loopback
)
return private
def is_good_host(host):
"""
Return True if the host is not some local or uninteresting host.
"""
if not host:
return False
ip = get_ip(host)
if ip:
if is_private_ip(ip):
return False
return finder_data.classify_ip(host)
# at this stage we have a host name, not an IP
if '.' not in host:
# private hostnames not in a domain, including localhost
return False
good_host = finder_data.classify_host(host)
return good_host
def url_host_domain(url):
"""
Return a tuple of the (host, domain) of a URL or None. Assumes that the
URL has a scheme.
"""
try:
parsed = urlpy.parse(url)
host = parsed.host
if not host:
return None, None
domain = parsed.pld
return host.lower(), domain.lower()
except Exception as e:
if TRACE:
logger_debug('url_host_domain: failed for:', url, 'with:', repr(e))
# ignore it
return None, None
def junk_url_hosts_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting hosts or domains have been removed, such as local,
non public or example.com URLs.
"""
for key, match, line, line_number in matches:
if is_filterable(match):
host, domain = url_host_domain(match)
if not is_good_host(host):
if TRACE:
logger_debug('junk_url_hosts_filter: '
'!is_good_host:%(host)r): %(match)r' % locals())
continue
if not is_good_host(domain) and not is_ip(host):
if TRACE:
logger_debug('junk_url_hosts_filter: ''!is_good_host:%(domain)r '
'and !is_ip:%(host)r: %(match)r' % locals())
continue
yield key, match, line, line_number
def junk_urls_filter(matches):
"""
Given an iterable of URL matches, return an iterable where URLs with
common uninteresting URLs, or uninteresting URL hosts or domains have been
removed, such as local, non public or example.com URLs.
"""
for key, match, line, line_number in matches:
good_url = finder_data.classify_url(match)
if not good_url:
if TRACE:
logger_debug('junk_url_filter: %(match)r' % locals())
continue
yield key, match, line, line_number
def find_pattern(location, pattern, unique=False):
"""
Find regex pattern in the text lines of file at location.
Return all match groups joined as one unicode string.
Only return unique items if unique is True.
"""
pattern = re.compile(pattern, re.UNICODE | re.IGNORECASE)
matches = find(location, [(None, pattern,)])
if unique:
matches = unique_filter(matches)
for _key, match , _line, line_number in matches:
yield match, line_number
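if __name__ == "__main__":
    # Illustrative sketch only: per the find() docstring, a list of lines can be
    # passed in place of a file location. Running this requires scancode-toolkit's
    # runtime dependencies (urlpy, commoncode, textcode, cluecode data files).
    sample_lines = [
        'See http://www.apache.org/licenses/LICENSE-2.0 for the license text.\n',
        'Sources: https://github.com/nexB/scancode-toolkit\n',
    ]
    for url, line_number in find_urls(sample_lines):
        print(line_number, url)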
/geezlibs1-2.0.0-py3-none-any.whl/geezlibs/filters.py
import inspect
import re
from typing import Callable, Union, List, Pattern
import geezlibs
from geezlibs import enums
from geezlibs.types import Message, CallbackQuery, InlineQuery, InlineKeyboardMarkup, ReplyKeyboardMarkup, Update
class Filter:
async def __call__(self, client: "geezlibs.Client", update: Update):
raise NotImplementedError
def __invert__(self):
return InvertFilter(self)
def __and__(self, other):
return AndFilter(self, other)
def __or__(self, other):
return OrFilter(self, other)
class InvertFilter(Filter):
def __init__(self, base):
self.base = base
async def __call__(self, client: "geezlibs.Client", update: Update):
if inspect.iscoroutinefunction(self.base.__call__):
x = await self.base(client, update)
else:
x = await client.loop.run_in_executor(
client.executor,
self.base,
client, update
)
return not x
class AndFilter(Filter):
def __init__(self, base, other):
self.base = base
self.other = other
async def __call__(self, client: "geezlibs.Client", update: Update):
if inspect.iscoroutinefunction(self.base.__call__):
x = await self.base(client, update)
else:
x = await client.loop.run_in_executor(
client.executor,
self.base,
client, update
)
# short circuit
if not x:
return False
if inspect.iscoroutinefunction(self.other.__call__):
y = await self.other(client, update)
else:
y = await client.loop.run_in_executor(
client.executor,
self.other,
client, update
)
return x and y
class OrFilter(Filter):
def __init__(self, base, other):
self.base = base
self.other = other
async def __call__(self, client: "geezlibs.Client", update: Update):
if inspect.iscoroutinefunction(self.base.__call__):
x = await self.base(client, update)
else:
x = await client.loop.run_in_executor(
client.executor,
self.base,
client, update
)
# short circuit
if x:
return True
if inspect.iscoroutinefunction(self.other.__call__):
y = await self.other(client, update)
else:
y = await client.loop.run_in_executor(
client.executor,
self.other,
client, update
)
return x or y
CUSTOM_FILTER_NAME = "CustomFilter"
def create(func: Callable, name: str = None, **kwargs) -> Filter:
"""Easily create a custom filter.
Custom filters give you extra control over which updates are allowed or not to be processed by your handlers.
Parameters:
func (``Callable``):
A function that accepts three positional arguments *(filter, client, update)* and returns a boolean: True if the
update should be handled, False otherwise.
The *filter* argument refers to the filter itself and can be used to access keyword arguments (read below).
The *client* argument refers to the :obj:`~geezlibs.Client` that received the update.
The *update* argument type will vary depending on which `Handler <handlers>`_ is coming from.
For example, in a :obj:`~geezlibs.handlers.MessageHandler` the *update* argument will be a :obj:`~geezlibs.types.Message`; in a :obj:`~geezlibs.handlers.CallbackQueryHandler` the *update* will be a :obj:`~geezlibs.types.CallbackQuery`.
Your function body can then access the incoming update attributes and decide whether to allow it or not.
name (``str``, *optional*):
Your filter's name. Can be anything you like.
Defaults to "CustomFilter".
**kwargs (``any``, *optional*):
Any keyword argument you would like to pass. Useful when creating parameterized custom filters, such as
:meth:`~geezlibs.filters.command` or :meth:`~geezlibs.filters.regex`.
"""
return type(
name or func.__name__ or CUSTOM_FILTER_NAME,
(Filter,),
{"__call__": func, **kwargs}
)()
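# Illustrative sketch (the names below are examples, not part of this module): a
# parameterized custom filter built with create(); the keyword argument becomes
# an attribute on the filter instance and is read back through `flt`.
#
#     async def _long_text(flt, _client, message):
#         return bool(message.text) and len(message.text) > flt.min_len
#
#     long_text = create(_long_text, "LongTextFilter", min_len=200)
#     # combinable with the bundled filters, e.g. `text & long_text`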
# region all_filter
async def all_filter(_, __, ___):
return True
all = create(all_filter)
"""Filter all messages."""
# endregion
# region me_filter
async def me_filter(_, __, m: Message):
return bool(m.from_user and m.from_user.is_self or getattr(m, "outgoing", False))
me = create(me_filter)
"""Filter messages generated by you yourself."""
# endregion
# region bot_filter
async def bot_filter(_, __, m: Message):
return bool(m.from_user and m.from_user.is_bot)
bot = create(bot_filter)
"""Filter messages coming from bots."""
# endregion
# region incoming_filter
async def incoming_filter(_, __, m: Message):
return not m.outgoing
incoming = create(incoming_filter)
"""Filter incoming messages. Messages sent to your own chat (Saved Messages) are also recognised as incoming."""
# endregion
# region outgoing_filter
async def outgoing_filter(_, __, m: Message):
return m.outgoing
outgoing = create(outgoing_filter)
"""Filter outgoing messages. Messages sent to your own chat (Saved Messages) are not recognized as outgoing."""
# endregion
# region text_filter
async def text_filter(_, __, m: Message):
return bool(m.text)
text = create(text_filter)
"""Filter text messages."""
# endregion
# region reply_filter
async def reply_filter(_, __, m: Message):
return bool(m.reply_to_message_id)
reply = create(reply_filter)
"""Filter messages that are replies to other messages."""
# endregion
# region forwarded_filter
async def forwarded_filter(_, __, m: Message):
return bool(m.forward_date)
forwarded = create(forwarded_filter)
"""Filter messages that are forwarded."""
# endregion
# region caption_filter
async def caption_filter(_, __, m: Message):
return bool(m.caption)
caption = create(caption_filter)
"""Filter media messages that contain captions."""
# endregion
# region audio_filter
async def audio_filter(_, __, m: Message):
return bool(m.audio)
audio = create(audio_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Audio` objects."""
# endregion
# region document_filter
async def document_filter(_, __, m: Message):
return bool(m.document)
document = create(document_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Document` objects."""
# endregion
# region photo_filter
async def photo_filter(_, __, m: Message):
return bool(m.photo)
photo = create(photo_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Photo` objects."""
# endregion
# region sticker_filter
async def sticker_filter(_, __, m: Message):
return bool(m.sticker)
sticker = create(sticker_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Sticker` objects."""
# endregion
# region animation_filter
async def animation_filter(_, __, m: Message):
return bool(m.animation)
animation = create(animation_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Animation` objects."""
# endregion
# region game_filter
async def game_filter(_, __, m: Message):
return bool(m.game)
game = create(game_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Game` objects."""
# endregion
# region video_filter
async def video_filter(_, __, m: Message):
return bool(m.video)
video = create(video_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Video` objects."""
# endregion
# region media_group_filter
async def media_group_filter(_, __, m: Message):
return bool(m.media_group_id)
media_group = create(media_group_filter)
"""Filter messages containing photos or videos being part of an album."""
# endregion
# region voice_filter
async def voice_filter(_, __, m: Message):
return bool(m.voice)
voice = create(voice_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Voice` note objects."""
# endregion
# region video_note_filter
async def video_note_filter(_, __, m: Message):
return bool(m.video_note)
video_note = create(video_note_filter)
"""Filter messages that contain :obj:`~geezlibs.types.VideoNote` objects."""
# endregion
# region contact_filter
async def contact_filter(_, __, m: Message):
return bool(m.contact)
contact = create(contact_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Contact` objects."""
# endregion
# region location_filter
async def location_filter(_, __, m: Message):
return bool(m.location)
location = create(location_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Location` objects."""
# endregion
# region venue_filter
async def venue_filter(_, __, m: Message):
return bool(m.venue)
venue = create(venue_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Venue` objects."""
# endregion
# region web_page_filter
async def web_page_filter(_, __, m: Message):
return bool(m.web_page)
web_page = create(web_page_filter)
"""Filter messages sent with a webpage preview."""
# endregion
# region poll_filter
async def poll_filter(_, __, m: Message):
return bool(m.poll)
poll = create(poll_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Poll` objects."""
# endregion
# region dice_filter
async def dice_filter(_, __, m: Message):
return bool(m.dice)
dice = create(dice_filter)
"""Filter messages that contain :obj:`~geezlibs.types.Dice` objects."""
# endregion
# region private_filter
async def private_filter(_, __, m: Message):
return bool(m.chat and m.chat.type in {enums.ChatType.PRIVATE, enums.ChatType.BOT})
private = create(private_filter)
"""Filter messages sent in private chats."""
# endregion
# region group_filter
async def group_filter(_, __, m: Message):
return bool(m.chat and m.chat.type in {enums.ChatType.GROUP, enums.ChatType.SUPERGROUP})
group = create(group_filter)
"""Filter messages sent in group or supergroup chats."""
# endregion
# region channel_filter
async def channel_filter(_, __, m: Message):
return bool(m.chat and m.chat.type == enums.ChatType.CHANNEL)
channel = create(channel_filter)
"""Filter messages sent in channels."""
# endregion
# region new_chat_members_filter
async def new_chat_members_filter(_, __, m: Message):
return bool(m.new_chat_members)
new_chat_members = create(new_chat_members_filter)
"""Filter service messages for new chat members."""
# endregion
# region left_chat_member_filter
async def left_chat_member_filter(_, __, m: Message):
return bool(m.left_chat_member)
left_chat_member = create(left_chat_member_filter)
"""Filter service messages for members that left the chat."""
# endregion
# region new_chat_title_filter
async def new_chat_title_filter(_, __, m: Message):
return bool(m.new_chat_title)
new_chat_title = create(new_chat_title_filter)
"""Filter service messages for new chat titles."""
# endregion
# region new_chat_photo_filter
async def new_chat_photo_filter(_, __, m: Message):
return bool(m.new_chat_photo)
new_chat_photo = create(new_chat_photo_filter)
"""Filter service messages for new chat photos."""
# endregion
# region delete_chat_photo_filter
async def delete_chat_photo_filter(_, __, m: Message):
return bool(m.delete_chat_photo)
delete_chat_photo = create(delete_chat_photo_filter)
"""Filter service messages for deleted photos."""
# endregion
# region group_chat_created_filter
async def group_chat_created_filter(_, __, m: Message):
return bool(m.group_chat_created)
group_chat_created = create(group_chat_created_filter)
"""Filter service messages for group chat creations."""
# endregion
# region supergroup_chat_created_filter
async def supergroup_chat_created_filter(_, __, m: Message):
return bool(m.supergroup_chat_created)
supergroup_chat_created = create(supergroup_chat_created_filter)
"""Filter service messages for supergroup chat creations."""
# endregion
# region channel_chat_created_filter
async def channel_chat_created_filter(_, __, m: Message):
return bool(m.channel_chat_created)
channel_chat_created = create(channel_chat_created_filter)
"""Filter service messages for channel chat creations."""
# endregion
# region migrate_to_chat_id_filter
async def migrate_to_chat_id_filter(_, __, m: Message):
return bool(m.migrate_to_chat_id)
migrate_to_chat_id = create(migrate_to_chat_id_filter)
"""Filter service messages that contain migrate_to_chat_id."""
# endregion
# region migrate_from_chat_id_filter
async def migrate_from_chat_id_filter(_, __, m: Message):
return bool(m.migrate_from_chat_id)
migrate_from_chat_id = create(migrate_from_chat_id_filter)
"""Filter service messages that contain migrate_from_chat_id."""
# endregion
# region pinned_message_filter
async def pinned_message_filter(_, __, m: Message):
return bool(m.pinned_message)
pinned_message = create(pinned_message_filter)
"""Filter service messages for pinned messages."""
# endregion
# region game_high_score_filter
async def game_high_score_filter(_, __, m: Message):
return bool(m.game_high_score)
game_high_score = create(game_high_score_filter)
"""Filter service messages for game high scores."""
# endregion
# region reply_keyboard_filter
async def reply_keyboard_filter(_, __, m: Message):
return isinstance(m.reply_markup, ReplyKeyboardMarkup)
reply_keyboard = create(reply_keyboard_filter)
"""Filter messages containing reply keyboard markups"""
# endregion
# region inline_keyboard_filter
async def inline_keyboard_filter(_, __, m: Message):
return isinstance(m.reply_markup, InlineKeyboardMarkup)
inline_keyboard = create(inline_keyboard_filter)
"""Filter messages containing inline keyboard markups"""
# endregion
# region mentioned_filter
async def mentioned_filter(_, __, m: Message):
return bool(m.mentioned)
mentioned = create(mentioned_filter)
"""Filter messages containing mentions"""
# endregion
# region via_bot_filter
async def via_bot_filter(_, __, m: Message):
return bool(m.via_bot)
via_bot = create(via_bot_filter)
"""Filter messages sent via inline bots"""
# endregion
# region video_chat_started_filter
async def video_chat_started_filter(_, __, m: Message):
return bool(m.video_chat_started)
video_chat_started = create(video_chat_started_filter)
"""Filter messages for started video chats"""
# endregion
# region video_chat_ended_filter
async def video_chat_ended_filter(_, __, m: Message):
return bool(m.video_chat_ended)
video_chat_ended = create(video_chat_ended_filter)
"""Filter messages for ended video chats"""
# endregion
# region video_chat_members_invited_filter
async def video_chat_members_invited_filter(_, __, m: Message):
return bool(m.video_chat_members_invited)
video_chat_members_invited = create(video_chat_members_invited_filter)
"""Filter messages for voice chat invited members"""
# endregion
# region service_filter
async def service_filter(_, __, m: Message):
return bool(m.service)
service = create(service_filter)
"""Filter service messages.
A service message contains any of the following fields set: *left_chat_member*,
*new_chat_title*, *new_chat_photo*, *delete_chat_photo*, *group_chat_created*, *supergroup_chat_created*,
*channel_chat_created*, *migrate_to_chat_id*, *migrate_from_chat_id*, *pinned_message*, *game_score*,
*video_chat_started*, *video_chat_ended*, *video_chat_members_invited*.
"""
# endregion
# region media_filter
async def media_filter(_, __, m: Message):
return bool(m.media)
media = create(media_filter)
"""Filter media messages.
A media message contains any of the following fields set: *audio*, *document*, *photo*, *sticker*, *video*,
*animation*, *voice*, *video_note*, *contact*, *location*, *venue*, *poll*.
"""
# endregion
# region scheduled_filter
async def scheduled_filter(_, __, m: Message):
return bool(m.scheduled)
scheduled = create(scheduled_filter)
"""Filter messages that have been scheduled (not yet sent)."""
# endregion
# region from_scheduled_filter
async def from_scheduled_filter(_, __, m: Message):
return bool(m.from_scheduled)
from_scheduled = create(from_scheduled_filter)
"""Filter new automatically sent messages that were previously scheduled."""
# endregion
# region linked_channel_filter
async def linked_channel_filter(_, __, m: Message):
return bool(m.forward_from_chat and not m.from_user)
linked_channel = create(linked_channel_filter)
"""Filter messages that are automatically forwarded from the linked channel to the group chat."""
# endregion
# region command_filter
def command(commands: Union[str, List[str]], prefixes: Union[str, List[str]] = "/", case_sensitive: bool = False):
"""Filter commands, i.e.: text messages starting with "/" or any other custom prefix.
Parameters:
commands (``str`` | ``list``):
The command or list of commands as string the filter should look for.
Examples: "start", ["start", "help", "settings"]. When a message text containing
a command arrives, the command itself and its arguments will be stored in the *command*
field of the :obj:`~geezlibs.types.Message`.
prefixes (``str`` | ``list``, *optional*):
A prefix or a list of prefixes as string the filter should look for.
Defaults to "/" (slash). Examples: ".", "!", ["/", "!", "."], list(".:!").
Pass None or "" (empty string) to allow commands with no prefix at all.
case_sensitive (``bool``, *optional*):
Pass True if you want your command(s) to be case sensitive. Defaults to False.
Examples: when True, command="Start" would trigger /Start but not /start.
"""
command_re = re.compile(r"([\"'])(.*?)(?<!\\)\1|(\S+)")
async def func(flt, client: geezlibs.Client, message: Message):
username = client.me.username or ""
text = message.text or message.caption
message.command = None
if not text:
return False
for prefix in flt.prefixes:
if not text.startswith(prefix):
continue
without_prefix = text[len(prefix):]
for cmd in flt.commands:
if not re.match(rf"^(?:{cmd}(?:@?{username})?)(?:\s|$)", without_prefix,
flags=re.IGNORECASE if not flt.case_sensitive else 0):
continue
without_command = re.sub(rf"{cmd}(?:@?{username})?\s?", "", without_prefix, count=1,
flags=re.IGNORECASE if not flt.case_sensitive else 0)
# match.groups are 1-indexed, group(1) is the quote, group(2) is the text
# between the quotes, group(3) is unquoted, whitespace-split text
# Remove the escape character from the arguments
message.command = [cmd] + [
re.sub(r"\\([\"'])", r"\1", m.group(2) or m.group(3) or "")
for m in command_re.finditer(without_command)
]
return True
return False
commands = commands if isinstance(commands, list) else [commands]
commands = {c if case_sensitive else c.lower() for c in commands}
prefixes = [] if prefixes is None else prefixes
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
prefixes = set(prefixes) if prefixes else {""}
return create(
func,
"CommandFilter",
commands=commands,
prefixes=prefixes,
case_sensitive=case_sensitive
)
# endregion
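# Illustrative usage sketch (the @app.on_message decorator is assumed from the
# surrounding client API, not defined in this module):
#
#     @app.on_message(command(["start", "help"], prefixes=["/", "!"]))
#     async def start_handler(client, message):
#         await message.reply_text("command parts: {}".format(message.command))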
def regex(pattern: Union[str, Pattern], flags: int = 0):
"""Filter updates that match a given regular expression pattern.
Can be applied to handlers that receive one of the following updates:
- :obj:`~geezlibs.types.Message`: The filter will match ``text`` or ``caption``.
- :obj:`~geezlibs.types.CallbackQuery`: The filter will match ``data``.
- :obj:`~geezlibs.types.InlineQuery`: The filter will match ``query``.
When a pattern matches, all the `Match Objects <https://docs.python.org/3/library/re.html#match-objects>`_ are
stored in the ``matches`` field of the update object itself.
Parameters:
pattern (``str`` | ``Pattern``):
The regex pattern as string or as pre-compiled pattern.
flags (``int``, *optional*):
Regex flags.
"""
async def func(flt, _, update: Update):
if isinstance(update, Message):
value = update.text or update.caption
elif isinstance(update, CallbackQuery):
value = update.data
elif isinstance(update, InlineQuery):
value = update.query
else:
raise ValueError(f"Regex filter doesn't work with {type(update)}")
if value:
update.matches = list(flt.p.finditer(value)) or None
return bool(update.matches)
return create(
func,
"RegexFilter",
p=pattern if isinstance(pattern, Pattern) else re.compile(pattern, flags)
)
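# Illustrative usage sketch: capture ticket references from message text or
# captions; the Match objects land on `update.matches` as described above.
#
#     ticket = regex(r"\bBUG-(\d+)\b")
#     # e.g. @app.on_message(ticket)  (handler decorator assumed from the client API)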
# noinspection PyPep8Naming
class user(Filter, set):
"""Filter messages coming from one or more users.
You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
users container.
Parameters:
users (``int`` | ``str`` | ``list``):
Pass one or more user ids/usernames to filter users.
For you yourself, "me" or "self" can be used as well.
Defaults to None (no users).
"""
def __init__(self, users: Union[int, str, List[Union[int, str]]] = None):
users = [] if users is None else users if isinstance(users, list) else [users]
super().__init__(
"me" if u in ["me", "self"]
else u.lower().strip("@") if isinstance(u, str)
else u for u in users
)
async def __call__(self, _, message: Message):
return (message.from_user
and (message.from_user.id in self
or (message.from_user.username
and message.from_user.username.lower() in self)
or ("me" in self
and message.from_user.is_self)))
# noinspection PyPep8Naming
class chat(Filter, set):
"""Filter messages coming from one or more chats.
You can use `set bound methods <https://docs.python.org/3/library/stdtypes.html#set>`_ to manipulate the
chats container.
Parameters:
chats (``int`` | ``str`` | ``list``):
Pass one or more chat ids/usernames to filter chats.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
Defaults to None (no chats).
"""
def __init__(self, chats: Union[int, str, List[Union[int, str]]] = None):
chats = [] if chats is None else chats if isinstance(chats, list) else [chats]
super().__init__(
"me" if c in ["me", "self"]
else c.lower().strip("@") if isinstance(c, str)
else c for c in chats
)
async def __call__(self, _, message: Message):
return (message.chat
and (message.chat.id in self
or (message.chat.username
and message.chat.username.lower() in self)
or ("me" in self
and message.from_user
and message.from_user.is_self
and not message.outgoing)))
/PyScenes-0.0.1-py3-none-any.whl/pyscenes/pyscenes.py
# this code is a modification of pygame_functions by Steve Paget
# https://github.com/StevePaget/Pygame_Functions
import pygame
import sys
import os
spriteGroup = pygame.sprite.OrderedUpdates()
textboxGroup = pygame.sprite.OrderedUpdates()
hiddenSprites = pygame.sprite.OrderedUpdates()
screenRefresh = True
class Mixer:
def __init__(self):
self.paused = False
self.initialize()
def initialize(self):
pygame.mixer.pre_init(44100, -16, 2, 512)
pygame.mixer.init()
def pause(self):
pygame.mixer.music.pause()
self.paused = True
def play(self, loops=0):
if self.paused:
pygame.mixer.music.unpause()
else:
pygame.mixer.music.play(loops)
self.paused = False
def stop(self):
pygame.mixer.music.stop()
def loadMusic(self, path):
pygame.mixer.music.load(path)
class Display:
# screen is a pygame object
def __init__(self, width, height):
self.width = width
self.height = height
self.screen, self.background = self.createScreen()
# see screenSize function
def createScreen(self):
screen = pygame.display.set_mode([self.width, self.height])
background = Background(screen)
screen.fill(background.colour)
background.surface = screen.copy()
pygame.display.update()
return screen, background
def update(self):
spriteRects = spriteGroup.draw(self.screen)
textboxRects = textboxGroup.draw(self.screen)
pygame.display.update()
spriteGroup.clear(self.screen, self.background.surface)
textboxGroup.clear(self.screen, self.background.surface)
# contains methods related to managing game
# should be self-contained, no need to access anything in here
# inputs are only processed for quit events here
# all other input processing should be done through the scene itself
class Game:
def __init__(self, width, height, fps):
self.fps = fps
self.initialize()
self.clock = pygame.time.Clock()
self.display = Display(width, height)
self.mixer = Mixer()
def initialize(self):
pygame.init()
# see tick method
def tick(self):
self.clock.tick(self.fps)
return self.clock.get_fps()
# "main" method for running game, controlling system level
def run_game(self, starting_scene):
self.current_scene = starting_scene
self.current_scene.setup()
# Game Loop
while self.current_scene is not None:
pressed_keys = pygame.key.get_pressed()
filtered_events = self.filter_events(pressed_keys)
self.current_scene.process_input(filtered_events, pressed_keys)
self.current_scene.update()
self.current_scene.render(self.display.screen)
self.current_scene = self.next_scene()
self.display.update()
self.tick()
print("Game made with PyScenes.")
def next_scene(self):
# if scene called switch, clean it up, then set it to old scene state
if self.current_scene.next is not self.current_scene:
self.current_scene.cleanup()
# if game is not terminating, setup next scene
if self.current_scene.next is not None:
print(
"Switching from {} to {}...".format(
self.current_scene, self.current_scene.next
)
)
self.current_scene.next.setup()
return self.current_scene.next
# define external game events here
# all other event handling should be done in scene class
def filter_events(self, pressed_keys):
filtered_events = []
for event in pygame.event.get():
quit_attempt = False
if event.type == pygame.QUIT:
quit_attempt = True
elif event.type == pygame.KEYDOWN:
alt_pressed = pressed_keys[pygame.K_LALT] or pressed_keys[pygame.K_RALT]
if event.key == pygame.K_ESCAPE:
quit_attempt = True
elif event.key == pygame.K_F4 and alt_pressed:
quit_attempt = True
if quit_attempt:
self.current_scene.terminate()
else:
filtered_events.append(event)
return filtered_events
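# Illustrative usage sketch (the Scene type and its hooks are assumptions based on
# what run_game() calls above: setup(), process_input(), update(), render(),
# cleanup(), terminate() and a `next` attribute):
#
#     class TitleScene(Scene):
#         def process_input(self, events, pressed_keys): ...
#         def update(self): ...
#         def render(self, screen): screen.fill((0, 0, 0))
#
#     game = Game(width=640, height=480, fps=60)
#     game.run_game(TitleScene())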
keydict = {
"space": pygame.K_SPACE,
"esc": pygame.K_ESCAPE,
"up": pygame.K_UP,
"down": pygame.K_DOWN,
"left": pygame.K_LEFT,
"right": pygame.K_RIGHT,
"return": pygame.K_RETURN,
"a": pygame.K_a,
"b": pygame.K_b,
"c": pygame.K_c,
"d": pygame.K_d,
"e": pygame.K_e,
"f": pygame.K_f,
"g": pygame.K_g,
"h": pygame.K_h,
"i": pygame.K_i,
"j": pygame.K_j,
"k": pygame.K_k,
"l": pygame.K_l,
"m": pygame.K_m,
"n": pygame.K_n,
"o": pygame.K_o,
"p": pygame.K_p,
"q": pygame.K_q,
"r": pygame.K_r,
"s": pygame.K_s,
"t": pygame.K_t,
"u": pygame.K_u,
"v": pygame.K_v,
"w": pygame.K_w,
"x": pygame.K_x,
"y": pygame.K_y,
"z": pygame.K_z,
"1": pygame.K_1,
"2": pygame.K_2,
"3": pygame.K_3,
"4": pygame.K_4,
"5": pygame.K_5,
"6": pygame.K_6,
"7": pygame.K_7,
"8": pygame.K_8,
"9": pygame.K_9,
"0": pygame.K_0,
"num0": pygame.K_KP0,
"num1": pygame.K_KP1,
"num2": pygame.K_KP2,
"num3": pygame.K_KP3,
"num4": pygame.K_KP4,
"num5": pygame.K_KP5,
"num6": pygame.K_KP6,
"num7": pygame.K_KP7,
"num8": pygame.K_KP8,
"num9": pygame.K_KP9,
}
class Background:
def __init__(self, screen):
self.colour = pygame.Color("black")
self.screen = screen
def setTiles(self, tiles):
if type(tiles) is str:
self.tiles = [[loadImage(tiles)]]
elif type(tiles[0]) is str:
self.tiles = [[loadImage(i) for i in tiles]]
else:
self.tiles = [[loadImage(i) for i in row] for row in tiles]
self.stagePosX = 0
self.stagePosY = 0
self.tileWidth = self.tiles[0][0].get_width()
self.tileHeight = self.tiles[0][0].get_height()
self.screen.blit(self.tiles[0][0], [0, 0])
self.surface = self.screen.copy()
def setBackgroundImage(self, img):
self.setTiles(img)
def scroll(self, x, y):
self.stagePosX -= x
self.stagePosY -= y
col = (self.stagePosX % (self.tileWidth * len(self.tiles[0]))) // self.tileWidth
xOff = 0 - self.stagePosX % self.tileWidth
row = (self.stagePosY % (self.tileHeight * len(self.tiles))) // self.tileHeight
yOff = 0 - self.stagePosY % self.tileHeight
col2 = (
(self.stagePosX + self.tileWidth) % (self.tileWidth * len(self.tiles[0]))
) // self.tileWidth
row2 = (
(self.stagePosY + self.tileHeight) % (self.tileHeight * len(self.tiles))
) // self.tileHeight
        self.screen.blit(self.tiles[row][col], [xOff, yOff])
        self.screen.blit(self.tiles[row][col2], [xOff + self.tileWidth, yOff])
        self.screen.blit(self.tiles[row2][col], [xOff, yOff + self.tileHeight])
        self.screen.blit(
            self.tiles[row2][col2], [xOff + self.tileWidth, yOff + self.tileHeight]
        )
        self.surface = self.screen.copy()
    def setColour(self, colour):
        self.colour = parseColour(colour)
        self.screen.fill(self.colour)
        pygame.display.update()
        self.surface = self.screen.copy()
def loadImage(fileName, useColorKey=False):
if os.path.isfile(fileName):
image = pygame.image.load(fileName)
image = image.convert_alpha()
# Return the image
return image
else:
raise Exception(
"Error loading image: " + fileName + " - Check filename and path?"
)
if __name__ == "__main__":
print("""PyScenes is not designed to be run directly.""")
/django_bpp-1.0.9-py3-none-any.whl/eksport_pbn/views.py
from braces.views import LoginRequiredMixin
from crispy_forms.helper import FormHelper
from crispy_forms_foundation.layout import Layout, Fieldset, Submit
from django.contrib import messages
from django.http.response import HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import render
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView
from django.views.generic.list import ListView
from sendfile import sendfile
from bpp.models.struktura import Wydzial
from eksport_pbn.forms import EksportDoPBNForm
from eksport_pbn.models import PlikEksportuPBN
from eksport_pbn.tasks import eksport_pbn
class Generuj(LoginRequiredMixin, TemplateView):
template_name = "generuj.html"
def get(self, request, *args, **kwargs):
wydzial = Wydzial.objects.get(pk=kwargs['wydzial'])
rok = kwargs['rok']
eksport_pbn.delay(self.request.user.pk, kwargs['wydzial'], kwargs['rok'])
messages.info(self.request, "Rozpoczęto generowanie eksportu PBN dla %s, rok %s" % (wydzial.nazwa, rok))
return HttpResponseRedirect("..")
class SerwujPlik(LoginRequiredMixin, DetailView):
template_name = "generuj.html"
model = PlikEksportuPBN
def get(self, request, *args, **kwargs):
self.object = self.get_object()
if self.object.owner == self.request.user:
return sendfile(self.request,
self.object.file.path,
attachment=True,
attachment_filename=self.object.get_fn() + ".zip",
mimetype="application/octet-stream")
        return HttpResponseForbidden()
class WyborWydzialu(LoginRequiredMixin, ListView):
model = Wydzial
template_name = "wydzial_list.html"
def get_context_data(self, **kwargs):
return super(WyborWydzialu, self).get_context_data(**{
'lata': [2013, 2014, 2015],
'ostatnie_raporty': PlikEksportuPBN.objects.filter(owner=self.request.user).order_by('-pk').exclude(file=None)[:10]
})
class ZamowEksportDoPBN(LoginRequiredMixin, FormView):
form_class = EksportDoPBNForm
template_name = "zamow.html"
def get_context_data(self, **kwargs):
return super(ZamowEksportDoPBN, self).get_context_data(
ostatnie_raporty=PlikEksportuPBN.objects.filter(owner=self.request.user).exclude(file='').order_by('-pk')[:10],
)
def form_valid(self, form):
obj = form.instance
obj.owner = self.request.user
obj.save()
eksport_pbn.delay(obj.pk)
messages.info(self.request, "Rozpoczęto generowanie eksportu PBN dla %s - %s" % (obj.wydzial.nazwa, obj.get_rok_string()))
return HttpResponseRedirect('.')
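# Illustrative URL wiring sketch (pattern strings and names below are assumptions,
# not taken from this package's actual urls.py):
#
#     from django.urls import path
#     from eksport_pbn import views
#
#     urlpatterns = [
#         path("", views.ZamowEksportDoPBN.as_view(), name="eksport_pbn_zamow"),
#         path("plik/<int:pk>/", views.SerwujPlik.as_view(), name="eksport_pbn_plik"),
#     ]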
/actfw_core-2.2.1-py3-none-any.whl/actfw_core/_private/agent_app_protocol/service_server.py
import base64
import copy
import os
import socket
from pathlib import Path
from typing import Optional
import OpenSSL
from ..compat.queue import SimpleQueue
from ..schema.agent_app_protocol import ServiceKind, ServiceRequest, ServiceResponse, Status
from ..util.result import ResultTuple
from ..util.thread import LoopThread
class AgentAppProtocolServiceServer:
_path: Path
_pkey: OpenSSL.crypto.PKey
_thread: LoopThread
_listener: Optional[socket.socket]
def __init__(self, path: Path, pkey: OpenSSL.crypto.PKey) -> None:
self._path = path
self._pkey = pkey
dummy_ch = SimpleQueue()
self._thread = LoopThread(dummy_ch, self._loop_body)
self._listener = None
def path(self) -> Path:
return self._path
def startup(self) -> None:
try:
os.unlink(self._path)
except FileNotFoundError:
pass
self._listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._listener.bind(str(self._path))
self._path.chmod(0o664)
self._listener.settimeout(1)
self._listener.listen(1)
self._thread.startup()
def teardown(self) -> None:
self._thread.teardown()
def join(self) -> None:
self._thread.join()
def _loop_body(self) -> None:
assert self._listener is not None
try:
stream, _ = self._listener.accept()
except socket.timeout:
return
request, err = ServiceRequest.parse(stream)
if err is not None:
response = ServiceResponse(
copy.copy(request.id_),
Status.GENERAL_ERROR,
b"",
)
stream.sendall(response.to_bytes())
return
if request.kind == ServiceKind.RS_256:
response = self._handle_rs_256(request)
else:
response = ServiceResponse(
copy.copy(request.id_),
Status.GENERAL_ERROR,
b"",
)
stream.sendall(response.to_bytes())
def _handle_rs_256(self, request: ServiceRequest) -> ServiceResponse:
bs, err = _base64_decode_url_safe_no_pad(request.data)
if err is not None:
return ServiceResponse(
copy.copy(request.id_),
Status.GENERAL_ERROR,
b"",
)
bs = OpenSSL.crypto.sign(self._pkey, bs, "sha256")
bs = _base64_encode_url_safe_no_pad(bs)
return ServiceResponse(
copy.copy(request.id_),
Status.OK,
bs,
)
def _base64_decode_url_safe_no_pad(data: bytes) -> ResultTuple[bytes, bool]:
"""
Decode with config `base64::URL_SAFE_NO_PAD`.
c.f. https://docs.rs/base64/0.13.0/base64/constant.URL_SAFE_NO_PAD.html
"""
missing_pad_len = -(len(data) % -4)
data_ = data + b"=" * missing_pad_len
try:
return base64.urlsafe_b64decode(data_), None
except Exception:
return None, True
def _base64_encode_url_safe_no_pad(data: bytes) -> bytes:
"""
Encode with config `base64::URL_SAFE_NO_PAD`.
c.f. https://docs.rs/base64/0.13.0/base64/constant.URL_SAFE_NO_PAD.html
"""
return base64.urlsafe_b64encode(data).rstrip(b"=")
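# Illustrative round trip of the helpers above (values are examples): the
# "-(len(data) % -4)" expression restores the padding that URL_SAFE_NO_PAD strips.
#
#     encoded = _base64_encode_url_safe_no_pad(b"\x00\xfahello")   # b'APpoZWxsbw'
#     decoded, err = _base64_decode_url_safe_no_pad(encoded)       # (b'\x00\xfahello', None)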
|
PypiClean
|
/akida_models-1.2.0.tar.gz/akida_models-1.2.0/akida_models/imagenet/imagenet_train.py
|
import os
import argparse
import numpy as np
import tensorflow as tf
try:
import tensorflow_datasets as tfds
except ImportError:
tfds = None
import keras
from keras.callbacks import LearningRateScheduler
from keras import Sequential
from keras.optimizers import SGD, Adam
from tensorflow_addons.optimizers import LAMB
from keras.layers import Input
from keras.models import clone_model
import akida
from cnn2snn import convert
from quantizeml.layers import AddPositionEmbs, ClassToken
from .preprocessing import preprocess_image, DATA_AUGMENTATION
from ..training import (get_training_parser, freeze_model_before, print_history_stats, RestoreBest,
save_model)
from ..extract import extract_samples
from ..transformers.model_vit import apply_embedding_weights
from ..distiller import DeitDistiller
from ..param_scheduler import get_cosine_lr_scheduler
from ..utils import get_tensorboard_callback
from ..model_io import load_model
def get_imagenet_dataset(data_path, training, image_size, batch_size, data_aug=True, one_hot=False):
""" Loads ImageNet 2012 dataset and builds a tf.dataset out of it.
Args:
data_path (str): path to the folder containing ImageNet tar files
training (bool): True to retrieve training data, False for validation
image_size (int): desired image size
batch_size (int): the batch size
data_aug (bool, optional): True to apply data augmentation (only train). Defaults to True.
one_hot (bool, optional): whether to one hot labels or not. Defaults to False.
Returns:
tf.dataset, int: the requested dataset (train or validation) and the
corresponding steps
"""
assert tfds is not None, "To load imagenet dataset, tensorflow-datasets module must\
be installed."
# Build the dataset
write_dir = os.path.join(data_path, 'tfds')
download_and_prepare_kwargs = {
'download_dir': os.path.join(write_dir, 'downloaded'),
'download_config': tfds.download.DownloadConfig(manual_dir=data_path)
}
split = 'train' if training else 'validation'
dataset, infos = tfds.load(
'imagenet2012',
data_dir=os.path.join(write_dir, 'data'),
split=split,
shuffle_files=training,
download=True,
as_supervised=True,
download_and_prepare_kwargs=download_and_prepare_kwargs,
with_info=True)
if training:
dataset = dataset.shuffle(10000, reshuffle_each_iteration=True).repeat()
data_aug = DATA_AUGMENTATION if data_aug else None
dataset = dataset.map(lambda image, label: (preprocess_image(
image, image_size, training, data_aug), label))
# One hot encode labels if requested
if one_hot:
num_classes = infos.features["label"].num_classes
dataset = dataset.map(lambda image, label: (image, tf.one_hot(label, num_classes)))
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1)
# The following will silence a Tensorflow warning on auto shard policy
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA
dataset = dataset.with_options(options)
return dataset, infos.splits[split].num_examples / batch_size
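# Illustrative usage (the data path below is only the CLI default used further down; point it
# to wherever the ImageNet tar files actually live):
#
#   train_ds, train_steps = get_imagenet_dataset('/hdd/datasets/imagenet/', training=True,
#                                                image_size=224, batch_size=128, one_hot=True)
#   val_ds, val_steps = get_imagenet_dataset('/hdd/datasets/imagenet/', training=False,
#                                            image_size=224, batch_size=128, one_hot=True)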
def compile_model(model, optimizer="SGD", distiller_type="none"):
""" Compiles the model.
Args:
model (keras.Model): the model to compile
optimizer (str, optional): the optimizer to use. Defaults to "SGD".
distiller_type (str, optional): string to select the loss in distillation.
Only used when input model is of ``DeitDistiller`` type. Defaults to 'none'.
Returns:
bool: True if labels should be one-hot encoded, False if not.
"""
def _get_optim(optim_str):
optim_str_low = optim_str.lower()
if optim_str_low == "sgd":
return SGD(momentum=0.9)
elif optim_str_low == "adam":
return Adam(epsilon=1e-8)
elif optim_str_low == "lamb":
return LAMB(epsilon=1e-8, weight_decay_rate=2e-2)
else:
raise ValueError(f"Unknown optimizer {optim_str}. "
"Please choose one of these options: {SGD, ADAM, LAMB}")
if isinstance(model, DeitDistiller):
model.compile(optimizer=_get_optim(optimizer),
metrics=['accuracy', 'top_k_categorical_accuracy'],
student_loss_fn=keras.losses.CategoricalCrossentropy(from_logits=True),
distiller_type=distiller_type)
return True
model.compile(optimizer=_get_optim(optimizer),
loss=keras.losses.CategoricalCrossentropy(from_logits=True),
metrics=['accuracy', 'top_k_categorical_accuracy'])
return True
def evaluate(model, val_dataset, batch_size, num_samples, val_steps):
""" Evaluates model performances.
Args:
model (keras.Model or akida.Model): the model to evaluate
val_dataset (tf.dataset): validation dataset
batch_size (int): the batch size
num_samples (int): number of samples to use for Akida
val_steps (int): validation steps
"""
if isinstance(model, akida.Model):
correct_preds = 0
cur_samples = 0
total_samples = val_steps * batch_size
if num_samples <= 0:
num_samples = total_samples
else:
num_samples = min(num_samples, total_samples)
it = val_dataset.as_numpy_iterator()
print(f"Processing {num_samples} samples.")
if num_samples > batch_size:
n_batches = num_samples // batch_size
if n_batches > 5:
log_samples = (n_batches // 5) * batch_size
else:
log_samples = batch_size
print(f"Logging every {log_samples} samples.")
else:
log_samples = num_samples
while cur_samples < num_samples:
x, y = next(it)
y_ak = model.predict_classes(x.astype(np.uint8))
correct_preds += np.sum(y_ak == np.argmax(y, -1))
cur_samples += y_ak.shape[0]
if cur_samples % log_samples == 0 and cur_samples < num_samples:
# Log current accuracy
accuracy = correct_preds / cur_samples
print(f"Accuracy after {cur_samples}: {accuracy}")
accuracy = correct_preds / cur_samples
print(f"Accuracy after {cur_samples}: {accuracy}")
else:
history = model.evaluate(val_dataset, steps=val_steps)
print(history)
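# Illustrative usage, e.g. for a converted Akida model (num_samples < 0 means "use the whole
# validation split", mirroring the CLI default defined in main()):
#
#   evaluate(akida_model, val_ds, batch_size=128, num_samples=-1, val_steps=val_steps)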
def rescale_legacy(base_model, input_size):
""" Rescales the model by changing its input size.
Args:
base_model (keras.Model): the model to rescale
input_size (int): desired model input size
Returns:
keras.Model: the rescaled model
"""
# Create the desired input
input_shape = (input_size, input_size, base_model.input.shape[-1])
new_input = Input(input_shape)
# Workaround to force the input shape update that is not working for
# functional models: the input_tensors parameter is ignored as described in
# https://github.com/tensorflow/tensorflow/issues/40617.
if not isinstance(base_model, Sequential):
base_model.layers[0]._batch_input_shape = (None, input_size, input_size,
base_model.input.shape[-1])
new_input = None
# Clone the model and replace input layer
clone = clone_model(base_model, input_tensors=new_input)
clone.set_weights(base_model.get_weights())
return clone
def rescale_vit(base_model, input_size):
""" Rescales the model by changing its input size in three steps:
1. Change the input layer, given the new input size values
2. Reconstruct model from config
3. Update weights, taking into account interpolation on PosEmbed layer
Args:
base_model (keras.Model): the model to rescale
input_size (int): desired model input size
Returns:
keras.Model: the rescaled model
"""
# 1. Create the desired input layer:
input_shape = (None, input_size, input_size, base_model.input.shape[-1])
num_tokens = sum(isinstance(ly, ClassToken) for ly in base_model.layers)
# 2. Clone the model by modification of dict_config:
# In models based on vision transformers, the position embedding changes its number of
# parameters with the input size. This is why we need to reconstruct the model.
x_patch, y_patch = None, None
clone_config = base_model.get_config()
for layer_config in clone_config["layers"]:
# 2.1. Change input size
if layer_config["class_name"] == "InputLayer":
layer_config["config"]["batch_input_shape"] = input_shape
# 2.2. Recover total of patches in both directions
if "Conv2D" in layer_config["class_name"]:
x_patch = input_size // layer_config["config"]["kernel_size"][0]
y_patch = input_size // layer_config["config"]["kernel_size"][1]
# 2.3. Change values in reshape process
elif layer_config["class_name"] == "Reshape":
layer_config["config"]["target_shape"] = list(layer_config["config"]["target_shape"])
layer_config["config"]["target_shape"][0] = x_patch * y_patch
# 2.4. Recompile model with new configuration
clone = base_model.from_config(clone_config)
# 3. Update weights:
# Get weights from the base model and transfer them into the clone model.
for base_layer, clone_layer in zip(base_model.layers, clone.layers):
base_weights = base_layer.get_weights()
if isinstance(base_layer, AddPositionEmbs):
apply_embedding_weights(clone_layer, base_weights, (x_patch, y_patch), num_tokens)
else:
clone_layer.set_weights(base_weights)
return clone
def rescale(base_model, input_size):
""" Rescale the model by architecture (if there is a vision transformer model or not)
Args:
base_model (keras.Model): the model to rescale
input_size (int): desired model input size
Returns:
keras.Model: the rescaled model
"""
is_vit = any(isinstance(layer, AddPositionEmbs) for layer in base_model.layers)
if is_vit:
return rescale_vit(base_model, input_size)
return rescale_legacy(base_model, input_size)
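# Illustrative usage: rescaling a loaded model to a new square input size; whether the legacy
# or the ViT path is taken is decided from the presence of AddPositionEmbs layers.
#
#   rescaled = rescale(base_model, input_size=160)
#   rescaled.summary()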
def train(model,
train_dataset,
train_steps,
val_dataset,
val_steps,
out_dir,
num_epochs,
tune=False,
learning_rate=1e-1,
initial_epoch=0,
lr_policy='exp_decay'):
""" Trains the model
Args:
model (keras.Model): the model to train
train_dataset (tf.dataset): training dataset
train_steps (int): train steps
val_dataset (tf.dataset): validation dataset
val_steps (int): validation steps
out_dir (str): parent directory for logs folder
num_epochs (int): the number of epochs
tune (bool, optional): enable tuning (lower learning rate). Defaults to
False.
learning_rate (float, optional): the learning rate. Defaults to 1e-1.
initial_epoch (int, optional): epoch at which to start training.
Defaults to 0.
lr_policy (str, optional): defines the learning rate policy to adopt. Values in
['exp_decay', 'cosine_decay', 'cosine_sched'] for exponential decay, cosine decay and
cosine oscillation respectively. Defaults to 'exp_decay'.
"""
# 1. Define training callbacks
callbacks = []
# 1.1 Learning rate scheduler
if lr_policy == 'exp_decay':
LR_START = learning_rate
LR_END = 1e-4
# number of epochs you first keep the learning rate constant
LR_EPOCH_CONSTANT = 10
# Modify default values for fine-tuning
if tune:
LR_START = 1e-4
LR_END = 1e-8
LR_EPOCH_CONSTANT = 2
if LR_EPOCH_CONSTANT >= num_epochs:
lr_decay = LR_END / LR_START
else:
lr_decay = (LR_END / LR_START)**(1. / (num_epochs - LR_EPOCH_CONSTANT))
# This function keeps the learning rate at LR_START for the first N epochs
# and decreases it exponentially after that.
def agg_lr_scheduler(epoch):
if epoch < LR_EPOCH_CONSTANT:
return LR_START
return LR_START * lr_decay**(epoch - (LR_EPOCH_CONSTANT - 1))
lr_scheduler = LearningRateScheduler(agg_lr_scheduler)
elif lr_policy == 'cosine_decay':
LR_START = learning_rate
if tune:
LR_EPOCH_CONSTANT = 2 # number of epochs you first keep the learning rate constant
LR_MIN = 1e-8
else:
LR_EPOCH_CONSTANT = 5 # number of epochs you first keep the learning rate constant
LR_MIN = 1e-6
# Make sure to start with a learning rate higher than LR_MIN
LR_MIN = LR_MIN if LR_START > LR_MIN else 0.1 * LR_START
# If the number of epochs is too small, skip the constant warm-up period
if num_epochs < LR_EPOCH_CONSTANT:
LR_EPOCH_CONSTANT = 0
# This function keeps the learning rate at LR_START for the first N epochs and decreases it
# following
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/CosineDecay
def cos_lr_scheduler(epoch):
if epoch < LR_EPOCH_CONSTANT:
return LR_START
step = min(epoch, num_epochs) - LR_EPOCH_CONSTANT
cosine_decay = 0.5 * (1 + np.cos(np.pi * step / (num_epochs - LR_EPOCH_CONSTANT)))
decayed = (1 - LR_MIN) * cosine_decay + LR_MIN
return LR_START * decayed
lr_scheduler = LearningRateScheduler(cos_lr_scheduler)
elif lr_policy == 'cosine_sched':
lr_scheduler = get_cosine_lr_scheduler(learning_rate, num_epochs * train_steps)
else:
raise ValueError(f"Unsupported learning rate policy '{lr_policy}'.")
callbacks.append(lr_scheduler)
# 1.2 Model checkpoints (save best model and retrieve it when training is complete)
restore_model = RestoreBest(model)
callbacks.append(restore_model)
# 1.3 Tensorboard logs
tensorboard = get_tensorboard_callback(out_dir, prefix='imagenet')
callbacks.append(tensorboard)
# 2. Train
history = model.fit(train_dataset,
steps_per_epoch=train_steps,
epochs=num_epochs,
callbacks=callbacks,
validation_data=val_dataset,
validation_steps=val_steps,
initial_epoch=initial_epoch)
print_history_stats(history)
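# Illustrative call, reusing the datasets built with get_imagenet_dataset above (the output
# directory is the CLI default used in main(); the epoch count here is arbitrary):
#
#   train(train_model, train_ds, train_steps, val_ds, val_steps, out_dir='./logs',
#         num_epochs=90, learning_rate=1e-1, lr_policy='cosine_decay')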
def main():
""" Entry point for script and CLI usage.
"""
global_parser = argparse.ArgumentParser(add_help=False)
global_parser.add_argument(
"-d",
"--data_dir",
type=str,
default='/hdd/datasets/imagenet/',
help="The directory containing the ImageNet data.")
global_parser.add_argument("-o",
"--out_dir",
type=str,
default='./logs',
help="The output directory (logs, checkpoints).")
parsers = get_training_parser(batch_size=128,
extract=True,
freeze_before=True,
tune=True,
global_parser=global_parser)
train_parser = parsers[1]
train_parser.add_argument("-lr",
"--learning_rate",
type=float,
default=1e-1,
help="Learning rate start value.")
train_parser.add_argument("-ie",
"--initial_epoch",
type=int,
default=0,
help="Epoch at which to start training.")
train_parser.add_argument("--optim", type=str, default="SGD",
help="Optimizer to use. Defaults to %(default)s.")
train_parser.add_argument("--data_aug", action='store_true', help="Enables custom DA.")
train_parser.add_argument("--lr_policy",
default='exp_decay',
choices=['exp_decay', 'cosine_decay', 'cosine_sched'],
help="Defines the learning rate scheduling. Values in "
"['exp_decay', 'cosine_decay', 'cosine_sched'] which corresponds"
" to exponential decay, cosine decay and cosine oscillation "
"respectively.")
train_parser.add_argument("--teacher", type=str, default=None,
help="Teacher model use to train the model through Knowledge"
"Distillation. The input model output an Add layer. "
"Defaults to %(default)s.")
train_parser.add_argument("-dt", "--distiller_type", type=str, default="soft",
help="Define the distillation loss type. Defaults to %(default)s.")
train_parser.add_argument("-a", "--alpha", type=float, default=0.5,
help="Value for distiller losses weighting. Defaults to %(default)s.")
tune_parser = parsers[2]
tune_parser.add_argument("-ie",
"--initial_epoch",
type=int,
default=0,
help="Epoch at which to start training.")
tune_parser.add_argument("-lr",
"--learning_rate",
type=float,
default=6e-5,
help="Learning rate start value.")
tune_parser.add_argument("--data_aug",
action='store_true',
help="Enables custom DA.")
tune_parser.add_argument("--optim",
type=str,
default="SGD",
help="Optimizer to use. Defaults to %(default)s.")
tune_parser.add_argument("--lr_policy",
default='exp_decay',
choices=['exp_decay', 'cosine_decay', 'cosine_sched'],
help="Defines the learning rate scheduling. Values in "
"['exp_decay', 'cosine_decay', 'cosine_sched'] which corresponds "
"to exponential decay, cosine decay and cosine oscillation "
"respectively.")
tune_parser.add_argument("--teacher", type=str, default=None,
help="Teacher model use to train the model through Knowledge"
"Distillation. The input model output an Add layer. "
"Defaults to %(default)s.")
tune_parser.add_argument("-dt", "--distiller_type", type=str, default="soft",
help="Define the distillation loss type. Defaults to %(default)s.")
tune_parser.add_argument("-a", "--alpha", type=float, default=0.5,
help="Value for distiller losses weighting. Defaults to %(default)s.")
eval_parser = parsers[3]
eval_parser.add_argument("-ns",
"--num_samples",
type=int,
default=-1,
help="Number of samples to use (for Akida)")
subparsers = parsers[-1]
rescale_parser = subparsers.add_parser("rescale",
help="Rescale a model.",
parents=[global_parser])
rescale_parser.add_argument("-i",
"--input_size",
type=int,
required=True,
help="The square input image size")
rescale_parser.add_argument("-s",
"--savemodel",
type=str,
default=None,
help="Save model with the specified name")
args = parsers[0].parse_args()
# Load the source model
model = load_model(args.model)
im_size = model.input_shape[1]
# Try to load the teacher model, used after to train with Knowledge Distillation
# Hyperparameters taken from https://arxiv.org/pdf/2012.12877.pdf
if getattr(args, 'teacher', False):
if args.distiller_type not in ['none', 'soft', 'hard']:
raise ValueError("Distiller type must be one of ['none', 'soft', 'hard']")
teacher = load_model(args.teacher)
train_model = DeitDistiller(model, teacher, alpha=args.alpha, temperature=3.0)
else:
train_model = model
# Freeze the model
if "freeze_before" in args:
freeze_model_before(model, args.freeze_before)
# Compile model
one_hot = compile_model(train_model, optimizer=getattr(args, "optim", "SGD"),
distiller_type=getattr(args, "distiller_type", "none"))
# Load validation data
if args.action in ['train', 'tune', 'eval']:
val_ds, val_steps = get_imagenet_dataset(args.data_dir, False, im_size,
args.batch_size, data_aug=False, one_hot=one_hot)
# Load training data
if args.action in ['train', 'tune', 'extract']:
data_aug = args.data_aug if args.action in ['train', 'tune'] else False
train_ds, train_steps = get_imagenet_dataset(args.data_dir, True, im_size,
args.batch_size, data_aug, one_hot=one_hot)
# Disable QuantizeML assertions
os.environ["ASSERT_ENABLED"] = "0"
# Train model
if args.action in ['train', 'tune']:
tune = args.action == 'tune'
learning_rate = args.learning_rate
if args.lr_policy == 'cosine_decay':
# Tune learning rate following https://arxiv.org/pdf/2012.12877.pdf
learning_rate *= args.batch_size / 512
train(train_model,
train_ds,
train_steps,
val_ds,
val_steps,
args.out_dir,
args.epochs,
tune=tune,
learning_rate=learning_rate,
initial_epoch=args.initial_epoch,
lr_policy=args.lr_policy)
save_model(model, args.model, args.savemodel, args.action)
elif args.action == 'eval':
# Evaluate model accuracy
if args.akida:
model = convert(model)
evaluate(model, val_ds, args.batch_size, args.num_samples, val_steps)
elif args.action == 'rescale':
# Rescale model
m = rescale(model, args.input_size)
save_model(m, args.model, args.savemodel, args.action)
elif args.action == 'extract':
# Extract samples from dataset
extract_samples(args.savefile, train_ds, args.batch_size)
if __name__ == "__main__":
main()
|
PypiClean
|
/zertoapilib-0.0.1.tar.gz/zertoapilib-0.0.1/README.md
|
# pyzerto-unofficial

## Overview
pyzerto-unofficial is a simple python3 API wrapper for the Zerto product by the eponymous corporation. It is intended to
simplify the deployment and management of Zerto in a code-driven manner. Potential uses for this API wrapper are as
diverse as you may wish to make it. An example script of automatically protecting tagged virtual machines (VMs) is
included in this library.
## Motivation and Audience
The official Zerto API is a REST-driven architecture. This wrapper was developed to reduce the barrier of entry for
anyone interested in automating Zerto tasks via Python by abstracting the effort of generating a session key,
constructing correctly formatted CRUD requests, and identifying the correct URL endpoints for any given API call. This
can benefit new Zerto users, Zerto partners deploying at-scale environments, and anyone with a Python and Zerto
background.
## Disclaimer
This is not an official Zerto product in any way, shape, or form. Support is community-driven and you should thoroughly
test your scripts to ensure that you do not break anything by utilizing pyzerto-unofficial. In other words, don't blame
us if it messes something up!!
## Usage
### The Zerto API, explained
The Zerto Virtual Manager (or Zerto Cloud Appliance) acts as the management plane for all things Zerto-related in your
environment. To use the Zerto API, you will need:
* The IP address of your target ZVM or ZCA and access from the machine running the Python library over TCP port 9669
* Credentials of a valid admin-level account for your ZVM or ZCA. For a ZVM, this usually means an admin- or nearly-
admin level vCenter account, and for a ZCA, this is usually the Administrator account of the underlying Windows instance
or VM in AWS or Azure.
Using the IP address of the ZVM/ZCA and a valid user account, you can generate an API session token, which will then
need to be passed back to the ZVM/ZCA in the form of a properly formed header as well as a request body, if applicable,
in order to run API calls to do interesting things like get local site information, create a virtual protection group
(VPG) object, add VMs to said VPG, and so on. Furthermore, the ZVM/ZCA must have a valid license key installed in order
to successfully run API calls (except for generating an API session token and applying a license!)
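The wrapper handles this token exchange for you. If you want to see the raw call it performs, it looks roughly like
the following; the `/v1/session/add` endpoint and `x-zerto-session` response header follow the usual Zerto REST
conventions, but verify them against your ZVM's API documentation before relying on them:
import requests
r = requests.post('https://10.0.10.50:9669/v1/session/add', auth=('[email protected]', 'password'), verify=False)
token = r.headers['x-zerto-session']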
### Using the wrapper
Start with `zerto_auth.py`. Its `login` function accepts three arguments: `zvm_ip`, `zvm_user`, and
`zvm_password`. This function will return a `dict` with the properly formatted headers for running API commands against
the ZVM/ZCA, including a valid session token.
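For example (reusing the placeholder values from the examples further down):
vczvmSession = login('10.0.10.50', '[email protected]', 'password')
# vczvmSession is now a headers dict you can pass straight to the zvm, vra and vpg classes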
From here, you can instantiate classes which can be found in the `zvm.py`, `vra.py`, and `vpg.py` modules. The theory behind
how the modules (and subsequent classes) were organized was based on the Zerto "object" that you would want to do
interesting things to. For example, if you want to get information about datastores visible to the ZVM, you will find
that method located in the `zvm` class under the `zvm` module. If you want to install or delete a VRA, you will find
`installVra` and `delVra` in the `vra` class under the `vra` module. Even though all API calls are executed against a
ZVM or ZCA, this modularization is intended to make sense to an admin.
#### A special note on Virtual Protection Groups
Virtual Protection Groups, or VPGs, require some specific clarification. To create and manage VPGs, you need to first
create a "VPG Object", to which you will then add VMs and specify specific instructions such as length of journal,
networks to attach to during a live failover or a test failover, and the like. However, none of your changes will be
applied unless and until you *commit* the settings.
Once a VPG has been committed, the vpgSettings identifier *goes away*. Do not confuse the VPG identifier with a
vpgSettings identifier; these are two different things. Roughly speaking, you can think of the `vpgs` class in the `vpg`
module as anything having to do with existing VPG "containers" as a whole, and the `vpgSettings` class in the `vpg`
module as having to do with what's *inside* a VPG (with the exception of creating a VPG to begin with).
### Example: Adding a license
vczvmSession = login('10.0.10.50', '[email protected]', 'password')
z = zvm.zvm('10.0.10.50', vczvmSession)
z.addLicense('VALIDZERTOLICENSEKEYHERE')
### Example: Installing a VRA
import json
vczvmSession = login('10.0.10.50', '[email protected]', 'password')
v = vra.vra('10.0.10.50', vczvmSession)
testVraDict = {
"DatastoreIdentifier":"GUID-OF-VCENTER.DATASTOREMOREF",
"HostIdentifier":"GUID-OF-VCENTER.HOSTMOREF",
"HostRootPassword":"ESXIPASSWORD,
"MemoryInGb":3,
"NumOfCpus":1,
"NetworkIdentifier":"GUID-OF-VCENTER.NETWORKMOREF",
"UsePublicKeyInsteadOfCredentials":False,
"PopulatePostInstallation":False,
"VraNetworkDataApi":{
"DefaultGateway":"192.168.1.1",
"SubnetMask":"255.255.255.0",
"VraIPAddress":"192.168.1.90",
"VraIPConfigurationTypeApi":"Static"
}
}
v.installVRA(json.dumps(testVraDict))
### Example: Pairing a site with another site
import json, requests
vczvmSession = login('10.0.10.50', '[email protected]', 'password')
awszcaSession = login('172.16.20.21', 'Administrator', 'password')
zvmOnsite = zvm.zvm('10.0.10.50', vczvmSession)
zcaInCloud = zvm.zvm('172.16.20.21', awszcaSession)
zcaTokenObject = zcaInCloud.generatePeeringToken()
zcaTokenActual = zcaTokenObject.json().get('Token')
pairOutput = requests.post('https://10.0.10.50:9669/v1/peersites', headers=vczvmSession, data=json.dumps(
{"HostName": '172.16.20.21', "Port":"9071", "Token":zcaTokenActual}), verify=False)
### Example: Create a VPG
import json
vczvmSession = login('10.0.10.50', '[email protected]', 'password')
v = vpg.vpgSettings('10.0.10.50', vczvmSession)
vpgPayload = {
"Basic":{
"JournalHistoryInHours":2,
"Name":"TestVpg",
"Priority":"Medium",
"ProtectedSiteIdentifier":"IDENTIFIER1",
"RecoverySiteIdentifier":"IDENTIFIER2",
"RpoInSeconds":600,
"ServiceProfileIdentifier": null,
"TestIntervalInMinutes":0,
"UseWanCompression":"True",
"ZorgIdentifier": null
},
"BootGroups":{
"BootGroups":[
{
"BootDelayInSeconds":0,
"BootGroupIdentifier":"00000000-0000-0000-0000-000000000000",
"Name":"Default"
}
]
},
"Journal":{
"DatastoreIdentifier":"GUIDOFVCENTER.DATASTOREMOREF",
"Limitation":{
"HardLimitInMB":0,
"HardLimitInPercent":0,
"WarningThresholdInMB":0,
"WarningThresholdInPercent":0
}
},
"LongTermRetention":null,
"Networks":{
"Failover":{
"Hypervisor":{
"DefaultNetworkIdentifier":"GUIDOFVCENTER.NETWORKMOREF"
}
},
"FailoverTest":{
"Hypervisor":{
"DefaultNetworkIdentifier":"GUIDOFVCENTER.NETWORKMOREF"
}
}
},
"Recovery":{
"DefaultDatastoreClusterIdentifier":null,
"DefaultDatastoreIdentifier":"GUIDOFVCENTER.DATASTOREMOREF",
"DefaultFolderIdentifier":"GUIDOFVCENTER.FOLDERMOREF",
"DefaultHostClusterIdentifier":null,
"DefaultHostIdentifier":"GUIDOFVCENTER.HOSTMOREF",
"ResourcePoolIdentifier":null
},
"Scripting":{
"PostRecovery":{
"Command":null,
"Parameters":null,
"TimeoutInSeconds":0
},
"PreRecovery":{
"Command":null,
"Parameters":null,
"TimeoutInSeconds":0
}
},
"Vms":[]
}
vpgSettingsId = v.createNewVpgSettingsObject(json.dumps(vpgPayload))
v.commitSettingsObject(vpgSettingsId)
## Acknowledgements
I would like to acknowledge several people for assisting, either directly or indirectly, in the creation of this
library. Shaun Finn directly contributed to the zerto_auth and vra modules, and Wes Carroll provided insight and
assistance based on his experiences designing and developing his excellent PowerShell API wrapper. I would also like
to acknowledge Nick Costigan, Jacob Lucas and Chris Pacejo in providing their insight as professional developers.
|
PypiClean
|
/odooku_odoo_base-11.0.7-py35-none-any.whl/odoo/addons/bus/static/src/js/bus.js
|
odoo.define('bus.bus', function (require) {
"use strict";
var local_storage = require('web.local_storage');
var session = require('web.session');
var Widget = require('web.Widget');
var bus = {};
var PARTNERS_PRESENCE_CHECK_PERIOD = 30000; // don't check presence more than once every 30s
var TAB_HEARTBEAT_PERIOD = 10000; // 10 seconds
var MASTER_TAB_HEARTBEAT_PERIOD = 1500; // 1.5 second
bus.ERROR_DELAY = 10000;
bus.Bus = Widget.extend({
init: function(){
var self = this;
this._super();
this.options = {};
this.activated = false;
this.bus_id = _.uniqueId('bus');
this.channels = [];
this.last = 0;
this.stop = false;
this.is_master = true;
// bus presence
this.last_presence = new Date().getTime();
this.last_partners_presence_check = this.last_presence;
this.set("window_focus", true);
this.on("change:window_focus", this, function () {
if (this.get("window_focus")) {
this.trigger('window_focus', this.is_master);
}
});
$(window).on("focus." + this.bus_id, _.bind(this.focus_change, this, true));
$(window).on("blur." + this.bus_id, _.bind(this.focus_change, this, false));
$(window).on("unload." + this.bus_id, _.bind(this.focus_change, this, false));
_.each('click,keydown,keyup'.split(','), function(evtype) {
$(window).on(evtype + "." + self.bus_id, function() {
self.last_presence = new Date().getTime();
});
});
},
destroy: function () {
var self = this;
$(window).off("focus." + this.bus_id);
$(window).off("blur." + this.bus_id);
$(window).off("unload." + this.bus_id);
_.each('click,keydown,keyup'.split(','), function(evtype) {
$(window).off(evtype + "." + self.bus_id);
});
},
start_polling: function(){
if(!this.activated){
this.poll();
this.stop = false;
}
},
stop_polling: function(){
this.activated = false;
this.stop = true;
this.channels = [];
},
poll: function() {
var self = this;
self.activated = true;
var now = new Date().getTime();
var options = _.extend({}, this.options, {
bus_inactivity: now - this.get_last_presence(),
});
if (this.last_partners_presence_check + PARTNERS_PRESENCE_CHECK_PERIOD > now) {
options = _.omit(options, 'bus_presence_partner_ids');
} else {
this.last_partners_presence_check = now;
}
var data = {channels: self.channels, last: self.last, options: options};
// The backend has a maximum cycle time of 50 seconds so give +10 seconds
session.rpc('/longpolling/poll', data, {shadow : true, timeout: 60000}).then(function(result) {
self.on_notification(result);
if(!self.stop){
self.poll();
}
}, function(unused, e) {
// no error popup if request is interrupted or fails for any reason
e.preventDefault();
// random delay to avoid massive longpolling
setTimeout(_.bind(self.poll, self), bus.ERROR_DELAY + (Math.floor((Math.random()*20)+1)*1000));
});
},
on_notification: function(notifications) {
var self = this;
var notifs = _.map(notifications, function (notif) {
if (notif.id > self.last) {
self.last = notif.id;
}
return [notif.channel, notif.message];
});
this.trigger("notification", notifs);
},
add_channel: function(channel){
this.channels.push(channel);
this.channels = _.uniq(this.channels);
},
delete_channel: function(channel){
this.channels = _.without(this.channels, channel);
},
// bus presence : window focus/unfocus
focus_change: function(focus) {
this.set("window_focus", focus);
},
is_odoo_focused: function () {
return this.get("window_focus");
},
get_last_presence: function () {
return this.last_presence;
},
update_option: function(key, value){
this.options[key] = value;
},
delete_option: function(key){
if(_.contains(_.keys(this.options), key)){
delete this.options[key];
}
},
});
/**
* CrossTabBus Widget
*
* Manages the communication between browser tabs so that only one tab polls on behalf of the others
* (performance improvement). When a tab is opened and the start_polling method is called, the tab
* signals itself to the others through the localStorage. When a tab is closed, it signals its removal.
* If it was the master tab (the polling one), another open tab is elected and starts polling for the
* others. When a notification is received from the poll, it is relayed through the localStorage too.
*
* localStorage keys used are:
*
* - bus.channels : shared public channel list to listen to during the poll
* - bus.options : shared options
* - bus.notification : the notifications received from the last poll
* - bus.peers : map of opened tab ids to their last heartbeat timestamp
* - bus.master : id of the master tab
* - bus.heartbeat : timestamp of the master tab's last heartbeat
var CrossTabBus = bus.Bus.extend({
init: function(){
this._super.apply(this, arguments);
this.is_master = false;
this.is_registered = false;
if (parseInt(getItem('bus.last_ts', 0)) + 50000 < new Date().getTime()) {
setItem('bus.last', -1);
}
on("storage", this.on_storage.bind(this));
},
start_polling: function(){
var self = this;
if (!this.is_registered) {
this.is_registered = true;
tab_manager.register_tab(function () {
self.is_master = true;
self.start_polling();
}, function () {
self.is_master = false;
self.stop_polling();
}, function () {
// Write last_presence in local storage if it has been updated since last heartbeat
var hb_period = self.is_master ? MASTER_TAB_HEARTBEAT_PERIOD : TAB_HEARTBEAT_PERIOD;
if (self.last_presence + hb_period > new Date().getTime()) {
setItem('bus.last_presence', self.last_presence);
}
});
if (this.is_master) {
setItem('bus.channels', this.channels);
setItem('bus.options', this.options);
} else {
this.channels = getItem('bus.channels', this.channels);
this.options = getItem('bus.options', this.options);
}
return; // start_polling will be called again on tab registration
}
if (this.is_master) {
this._super.apply(this, arguments);
}
},
on_notification: function(notifications){
if(this.is_master) { // broadcast to other tabs
var last = getItem('bus.last', -1);
var max_id = Math.max(last, 0);
var new_notifications = _.filter(notifications, function (notif) {
max_id = Math.max(max_id, notif.id);
return notif.id < 0 || notif.id > last;
});
this.last = max_id;
if (new_notifications.length) {
setItem('bus.last', max_id);
setItem('bus.last_ts', new Date().getTime());
setItem('bus.notification', new_notifications);
this._super(new_notifications);
}
} else {
this._super.apply(this, arguments);
}
},
on_storage: function (e) {
// use the value of event to not read from
// localStorage (avoid race condition)
var value = e.newValue;
// notifications changed
if(e.key === 'bus.notification'){
var notifs = JSON.parse(value);
this.on_notification(notifs);
}
// update channels
if(e.key === 'bus.channels'){
this.channels = JSON.parse(value);
}
// update options
if(e.key === 'bus.options'){
this.options = JSON.parse(value);
}
// update focus
if(e.key === 'bus.focus'){
this.set('window_focus', JSON.parse(value));
}
},
add_channel: function(){
this._super.apply(this, arguments);
setItem('bus.channels', this.channels);
},
delete_channel: function(){
this._super.apply(this, arguments);
setItem('bus.channels', this.channels);
},
get_last_presence: function () {
return getItem('bus.last_presence') || new Date().getTime();
},
update_option: function(){
this._super.apply(this, arguments);
setItem('bus.options', this.options);
},
delete_option: function(){
this._super.apply(this, arguments);
setItem('bus.options', this.options);
},
focus_change: function(focus) {
this._super.apply(this, arguments);
setItem('bus.focus', focus);
},
});
//utility functions
function on(type, listener) {
if (window.addEventListener) {
window.addEventListener(type, listener);
} else { //IE8
window.attachEvent('on' + type, listener);
}
}
function getItem(key, defaultValue) {
var val = local_storage.getItem(key);
return val ? JSON.parse(val) : defaultValue;
}
function setItem(key, value) {
local_storage.setItem(key, JSON.stringify(value));
}
var tab_manager = {
peersKey: 'bus.peers',
masterKey: 'bus.master',
heartbeatKey: 'bus.heartbeat',
isMaster: false,
id: new Date().getTime() + ':' + (Math.random() * 1000000000 | 0),
register_tab: function (is_master_callback, is_no_longer_master, on_heartbeat_callback) {
this.is_master_callback = is_master_callback;
this.is_no_longer_master = is_no_longer_master || function () {};
this.on_heartbeat_callback = on_heartbeat_callback || function () {};
var peers = getItem(tab_manager.peersKey, {});
peers[tab_manager.id] = new Date().getTime();
setItem(tab_manager.peersKey, peers);
on('unload', function () {
// unload peer
var peers = getItem(tab_manager.peersKey, {});
delete peers[tab_manager.id];
setItem(tab_manager.peersKey, peers);
// unload master
if (tab_manager.isMaster) {
local_storage.removeItem(tab_manager.masterKey);
}
});
if (!local_storage.getItem(tab_manager.masterKey)) {
tab_manager.start_election();
}
on('storage', function(e) {
if (!e) { e = window.event;}
if (e.key !== tab_manager.masterKey) {
return;
}
if (e.newValue === null) { //master was unloaded
tab_manager.start_election();
}
});
tab_manager.heartbeat();
},
heartbeat: function () {
var current = new Date().getTime();
var heartbeatValue = local_storage.getItem(tab_manager.heartbeatKey) || 0;
var peers = getItem(tab_manager.peersKey, {});
if ((parseInt(heartbeatValue) + 5000) < current) {
// Heartbeat is out of date. Electing new master
tab_manager.start_election();
}
if (tab_manager.isMaster) {
//walk through all peers and kill old
var cleanedPeers = {};
for (var peerName in peers) {
if (peers[peerName] + 15000 > current) {
cleanedPeers[peerName] = peers[peerName];
}
}
if (!tab_manager.is_last_heartbeat_mine()) {
// someone else is also master...
// it should not happen, except in some race condition situation.
tab_manager.isMaster = false;
tab_manager.last_heartbeat = 0;
peers[tab_manager.id] = current;
setItem(tab_manager.peersKey, peers);
tab_manager.is_no_longer_master();
} else {
tab_manager.last_heartbeat = current;
local_storage.setItem(tab_manager.heartbeatKey, current);
setItem(tab_manager.peersKey, cleanedPeers);
}
} else {
//update own heartbeat
peers[tab_manager.id] = current;
setItem(tab_manager.peersKey, peers);
}
this.on_heartbeat_callback();
setTimeout(function(){
tab_manager.heartbeat();
}, tab_manager.isMaster ? MASTER_TAB_HEARTBEAT_PERIOD : TAB_HEARTBEAT_PERIOD);
},
is_last_heartbeat_mine: function () {
var heartbeatValue = local_storage.getItem(tab_manager.heartbeatKey) || 0;
return (parseInt(heartbeatValue) === tab_manager.last_heartbeat);
},
start_election: function () {
if (tab_manager.isMaster) {
return;
}
//check who's next
var peers = getItem(tab_manager.peersKey, {});
var now = new Date().getTime();
var newMaster;
for (var peerName in peers) {
//check for dead peers
if (peers[peerName] + 15000 < now) {
continue;
}
newMaster = peerName;
break;
}
if (newMaster === tab_manager.id) {
//we're next in queue. Electing as master
setItem(tab_manager.masterKey, tab_manager.id);
tab_manager.last_heartbeat = new Date().getTime();
setItem(tab_manager.heartbeatKey, tab_manager.last_heartbeat);
tab_manager.isMaster = true;
tab_manager.is_master_callback();
//removing master peer from queue
delete peers[newMaster];
setItem(tab_manager.peersKey, peers);
}
},
};
// bus singleton, depending on the browser:
// if localStorage is supported, only one tab will do the polling
if(typeof Storage !== "undefined"){
bus.bus = new CrossTabBus();
} else {
bus.bus = new bus.Bus();
}
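// Illustrative consumer sketch (the channel name is an assumption; add_channel,
// start_polling and the 'notification' trigger are the ones defined above):
//
//   var bus = require('bus.bus').bus;
//   bus.add_channel('my_channel');
//   bus.on('notification', this, function (notifications) {
//       // each entry is a [channel, message] pair
//   });
//   bus.start_polling();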
return bus;
});
|
PypiClean
|
/traits-enaml-0.3.0.tar.gz/traits-enaml-0.3.0/traits_enaml/testing/event_loop_helper.py
|
import contextlib
import threading
from enaml.application import deferred_call
from enaml.qt import QtCore
from traits.api import HasStrictTraits, Instance
from traits_enaml.compat import QApplication
class ConditionTimeoutError(RuntimeError):
pass
class EventLoopHelper(HasStrictTraits):
qt_app = Instance(QApplication)
def event_loop_with_timeout(self, repeat=2, timeout=10.0):
"""Helper function to send all posted events to the event queue and
wait for them to be processed. This runs the real event loop and
does not emulate it with QApplication.processEvents.
Parameters
----------
repeat : int
Number of times to process events. Default is 2.
timeout: float, optional, keyword only
Number of seconds to run the event loop. Default value is 10.0.
"""
def repeat_loop(condition, repeat):
# We sendPostedEvents to ensure that enaml events are processed
self.qt_app.sendPostedEvents()
repeat = repeat - 1
if repeat <= 0:
deferred_call(condition.set)
else:
deferred_call(repeat_loop, condition=condition, repeat=repeat)
condition = threading.Event()
deferred_call(repeat_loop, repeat=repeat, condition=condition)
self.event_loop_until_condition(
condition=condition.is_set, timeout=timeout)
def event_loop(self, repeat=1):
"""Emulates an event loop `repeat` times with
QApplication.processEvents.
Parameters
----------
repeat : int
Number of times to process events. Default is 1.
"""
for i in range(repeat):
self.qt_app.sendPostedEvents()
self.qt_app.processEvents()
def event_loop_until_condition(self, condition, timeout=10.0):
"""Runs the real Qt event loop until the provided condition evaluates
to True.
Raises ConditionTimeoutError if the timeout occurs before the condition
is satisfied.
Parameters
----------
condition : callable
A callable to determine if the stop criteria have been met. This
should accept no arguments.
timeout : float
Number of seconds to run the event loop in the case that the trait
change does not occur.
"""
def handler():
if condition():
self.qt_app.quit()
condition_timer = QtCore.QTimer()
condition_timer.setInterval(50)
condition_timer.timeout.connect(handler)
timeout_timer = QtCore.QTimer()
timeout_timer.setSingleShot(True)
timeout_timer.setInterval(timeout * 1000)
timeout_timer.timeout.connect(self.qt_app.quit)
timeout_timer.start()
condition_timer.start()
try:
self.qt_app.exec_()
if not timeout_timer.isActive():
# We exited the event loop on timeout
raise ConditionTimeoutError('Timed out waiting for condition')
finally:
timeout_timer.stop()
condition_timer.stop()
@contextlib.contextmanager
def delete_widget(self, widget, timeout=1.0):
"""Runs the real Qt event loop until the widget provided has been
deleted. Raises ConditionTimeoutError on timeout.
Parameters
----------
widget : QObject
The widget whose deletion will stop the event loop.
timeout : float
Number of seconds to run the event loop in the case that the
widget is not deleted.
"""
timer = QtCore.QTimer()
timer.setSingleShot(True)
timer.setInterval(timeout * 1000)
timer.timeout.connect(self.qt_app.quit)
widget.destroyed.connect(self.qt_app.quit)
yield
timer.start()
self.qt_app.exec_()
if not timer.isActive():
# We exited the event loop on timeout
raise ConditionTimeoutError(
'Could not destroy widget before timeout: {!r}'.format(widget))
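# Illustrative usage in a Qt-based test (assumes a running QApplication instance `app`;
# `flag_is_set` stands in for whatever condition the code under test toggles):
#
#   helper = EventLoopHelper(qt_app=app)
#   helper.event_loop_with_timeout(repeat=2, timeout=5.0)
#   helper.event_loop_until_condition(lambda: flag_is_set(), timeout=5.0)
#   with helper.delete_widget(widget, timeout=1.0):
#       widget.deleteLater()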
|
PypiClean
|
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/machinelearningservices/latest/list_notebook_keys.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListNotebookKeysResult',
'AwaitableListNotebookKeysResult',
'list_notebook_keys',
]
@pulumi.output_type
class ListNotebookKeysResult:
def __init__(__self__, primary_access_key=None, secondary_access_key=None):
if primary_access_key and not isinstance(primary_access_key, str):
raise TypeError("Expected argument 'primary_access_key' to be a str")
pulumi.set(__self__, "primary_access_key", primary_access_key)
if secondary_access_key and not isinstance(secondary_access_key, str):
raise TypeError("Expected argument 'secondary_access_key' to be a str")
pulumi.set(__self__, "secondary_access_key", secondary_access_key)
@property
@pulumi.getter(name="primaryAccessKey")
def primary_access_key(self) -> str:
return pulumi.get(self, "primary_access_key")
@property
@pulumi.getter(name="secondaryAccessKey")
def secondary_access_key(self) -> str:
return pulumi.get(self, "secondary_access_key")
class AwaitableListNotebookKeysResult(ListNotebookKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListNotebookKeysResult(
primary_access_key=self.primary_access_key,
secondary_access_key=self.secondary_access_key)
def list_notebook_keys(resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListNotebookKeysResult:
"""
Use this data source to access information about an existing resource.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:machinelearningservices/latest:listNotebookKeys', __args__, opts=opts, typ=ListNotebookKeysResult).value
return AwaitableListNotebookKeysResult(
primary_access_key=__ret__.primary_access_key,
secondary_access_key=__ret__.secondary_access_key)
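# Illustrative usage inside a Pulumi program (the resource group and workspace names are
# placeholders for an existing Azure ML workspace):
#
#   keys = list_notebook_keys(resource_group_name="my-rg", workspace_name="my-workspace")
#   pulumi.export("notebookPrimaryAccessKey", keys.primary_access_key)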
|
PypiClean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.