/pee-0.1.1.tar.gz/pee-0.1.1/README.md
# pee
## (p)ython fr(ee)
### the Linux free command but for MacOS and built in Python
`pee` displays stats about `total`, `used`, `free`, and `available` memory in the system. By default (no flag), it returns the stats as bytes.
The Linux `free` command also reports `buffer` and `cache` info, which is not available from the MacOS kernel.
## Installation
`pip install pee`
## Usage
`pee <-flag>` where `flag` is one of the unit flags listed below.
### flags
```
-h, --help show this help message and exit
-g, --gigabytes display memory info in gigabytes
-m, --megabytes display memory info in megabytes
-k, --kilobytes display memory info in kilobytes
-b, --bytes display memory info in bytes (default)
-V, --version display version info
```
## Dependencies
[psutil](https://github.com/giampaolo/psutil)
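Since `pee` is built on `psutil`, the same numbers can be queried directly. A minimal sketch of the kind of call `pee` makes under the hood (illustrative, not the actual implementation):
```
import psutil
vm = psutil.virtual_memory()
print(vm.total, vm.used, vm.free, vm.available)  # values in bytes
```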
## More Info
I am currently working through [Operating Systems: Three Easy Pieces](http://pages.cs.wisc.edu/~remzi/OSTEP/) and the exercises at the end of chapter 13 require the use of `free`. I am using MacOS, so I don't have access to it.
Yes, I could have just booted up VirtualBox with Ubuntu, but instead I built a tool similar to `free` for use on MacOS, because there should be one anyway.
## Contributing
Feel free to open an issue or pull request if you find a bug.
/zipline_tej-0.0.48-cp38-cp38-win_amd64.whl/zipline/utils/events.py
from abc import ABCMeta, abstractmethod
from collections import namedtuple
import inspect
import warnings
import datetime
import numpy as np
import pandas as pd
import pytz
from toolz import curry
from zipline.utils.input_validation import preprocess
from zipline.utils.memoize import lazyval
from zipline.utils.sentinel import sentinel
from .context_tricks import nop_context
__all__ = [
"EventManager",
"Event",
"EventRule",
"StatelessRule",
"ComposedRule",
"Always",
"Never",
"AfterOpen",
"BeforeClose",
"NotHalfDay",
"NthTradingDayOfWeek",
"NDaysBeforeLastTradingDayOfWeek",
"NthTradingDayOfMonth",
"NDaysBeforeLastTradingDayOfMonth",
"StatefulRule",
"OncePerDay",
# Factory API
"date_rules",
"time_rules",
"calendars",
"make_eventrule",
]
MAX_MONTH_RANGE = 23
MAX_WEEK_RANGE = 5
def ensure_utc(time, tz="UTC"):
"""
Normalize a time. If the time is tz-naive, assume it is UTC.
"""
if not time.tzinfo:
time = time.replace(tzinfo=pytz.timezone(tz))
return time.replace(tzinfo=pytz.utc)
def _out_of_range_error(a, b=None, var="offset"):
start = 0
if b is None:
end = a - 1
else:
start = a
end = b - 1
return ValueError(
"{var} must be in between {start} and {end} inclusive".format(
var=var,
start=start,
end=end,
)
)
def _td_check(td):
seconds = td.total_seconds()
# 43200 seconds = 12 hours
if 60 <= seconds <= 43200:
return td
else:
        raise ValueError(
            "offset must be between 1 minute and 12 hours, inclusive."
        )
def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
# Filter down to just kwargs that were actually passed.
kwargs = {k: v for k, v in kwargs.items() if v is not None}
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError("Cannot pass kwargs and an offset")
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords")
def _build_date(date, kwargs):
"""
Builds the date argument for event rules.
"""
if date is None:
if not kwargs:
raise ValueError("Must pass a date or kwargs")
else:
return datetime.date(**kwargs)
elif kwargs:
raise ValueError("Cannot pass kwargs and a date")
else:
return date
# TODO: only used in tests
def _build_time(time, kwargs):
"""
Builds the time argument for event rules.
"""
tz = kwargs.pop("tz", "UTC")
if time:
if kwargs:
raise ValueError("Cannot pass kwargs and a time")
else:
return ensure_utc(time, tz)
elif not kwargs:
raise ValueError("Must pass a time or kwargs")
else:
return datetime.time(**kwargs)
@curry
def lossless_float_to_int(funcname, func, argname, arg):
"""
A preprocessor that coerces integral floats to ints.
Receipt of non-integral floats raises a TypeError.
"""
if not isinstance(arg, float):
return arg
arg_as_int = int(arg)
if arg == arg_as_int:
warnings.warn(
"{f} expected an int for argument {name!r}, but got float {arg}."
" Coercing to int.".format(
f=funcname,
name=argname,
arg=arg,
),
)
return arg_as_int
raise TypeError(arg)
class EventManager(object):
"""Manages a list of Event objects.
This manages the logic for checking the rules and dispatching to the
handle_data function of the Events.
Parameters
----------
create_context : (BarData) -> context manager, optional
An optional callback to produce a context manager to wrap the calls
to handle_data. This will be passed the current BarData.
"""
def __init__(self, create_context=None):
self._events = []
self._create_context = (
create_context
if create_context is not None
else lambda *_: nop_context
)
def add_event(self, event, prepend=False):
"""
Adds an event to the manager.
"""
if prepend:
self._events.insert(0, event)
else:
self._events.append(event)
def handle_data(self, context, data, dt):
with self._create_context(data):
for event in self._events:
event.handle_data(
context,
data,
dt,
)
class Event(namedtuple("Event", ["rule", "callback"])):
"""
An event is a pairing of an EventRule and a callable that will be invoked
with the current algorithm context, data, and datetime only when the rule
is triggered.
"""
def __new__(cls, rule, callback=None):
callback = callback or (lambda *args, **kwargs: None)
return super(cls, cls).__new__(cls, rule=rule, callback=callback)
def handle_data(self, context, data, dt):
"""
Calls the callable only when the rule is triggered.
"""
if self.rule.should_trigger(dt):
self.callback(context, data)
class EventRule(metaclass=ABCMeta):
"""A rule defining when a scheduled function should execute."""
# Instances of EventRule are assigned a calendar instance when scheduling
# a function.
_cal = None
@property
def cal(self):
return self._cal
@cal.setter
def cal(self, value):
self._cal = value
@abstractmethod
def should_trigger(self, dt):
"""
Checks if the rule should trigger with its current state.
This method should be pure and NOT mutate any state on the object.
"""
raise NotImplementedError("should_trigger")
class StatelessRule(EventRule):
"""
A stateless rule has no observable side effects.
This is reentrant and will always give the same result for the
same datetime.
Because these are pure, they can be composed to create new rules.
"""
def and_(self, rule):
"""
Logical and of two rules, triggers only when both rules trigger.
This follows the short circuiting rules for normal and.
"""
return ComposedRule(self, rule, ComposedRule.lazy_and)
__and__ = and_
class ComposedRule(StatelessRule):
"""
A rule that composes the results of two rules with some composing function.
The composing function should be a binary function that accepts the results
first(dt) and second(dt) as positional arguments.
For example, operator.and_.
If lazy=True, then the lazy composer is used instead. The lazy composer
expects a function that takes the two should_trigger functions and the
    datetime. This is useful if you don't always want to call should_trigger
for one of the rules. For example, this is used to implement the & and |
operators so that they will have the same short circuit logic that is
expected.
"""
def __init__(self, first, second, composer):
if not (
isinstance(first, StatelessRule)
and isinstance(second, StatelessRule)
):
raise ValueError("Only two StatelessRules can be composed")
self.first = first
self.second = second
self.composer = composer
def should_trigger(self, dt):
"""
Composes the two rules with a lazy composer.
"""
return self.composer(
self.first.should_trigger, self.second.should_trigger, dt
)
@staticmethod
def lazy_and(first_should_trigger, second_should_trigger, dt):
"""
Lazily ands the two rules. This will NOT call the should_trigger of the
second rule if the first one returns False.
"""
return first_should_trigger(dt) and second_should_trigger(dt)
@property
def cal(self):
return self.first.cal
@cal.setter
def cal(self, value):
# Thread the calendar through to the underlying rules.
self.first.cal = self.second.cal = value
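# Illustrative sketch of rule composition (classes from this module; `dt` is
# assumed to be a market minute with a calendar already threaded onto the
# rules via `.cal`):
#
#     rule = NthTradingDayOfWeek(0) & AfterOpen(minutes=30)
#     rule.should_trigger(dt)  # True only when both sub-rules trigger
#
# The `&` operator builds a ComposedRule around ComposedRule.lazy_and, so the
# second rule is never evaluated when the first one returns False.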
class Always(StatelessRule):
"""
A rule that always triggers.
"""
@staticmethod
def always_trigger(dt):
"""
A should_trigger implementation that will always trigger.
"""
return True
should_trigger = always_trigger
class Never(StatelessRule):
"""
A rule that never triggers.
"""
@staticmethod
def never_trigger(dt):
"""
A should_trigger implementation that will never trigger.
"""
return False
should_trigger = never_trigger
class AfterOpen(StatelessRule):
"""
A rule that triggers for some offset after the market opens.
Example that triggers after 30 minutes of the market opening:
>>> AfterOpen(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.AfterOpen object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the first minute.
)
self._period_start = None
self._period_end = None
self._period_close = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a date, find that day's open and period end (open + offset).
"""
period_start, period_close = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)
# Align the market open and close times here with the execution times
# used by the simulation clock. This ensures that scheduled functions
# trigger at the correct times.
self._period_start = self.cal.execution_time_from_open(period_start)
self._period_close = self.cal.execution_time_from_close(period_close)
self._period_end = self._period_start + self.offset - self._one_minute
def should_trigger(self, dt):
        # There are two reasons why we might want to recalculate the dates.
        # One is the first time we ever call should_trigger, when
        # self._period_start is None. The second is when we're on a new day,
        # and need to recalculate the dates. For performance reasons, we rely
        # on the fact that our clock only ever ticks forward, since it's
        # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
        # that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if self._period_start is None or self._period_close <= dt:
self.calculate_dates(dt)
return dt == self._period_end
class BeforeClose(StatelessRule):
"""
A rule that triggers for some offset time before the market closes.
Example that triggers for the last 30 minutes every day:
>>> BeforeClose(minutes=30) # doctest: +ELLIPSIS
<zipline.utils.events.BeforeClose object at ...>
"""
def __init__(self, offset=None, **kwargs):
self.offset = _build_offset(
offset,
kwargs,
datetime.timedelta(minutes=1), # Defaults to the last minute.
)
self._period_start = None
self._period_close = None
self._period_end = None
self._one_minute = datetime.timedelta(minutes=1)
def calculate_dates(self, dt):
"""
Given a dt, find that day's close and period start (close - offset).
"""
period_end = self.cal.open_and_close_for_session(
self.cal.minute_to_session_label(dt),
)[1]
# Align the market close time here with the execution time used by the
# simulation clock. This ensures that scheduled functions trigger at
# the correct times.
self._period_end = self.cal.execution_time_from_close(period_end)
self._period_start = self._period_end - self.offset
self._period_close = self._period_end
def should_trigger(self, dt):
        # There are two reasons why we might want to recalculate the dates.
        # One is the first time we ever call should_trigger, when
        # self._period_start is None. The second is when we're on a new day,
        # and need to recalculate the dates. For performance reasons, we rely
        # on the fact that our clock only ever ticks forward, since it's
        # cheaper to do dt1 <= dt2 than dt1.date() != dt2.date(). This means
        # that we will NOT correctly recognize a new date if we go backwards
        # in time (which should never happen in a simulation, or in live
        # trading).
if self._period_start is None or self._period_close <= dt:
self.calculate_dates(dt)
return self._period_start == dt
class NotHalfDay(StatelessRule):
"""
A rule that only triggers when it is not a half day.
"""
def should_trigger(self, dt):
return self.cal.minute_to_session_label(dt) not in self.cal.early_closes
class TradingDayOfWeekRule(StatelessRule, metaclass=ABCMeta):
@preprocess(n=lossless_float_to_int("TradingDayOfWeekRule"))
def __init__(self, n, invert):
if not 0 <= n < MAX_WEEK_RANGE:
raise _out_of_range_error(MAX_WEEK_RANGE)
self.td_delta = (-n - 1) if invert else n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
val = self.cal.minute_to_session_label(dt, direction="none").value
return val in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
# Group by ISO year (0) and week (1)
.groupby(sessions.map(lambda x: x.isocalendar()[0:2]))
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers on the nth trading day of the week.
This is zero-indexed, n=0 is the first trading day of the week.
"""
def __init__(self, n):
super(NthTradingDayOfWeek, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfWeek(TradingDayOfWeekRule):
"""
A rule that triggers n days before the last trading day of the week.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfWeek, self).__init__(n, invert=True)
class TradingDayOfMonthRule(StatelessRule, metaclass=ABCMeta):
@preprocess(n=lossless_float_to_int("TradingDayOfMonthRule"))
def __init__(self, n, invert):
if not 0 <= n < MAX_MONTH_RANGE:
raise _out_of_range_error(MAX_MONTH_RANGE)
if invert:
self.td_delta = -n - 1
else:
self.td_delta = n
def should_trigger(self, dt):
# is this market minute's period in the list of execution periods?
value = self.cal.minute_to_session_label(dt, direction="none").value
return value in self.execution_period_values
@lazyval
def execution_period_values(self):
# calculate the list of periods that match the given criteria
sessions = self.cal.all_sessions
return set(
pd.Series(data=sessions)
.groupby([sessions.year, sessions.month])
.nth(self.td_delta)
.astype(np.int64)
)
class NthTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers on the nth trading day of the month.
This is zero-indexed, n=0 is the first trading day of the month.
"""
def __init__(self, n):
super(NthTradingDayOfMonth, self).__init__(n, invert=False)
class NDaysBeforeLastTradingDayOfMonth(TradingDayOfMonthRule):
"""
A rule that triggers n days before the last trading day of the month.
"""
def __init__(self, n):
super(NDaysBeforeLastTradingDayOfMonth, self).__init__(n, invert=True)
# Stateful rules
class StatefulRule(EventRule):
"""
A stateful rule has state.
This rule will give different results for the same datetimes depending
on the internal state that this holds.
StatefulRules wrap other rules as state transformers.
"""
def __init__(self, rule=None):
self.rule = rule or Always()
@property
def cal(self):
return self.rule.cal
@cal.setter
def cal(self, value):
# Thread the calendar through to the underlying rule.
self.rule.cal = value
class OncePerDay(StatefulRule):
def __init__(self, rule=None):
self.triggered = False
self.date = None
self.next_date = None
super(OncePerDay, self).__init__(rule)
def should_trigger(self, dt):
if self.date is None or dt >= self.next_date:
# initialize or reset for new date
self.triggered = False
self.date = dt
# record the timestamp for the next day, so that we can use it
# to know if we've moved to the next day
self.next_date = dt + pd.Timedelta(1, unit="d")
if not self.triggered and self.rule.should_trigger(dt):
self.triggered = True
return True
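# Illustrative sketch of OncePerDay (names from this module; `dt_open` and
# `dt_later` are assumed to be minutes within the same trading day, with a
# calendar already assigned to the rule):
#
#     rule = OncePerDay(rule=AfterOpen(minutes=30))
#     rule.should_trigger(dt_open)   # True the first time the inner rule fires
#     rule.should_trigger(dt_later)  # False for the rest of that day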
# Factory API
class date_rules(object):
"""
Factories for date-based :func:`~zipline.api.schedule_function` rules.
See Also
--------
:func:`~zipline.api.schedule_function`
"""
@staticmethod
def every_day():
"""Create a rule that triggers every day.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return Always()
@staticmethod
def month_start(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days after the
start of each month.
Parameters
----------
days_offset : int, optional
Number of trading days to wait before triggering each
month. Default is 0, i.e., trigger on the first trading day of the
month.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return NthTradingDayOfMonth(n=days_offset)
@staticmethod
def month_end(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days before the
end of each month.
Parameters
----------
days_offset : int, optional
Number of trading days prior to month end to trigger. Default is 0,
i.e., trigger on the last day of the month.
Returns
-------
rule : zipline.utils.events.EventRule
"""
return NDaysBeforeLastTradingDayOfMonth(n=days_offset)
@staticmethod
def week_start(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days after the
start of each week.
Parameters
----------
days_offset : int, optional
Number of trading days to wait before triggering each week. Default
is 0, i.e., trigger on the first trading day of the week.
"""
return NthTradingDayOfWeek(n=days_offset)
@staticmethod
def week_end(days_offset=0):
"""
Create a rule that triggers a fixed number of trading days before the
end of each week.
Parameters
----------
days_offset : int, optional
Number of trading days prior to week end to trigger. Default is 0,
i.e., trigger on the last trading day of the week.
"""
return NDaysBeforeLastTradingDayOfWeek(n=days_offset)
class time_rules(object):
"""Factories for time-based :func:`~zipline.api.schedule_function` rules.
See Also
--------
:func:`~zipline.api.schedule_function`
"""
@staticmethod
def market_open(offset=None, hours=None, minutes=None):
"""
Create a rule that triggers at a fixed offset from market open.
The offset can be specified either as a :class:`datetime.timedelta`, or
as a number of hours and minutes.
Parameters
----------
offset : datetime.timedelta, optional
If passed, the offset from market open at which to trigger. Must be
at least 1 minute.
hours : int, optional
If passed, number of hours to wait after market open.
minutes : int, optional
If passed, number of minutes to wait after market open.
Returns
-------
rule : zipline.utils.events.EventRule
Notes
-----
If no arguments are passed, the default offset is one minute after
market open.
If ``offset`` is passed, ``hours`` and ``minutes`` must not be
passed. Conversely, if either ``hours`` or ``minutes`` are passed,
``offset`` must not be passed.
"""
return AfterOpen(offset=offset, hours=hours, minutes=minutes)
@staticmethod
def market_close(offset=None, hours=None, minutes=None):
"""
Create a rule that triggers at a fixed offset from market close.
The offset can be specified either as a :class:`datetime.timedelta`, or
as a number of hours and minutes.
Parameters
----------
offset : datetime.timedelta, optional
If passed, the offset from market close at which to trigger. Must
be at least 1 minute.
hours : int, optional
If passed, number of hours to wait before market close.
minutes : int, optional
If passed, number of minutes to wait before market close.
Returns
-------
rule : zipline.utils.events.EventRule
Notes
-----
If no arguments are passed, the default offset is one minute before
market close.
If ``offset`` is passed, ``hours`` and ``minutes`` must not be
passed. Conversely, if either ``hours`` or ``minutes`` are passed,
``offset`` must not be passed.
"""
return BeforeClose(offset=offset, hours=hours, minutes=minutes)
every_minute = Always
class calendars(object):
US_EQUITIES = sentinel("US_EQUITIES")
US_FUTURES = sentinel("US_FUTURES")
def _invert(d):
return dict(zip(d.values(), d.keys()))
_uncalled_rules = _invert(vars(date_rules))
_uncalled_rules.update(_invert(vars(time_rules)))
def _check_if_not_called(v):
try:
name = _uncalled_rules[v]
except KeyError:
if not (inspect.isclass(v) and issubclass(v, EventRule)):
return
name = getattr(v, "__name__", None)
msg = "invalid rule: %r" % (v,)
if name is not None:
msg += " (hint: did you mean %s())" % name
raise TypeError(msg)
def make_eventrule(date_rule, time_rule, cal, half_days=True):
"""
Constructs an event rule from the factory api.
"""
_check_if_not_called(date_rule)
_check_if_not_called(time_rule)
if half_days:
inner_rule = date_rule & time_rule
else:
inner_rule = date_rule & time_rule & NotHalfDay()
opd = OncePerDay(rule=inner_rule)
# This is where a scheduled function's rule is associated with a calendar.
opd.cal = cal
return opd
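# Illustrative sketch of the factory API (assumes `cal` is a trading calendar
# instance and `dt` a market minute):
#
#     rule = make_eventrule(
#         date_rules.week_start(days_offset=1),
#         time_rules.market_open(minutes=30),
#         cal,
#         half_days=False,
#     )
#     rule.should_trigger(dt)  # True once per session when both rules match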
/trytond_stock_consignment-6.8.0.tar.gz/trytond_stock_consignment-6.8.0/stock.py
from functools import wraps
from trytond.model import ModelView, Workflow, fields
from trytond.modules.product import round_price
from trytond.pool import Pool, PoolMeta
from trytond.pyson import Eval
from trytond.transaction import Transaction
class Location(metaclass=PoolMeta):
__name__ = 'stock.location'
consignment_party = fields.Many2One(
'party.party', "Consignment Party",
states={
'invisible': ~Eval('type').in_(['supplier', 'storage']),
},
help="The party invoiced when consignment stock is used.")
@classmethod
def __setup__(cls):
super().__setup__()
cls.lost_found_location.states['invisible'] &= (
~Eval('type').in_(['supplier', 'customer']))
@classmethod
def _parent_domain(cls):
domain = super(Location, cls)._parent_domain()
domain['supplier'].append('storage')
domain['storage'].append('customer')
return domain
@property
def lost_found_used(self):
lost_found = super().lost_found_used
if not lost_found and not self.warehouse and self.type == 'storage':
location = self.parent
while location:
if location.type in {'supplier', 'storage'}:
lost_found = location.lost_found_location
break
location = location.parent
return lost_found
class LocationLeadTime(metaclass=PoolMeta):
__name__ = 'stock.location.lead_time'
@classmethod
def __setup__(cls):
super(LocationLeadTime, cls).__setup__()
cls.warehouse_to.domain = ['OR',
cls.warehouse_to.domain,
('type', '=', 'storage'),
]
def set_origin_consignment(func):
@wraps(func)
def wrapper(cls, moves):
pool = Pool()
InvoiceLine = pool.get('account.invoice.line')
to_save = []
move2line = {}
for move in moves:
if not move.consignment_invoice_lines:
lines = move.get_invoice_lines_consignment()
if lines:
to_save.extend(lines)
move2line[move] = lines[0]
if to_save:
InvoiceLine.save(to_save)
for move, line in move2line.items():
if not move.origin:
move.origin = line
if move.unit_price is None:
move.unit_price = line.unit_price
move.currency = line.currency
cls.save(list(move2line.keys()))
return func(cls, moves)
return wrapper
def unset_origin_consignment(func):
@wraps(func)
def wrapper(cls, moves):
pool = Pool()
InvoiceLine = pool.get('account.invoice.line')
lines, to_save = [], []
for move in moves:
for invoice_line in move.consignment_invoice_lines:
lines.append(invoice_line)
                # clear the origin if it points at the line being deleted
                if move.origin == invoice_line:
                    move.origin = None
                    to_save.append(move)
if lines:
InvoiceLine.delete(lines)
cls.save(to_save)
return func(cls, moves)
return wrapper
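# The two decorators above bracket the workflow transitions defined on Move
# below: set_origin_consignment creates consignment invoice lines when a move
# is assigned or done, and unset_origin_consignment deletes them again when
# the move is reset to draft or cancelled. Illustrative flow (sketch):
#
#     Move.assign(moves)  # invoice lines created, move.origin/unit_price set
#     Move.draft(moves)   # invoice lines deleted, move.origin cleared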
class Move(metaclass=PoolMeta):
__name__ = 'stock.move'
consignment_invoice_lines = fields.One2Many(
'account.invoice.line', 'origin', "Consignment Invoice Lines",
readonly=True,
states={
'invisible': ~Eval('consignment_invoice_lines'),
})
@fields.depends('state', 'from_location', 'to_location')
def on_change_with_unit_price_required(self, name=None):
required = super().on_change_with_unit_price_required(name)
if (required
and self.state in {'staging', 'draft'}
and self.from_location
and self.to_location
and ((
self.from_location.type == 'supplier'
and self.to_location.type in {
'storage', 'production', 'customer'})
or (self.from_location.type in {
'storage', 'production', 'supplier'}
and self.to_location.type == 'customer'))
and self.from_location.consignment_party):
required = False
return required
@classmethod
def _get_origin(cls):
return super(Move, cls)._get_origin() + ['account.invoice.line']
@fields.depends('from_location')
def on_change_with_assignation_required(self, name=None):
required = super(Move, self).on_change_with_assignation_required(
name=name)
if self.from_location:
if (self.from_location.type == 'supplier'
and self.from_location.warehouse):
required = True
return required
@property
def is_supplier_consignment(self):
return (self.from_location.type == 'supplier'
and self.to_location.type in {'storage', 'production', 'customer'}
and self.from_location.consignment_party)
@property
def is_customer_consignment(self):
return (
self.from_location.type in {'storage', 'production', 'supplier'}
and self.to_location.type == 'customer'
and self.from_location.consignment_party)
def get_invoice_lines_consignment(self):
lines = []
if self.is_supplier_consignment:
lines.append(self._get_supplier_invoice_line_consignment())
if self.is_customer_consignment:
lines.append(self._get_customer_invoice_line_consignment())
return lines
def _get_supplier_invoice_line_consignment(self):
pool = Pool()
InvoiceLine = pool.get('account.invoice.line')
Product = pool.get('product.product')
ProductSupplier = pool.get('purchase.product_supplier')
with Transaction().set_context(
supplier=self.from_location.consignment_party.id):
pattern = ProductSupplier.get_pattern()
for product_supplier in self.product.product_suppliers_used(**pattern):
currency = product_supplier.currency
break
else:
currency = self.company.currency
line = InvoiceLine()
line.invoice_type = 'in'
line.type = 'line'
line.company = self.company
line.party = self.from_location.consignment_party
line.currency = currency
line.product = self.product
line.quantity = self.quantity
line.unit = self.uom
line.stock_moves = [self]
line.origin = self
line.on_change_product()
with Transaction().set_context(
currency=line.currency.id,
supplier=line.party.id,
uom=line.unit,
taxes=[t.id for t in line.taxes]):
line.unit_price = Product.get_purchase_price(
[line.product], line.quantity)[line.product.id]
if line.unit_price is not None:
line.unit_price = round_price(line.unit_price)
return line
def _get_customer_invoice_line_consignment(self):
pool = Pool()
InvoiceLine = pool.get('account.invoice.line')
Product = pool.get('product.product')
line = InvoiceLine()
line.invoice_type = 'out'
line.type = 'line'
line.company = self.company
line.party = self.from_location.consignment_party
line.currency = self.company.currency
line.product = self.product
line.quantity = self.quantity
line.unit = self.uom
line.stock_moves = [self]
line.origin = self
line.on_change_product()
with Transaction().set_context(
currency=line.currency.id,
customer=line.party.id,
uom=line.unit,
taxes=[t.id for t in line.taxes]):
line.unit_price = Product.get_sale_price(
[line.product], line.quantity)[line.product.id]
if line.unit_price is not None:
line.unit_price = round_price(line.unit_price)
return line
@classmethod
@ModelView.button
@Workflow.transition('draft')
@unset_origin_consignment
def draft(cls, moves):
super(Move, cls).draft(moves)
@classmethod
@ModelView.button
@Workflow.transition('assigned')
@set_origin_consignment
def assign(cls, moves):
super(Move, cls).assign(moves)
@classmethod
@ModelView.button
@Workflow.transition('done')
@set_origin_consignment
def do(cls, moves):
super(Move, cls).do(moves)
@classmethod
@ModelView.button
@Workflow.transition('cancelled')
@unset_origin_consignment
def cancel(cls, moves):
super(Move, cls).cancel(moves)
@classmethod
def copy(cls, moves, default=None):
pool = Pool()
InvoiceLine = pool.get('account.invoice.line')
moves = super(Move, cls).copy(moves, default=default)
if not Transaction().context.get('_stock_move_split'):
to_save = []
for move in moves:
if isinstance(move.origin, InvoiceLine):
move.origin = None
to_save.append(move)
if to_save:
cls.save(to_save)
return moves
class ShipmentInternal(metaclass=PoolMeta):
__name__ = 'stock.shipment.internal'
@classmethod
def __setup__(cls):
super(ShipmentInternal, cls).__setup__()
cls.from_location.domain = ['OR',
cls.from_location.domain,
('type', '=', 'supplier'),
]
cls.to_location.domain = ['OR',
cls.to_location.domain,
('type', 'in', ['supplier', 'customer']),
]
@fields.depends('to_location')
def on_change_with_planned_start_date(self, pattern=None):
if pattern is None:
pattern = {}
if self.to_location and not self.to_location.warehouse:
pattern.setdefault('location_to', self.to_location.id)
return super(ShipmentInternal, self).on_change_with_planned_start_date(
pattern=pattern)
class Inventory(metaclass=PoolMeta):
__name__ = 'stock.inventory'
@classmethod
def __setup__(cls):
super(Inventory, cls).__setup__()
cls.location.domain = ['OR',
cls.location.domain,
('type', '=', 'supplier'),
]
class OrderPoint(metaclass=PoolMeta):
__name__ = 'stock.order_point'
@classmethod
def __setup__(cls):
super(OrderPoint, cls).__setup__()
cls.provisioning_location.domain = ['OR',
cls.provisioning_location.domain,
('type', '=', 'supplier'),
]
/wipo_gbd_pypers-2.1.27-py3-none-any.whl/pypers/execs.py
import argparse
import sys
import os
import getpass
import glob
import re
import time
import traceback
import locale
import signal
from threading import Thread
from pypers.common import build_command_parser, apply_custom
from pypers.cli import STAGE
from pypers.cli.snapshot import Snapshot
from pypers.cli.stage_ori import StageOri
from pypers.cli.stage_gbd import StageGBD
from pypers.cli.stage_idx import StageIDX, StageIDXFromBucket
from pypers.cli.publish import Publish
from pypers.cli.ecs_manager import ECSManager
from pypers.cli.dynamo_manager import DynamoManager
from pypers.utils.execute import run_as
from pypers.core.pipelines import Pipeline
from pypers.core.step import Step
from pypers.core.supervisor import get_status_manager, SupervisorServer
from pypers.core.interfaces import msgbus, db
supervisor = None
def _end_docker(_signal, _frame):
global supervisor
if supervisor:
supervisor.stop()
signal.signal(signal.SIGINT, signal.SIG_DFL)
def bnd_docker():
global supervisor
configs = [
{
'name': ['--no-monitor'],
'dest': 'no_supervisor',
'help': 'Do not monitor',
'action': 'store_true',
'default': False
}]
args = build_command_parser(configs, '')
supervised = not args.no_supervisor
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
locale.setlocale(locale.LC_CTYPE, 'en_US.UTF-8')
user = getpass.getuser()
# init the singleton
get_status_manager(supervised=supervised).set_sanity()
get_status_manager().set_status(busy=False)
# trigger the sanity check endpoints
signal.signal(signal.SIGINT, _end_docker)
while get_status_manager().keep_alive():
get_status_manager().set_sanity()
body, id = msgbus.get_msg_bus().get_messges()
if body and body.get('runid', None):
msgbus.get_msg_bus().delete_message(id)
get_status_manager().set_status(busy=True)
#time.sleep(10)
try:
if body.get('index', None):
cfg = db.get_db().get_run_id_config(body.get('runid'),
body.get('collection'))
# create a step
step_index = Step.load_step(
body.get('runid'), body.get('collection'),
body.get('step'), sub_step=body.get('index'))
if not os.path.exists(step_index.output_dir):
os.makedirs(step_index.output_dir, 0o775)
# remove existing files, except step config
full_list = glob.glob(step_index.output_dir + "/*")
                    regex = re.compile(r"(input\.json|.*\.cfg)")
to_remove = filter(lambda f: not regex.search(f), full_list)
for entry in to_remove:
cmd = ['rm', '-rvf', entry]
(ec, err, out) = run_as(cmd=cmd, user=user)
if ec:
step_index.log.warning("failed to remove file %s: %s, %s" % (
entry, err, out))
else:
step_index.log.info("Removed %s" % entry)
# run step
step_index.run()
p = Pipeline(cfg.get('pipeline_configuration'))
p.load_completed()
step = Step.load_step(
body.get('runid'), body.get('collection'),
body.get('step'))
p.running[body.get('step')] = step
p.update_status()
p.parse_next()
msgbus.get_msg_bus().delete_message(id)
elif body.get('step', None):
cfg = db.get_db().get_run_id_config(body.get('runid'),
body.get('collection'))
p = Pipeline(cfg.get('pipeline_configuration'))
p.load_completed()
p.run_step(body.get('step'))
msgbus.get_msg_bus().delete_message(id)
else:
try:
config = Pipeline.load_cfg_from_db(body.get('collection'))
if 'sys_path' not in config:
config['sys_path'] = os.path.dirname(
os.path.realpath(body.get('collection')))
except Exception as e1:
raise e1
customs = ['pipeline.run_id=%s' % body.get('runid'),
'pipeline.type=%s' % body.get('type'),
'pipeline.forced_restarted=%s' % body.get('force_restart')]
if body.get('custom_config'):
customs.extend(body.get('custom_config'))
apply_custom(config, customs)
p = Pipeline(config, reset_logs=str(body.get('force_restart'))=='True')
if str(body.get('force_restart'))=='True':
msgbus.get_msg_bus().reset_history(p.run_id, p.collection)
p.load_completed(restart=str(body.get('force_restart'))=='True')
p.log.info("Running the pipeline...")
p.run()
except Exception as e:
prefix = "Will not retry: "
if body.get('retry', False) is False:
msgbus.get_msg_bus().send_message(body.get('runid'),
type=body.get('type'),
step=body.get('step'),
index=body.get('index'),
collection=body.get('collection'),
restart=body.get('force_restart'),
restart_step=True,
custom_config=body.get('custom_config'),
retry=True)
prefix = 'Will retry: '
else:
collection = body.get('collection')
if '_' in collection:
collection = collection.split('_')[-1]
db.get_operation_db().completed(
body.get('runid'),
collection,
success=False)
db.get_db_error().send_error(body.get('runid'), body.get('collection'), body,
"%s - %s- %s" % (prefix, id, traceback.format_exc()))
break
get_status_manager().set_status(busy=False)
time.sleep(0.5)
def gbd_submit():
doc = """
Submit a pipeline to the cluster
"""
configs = [
{
'name': ['pipeline_name'],
'type': str,
'help': 'the configuration name of the pipeline run in the database'
},
{
'name': ['run_id'],
'type': str,
'help': 'the run id'
},
{
'name': ['type'],
'type': str,
'help': 'the pipeline type'
},
{
'name': ['collection'],
'type': str,
'help': 'the collection name'
},
{
'name': ['--restart'],
'dest': 'restart',
'help': 'restart a pipeline from fetch',
'action': 'store_true',
'default': False
},
{
'name': ['custom'],
'type': str,
'metavar': 'SECTION.PARAM=VALUE',
'nargs': argparse.REMAINDER,
'default': getpass.getuser(),
'help': 'custom configuration to apply on top of configuration file.\n'
'SECTION must be a subsection of the \'config\' section\n'
'(several levels can be specified: SEC.SUBSEC.SUBSUBSEC, etc.'
')\nPARAM is any parameter in this section'
}]
args = build_command_parser(configs, doc)
if args.pipeline_name == 'operations':
collection = 'operations'
elif args.pipeline_name == 'fetch':
collection = args.collection
else:
collection = "%s_%s" % (args.pipeline_name, args.collection)
override_output = False
pipeline_type = args.type or ('brands' if collection.endswith('tm') else 'designs')
for item in args.custom:
if 'pipeline.output_dir' in item:
override_output = True
break
if not override_output and os.environ.get('WORK_DIR', None):
output_dir = os.path.join(os.environ['WORK_DIR'],
args.run_id,
pipeline_type,
args.collection)
os.makedirs(output_dir, exist_ok=True)
args.custom.append('pipeline.output_dir=%s' % output_dir)
msgbus.get_msg_bus().send_message(args.run_id,
type=pipeline_type,
collection=collection,
restart=args.restart,
custom_config=args.custom)
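# Illustrative invocation via a console script (the script name `gbd-submit`
# and all argument values below are assumptions):
#
#     gbd-submit fetch 2023-05-01 brands ustm pipeline.output_dir=/data/out
#
# which resolves the collection name, prepares the output directory and
# publishes the run request on the message bus.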
def gbd_stage_snapshot():
doc = """
    Creates a snapshot from the db in order to perform staging
"""
configs = [
{
'name': ['type'],
'type': str,
'help': 'the pipeline type'
},
{
'name': ['collection'],
'type': str,
'help': 'the collection name'
},
{
'name': ['-o'],
'type': str,
'help': 'the path to snapshot',
'default': './'
}
]
args = build_command_parser(configs, doc)
snapshot = Snapshot(args.collection, args.type)
snapshot.collect_data(path=args.o)
def gbd_stage_ori():
doc = """
Gets ori files for staging
"""
configs = [
{
'name': ['snapshot'],
'type': str,
'help': 'the snapshot to run'
},
{
'name': ['stage_type'],
'type': str,
'choices': [i for i in STAGE.get_choices()],
'help': 'the stage type'
}
]
args = build_command_parser(configs, doc)
executor = StageOri(args.snapshot, args.stage_type)
executor.stage()
def gbd_stage_publish():
doc = """
Publishes the sync message to SQS when the end-to-end pypers pipelines are done.
"""
configs = [
{
'name': ['run_id'],
'type': str,
            'help': 'the run id to watch to know when the pypers operation is done'
}
]
args = build_command_parser(configs, doc)
executor = Publish(args.run_id)
executor.publish()
def gbd_stage_gbd():
doc = """
Gets gbd files for staging
"""
configs = [
{
'name': ['snapshot'],
'type': str,
'help': 'the snapshot to run'
},
{
'name': ['stage_type'],
'type': str,
'choices': [i for i in STAGE.get_choices()],
'help': 'the stage type'
}
]
args = build_command_parser(configs, doc)
executor = StageGBD(args.snapshot, args.stage_type)
executor.stage()
def gbd_stage_idx():
doc = """
    Gets idx files for staging
"""
configs = [
{
'name': ['--snapshot'],
'type': str,
'help': 'the snapshot to run'
},
{
'name': ['--type'],
'type': str,
'help': 'the pipeline type'
},
{
'name': ['--collection'],
'type': str,
'help': 'the collection to get the files from bucket',
},
{
'name': ['--run_id'],
'type': str,
'help': 'the run id'
},
]
args = build_command_parser(configs, doc)
if not (args.snapshot or (args.collection and args.type and args.run_id)):
raise Exception("Please complete the command: A snapshot file or a (type, collection, run id)")
if args.snapshot:
executor = StageIDX(args.snapshot, None)
executor.stage()
else:
executor = StageIDXFromBucket(args.type, args.collection, args.run_id)
executor.stage()
def gbd_ecs_manager():
doc = """
Manages the ecs cluster
"""
configs = [
{
'name': ['action'],
'type': str,
'choices': ['start', 'stop', 'info'],
'help': 'Action to perform'
},
{
'name': ['--cluster'],
'default': 'gbd-solr-ecs-cluster',
'type': str,
            'help': 'The cluster name to perform the action on'
},
{
'name': ['--service'],
'type': str,
'choices': ['blue', 'green', 'solr', 'etl-etl'],
'help': 'Service Name to perform the action on'
},
{
'name': ['--nb_tasks'],
'type': int,
'default': 1,
'help': 'The number of tasks to be started per service'
}
]
args = build_command_parser(configs, doc)
if not args.action or (args.action != 'info' and not args.service):
raise Exception("Please complete the command!")
if args.action == 'info':
ECSManager.info_cluster(args.cluster)
if args.action == 'start':
if args.service == 'solr':
ECSManager.start_service(args.cluster, anti_pattern=['blue', 'green'], nb_tasks=args.nb_tasks)
else:
ECSManager.start_service(args.cluster, pattern=[args.service], nb_tasks=args.nb_tasks)
if args.action == 'stop':
if args.service == 'solr':
ECSManager.stop_service(args.cluster, anti_pattern=['blue', 'green'])
else:
ECSManager.stop_service(args.cluster, pattern=[args.service])
def gbd_dynamo_manager():
doc = """
Manages the dynamo db
"""
configs = [
{
'name': ['table'],
'type': str,
            'help': 'The table to update'
},
{
'name': ['write_cap'],
'type': int,
'help': 'The write capacity desired'
},
{
'name': ['read_cap'],
'type': int,
'help': 'The read capacity desired'
},
]
args = build_command_parser(configs, doc)
DynamoManager.update_capacity(args.table, args.write_cap, args.read_cap)
/hightea-plotting-0.3.5.tar.gz/hightea-plotting-0.3.5/README.md
# hightea-plotting
Plotting routines for `hightea` library.
Documentation is available [online](https://hightea-plotting.readthedocs.io/en/latest/).
Simplest usage in Python code:
```
import hightea.plotting as hyt
hyt.plot('tests/input/simple1d.json')
```
The plot function can take any arguments in the appropriate data format,
along with a number of keyword arguments to manage plot features.
Data storage is organised in the Run class, which can be imported directly
through `hyt.Run`.
A Run can be initialised from a file (`xml`, `csv`, `json`) or from a dictionary.
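A slightly longer sketch (the `Run`-from-file constructor is inferred from the description above, and the `title` keyword is an illustrative assumption, not a documented parameter):
```
import hightea.plotting as hyt
run = hyt.Run('tests/input/simple1d.json')  # or a dict with the same structure
hyt.plot(run, title='example')
```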
To make a contribution, report a bug, or suggest improvements,
please open an issue or email us directly at [email protected].
/jellyfin_mpv_shim-2.6.0-py3-none-any.whl/jellyfin_mpv_shim/utils.py
import socket
import ipaddress
import requests
import urllib.parse
from threading import Lock
import logging
import sys
import os.path
import platform
from .conf import settings
from datetime import datetime
from functools import wraps
from .constants import USER_APP_NAME
from .i18n import _
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from jellyfin_apiclient_python import JellyfinClient as JellyfinClient_type
log = logging.getLogger("utils")
seq_num = 0
seq_num_lock = Lock()
class Timer(object):
def __init__(self):
self.started = datetime.now()
def restart(self):
self.started = datetime.now()
def elapsed_ms(self):
return self.elapsed() * 1e3
def elapsed(self):
return (datetime.now() - self.started).total_seconds()
def synchronous(tlockname: str):
"""
A decorator to place an instance based lock around a method.
From: http://code.activestate.com/recipes/577105-synchronization-decorator-for-class-methods/
"""
def _synched(func):
@wraps(func)
def _synchronizer(self, *args, **kwargs):
tlock = self.__getattribute__(tlockname)
tlock.acquire()
try:
return func(self, *args, **kwargs)
finally:
tlock.release()
return _synchronizer
return _synched
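# Illustrative usage of @synchronous (sketch; assumes the instance owns a
# threading.Lock under the named attribute):
#
#     class Playlist:
#         def __init__(self):
#             self._lock = Lock()
#
#         @synchronous("_lock")
#         def advance(self):
#             ...  # body runs with self._lock held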
def is_local_domain(client: "JellyfinClient_type"):
# With Jellyfin, it is significantly more likely the user will be using
# an address that is a hairpin NAT. We want to detect this and avoid
# imposing limits in this case.
url = client.config.data.get("auth.server", "")
domain = urllib.parse.urlparse(url).hostname
addr_info = socket.getaddrinfo(domain, 8096)[0]
ip = addr_info[4][0]
is_local = ipaddress.ip_address(ip).is_private
if not is_local:
if addr_info[0] == socket.AddressFamily.AF_INET:
try:
wan_ip = requests.get(
"https://checkip.amazonaws.com/", timeout=(3, 10)
).text.strip("\r\n")
return ip == wan_ip
except Exception:
log.warning(
"checkip.amazonaws.com is unavailable. Assuming potential WAN ip is remote.",
exc_info=True,
)
return False
elif addr_info[0] == socket.AddressFamily.AF_INET6:
return False
return True
def mpv_color_to_plex(color: str):
return "#" + color.lower()[3:]
def plex_color_to_mpv(color: str):
return "#FF" + color.upper()[1:]
def get_profile(
is_remote: bool = False,
video_bitrate: Optional[int] = None,
force_transcode: bool = False,
is_tv: bool = False,
):
if video_bitrate is None:
if is_remote:
video_bitrate = settings.remote_kbps
else:
video_bitrate = settings.local_kbps
if settings.force_video_codec:
transcode_codecs = settings.force_video_codec
elif settings.allow_transcode_to_h265:
transcode_codecs = "h264,h265,hevc,mpeg4,mpeg2video"
elif settings.prefer_transcode_to_h265:
transcode_codecs = "h265,hevc,h264,mpeg4,mpeg2video"
else:
transcode_codecs = "h264,mpeg4,mpeg2video"
if settings.force_audio_codec:
audio_transcode_codecs = settings.force_audio_codec
else:
audio_transcode_codecs = "aac,mp3,ac3,opus,flac,vorbis"
profile = {
"Name": USER_APP_NAME,
"MaxStreamingBitrate": video_bitrate * 1000,
"MaxStaticBitrate": video_bitrate * 1000,
"MusicStreamingTranscodingBitrate": 1280000,
"TimelineOffsetSeconds": 5,
"TranscodingProfiles": [
{"Type": "Audio"},
{
"Container": "ts",
"Type": "Video",
"Protocol": "hls",
"AudioCodec": audio_transcode_codecs,
"VideoCodec": transcode_codecs,
"MaxAudioChannels": "6",
},
{"Container": "jpeg", "Type": "Photo"},
],
"DirectPlayProfiles": [{"Type": "Video"}, {"Type": "Audio"}, {"Type": "Photo"}],
"ResponseProfiles": [],
"ContainerProfiles": [],
"CodecProfiles": [],
"SubtitleProfiles": [
{"Format": "srt", "Method": "External"},
{"Format": "srt", "Method": "Embed"},
{"Format": "ass", "Method": "External"},
{"Format": "ass", "Method": "Embed"},
{"Format": "sub", "Method": "Embed"},
{"Format": "sub", "Method": "External"},
{"Format": "ssa", "Method": "Embed"},
{"Format": "ssa", "Method": "External"},
{"Format": "smi", "Method": "Embed"},
{"Format": "smi", "Method": "External"},
# Jellyfin currently refuses to serve these subtitle types as external.
{"Format": "pgssub", "Method": "Embed"},
# {
# "Format": "pgssub",
# "Method": "External"
# },
{"Format": "dvdsub", "Method": "Embed"},
{"Format": "dvbsub", "Method": "Embed"},
# {
# "Format": "dvdsub",
# "Method": "External"
# },
{"Format": "pgs", "Method": "Embed"},
# {
# "Format": "pgs",
# "Method": "External"
# }
],
}
if settings.transcode_hi10p:
profile["CodecProfiles"].append(
{
"Type": "Video",
"Conditions": [
{
"Condition": "LessThanEqual",
"Property": "VideoBitDepth",
"Value": "8",
}
],
}
)
if settings.transcode_dolby_vision:
profile["CodecProfiles"].append(
{
"Type": "Video",
"Conditions": [
{
"Condition": "NotEquals",
"Property": "VideoRangeType",
"Value": "DOVI",
}
],
}
)
if settings.transcode_hdr:
profile["CodecProfiles"].append(
{
"Type": "Video",
"Conditions": [
{
"Condition": "Equals",
"Property": "VideoRangeType",
"Value": "SDR",
}
],
}
)
if settings.always_transcode or force_transcode:
profile["DirectPlayProfiles"] = []
if is_tv:
profile["TranscodingProfiles"].insert(
0,
{
"Container": "ts",
"Type": "Video",
"AudioCodec": "mp3,aac",
"VideoCodec": "h264",
"Context": "Streaming",
"Protocol": "hls",
"MaxAudioChannels": "2",
"MinSegments": "1",
"BreakOnNonKeyFrames": True,
},
)
return profile
def get_sub_display_title(stream: dict):
return "{0}{1} ({2})".format(
stream.get("Language", _("Unkn")).capitalize(),
_(" Forced") if stream.get("IsForced") else "",
stream.get("Codec"),
)
def get_seq():
global seq_num
seq_num_lock.acquire()
current = seq_num
seq_num += 1
seq_num_lock.release()
return current
def none_fallback(value, fallback):
if value is None:
return fallback
return value
def get_resource(*path):
# Detect if bundled via pyinstaller.
# From: https://stackoverflow.com/questions/404744/
if getattr(sys, "_MEIPASS", False):
application_path = os.path.join(getattr(sys, "_MEIPASS"), "jellyfin_mpv_shim")
else:
application_path = os.path.dirname(os.path.abspath(__file__))
# ! Test code for Mac
if getattr(sys, "frozen", False) and platform.system() == "Darwin":
application_path = os.path.join(os.path.dirname(sys.executable), "../Resources")
return os.path.join(application_path, *path)
def get_text(*path):
with open(get_resource(*path)) as fh:
return fh.read()
/wayround_org_webserver-0.3.tar.gz/wayround_org_webserver-0.3/wayround_org/webserver/application.py
import importlib
import logging
"""
module structure description:
a wayround_org_webserver application module must provide two things:
    1. a class named 'WebServerAppModule', whose constructor takes the
       following parameters:
1. reference to webserver instance;
2. reference to socket pool
3. reference to application pool
4. dict with parameters passed from configuration file
    2. WebServerAppModule class instances must have callable methods:
1. 'callable_for_webserver' which must have following parameters:
# first part of parameters
# parameters passed by SocketServer
transaction_id,
serv,
serv_stop_event,
sock,
addr,
# second part of parameters
# WebServer socket and application instances
ws_socket_inst,
ws_application_inst,
# third part of parameters
# header parsing result (as WebServer reads and parses
            # header manually to retrieve the Host parameter)
header_bytes,
line_terminator,
request_line_parsed,
header_fields
2. 'start' - called on application start() called
3. 'stop' - called on application stop() called
4. 'wait' - called on application wait() called
"""
class Application:
"""
    This class (and its instances) is not intended for direct initialization;
    instances are created, used and destroyed by the application Pool class.
"""
def __init__(
self,
application_data_dict,
web_server_inst,
application_pool_inst,
socket_pool_inst
):
self.name = application_data_dict['name']
self.domain = application_data_dict['domain']
self.module = application_data_dict['module']
self.module_parameters = {}
if 'module_parameters' in application_data_dict:
self.module_parameters = application_data_dict['module_parameters']
self.module_inst = None
self._web_server_inst = web_server_inst
self._application_pool_inst = application_pool_inst
self._socket_pool_inst = socket_pool_inst
self._load_module(web_server_inst)
return
def _load_module(self, web_server_inst):
"""
result: True - Ok, False - Error
"""
ret = True
module = None
module_name = 'wayround_org.webserver.modules.{}'.format(self.module)
try:
module = importlib.import_module(module_name)
        except Exception:
logging.exception(
"Error loading module `{}'".format(module_name)
)
ret = False
if ret:
        if not hasattr(module, 'WebServerAppModule'):
            # no exception is in flight here, so use logging.error
            logging.error(
                "module `{}' has no WebServerAppModule member".format(
                    module_name
                )
            )
            ret = False
if ret:
self.module_inst = module.WebServerAppModule(
web_server_inst,
self._socket_pool_inst,
self._application_pool_inst,
self.module_parameters
)
return ret
def start(self):
if self.module_inst:
self.module_inst.start()
return
def stop(self):
if self.module_inst:
self.module_inst.stop()
return
def wait(self):
if self.module_inst:
self.module_inst.wait()
return
class Pool:
def __init__(self, cfg, web_server_inst, socket_pool_inst):
self._application_pool = {}
for i in cfg['applications']:
d = Application(i, web_server_inst, self, socket_pool_inst)
self._application_pool[d.name] = d
return
def start(self):
for i in self._application_pool.values():
i.start()
return
def stop(self):
for i in self._application_pool.values():
i.stop()
return
def wait(self):
for i in self._application_pool.values():
i.wait()
return
def get_by_name(self, name):
return self._application_pool.get(name, None)
def find_by_domain(self, domain):
ret = []
for i in list(self._application_pool.values()):
if i.domain == domain:
ret.append(i)
return ret
/custom-awscli-1.27.51.tar.gz/custom-awscli-1.27.51/awscli/examples/textract/start-document-analysis.rst
**To start analyzing text in a multi-page document**
The following ``start-document-analysis`` example shows how to start asynchronous analysis of text in a multi-page document.
Linux/macOS::
aws textract start-document-analysis \
--document-location '{"S3Object":{"Bucket":"bucket","Name":"document"}}' \
--feature-types '["TABLES","FORMS"]' \
--notification-channel "SNSTopicArn=arn:snsTopic,RoleArn=roleArn"
Windows::
    aws textract start-document-analysis ^
        --document-location "{\"S3Object\":{\"Bucket\":\"bucket\",\"Name\":\"document\"}}" ^
        --feature-types "[\"TABLES\", \"FORMS\"]" ^
        --region region-name ^
        --notification-channel "SNSTopicArn=arn:snsTopic,RoleArn=roleArn"
Output::
{
"JobId": "df7cf32ebbd2a5de113535fcf4d921926a701b09b4e7d089f3aebadb41e0712b"
}
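Once the job completes (a notification is published to the given SNS topic), the results can be retrieved by passing the returned ``JobId`` to ``get-document-analysis``, for example::
    aws textract get-document-analysis \
        --job-id df7cf32ebbd2a5de113535fcf4d921926a701b09b4e7d089f3aebadb41e0712b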
For more information, see `Detecting and Analyzing Text in Multi-Page Documents`_ in the *Amazon Textract Developer Guide*.
.. _`Detecting and Analyzing Text in Multi-Page Documents`: https://docs.aws.amazon.com/textract/latest/dg/async.html
/simapy-4.4.3.tar.gz/simapy-4.4.3/src/sima/hydro/frequencydependentdamping.py
from typing import Dict,Sequence,List
from dmt.blueprint import Blueprint
from .blueprints.frequencydependentdamping import FrequencyDependentDampingBlueprint
from numpy import ndarray,asarray
from sima.hydro.twodofdata import TwoDofData
from sima.sima.moao import MOAO
from sima.sima.scriptablevalue import ScriptableValue
class FrequencyDependentDamping(MOAO):
"""
Keyword arguments
-----------------
description : str
(default "")
scriptableValues : List[ScriptableValue]
frequencies : ndarray
items : List[TwoDofData]
"""
def __init__(self , description="", **kwargs):
super().__init__(**kwargs)
self.description = description
self.scriptableValues = list()
self.frequencies = ndarray(1)
self.items = list()
for key, value in kwargs.items():
if not isinstance(value, Dict):
setattr(self, key, value)
@property
def blueprint(self) -> Blueprint:
"""Return blueprint that this entity represents"""
return FrequencyDependentDampingBlueprint()
@property
def description(self) -> str:
""""""
return self.__description
@description.setter
def description(self, value: str):
"""Set description"""
self.__description = value
@property
def scriptableValues(self) -> List[ScriptableValue]:
""""""
return self.__scriptableValues
@scriptableValues.setter
def scriptableValues(self, value: List[ScriptableValue]):
"""Set scriptableValues"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__scriptableValues = value
@property
def frequencies(self) -> ndarray:
""""""
return self.__frequencies
@frequencies.setter
def frequencies(self, value: ndarray):
"""Set frequencies"""
self.__frequencies = asarray(value)
@property
def items(self) -> List[TwoDofData]:
""""""
return self.__items
@items.setter
def items(self, value: List[TwoDofData]):
"""Set items"""
if not isinstance(value, Sequence):
raise Exception("Expected sequense, but was " , type(value))
self.__items = value
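# Illustrative usage (values are placeholders; constructing TwoDofData with no
# arguments is an assumption based on its generated keyword-argument
# constructor):
#
#     damping = FrequencyDependentDamping(description="hull damping")
#     damping.frequencies = [0.1, 0.2, 0.3]
#     damping.items = [TwoDofData() for _ in range(3)]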
/control-lab-les-0.0.0.5.tar.gz/control-lab-les-0.0.0.5/src/controllable/View/image_utils.py
# Standard library imports
import numpy as np
# Third party imports
import cv2 # pip install opencv-python
# Local application imports
print(f"Import: OK <{__name__}>")
class Image(object):
"""
Image class with image manipulation methods
Args:
frame (array): image frame
"""
    def __init__(self, frame):
        self.frame = frame
def addText(self, text:str, position, inplace=False):
"""
Add text to the image
Args:
text (str): text to be added
position (tuple): x,y position of where to place the text
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = self.frame
cv2.putText(frame, text, position, cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1)
if inplace:
self.frame = frame
return
return Image(frame)
def annotate(self, index:int, dimensions:tuple, inplace=False):
"""
Annotate the image to label identified targets
Args:
index (int): index of target
dimensions (list): list of x,y,w,h
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
x,y,w,h = dimensions
frame = self.frame
frame = cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)
frame = cv2.circle(frame, (int(x+(w/2)), int(y+(h/2))), 3, (0,0,255), -1)
frame = cv2.putText(frame, '{}'.format(index+1), (x-8, y-4), cv2.FONT_HERSHEY_PLAIN, 1, (255,255,255), 1)
if inplace:
self.frame = frame
return
return Image(frame)
def blur(self, blur_kernel=3, inplace=False):
"""
Blur the image
Args:
blur_kernel (int, optional): level of blurring, odd numbers only, minimum value of 3. Defaults to 3.
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = cv2.GaussianBlur(self.frame, (blur_kernel,blur_kernel), 0)
if inplace:
self.frame = frame
return
return Image(frame)
def convertToRGB(self, inplace=False):
"""
Turn image to RGB
Args:
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
if inplace:
self.frame = frame
return
return Image(frame)
    def convolve(self, kernel=None, inplace=False):
        """
        Perform convolution on image (the original body was a stub; it is
        completed here with a basic cv2.filter2D convolution, and the default
        3x3 averaging kernel is an assumption)
        Args:
            kernel (array, optional): convolution kernel. Defaults to a 3x3 averaging kernel.
            inplace (bool, optional): whether to perform action in place. Defaults to False.
        Returns:
            Image, or None: Image object, or None (if inplace=True)
        """
        if kernel is None:
            kernel = np.ones((3,3), np.float32) / 9
        frame = cv2.filter2D(self.frame, -1, kernel)
        if inplace:
            self.frame = frame
            return
        return Image(frame)
def crosshair(self, inplace=False):
"""
Add crosshair in the middle of image
Args:
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = self.frame
center_x = int(frame.shape[1] / 2)
center_y = int(frame.shape[0] / 2)
cv2.line(frame, (center_x, center_y+50), (center_x, center_y-50), (255,255,255), 1)
cv2.line(frame, (center_x+50, center_y), (center_x-50, center_y), (255,255,255), 1)
if inplace:
self.frame = frame
return
return Image(frame)
def encode(self, extension='.png'):
"""
Encode the frame into bytes
Args:
extension (str, optional): image format to encode to. Defaults to '.png'.
Returns:
bytes: byte representation of image
"""
return cv2.imencode(extension, self.frame)[1].tobytes()
def grayscale(self, inplace=False):
"""
Turn image to grayscale
Args:
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
if inplace:
self.frame = frame
return
return Image(frame)
def process(self, alpha, beta, blur_kernel=3, inplace=False):
"""
Process the image
Args:
alpha (float): alpha value
beta (float): beta value
blur_kernel (int, optional): level of blurring, odd numbers only, minimum value of 3. Defaults to 3.
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = self.frame
frame = cv2.addWeighted(frame, alpha, np.zeros(frame.shape, frame.dtype), 0, beta)
if blur_kernel > 0:
frame = cv2.GaussianBlur(frame, (blur_kernel,blur_kernel), 0)
if inplace:
self.frame = frame
return
return Image(frame)
def removeNoise(self, open_iter=0, close_iter=0, inplace=False):
"""
Remove noise from image
Args:
open_iter (int, optional): opening iteration. Defaults to 0.
close_iter (int, optional): closing iteration. Defaults to 0.
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
kernel = np.ones((3,3),np.uint8)
frame = self.frame
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frame = cv2.morphologyEx(frame,cv2.MORPH_OPEN,kernel,iterations=open_iter)
frame = cv2.morphologyEx(frame,cv2.MORPH_CLOSE,kernel,iterations=close_iter)
if inplace:
self.frame = frame
return
return Image(frame)
def resize(self, size, inplace=False):
"""
Resize the image
Args:
size (tuple): tuple of desired image width and height
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
frame = cv2.resize(self.frame, size)
if inplace:
self.frame = frame
return
return Image(frame)
def rotate(self, angle:int, inplace=False):
"""
        Rotate the image by a multiple of 90 degrees, clockwise
Args:
angle (int): 90, 180, or 270 degrees
inplace (bool, optional): whether to perform action in place. Defaults to False.
Returns:
Image, or None: Image object, or None (if inplace=True)
"""
rotateCodes = {
90: cv2.ROTATE_90_CLOCKWISE,
180: cv2.ROTATE_180,
270: cv2.ROTATE_90_COUNTERCLOCKWISE
}
frame = self.frame
if angle != 0:
frame = cv2.rotate(frame, rotateCodes.get(angle))
if inplace:
self.frame = frame
return
return Image(frame)
def save(self, filename):
"""
Save image to file
Args:
filename (str): filename to save to
Returns:
bool: True if successfully saved
"""
return cv2.imwrite(filename, self.frame)
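# Usage sketch (illustrative only, not part of the original module). It
# assumes cv2 is imported earlier in this module; 'input.png' and
# 'output.png' are placeholder file names.
if __name__ == '__main__':
    img = Image(cv2.imread('input.png'))
    # Chain a few manipulations; each call returns a new Image object
    processed = img.grayscale().blur(blur_kernel=5)
    processed.addText('demo', (10, 20), inplace=True)
    cv2.imwrite('output.png', processed.frame)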
|
PypiClean
|
/ansible-8.3.0-py3-none-any.whl/ansible_collections/community/aws/plugins/modules/iam_server_certificate.py
|
# Copyright: Contributors to the Ansible project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
DOCUMENTATION = r"""
---
module: iam_server_certificate
version_added: 1.0.0
short_description: Manage IAM server certificates for use on ELBs and CloudFront
description:
- Allows for the management of IAM server certificates.
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
type: str
new_name:
description:
- When I(state=present), this will update the name of the cert.
- The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
type: str
new_path:
description:
- When I(state=present), this will update the path of the cert.
- The I(cert), I(key) and I(cert_chain) parameters will be ignored if this is defined.
type: str
state:
description:
- Whether to create (or update) or delete the certificate.
      - If I(new_path) or I(new_name) is defined, specifying I(state=present) will attempt to update them.
required: true
choices: [ "present", "absent" ]
type: str
path:
description:
- When creating or updating, specify the desired path of the certificate.
default: "/"
type: str
cert_chain:
description:
- The content of the CA certificate chain in PEM encoded format.
type: str
cert:
description:
- The content of the certificate body in PEM encoded format.
type: str
key:
description:
- The content of the private key in PEM encoded format.
type: str
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS.
- If I(dup_ok=True), it will upload the certificate as long as the name is unique.
      - The default value for this option changed in release 5.0.0 to C(true).
default: true
type: bool
author:
- Jonathan I. Davila (@defionscode)
extends_documentation_fragment:
- amazon.aws.common.modules
- amazon.aws.region.modules
- amazon.aws.boto3
"""
RETURN = r""" # """
EXAMPLES = r"""
- name: Basic server certificate upload from local file
community.aws.iam_server_certificate:
name: very_ssl
state: present
cert: "{{ lookup('file', 'path/to/cert') }}"
key: "{{ lookup('file', 'path/to/key') }}"
cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
- name: Server certificate upload using key string
community.aws.iam_server_certificate:
name: very_ssl
state: present
path: "/a/cert/path/"
cert: "{{ lookup('file', 'path/to/cert') }}"
key: "{{ lookup('file', 'path/to/key') }}"
cert_chain: "{{ lookup('file', 'path/to/certchain') }}"
- name: Basic rename of existing certificate
community.aws.iam_server_certificate:
name: very_ssl
new_name: new_very_ssl
state: present
"""
try:
import botocore
except ImportError:
    pass  # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
from ansible_collections.community.aws.plugins.module_utils.modules import AnsibleCommunityAWSModule as AnsibleAWSModule
@AWSRetry.jittered_backoff()
def _list_server_certificates():
paginator = client.get_paginator("list_server_certificates")
return paginator.paginate().build_full_result()["ServerCertificateMetadataList"]
def check_duplicate_cert(new_cert):
    orig_cert_names = list(c["ServerCertificateName"] for c in _list_server_certificates())
for cert_name in orig_cert_names:
cert = get_server_certificate(cert_name)
if not cert:
continue
cert_body = cert.get("certificate_body", None)
if not _compare_cert(new_cert, cert_body):
continue
module.fail_json(
changed=False,
msg=f"This certificate already exists under the name {cert_name} and dup_ok=False",
duplicate_cert=cert,
)
def _compare_cert(cert_a, cert_b):
if not cert_a and not cert_b:
return True
if not cert_a or not cert_b:
return False
# Trim out the whitespace before comparing the certs. While this could mean
# an invalid cert 'matches' a valid cert, that's better than some stray
# whitespace breaking things
    cert_a = cert_a.replace("\r", "").replace("\n", "").replace(" ", "")
    cert_b = cert_b.replace("\r", "").replace("\n", "").replace(" ", "")
    return cert_a == cert_b
def update_server_certificate(current_cert):
changed = False
cert = module.params.get("cert")
cert_chain = module.params.get("cert_chain")
if not _compare_cert(cert, current_cert.get("certificate_body", None)):
module.fail_json(msg="Modifying the certificate body is not supported by AWS")
if not _compare_cert(cert_chain, current_cert.get("certificate_chain", None)):
module.fail_json(msg="Modifying the chaining certificate is not supported by AWS")
# We can't compare keys.
if module.check_mode:
return changed
# For now we can't make any changes. Updates to tagging would go here and
# update 'changed'
return changed
def create_server_certificate():
cert = module.params.get("cert")
key = module.params.get("key")
cert_chain = module.params.get("cert_chain")
if not module.params.get("dup_ok"):
check_duplicate_cert(cert)
path = module.params.get("path")
name = module.params.get("name")
params = dict(
ServerCertificateName=name,
CertificateBody=cert,
PrivateKey=key,
)
if cert_chain:
params["CertificateChain"] = cert_chain
if path:
params["Path"] = path
if module.check_mode:
return True
try:
client.upload_server_certificate(aws_retry=True, **params)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg=f"Failed to update server certificate {name}")
return True
def rename_server_certificate(current_cert):
name = module.params.get("name")
new_name = module.params.get("new_name")
new_path = module.params.get("new_path")
changes = dict()
# Try to be nice, if we've already been renamed exit quietly.
if not current_cert:
current_cert = get_server_certificate(new_name)
else:
if new_name:
changes["NewServerCertificateName"] = new_name
cert_metadata = current_cert.get("server_certificate_metadata", {})
if not current_cert:
module.fail_json(msg=f"Unable to find certificate {name}")
current_path = cert_metadata.get("path", None)
if new_path and current_path != new_path:
changes["NewPath"] = new_path
if not changes:
return False
if module.check_mode:
return True
try:
client.update_server_certificate(aws_retry=True, ServerCertificateName=name, **changes)
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg=f"Failed to update server certificate {name}", changes=changes)
return True
def delete_server_certificate(current_cert):
if not current_cert:
return False
if module.check_mode:
return True
name = module.params.get("name")
try:
result = client.delete_server_certificate(
aws_retry=True,
ServerCertificateName=name,
)
except is_boto3_error_code("NoSuchEntity"):
return None
except (
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg=f"Failed to delete server certificate {name}")
return True
def get_server_certificate(name):
if not name:
return None
try:
result = client.get_server_certificate(
aws_retry=True,
ServerCertificateName=name,
)
except is_boto3_error_code("NoSuchEntity"):
return None
except (
botocore.exceptions.ClientError,
botocore.exceptions.BotoCoreError,
) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg=f"Failed to get server certificate {name}")
cert = dict(camel_dict_to_snake_dict(result.get("ServerCertificate")))
return cert
def compatability_results(current_cert):
compat_results = dict()
if not current_cert:
return compat_results
metadata = current_cert.get("server_certificate_metadata", {})
if current_cert.get("certificate_body", None):
compat_results["cert_body"] = current_cert.get("certificate_body")
if current_cert.get("certificate_chain", None):
compat_results["chain_cert_body"] = current_cert.get("certificate_chain")
if metadata.get("arn", None):
compat_results["arn"] = metadata.get("arn")
if metadata.get("expiration", None):
compat_results["expiration_date"] = metadata.get("expiration")
if metadata.get("path", None):
compat_results["cert_path"] = metadata.get("path")
if metadata.get("server_certificate_name", None):
compat_results["name"] = metadata.get("server_certificate_name")
if metadata.get("upload_date", None):
compat_results["upload_date"] = metadata.get("upload_date")
return compat_results
def main():
global module
global client
argument_spec = dict(
state=dict(required=True, choices=["present", "absent"]),
name=dict(required=True),
cert=dict(),
key=dict(no_log=True),
cert_chain=dict(),
new_name=dict(),
path=dict(default="/"),
new_path=dict(),
dup_ok=dict(type="bool", default=True),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
mutually_exclusive=[
["new_path", "key"],
["new_path", "cert"],
["new_path", "cert_chain"],
["new_name", "key"],
["new_name", "cert"],
["new_name", "cert_chain"],
],
supports_check_mode=True,
)
client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff())
state = module.params.get("state")
name = module.params.get("name")
path = module.params.get("path")
new_name = module.params.get("new_name")
new_path = module.params.get("new_path")
dup_ok = module.params.get("dup_ok")
current_cert = get_server_certificate(name)
results = dict()
if state == "absent":
changed = delete_server_certificate(current_cert)
if changed:
results["deleted_cert"] = name
else:
msg = f"Certificate with the name {name} already absent"
results["msg"] = msg
else:
if new_name or new_path:
changed = rename_server_certificate(current_cert)
if new_name:
name = new_name
updated_cert = get_server_certificate(name)
elif current_cert:
changed = update_server_certificate(current_cert)
updated_cert = get_server_certificate(name)
else:
changed = create_server_certificate()
updated_cert = get_server_certificate(name)
results["server_certificate"] = updated_cert
compat_results = compatability_results(updated_cert)
if compat_results:
results.update(compat_results)
module.exit_json(changed=changed, **results)
if __name__ == "__main__":
main()
|
PypiClean
|
/PythonProj_Message_Client-0.0.1-py3-none-any.whl/client/client/main_window.py
|
from PyQt5.QtWidgets import QMainWindow, qApp, QMessageBox, QApplication, QListView
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QBrush, QColor
from PyQt5.QtCore import pyqtSlot, QEvent, Qt
from Cryptodome.Cipher import PKCS1_OAEP
from Cryptodome.PublicKey import RSA
import json
import logging
import base64
import sys
sys.path.append('../')
from lesson_6.message_proj.client.main_window_conv import Ui_MainClientWindow
from lesson_6.message_proj.client.add_contact import AddContactDialog
from lesson_6.message_proj.client.del_contact import DelContactDialog
from lesson_6.message_proj.common.errors_user import ServerError
from lesson_6.message_proj.common.variables import *
logger = logging.getLogger('client')
class ClientMainWindow(QMainWindow):
    '''
    Class - the main user window.
    Contains all the core logic of the client module.
    The window layout was created in Qt Designer and is loaded
    from the converted file main_window_conv.py
    '''
def __init__(self, database, transport, keys):
super().__init__()
        # Core attributes
self.database = database
self.transport = transport
        # Message decrypter object with the preloaded key
self.decrypter = PKCS1_OAEP.new(keys)
        # Load the window layout generated by Qt Designer
self.ui = Ui_MainClientWindow()
self.ui.setupUi(self)
        # "Exit" button
self.ui.menu_exit.triggered.connect(qApp.exit)
        # "Send message" button
self.ui.btn_send.clicked.connect(self.send_message)
# "добавить контакт"
self.ui.btn_add_contact.clicked.connect(self.add_contact_window)
self.ui.menu_add_contact.triggered.connect(self.add_contact_window)
        # Remove contact
self.ui.btn_remove_contact.clicked.connect(self.delete_contact_window)
self.ui.menu_del_contact.triggered.connect(self.delete_contact_window)
        # Additional required attributes
self.contacts_model = None
self.history_model = None
self.messages = QMessageBox()
self.current_chat = None
self.current_chat_key = None
self.encryptor = None
self.ui.list_messages.setHorizontalScrollBarPolicy(
Qt.ScrollBarAlwaysOff)
self.ui.list_messages.setWordWrap(True)
        # Route a double click on the contact list to the handler
self.ui.list_contacts.doubleClicked.connect(self.select_active_user)
self.clients_list_update()
self.set_disabled_input()
self.show()
def set_disabled_input(self):
        '''Method that disables the input fields.'''
        # Recipient label.
        self.ui.label_new_message.setText(
            'Double-click a contact in the contact window to select a recipient.')
self.ui.text_message.clear()
if self.history_model:
self.history_model.clear()
        # The input field and send button stay disabled until a recipient is selected.
self.ui.btn_clear.setDisabled(True)
self.ui.btn_send.setDisabled(True)
self.ui.text_message.setDisabled(True)
self.encryptor = None
self.current_chat = None
self.current_chat_key = None
def history_list_update(self):
        '''
        Method that fills the corresponding QListView with the
        message history of the current conversation partner.
        '''
        # Fetch the history, sorted by date
        history = sorted(
            self.database.get_history(
                self.current_chat),
            key=lambda item: item[3])
        # Create the model if it does not exist yet.
if not self.history_model:
self.history_model = QStandardItemModel()
self.ui.list_messages.setModel(self.history_model)
        # Clear out the old entries
self.history_model.clear()
        # Take at most the 20 most recent entries.
        length = len(history)
start_index = 0
if length > 20:
start_index = length - 20
        # Fill the model, separating incoming and outgoing messages
        # by alignment and a different background.
        # Only the last 20 messages are displayed.
        for i in range(start_index, length):
            item = history[i]
if item[1] == 'in':
                mess = QStandardItem(
                    f'Incoming from {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setBackground(QBrush(QColor(255, 213, 213)))
mess.setTextAlignment(Qt.AlignLeft)
self.history_model.appendRow(mess)
else:
                mess = QStandardItem(
                    f'Outgoing from {item[3].replace(microsecond=0)}:\n {item[2]}')
mess.setEditable(False)
mess.setTextAlignment(Qt.AlignRight)
mess.setBackground(QBrush(QColor(204, 255, 204)))
self.history_model.appendRow(mess)
self.ui.list_messages.scrollToBottom()
def select_active_user(self):
        '''Handler for the double-click event on the contact list.'''
        # The contact selected by the double click is the highlighted
        # item in the QListView
        self.current_chat = self.ui.list_contacts.currentIndex().data()
        # Call the main routine
        self.set_active_user()
def set_active_user(self):
        '''Method that activates a chat with the selected contact.'''
        # Request the user's public key and create the encryption object
try:
self.current_chat_key = self.transport.key_request(
self.current_chat)
            logger.debug(f'Public key loaded for {self.current_chat}')
if self.current_chat_key:
self.encryptor = PKCS1_OAEP.new(
RSA.import_key(self.current_chat_key))
except (OSError, json.JSONDecodeError):
self.current_chat_key = None
self.encryptor = None
            logger.debug(f'Failed to obtain a key for {self.current_chat}')
        # Without a key we cannot start a chat with this user; report an error
if not self.current_chat_key:
            self.messages.warning(
                self, 'Error', 'No encryption key is available for the selected user.')
return
        # Update the label and enable the controls
        self.ui.label_new_message.setText(
            f'Enter a message for {self.current_chat}:')
self.ui.btn_clear.setDisabled(False)
self.ui.btn_send.setDisabled(False)
self.ui.text_message.setDisabled(False)
        # Fill the window with the message history for the selected user.
self.history_list_update()
def clients_list_update(self):
        '''Method that refreshes the contact list.'''
contacts_list = self.database.get_contacts()
self.contacts_model = QStandardItemModel()
for i in sorted(contacts_list):
item = QStandardItem(i)
item.setEditable(False)
self.contacts_model.appendRow(item)
self.ui.list_contacts.setModel(self.contacts_model)
def add_contact_window(self):
        '''Method that creates the add-contact dialog window'''
global select_dialog
select_dialog = AddContactDialog(self.transport, self.database)
select_dialog.btn_ok.clicked.connect(
lambda: self.add_contact_action(select_dialog))
select_dialog.show()
def add_contact_action(self, item):
        '''Handler for the "Add" button click'''
new_contact = item.selector.currentText()
self.add_contact(new_contact)
item.close()
def add_contact(self, new_contact):
        '''
        Method that adds a contact to the server and client databases.
        After the databases are updated, the window contents are refreshed as well.
        '''
try:
self.transport.add_contact(new_contact)
except ServerError as err:
            self.messages.critical(self, 'Server error', err.text)
except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Error', 'Connection to the server was lost!')
                self.close()
            self.messages.critical(self, 'Error', 'Connection timed out!')
else:
self.database.add_contact(new_contact)
new_contact = QStandardItem(new_contact)
new_contact.setEditable(False)
self.contacts_model.appendRow(new_contact)
            logger.info(f'Contact {new_contact} added successfully')
            self.messages.information(
                self, 'Success', 'Contact added successfully.')
def delete_contact_window(self):
        '''Method that creates the contact removal dialog window.'''
global remove_dialog
remove_dialog = DelContactDialog(self.database)
remove_dialog.btn_ok.clicked.connect(
lambda: self.delete_contact(remove_dialog))
remove_dialog.show()
def delete_contact(self, item):
        '''
        Method that removes a contact from the server and client databases.
        After the databases are updated, the window contents are refreshed as well.
        '''
selected = item.selector.currentText()
try:
self.transport.remove_contact(selected)
except ServerError as err:
            self.messages.critical(self, 'Server error', err.text)
except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Error', 'Connection to the server was lost!')
                self.close()
            self.messages.critical(self, 'Error', 'Connection timed out!')
else:
self.database.del_contact(selected)
self.clients_list_update()
            logger.info(f'Contact {selected} removed successfully')
            self.messages.information(self, 'Success', 'Contact removed successfully.')
            item.close()
            # If the active contact was removed, disable the input fields.
if selected == self.current_chat:
self.current_chat = None
self.set_disabled_input()
def send_message(self):
        '''
        Function that sends a message to the current contact.
        Handles message encryption and dispatch.
        '''
        # Check that the message field is not empty, then take the message
        # and clear the field
message_text = self.ui.text_message.toPlainText()
self.ui.text_message.clear()
if not message_text:
return
        # Encrypt the message with the recipient's key and pack it in base64.
message_text_encrypted = self.encryptor.encrypt(
message_text.encode('utf8'))
message_text_encrypted_base64 = base64.b64encode(
message_text_encrypted)
try:
self.transport.send_message(
self.current_chat,
message_text_encrypted_base64.decode('ascii'))
except ServerError as err:
            self.messages.critical(self, 'Error', err.text)
except OSError as err:
            if err.errno:
                self.messages.critical(
                    self, 'Error', 'Connection to the server was lost!')
                self.close()
            self.messages.critical(self, 'Error', 'Connection timed out!')
except (ConnectionResetError, ConnectionAbortedError):
            self.messages.critical(
                self, 'Error', 'Connection to the server was lost!')
self.close()
else:
self.database.save_message(self.current_chat, 'out', message_text)
            logger.debug(
                f'Message sent to {self.current_chat}: {message_text}')
self.history_list_update()
@pyqtSlot(dict)
def message(self, message):
        '''
        Slot that handles incoming messages: decrypts them and saves
        them in the message history. Asks the user what to do if the
        message is not from the current conversation partner, and
        switches the active chat if requested.
        '''
        # Get the raw byte string
        encrypted_message = base64.b64decode(message[MESSAGE_TEXT])
        # Decrypt the message; on failure show a warning and return
try:
decrypted_message = self.decrypter.decrypt(encrypted_message)
except (ValueError, TypeError):
            self.messages.warning(
                self, 'Error', 'Failed to decode the message.')
return
        # Save the message to the database and refresh the message history,
        # or open a new chat.
self.database.save_message(
self.current_chat,
'in',
decrypted_message.decode('utf8'))
sender = message[SENDER]
if sender == self.current_chat:
self.history_list_update()
else:
            # Check whether this user is already in our contacts:
if self.database.check_contact(sender):
                # If so, ask whether to open a chat with the sender and
                # open it if confirmed
                if self.messages.question(
                        self,
                        'New message',
                        f'New message received from {sender}, open a chat with them?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
self.current_chat = sender
self.set_active_user()
            else:
                # The sender is not in our contacts; ask whether to add
                # them and open a chat.
                if self.messages.question(
                        self,
                        'New message',
                        f'New message received from {sender}.\n This user is not in your contact list.\n Add them to contacts and open a chat?',
                        QMessageBox.Yes,
                        QMessageBox.No) == QMessageBox.Yes:
self.add_contact(sender)
self.current_chat = sender
                    # The message has to be saved again, otherwise it would
                    # be lost, because the contact did not exist yet at the
                    # time of the previous call.
self.database.save_message(
self.current_chat, 'in', decrypted_message.decode('utf8'))
self.set_active_user()
@pyqtSlot()
def connection_lost(self):
        '''
        Slot that handles the loss of connection to the server.
        Shows a warning dialog and shuts down the application.
        '''
        self.messages.warning(
            self,
            'Connection failure',
            'Connection to the server was lost. ')
self.close()
@pyqtSlot()
def sig_205(self):
        '''
        Slot that refreshes the databases on command from the server.
        '''
if self.current_chat and not self.database.check_user(
self.current_chat):
            self.messages.warning(
                self,
                'Sorry',
                'Unfortunately this contact has been removed from the server.')
self.set_disabled_input()
self.current_chat = None
self.clients_list_update()
def make_connection(self, trans_obj):
        '''Method that wires up signals and slots.'''
trans_obj.new_message.connect(self.message)
trans_obj.connection_lost.connect(self.connection_lost)
trans_obj.message_205.connect(self.sig_205)
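# Wiring sketch (illustrative, not part of the original module): the window
# is normally constructed by the client entry point, roughly like this,
# where database, transport and keys come from the rest of the package:
#
#     app = QApplication(sys.argv)
#     window = ClientMainWindow(database, transport, keys)
#     window.make_connection(transport)
#     app.exec_()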
|
PypiClean
|
/dipex-4.50.6-py3-none-any.whl/integrations/aarhus/los_import.py
|
import asyncio
import datetime
import logging
from pathlib import Path
import click
import config
import initial
import sentry_sdk
from dateutil import parser
from los_leder import ManagerImporter
from los_org import OrgUnitImporter
from los_pers import PersonImporter
from los_stam import StamImporter
logger = logging.getLogger(__name__)
def get_or_create_import_state(settings: config.Settings) -> datetime.datetime:
"""
Ensure that the state file exists, and return the date of the last import
If no import has been run, datetime.min is returned
"""
state_file_path = Path(settings.import_state_file)
if not state_file_path.is_file():
logger.warning("Import file not present. Creating.")
with open(state_file_path, "w") as f:
earliest = datetime.datetime.min
f.write(earliest.isoformat())
return earliest
else:
with open(state_file_path, "r") as f:
last_import = parser.parse(f.read())
logger.info("Last import performed at %s", last_import)
return last_import
def set_import_state(settings: config.Settings, import_date: datetime.datetime):
"""Set contents of import state file to specified date"""
state_file_path = Path(settings.import_state_file)
with open(state_file_path, "w") as f:
import_date_string = import_date.isoformat()
logger.debug("Writing timestamp %s to state", import_date_string)
f.write(import_date_string)
def run_los_import(settings, last_import):
loop = asyncio.get_event_loop()
now = datetime.datetime.now()
initial_import = asyncio.ensure_future(initial.perform_initial_setup())
loop.run_until_complete(initial_import)
# Import STAM
stam_import = asyncio.ensure_future(StamImporter(last_import).run())
loop.run_until_complete(stam_import)
# Import Org
org_import = asyncio.ensure_future(OrgUnitImporter().run(last_import))
loop.run_until_complete(org_import)
    # Import Person
person_import = asyncio.ensure_future(PersonImporter().run(last_import))
loop.run_until_complete(person_import)
# Import manager
manager_import = asyncio.ensure_future(ManagerImporter().run(last_import))
loop.run_until_complete(manager_import)
loop.close()
set_import_state(settings, now)
@click.command()
@click.option("--import-from-date")
@click.option(
"--ftp-url",
help="URL of FTP where CSV files will be retrieved from",
)
@click.option(
"--ftp-user",
help="Username to use when logging into FTP server",
)
@click.option(
"--ftp-pass",
help="Password to use when logging into FTP server",
)
@click.option(
"--ftp-folder",
help="FTP folder where CSV files are retrieved from",
)
@click.option(
"--import-state-file",
help="Name of import state file",
)
@click.option(
"--import-csv-folder",
help="Path to folder containing CSV files to import. Disables FTP reading",
)
@click.option(
"--azid-it-system-uuid",
type=click.UUID,
help="UUID of MO IT system used for the `AZID` column of `Pers_*.csv` files",
)
def main(**kwargs):
import_from_date = kwargs.pop("import_from_date", None)
command_line_options = {key: value for key, value in kwargs.items() if value}
settings = config.Settings.from_kwargs(**command_line_options)
if settings.sentry_dsn:
sentry_sdk.init(dsn=settings.sentry_dsn)
if import_from_date:
last_import = datetime.date.fromisoformat(import_from_date)
else:
last_import = get_or_create_import_state(settings)
return run_los_import(settings, last_import)
if __name__ == "__main__":
main() # type: ignore
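# Invocation sketch (illustrative, not part of the original module): the
# importer is normally run from the command line; the values below are
# placeholders, and any option omitted falls back to config.Settings:
#
#     python los_import.py --import-from-date 2021-01-01 \
#         --import-state-file /tmp/los_state.txt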
|
PypiClean
|
/shared_memory38-0.1.2-cp37-cp37m-win_amd64.whl/shared_memory/managers.py
|
import os
import signal
from multiprocessing import ProcessError, util
from multiprocessing.managers import BaseManager, Server, State, dispatch
from os import getpid
try:
from . import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
__all__ = ['SharedMemoryManager']
if HAS_SHMEM:
class _SharedMemoryTracker:
"Manages one or more shared memory segments."
def __init__(self, name, segment_names=[]):
self.shared_memory_context_name = name
self.segment_names = segment_names
def register_segment(self, segment_name):
"Adds the supplied shared memory block name to tracker."
util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
self.segment_names.append(segment_name)
def destroy_segment(self, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the list of blocks being tracked."""
util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
self.segment_names.remove(segment_name)
segment = shared_memory.SharedMemory(segment_name)
segment.close()
segment.unlink()
def unlink(self):
"Calls destroy_segment() on all tracked shared memory blocks."
for segment_name in self.segment_names[:]:
self.destroy_segment(segment_name)
def __del__(self):
util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
self.unlink()
def __getstate__(self):
return (self.shared_memory_context_name, self.segment_names)
def __setstate__(self, state):
self.__init__(*state)
class SharedMemoryServer(Server):
public = Server.public + \
['track_segment', 'release_segment', 'list_segments']
def __init__(self, *args, **kwargs):
Server.__init__(self, *args, **kwargs)
address = self.address
# The address of Linux abstract namespaces can be bytes
if isinstance(address, bytes):
address = os.fsdecode(address)
self.shared_memory_context = \
_SharedMemoryTracker(f"shm_{address}_{getpid()}")
util.debug(f"SharedMemoryServer started by pid {getpid()}")
def create(*args, **kwargs):
"""Create a new distributed-shared object (not backed by a shared
memory block) and return its id to be used in a Proxy Object."""
# Unless set up as a shared proxy, don't make shared_memory_context
# a standard part of kwargs. This makes things easier for supplying
# simple functions.
            if len(args) >= 3:
                typeid = args[2]
            elif 'typeid' in kwargs:
                typeid = kwargs['typeid']
            elif not args:
                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
                                "object needs an argument")
            else:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            # The bound instance arrives positionally as the first argument
            # (see __text_signature__ below).
            self = args[0]
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
kwargs['shared_memory_context'] = self.shared_memory_context
return Server.create(*args, **kwargs)
create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'
def shutdown(self, c):
"Call unlink() on all tracked shared memory, terminate the Server."
self.shared_memory_context.unlink()
return Server.shutdown(self, c)
def track_segment(self, c, segment_name):
"Adds the supplied shared memory block name to Server's tracker."
self.shared_memory_context.register_segment(segment_name)
def release_segment(self, c, segment_name):
"""Calls unlink() on the shared memory block with the supplied name
and removes it from the tracker instance inside the Server."""
self.shared_memory_context.destroy_segment(segment_name)
def list_segments(self, c):
"""Returns a list of names of shared memory blocks that the Server
is currently tracking."""
return self.shared_memory_context.segment_names
class SharedMemoryManager(BaseManager):
"""Like SyncManager but uses SharedMemoryServer instead of Server.
It provides methods for creating and returning SharedMemory instances
and for creating a list-like object (ShareableList) backed by shared
memory. It also provides methods that create and return Proxy Objects
that support synchronization across processes (i.e. multi-process-safe
locks and semaphores).
"""
_Server = SharedMemoryServer
def __init__(self, *args, **kwargs):
if os.name == "posix":
# bpo-36867: Ensure the resource_tracker is running before
# launching the manager process, so that concurrent
# shared_memory manipulation both in the manager and in the
# current process does not create two resource_tracker
# processes.
from . import resource_tracker
resource_tracker.ensure_running()
BaseManager.__init__(self, *args, **kwargs)
util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
def __del__(self):
util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
pass
def get_server(self):
'Better than monkeypatching for now; merge into Server ultimately'
if self._state.value != State.INITIAL:
if self._state.value == State.STARTED:
raise ProcessError("Already started SharedMemoryServer")
elif self._state.value == State.SHUTDOWN:
raise ProcessError("SharedMemoryManager has shut down")
else:
raise ProcessError(
"Unknown state {!r}".format(self._state.value))
return self._Server(self._registry, self._address,
self._authkey, self._serializer)
@classmethod
def _run_server(cls, *args, **kwargs):
# as server protection not implemented in Python < 3.8, we have to
# add it manually via this measure
# bpo-36368: protect server process from KeyboardInterrupt signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
super()._run_server(*args, **kwargs)
def SharedMemory(self, size):
"""Returns a new SharedMemory instance with the specified size in
bytes, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sms = shared_memory.SharedMemory(None, create=True, size=size)
try:
dispatch(conn, None, 'track_segment', (sms.name,))
except BaseException as e:
sms.unlink()
raise e
return sms
def ShareableList(self, sequence):
"""Returns a new ShareableList instance populated with the values
from the input sequence, to be tracked by the manager."""
with self._Client(self._address, authkey=self._authkey) as conn:
sl = shared_memory.ShareableList(sequence)
try:
dispatch(conn, None, 'track_segment', (sl.shm.name,))
except BaseException as e:
sl.shm.unlink()
raise e
return sl
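# Usage sketch (illustrative, not part of the original module): the manager
# owns the shared memory blocks it creates and unlinks them on shutdown.
if __name__ == '__main__' and HAS_SHMEM:
    with SharedMemoryManager() as smm:
        # A list-like object backed by a shared memory block
        sl = smm.ShareableList(range(5))
        sl[0] = 42
        print(list(sl))
        # A raw shared memory block, writable through its buffer
        sm = smm.SharedMemory(size=128)
        sm.buf[:5] = bytearray(b'hello')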
|
PypiClean
|
/ray_for_mars-1.12.1-cp38-cp38-manylinux2014_x86_64.whl/ray_for_mars-1.12.1.data/purelib/ray/tune/examples/tune_mnist_keras.py
|
import argparse
import os
from filelock import FileLock
from tensorflow.keras.datasets import mnist
import ray
from ray import tune
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.integration.keras import TuneReportCallback
def train_mnist(config):
# https://github.com/tensorflow/tensorflow/issues/32159
import tensorflow as tf
batch_size = 128
num_classes = 10
epochs = 12
with FileLock(os.path.expanduser("~/.data.lock")):
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(config["hidden"], activation="relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(num_classes, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy",
optimizer=tf.keras.optimizers.SGD(lr=config["lr"], momentum=config["momentum"]),
metrics=["accuracy"],
)
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=0,
validation_data=(x_test, y_test),
callbacks=[TuneReportCallback({"mean_accuracy": "accuracy"})],
)
def tune_mnist(num_training_iterations):
sched = AsyncHyperBandScheduler(
time_attr="training_iteration", max_t=400, grace_period=20
)
analysis = tune.run(
train_mnist,
name="exp",
scheduler=sched,
metric="mean_accuracy",
mode="max",
stop={"mean_accuracy": 0.99, "training_iteration": num_training_iterations},
num_samples=10,
resources_per_trial={"cpu": 2, "gpu": 0},
config={
"threads": 2,
"lr": tune.uniform(0.001, 0.1),
"momentum": tune.uniform(0.1, 0.9),
"hidden": tune.randint(32, 512),
},
)
print("Best hyperparameters found were: ", analysis.best_config)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing"
)
parser.add_argument(
"--server-address",
type=str,
default=None,
required=False,
help="The address of server to connect to if using Ray Client.",
)
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=4)
elif args.server_address:
ray.init(f"ray://{args.server_address}")
tune_mnist(num_training_iterations=5 if args.smoke_test else 300)
|
PypiClean
|
/ngcp-xbee-1.3.tar.gz/ngcp-xbee-1.3/examples/io/LocalADCSample/readme.txt
|
Introduction
------------
This sample Python application shows how to read XBee analog inputs of the
device attached to the serial/USB port of your PC.
The application configures an IO line of the XBee device as ADC. Then, it
periodically reads its value and prints it in the output console.
NOTE: This example uses the generic XBee device (XBeeDevice) class,
but it can be applied to any other local XBee device class.
Requirements
------------
To run this example you will need:
* One XBee radio in API mode and its corresponding carrier board (XBIB
or XBee Development Board).
* The XCTU application (available at www.digi.com/xctu).
Compatible protocols
--------------------
* 802.15.4
* Cellular
* Cellular NB-IoT
* DigiMesh
* Point-to-Multipoint
* Wi-Fi
* ZigBee
Example setup
-------------
1) Plug the XBee radio into the XBee adapter and connect it to your
computer's USB or serial port.
2) Ensure that the module is in API mode.
For further information on how to perform this task, read the
'Configuring Your XBee Modules' topic of the Getting Started guide.
3) Set the port and baud rate of the XBee radio in the sample file.
If you configured the module in the previous step with the XCTU, you
will see the port number and baud rate in the 'Port' label of the device
on the left view.
4) The final step is to connect a variable voltage source to the pin
   configured as ADC (light sensor, temperature sensor, etc.). For testing
purposes we recommend using a potentiometer. Depending on the carrier
board you are using you will need to follow a different set of
instructions to connect it:
- XBIB-U-DEV board:
* Isolate the pin configured as ADC so it does not use the
functionality provided by the board.
* Connect the potentiometer to VCC, to the pin configured as ADC
and to GND. Something similar to this:
O VCC
|
<
>___ XBee device pin (ADC)
>
<
_|_
- GND
* If you prefer not to isolate the pin of the board and not to use
a potentiometer, you can still test the example. The IO line
configured as ADC (DIO1/AD1) is connected to the SW3 user button
     of the XBIB-U-DEV board, so the analog value will swing between its
     minimum and maximum depending on the status of the button.
- XBee Development Board:
* Connect a voltage to VRef pin of the device (you can take it
from the Vcc pin).
* Configure the micro-switch of AD1 line to "Potentiometer", this
way the DIO1/AD1 line of the device will be connected to the
       board's potentiometer.
NOTE: It is recommended to verify the capabilities of the pins used
in the example as well as the electrical characteristics in the
product manual of your XBee Device to ensure that everything is
configured correctly.
Running the example
-------------------
First, build and launch the application.
To test the functionality, follow these steps:
1) Rotate the potentiometer.
2) Verify that the value displayed in the output console is changing.
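For reference, here is a minimal sketch of what the sample application
does, assuming this package keeps the digi-xbee API (the port name and
baud rate below are placeholders):

    from digi.xbee.devices import XBeeDevice
    from digi.xbee.io import IOLine, IOMode
    import time

    device = XBeeDevice("COM1", 9600)   # placeholder port and baud rate
    device.open()
    # Configure DIO1/AD1 as an ADC input and poll it once per second
    device.set_io_configuration(IOLine.DIO1_AD1, IOMode.ADC)
    try:
        while True:
            value = device.get_adc_value(IOLine.DIO1_AD1)
            print("ADC value:", value)
            time.sleep(1)
    finally:
        device.close()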
|
PypiClean
|
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/network/get_bastion_host.py
|
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetBastionHostResult',
'AwaitableGetBastionHostResult',
'get_bastion_host',
'get_bastion_host_output',
]
@pulumi.output_type
class GetBastionHostResult:
"""
Bastion Host resource.
"""
def __init__(__self__, disable_copy_paste=None, dns_name=None, enable_file_copy=None, enable_ip_connect=None, enable_kerberos=None, enable_shareable_link=None, enable_tunneling=None, etag=None, id=None, ip_configurations=None, location=None, name=None, provisioning_state=None, scale_units=None, sku=None, tags=None, type=None):
if disable_copy_paste and not isinstance(disable_copy_paste, bool):
raise TypeError("Expected argument 'disable_copy_paste' to be a bool")
pulumi.set(__self__, "disable_copy_paste", disable_copy_paste)
if dns_name and not isinstance(dns_name, str):
raise TypeError("Expected argument 'dns_name' to be a str")
pulumi.set(__self__, "dns_name", dns_name)
if enable_file_copy and not isinstance(enable_file_copy, bool):
raise TypeError("Expected argument 'enable_file_copy' to be a bool")
pulumi.set(__self__, "enable_file_copy", enable_file_copy)
if enable_ip_connect and not isinstance(enable_ip_connect, bool):
raise TypeError("Expected argument 'enable_ip_connect' to be a bool")
pulumi.set(__self__, "enable_ip_connect", enable_ip_connect)
if enable_kerberos and not isinstance(enable_kerberos, bool):
raise TypeError("Expected argument 'enable_kerberos' to be a bool")
pulumi.set(__self__, "enable_kerberos", enable_kerberos)
if enable_shareable_link and not isinstance(enable_shareable_link, bool):
raise TypeError("Expected argument 'enable_shareable_link' to be a bool")
pulumi.set(__self__, "enable_shareable_link", enable_shareable_link)
if enable_tunneling and not isinstance(enable_tunneling, bool):
raise TypeError("Expected argument 'enable_tunneling' to be a bool")
pulumi.set(__self__, "enable_tunneling", enable_tunneling)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scale_units and not isinstance(scale_units, int):
raise TypeError("Expected argument 'scale_units' to be a int")
pulumi.set(__self__, "scale_units", scale_units)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="disableCopyPaste")
def disable_copy_paste(self) -> Optional[bool]:
"""
Enable/Disable Copy/Paste feature of the Bastion Host resource.
"""
return pulumi.get(self, "disable_copy_paste")
@property
@pulumi.getter(name="dnsName")
def dns_name(self) -> Optional[str]:
"""
FQDN for the endpoint on which bastion host is accessible.
"""
return pulumi.get(self, "dns_name")
@property
@pulumi.getter(name="enableFileCopy")
def enable_file_copy(self) -> Optional[bool]:
"""
Enable/Disable File Copy feature of the Bastion Host resource.
"""
return pulumi.get(self, "enable_file_copy")
@property
@pulumi.getter(name="enableIpConnect")
def enable_ip_connect(self) -> Optional[bool]:
"""
Enable/Disable IP Connect feature of the Bastion Host resource.
"""
return pulumi.get(self, "enable_ip_connect")
@property
@pulumi.getter(name="enableKerberos")
def enable_kerberos(self) -> Optional[bool]:
"""
Enable/Disable Kerberos feature of the Bastion Host resource.
"""
return pulumi.get(self, "enable_kerberos")
@property
@pulumi.getter(name="enableShareableLink")
def enable_shareable_link(self) -> Optional[bool]:
"""
Enable/Disable Shareable Link of the Bastion Host resource.
"""
return pulumi.get(self, "enable_shareable_link")
@property
@pulumi.getter(name="enableTunneling")
def enable_tunneling(self) -> Optional[bool]:
"""
Enable/Disable Tunneling feature of the Bastion Host resource.
"""
return pulumi.get(self, "enable_tunneling")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.BastionHostIPConfigurationResponse']]:
"""
IP configuration of the Bastion Host resource.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the bastion host resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="scaleUnits")
def scale_units(self) -> Optional[int]:
"""
The scale units for the Bastion Host resource.
"""
return pulumi.get(self, "scale_units")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of this Bastion Host.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetBastionHostResult(GetBastionHostResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBastionHostResult(
disable_copy_paste=self.disable_copy_paste,
dns_name=self.dns_name,
enable_file_copy=self.enable_file_copy,
enable_ip_connect=self.enable_ip_connect,
enable_kerberos=self.enable_kerberos,
enable_shareable_link=self.enable_shareable_link,
enable_tunneling=self.enable_tunneling,
etag=self.etag,
id=self.id,
ip_configurations=self.ip_configurations,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
scale_units=self.scale_units,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_bastion_host(bastion_host_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBastionHostResult:
"""
Gets the specified Bastion Host.
Azure REST API version: 2023-02-01.
:param str bastion_host_name: The name of the Bastion Host.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['bastionHostName'] = bastion_host_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network:getBastionHost', __args__, opts=opts, typ=GetBastionHostResult).value
return AwaitableGetBastionHostResult(
disable_copy_paste=pulumi.get(__ret__, 'disable_copy_paste'),
dns_name=pulumi.get(__ret__, 'dns_name'),
enable_file_copy=pulumi.get(__ret__, 'enable_file_copy'),
enable_ip_connect=pulumi.get(__ret__, 'enable_ip_connect'),
enable_kerberos=pulumi.get(__ret__, 'enable_kerberos'),
enable_shareable_link=pulumi.get(__ret__, 'enable_shareable_link'),
enable_tunneling=pulumi.get(__ret__, 'enable_tunneling'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
ip_configurations=pulumi.get(__ret__, 'ip_configurations'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
scale_units=pulumi.get(__ret__, 'scale_units'),
sku=pulumi.get(__ret__, 'sku'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_bastion_host)
def get_bastion_host_output(bastion_host_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBastionHostResult]:
"""
Gets the specified Bastion Host.
Azure REST API version: 2023-02-01.
:param str bastion_host_name: The name of the Bastion Host.
:param str resource_group_name: The name of the resource group.
"""
...
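# Usage sketch (illustrative, not part of the original module): inside a
# Pulumi program, the lookup defined above is used like this; the resource
# names are placeholders.
#
#     host = get_bastion_host(bastion_host_name="my-bastion",
#                             resource_group_name="my-rg")
#     pulumi.export("bastionDnsName", host.dns_name)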
|
PypiClean
|
/Products.SilvaKupu-1.4.2.tar.gz/Products.SilvaKupu-1.4.2/Products/kupu/doc/reference/readme.txt
|
This is designed to be more of a reference than the procedural
documentation in the folder above.
This reference is still at an early stage, so please feel free to submit
patches, comments and suggestions to [email protected].
The files are as follows:
errors.txt
- A list of the errors commonly emitted by Kupu and the common ways
to solve them.
glossary.txt
- A glossary of the terms you're likely to encounter when dealing
with Kupu.
kupuconfig.txt
- This is a reference for the various sections that can be set in
the <kupuconfig> xml block at the top of your editing template.
|
PypiClean
|
/jk_json-0.2023.2.6.tar.gz/jk_json-0.2023.2.6/jk_jsonschema/SimpleSchemaGenerator.py
|
import typing
import re
import random
import jk_json
import jk_utils
from .re import compactVerboseRegEx
def _isType(x) -> bool:
    return isinstance(x, type)
#
_compileSingleType = None  # forward declaration; the real implementation is defined below
class AbstractGenerator(object):
#
# @param list _xParentalRequiredList The JSON list that holds the names of those properties that are required. This list is defined at the parent!
#
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
self._schema = _xSchema
self._parent = _xParent
self._name = _xName
self._requiredList = _xParentalRequiredList
self._definitionID = None # if this is a definition, this variable holds the name of the definition
#
#
# Returns `true` if this is a definition. `false` indicates this is a regular schema component.
#
def isDefinition(self) -> bool:
return bool(self._definitionID)
#
def required(self):
        if self._requiredList is not None:
if self._name not in self._requiredList:
self._requiredList.append(self._name)
return self
else:
raise Exception("This is not a value key that could be set to optional/required.")
#
def notRequired(self):
        if self._requiredList is not None:
if self._name in self._requiredList:
self._requiredList.remove(self._name)
return self
else:
raise Exception("This is not a value key that could be set to optional/required.")
#
def __str__(self):
return jk_json.dumps(self._schema)
#
def __repr__(self):
return jk_json.dumps(self._schema)
#
@property
def schema(self):
return self._schema
#
def __enter__(self):
return self
#
def __exit__(self, etype, value, traceback):
return False
#
#
class BooleanGenerator(AbstractGenerator):
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
#
#
class IntegerGenerator(AbstractGenerator):
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
#
def minimum(self, minimum:int):
if minimum is None:
if "minimum" in self._schema:
self._schema.remove("minimum")
else:
self._schema["minimum"] = minimum
return self
#
def exclusiveMinimum(self, minimum:int):
if minimum is None:
if "exclusiveMinimum" in self._schema:
self._schema.remove("exclusiveMinimum")
else:
self._schema["exclusiveMinimum"] = minimum
return self
#
def maximum(self, maximum:int):
if maximum is None:
if "maximum" in self._schema:
self._schema.remove("maximum")
else:
self._schema["maximum"] = maximum
return self
#
def exclusiveMaximum(self, maximum:int):
if maximum is None:
if "exclusiveMaximum" in self._schema:
self._schema.remove("exclusiveMaximum")
else:
self._schema["exclusiveMaximum"] = maximum
return self
#
def allowedValues(self, allowedValues:list):
if allowedValues is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
self._schema["enum"] = allowedValues
return self
#
#
class FloatGenerator(AbstractGenerator):
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
#
def minimum(self, minimum:float):
if minimum is None:
if "minimum" in self._schema:
self._schema.remove("minimum")
else:
self._schema["minimum"] = minimum
return self
#
def exclusiveMinimum(self, minimum:float):
if minimum is None:
if "exclusiveMinimum" in self._schema:
self._schema.remove("exclusiveMinimum")
else:
self._schema["exclusiveMinimum"] = minimum
return self
#
def maximum(self, maximum:float):
if maximum is None:
if "maximum" in self._schema:
self._schema.remove("maximum")
else:
self._schema["maximum"] = maximum
return self
#
def exclusiveMaximum(self, maximum:float):
if maximum is None:
if "exclusiveMaximum" in self._schema:
self._schema.remove("exclusiveMaximum")
else:
self._schema["exclusiveMaximum"] = maximum
return self
#
def allowedValues(self, allowedValues:list):
if allowedValues is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
self._schema["enum"] = allowedValues
return self
#
#
class StringGenerator(AbstractGenerator):
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
#
def minLength(self, minLength:int):
if minLength is None:
if "minLength" in self._schema:
self._schema.remove("minLength")
else:
self._schema["minLength"] = minLength
return self
#
def maxLength(self, maxLength:int):
if maxLength is None:
if "maxLength" in self._schema:
self._schema.remove("maxLength")
else:
self._schema["maxLength"] = maxLength
return self
#
def regexPattern(self, regexPattern:str):
if regexPattern is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
if len(regexPattern) != len(regexPattern.strip()):
raise Exception("Invalid pattern!")
self._schema["pattern"] = regexPattern
return self
#
def regexPatternVerbose(self, regexPattern:str):
if regexPattern is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
regexPattern = compactVerboseRegEx(regexPattern)
if len(regexPattern) != len(regexPattern.strip()):
raise Exception("Invalid pattern!")
self._schema["pattern"] = regexPattern
return self
#
def allowedValues(self, allowedValues:list):
if allowedValues is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
self._schema["enum"] = allowedValues
return self
#
#
class ListGenerator(AbstractGenerator):
#
# @param AbtractGenerator[] The generator or generators the list elements can be of
#
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list, subGenList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
assert isinstance(subGenList, (list, tuple))
for subGen in subGenList:
            assert isinstance(subGen, AbstractGenerator)
self.__subGenList = subGenList
#
@property
def dataType(self) -> AbstractGenerator:
if len(self.__subGenList) == 1:
return self.__subGenList[0]
else:
raise Exception("There are multiple types defined!")
#
def minLength(self, minLength:int):
if minLength is None:
if "minItems" in self._schema:
self._schema.remove("minItems")
else:
self._schema["minItems"] = minLength
return self
#
def maxLength(self, maxLength:int):
if maxLength is None:
if "maxItems" in self._schema:
self._schema.remove("maxItems")
else:
self._schema["maxItems"] = maxLength
return self
#
def allowedValues(self, allowedValues:list):
# TODO: The intention is to restrict the allowed values to certain values. Does this implementation here conform to the JSON schema specification?
if allowedValues is None:
if "enum" in self._schema:
self._schema.remove("enum")
else:
self._schema["enum"] = allowedValues
return self
#
#
class ObjectGenerator(AbstractGenerator):
def __init__(self, _xParent, _xName:str, _xSchema:dict, _xParentalRequiredList:list):
super().__init__(_xParent, _xName, _xSchema, _xParentalRequiredList)
if _xSchema is None:
_xSchema = {
"type": [ "object" ]
}
if not "properties" in _xSchema:
_xSchema["properties"] = {}
if not "required" in _xSchema:
_xSchema["required"] = []
self._schema = _xSchema
#
def objectValue(self, name:str, bRequired:bool = True):
if bRequired:
self._schema["required"].append(name)
subSchema = {
"type": [ "object" ]
}
self._schema["properties"][name] = subSchema
return ObjectGenerator(self, name, subSchema, self._schema["required"])
#
def intValue(self, name:str, bRequired:bool = True) -> IntegerGenerator:
if bRequired:
self._schema["required"].append(name)
subSchema = {
"type": [ "integer" ]
}
self._schema["properties"][name] = subSchema
return IntegerGenerator(self, name, subSchema, self._schema["required"])
#
def floatValue(self, name:str, bRequired:bool = True) -> FloatGenerator:
if bRequired:
self._schema["required"].append(name)
subSchema = {
"type": [ "number" ]
}
self._schema["properties"][name] = subSchema
return FloatGenerator(self, name, subSchema, self._schema["required"])
#
def boolValue(self, name:str, bRequired:bool = True) -> BooleanGenerator:
if bRequired:
self._schema["required"].append(name)
subSchema = {
"type": [ "boolean" ]
}
self._schema["properties"][name] = subSchema
return BooleanGenerator(self, name, subSchema, self._schema["required"])
#
def strValue(self, name:str, bRequired:bool = True) -> StringGenerator:
if bRequired:
self._schema["required"].append(name)
subSchema = {
"type": [ "string" ]
}
self._schema["properties"][name] = subSchema
return StringGenerator(self, name, subSchema, self._schema["required"])
#
#
# A property should be of type "array".
#
# @param str name The name of the property.
# @param type|AbstractGenerator listType The type of the property values. (All property values must be of exactly this single type specified here.)
# @param bool bRequired This property is either optional or required.
#
def listValue(self, name:str, listType:typing.Union[type,AbstractGenerator], bRequired:bool = True) -> ListGenerator:
if bRequired:
self._schema["required"].append(name)
# ----
subTypeSchema, subGens = _compileListType(self, listType)
# ----
subSchema = {
"type": [ "array" ],
"items": subTypeSchema,
}
self._schema["properties"][name] = subSchema
return ListGenerator(self, name, subSchema, self._schema["required"], subGens)
#
#
_SUB_TYPE_CLASSES_BY_TYPE = {
bool: (BooleanGenerator, "boolean"),
int: (IntegerGenerator, "integer"),
float: (FloatGenerator, "number"),
str: (StringGenerator, "string"),
}
_SUB_TYPE_CLASSES_BY_STR = {
"bool": (BooleanGenerator, "boolean"),
"boolean": (BooleanGenerator, "boolean"),
"int": (IntegerGenerator, "integer"),
"integer": (IntegerGenerator, "integer"),
"float": (FloatGenerator, "number"),
"number": (FloatGenerator, "number"),
"str": (StringGenerator, "string"),
"string": (StringGenerator, "string"),
}
def _compileSingleType(parent, listType:typing.Union[type,str,"AbstractGenerator"]) -> tuple:
if isinstance(listType, AbstractGenerator):
if not listType.isDefinition:
raise Exception("The specified list element type is not a definition!")
subTypeSchema = dict(listType.schema)
subGen = listType
elif isinstance(listType, str):
if listType in _SUB_TYPE_CLASSES_BY_STR:
genClazz, jsType = _SUB_TYPE_CLASSES_BY_STR[listType]
subTypeSchema = {
"type": jsType
}
subGen = genClazz(parent, None, subTypeSchema, None)
else:
raise Exception("Invalid list element type specified: " + str(listType))
elif _isType(listType):
if listType in _SUB_TYPE_CLASSES_BY_TYPE:
genClazz, jsType = _SUB_TYPE_CLASSES_BY_TYPE[listType]
subTypeSchema = {
"type": jsType
}
subGen = genClazz(parent, None, subTypeSchema, None)
else:
raise Exception("Invalid list element type specified: " + str(listType))
else:
raise Exception("Invalid list element type specified: " + str(listType))
return subTypeSchema, subGen
#
def _compileListType(parent, listType:typing.Union[type,"AbstractGenerator"]) -> tuple:
subTypeSchema, subGen = _compileSingleType(parent, listType)
return subTypeSchema, [ subGen ]
#
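# Example (illustrative): _compileListType(parent, int) yields the pair
# ({ "type": "integer" }, [ IntegerGenerator(...) ]), so a list schema built
# from it constrains every element to be an integer.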
class _Generator(object):
def __init__(self):
self.__schema = None
self.__rng = random.Random()
self.__defs = {}
#
def __generateID(self, existingIDs):
CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
while True:
randID = "".join([ self.__rng.choice(CHARS) for i in range(0, 8) ])
if randID not in existingIDs:
return randID
#
def __str__(self):
return jk_json.dumps(self.__schema)
#
def __repr__(self):
return jk_json.dumps(self.__schema)
#
def objectValue(self) -> ObjectGenerator:
if self.__schema is not None:
raise Exception("This generator already provides a schema!")
# ----
ret = ObjectGenerator(None, None, None, None)
self.__schema = ret._schema
self.__schema["$schema"] = "http://json-schema.org/draft-04/schema#"
return ret
#
#
# A property should be of type "array".
#
# @param str name The name of the property.
# @param type|AbstractGenerator listType The type of the property values. (All property values must be of exactly this single type specified here.)
# @param bool bRequired This property is either optional or required.
#
def listValue(self, listType:typing.Union[type,AbstractGenerator], bRequired:bool = True) -> ListGenerator:
if self.__schema is not None:
raise Exception("This generator already provides a schema!")
# ----
subTypeSchema, subGens = _compileListType(self, listType)
# ----
subSchema = {
"type": [ "array" ],
"items": subTypeSchema,
}
ret = ListGenerator(self, None, subSchema, None, subGens)
self.__schema = ret._schema
self.__schema["$schema"] = "http://json-schema.org/draft-04/schema#"
return ret
#
@property
def schema(self):
ret = dict(self.__schema)
return ret
#
def __enter__(self):
return self
#
def __exit__(self, etype, value, traceback):
return False
#
#
# Invoke this method to define an object schema that can be used as a component in other definitions later.
#
def defineObject(self, name:str = None) -> ObjectGenerator:
if name is None:
name = self.__generateID(list(self.__defs.keys()))
else:
assert isinstance(name, str)
assert re.match("^[a-zA-Z]+$", name)
assert name not in self.__defs
ret = ObjectGenerator(None, None, None, None)
ret._definitionID = name
self.__defs[name] = ret
return ret
#
#
def createObjectSchemaGenerator() -> ObjectGenerator:
return ObjectGenerator(None, None, None, None)
#
def createSchemaGenerator():
return _Generator()
#
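# Example (illustrative): building a small schema with the generator API above.
# Everything used here is defined in this module; the property names are
# placeholders.
#
#     with createSchemaGenerator() as g:
#         root = g.objectValue()
#         root.strValue("name").minLength(1).maxLength(64)
#         root.intValue("age", bRequired=False)
#         root.listValue("tags", str)
#         schema = g.schema
#
# The resulting dictionary is a draft-04 JSON schema that requires "name" and
# "tags" and leaves "age" optional.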
|
PypiClean
|
/Flask_Telebot-1.0.0-py3-none-any.whl/flask_telebot.py
|
import logging
import os
import sys
import time
import telebot
from flask import current_app, request, abort, Blueprint
from pyngrok import ngrok
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
logger = logging.getLogger(__name__)
telebot.apihelper.ENABLE_MIDDLEWARE = True
prefix = os.path.basename(__file__)[:-3].lower()
blueprint = Blueprint(prefix, prefix, url_prefix='/{}'.format(prefix))
class FlaskTelebot(object):
_public_url = None
_bot = None
_bot_info = None
_message_handler = []
def __init__(self, token, app=None):
self._bot = telebot.TeleBot(token=token, threaded=False, skip_pending=True)
self.app = app
if app is not None:
self.init_app(app)
def _init_route(self):
@blueprint.route('/', methods=['GET'])
def debug():
return self.bot_info.to_dict(), 200
@blueprint.route('/webhook', methods=['POST'])
def webhook():
if request.headers.get('content-type') == 'application/json':
json_string = request.get_data().decode('utf-8')
update = telebot.types.Update.de_json(json_string)
self.bot.process_new_updates([update])
return ''
else:
abort(403)
@property
def public_url(self) -> str:
return self._public_url
def init_app(self, app):
app.extensions[prefix] = self
if app.config.get('NGROK_TOKEN'):
ngrok.set_auth_token(app.config.get('NGROK_TOKEN'))  # use app, not current_app: init_app may run outside an app context
command_line = ' '.join(sys.argv)
is_running_server = ('flask run' in command_line) or ('wsgi' in command_line)
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown)
if not is_running_server:
return
elif self.public_url:
return
self._init_route()
app.register_blueprint(blueprint)
port = os.getenv('FLASK_RUN_PORT', 9000)
if not str(port).isdigit():
raise KeyError('Error: Invalid Port')
try:
ngrok.kill()
self._public_url = ngrok.connect(port).public_url
except Exception as error:
logging.error(error.__str__())
finally:
if self.public_url:
self.bot.remove_webhook()
time.sleep(1.2)
webhook = '{}/{}/webhook'.format(self.public_url.replace('http://', 'https://'), prefix)
print(
' * ngrok tunnel "{}" -> "http://127.0.0.1:{}/"'.format(
self.public_url, port
)
)
print(
' * bot webhook "{}"'.format(webhook)
)
self.bot.set_webhook(url=webhook)
def teardown(self, exception):
ctx = stack.top
def connect(self):
return self.bot
@property
def bot(self):
return self._bot
@property
def bot_info(self):
if not self._bot_info:
self._bot_info = self.bot.get_me()
return self._bot_info
@property
def telebot(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'telebot'):
ctx.telebot = self.bot
return ctx.telebot
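# Example usage (illustrative sketch; the token, config value and handler below
# are placeholders. Only FlaskTelebot and the standard telebot decorator API
# are taken from this module and pyTelegramBotAPI):
#
#     from flask import Flask
#
#     app = Flask(__name__)
#     app.config['NGROK_TOKEN'] = '<your-ngrok-token>'
#     tb = FlaskTelebot('<bot-token>', app)
#
#     @tb.bot.message_handler(commands=['start'])
#     def start(message):
#         tb.bot.reply_to(message, 'Hello!')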
|
PypiClean
|
/nemesispy-0.0.10.tar.gz/nemesispy-0.0.10/README.md
|
This library contains routines for simulating and fitting
exoplanet emission spectra at arbitrary orbital phase,
thereby constraining the thermal structure and chemical
abundance of exoplanet atmospheres. It is also capable
of fitting emission spectra at multiple orbital phases
(phase curves) at the same time. This package
comes ready with some spectral data and General Circulation
Model (GCM) data so you can start simulating spectra
straight away. There are a few demonstration routines in
the `nemesispy` folder; in particular `demo_fit_eclipse.py`
contains an interactive plot routine which allows you
to fit a hot Jupiter eclipse spectrum by hand by varying
its chemical abundance and temperature profile. This package
can be easily integrated with a Bayesian sampler, in particular
`MultiNest` for a full spectral retrieval.
The radiative transfer calculations are done with the
correlated-k approximation, and are accelerated with the
`numba` just-in-time compiler to match the speed of
compiled languages such as Fortran. The radiative transfer
routines are based on the well-tested [Nemesis](https://github.com/nemesiscode) library developed
by Patrick Irwin (University of Oxford) and collaborators.
This package has the following advantageous features:
* Highly portable and customisable compared
to packages written in compiled languages, and
can be easily installed on computer clusters.
* Fast calculation speed due to just-in-time
compilation, which compiles Python code to machine
code at run time.
* Radiative transfer routines are benchmarked against
the extensively used [Nemesis](https://github.com/nemesiscode) library.
* Contains interactive plotting routines that allow you
to visualise the impact of gas abundance and thermal
structure on the emission spectra.
* Contains routines to simulate spectra from General
Circulation Models (GCMs).
* Contains unit tests so you can check that
the code is working properly after your modifications.
|
PypiClean
|
/aws-spot-fleet-helper-0.2.0.tar.gz/aws-spot-fleet-helper-0.2.0/README.md
|
# AWS Spot Fleet Helper #
### Python library to help launch a fleet of Spot instances within AWS infrastructure ###
Functionality is also available via CLI:
```
$ spot_fleet_config.py --help
usage: spot_fleet_config.py [-h] -bid-value BID_VALUE -ssh-key-name
SSH_KEY_NAME -ami-id AMI_ID -iam-role IAM_ROLE
-instance-type INSTANCE_TYPE [INSTANCE_TYPE ...]
-security-group SECURITY_GROUP
[SECURITY_GROUP ...] -subnet-id SUBNET_ID
[SUBNET_ID ...]
[--assign-public-ip ASSIGN_PUBLIC_IP]
[--fleet-role FLEET_ROLE] [--user-data USER_DATA]
account-id
Tool to launch a fleet of Spot instances within AWS infrastructure
positional arguments:
account-id AWS account id
optional arguments:
-h, --help show this help message and exit
-bid-value BID_VALUE Maximum bid value per VCPU in USD
-ssh-key-name SSH_KEY_NAME
SSH key name to be used
-ami-id AMI_ID Amazon Machine Image id to deploy
-iam-role IAM_ROLE Instance IAM role
-instance-type INSTANCE_TYPE [INSTANCE_TYPE ...]
Instance types to deploy (ex: c3.4xlarge, m3.medium)
-security-group SECURITY_GROUP [SECURITY_GROUP ...]
Security Group ids to deploy
-subnet-id SUBNET_ID [SUBNET_ID ...]
Subnet ids to deploy
--assign-public-ip ASSIGN_PUBLIC_IP
Assign public ip to launched instances
--fleet-role FLEET_ROLE
IAM role used to deploy assets (default:
aws_spot_fleet_helper-ec2-spot-fleet-role)
--user-data USER_DATA
User data to be included in instance launch
configuration. File name or "-" for reading from stdin
```
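For example, a typical invocation (all ids and the bid value below are placeholders; adjust them to your account) might look like:

```
$ spot_fleet_config.py 123456789012 \
    -bid-value 0.05 \
    -ssh-key-name my-key \
    -ami-id ami-0123456789abcdef0 \
    -iam-role my-instance-role \
    -instance-type c3.4xlarge m3.medium \
    -security-group sg-0123456789abcdef0 \
    -subnet-id subnet-0123456789abcdef0 \
    --assign-public-ip true
```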
|
PypiClean
|
/kiosk-multiweb-1.3.3.tar.gz/kiosk-multiweb-1.3.3/multiweb/applications/manager.py
|
import os
from urllib.parse import urlparse
from twisted import logger
from twisted.internet.defer import inlineCallbacks
from ebs.linuxnode.core.constants import ASSET
class MultiWebApplication(object):
def __init__(self, parent, spec):
self._parent = parent
self._spec = spec
@property
def parent(self):
return self._parent
@property
def actual(self):
return self.parent.actual
@property
def id(self):
return self._spec['id']
@property
def name(self):
return self._spec['name']
@property
def description(self):
return self._spec['description'] or ''
@property
def url(self):
return self._spec['link']
@property
def whitelist_urls(self):
return self._spec['whitelist_urls']
@property
def blacklist_urls(self):
return self._spec['blacklist_urls']
@property
def avatar_url(self):
return self._spec['avatar']['large']
@property
def avatar_filename(self):
return os.path.basename(urlparse(self.avatar_url).path)
@property
def avatar(self):
return self.actual.resource_manager.get(self.avatar_filename).filepath
@property
def status(self):
return self._spec['status']
@property
def order_no(self):
return self._spec['order_no']
@inlineCallbacks
def install(self):
self.actual.resource_manager.insert(
self.avatar_filename, url=self.avatar_url, rtype=ASSET)
r = self.actual.resource_manager.get(self.avatar_filename)
yield self.actual.resource_manager.prefetch(r)
def uninstall(self):
fname = os.path.basename(urlparse(self.avatar_url).path)
self.actual.resource_manager.remove(fname)
class MultiWebApplicationsManager(object):
def __init__(self, actual, *args, **kwargs):
self._log = None
self._actual = actual
self._applications = []
@property
def actual(self):
return self._actual
@property
def log(self):
if not self._log:
self._log = logger.Logger(namespace="multiweb.manager", source=self)
return self._log
@inlineCallbacks
def update(self, applications):
self.log.info("Loading Applications Response with {n} Entries",
n=len(applications))
self._applications = []
for application in applications:
app = MultiWebApplication(self, application)
yield app.install()
self._applications.append(app)
self._applications.sort(key=lambda x: x.order_no)
@property
def applications(self):
return [app for app in self._applications if app.status == 'ENABLE']
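# Illustrative application spec consumed by MultiWebApplication; the keys are
# exactly those read by the properties above, the values are placeholders:
#
#     spec = {
#         'id': 'app-1',
#         'name': 'Example App',
#         'description': 'An example application',
#         'link': 'https://example.com',
#         'whitelist_urls': [],
#         'blacklist_urls': [],
#         'avatar': {'large': 'https://example.com/avatar-large.png'},
#         'status': 'ENABLE',
#         'order_no': 1,
#     }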
|
PypiClean
|
/sage-conf-10.0b0.tar.gz/sage-conf-10.0b0/sage_root/build/pkgs/elliptic_curves/SPKG.rst
|
elliptic_curves: Databases of elliptic curves
=============================================
Description
-----------
Includes two databases:
- A small subset of John Cremona's database of elliptic curves up
to conductor 10000.
- William Stein's database of interesting curves
Upstream Contact
----------------
cremona_mini
~~~~~~~~~~~~
- Author: John Cremona
- Email: [email protected]
- Website: http://johncremona.github.io/ecdata/
ellcurves
~~~~~~~~~
- Author: William Stein
- Email: [email protected]
|
PypiClean
|
/ee_satellites-0.0.11-py3-none-any.whl/ee_satellites.py
|
import ee, pandas as pd, concurrent.futures, sentinel1, sentinel2, landsat8
''' GATHER ALL MEAN INDICES FOR A FIELD, FOR EACH TIME A SATELLITE CONSTELLATION (SENTINEL 1, SENTINEL 2 or LANDSAT 8)
CROSSED THE FIELD, WITHIN A SPECIFIC TIME SPAN '''
def process_chunk(s_filtered, chunk, polygon, field_col_name, field_name, satellite):
'''
Processes a chunk of acquisition dates for a given polygon and returns a list of acquisitions.
Parameters:
s_filtered (ee.ImageCollection): A data object with filtered and pre-processed Sentinel-1, Sentinel-2 or Landsat-8 images.
chunk (pd.Timestamp list): A list of acquisition dates to process.
polygon (ee.Geometry): The polygon of interest.
field_col_name (str): The name of the "field_name" column inside the original DataFrame.
field_name (str): The name of the field.
satellite (str): The satellite name ("sentinel-1", "sentinel-2" or "landsat-8").
Returns:
acquisitions (list): A list of dictionaries, where each dictionary represents an acquisition and contains the
acquisition date and associated features.
'''
acquisitions = []
for date in chunk:
if (satellite == 'sentinel-1'):
polarizations_means = sentinel1.get_all_polarizations(s_filtered, date, polygon)
ave = sentinel1.calculate_simple_index(polarizations_means, 'AVE')
dif = sentinel1.calculate_simple_index(polarizations_means, 'DIF')
rat1 = sentinel1.calculate_simple_index(polarizations_means, 'RAT1')
rat2 = sentinel1.calculate_simple_index(polarizations_means, 'RAT2')
ndi = sentinel1.calculate_normalized_difference_index(polarizations_means)
rvi = sentinel1.calculate_radar_vegetation_index(polarizations_means)
# Create a dataframe row for the date
df_acquisition = {str(field_col_name): field_name, 's1_acquisition_date': date,
'VV': polarizations_means['VV'], 'VH': polarizations_means['VH'],
'AVE': ave, 'DIF': dif, 'RAT1': rat1, 'RAT2': rat2,
'NDI': ndi, 'RVI': rvi}
elif (satellite == 'sentinel-2'):
# Calculate standard bands
bands_means = sentinel2.get_all_bands(s_filtered, date, polygon)
# Calculate vegetation indexes
ndvi = sentinel2.calculate_vegetation_index(bands_means, 'ND')
nsndvi = sentinel2.calculate_vegetation_index(bands_means, 'NSND')
gndvi = sentinel2.calculate_vegetation_index(bands_means, 'GND')
rendvi1 = sentinel2.calculate_vegetation_index(bands_means, 'REND', 1)
rendvi2 = sentinel2.calculate_vegetation_index(bands_means, 'REND', 2)
rendvi3 = sentinel2.calculate_vegetation_index(bands_means, 'REND', 3)
grndvi = sentinel2.calculate_vegetation_index(bands_means, 'GRND')
gbndvi = sentinel2.calculate_vegetation_index(bands_means, 'GBND')
savi = sentinel2.calculate_vegetation_index(bands_means, 'SA')
osavi = sentinel2.calculate_vegetation_index(bands_means, 'OSA')
msavi = sentinel2.calculate_vegetation_index(bands_means, 'MSA')
tsavi = sentinel2.calculate_vegetation_index(bands_means, 'TSA')
atsavi = sentinel2.calculate_vegetation_index(bands_means, 'ATSA')
rvi = sentinel2.calculate_vegetation_index(bands_means, 'R')
dvi = sentinel2.calculate_vegetation_index(bands_means, 'D')
cvi = sentinel2.calculate_vegetation_index(bands_means, 'C')
ctvi = sentinel2.calculate_vegetation_index(bands_means, 'CT')
avi = sentinel2.calculate_vegetation_index(bands_means, 'A')
arvi1 = sentinel2.calculate_vegetation_index(bands_means, 'AR', 1)
arvi2 = sentinel2.calculate_vegetation_index(bands_means, 'AR', 2)
evi1 = sentinel2.calculate_vegetation_index(bands_means, 'E', 1)
evi2 = sentinel2.calculate_vegetation_index(bands_means, 'E', 2)
evi3 = sentinel2.calculate_vegetation_index(bands_means, 'E', 3)
wdrvi = sentinel2.calculate_vegetation_index(bands_means, 'WDR')
mtvi1 = sentinel2.calculate_vegetation_index(bands_means, 'MT', 1)
mtvi2 = sentinel2.calculate_vegetation_index(bands_means, 'MT', 2)
# Calculate exogenous organic matter indexes
eomi1 = sentinel2.calculate_exogenous_organic_matter_index(bands_means, 1)
eomi2 = sentinel2.calculate_exogenous_organic_matter_index(bands_means, 2)
eomi3 = sentinel2.calculate_exogenous_organic_matter_index(bands_means, 3)
eomi4 = sentinel2.calculate_exogenous_organic_matter_index(bands_means, 4)
# Calculate normalized burn ratio indexes
nbr = sentinel2.calculate_normalized_burn_ratio(bands_means)
nbr2 = sentinel2.calculate_normalized_burn_ratio(bands_means, 2)
# Calculate chlorophyll indexes
ci1 = sentinel2.calculate_chlorophyll_index(bands_means, 1)
ci2 = sentinel2.calculate_chlorophyll_index(bands_means, 2)
ci3 = sentinel2.calculate_chlorophyll_index(bands_means, 3)
# Calculate green coverage index
gci = sentinel2.calculate_green_coverage_index(bands_means)
# Calculate soil composition index
sci = sentinel2.calculate_soil_composition_index(bands_means)
# Calculate normalized difference red edge
ndre1 = sentinel2.calculate_normalized_difference_red_edge(bands_means, 1)
ndre2 = sentinel2.calculate_normalized_difference_red_edge(bands_means, 2)
ndre3 = sentinel2.calculate_normalized_difference_red_edge(bands_means, 3)
# Calculate chlorophyll absorption ratio index
cari1 = sentinel2.calculate_chlorophyll_absorption_ratio_index(bands_means, 1)
cari2 = sentinel2.calculate_chlorophyll_absorption_ratio_index(bands_means, 2)
# Calculate modified chlorophyll absorption in reflectance index
mcari = sentinel2.calculate_modified_chlorophyll_absorption_reflectance_index(bands_means)
mcari1 = sentinel2.calculate_modified_chlorophyll_absorption_reflectance_index(bands_means, 1)
mcari2 = sentinel2.calculate_modified_chlorophyll_absorption_reflectance_index(bands_means, 2)
# Calculate bare soil index
bsi = sentinel2.calculate_bare_soil_index(bands_means)
# Calculate green leaf index
gli = sentinel2.calculate_green_leaf_index(bands_means)
# Compute alteration index
alteration = sentinel2.calculate_alteration_index(bands_means)
# Compute SWIR Difference Index
sdi = sentinel2.calculate_swir_difference_index(bands_means)
# Create a dataframe row for the date
df_acquisition = {str(field_col_name): field_name, 's2_acquisition_date': date,
'B1': bands_means['B1'], 'B2': bands_means['B2'], 'B3': bands_means['B3'], 'B4': bands_means['B4'], 'B5': bands_means['B5'],
'B6': bands_means['B6'], 'B7': bands_means['B7'], 'B8': bands_means['B8'], 'B8A': bands_means['B8A'], 'B9': bands_means['B9'],
'B11': bands_means['B11'], 'B12': bands_means['B12'],
'NDVI': ndvi, 'NSNDVI': nsndvi, 'GNDVI': gndvi, 'RENDVI1': rendvi1, 'RENDVI2': rendvi2,
'RENDVI3': rendvi3, 'GRNDVI': grndvi, 'GBNDVI': gbndvi,
'SAVI': savi, 'OSAVI': osavi, 'MSAVI': msavi, 'TSAVI': tsavi, 'ATSAVI': atsavi,
'RVI': rvi, 'DVI': dvi, 'CVI': cvi, 'CTVI': ctvi, 'AVI': avi, 'ARVI1': arvi1, 'ARVI2': arvi2,
'EVI1': evi1, 'EVI2': evi2, 'EVI3': evi3, 'WDRVI': wdrvi, 'MTVI1': mtvi1, 'MTVI2': mtvi2,
'EOMI1': eomi1, 'EOMI2': eomi2, 'EOMI3': eomi3, 'EOMI4': eomi4,
'NBR': nbr, 'NBR2': nbr2,
'CI1': ci1, 'CI2': ci2, 'CI3': ci3,
'GCI': gci, 'SCI': sci, 'NDRE1': ndre1, 'NDRE2': ndre2, 'NDRE3': ndre3,
'CARI1': cari1, 'CARI2': cari2, 'MCARI': mcari, 'MCARI1': mcari1, 'MCARI2': mcari2,
'BSI': bsi, 'GLI': gli, 'ALTERATION': alteration, 'SDI': sdi}
elif (satellite == 'landsat-8'):
bands_means = landsat8.get_all_bands(s_filtered, date, polygon)
# Create a dataframe row for the date
df_acquisition = {str(field_col_name): field_name, 'l8_acquisition_date': date,
'B1': bands_means['B1'], 'B2': bands_means['B2'], 'B3': bands_means['B3'],
'B4': bands_means['B4'], 'B5': bands_means['B5'], 'B6': bands_means['B6'],
'B7': bands_means['B7'], 'B8': bands_means['B8'], 'B9': bands_means['B9'],
'B10': bands_means['B10'], 'B11': bands_means['B11']}
# Add row to the list
acquisitions.append(df_acquisition)
return acquisitions
def parallelize_features_read(s_filtered, date_range, polygon, field_col_name, field_name, satellite, already_occupied_threads):
'''
Reads Sentinel-1, Sentinel-2 or Landsat-8 images and gathers spectral features.
Furthermore, it issues multiple I/O requests to GEE in parallel; computing the same task sequentially would take far
too long. Each thread works on a different chunk of dates while the field stays fixed.
Parameters:
s_filtered (ee.ImageCollection): A data object with filtered and pre-processed Sentinel-1, Sentinel-2 or Landsat-8 images.
date_range (pd.Timestamp list): A list of acquisition dates to process.
polygon (ee.Geometry): The polygon of interest.
field_col_name (str): The name of the "field_name" column inside the original DataFrame.
field_name (str): The name of the field.
satellite (str): The satellite name ("sentinel-1", "sentinel-2" or "landsat-8").
already_occupied_threads (int): The number of threads dedicated to parallelization over the field level.
Returns:
acquisitions (list): A list of dictionaries, where each dictionary represents an acquisition and contains the
acquisition date and associated features.
'''
# Split date range into multiple chunks
max_workers = concurrent.futures.ThreadPoolExecutor()._max_workers
num_chunks = max(1, max_workers - already_occupied_threads)
chunk_size = max(1, len(date_range) // num_chunks)
date_chunks = [date_range[i:i+chunk_size] for i in range(0, len(date_range), chunk_size)]
# Create threads to process date chunks in parallel
acquisitions = []
# Each thread works on a different chunks of dates to gather features
with concurrent.futures.ThreadPoolExecutor(num_chunks) as executor:
futures = []
for chunk in date_chunks:
futures.append(executor.submit(process_chunk, s_filtered, chunk, polygon, field_col_name, field_name, satellite))
# Wait for all futures to complete and gather results
for future in concurrent.futures.as_completed(futures):
acquisitions.extend(future.result())
return acquisitions
def process_field(field, start_date, end_date, satellite, filters_params, already_occupied_threads):
'''
Processes a single field by calculating the mean indices for each time the satellite passed over that field.
Args:
field (pandas Dataframe - row): The field item containing the field name and polygon coordinates.
start_date (str): The start date of the date range to filter the collection by.
end_date (str): The end date of the date range to filter the collection by.
satellite (str): The satellite name ("sentinel-1", "sentinel-2" or "landsat-8").
filters_params (list): The list of parameter values used to filter the satellite image collections.
already_occupied_threads (int): The number of threads dedicated to parallelization over the field level.
Returns:
list: A list of dictionary objects containing the mean calculated indices for each acquisition date, for the
specified field (using the selected satellite).
'''
# Get the field name and polygon coordinates
field_name = field[0]
polygon = ee.Geometry.Polygon(field[1])
if (satellite == 'sentinel-1'):
# Filter Sentinel 1 collection
s_collection = ee.ImageCollection('COPERNICUS/S1_GRD')
s_filtered = s_collection.filterBounds(polygon).filterDate(str(start_date), str(end_date)) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
.filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \
.filter(ee.Filter.eq('instrumentMode', 'IW')) \
.filter(ee.Filter.eq('orbitProperties_pass', filters_params[0])) #\
#.filter(ee.Filter.eq('relativeOrbitNumber_start', 66)) \
#.filterMetadata('resolution_meters', 'equals', 10)
elif (satellite == 'sentinel-2'):
# Filter Sentinel 2 collection
s_collection = ee.ImageCollection('COPERNICUS/S2_SR')
s_filtered = s_collection.filterBounds(polygon).filterDate(str(start_date), str(end_date)) \
.filter(ee.Filter.lte('CLOUDY_PIXEL_PERCENTAGE', int(filters_params[0])))
elif (satellite == 'landsat-8'):
# Filter Landsat 8 collection
s_collection = ee.ImageCollection('LANDSAT/LC08/C02/T1_TOA')
s_filtered = s_collection.filterBounds(polygon).filterDate(str(start_date), str(end_date))
# Get distinct dates from the satellite Image collection and put into the date_range list
s_dates = s_filtered.aggregate_array('system:time_start').map(lambda time_start: ee.Date(time_start).format('YYYY-MM-dd'))
s_dates_distinct = s_dates.distinct().sort()
date_range = pd.to_datetime(s_dates_distinct.getInfo())
return parallelize_features_read(s_filtered, date_range, polygon, field.index[0], field_name, satellite, already_occupied_threads)
def get_features(fields_df, start_date, end_date, satellite, filters_params=None, fields_threads=4):
'''
It takes a pandas DataFrame of crop field information and returns another DataFrame that contains,
for each time a satellite (sentinel-1, sentinel-2 or landsat-8) passed over the regions of interest within a given time period,
the mean values of the most commonly used features (optical, radar or thermal).
Furthermore, it issues multiple I/O requests to Google Earth Engine (GEE) in parallel; computing the same task
sequentially would take far too long. Each thread works on a different chunk of dates while the field stays fixed
(obviously, with only one field the fields level is not parallelized).
Please note that parallelization of requests is applied on two levels: over fields and over dates.
Args:
fields_df (pandas DataFrame): A DataFrame containing the crop field name and polygon coordinates for each field.
start_date (str): The start date of the date range to filter the collection by.
end_date (str): The end date of the date range to filter the collection by.
satellite (str): The satellite name ("sentinel-1": radar, "sentinel-2": optical, "landsat-8": optical + thermal).
filters_params (list - default `None`): The list of parameter values used to filter the satellite image collections.
* For Sentinel-1:
* first parameter in the list represents the value of the 'orbitProperties_pass' filter ('ASCENDING' or 'DESCENDING')
* For Sentinel-2:
* first parameter in the list represents the value of the 'CLOUDY_PIXEL_PERCENTAGE' filter ('LTE' - values in range [0, 100])
fields_threads (int - default `4`): The number of threads dedicated to parallelizing requests over the fields level;
the remaining threads are used for parallelization over the dates level. Set this parameter high (relative
to the total number of threads available) if you have many crop fields but a short time span to consider,
and decrease it if you have fewer fields but a longer time span.
Finally, if you have many fields with many dates to process, dedicating about half of the
available threads is usually a good choice.
Returns:
pd.DataFrame: A pandas DataFrame containing the calculated indices for each acquisition date, for each field within
the specified date range and using the selected satellite. The DataFrame includes columns for the crop
details, acquisition date and calculated indices. The calculated indices depend on the selected satellite.
'''
# Create an empty list to store the data for each field
df_list = []
# Calculate all the indices for each field, for the selected time period
# In parallel, to improve performance (each thread works on a single field)
with concurrent.futures.ThreadPoolExecutor(fields_threads) as executor:
futures = []
# When a single field is passed as a pandas Series (two entries: name and polygon coordinates)
if (len(fields_df) == 2):
futures.append(executor.submit(process_field, fields_df, start_date, end_date, satellite, filters_params, fields_threads))
else:
for index, field in fields_df.iterrows():
futures.append(executor.submit(process_field, field, start_date, end_date, satellite, filters_params, fields_threads))
# Wait for all futures to complete and gather results
for future in concurrent.futures.as_completed(futures):
df_list.extend(future.result())
# Create a dataframe from the list of rows
df = pd.DataFrame(df_list).reset_index(drop=True)
# Sorting by crop field name and acquisition date and return the sorted DataFrame
return df.sort_values([str(df.columns[0]), str(df.columns[1])], ascending=[True, True])
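# Example (illustrative sketch; assumes Earth Engine has been authenticated via
# ee.Initialize() and the column names and coordinates below are placeholders):
#
#     fields_df = pd.DataFrame({
#         'field_name': ['my_field'],
#         'polygon_coordinates': [[[11.0, 45.0], [11.1, 45.0], [11.1, 45.1], [11.0, 45.1]]],
#     })
#     df = get_features(fields_df, '2022-06-01', '2022-07-01', 'sentinel-2',
#                       filters_params=['20'], fields_threads=4)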
|
PypiClean
|
/tvb_framework-2.8.1.1-py3-none-any.whl/tvb/interfaces/web/static/mathjax/jax/output/SVG/fonts/TeX/Main/Regular/CombDiacritMarks.js
|
MathJax.Hub.Insert(MathJax.OutputJax.SVG.FONTDATA.FONTS.MathJax_Main,{768:[699,-505,0,-394,-204,"-394 655Q-394 671 -381 685T-350 699Q-334 699 -323 688Q-310 671 -278 629T-225 561T-205 533T-218 519L-233 505L-304 563Q-381 626 -387 634Q-394 643 -394 655"],769:[699,-505,0,-297,-107,"-151 699Q-133 699 -120 686T-107 656Q-107 651 -108 647T-113 637T-120 627T-133 616T-149 602T-170 585T-197 563L-268 505L-283 519Q-297 533 -296 533Q-296 534 -271 567T-218 636T-187 678L-184 681Q-182 684 -179 686T-172 692T-163 697T-151 699"],770:[694,-531,0,-388,-113,"-388 560L-251 694L-243 686Q-113 562 -113 560L-139 531Q-141 532 -197 581L-250 627L-305 580Q-318 569 -331 557T-352 538L-360 532Q-362 530 -375 546L-388 560"],771:[668,-565,0,-417,-84,"-321 601Q-336 601 -349 595T-369 584T-389 565L-403 577L-417 588Q-417 589 -405 603T-379 633T-358 654Q-335 668 -313 668T-247 650T-180 632Q-165 632 -152 638T-132 649T-112 668L-98 656L-84 645Q-125 586 -156 572Q-170 565 -187 565Q-208 565 -252 583T-321 601"],772:[590,-544,0,-431,-70,"-431 544V590H-70V544H-431"],774:[694,-515,0,-408,-93,"-250 515Q-321 515 -362 565T-408 683V694H-371V689Q-371 688 -371 683T-370 675Q-363 631 -331 599T-252 567Q-196 567 -163 608T-130 689V694H-93V683Q-97 617 -139 566T-250 515"],775:[669,-549,0,-310,-191,"-310 609Q-310 637 -292 653T-248 669Q-225 667 -208 652T-191 609Q-191 579 -208 564T-250 549Q-275 549 -292 564T-310 609"],776:[669,-554,0,-405,-95,"-405 612Q-405 633 -388 651T-347 669T-307 652T-290 612Q-290 588 -306 571T-348 554L-373 560Q-405 577 -405 612ZM-211 611Q-211 634 -196 649T-165 668Q-164 668 -160 668T-154 669Q-131 669 -114 652T-96 612T-113 572T-154 554Q-177 554 -194 570T-211 611"],778:[715,-542,0,-353,-148,"-353 628Q-353 669 -321 692T-256 715Q-202 715 -175 689T-148 629Q-148 592 -177 567T-251 542Q-298 542 -325 567T-353 628ZM-187 628Q-187 660 -200 669T-241 678H-247Q-252 678 -258 678T-266 679Q-283 679 -293 674T-308 659T-312 644T-313 629Q-313 600 -302 590Q-290 579 -250 579H-235Q-221 579 -212 581T-195 595T-187 628"],779:[701,-510,0,-378,-80,"-292 701Q-278 701 -262 690T-246 658Q-246 649 -250 641Q-252 637 -297 574T-344 510L-378 528Q-378 530 -355 598T-327 676Q-316 701 -292 701ZM-126 701Q-112 701 -96 690T-80 658Q-80 649 -84 641Q-86 637 -131 574T-178 510L-212 528Q-212 530 -189 598T-161 676Q-150 701 -126 701"],780:[644,-513,0,-386,-115,"-386 611L-373 630L-364 644Q-362 644 -307 612Q-252 581 -250 581L-194 612Q-139 644 -137 644L-115 611L-182 562L-251 513L-386 611"],824:[716,215,0,-639,-140,"-612 -215T-619 -215T-631 -212T-637 -204T-639 -197Q-639 -190 -634 -183Q-621 -157 -400 274T-176 707Q-173 716 -160 716Q-153 716 -148 712T-142 703T-140 696Q-140 691 -372 241T-608 -212Q-612 -215 -619 -215"]});MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Main/Regular/CombDiacritMarks.js");
|
PypiClean
|
/odoo_addon_l10n_th_base_location-15.0.1.0.0.2-py3-none-any.whl/odoo/addons/l10n_th_base_location/wizard/geonames_import.py
|
import csv
import os
from odoo import api, fields, models
class CityZipGeonamesImport(models.TransientModel):
_inherit = "city.zip.geonames.import"
is_thailand = fields.Boolean(
compute="_compute_is_thailand",
help="For Thailand only, data is from TH_th.txt and TH_en.txt stored "
"in the module's data folder. To get data from Geonames.org, "
"please uninstall l10n_th_base_location.",
)
location_thailand_language = fields.Selection(
[("th", "Thai"), ("en", "English")], string="Language of Thailand", default="th"
)
@api.depends("country_ids")
def _compute_is_thailand(self):
self.ensure_one()
self.is_thailand = "TH" in self.country_ids.mapped("code")
def _prepare_district_thailand(self, row):
sub_district = ""
district = ""
if len(row) >= 6:
district = row[5]
if len(row) >= 7:
sub_district = row[6]
return district, sub_district
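# Note: the row layout here follows the postal code data files shipped with this
# module (data/TH_*.txt and demo/TH_*.txt); columns 5 and 6 are assumed to hold
# the district and sub-district codes respectively.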
@api.model
def prepare_zip(self, row, city_id):
vals = super().prepare_zip(row, city_id)
district, sub_district = self._prepare_district_thailand(row)
vals.update({"district_code": district, "sub_district_code": sub_district})
return vals
@api.model
def select_zip(self, row, country, state_id):
city_zip = super().select_zip(row, country, state_id)
if country.code == "TH":
# If District or Sub-District, update code
district, sub_district = self._prepare_district_thailand(row)
city_zip.write(
{"district_code": district, "sub_district_code": sub_district}
)
return city_zip
@api.model
def get_and_parse_csv(self, country):
if country.code == "TH":
path = os.path.dirname(os.path.abspath(__file__))
import_test = self._context.get("import_test", False)
if import_test:
th_location_file = "demo/TH_th.txt"
en_location_file = "demo/TH_en.txt"
else:
th_location_file = "data/TH_th.txt"
en_location_file = "data/TH_en.txt"
if self.location_thailand_language == "th":
file_path = os.path.join(path[:-6], th_location_file)
else:
file_path = os.path.join(path[:-6], en_location_file)
with open(file_path, "r", encoding="utf-8") as data_file:
reader = csv.reader(data_file, delimiter=" ")
parsed_csv = list(reader)
return parsed_csv
return super().get_and_parse_csv(country)
|
PypiClean
|
/tno.mpc.protocols.secure_comparison-4.1.2-py3-none-any.whl/tno/mpc/protocols/secure_comparison/initiator.py
|
from secrets import choice, randbelow
from typing import Any, List, Optional, Tuple, Union, cast
from tno.mpc.encryption_schemes.dgk import DGK, DGKCiphertext
from tno.mpc.encryption_schemes.paillier import Paillier, PaillierCiphertext
from .communicator import Communicator
from .utils import to_bits
class Initiator:
"""
Player Alice in the secure comparison protocol, initiates.
"""
def __init__(
self,
l_maximum_bit_length: int,
communicator: Optional[Communicator] = None,
other_party: str = "",
scheme_paillier: Optional[Paillier] = None,
scheme_dgk: Optional[DGK] = None,
session_id: int = 0,
) -> None:
r"""
:param l_maximum_bit_length: maximum bit length used to constrain variables ($l$).
:param communicator: object for handling communication with the KeyHolder during the protocol.
:param other_party: identifier of the other party
:param scheme_paillier: Paillier encryption scheme (without secret key).
:param scheme_dgk: DGK encryption scheme (without secret key).
:param session_id: keeps track of the session.
"""
self.l_maximum_bit_length = l_maximum_bit_length
self.communicator = communicator
self.other_party = other_party
self.scheme_paillier = scheme_paillier
self.scheme_dgk = scheme_dgk
self.session_id = session_id
async def perform_secure_comparison(
self,
x_enc: PaillierCiphertext,
y_enc: PaillierCiphertext,
) -> PaillierCiphertext:
"""
Performs all steps of the secure comparison protocol for Alice.
Performs required communication with Bob.
:param x_enc: first encrypted input variable $[[x]]$.
:param y_enc: second encrypted input variable $[[y]]$.
:return: Encrypted value of $(x<=y)$: $[[(x<=y)]]$.
:raise ValueError: raised when communicator is not properly configured.
"""
if self.communicator is None:
raise ValueError("Communicator not properly initialized.")
self.session_id += 1
session_id = self.session_id
# make sure you have the schemes. Always receive them to make sure they are the same.
await self.receive_encryption_schemes(session_id)
self.scheme_paillier = cast(Paillier, self.scheme_paillier)
self.scheme_dgk = cast(DGK, self.scheme_dgk)
# step 1
z_enc, r_plain = Initiator.step_1(
x_enc, y_enc, self.l_maximum_bit_length, self.scheme_paillier
)
z_enc.randomize()
await self.communicator.send(
self.other_party, z_enc, msg_id=f"step_1_session_{session_id}"
)
# step 3
alpha = Initiator.step_3(r_plain, self.l_maximum_bit_length)
d_enc, beta_is_enc = await self.communicator.recv(
self.other_party, msg_id=f"step_4b_session_{session_id}"
)
# step 4c
d_enc = Initiator.step_4c(d_enc, r_plain, self.scheme_dgk, self.scheme_paillier)
# step 4d
alpha_is_xor_beta_is_enc = Initiator.step_4d(alpha, beta_is_enc)
# step 4e
w_is_enc_step4e, alpha_tilde = Initiator.step_4e(
r_plain, alpha, alpha_is_xor_beta_is_enc, d_enc, self.scheme_paillier
)
# step 4f
w_is_enc = Initiator.step_4f(w_is_enc_step4e)
# step 4g
s_plain, delta_a = Initiator.step_4g()
# step 4h
c_is_enc_step4h = Initiator.step_4h(
s_plain,
alpha,
alpha_tilde,
d_enc,
beta_is_enc,
w_is_enc,
delta_a,
self.scheme_dgk,
)
# step 4i
c_is_enc = Initiator.step_4i(c_is_enc_step4h, self.scheme_dgk, do_shuffle=True)
for c in c_is_enc:
c.randomize()
await self.communicator.send(
self.other_party, c_is_enc, msg_id=f"step_4i_session_{session_id}"
)
zeta_1_enc, zeta_2_enc, delta_b_enc = await self.communicator.recv(
self.other_party, msg_id=f"step_5_session_{session_id}"
)
# step 6
beta_lt_alpha_enc = Initiator.step_6(delta_a, delta_b_enc)
# step 7
x_leq_y_enc = Initiator.step_7(
zeta_1_enc,
zeta_2_enc,
r_plain,
self.l_maximum_bit_length,
beta_lt_alpha_enc,
self.scheme_paillier,
)
return x_leq_y_enc
async def receive_encryption_schemes(self, session_id: int = 1) -> None:
"""
Receives encryption schemes Paillier and DGK (without secret keys) from Bob.
:param session_id: distinguish communication different sessions.
:raise ValueError: raised when communicator is not properly configured.
"""
if self.communicator is None:
raise ValueError("Communicator not properly initialized.")
self.scheme_paillier, self.scheme_dgk = await self.communicator.recv(
self.other_party, msg_id=f"schemes_session_{session_id}"
)
@staticmethod
def shuffle(values: List[Any]) -> List[Any]:
r"""
Shuffle the list in random order.
:param values: List of objects to be shuffled.
:return: Shuffled version of the input list.
"""
values = values.copy()
shuffled_values = []
while len(values):
this_value = choice(values)
values.remove(this_value)
shuffled_values.append(this_value)
return shuffled_values
@staticmethod
def step_1(
x_enc: PaillierCiphertext,
y_enc: PaillierCiphertext,
l: int,
scheme_paillier: Paillier,
) -> Tuple[PaillierCiphertext, int]:
r"""
$A$ chooses a random number $r, 0 \leq r < N$, and computes
$$[[z]] \leftarrow [[y - x + 2^l + r]] = [[y]] \cdot [[x]]^{-1} \cdot [[2^l + r]]
\mod N^2.$$
:param x_enc: Encrypted value of $x$: $[[x]]$.
:param y_enc: Encrypted value of $y$: $[[y]]$.
:param l: Fixed value, such that $0 \leq x,y < 2^l$, for any $x$, $y$ that will be given as
input to this method.
:param scheme_paillier: Paillier encryption scheme.
:return: Tuple containing as first entry the encrypted value of $z$:
$[[z]] \leftarrow [[y - x + 2^l + r]] = [[y]] \cdot [[x]]^{-1} \cdot [[2^l + r]] \mod
N^2$. The second entry is the randomness value $r$.
"""
assert (1 << (l + 2)) < scheme_paillier.public_key.n // 2
r = randbelow(scheme_paillier.public_key.n)
# Note: the paper has a typo here; it says x - y instead of y - x.
return (
y_enc
- x_enc
+ scheme_paillier.unsafe_encrypt((1 << l) + r, apply_encoding=False),
r,
)
@staticmethod
def step_3(r: int, l: int) -> List[int]:
r"""
$A$ computes $\alpha = r \mod 2^l$.
:param r: The randomness value $r$ from step 1.
:param l: Fixed value, such that $0 \leq x,y < 2^l$, for any $x, y$ that will
be given as input to this method.
:return: Value $\alpha = r \mod 2^l$ as bits.
"""
return to_bits(r % (1 << l), l)
@staticmethod
def step_4c(
d_enc: DGKCiphertext, r: int, scheme_dgk: DGK, scheme_paillier: Paillier
) -> DGKCiphertext:
r"""
$A$ corrects $[d]$ by setting $[d] \leftarrow [0]$ whenever $0 \leq r < (N - 1)/2$.
:param d_enc: Encrypted value of $d$: $[d]$.
:param r: The randomness value $r$ from step 1.
:param scheme_dgk: DGK encryption scheme.
:param scheme_paillier: Paillier encryption scheme.
:return: Corrected encrypted value of $d$: $[d]$. If $0 \leq r < (N - 1)/2$, then
$[d] \leftarrow [0]$, else $[d]$ remains unaltered.
"""
assert (
0 <= r < scheme_paillier.public_key.n
)  # If step 1 is used, this is not an issue, but this function can also be called separately.
if r < (scheme_paillier.public_key.n - 1) // 2:
d_enc = scheme_dgk.unsafe_encrypt(0, apply_encoding=False)
return d_enc
@staticmethod
def step_4d(
alpha: List[int], beta_is_enc: List[DGKCiphertext]
) -> List[DGKCiphertext]:
r"""
For each $i, 0 \leq i < l$, $A$ computes $[\alpha_i \oplus \beta_i]$ as follows:
if $\alpha_i = 0$ then $[\alpha_i \oplus \beta_i] \leftarrow [\beta_i]$ else
$[\alpha_i \oplus \beta_i] \leftarrow [1] \cdot [\beta_i]^{-1} \mod n$.
:param alpha: The value $\alpha$ from step 3.
:param beta_is_enc: List containing the encrypted values of $\beta_i$:
$[\beta_i], 0 \leq i < l$.
:return: List containing the encrypted values of the bits
$\alpha_i \oplus \beta_i$: $[\alpha_i \oplus \beta_i], 0 \leq i < l$.
"""
def compute_xor(alpha_i: int, beta_i_enc: DGKCiphertext) -> DGKCiphertext:
r"""
Compute $[\alpha_i \oplus \beta_i]$.
:param alpha_i: The $i$-th bit of $\alpha$: $\alpha_i$.
:param beta_i_enc: The encrypted $i$-th bit of $\beta$: $[\beta_i]$.
:return: Encrypted value of $\alpha_i \oplus \beta_i$: $[\alpha_i \oplus \beta_i]$.
"""
if alpha_i == 0:
return beta_i_enc
# else (alpha_i == 1)
return 1 - beta_i_enc
return list(
map(
compute_xor,
alpha,
beta_is_enc,
)
)
@staticmethod
def step_4e(
r: int,
alpha: List[int],
alpha_is_xor_beta_is_enc: List[DGKCiphertext],
d_enc: DGKCiphertext,
scheme_paillier: Paillier,
) -> Tuple[List[DGKCiphertext], List[int]]:
r"""
$A$ computes $\tilde{\alpha} = (r - N) \mod 2^l$, the corrected value of $\alpha$ in case a
carry-over actually did occur and adjusts $[\alpha_i \oplus \beta_i]$ for each $i$:
If $\alpha_i = \tilde{\alpha}_i$ then $[w_i] \leftarrow [\alpha_i \oplus \beta_i]$
else $[w_i] \leftarrow [\alpha_i \oplus \beta_i] \cdot [d]^{-1} \mod n$
:param r: The randomness value $r$ from step 1.
:param alpha: The value $\alpha$ from step 3.
:param alpha_is_xor_beta_is_enc: List containing the encrypted values of the bits
$\alpha_i \oplus \beta_i$: $[\alpha_i \oplus \beta_i], 0 \leq i < l$.
:param d_enc: Encrypted value of $d$: $[d]$.
:param scheme_paillier: Paillier encryption scheme.
:return: Tuple containing as first entry a list containing the encrypted values of the bits
$w_i$: $[w_i], 0 \leq i < l$.
The second entry is the value $\tilde{\alpha} = (r - N) \mod 2^l$ as bits.
"""
l = len(alpha_is_xor_beta_is_enc)
def compute_w(
alpha_i: int, alpha_tilde_i: int, alpha_i_xor_beta_i_enc: DGKCiphertext
) -> DGKCiphertext:
r"""
Compute $[w_i]$.
:param alpha_i: The $i$-th bit of $\alpha$: $\alpha_i$.
:param alpha_tilde_i: The $i$-th bit of $\tilde{\alpha}$: $\tilde{\alpha}_i$.
:param alpha_i_xor_beta_i_enc: Encrypted value of the bit $\alpha_i \oplus \beta_i$:
$[\alpha_i \oplus \beta_i]$.
:return: Encrypted value of $w_i$: $[w_i]$.
"""
if alpha_i == alpha_tilde_i:
return alpha_i_xor_beta_i_enc
# else
return alpha_i_xor_beta_i_enc - d_enc
alpha_tilde = to_bits(int((r - scheme_paillier.public_key.n) % (1 << l)), l)
return (
list(
map(
compute_w,
alpha,
alpha_tilde,
alpha_is_xor_beta_is_enc,
)
),
alpha_tilde,
)
@staticmethod
def step_4f(w_is_enc: List[DGKCiphertext]) -> List[DGKCiphertext]:
r"""
For each $i, 0 \leq i < l$, $A$ computes $[w_i] \leftarrow [w_i]^{2^i} \mod n$
so that these values do not interfere with each other when added.
:param w_is_enc: List containing the encrypted values of the bits $w_i$: $[w_i],
0 \leq i < l$.
:return: List containing the encrypted values of the bits $w_i$: $[w_i], 0 \leq i < l$.
"""
base = 1
def compute_w(w_i_enc: DGKCiphertext) -> DGKCiphertext:
r"""
Compute $[w_i]$.
:param w_i_enc: Encrypted value of $w_i$: $[w_i]$.
:return: Encrypted value of $w_i$: $[w_i]$.
"""
nonlocal base
w_i_enc = w_i_enc * base
base <<= 1
return w_i_enc
return list(map(compute_w, w_is_enc))
@staticmethod
def step_4g() -> Tuple[int, int]:
r"""
$A$ chooses a uniformly random bit $\delta_A$ and computes $s = 1 - 2 \cdot \delta_A$.
:return: Tuple containing as first entry the value $s = 1 - 2 \cdot \delta_A$.
The second entry is the value $\delta_A$.
"""
delta_a = randbelow(2)
return 1 - 2 * delta_a, delta_a
@staticmethod
def step_4h(
s: int,
alpha: List[int],
alpha_tilde: List[int],
d_enc: DGKCiphertext,
beta_is_enc: List[DGKCiphertext],
w_is_enc: List[DGKCiphertext],
delta_a: int,
scheme_dgk: DGK,
) -> List[DGKCiphertext]:
r"""
For each $i, 0 \leq i < l$, $A$ computes $[c_i] = [s] \cdot [\alpha_i] \cdot
[d]^{\tilde{\alpha}_i-\alpha_i} \cdot [\beta_i]^{-1} \cdot
(\Pi^{l-1}_{j=i+1}[w_j])^3 \mod n$.
We add an additional value $[c_{-1}]$, with
$c_{-1}=\delta_A + \Sigma^{l-1}_{i=0}(x_i \oplus y_i)$ to also make
the scheme work in case of equality of $x$ and $y$.
:param s: The value $s$ from step 4g.
:param alpha: The value $\alpha$ from step 3.
:param alpha_tilde: The value $\tilde{\alpha}$ from step 4e.
:param d_enc: Encrypted value of $d$: $[d]$.
:param beta_is_enc: List containing the encrypted values of the bits $\beta_i$:
$[\beta_i], 0 \leq i < l$.
:param w_is_enc: List containing the encrypted values of the bits $w_i$: $[w_i],
0 \leq i < l$.
:param delta_a: The value $\delta_A$ from step 4g.
:param scheme_dgk: DGK encryption scheme.
:return: List containing the encrypted values of the bits $c_i$:
$[c_i] = [s] \cdot [\alpha_i]
\cdot [d]^{\tilde{\alpha}_i-\alpha_i} \cdot [\beta_i]^{-1}
\cdot (\Pi^{l-1}_{j=i+1}[w_j])^3 \mod n, 0 \leq i < l$.
"""
l = len(beta_is_enc)
c_is_enc = [
scheme_dgk.unsafe_encrypt(s, apply_encoding=False) for _ in range(l)
]
w_is_enc_sum: Union[int, DGKCiphertext] = 0
# pre-compute 3 options for d_enc * (alpha_i, alpha_tilde_i) to improve efficiency
d_enc_mult_table = {
-1: d_enc * -1,
0: d_enc * 0,
1: d_enc * 1,
}
for i, alpha_i, alpha_tilde_i in zip(
range(l - 1, -1, -1),
reversed(alpha),
reversed(alpha_tilde),
):
c_is_enc[i] += (
int(alpha_i)
+ d_enc_mult_table[alpha_tilde_i - alpha_i]
- beta_is_enc[i]
+ 3 * w_is_enc_sum
)
w_is_enc_sum += w_is_enc[i]
# we use here the fix from the paper to make equality work
c_is_enc.insert(0, cast(DGKCiphertext, delta_a + w_is_enc_sum))
return c_is_enc
@staticmethod
def step_4i(
c_is_enc: List[DGKCiphertext], scheme_dgk: DGK, do_shuffle: bool = True
) -> List[DGKCiphertext]:
r"""
$A$ blinds the numbers $c_i$ by raising them to a random non-zero exponent
$r_i \in \{1,\ldots,u-1\}$.
:param c_is_enc: List containing the encrypted values of the bits $c_i$: $[c_i],
0 \leq i < l$.
:param scheme_dgk: DGK encryption scheme.
:param do_shuffle: Boolean parameter stating whether or not the bits should
be shuffled randomly.
:return: List containing the encrypted values of the masked bits $c_i$: $[c_i],
0 \leq i < l$.
"""
u = scheme_dgk.public_key.u
def mask(c_i_enc: DGKCiphertext) -> DGKCiphertext:
r"""
Compute $[c_i]$.
:param c_i_enc: Encrypted value of the bit $c_i$: $[c_i]$.
:return: Encrypted value of the bit $c_i$: $[c_i]$.
"""
c_i_enc *= randbelow(u - 1) + 1
return c_i_enc
c_is_enc_masked = list(map(mask, c_is_enc))
return Initiator.shuffle(c_is_enc_masked) if do_shuffle else c_is_enc_masked
@staticmethod
def step_6(delta_a: int, delta_b_enc: PaillierCiphertext) -> PaillierCiphertext:
r"""
$A$ computes $[[(\beta < \alpha)]]$ as follows: if $\delta_A = 1$ then
$[[(\beta < \alpha)]] \leftarrow [[\delta_B]]$ else
$[[(\beta < \alpha)]] \leftarrow [[1]] \cdot [[\delta_B]]^{-1} \mod N^2$.
:param delta_a: The value $\delta_A$ from step 4g.
:param delta_b_enc: Encrypted value of $\delta_B$: $[[\delta_B]]$.
:return: Encrypted value of $(\beta < \alpha)$: $[[(\beta < \alpha)]]$.
"""
if delta_a == 1:
return delta_b_enc
return 1 - delta_b_enc
@staticmethod
def step_7(
zeta_1_enc: PaillierCiphertext,
zeta_2_enc: PaillierCiphertext,
r: int,
l: int,
beta_lt_alpha_enc: PaillierCiphertext,
scheme_paillier: Paillier,
) -> PaillierCiphertext:
r"""
$A$ computes $[[(x \leq y)]] \leftarrow
[[\zeta]] \cdot ([[ r \div 2^l]] \cdot [[(\beta < \alpha)]])^{-1} \mod N^2$, where
$\zeta = \zeta_1$, if $r < (N - 1) / 2$, else $\zeta = \zeta_2$.
:param zeta_1_enc: Encrypted value of $\zeta_1$: $[[\zeta_1]]$.
:param zeta_2_enc: Encrypted value of $\zeta_2$: $[[\zeta_2]]$.
:param r: The randomness value $r$ from step 1.
:param l: Fixed value, such that $0 \leq x,y < 2^l$, for any $x, y$ that will be given
as input to this method.
:param beta_lt_alpha_enc: Encrypted value of $(\beta < \alpha)$: $[[(\beta < \alpha)]]$.
:param scheme_paillier: Paillier encryption scheme.
:return: Encrypted value of $(x \leq y)$: $[[(x \leq y)]]$. This is the final result of the
computation.
"""
# We use the fix mentioned in the paper here, to also make this work in case of overflow
zeta_enc = (
zeta_1_enc if r < (scheme_paillier.public_key.n - 1) // 2 else zeta_2_enc
)
return zeta_enc - (
scheme_paillier.unsafe_encrypt(r // (1 << l), apply_encoding=False)
+ beta_lt_alpha_enc
)
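# Illustrative sketch: the purely arithmetic steps can be exercised without any
# encryption scheme (the values below are placeholders):
#
#     alpha = Initiator.step_3(r=13, l=4)  # bits of 13 mod 16, in to_bits order
#     s, delta_a = Initiator.step_4g()     # delta_a in {0, 1} and s = 1 - 2 * delta_a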
|
PypiClean
|
/zipdetr-2.0.10.tar.gz/zipdetr-2.0.10/vehicle/utils/benchmarks.py
|
import glob
import platform
import sys
import time
from pathlib import Path
import numpy as np
import torch.cuda
from tqdm import tqdm
from vehicle import YOLO
from vehicle.cfg import TASK2DATA, TASK2METRIC
from vehicle.engine.exporter import export_formats
from vehicle.utils import ASSETS, LINUX, LOGGER, MACOS, SETTINGS
from vehicle.utils.checks import check_requirements, check_yolo
from vehicle.utils.files import file_size
from vehicle.utils.torch_utils import select_device
def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt',
data=None,
imgsz=160,
half=False,
int8=False,
device='cpu',
verbose=False):
"""
Benchmark a YOLO model across different formats for speed and accuracy.
Args:
model (str | Path | optional): Path to the model file or directory. Default is
Path(SETTINGS['weights_dir']) / 'yolov8n.pt'.
data (str, optional): Dataset to evaluate on, inherited from TASK2DATA if not passed. Default is None.
imgsz (int, optional): Image size for the benchmark. Default is 160.
half (bool, optional): Use half-precision for the model if True. Default is False.
int8 (bool, optional): Use int8-precision for the model if True. Default is False.
device (str, optional): Device to run the benchmark on, either 'cpu' or 'cuda'. Default is 'cpu'.
verbose (bool | float | optional): If True or a float, assert benchmarks pass with given metric.
Default is False.
Returns:
df (pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size,
metric, and inference time.
Example:
```python
from vehicle.utils.benchmarks import benchmark
benchmark(model='yolov8n.pt', imgsz=640)
```
"""
import pandas as pd
pd.options.display.max_columns = 10
pd.options.display.width = 120
device = select_device(device, verbose=False)
if isinstance(model, (str, Path)):
model = YOLO(model)
y = []
t0 = time.time()
for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU)
emoji, filename = '❌', None # export defaults
try:
assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
if i == 10:
assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
elif i == 11:
assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
if 'cpu' in device.type:
assert cpu, 'inference not supported on CPU'
if 'cuda' in device.type:
assert gpu, 'inference not supported on GPU'
# Export
if format == '-':
filename = model.ckpt_path or model.cfg
export = model # PyTorch format
else:
filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
export = YOLO(filename, task=model.task)
assert suffix in str(filename), 'export failed'
emoji = '❎' # indicates export succeeded
# Predict
assert model.task != 'pose' or i != 7, 'GraphDef Pose inference is not supported'
assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported
assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML
export.predict(ASSETS / 'bus.jpg', imgsz=imgsz, device=device, half=half)
# Validate
data = data or TASK2DATA[model.task] # task to dataset, i.e. coco8.yaml for task=detect
key = TASK2METRIC[model.task] # task to metric, i.e. metrics/mAP50-95(B) for task=detect
results = export.val(data=data,
batch=1,
imgsz=imgsz,
plots=False,
device=device,
half=half,
int8=int8,
verbose=False)
metric, speed = results.results_dict[key], results.speed['inference']
y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
except Exception as e:
if verbose:
assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
y.append([name, emoji, round(file_size(filename), 1), None, None]) # mAP, t_inference
# Print results
check_yolo(device=device) # print system info
df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
name = Path(model.ckpt_path).name
s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
LOGGER.info(s)
with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
f.write(s)
if verbose and isinstance(verbose, float):
metrics = df[key].array # values to compare to floor
floor = verbose # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'
return df
class ProfileModels:
"""
ProfileModels class for profiling different models on ONNX and TensorRT.
This class profiles the performance of different models, provided their paths. The profiling results include
metrics such as model speed and FLOPs.
Attributes:
paths (list): Paths of the models to profile.
num_timed_runs (int): Number of timed runs for the profiling. Default is 100.
num_warmup_runs (int): Number of warmup runs before profiling. Default is 10.
min_time (float): Minimum number of seconds to profile for. Default is 60.
imgsz (int): Image size used in the models. Default is 640.
Methods:
profile(): Profiles the models and prints the result.
Example:
```python
from vehicle.utils.benchmarks import ProfileModels
ProfileModels(['yolov8n.yaml', 'yolov8s.yaml'], imgsz=640).profile()
```
"""
def __init__(self,
paths: list,
num_timed_runs=100,
num_warmup_runs=10,
min_time=60,
imgsz=640,
trt=True,
device=None):
self.paths = paths
self.num_timed_runs = num_timed_runs
self.num_warmup_runs = num_warmup_runs
self.min_time = min_time
self.imgsz = imgsz
self.trt = trt # run TensorRT profiling
self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
def profile(self):
files = self.get_files()
if not files:
print('No matching *.pt or *.onnx files found.')
return
table_rows = []
output = []
for file in files:
engine_file = file.with_suffix('.engine')
if file.suffix in ('.pt', '.yaml', '.yml'):
model = YOLO(str(file))
model.fuse() # to report correct params and GFLOPs in model.info()
model_info = model.info()
if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
engine_file = model.export(format='engine',
half=True,
imgsz=self.imgsz,
device=self.device,
verbose=False)
onnx_file = model.export(format='onnx',
half=True,
imgsz=self.imgsz,
simplify=True,
device=self.device,
verbose=False)
elif file.suffix == '.onnx':
model_info = self.get_onnx_model_info(file)
onnx_file = file
else:
continue
t_engine = self.profile_tensorrt_model(str(engine_file))
t_onnx = self.profile_onnx_model(str(onnx_file))
table_rows.append(self.generate_table_row(file.stem, t_onnx, t_engine, model_info))
output.append(self.generate_results_dict(file.stem, t_onnx, t_engine, model_info))
self.print_table(table_rows)
return output
def get_files(self):
files = []
for path in self.paths:
path = Path(path)
if path.is_dir():
extensions = ['*.pt', '*.onnx', '*.yaml']
files.extend([file for ext in extensions for file in glob.glob(str(path / ext))])
            elif path.suffix in ('.pt', '.yaml', '.yml'):  # add even if the file does not exist yet
files.append(str(path))
else:
files.extend(glob.glob(str(path)))
print(f'Profiling: {sorted(files)}')
return [Path(file) for file in sorted(files)]
def get_onnx_model_info(self, onnx_file: str):
# return (num_layers, num_params, num_gradients, num_flops)
return 0.0, 0.0, 0.0, 0.0
def iterative_sigma_clipping(self, data, sigma=2, max_iters=3):
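        # Iteratively drop points farther than `sigma` standard deviations from
        # the mean until the sample stabilizes or `max_iters` is reached.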
data = np.array(data)
for _ in range(max_iters):
mean, std = np.mean(data), np.std(data)
clipped_data = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
if len(clipped_data) == len(data):
break
data = clipped_data
return data
def profile_tensorrt_model(self, engine_file: str):
if not self.trt or not Path(engine_file).is_file():
return 0.0, 0.0
# Model and input
model = YOLO(engine_file)
input_data = np.random.rand(self.imgsz, self.imgsz, 3).astype(np.float32) # must be FP32
# Warmup runs
elapsed = 0.0
for _ in range(3):
start_time = time.time()
for _ in range(self.num_warmup_runs):
model(input_data, imgsz=self.imgsz, verbose=False)
elapsed = time.time() - start_time
# Compute number of runs as higher of min_time or num_timed_runs
num_runs = max(round(self.min_time / elapsed * self.num_warmup_runs), self.num_timed_runs * 50)
# Timed runs
run_times = []
for _ in tqdm(range(num_runs), desc=engine_file):
results = model(input_data, imgsz=self.imgsz, verbose=False)
            run_times.append(results[0].speed['inference'])  # inference time is already in milliseconds
run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3) # sigma clipping
return np.mean(run_times), np.std(run_times)
def profile_onnx_model(self, onnx_file: str):
check_requirements('onnxruntime')
import onnxruntime as ort
# Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'
sess_options = ort.SessionOptions()
sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
sess_options.intra_op_num_threads = 8 # Limit the number of threads
sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider'])
input_tensor = sess.get_inputs()[0]
input_type = input_tensor.type
# Mapping ONNX datatype to numpy datatype
if 'float16' in input_type:
input_dtype = np.float16
elif 'float' in input_type:
input_dtype = np.float32
elif 'double' in input_type:
input_dtype = np.float64
elif 'int64' in input_type:
input_dtype = np.int64
elif 'int32' in input_type:
input_dtype = np.int32
else:
raise ValueError(f'Unsupported ONNX datatype {input_type}')
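        # NOTE: assumes a static input shape; dynamic axes would need concrete
        # values before random input can be generated.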
input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
input_name = input_tensor.name
output_name = sess.get_outputs()[0].name
# Warmup runs
elapsed = 0.0
for _ in range(3):
start_time = time.time()
for _ in range(self.num_warmup_runs):
sess.run([output_name], {input_name: input_data})
elapsed = time.time() - start_time
# Compute number of runs as higher of min_time or num_timed_runs
num_runs = max(round(self.min_time / elapsed * self.num_warmup_runs), self.num_timed_runs)
# Timed runs
run_times = []
for _ in tqdm(range(num_runs), desc=onnx_file):
start_time = time.time()
sess.run([output_name], {input_name: input_data})
run_times.append((time.time() - start_time) * 1000) # Convert to milliseconds
run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=5) # sigma clipping
return np.mean(run_times), np.std(run_times)
def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
layers, params, gradients, flops = model_info
return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |'
def generate_results_dict(self, model_name, t_onnx, t_engine, model_info):
layers, params, gradients, flops = model_info
return {
'model/name': model_name,
'model/parameters': params,
'model/GFLOPs': round(flops, 3),
'model/speed_ONNX(ms)': round(t_onnx[0], 3),
'model/speed_TensorRT(ms)': round(t_engine[0], 3)}
def print_table(self, table_rows):
gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU'
header = f'| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |'
separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|'
print(f'\n\n{header}')
print(separator)
for row in table_rows:
print(row)
|
PypiClean
|
/py-healthcheck-1.10.1.tar.gz/py-healthcheck-1.10.1/healthcheck/environmentdump.py
|
import json
import os
import platform
import sys
import six
from .security import safe_dict
class EnvironmentDump(object):
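    """Collect environment information (OS, Python, process) and serve it as a
    JSON response tuple.

    Extra keyword arguments become custom sections; each value may be a
    callable or a static value. A minimal sketch (section name is arbitrary):

        dump = EnvironmentDump(app_version=lambda: "1.0.0")
        body, status, headers = dump.run()
    """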
def __init__(self,
include_os=True,
include_python=True,
include_process=True,
**kwargs):
self.functions = {}
if include_os:
self.functions['os'] = self.get_os
if include_python:
self.functions['python'] = self.get_python
if include_process:
self.functions['process'] = self.get_process
        # add custom sections passed via keyword arguments
        for k, v in kwargs.items():
            if k not in self.functions:
                self.add_section(k, v)
def add_section(self, name, func):
if name in self.functions:
raise Exception('The name "{}" is already taken.'.format(name))
if not hasattr(func, '__call__'):
self.functions[name] = lambda: func
return
self.functions[name] = func
def run(self):
data = {}
for (name, func) in six.iteritems(self.functions):
data[name] = func()
return json.dumps(data, default=str), 200, {'Content-Type': 'application/json'}
def get_os(self):
return {'platform': sys.platform,
'name': os.name,
'uname': platform.uname()}
def get_python(self):
result = {'version': sys.version,
'executable': sys.executable,
'pythonpath': sys.path,
'version_info': {'major': sys.version_info.major,
'minor': sys.version_info.minor,
'micro': sys.version_info.micro,
'releaselevel': sys.version_info.releaselevel,
'serial': sys.version_info.serial}}
try:
import pip
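            # pip.get_installed_distributions() was removed in pip 10; on newer
            # pip versions the except below simply skips package listing.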
packages = dict([(p.project_name, p.version) for p in pip.get_installed_distributions()])
result['packages'] = packages
except Exception:
pass
return result
def get_login(self):
# Based on https://github.com/gitpython-developers/GitPython/pull/43/
        # Fix for 'Inappropriate ioctl for device' on posix systems.
if os.name == "posix":
import pwd
username = pwd.getpwuid(os.geteuid()).pw_name
else:
username = os.environ.get('USER', os.environ.get('USERNAME', 'UNKNOWN'))
if username == 'UNKNOWN' and hasattr(os, 'getlogin'):
username = os.getlogin()
return username
def get_process(self):
return {'argv': sys.argv,
'cwd': os.getcwd(),
'user': self.get_login(),
'pid': os.getpid(),
'environ': safe_dict(os.environ)}
|
PypiClean
|
/ax-platform-0.3.4.tar.gz/ax-platform-0.3.4/ax/plot/pareto_frontier.py
|
import warnings
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
from ax.core.experiment import Experiment
from ax.core.objective import MultiObjective
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
)
from ax.core.outcome_constraint import ObjectiveThreshold
from ax.exceptions.core import UserInputError
from ax.plot.base import AxPlotConfig, AxPlotTypes, CI_OPACITY, DECIMALS
from ax.plot.color import COLORS, DISCRETE_COLOR_SCALE, rgba
from ax.plot.helper import _format_CI, _format_dict, extend_range
from ax.plot.pareto_utils import ParetoFrontierResults
from ax.service.utils.best_point_mixin import BestPointMixin
from ax.utils.common.typeutils import checked_cast, not_none
from plotly import express as px
from scipy.stats import norm
DEFAULT_CI_LEVEL: float = 0.9
VALID_CONSTRAINT_OP_NAMES = {"GEQ", "LEQ"}
def _make_label(
mean: float, sem: float, name: str, is_relative: bool, Z: Optional[float]
) -> str:
estimate = str(round(mean, DECIMALS))
perc = "%" if is_relative else ""
ci = (
""
if (Z is None or np.isnan(sem))
else _format_CI(estimate=mean, sd=sem, relative=is_relative, zval=Z)
)
return f"{name}: {estimate}{perc} {ci}<br>"
def _filter_outliers(Y: np.ndarray, m: float = 2.0) -> np.ndarray:
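    # Keep only rows whose every coordinate lies within m standard deviations
    # of the per-column median.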
std_filter = abs(Y - np.median(Y, axis=0)) < m * np.std(Y, axis=0)
    return Y[np.all(std_filter, axis=1)]
def scatter_plot_with_hypervolume_trace_plotly(experiment: Experiment) -> go.Figure:
"""
Plots the hypervolume of the Pareto frontier after each iteration with the same
color scheme as the Pareto frontier plot. This is useful for understanding if the
frontier is expanding or if the optimization has stalled out.
Arguments:
experiment: MOO experiment to calculate the hypervolume trace from
"""
hypervolume_trace = BestPointMixin._get_trace(experiment=experiment)
df = pd.DataFrame(
{
"hypervolume": hypervolume_trace,
"trial_index": [*range(len(hypervolume_trace))],
}
)
return px.line(
data_frame=df,
x="trial_index",
y="hypervolume",
title="Pareto Frontier Hypervolume Trace",
markers=True,
)
def scatter_plot_with_pareto_frontier_plotly(
Y: np.ndarray,
Y_pareto: Optional[np.ndarray],
metric_x: Optional[str],
metric_y: Optional[str],
reference_point: Optional[Tuple[float, float]],
minimize: Optional[Union[bool, Tuple[bool, bool]]] = True,
hovertext: Optional[Iterable[str]] = None,
) -> go.Figure:
"""Plots a scatter of all points in ``Y`` for ``metric_x`` and ``metric_y``
with a reference point and Pareto frontier from ``Y_pareto``.
Points in the scatter are colored in a gradient representing their trial index,
    with metric_x on the x-axis and metric_y on the y-axis. The reference point is
    represented as a star and the Pareto frontier as a line. The frontier connects
    to the reference point via projection lines.
NOTE: Both metrics should have the same minimization setting, passed as `minimize`.
Args:
Y: Array of outcomes, of which the first two will be plotted.
Y_pareto: Array of Pareto-optimal points, first two outcomes in which will be
plotted.
metric_x: Name of first outcome in ``Y``.
        metric_y: Name of second outcome in ``Y``.
reference_point: Reference point for ``metric_x`` and ``metric_y``.
        minimize: Whether the two metrics in the plot are being minimized or maximized.
        hovertext: Optional hover labels for the scatter points.
"""
title = "Observed metric values"
if isinstance(minimize, bool):
minimize = (minimize, minimize)
Xs = Y[:, 0]
Ys = Y[:, 1]
experimental_points_scatter = [
go.Scatter(
x=Xs,
y=Ys,
mode="markers",
marker={
"color": np.linspace(0, 100, int(len(Xs) * 1.05)),
"colorscale": "magma",
"colorbar": {
"tickvals": [0, 50, 100],
"ticktext": [
1,
"iteration",
len(Xs),
],
},
},
name="Experimental points",
hovertemplate="%{text}",
text=hovertext,
)
]
# No Pareto frontier is drawn if none is provided, or if the frontier consists of
# a single point and no reference points are provided.
if (
Y_pareto is None
or len(Y_pareto) == 0
or (len(Y_pareto) == 1 and reference_point is None)
):
# `Y_pareto` input was not specified
range_x = extend_range(lower=min(Y[:, 0]), upper=max(Y[:, 0]))
range_y = extend_range(lower=min(Y[:, 1]), upper=max(Y[:, 1]))
pareto_step = reference_point_lines = reference_point_star = []
else:
title += " with Pareto frontier"
if reference_point:
if minimize is None:
minimize = tuple(
reference_point[i] >= max(Y_pareto[:, i]) for i in range(2)
)
reference_point_star = [
go.Scatter(
x=[reference_point[0]],
y=[reference_point[1]],
mode="markers",
marker={
"color": rgba(COLORS.STEELBLUE.value),
"size": 25,
"symbol": "star",
},
)
]
extra_point_x = min(Y_pareto[:, 0]) if minimize[0] else max(Y_pareto[:, 0])
reference_point_line_1 = go.Scatter(
x=[extra_point_x, reference_point[0]],
y=[reference_point[1], reference_point[1]],
mode="lines",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
extra_point_y = min(Y_pareto[:, 1]) if minimize[1] else max(Y_pareto[:, 1])
reference_point_line_2 = go.Scatter(
x=[reference_point[0], reference_point[0]],
y=[extra_point_y, reference_point[1]],
mode="lines",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
reference_point_lines = [reference_point_line_1, reference_point_line_2]
Y_pareto_with_extra = np.concatenate(
(
[[extra_point_x, reference_point[1]]],
Y_pareto,
[[reference_point[0], extra_point_y]],
),
axis=0,
)
pareto_step = [
go.Scatter(
x=Y_pareto_with_extra[:, 0],
y=Y_pareto_with_extra[:, 1],
mode="lines",
line_shape="hv",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
]
range_x = (
extend_range(lower=min(Y_pareto[:, 0]), upper=reference_point[0])
if minimize[0]
else extend_range(lower=reference_point[0], upper=max(Y_pareto[:, 0]))
)
range_y = (
extend_range(lower=min(Y_pareto[:, 1]), upper=reference_point[1])
if minimize[1]
else extend_range(lower=reference_point[1], upper=max(Y_pareto[:, 1]))
)
else: # Reference point was not specified
pareto_step = [
go.Scatter(
x=Y_pareto[:, 0],
y=Y_pareto[:, 1],
mode="lines",
line_shape="hv",
marker={"color": rgba(COLORS.STEELBLUE.value)},
)
]
reference_point_lines = reference_point_star = []
range_x = extend_range(lower=min(Y_pareto[:, 0]), upper=max(Y_pareto[:, 0]))
range_y = extend_range(lower=min(Y_pareto[:, 1]), upper=max(Y_pareto[:, 1]))
layout = go.Layout(
title=title,
showlegend=False,
xaxis={"title": metric_x or "", "range": range_x},
yaxis={"title": metric_y or "", "range": range_y},
)
return go.Figure(
layout=layout,
data=pareto_step
+ reference_point_lines
+ experimental_points_scatter
+ reference_point_star,
)
def scatter_plot_with_pareto_frontier(
Y: np.ndarray,
Y_pareto: np.ndarray,
metric_x: str,
metric_y: str,
reference_point: Tuple[float, float],
minimize: bool = True,
) -> AxPlotConfig:
return AxPlotConfig(
data=scatter_plot_with_pareto_frontier_plotly(
Y=Y,
Y_pareto=Y_pareto,
metric_x=metric_x,
metric_y=metric_y,
reference_point=reference_point,
),
plot_type=AxPlotTypes.GENERIC,
)
def _get_single_pareto_trace(
frontier: ParetoFrontierResults,
CI_level: float,
legend_label: str = "mean",
trace_color: Tuple[int] = COLORS.STEELBLUE.value,
show_parameterization_on_hover: bool = True,
) -> go.Scatter:
primary_means = frontier.means[frontier.primary_metric]
primary_sems = frontier.sems[frontier.primary_metric]
secondary_means = frontier.means[frontier.secondary_metric]
secondary_sems = frontier.sems[frontier.secondary_metric]
absolute_metrics = frontier.absolute_metrics
all_metrics = frontier.means.keys()
if frontier.arm_names is None:
arm_names = [f"Parameterization {i}" for i in range(len(frontier.param_dicts))]
else:
arm_names = [f"Arm {name}" for name in frontier.arm_names]
if CI_level is not None:
Z = norm.ppf(1 - (1 - CI_level) / 2)
else:
Z = None
labels = []
for i, param_dict in enumerate(frontier.param_dicts):
label = f"<b>{arm_names[i]}</b><br>"
for metric in all_metrics:
metric_lab = _make_label(
mean=frontier.means[metric][i],
sem=frontier.sems[metric][i],
name=metric,
is_relative=metric not in absolute_metrics,
Z=Z,
)
label += metric_lab
parameterization = (
_format_dict(param_dict, "Parameterization")
if show_parameterization_on_hover
else ""
)
label += parameterization
labels.append(label)
return go.Scatter(
name=legend_label,
legendgroup=legend_label,
x=secondary_means,
y=primary_means,
error_x={
"type": "data",
"array": Z * np.array(secondary_sems),
"thickness": 2,
"color": rgba(trace_color, CI_OPACITY),
},
error_y={
"type": "data",
"array": Z * np.array(primary_sems),
"thickness": 2,
"color": rgba(trace_color, CI_OPACITY),
},
mode="markers",
text=labels,
hoverinfo="text",
marker={"color": rgba(trace_color)},
)
def plot_pareto_frontier(
frontier: ParetoFrontierResults,
CI_level: float = DEFAULT_CI_LEVEL,
show_parameterization_on_hover: bool = True,
) -> AxPlotConfig:
"""Plot a Pareto frontier from a ParetoFrontierResults object.
Args:
frontier (ParetoFrontierResults): The results of the Pareto frontier
computation.
CI_level (float, optional): The confidence level, i.e. 0.95 (95%)
show_parameterization_on_hover (bool, optional): If True, show the
parameterization of the points on the frontier on hover.
Returns:
        AxPlotConfig: The resulting Plotly plot definition.
"""
trace = _get_single_pareto_trace(
frontier=frontier,
CI_level=CI_level,
show_parameterization_on_hover=show_parameterization_on_hover,
)
shapes = []
primary_threshold = None
secondary_threshold = None
if frontier.objective_thresholds is not None:
primary_threshold = frontier.objective_thresholds.get(
frontier.primary_metric, None
)
secondary_threshold = frontier.objective_thresholds.get(
frontier.secondary_metric, None
)
absolute_metrics = frontier.absolute_metrics
rel_x = frontier.secondary_metric not in absolute_metrics
rel_y = frontier.primary_metric not in absolute_metrics
if primary_threshold is not None:
shapes.append(
{
"type": "line",
"xref": "paper",
"x0": 0.0,
"x1": 1.0,
"yref": "y",
"y0": primary_threshold,
"y1": primary_threshold,
"line": {"color": rgba(COLORS.CORAL.value), "width": 3},
}
)
if secondary_threshold is not None:
shapes.append(
{
"type": "line",
"yref": "paper",
"y0": 0.0,
"y1": 1.0,
"xref": "x",
"x0": secondary_threshold,
"x1": secondary_threshold,
"line": {"color": rgba(COLORS.CORAL.value), "width": 3},
}
)
layout = go.Layout(
title="Pareto Frontier",
xaxis={
"title": frontier.secondary_metric,
"ticksuffix": "%" if rel_x else "",
"zeroline": True,
},
yaxis={
"title": frontier.primary_metric,
"ticksuffix": "%" if rel_y else "",
"zeroline": True,
},
hovermode="closest",
legend={"orientation": "h"},
width=750,
height=500,
margin=go.layout.Margin(pad=4, l=225, b=75, t=75), # noqa E741
shapes=shapes,
)
fig = go.Figure(data=[trace], layout=layout)
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def plot_multiple_pareto_frontiers(
frontiers: Dict[str, ParetoFrontierResults],
CI_level: float = DEFAULT_CI_LEVEL,
show_parameterization_on_hover: bool = True,
) -> AxPlotConfig:
"""Plot a Pareto frontier from a ParetoFrontierResults object.
Args:
frontiers (Dict[str, ParetoFrontierResults]): The results of
the Pareto frontier computation.
CI_level (float, optional): The confidence level, i.e. 0.95 (95%)
show_parameterization_on_hover (bool, optional): If True, show the
parameterization of the points on the frontier on hover.
Returns:
        AxPlotConfig: The resulting Plotly plot definition.
"""
first_frontier = list(frontiers.values())[0]
traces = []
for i, (method, frontier) in enumerate(frontiers.items()):
# Check the two metrics are the same as the first frontier
if (
frontier.primary_metric != first_frontier.primary_metric
or frontier.secondary_metric != first_frontier.secondary_metric
):
raise ValueError("All frontiers should have the same pairs of metrics.")
trace = _get_single_pareto_trace(
frontier=frontier,
legend_label=method,
trace_color=DISCRETE_COLOR_SCALE[i % len(DISCRETE_COLOR_SCALE)],
CI_level=CI_level,
show_parameterization_on_hover=show_parameterization_on_hover,
)
traces.append(trace)
shapes = []
primary_threshold = None
secondary_threshold = None
if frontier.objective_thresholds is not None:
primary_threshold = frontier.objective_thresholds.get(
frontier.primary_metric, None
)
secondary_threshold = frontier.objective_thresholds.get(
frontier.secondary_metric, None
)
absolute_metrics = frontier.absolute_metrics
rel_x = frontier.secondary_metric not in absolute_metrics
rel_y = frontier.primary_metric not in absolute_metrics
if primary_threshold is not None:
shapes.append(
{
"type": "line",
"xref": "paper",
"x0": 0.0,
"x1": 1.0,
"yref": "y",
"y0": primary_threshold,
"y1": primary_threshold,
"line": {"color": rgba(COLORS.CORAL.value), "width": 3},
}
)
if secondary_threshold is not None:
shapes.append(
{
"type": "line",
"yref": "paper",
"y0": 0.0,
"y1": 1.0,
"xref": "x",
"x0": secondary_threshold,
"x1": secondary_threshold,
"line": {"color": rgba(COLORS.CORAL.value), "width": 3},
}
)
layout = go.Layout(
title="Pareto Frontier",
xaxis={
"title": frontier.secondary_metric,
"ticksuffix": "%" if rel_x else "",
"zeroline": True,
},
yaxis={
"title": frontier.primary_metric,
"ticksuffix": "%" if rel_y else "",
"zeroline": True,
},
hovermode="closest",
legend={
"orientation": "h",
"yanchor": "top",
"y": -0.20,
"xanchor": "auto",
"x": 0.075,
},
width=750,
height=550,
margin=go.layout.Margin(pad=4, l=225, b=125, t=75), # noqa E741
shapes=shapes,
)
fig = go.Figure(data=traces, layout=layout)
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def interact_pareto_frontier(
frontier_list: List[ParetoFrontierResults],
CI_level: float = DEFAULT_CI_LEVEL,
show_parameterization_on_hover: bool = True,
label_dict: Optional[Dict[str, str]] = None,
) -> AxPlotConfig:
"""Plot a pareto frontier from a list of objects
Args:
frontier_list: List of ParetoFrontierResults objects to be plotted.
CI_level: CI level for error bars.
show_parameterization_on_hover: Show parameterization on hover.
label_dict: Map from metric name to shortened alias to use on plot.
"""
if not frontier_list:
raise ValueError("Must receive a non-empty list of pareto frontiers to plot.")
label_dict_use = {k: k for k in frontier_list[0].means}
if label_dict is not None:
label_dict_use.update(label_dict)
traces = []
shapes = []
for frontier in frontier_list:
config = plot_pareto_frontier(
frontier=frontier,
CI_level=CI_level,
show_parameterization_on_hover=show_parameterization_on_hover,
)
traces.append(config.data["data"][0])
shapes.append(config.data["layout"].get("shapes", []))
for i, trace in enumerate(traces):
if i == 0: # Only the first trace is initially set to visible
trace["visible"] = True
else: # All other plot traces are not visible initially
trace["visible"] = False
# TODO (jej): replace dropdown with two dropdowns, one for x one for y.
dropdown = []
for i, frontier in enumerate(frontier_list):
trace_cnt = 1
# Only one plot trace is visible at a given time.
visible = [False] * (len(frontier_list) * trace_cnt)
for j in range(i * trace_cnt, (i + 1) * trace_cnt):
visible[j] = True
rel_y = frontier.primary_metric not in frontier.absolute_metrics
rel_x = frontier.secondary_metric not in frontier.absolute_metrics
primary_metric = label_dict_use[frontier.primary_metric]
secondary_metric = label_dict_use[frontier.secondary_metric]
dropdown.append(
{
"method": "update",
"args": [
{"visible": visible, "method": "restyle"},
{
"yaxis.title": primary_metric,
"xaxis.title": secondary_metric,
"yaxis.ticksuffix": "%" if rel_y else "",
"xaxis.ticksuffix": "%" if rel_x else "",
"shapes": shapes[i],
},
],
"label": f"{primary_metric}<br>vs {secondary_metric}",
}
)
# Set initial layout arguments.
initial_frontier = frontier_list[0]
rel_x = initial_frontier.secondary_metric not in initial_frontier.absolute_metrics
rel_y = initial_frontier.primary_metric not in initial_frontier.absolute_metrics
secondary_metric = label_dict_use[initial_frontier.secondary_metric]
primary_metric = label_dict_use[initial_frontier.primary_metric]
layout = go.Layout(
title="Pareto Frontier",
xaxis={
"title": secondary_metric,
"ticksuffix": "%" if rel_x else "",
"zeroline": True,
},
yaxis={
"title": primary_metric,
"ticksuffix": "%" if rel_y else "",
"zeroline": True,
},
updatemenus=[
{
"buttons": dropdown,
"x": 0.075,
"xanchor": "left",
"y": 1.1,
"yanchor": "middle",
}
],
hovermode="closest",
legend={"orientation": "h"},
width=750,
height=500,
margin=go.layout.Margin(pad=4, l=225, b=75, t=75), # noqa E741
shapes=shapes[0],
)
fig = go.Figure(data=traces, layout=layout)
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def interact_multiple_pareto_frontier(
frontier_lists: Dict[str, List[ParetoFrontierResults]],
CI_level: float = DEFAULT_CI_LEVEL,
show_parameterization_on_hover: bool = True,
) -> AxPlotConfig:
"""Plot a Pareto frontiers from a list of lists of NamedParetoFrontierResults
objects that we want to compare.
Args:
        frontier_lists (Dict[str, List[ParetoFrontierResults]]): A dictionary of multiple
lists of Pareto frontier computation results to plot for comparison.
Each list of ParetoFrontierResults contains a list of the results of
the same pareto frontier but under different pairs of metrics.
            Different List[ParetoFrontierResults] must contain the same pairs
of metrics for this function to work.
CI_level (float, optional): The confidence level, i.e. 0.95 (95%)
show_parameterization_on_hover (bool, optional): If True, show the
parameterization of the points on the frontier on hover.
Returns:
        AxPlotConfig: The resulting Plotly plot definition.
"""
if not frontier_lists:
raise ValueError("Must receive a non-empty list of pareto frontiers to plot.")
# Check all the lists have the same length
vals = frontier_lists.values()
length = len(frontier_lists[next(iter(frontier_lists))])
if not all(len(item) == length for item in vals):
raise ValueError("Not all lists in frontier_lists have the same length.")
# Transform the frontier_lists to lists of frontiers where each list
# corresponds to one pair of metrics with multiple frontiers
list_of_frontiers = [
dict(zip(frontier_lists.keys(), t)) for t in zip(*frontier_lists.values())
]
# Get the traces and shapes for plotting
traces = []
shapes = []
for frontiers in list_of_frontiers:
config = plot_multiple_pareto_frontiers(
frontiers=frontiers,
CI_level=CI_level,
show_parameterization_on_hover=show_parameterization_on_hover,
)
for i in range(len(config.data["data"])):
traces.append(config.data["data"][i])
shapes.append(config.data["layout"].get("shapes", []))
num_frontiers = len(frontier_lists)
num_metric_pairs = len(list_of_frontiers)
for i, trace in enumerate(traces):
if (
i < num_frontiers
): # Only the traces for metric 1 v.s. metric 2 are initially set to visible
trace["visible"] = True
else: # All other plot traces are not visible initially
trace["visible"] = False
dropdown = []
for i, frontiers in enumerate(list_of_frontiers):
# Only plot traces for the current pair of metrics are visible at a given time.
visible = [False] * (num_metric_pairs * num_frontiers)
for j in range(i * num_frontiers, (i + 1) * num_frontiers):
visible[j] = True
# Get the first frontier for reference of metric names
first_frontier = list(frontiers.values())[0]
rel_y = first_frontier.primary_metric not in first_frontier.absolute_metrics
rel_x = first_frontier.secondary_metric not in first_frontier.absolute_metrics
primary_metric = first_frontier.primary_metric
secondary_metric = first_frontier.secondary_metric
dropdown.append(
{
"method": "update",
"args": [
{"visible": visible, "method": "restyle"},
{
"yaxis.title": primary_metric,
"xaxis.title": secondary_metric,
"yaxis.ticksuffix": "%" if rel_y else "",
"xaxis.ticksuffix": "%" if rel_x else "",
"shapes": shapes[i],
},
],
"label": f"{primary_metric} vs {secondary_metric}",
}
)
# Set initial layout arguments.
initial_first_frontier = list(list_of_frontiers[0].values())[0]
rel_x = (
initial_first_frontier.secondary_metric
not in initial_first_frontier.absolute_metrics
)
rel_y = (
initial_first_frontier.primary_metric
not in initial_first_frontier.absolute_metrics
)
secondary_metric = initial_first_frontier.secondary_metric
primary_metric = initial_first_frontier.primary_metric
layout = go.Layout(
title="Pareto Frontier",
xaxis={
"title": secondary_metric,
"ticksuffix": "%" if rel_x else "",
"zeroline": True,
},
yaxis={
"title": primary_metric,
"ticksuffix": "%" if rel_y else "",
"zeroline": True,
},
updatemenus=[
{
"buttons": dropdown,
"x": 0.075,
"xanchor": "left",
"y": 1.1,
"yanchor": "middle",
}
],
hovermode="closest",
legend={
"orientation": "h",
"yanchor": "top",
"y": -0.20,
"xanchor": "auto",
"x": 0.075,
},
showlegend=True,
width=750,
height=550,
margin=go.layout.Margin(pad=4, l=225, b=125, t=75), # noqa E741
shapes=shapes[0],
)
fig = go.Figure(data=traces, layout=layout)
return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
def _pareto_frontier_plot_input_processing(
experiment: Experiment,
metric_names: Optional[Tuple[str, str]] = None,
reference_point: Optional[Tuple[float, float]] = None,
minimize: Optional[Union[bool, Tuple[bool, bool]]] = None,
) -> Tuple[Tuple[str, str], Optional[Tuple[float, float]], Optional[Tuple[bool, bool]]]:
"""Processes inputs for Pareto frontier + scatterplot.
Args:
experiment: An Ax experiment.
metric_names: The names of two metrics to be plotted. Defaults to the metrics
in the optimization_config.
reference_point: The 2-dimensional reference point to use when plotting the
Pareto frontier. Defaults to the value of the objective thresholds of each
variable.
minimize: Whether each metric is being minimized. Defaults to the direction
specified for each variable in the optimization config.
Returns:
metric_names: The names of two metrics to be plotted.
reference_point: The 2-dimensional reference point to use when plotting the
Pareto frontier.
minimize: Whether each metric is being minimized.
"""
optimization_config = _validate_experiment_and_get_optimization_config(
experiment=experiment,
metric_names=metric_names,
reference_point=reference_point,
)
metric_names = _validate_and_maybe_get_default_metric_names(
metric_names=metric_names, optimization_config=optimization_config
)
objective_thresholds = _validate_experiment_and_maybe_get_objective_thresholds(
optimization_config=optimization_config,
metric_names=metric_names,
reference_point=reference_point,
)
reference_point = _validate_and_maybe_get_default_reference_point(
reference_point=reference_point,
objective_thresholds=objective_thresholds,
metric_names=metric_names,
)
minimize_output = _validate_and_maybe_get_default_minimize(
minimize=minimize,
objective_thresholds=objective_thresholds,
metric_names=metric_names,
optimization_config=optimization_config,
)
return metric_names, reference_point, minimize_output
def _validate_experiment_and_get_optimization_config(
experiment: Experiment,
metric_names: Optional[Tuple[str, str]] = None,
reference_point: Optional[Tuple[float, float]] = None,
minimize: Optional[Union[bool, Tuple[bool, bool]]] = None,
) -> Optional[OptimizationConfig]:
# If `optimization_config` is unspecified, check what inputs are missing and
# error/warn accordingly
if experiment.optimization_config is None:
if metric_names is None:
raise UserInputError(
"Inference of defaults failed. Please either specify `metric_names` "
"(and optionally `minimize` and `reference_point`) or provide an "
"experiment with an `optimization_config`."
)
if reference_point is None or minimize is None:
warnings.warn(
"Inference of defaults failed. Please specify `minimize` and "
"`reference_point` if available, or provide an experiment with an "
"`optimization_config` that contains an `objective` and "
"`objective_threshold` corresponding to each of `metric_names`: "
f"{metric_names}."
)
return None
return not_none(experiment.optimization_config)
def _validate_and_maybe_get_default_metric_names(
metric_names: Optional[Tuple[str, str]],
optimization_config: Optional[OptimizationConfig],
) -> Tuple[str, str]:
# Default metric_names is all metrics, producing an error if more than 2
if metric_names is None:
if not_none(optimization_config).is_moo_problem:
multi_objective = checked_cast(
MultiObjective, not_none(optimization_config).objective
)
metric_names = tuple(obj.metric.name for obj in multi_objective.objectives)
else:
raise UserInputError(
"Inference of `metric_names` failed. Expected `MultiObjective` but "
f"got {not_none(optimization_config).objective}. Please specify "
"`metric_names` of length 2 or provide an experiment whose "
"`optimization_config` has 2 objective metrics."
)
if metric_names is not None and len(metric_names) == 2:
return metric_names
raise UserInputError(
f"Expected 2 metrics but got {len(metric_names or [])}: {metric_names}. "
"Please specify `metric_names` of length 2 or provide an experiment whose "
"`optimization_config` has 2 objective metrics."
)
def _validate_experiment_and_maybe_get_objective_thresholds(
optimization_config: Optional[OptimizationConfig],
metric_names: Tuple[str, str],
reference_point: Optional[Tuple[float, float]],
) -> List[ObjectiveThreshold]:
objective_thresholds = []
# Validate `objective_thresholds` if `reference_point` is unspecified.
if reference_point is None:
objective_thresholds = checked_cast(
MultiObjectiveOptimizationConfig, optimization_config
).objective_thresholds
if any(
ot.relative for ot in objective_thresholds if ot.metric.name in metric_names
):
raise NotImplementedError(
"Pareto plotting not supported for experiments with relative objective "
"thresholds."
)
constraint_metric_names = {
objective_threshold.metric.name
for objective_threshold in objective_thresholds
}
missing_metric_names = set(metric_names) - set(constraint_metric_names)
if missing_metric_names:
warnings.warn(
"For automatic inference of reference point, expected one "
"`objective_threshold` for each metric in `metric_names`: "
f"{metric_names}. Missing {missing_metric_names}. Got "
f"{len(objective_thresholds)}: {objective_thresholds}. "
"Please specify `reference_point` or provide "
"an experiment whose `optimization_config` contains one "
"objective threshold for each metric. Returning an empty list."
)
return objective_thresholds
def _validate_and_maybe_get_default_reference_point(
reference_point: Optional[Tuple[float, float]],
objective_thresholds: List[ObjectiveThreshold],
metric_names: Tuple[str, str],
) -> Optional[Tuple[float, float]]:
if reference_point is None:
reference_point = {
objective_threshold.metric.name: objective_threshold.bound
for objective_threshold in objective_thresholds
}
missing_metric_names = set(metric_names) - set(reference_point)
if missing_metric_names:
warnings.warn(
"Automated determination of `reference_point` failed: missing metrics "
f"{missing_metric_names}. Please specify `reference_point` or provide "
"an experiment whose `optimization_config` has one "
"`objective_threshold` for each of two metrics. Returning `None`."
)
return None
reference_point = tuple(
reference_point[metric_name] for metric_name in metric_names
)
if len(reference_point) != 2:
warnings.warn(
f"Expected 2-dimensional `reference_point` but got {len(reference_point)} "
f"dimensions: {reference_point}. Please specify `reference_point` of "
"length 2 or provide an experiment whose optimization config has one "
"`objective_threshold` for each of two metrics. Returning `None`."
)
return None
return reference_point
def _validate_and_maybe_get_default_minimize(
minimize: Optional[Union[bool, Tuple[bool, bool]]],
objective_thresholds: List[ObjectiveThreshold],
metric_names: Tuple[str, str],
optimization_config: Optional[OptimizationConfig] = None,
) -> Optional[Tuple[bool, bool]]:
if minimize is None:
# Determine `minimize` defaults
minimize = tuple(
_maybe_get_default_minimize_single_metric(
metric_name=metric_name,
optimization_config=optimization_config,
objective_thresholds=objective_thresholds,
)
for metric_name in metric_names
)
# If either value of minimize is missing, return `None`
if any(i_min is None for i_min in minimize):
warnings.warn(
"Extraction of default `minimize` failed. Please specify `minimize` "
"of length 2 or provide an experiment whose `optimization_config` "
"includes 2 objectives. Returning None."
)
return None
minimize = tuple(not_none(i_min) for i_min in minimize)
# If only one bool provided, use for both dimensions
elif isinstance(minimize, bool):
minimize = (minimize, minimize)
if len(minimize) != 2:
warnings.warn(
f"Expected 2-dimensional `minimize` but got {len(minimize)} dimensions: "
f"{minimize}. Please specify `minimize` of length 2 or provide an "
"experiment whose `optimization_config` includes 2 objectives. Returning "
"None."
)
return None
return minimize
def _maybe_get_default_minimize_single_metric(
metric_name: str,
objective_thresholds: List[ObjectiveThreshold],
optimization_config: Optional[OptimizationConfig] = None,
) -> Optional[bool]:
minimize = None
# First try to get metric_name from optimization_config
if (
optimization_config is not None
and metric_name in optimization_config.objective.metric_names
):
if optimization_config.is_moo_problem:
multi_objective = checked_cast(
MultiObjective, optimization_config.objective
)
for objective in multi_objective.objectives:
if objective.metric.name == metric_name:
return objective.minimize
else:
return optimization_config.objective.minimize
# Next try to get minimize from objective_thresholds
if objective_thresholds is not None:
constraint_op_names = {
objective_threshold.op.name for objective_threshold in objective_thresholds
}
invalid_constraint_op_names = constraint_op_names - VALID_CONSTRAINT_OP_NAMES
if invalid_constraint_op_names:
raise ValueError(
"Operators of all constraints must be in "
f"{VALID_CONSTRAINT_OP_NAMES}. Got {invalid_constraint_op_names}.)"
)
minimize = {
objective_threshold.metric.name: objective_threshold.op.name == "LEQ"
for objective_threshold in objective_thresholds
}
minimize = minimize.get(metric_name)
if minimize is None:
warnings.warn(
f"Extraction of default `minimize` failed for metric {metric_name}. "
f"Ensure {metric_name} is an objective of the provided experiment. "
"Setting `minimize` to `None`."
)
return minimize
|
PypiClean
|
/cppyy-cling-6.28.0.tar.gz/cppyy-cling-6.28.0/src/interpreter/llvm/src/lib/Support/Unix/README.txt
|
llvm/lib/Support/Unix README
============================
This directory provides implementations of the lib/System classes that
are common to two or more variants of UNIX. For example, the directory
structure underneath this directory could look like this:
Unix - only code that is truly generic to all UNIX platforms
Posix - code that is specific to Posix variants of UNIX
SUS - code that is specific to the Single Unix Specification
SysV - code that is specific to System V variants of UNIX
As a rule, only those directories actually needing to be created should be
created. Also, further subdirectories could be created to reflect versions of
the various standards. For example, under SUS there could be v1, v2, and v3
subdirectories to reflect the three major versions of SUS.
|
PypiClean
|
/onnxruntime_openvino-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/onnxruntime/backend/backend.py
|
import os
import unittest
import packaging.version
from onnx import ModelProto, helper, version # noqa: F401
from onnx.backend.base import Backend
from onnx.checker import check_model
from onnxruntime import InferenceSession, SessionOptions, get_available_providers, get_device
from onnxruntime.backend.backend_rep import OnnxRuntimeBackendRep
class OnnxRuntimeBackend(Backend):
"""
Implements
`ONNX's backend API <https://github.com/onnx/onnx/blob/main/docs/ImplementingAnOnnxBackend.md>`_
with *ONNX Runtime*.
The backend is mostly used when you need to switch between
multiple runtimes with the same API.
`Importing models from ONNX to Caffe2 <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxCaffe2Import.ipynb>`_
shows how to use *caffe2* as a backend for a converted model.
Note: This is not the official Python API.
"""
allowReleasedOpsetsOnly = bool(os.getenv("ALLOW_RELEASED_ONNX_OPSET_ONLY", "1") == "1") # noqa: N815
@classmethod
def is_compatible(cls, model, device=None, **kwargs):
"""
Return whether the model is compatible with the backend.
:param model: unused
:param device: None to use the default device or a string (ex: `'CPU'`)
:return: boolean
"""
if device is None:
device = get_device()
return cls.supports_device(device)
@classmethod
def is_opset_supported(cls, model):
"""
Return whether the opset for the model is supported by the backend.
        By default, only released ONNX opsets are allowed by the backend.
        To test new opsets, the env variable ALLOW_RELEASED_ONNX_OPSET_ONLY should be set to 0.
:param model: Model whose opsets needed to be verified.
:return: boolean and error message if opset is not supported.
"""
if cls.allowReleasedOpsetsOnly:
for opset in model.opset_import:
domain = opset.domain if opset.domain else "ai.onnx"
try:
key = (domain, opset.version)
if key not in helper.OP_SET_ID_VERSION_MAP:
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{}' version '{}'.".format(domain, opset.version)
)
return False, error_message
except AttributeError:
                    # On some CI pipelines, accessing helper.OP_SET_ID_VERSION_MAP
                    # raises an AttributeError. TODO: investigate those pipelines to
                    # fix this. Fall back to a simple version check when this happens.
if (domain == "ai.onnx" and opset.version > 12) or (domain == "ai.ommx.ml" and opset.version > 2):
error_message = (
"Skipping this test as only released onnx opsets are supported."
"To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0."
" Got Domain '{}' version '{}'.".format(domain, opset.version)
)
return False, error_message
return True, ""
@classmethod
def supports_device(cls, device):
"""
Check whether the backend is compiled with particular device support.
In particular it's used in the testing suite.
"""
if device == "CUDA":
device = "GPU"
return device in get_device()
@classmethod
def prepare(cls, model, device=None, **kwargs):
"""
Load the model and creates a :class:`onnxruntime.InferenceSession`
ready to be used as a backend.
:param model: ModelProto (returned by `onnx.load`),
string for a filename or bytes for a serialized model
:param device: requested device for the computation,
None means the default one which depends on
the compilation settings
:param kwargs: see :class:`onnxruntime.SessionOptions`
:return: :class:`onnxruntime.InferenceSession`
"""
if isinstance(model, OnnxRuntimeBackendRep):
return model
elif isinstance(model, InferenceSession):
return OnnxRuntimeBackendRep(model)
elif isinstance(model, (str, bytes)):
options = SessionOptions()
for k, v in kwargs.items():
if hasattr(options, k):
setattr(options, k, v)
excluded_providers = os.getenv("ORT_ONNX_BACKEND_EXCLUDE_PROVIDERS", default="").split(",")
providers = [x for x in get_available_providers() if (x not in excluded_providers)]
inf = InferenceSession(model, sess_options=options, providers=providers)
# backend API is primarily used for ONNX test/validation. As such, we should disable session.run() fallback
# which may hide test failures.
inf.disable_fallback()
if device is not None and not cls.supports_device(device):
raise RuntimeError(f"Incompatible device expected '{device}', got '{get_device()}'")
return cls.prepare(inf, device, **kwargs)
else:
# type: ModelProto
# check_model serializes the model anyways, so serialize the model once here
# and reuse it below in the cls.prepare call to avoid an additional serialization
# only works with onnx >= 1.10.0 hence the version check
onnx_version = packaging.version.parse(version.version) or packaging.version.Version("0")
onnx_supports_serialized_model_check = onnx_version.release >= (1, 10, 0)
bin_or_model = model.SerializeToString() if onnx_supports_serialized_model_check else model
check_model(bin_or_model)
opset_supported, error_message = cls.is_opset_supported(model)
if not opset_supported:
raise unittest.SkipTest(error_message)
# Now bin might be serialized, if it's not we need to serialize it otherwise we'll have
# an infinite recursive call
bin = bin_or_model
if not isinstance(bin, (str, bytes)):
bin = bin.SerializeToString()
return cls.prepare(bin, device, **kwargs)
@classmethod
def run_model(cls, model, inputs, device=None, **kwargs):
"""
Compute the prediction.
:param model: :class:`onnxruntime.InferenceSession` returned
by function *prepare*
:param inputs: inputs
:param device: requested device for the computation,
None means the default one which depends on
the compilation settings
:param kwargs: see :class:`onnxruntime.RunOptions`
:return: predictions
"""
rep = cls.prepare(model, device, **kwargs)
return rep.run(inputs, **kwargs)
@classmethod
def run_node(cls, node, inputs, device=None, outputs_info=None, **kwargs):
"""
This method is not implemented as it is much more efficient
to run a whole model than every node independently.
"""
raise NotImplementedError("It is much more efficient to run a whole model than every node independently.")
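# Module-level aliases expected by the onnx.backend API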
is_compatible = OnnxRuntimeBackend.is_compatible
prepare = OnnxRuntimeBackend.prepare
run = OnnxRuntimeBackend.run_model
supports_device = OnnxRuntimeBackend.supports_device
|
PypiClean
|
/django_htmx_ui_adminlte-0.1.13-py3-none-any.whl/django_htmx_ui_adminlte/static/adminlte/plugins/summernote/lang/summernote-vi-VN.js
|
(function webpackUniversalModuleDefinition(root, factory) {
if(typeof exports === 'object' && typeof module === 'object')
module.exports = factory();
else if(typeof define === 'function' && define.amd)
define([], factory);
else {
var a = factory();
for(var i in a) (typeof exports === 'object' ? exports : root)[i] = a[i];
}
})(self, function() {
return /******/ (() => { // webpackBootstrap
var __webpack_exports__ = {};
(function ($) {
$.extend($.summernote.lang, {
'vi-VN': {
font: {
bold: 'In Đậm',
italic: 'In Nghiêng',
underline: 'Gạch dưới',
clear: 'Bỏ định dạng',
height: 'Chiều cao dòng',
name: 'Phông chữ',
strikethrough: 'Gạch ngang',
subscript: 'Subscript',
superscript: 'Superscript',
size: 'Cỡ chữ'
},
image: {
image: 'Hình ảnh',
insert: 'Chèn',
resizeFull: '100%',
resizeHalf: '50%',
resizeQuarter: '25%',
floatLeft: 'Trôi về trái',
floatRight: 'Trôi về phải',
floatNone: 'Không trôi',
shapeRounded: 'Shape: Rounded',
shapeCircle: 'Shape: Circle',
shapeThumbnail: 'Shape: Thumbnail',
shapeNone: 'Shape: None',
dragImageHere: 'Thả Ảnh ở vùng này',
dropImage: 'Drop image or Text',
selectFromFiles: 'Chọn từ File',
maximumFileSize: 'Maximum file size',
maximumFileSizeError: 'Maximum file size exceeded.',
url: 'URL',
remove: 'Xóa',
original: 'Original'
},
video: {
video: 'Video',
videoLink: 'Link đến Video',
insert: 'Chèn Video',
url: 'URL',
providers: '(Hỗ trợ YouTube, Vimeo, Vine, Instagram, DailyMotion và Youku)'
},
link: {
link: 'Link',
insert: 'Chèn Link',
unlink: 'Gỡ Link',
edit: 'Sửa',
textToDisplay: 'Văn bản hiển thị',
url: 'URL',
openInNewWindow: 'Mở ở Cửa sổ mới'
},
table: {
table: 'Bảng',
addRowAbove: 'Chèn dòng phía trên',
addRowBelow: 'Chèn dòng phía dưới',
addColLeft: 'Chèn cột bên trái',
addColRight: 'Chèn cột bên phải',
delRow: 'Xóa dòng',
delCol: 'Xóa cột',
delTable: 'Xóa bảng'
},
hr: {
insert: 'Chèn'
},
style: {
style: 'Kiểu chữ',
p: 'Chữ thường',
blockquote: 'Đoạn trích',
pre: 'Mã Code',
h1: 'H1',
h2: 'H2',
h3: 'H3',
h4: 'H4',
h5: 'H5',
h6: 'H6'
},
lists: {
unordered: 'Liệt kê danh sách',
ordered: 'Liệt kê theo thứ tự'
},
options: {
help: 'Trợ giúp',
fullscreen: 'Toàn Màn hình',
codeview: 'Xem Code'
},
paragraph: {
paragraph: 'Canh lề',
outdent: 'Dịch sang trái',
indent: 'Dịch sang phải',
left: 'Canh trái',
center: 'Canh giữa',
right: 'Canh phải',
justify: 'Canh đều'
},
color: {
recent: 'Màu chữ',
more: 'Mở rộng',
background: 'Màu nền',
foreground: 'Màu chữ',
transparent: 'trong suốt',
setTransparent: 'Nền trong suốt',
reset: 'Thiết lập lại',
resetToDefault: 'Trở lại ban đầu'
},
shortcut: {
shortcuts: 'Phím tắt',
close: 'Đóng',
textFormatting: 'Định dạng Văn bản',
action: 'Hành động',
paragraphFormatting: 'Định dạng',
documentStyle: 'Kiểu văn bản',
extraKeys: 'Extra keys'
},
help: {
'insertParagraph': 'Chèn đo văn',
'undo': 'Undoes the last command',
'redo': 'Redoes the last command',
'tab': 'Tab',
'untab': 'Untab',
'bold': 'Set a bold style',
'italic': 'Set a italic style',
'underline': 'Set a underline style',
'strikethrough': 'Set a strikethrough style',
'removeFormat': 'Clean a style',
'justifyLeft': 'Set left align',
'justifyCenter': 'Set center align',
'justifyRight': 'Set right align',
'justifyFull': 'Set full align',
'insertUnorderedList': 'Toggle unordered list',
'insertOrderedList': 'Toggle ordered list',
'outdent': 'Outdent on current paragraph',
'indent': 'Indent on current paragraph',
'formatPara': 'Change current block\'s format as a paragraph(P tag)',
'formatH1': 'Change current block\'s format as H1',
'formatH2': 'Change current block\'s format as H2',
'formatH3': 'Change current block\'s format as H3',
'formatH4': 'Change current block\'s format as H4',
'formatH5': 'Change current block\'s format as H5',
'formatH6': 'Change current block\'s format as H6',
'insertHorizontalRule': 'Insert horizontal rule',
'linkDialog.show': 'Show Link Dialog'
},
history: {
undo: 'Lùi lại',
redo: 'Làm lại'
},
specialChar: {
specialChar: 'KÝ TỰ ĐẶC BIỆT',
select: 'Chọn ký tự đặc biệt'
}
}
});
})(jQuery);
/******/ return __webpack_exports__;
/******/ })()
;
});
//# sourceMappingURL=summernote-vi-VN.js.map
|
PypiClean
|
/jet-test-0.0.3.tar.gz/jet-test-0.0.3/jet_test/report.py
|
import itertools
import re
import textwrap
# self
from jet_test.classes import Error
# dependencies
from rich.panel import Panel
from rich.layout import Layout
from rich.text import Text
from rich.align import Align
def _camel_case_split(identifier: str) -> str:
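    # Split a CamelCase identifier into space-separated words,
    # e.g. "NotImplementedError" -> "Not Implemented Error".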
matches = re.finditer(
".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier
)
return " ".join([m.group(0) for m in matches])
def _bound_text(title: str, desc: str, text_width: int) -> str:
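    # Wrap title + description to `text_width`, then strip the title prefix so
    # only the wrapped description is returned.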
desc = textwrap.fill(title + desc, text_width)[len(title) :]
return desc
def _center(node, **kwargs):
node = Align(node, vertical="middle", align="center", pad=False, **kwargs)
return node
def report_result(result: Error, color: str, text_width: int):
"""Creates Headline and description"""
if result.type == "Failed":
text = f" TEST FAILED "
else:
text = _camel_case_split(result.name)
text = f" {text.upper()} "
if len(result.description) == 0:
node = _center(
Text(text, style=f"bold white on {color}"),
)
return node
node = _center(
Text.assemble(
(text, f"bold white on {color}"),
("\n\n"),
(_bound_text(text, result.description, text_width), f"dim {color}"),
justify="center",
),
)
return node
def observation(title: str, desc: str, text_width: int):
"""General class for a fainted block with title"""
node = _center(
Text.assemble(
title,
(_bound_text(title, desc, text_width), "dim"),
justify="left",
),
)
return node
def captured_output(output: str, text_width: int):
"""Display captured output"""
node = _center(
Panel(
_center(Text("\n" + output)),
subtitle="",
title="Captured Output",
width=text_width + 4,
border_style="dim",
)
)
return node
def locals_panel(result: Error, buffer: int, console):
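    # Render the local variables captured at the failure site; the console
    # height is resized to fit the variable count plus padding.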
if (result.variables is None) or len(result.variables) == 0:
console.height = buffer + 5
return _center(Text(""))
count = 0
text = Text()
for k, v in result.variables.items():
text.append(Text(f"{k} = {str(v)}\n", style="dim"))
# text.append(self.highlight())
count += 1
console.height = max([buffer, count]) + 5
return _center(text)
def function_panel(result: Error, buffer: int, color: int):
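    # Show up to `buffer` source lines leading to the failing line, with the
    # failing line itself highlighted.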
lineno = result.line - 1
text = Text()
start_line = max([lineno - buffer, 0])
current_line = start_line
with open(result.test.module.path, "r") as text_file:
for line in itertools.islice(text_file, start_line, lineno + 1):
if current_line == lineno:
text.append(line, style=f"bold white on {color}")
else:
text.append(line, style="dim")
current_line += 1
return _center(text)
def function_and_locals_parallel(result: Error, buffer: int, color: str, console):
layout = Layout()
layout.split_row(
Layout(
Panel(
function_panel(result, buffer, color),
title=f"{result.test.name} @ {result.test.module.name}",
),
name="code",
),
Layout(
Panel(locals_panel(result, buffer, console), title="Local variables"),
name="locals",
),
)
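    # 1597:987 are consecutive Fibonacci numbers, so the code/locals split
    # approximates the golden ratio.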
layout["code"].ratio = 1597
layout["locals"].ratio = 987
return layout
def function_and_locals_inline(
result: Error, buffer: int, color: str, text_width: int, console
):
uno = _center(
Panel(
function_panel(result, buffer, color),
title=f"{result.test.name} @ {result.test.module.name}",
width=text_width + 4,
)
)
dos = _center(
Panel(
locals_panel(result, buffer, console),
title="Local variables",
width=text_width + 4,
)
)
return [uno, dos]
|
PypiClean
|
/cctbx-2020.8-0_py36h1d45459-cp36-cp36m-manylinux2010_x86_64.whl/wxtbx/metallicbutton.py
|
from __future__ import absolute_import, division, print_function
# Copyright 2010 University of California
# derived from wx.lib.platebtn (see copyright below).
###############################################################################
# Name: platebtn.py #
# Purpose: PlateButton is a flat label button with support for bitmaps and #
# drop menu. #
# Author: Cody Precord <[email protected]> #
# Copyright: (c) 2007 Cody Precord <[email protected]> #
# Licence: wxWindows Licence #
###############################################################################
__all__ = ["MetallicButton", "AdjustAlpha", "AdjustColour",
"GetHighlightColour",
"GRADIENT_NORMAL", "GRADIENT_PRESSED", "GRADIENT_HIGHLIGHT",
"MB_STYLE_DEFAULT", "GB_STYLE_BOLD_LABEL", "GB_STYLE_DROPARROW"]
import wx.lib.wordwrap
import wx.lib.imageutils
from wx.lib.colourutils import *
from wxtbx import wx4_compatibility as wx4c
# Used on OSX to get access to carbon api constants
CAPTION_SIZE = 9
LABEL_SIZE = 11
if wx.Platform == '__WXMAC__':
try:
from Carbon import Appearance as CarbonAppearance
except Exception:
CarbonAppearance = None
elif (wx.Platform == '__WXMSW__'):
CAPTION_SIZE = 9
LABEL_SIZE = 11
GRADIENT_NORMAL = 0
GRADIENT_PRESSED = 1
GRADIENT_HIGHLIGHT = 2
MB_STYLE_DEFAULT = 1
MB_STYLE_BOLD_LABEL = 2
MB_STYLE_DROPARROW = 4
WxCtrl = wx4c.get_wx_mod(wx, wx.Control)
class MetallicButton(WxCtrl):
def __init__ (self,
parent,
id_=wx.ID_ANY,
label='',
label2='',
bmp=None,
pos=wx.DefaultPosition,
size=wx.DefaultSize,
style=MB_STYLE_DEFAULT,
name=wx.ButtonNameStr,
start_color=(218,218,218),
gradient_percent=15.0,
highlight_color=(230,230,230),
label_size=LABEL_SIZE,
caption_size=CAPTION_SIZE,
button_margin=2,
disable_after_click=0,
bmp2=None):
WxCtrl.__init__(self, parent, id_, pos, size, wx.NO_BORDER, name=name)
self.InheritAttributes()
self._bmp = dict(enable=bmp)
self._margin = button_margin
if bmp is not None :
img = bmp.ConvertToImage()
#img = img.ConvertToGreyscale(.795, .073, .026)
wx.lib.imageutils.grayOut(img)
self._bmp['disable'] = img.ConvertToBitmap()
else :
self._bmp['disable'] = None
self._bmp2 = bmp2
self._use_secondary_bitmap = False
self._label2_font = self.GetFont()
self._label2_font.SetPointSize(caption_size)
# XXX this crashes on wxOSX_Cocoa!
if (not 'wxOSX-cocoa' in wx.PlatformInfo):
self._label2_font.SetStyle(wx.FONTSTYLE_ITALIC)
# with wx4c.set_font_style(wx.FONTSTYLE_ITALIC) as fs:
# self._label2_font.SetStyle(fs)
font_size = label_size
self._label_font = self.GetFont()
self._label_font.SetPointSize(label_size)
if style & MB_STYLE_BOLD_LABEL :
self._label_font.SetWeight(wx.FONTWEIGHT_BOLD)
# with wx4c.set_font_weight(wx.FONTWEIGHT_BOLD) as fw:
# self._label2_font.SetWeight(fw)
self.SetFont(self._label_font)
#self._label2_font = wx.Font(caption_size, wx.SWISS, wx.ITALIC, wx.NORMAL)
self._menu = None
self.SetLabel(label)
self._label2 = label2
self._style = style
#self._size = tuple(size)
self._state = dict(pre=GRADIENT_NORMAL, cur=GRADIENT_NORMAL)
self._color = self.__InitColors(start_color, highlight_color,
gradient_percent)
self._caption_lines = None
self._disable_after_click = disable_after_click
# Setup Initial Size
self.SetInitialSize(size)
# Event Handlers
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.Bind(wx.EVT_SET_FOCUS, self.OnFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
# Mouse Events
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
self.Bind(wx.EVT_LEFT_UP, self.OnLeftUp)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnEnter)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnLeave)
# Other events
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu)
def OnPaint(self, event):
self.__DrawButton()
def __DrawBitmap(self, gc):
"""Draw the bitmap if one has been set
@param gc: GCDC to draw with
    @return: x coordinate to draw text at
"""
bmp = None
if (self._use_secondary_bitmap):
assert (self._bmp2 is not None)
bmp = self._bmp2
elif self.IsEnabled():
bmp = self._bmp['enable']
else:
bmp = self._bmp['disable']
if bmp is not None and bmp.IsOk():
bw, bh = bmp.GetSize()
cw, ch = self.GetSize()
if ch > (bh + 4) : # (self._margin * 2)):
ypos = ((ch - bh) / 2) - (self._margin / 2) + 1
xpos = self._margin + 2
else :
ypos = 0
xpos = 0
      gc.DrawBitmap(bmp, xpos, ypos, bmp.GetMask() is not None)
return bw + 6
else:
return 6
def __DrawDropArrow(self, gc, xpos, ypos):
"""Draw a drop arrow if needed and restore pen/brush after finished
@param gc: GCDC to draw with
    @param xpos: x coordinate to start at
    @param ypos: y coordinate to start at
"""
if self._menu is not None or self._style & MB_STYLE_DROPARROW:
# Positioning needs a little help on Windows
if wx.Platform == '__WXMSW__':
xpos -= 2
tripoints = [(xpos, ypos), (xpos + 6, ypos), (xpos + 3, ypos + 5)]
brush_b = gc.GetBrush()
pen_b = gc.GetPen()
gc.SetPen(wx.TRANSPARENT_PEN)
gc.SetBrush(wx.Brush(gc.GetTextForeground()))
gc.DrawPolygon(tripoints)
gc.SetBrush(brush_b)
gc.SetPen(pen_b)
else:
pass
def __DrawHighlight(self, gc, width, height):
"""Draw the main highlight/pressed state
@param gc: GCDC to draw with
@param width: width of highlight
@param height: height of highlight
"""
if self._state['cur'] == GRADIENT_PRESSED:
color = self._color['press_start']
end_color = self._color['press_end']
else:
color = self._color['hlight_start']
end_color = self._color['hlight_end']
rad = 0
gc.SetBrush(wx.TRANSPARENT_BRUSH)
rgc = gc.GetGraphicsContext()
brush = rgc.CreateLinearGradientBrush(0, 1, 0, height, color, end_color)
rgc.SetBrush(brush)
gc.DrawRectangle(1, 1, width-2, height-2)
def __DrawCaption(self, gc, xpos, ypos):
if self._label2 != '' :
gc.SetFont(self._label2_font)
min_w, min_h = self.GetSize() #self._size
if min_w == -1 :
min_w = 120
txt_w = min_w - xpos - 10
if False : #self._caption_lines is not None :
lines = self._caption_lines
else :
if (wx.Platform in ['__WXGTK__', '__WXMSW__']):
dc = wx.ClientDC(self)
dc.SetFont(self._label2_font)
#txt_w += 100
else :
dc = gc
lines = wx.lib.wordwrap.wordwrap(self._label2,
width=txt_w,
dc=dc) #wx.MemoryDC())
offset = 0
for line in lines.splitlines():
line_w, line_h = gc.GetTextExtent(line)
gc.DrawText(line.rstrip(), xpos, ypos + offset)
offset += line_h + 2
def __PostEvent(self):
"""Post a button event to parent of this control"""
bevt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, self.GetId())
bevt.SetEventObject(self)
bevt.SetString(self.GetLabel())
wx.PostEvent(self.GetParent(), bevt)
def __DrawButton(self):
"""Draw the button"""
dc = wx.AutoBufferedPaintDCFactory(self)
gc = wx.GCDC(dc)
# Setup
dc.SetBrush(wx.WHITE_BRUSH)
gc.SetBrush(wx.WHITE_BRUSH)
gc.SetFont(self.GetFont())
#gc.SetBackgroundMode(wx.TRANSPARENT)
# Calc Object Positions
width, height = self.GetSize()
#get text dimensions from dc rather than gc as gc reports wrong height for empty strings on Windows
tw, th = dc.GetTextExtent(self.GetLabel())
if self._label2 != '' :
txt_y = 4 #th + 4 #height - th - 4
txt2_y = th + 8
else :
txt_y = max((height - th) / 2 - 1, 1)
txt2_y = None
#print height, th, txt_y, txt2_y
#gc.SetBrush(wx.TRANSPARENT_BRUSH)
#gc.DrawRectangle(0, 0, width, height)
gc.SetPen(wx.Pen((100,100,100)))
gc.SetBrush(wx.Brush((240,240,240)))
gc.DrawRectangle(0,0,width,height)
gc.SetPen(wx.TRANSPARENT_PEN)
if self._state['cur'] == GRADIENT_HIGHLIGHT:
gc.SetTextForeground(self._color['htxt'])
self.__DrawHighlight(gc, width, height)
elif self._state['cur'] == GRADIENT_PRESSED:
gc.SetTextForeground(self._color['htxt'])
if wx.Platform == '__WXMAC__':
brush = wx.Brush((100,100,100))
if CarbonAppearance:
brush.MacSetTheme(CarbonAppearance.kThemeBrushFocusHighlight)
with wx4c.set_pen_style(wx.PENSTYLE_SOLID) as pstyle:
pen = wx.Pen(brush.GetColour(), 1, pstyle)
else:
pen = wx.Pen(AdjustColour(self._color['press_start'], -80, 220), 1)
#gc.SetPen(pen)
self.__DrawHighlight(gc, width, height)
txt_x = self.__DrawBitmap(gc)
gc.DrawText(self.GetLabel(), txt_x + 2, txt_y)
self.__DrawCaption(gc, txt_x + 2, txt2_y)
self.__DrawDropArrow(gc, width - 10, (height / 2) - 2)
else:
rgc = gc.GetGraphicsContext()
#gc.SetPen(wx.TRANSPARENT_PEN)
color = wx.Colour(218,218,218)
brush = rgc.CreateLinearGradientBrush(0, 1, 0, height,
self._color['gradient_start'], self._color['gradient_end'])
rgc.SetBrush(brush)
gc.DrawRectangle(1, 2, width-2, height-3)
if self.IsEnabled():
gc.SetTextForeground(self.GetForegroundColour())
else:
txt_c = wx.SystemSettings.GetColour(wx.SYS_COLOUR_GRAYTEXT)
gc.SetTextForeground(txt_c)
# Draw bitmap and text
if self._state['cur'] != GRADIENT_PRESSED:
txt_x = self.__DrawBitmap(gc)
gc.DrawText(self.GetLabel(), txt_x + 2, txt_y)
self.__DrawCaption(gc, txt_x + 2, txt2_y)
#self.__DrawDropArrow(gc, txt_x + tw + 6, (height / 2) - 2)
def __InitColors(self, start_color, highlight_color, gradient_percent):
"""Initialize the default colors"""
start_color = wx.Colour(*start_color)
start_hcolor = wx.Colour(*highlight_color) #GetHighlightColour()
start_pcolor = AdjustColour(start_hcolor, -12)
if gradient_percent != 0 :
end_color = AdjustColour(start_color, gradient_percent)
end_hcolor = AdjustColour(start_hcolor, gradient_percent)
end_pcolor = AdjustColour(start_pcolor, gradient_percent)
else :
end_color = start_color
end_hcolor = start_hcolor
end_pcolor = start_pcolor
colors = dict(default=True,
gradient_start=start_color,
gradient_end=end_color,
hlight_start=start_hcolor,
hlight_end=end_hcolor,
press_start=start_pcolor,
press_end=end_pcolor,
htxt=wx.Colour(0,0,0))
# BestLabelColour(self.GetForegroundColour()))
return colors
#---- End Private Member Function ----#
#---- Public Member Functions ----#
def AcceptsFocus(self):
"""Can this window have the focus?"""
return self.IsEnabled()
@property
def BitmapDisabled(self):
"""Property for accessing the bitmap for the disabled state"""
return self._bmp['disable']
@property
def BitmapLabel(self):
"""Property for accessing the default bitmap"""
return self._bmp['enable']
# Aliases
BitmapFocus = BitmapLabel
BitmapHover = BitmapLabel
BitmapSelected = BitmapLabel
def Disable(self):
"""Disable the control"""
WxCtrl.Disable(self)
self.Refresh()
def DoGetBestSize(self):
"""Calculate the best size of the button
@return: wx.Size
"""
width = 8
height = 10
label_width = 0
label_height = 0
caption_width = 0
caption_height = 0
if self._bmp['enable'] is not None:
bsize = self._bmp['enable'].GetSize()
width += (bsize[0] + 12)
height = bsize[1] + (self._margin * 2)
else:
width += 10
if self.GetLabel():
lsize = self.GetTextExtent(self.GetLabel())
label_width = lsize[0]
label_height = lsize[1]
if self._label2 != '' :
if wx.Platform == '__WXMAC__' :
with wx4c.create_measuring_context() as context:
dc = context
gfont = dc.CreateFont(self._label2_font, self.GetForegroundColour())
else :
dc = wx.ClientDC(self)
gfont = self._label2_font
#dc = wx.MemoryDC()
dc.SetFont(gfont)
min_w, min_h = self.GetSize() #self._size
if min_w == -1 :
min_w = 120
txt_w = min_w - width - 10
if wx.Platform == '__WXGTK__' :
txt_w += 40
lines = wx.lib.wordwrap.wordwrap(self._label2,
width=txt_w,
dc=dc)
self._caption_lines = lines
offset = 0
if wx.Platform == "__WXMAC__" :
buffer = 4
else :
buffer = 0
for line in lines.splitlines():
line_w, line_h = dc.GetTextExtent(line)
if line_w > caption_width :
caption_width = line_w
caption_height += line_h + buffer
if (wx.Platform == '__WXMSW__'):
caption_height += 4
width += max(caption_width, label_width) + 4
height = max(caption_height + label_height + 12, height)
if self._menu is not None or self._style & MB_STYLE_DROPARROW :
width += 12
if width < self.GetSize()[0] : #self._size[0] :
width = self.GetSize()[0]
best = wx.Size(width, height)
self.CacheBestSize(best)
return best
def Enable(self, enable=True):
"""Enable/Disable the control"""
WxCtrl.Enable(self, enable)
self.Refresh()
def GetBackgroundBrush(self, dc):
"""Get the brush for drawing the background of the button
@return: wx.Brush
@note: used internally when on gtk
"""
if wx.Platform == '__WXMAC__' : #or self._style & PB_STYLE_NOBG:
return wx.TRANSPARENT_BRUSH
bkgrd = self.GetBackgroundColour()
with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle:
brush = wx.Brush(bkgrd, bstyle)
my_attr = self.GetDefaultAttributes()
p_attr = self.GetParent().GetDefaultAttributes()
my_def = bkgrd == my_attr.colBg
p_def = self.GetParent().GetBackgroundColour() == p_attr.colBg
if my_def and not p_def:
bkgrd = self.GetParent().GetBackgroundColour()
with wx4c.set_brush_style(wx.BRUSHSTYLE_SOLID) as bstyle:
brush = wx.Brush(bkgrd, bstyle)
return brush
def GetBitmapDisabled(self):
"""Get the bitmap of the disable state
@return: wx.Bitmap or None
"""
return self._bmp['disable']
def GetBitmapLabel(self):
"""Get the label bitmap
@return: wx.Bitmap or None
"""
return self._bmp['enable']
# GetBitmap Aliases for BitmapButton api
GetBitmapFocus = GetBitmapLabel
GetBitmapHover = GetBitmapLabel
# Alias for GetLabel
GetLabelText = WxCtrl.GetLabel
def GetMenu(self):
"""Return the menu associated with this button or None if no
menu is associated with it.
"""
return getattr(self, '_menu', None)
def HasTransparentBackground(self):
"""Override setting of background fill"""
return True
@property
def LabelText(self):
"""Property for getting the label of the button"""
return self.GetLabel()
#---- Event Handlers ----#
def OnErase(self, evt):
"""Trap the erase event to keep the background transparent
on windows.
@param evt: wx.EVT_ERASE_BACKGROUND
"""
pass
def OnFocus(self, evt):
"""Set the visual focus state if need be"""
if not self.IsEnabled():
return
if self._state['cur'] == GRADIENT_NORMAL:
self.SetState(GRADIENT_HIGHLIGHT)
def OnKeyUp(self, evt):
"""Execute a single button press action when the Return key is pressed
and this control has the focus.
@param evt: wx.EVT_KEY_UP
"""
if evt.GetKeyCode() == wx.WXK_SPACE:
self.SetState(GRADIENT_PRESSED)
self.__PostEvent()
wx.CallLater(100, self.SetState, GRADIENT_HIGHLIGHT)
else:
evt.Skip()
def OnKillFocus(self, evt):
"""Set the visual state back to normal when focus is lost
unless the control is currently in a pressed state.
"""
    # Note: this delay needs to be at least as long as the one in the KeyUp
# handler to prevent ghost highlighting from happening when
# quickly changing focus and activating buttons
if self._state['cur'] != GRADIENT_PRESSED:
self.SetState(GRADIENT_NORMAL)
self.Refresh()
def OnLeftDown(self, evt):
"""Sets the pressed state and depending on the click position will
show the popup menu if one has been set.
"""
if not self.IsEnabled():
return
pos = evt.GetPosition()
self.SetState(GRADIENT_PRESSED)
size = self.GetSize()
if pos[0] >= size[0] - 16:
if self._menu is not None:
self.ShowMenu()
self.SetFocus()
def OnLeftUp(self, evt):
"""Post a button event if the control was previously in a
pressed state.
@param evt: wx.MouseEvent
"""
if not self.IsEnabled():
return
if self._state['cur'] == GRADIENT_PRESSED:
pos = evt.GetPosition()
size = self.GetSize()
if self._disable_after_click > 0 :
self.Enable(False)
self.__PostEvent()
self.SetState(GRADIENT_HIGHLIGHT)
if self._disable_after_click > 0 :
wx.CallLater(self._disable_after_click, lambda : self.Enable(True))
def OnMenuClose(self, evt):
"""Refresh the control to a proper state after the menu has been
dismissed.
@param evt: wx.EVT_MENU_CLOSE
"""
mpos = wx.GetMousePosition()
if self.HitTest(self.ScreenToClient(mpos)) != wx.HT_WINDOW_OUTSIDE:
self.SetState(GRADIENT_HIGHLIGHT)
else:
self.SetState(GRADIENT_NORMAL)
evt.Skip()
def OnEnter(self, evt):
if not self.IsEnabled():
return
self.SetState(GRADIENT_HIGHLIGHT)
def OnLeave(self, evt):
if not self.IsEnabled():
return
self.SetState(GRADIENT_NORMAL)
def OnDoubleClick(self, evt):
if not self.IsEnabled():
return
self.ToggleState()
def OnContextMenu(self, evt):
if not self.IsEnabled():
return
self.ShowMenu()
#---- End Event Handlers ----#
def SetBitmap(self, bmp):
"""Set the bitmap displayed in the button
@param bmp: wx.Bitmap
"""
self._bmp['enable'] = bmp
img = bmp.ConvertToImage()
img = img.ConvertToGreyscale(.795, .073, .026) #(.634, .224, .143)
self._bmp['disable'] = img.ConvertToBitmap()
self.InvalidateBestSize()
def SetBitmapDisabled(self, bmp):
"""Set the bitmap for the disabled state
@param bmp: wx.Bitmap
"""
self._bmp['disable'] = bmp
# Aliases for SetBitmap* functions from BitmapButton
SetBitmapFocus = SetBitmap
SetBitmapHover = SetBitmap
SetBitmapLabel = SetBitmap
SetBitmapSelected = SetBitmap
def SetFocus(self):
"""Set this control to have the focus"""
if self._state['cur'] != GRADIENT_PRESSED:
self.SetState(GRADIENT_HIGHLIGHT)
WxCtrl.SetFocus(self)
def SetFont(self, font):
"""Adjust size of control when font changes"""
WxCtrl.SetFont(self, font)
self.InvalidateBestSize()
def SetLabel(self, label):
"""Set the label of the button
    @param label: label string
"""
WxCtrl.SetLabel(self, label)
self.InvalidateBestSize()
def SetLabelColor(self, normal, hlight=wx.NullColour):
"""Set the color of the label. The optimal label color is usually
automatically selected depending on the button color. In some
cases the colors that are choosen may not be optimal.
The normal state must be specified, if the other two params are left
Null they will be automatically guessed based on the normal color. To
prevent this automatic color choices from happening either specify
a color or None for the other params.
@param normal: Label color for normal state
@keyword hlight: Color for when mouse is hovering over
"""
self._color['default'] = False
self.SetForegroundColour(normal)
if hlight is not None:
if hlight.IsOk():
self._color['htxt'] = hlight
else:
self._color['htxt'] = BestLabelColour(normal)
if wx.Platform == '__WXMSW__':
self.GetParent().RefreshRect(self.GetRect(), False)
else:
self.Refresh()
def SetMenu(self, menu):
"""Set the menu that can be shown when clicking on the
drop arrow of the button.
@param menu: wxMenu to use as a PopupMenu
@note: Arrow is not drawn unless a menu is set
"""
if self._menu is not None:
self.Unbind(wx.EVT_MENU_CLOSE)
self._menu = menu
self.Bind(wx.EVT_MENU_CLOSE, self.OnMenuClose)
self.InvalidateBestSize()
def SetPressColor(self, color):
"""Set the color used for highlighting the pressed state
    @param color: wx.Colour
@note: also resets all text colours as necessary
"""
self._color['default'] = False
if color.Alpha() == 255:
self._color['hlight'] = AdjustAlpha(color, 200)
else:
self._color['hlight'] = color
#self._color['press'] = AdjustColour(color, -10, 160)
self._color['htxt'] = BestLabelColour(self._color['hlight'])
self.Refresh()
def SetState(self, state):
"""Manually set the state of the button
    @param state: one of the GRADIENT_* values
@note: the state may be altered by mouse actions
"""
self._state['pre'] = self._state['cur']
self._state['cur'] = state
if wx.Platform == '__WXMSW__':
self.GetParent().RefreshRect(self.GetRect(), False)
else:
self.Refresh()
def SetWindowStyle(self, style):
"""Sets the window style bytes, the updates take place
immediately no need to call refresh afterwards.
@param style: bitmask of PB_STYLE_* values
"""
self._style = style
self.Refresh()
def SetWindowVariant(self, variant):
"""Set the variant/font size of this control"""
WxCtrl.SetWindowVariant(self, variant)
self.InvalidateBestSize()
def ShouldInheritColours(self):
"""Overridden base class virtual. If the parent has non-default
colours then we want this control to inherit them.
"""
return True
def ShowMenu(self):
"""Show the dropdown menu if one is associated with this control"""
if self._menu is not None:
size = self.GetSize()
      adj = 3 if wx.Platform == '__WXMAC__' else 0
xpos = 1
self.PopupMenu(self._menu, (xpos, size[1] + adj))
def ToggleState(self):
"""Toggle button state"""
if self._state['cur'] != GRADIENT_PRESSED:
self.SetState(GRADIENT_PRESSED)
else:
self.SetState(GRADIENT_HIGHLIGHT)
def SwapBitmap(self):
assert (self._bmp2 is not None)
if (self._use_secondary_bitmap):
self._use_secondary_bitmap = False
else :
self._use_secondary_bitmap = True
if __name__ == "__main__" :
from wx.lib.embeddedimage import PyEmbeddedImage
folder_home = PyEmbeddedImage(
"iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAI9UlEQVRYhaWXfXBU1RnGf+fe"
"u3t3s9ndbLKEEJKQBBICifIhtGBEqaCFarFWWooIFBVERtBaa9WO7VRL4yhFR5RpC1OGTrX2"
"H2esIxacTlFRIiggRSMQBhJCQhKSzWY3d/fu/Tj9I59itULfmTvnuXfOx3Oe85z3nCuklFxu"
"XDendi1COfn2O+/+83L7ELNnTkNRBCAQigApYOAdBAiBEAKEgkAgEQgBQogszN6TUqhnHDtT"
"K6VEEQIlKwISwKV/bpJ+IAe+979L6RIKBVEuk3i+TMWa71yzrnD+vPlXA4cvVwHtMtpEHaP7"
"kx/dsSpvxaq7Aeju6Zp6oL7+I+CqS+3sUhWoUDLxU6vX3R+97/4HaW1to7s7xrNb/si8+TdO"
"B45ziZO6FAJVMt19eOU994VWrrqb5uazBLMDqKpKe3sHT9Rt4rpvXV/p9HU1AN6v26laPHZM"
"v8kQQyXiCya8xeMm96574FHfsuUrudB5gcLCMei6js+nk50dIJFMsvCmRbR3tOc2fnp4PZrv"
"VaD7y4eW6Lr+tQgs0En//dEnfqcuvHkR3V0XiMd72LZtG1n+LNo72nnyySepqKzE7/cz74Zv"
"k8pYvmMf7rsTzf8SEP8qAv9rCZYHve4bv376RaX2mlpaW5oZFY1imhme3fwcH+w/xJ4977B1"
"61Yc2yYYDNLaco57121g9fqfBTQ70cBXGDOZSHylAssDqr3ziU1/UGpqaujq7CQUCuHz+QgG"
"Q+SG85lSO4pRBTrTJ89n4XcWoGoKjuvS1XWB2jnXkpUd9hx8961VaL7XgPaLFRDiyx27IRLQ"
"nvtl3YuisnICse4uwuEQ4XAYgNZzrbz6r6f5y97PyM3PpiB7FkZyKV49RE5ODgBt587xve//"
"EFXTPL/fXPehqfgXAl/ImP9NgYciAc/mjc9vF2XjyuiN9xAOh4cGBzh95hTxWJrJhYsozbsW"
"xcmiZFw5o/NHA+Dz+ZBS0hPrZur0GYweW6bWv/3GHa7Q64FTIxW4mMDj0ZxA3a+f2UpxcRFG"
"MkEoFCIUCn2OtZVxiGQXUFU1iXHjyikeM56SkhKCweyhOj6fD4B4T4yqydUUFpWLD99/63Zb"
"ag3Ap4MERp4Fz4yJhh+q2/InopEI6bRBKBQkKyvwZR76WmEYBr29vUQiUT44UM/GxzbIlKvf"
"JaW7QxmhwOaSwvyf1m3ZwdgxBViWRW5uBF3X/6/BATweD36/H8Poo7KykgnV08WBd3YtMi3O"
"C8FHYvbMaa9Mmjh+yV3rH6bzfBu2bRGJ5JIdDCJQcKWDlBLpuDjuwNEtQCCG8cBnKSUSUJX+"
"k1UIgaJoICSJRIJYVxfBcATd6+X5p37BhXj6N+rixYuvueW2pTUd7a2iteWMCg65uVGy/H5M"
"M42mqgggmUxgmikypoljW2TMNFbGxMqYZDIZMhmTdDqFlTGxbQtNVVFVFcfO4Pf5sS2L8+eb"
"aDl7hkgkz50642r7yKGDb2nAA8ADxUVFL0fzwkuLisaye/deXn/9dVasWMHevXtpaGhg586d"
"+P1+Ojs7Od3URF4kgmM7OEhUIYjH43h1nSlXXEEqlWLjxo24rsvNN3+XTZs2MXfudaxdu4qm"
"pmYcx90O3AMDeaC6ugrAZxg9jBtXQm9vnP3797N69WqOHj3KkSNHKC0tRdM00uk0gawsSktL"
"8fp0pONiOw6dHR10x2KMHTsWwzBoaGhA83rRdS8HDx5g2rSplJeXIaXEcaQ66BHtt0/VDWKZ"
"ne1F13Ooqb6SpUuWUlNzJeVl5ZSXl+PxeADIy8vjbEsLebm5KJqGdCWO45AxMxipFEIIAoEA"
"8+bNIy8vj4KCAqqra5gzZw7gIxqN4rrSAnjpb6+gpdPJQQKp/nwAbefPc/jQv+nqSNLS0oGq"
"uUOudhwHAFXTcKSLdCWapiEH8GA0njxJR1uMKdWzON3YTE8sPmxU6VpDCliZIdyfkGSaosIS"
"rvrmVGLqPpb8+Hoy3ZGhOrZt40qJZVkgJQ4SBGQyGVw5THRixRS0UAwn52MefHgDlRMm49oG"
"VsbGcWx7iIA9jBFC4Dpw4tQxjp57mYYXEpSMz2d86AdIG4TWr4CUEsuy+2esgOu4/OPNN5k+"
"Y0a/SjYcObGbhOcA+z6TjCuuxLCmkTFtbNvCcawhppo1TMBVVZXeRB/5o0excPZ6PG4AV81g"
"mik6LnSSPzqK4zgogv4b9EAqUBWVq2trMVIpALq6Opk5bQ4eZQFeLYBhxkj0Jkim+rAcB+k6"
"Q5PWcMUgthAarg3zr72VdDqJVFxc18FMm6iaQp/Rh2PZSAmu6w6sp8SVkJubR/JsM4ZhoHpU"
"lt62hrycQlJ2DwKBip9Mqhs7YwHDS6XV7VgKwLJbfpJ9puMIHb3HWThrPZMr5vdf4W0w0gam"
"ZWKmTRzpAmKAgBi48ls4Tj8hM23i07KoqMrlgxN/ZU/9dryaj7lTl1MRuREj1QfCHl6CgbJ4"
"13vbb2349BTxmEFjUwNrb90CqgOuwhUlC1A1DwiHjz8+SiKZ4OTJkziuiwAUoWCkDPoMg+Mn"
"Gpk1ayZ73t/BxhfXENQLsCyXU00N3DD9LLMmLsGVRt+Q75bceyXAU2bK/nlfTxpFVwjlqkQL"
"dVpaWtH8Dnct/hXl4UX0pWLE4t0oqkrFhAlIxMDWc0mnTZqamnEx6XD2cvjY27z3RiOhUBhV"
"EwQiGtXfGENe1nhKiyp2nW47dBOAmD2/EGB5KKz/2eixEUDGTqMFXXrbTbzZLng8tH0SQAiV"
"ZctuR9d1Ru4eAK/XSyJusLt+G4aZpLg0gE8L0HYqju7RCY32oWUp9PUaTJ5exsSq0tlA/SAB"
"rpo0d2fa7VmU7suoBdGy1nA06J5paihXpZeqSTNO+zyjPhNC+nVdzwaEHPFXKwYzGErGMDuj"
"7Z2NZarwmaFwJN7V3Ra0TVK6T+85dnxfdbw7oQiNuOZTqoFzI++EK7kotm/dlQPoQBYwiL2A"
"CnhGVJWADThAH5AAUgNl8pHH1toAL2x+bcwjj68Z1drWePRiE35Z9IzAykXPyLYuw3srw8h9"
"9vloG3iG4j+/GQJ2mLhyHwAAAABJRU5ErkJggg==")
getfolder_homeData = folder_home.GetData
getfolder_homeImage = folder_home.GetImage
getfolder_homeBitmap = folder_home.GetBitmap
app = wx.App(0)
frame = wx.Frame(None, -1, "Test frame", style=wx.DEFAULT_FRAME_STYLE)
frame_sizer = wx.BoxSizer(wx.VERTICAL)
frame.SetSizer(frame_sizer)
panel = wx.Panel(frame, -1)
frame_sizer.Add(panel)
panel_sizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(panel_sizer)
btn1 = MetallicButton(
parent=panel,
label="Simple button",
button_margin=4)
panel_sizer.Add(btn1, 0, wx.ALL|wx.EXPAND, 10)
btn2 = MetallicButton(
parent=panel,
label="Button with bitmap",
bmp=folder_home.GetBitmap(),
button_margin=4)
panel_sizer.Add(btn2, 0, wx.ALL|wx.EXPAND, 10)
btn3 = MetallicButton(
parent=panel,
label="Disabled button",
bmp=folder_home.GetBitmap(),
button_margin=4)
btn3.Enable(False)
panel_sizer.Add(btn3, 0, wx.ALL|wx.EXPAND, 10)
btn4 = MetallicButton(
parent=panel,
style=MB_STYLE_BOLD_LABEL,
label="Button with bitmap and BOLD caption",
label2="This is the button caption that I can't figure out how to wrap "+
"properly on any platform (but especially Linux!).",
bmp=folder_home.GetBitmap(),
button_margin=4,
size=(320,-1))
panel_sizer.Add(btn4, 0, wx.ALL|wx.EXPAND, 10)
panel_sizer.Fit(panel)
#frame_sizer.Fit(frame)
frame.Fit()
frame.Show()
app.MainLoop()
|
PypiClean
|
/odoo14_addon_pos_customer_display-14.0.1.0.1-py3-none-any.whl/odoo/addons/pos_customer_display/models/pos_config.py
|
from odoo import _, api, fields, models
from odoo.exceptions import ValidationError
class PosConfig(models.Model):
_inherit = "pos.config"
_CUSTOMER_DISPLAY_FORMAT_SELECTION = [
("2_20", "2 Lines of 20 Characters"),
]
iface_customer_display = fields.Boolean(
string="LED Customer Display", help="Display data on the customer display"
)
epos_customer_display = fields.Boolean(
string="LED Customer Display (Epson ePOS)",
help="Activate if you use an Epson LCD connected via USB "
"(DM-D30, DM-D110 or DM-D210) to your Epson printer defined above "
"that support the ePOS protocol.",
)
customer_display_format = fields.Selection(
selection=_CUSTOMER_DISPLAY_FORMAT_SELECTION,
string="Customer Display Format",
default="2_20",
required=True,
)
customer_display_line_length = fields.Integer(
string="Line Length",
compute="_compute_customer_display_line_length",
store=True,
help="Length of the LEDs lines of the customer display",
)
customer_display_msg_next_l1 = fields.Char(
string="Next Customer (Line 1)",
default=lambda x: x._default_customer_display_msg("next_l1"),
help="First line of the message on the customer display which is "
"displayed after starting POS and also after validation of an order",
)
customer_display_msg_next_l2 = fields.Char(
string="Next Customer (Line 2)",
default=lambda x: x._default_customer_display_msg("next_l2"),
help="Second line of the message on the customer display which is "
"displayed after starting POS and also after validation of an order",
)
customer_display_msg_closed_l1 = fields.Char(
string="PoS Closed (Line 1)",
default=lambda x: x._default_customer_display_msg("closed_l1"),
help="First line of the message on the customer display which "
"is displayed when POS is closed",
)
customer_display_msg_closed_l2 = fields.Char(
string="PoS Closed (Line 2)",
default=lambda x: x._default_customer_display_msg("closed_l1"),
help="Second line of the message on the customer display which "
"is displayed when POS is closed",
)
@api.model
def _default_customer_display_msg(self, line):
if line == "next_l1":
return _("Point of Sale Open")
elif line == "next_l2":
return _("Welcome!")
elif line == "closed_l1":
return _("Point of Sale Closed")
elif line == "closed_l2":
return _("See you soon!")
@api.depends("customer_display_format")
def _compute_customer_display_line_length(self):
for config in self:
config.customer_display_line_length = int(
config.customer_display_format.split("_")[1]
)
@api.constrains("iface_customer_display", "epos_customer_display")
def _check_posbox_or_epos(self):
for config in self:
if config.iface_customer_display and config.epos_customer_display:
raise ValidationError(
_(
"On '%s', you activated the LED Customer Display both "
"via the IoTbox and via Direct Devices. You can only "
"select one of the two options."
)
% config.display_name
)
@api.constrains(
"customer_display_format",
"customer_display_msg_next_l1",
"customer_display_msg_next_l2",
"customer_display_msg_closed_l1",
"customer_display_msg_closed_l2",
)
def _check_customer_display_length(self):
for config in self.filtered(lambda x: x.customer_display_line_length):
maxsize = config.customer_display_line_length
fields_to_check = [
x for x in self._fields.keys() if "customer_display_msg_" in x
]
for field_name in fields_to_check:
value = getattr(config, field_name)
if value and len(value) > maxsize:
raise ValidationError(
_(
"The message for customer display '%s' is too "
"long: it has %d chars whereas the maximum "
"is %d chars."
)
% (self._fields[field_name].string, len(value), maxsize)
)
@api.onchange("other_devices")
def other_devices_change_customer_display(self):
if not self.other_devices and self.epos_customer_display:
self.epos_customer_display = False
@api.onchange("is_posbox")
def is_posbox_change_customer_display(self):
if not self.is_posbox and self.iface_customer_display:
self.iface_customer_display = False
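# A minimal sketch (not part of the original module) of how the line length
# is derived from the format selection above: the selection keys encode
# "<number of lines>_<characters per line>", so the compute method simply
# parses the part after the underscore. Illustrative only, runs outside Odoo:
#
#     fmt = "2_20"                          # a customer_display_format key
#     line_length = int(fmt.split("_")[1])  # -> 20, as stored on the config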
|
PypiClean
|
/e3-core-22.3.1.tar.gz/e3-core-22.3.1/src/e3/cve.py
|
from __future__ import annotations
from functools import cached_property
from requests import Session
from e3.log import getLogger
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Iterator
logger = getLogger("cve")
class CVE:
"""Represent a CVE entry."""
def __init__(self, json_content: dict[str, Any]) -> None:
"""Initialize a CVE instance.
:param json_content: dict coming from NVD cves API
"""
self.json_content = json_content
@cached_property
def cve_id(self) -> str:
"""Return the CVE ID."""
return self.json_content["id"]
@property
def nvd_url(self) -> str:
"""Return the nvd.nist.gov vulnerability URL for that CVE."""
return f"https://nvd.nist.gov/vuln/detail/{self.cve_id}"
class NVD:
"""Provide access to the NVD API."""
def __init__(
self,
cache_db_path: str | None = None,
cache_backend: str | None = None,
nvd_api_key: str | None = None,
) -> None:
"""Initialize a NVD instance.
:param cache_db_path: path to the cache database [strongly recommended]
if the path is valid but the file does not exist, the database will
            be created when searching for CVEs. Note that this requires the
            requests-cache package.
:param cache_backend: which requests_cache backend to use, default is
sqlite
:param nvd_api_key: the API key to use to avoid drastic rate limits
"""
self.cache_db_path = cache_db_path
if self.cache_db_path is None:
logger.warning(
"the use of a cache for NVD requests is strongly recommended"
)
self.cache_backend = cache_backend
self.nvd_api_key = nvd_api_key
if self.nvd_api_key is None:
logger.warning(
"the use of an API key for the NVD API is strongly recommended"
" to avoid rate limits"
)
def search_by_cpe_name(
self,
cpe_name: str,
is_vulnerable: bool = True,
no_rejected: bool = True,
results_per_page: int | None = None,
) -> Iterator[CVE]:
"""Return a list of matching CVE entries.
:param no_rejected: remove CVE records with the REJECT or Rejected
status from API response
:param results_per_page: number of results to return for each request,
note that it is recommended to keep the default setting
"""
url = f"https://services.nvd.nist.gov/rest/json/cves/2.0?cpeName={cpe_name}"
if is_vulnerable:
url += "&isVulnerable"
if no_rejected:
url += "&noRejected"
if results_per_page:
url += f"&resultsPerPage={results_per_page}"
if self.nvd_api_key is not None:
headers: dict[str, str] | None = {"apiKey": self.nvd_api_key}
else:
headers = None
start_index = 0
while True:
r = self.session.get(url + f"&startIndex={start_index}", headers=headers)
r_json = r.json()
vulnerabilities = r_json["vulnerabilities"]
total_results = r_json["totalResults"]
if not total_results:
break
# We should always have something to read if there are some results
assert r_json["resultsPerPage"] != 0
for cve_entry in vulnerabilities:
yield CVE(cve_entry["cve"])
if (total_results - start_index) > r_json["resultsPerPage"]:
# Some results are missing
start_index += r_json["resultsPerPage"]
else:
break
@cached_property
def session(self) -> Session:
"""Return an http requests Session supporting cache.
Use requests_cache CachedSession when cache is requested.
"""
if self.cache_db_path:
from requests_cache import CachedSession
from datetime import timedelta
session = CachedSession(
self.cache_db_path,
backend=self.cache_backend,
# Use Cache-Control headers for expiration, if available
cache_control=True,
# Otherwise renew the cache every day
expire_after=timedelta(days=1),
# Use cache data in case of errors
stale_if_error=True,
# Ignore headers
match_header=False,
)
logger.debug(f"using requests cache from {self.cache_db_path}")
return session
else:
return Session()
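# A minimal usage sketch, not part of the original module. The CPE name is
# hypothetical and the cache path/API key are assumptions; see
# https://nvd.nist.gov/developers for valid CPE names and API keys.
if __name__ == "__main__":
    import os
    nvd = NVD(
        cache_db_path="/tmp/nvd_cache",
        nvd_api_key=os.environ.get("NVD_API_KEY"),
    )
    for cve in nvd.search_by_cpe_name("cpe:2.3:a:gnu:glibc:2.28:*:*:*:*:*:*:*"):
        print(cve.cve_id, cve.nvd_url)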
|
PypiClean
|
/cellseg_models_pytorch-0.1.22.tar.gz/cellseg_models_pytorch-0.1.22/cellseg_models_pytorch/utils/latency_benchmark.py
|
from timeit import repeat
from typing import Any, Callable, Dict, List, Tuple, Union
import numpy as np
import torch
from tqdm import tqdm
from cellseg_models_pytorch.inference import BaseInferer
__all__ = ["LatencyBenchmarker"]
class LatencyBenchmarker:
def __init__(self, inferer: BaseInferer) -> None:
"""Benchmark latencies of the model an post-processing pipelines.
Parameters
----------
inferer : BaseInferer
An inferer object that contains model outputs.
"""
self.inferer = inferer
try:
self.inferer.out_masks
self.inferer.soft_masks
except AttributeError:
raise AttributeError(
"Did not find `out_masks` or `soft_masks` attributes. "
"To get these, run inference with `inferer.infer()`. "
"Remember to set the `save_intermediate param to True for the inferer.`"
)
def inference_latency(
self, reps: int = 1, warmup_reps: int = 1, **kwargs
) -> List[Dict[str, Any]]:
"""Compute the inference-pipeline latency.
        NOTE: computes only the inference latency, not the post-processing latency.
Parameters
----------
        reps : int, default=1
Repetition per batch.
warmup_reps : int, default=1
Warm up repetitions.
Returns
-------
List[Dict[str, Any]]:
            Latency info of the samples.
"""
times = []
with tqdm(self.inferer.dataloader, unit="batch") as loader:
with torch.no_grad():
for data in loader:
res = self._compute_latency(
self.inferer._infer_batch,
maps={"input_batch": data["im"]},
reps=reps,
warmup_reps=warmup_reps,
**kwargs,
)
bsize = data["im"].shape[0]
res["n_images"] = bsize
res["input_shape"] = tuple(data["im"].shape[1:])
res["throughput (img/s)"] = res["mean latency(s)"] / bsize
times.append(res)
return times
def inference_postproc_latency(self, reps: int = 1) -> List[Dict[str, Any]]:
"""Compute the latency of the whole inference + post-processing pipeline.
Parameters
----------
reps : int, default=1
Number of repetitions of the full pipeline.
Returns
-------
List[Dict[str, Any]]
The latency and throughput info of the pipeline.
"""
timings = repeat(
lambda: self.inferer.infer(),
repeat=reps,
number=1,
)
timings = np.array(timings)
mean_syn = np.mean(timings)
std_syn = np.std(timings)
res = {}
res["n_images"] = len(list(self.inferer.soft_masks.keys()))
res["repetitions"] = reps
res["total mean latency(s)"] = mean_syn
res["total std latency(s)"] = std_syn
res["mean throughput (img/s)"] = mean_syn / res["n_images"]
res["std throughput (img/s))"] = std_syn / res["n_images"]
return [res]
def postproc_latency(
self, which: str = "inst", reps_per_img: int = 10, **kwargs
) -> List[Dict[str, Any]]:
"""Compute the post-processing latencies.
Parameters
----------
which : str, default="inst"
Which post-processing type. One of "inst", "sem", "type".
reps_per_img : int, default=10
Number of repetitions per image.
**kwargs:
Arbitrary keyword args for the post-proc func.
Returns
-------
List[Dict[str, Any]]:
A list of dicts containing throughput info of each of the samples.
"""
PP_LOOKUP = {
"sem": self._compute_sem_postproc_latency,
"type": self._compute_type_postproc_latency,
"inst": self._compute_inst_postproc_latency,
}
allowed = list(PP_LOOKUP.keys())
if which not in allowed:
raise ValueError(f"Illegal `type` arg. Got: {type}. Allowed: {allowed}")
aux_key = self.inferer.postprocessor.aux_key
inst_key = self.inferer.postprocessor.inst_key
samples = list(self.inferer.soft_masks.keys())
times = []
with tqdm(samples, total=len(samples)) as pbar:
for k in samples:
if which == "inst":
res = PP_LOOKUP["inst"](
prob_map=self.inferer.soft_masks[k][inst_key],
aux_map=self.inferer.soft_masks[k][aux_key],
return_cell_count=True,
reps=reps_per_img,
**kwargs,
)
elif which == "type":
res = PP_LOOKUP["type"](
prob_map=self.inferer.soft_masks[k][inst_key],
inst_map=self.inferer.out_masks[k]["inst"],
reps=reps_per_img,
**kwargs,
)
elif which == "sem":
res = PP_LOOKUP["sem"](
prob_map=self.inferer.soft_masks[k]["sem"],
reps=reps_per_img,
**kwargs,
)
res["name"] = k
times.append(res)
pbar.update(1)
return times
def model_latency(
self,
input_size: Tuple[int, int] = (256, 256),
batch_size: int = 1,
reps: int = 100,
warmup_reps: int = 3,
device="cuda",
) -> List[Dict[str, Any]]:
"""Measure the model inference latency in secods.
I.e. one forward pass of the model.
Parameters
----------
input_size : Tuple[int, int]
Height and width of the input.
batch_size : int, default=1
Batch size.
reps : int, default=100
Number of repetitions to run the latency measurement.
warmup_reps : int, default=3
Number of repetitions that are used for warming up the gpu.
I.e. the number of repetitions that are excluded from the
beginning.
device : str, default="cuda"
One of 'cuda' or 'cpu'.
Returns
-------
List[Dict[str, Any]]:
            The latency mean and standard deviation in seconds + extra info.
"""
dummy_input = torch.randn(
batch_size,
3,
input_size[0],
input_size[1],
dtype=torch.float,
device=device,
)
if device == "cpu":
self.inferer.predictor.model.cpu()
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
timings = np.zeros((reps, 1))
with tqdm(total=reps, unit="rep") as pbar:
with torch.no_grad():
for rep in range(reps):
starter.record()
_ = self.inferer.predictor.forward_pass(dummy_input)
ender.record()
# wait for gpu sync
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender) / 1000
timings[rep] = curr_time
pbar.update(1)
mean_syn = np.sum(timings[warmup_reps:]) / (reps - warmup_reps)
std_syn = np.std(timings[warmup_reps:])
shape = tuple(dummy_input.shape[1:])
res = {
"batch_size": batch_size,
"input_shape": shape,
"mean latency(s)": mean_syn,
"std latency(s)": std_syn,
}
return [res]
def model_throughput(
self,
input_size: Tuple[int, int] = (256, 256),
batch_size: int = 1,
reps: int = 100,
warmup_reps: int = 3,
device="cuda",
) -> List[Dict[str, Any]]:
"""Measure the inference throughput in seconds.
I.e. Measure model forward pass throughput (image/s).
Parameters
----------
input_size : Tuple[int, int]
Height and width of the input.
batch_size : int, default=1
Batch size for the model.
        reps : int, default=100
Number of repetitions to run the latency measurement.
warmup_reps : int, default=3
Number of repetitions that are used for warming up the gpu.
I.e. the number of repetitions that are excluded from the
beginning.
device : str, default="cuda"
One of 'cuda' or 'cpu'.
Returns
-------
List[Dict[str, Any]]:
The throughput of the model (image/s) + extra info.
"""
dummy_input = torch.randn(
batch_size,
3,
input_size[0],
input_size[1],
dtype=torch.float,
device=device,
)
if device == "cpu":
self.inferer.predictor.model.cpu()
total_time = 0
with torch.no_grad():
            for rep in range(reps):
                starter = torch.cuda.Event(enable_timing=True)
                ender = torch.cuda.Event(enable_timing=True)
                starter.record()
                _ = self.inferer.predictor.forward_pass(dummy_input)
                ender.record()
                torch.cuda.synchronize()
                curr_time = starter.elapsed_time(ender) / 1000
                # exclude the warmup repetitions, which are also excluded
                # from the image count in the throughput formula below
                if rep >= warmup_reps:
                    total_time += curr_time
throughput = ((reps - warmup_reps) * batch_size) / total_time
shape = tuple(dummy_input.shape[1:])
res = {
"batch_size": batch_size,
"input_shape": shape,
"throughput(img/s)": throughput,
}
return [res]
def _compute_latency(
self,
func: Callable,
maps: Dict[str, Union[np.ndarray, torch.Tensor]],
reps: int = 300,
warmup_reps: int = 3,
**kwargs,
) -> Dict[str, Any]:
"""Run the latency measurements."""
if kwargs:
kwargs = {**maps, **kwargs}
else:
kwargs = maps
timings = repeat(
lambda: func(**kwargs),
repeat=reps + warmup_reps,
number=1,
)
timings = np.array(timings)
mean_syn = np.mean(timings[warmup_reps:])
std_syn = np.std(timings[warmup_reps:])
res = {}
res["repetitions"] = reps
res["mean latency(s)"] = mean_syn
res["std latency(s)"] = std_syn
return res
def _compute_inst_postproc_latency(
self,
prob_map: np.ndarray,
aux_map: np.ndarray,
reps: int = 10,
warmup_reps: int = 2,
**kwargs,
) -> Dict[str, Any]:
"""Compute the instance segmentation post-proc latency in seconds.
        I.e. one run of the post-processing method.
        NOTE: also returns additional data that affects the latency, such
        as the number of objects and number of object pixels in the image.
Parameters
----------
prob_map : np.ndarray
The probability map of the object instances. Shape: (C, H, W).
aux_map : np.ndarray
            The auxiliary regression output. Shape: (C, H, W).
reps : int, default=10
Number of repetitions for timeit.
warmup_reps : int, default=2
Warmup loops for the function.
**kwargs:
Arbitrary keyword args for the post-proc func.
Returns
-------
Dict[str, Any]:
A dictionary with data related to the sample latency.
"""
res = self._compute_latency(
self.inferer.postprocessor._get_inst_map,
maps={"prob_map": prob_map, "aux_map": aux_map},
reps=reps,
warmup_reps=warmup_reps,
**kwargs,
)
x = self.inferer.postprocessor._get_inst_map(prob_map, aux_map, **kwargs)
cells, counts = np.unique(x, return_counts=True)
res["input_shape"] = x.shape
res["ncells"] = len(cells)
res["npixels"] = np.sum(counts[1:])
return res
def _compute_sem_postproc_latency(
self, prob_map: np.ndarray, reps: int = 10, warmup_reps: int = 2, **kwargs
) -> List[Dict[str, Any]]:
"""Compute the semantic segmentation post-proc latency in seconds.
        I.e. one run of the post-processing method.
Parameters
----------
prob_map : np.ndarray
The probability map of the semantic segmentation. Shape: (C, H, W).
reps : int, default=10
Number of repetitions for timeit.
warmup_reps : int, default=2
Warmup loops for the function.
**kwargs:
Arbitrary keyword args for the post-proc func.
Returns
-------
Dict[str, Any]:
A dictionary with data related to the sample latency.
"""
res = self._compute_latency(
self.inferer.postprocessor._get_sem_map,
maps={"prob_map": prob_map},
reps=reps,
warmup_reps=warmup_reps,
**kwargs,
)
x = self.inferer.postprocessor._get_sem_map(prob_map, **kwargs)
_, npixels = np.unique(x, return_counts=True)
res["input_shape"] = x.shape
res["npixels"] = np.sum(npixels[1:])
return res
def _compute_type_postproc_latency(
self,
prob_map: np.ndarray,
inst_map: np.ndarray,
reps: int = 10,
warmup_reps: int = 2,
**kwargs,
) -> List[Dict[str, Any]]:
"""Compute the type segmentation post-proc latency in seconds.
        I.e. one run of the post-processing method.
Parameters
----------
prob_map : np.ndarray
The probability map of the object instances. Shape: (C, H, W).
inst_map : np.ndarray
The labelled instance segmentation map. Shape: (H, W).
reps : int, default=10
Number of repetitions for timeit.
warmup_reps : int, default=2
Warmup loops for the function.
**kwargs:
Arbitrary keyword args for the post-proc func.
Returns
-------
Dict[str, Any]:
A dictionary with data related to the sample latency.
"""
res = self._compute_latency(
self.inferer.postprocessor._get_type_map,
maps={"prob_map": prob_map, "inst_map": inst_map},
reps=reps,
warmup_reps=warmup_reps,
**kwargs,
)
x = self.inferer.postprocessor._get_type_map(
prob_map, inst_map, use_mask=True, **kwargs
)
cells = np.unique(inst_map)
_, npixels = np.unique(x, return_counts=True)
res["img_shape"] = x.shape
res["ncells"] = len(cells)
res["npixels"] = np.sum(npixels[1:])
return res
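# A minimal usage sketch, not part of the original module. The inferer
# construction is a placeholder: any configured BaseInferer subclass created
# with `save_intermediate=True` and already run via `infer()` should work.
#
#     inferer = ...  # e.g. a cellseg_models_pytorch inferer instance
#     inferer.infer()
#     bm = LatencyBenchmarker(inferer)
#     print(bm.model_latency(input_size=(256, 256), batch_size=1, reps=10))
#     print(bm.postproc_latency(which="inst", reps_per_img=5))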
|
PypiClean
|
/p01.cgi-0.5.1.zip/p01.cgi-0.5.1/src/p01/cgi/parser.py
|
__docformat__ = "reStructuredText"
import re
import os
import urllib
import rfc822
import tempfile
import cStringIO
import zope.interface
from p01.cgi import interfaces
#TODO: should this be a zope.conf property?
maxlen = 0
#TODO: maybe move all those methods to a class?
OVERSIZE_FIELD_CONTENT = 1000
class SimpleField:
"""Simple key value pair field."""
zope.interface.implements(interfaces.ISimpleField)
def __init__(self, name, value):
"""Constructor from field name and value."""
self.name = name
self.value = value
def __repr__(self):
return "<%s, %r = %r>" % (self.__class__.__name__, self.name,
self.value)
def parseFormData(method, inputStream=None, headers=None, boundary="",
environ=os.environ, tmpFileFactory=None, tmpFileFactoryArguments=None):
"""Parse form data and return a list of fields."""
# GET or HEAD request
if method == 'GET' or method == 'HEAD':
qs = environ.get('QUERY_STRING', None)
if qs is not None:
# parse query string and return a simple field storage
return [SimpleField(key, value) for key, value
in parseQueryString(qs)]
return None
    # POST request -- be specific; note that we do not catch unknown methods
if method == 'POST':
content_type = environ.get('CONTENT_TYPE')
if content_type:
if content_type.startswith('multipart/'):
fieldStorage = parseMultiParts(inputStream, headers, boundary,
environ, tmpFileFactory, tmpFileFactoryArguments)
return fieldStorage.list
if content_type.startswith('application/x-www-form-urlencoded'):
return parseUrlEncoded(inputStream, headers, environ)
#all other types get None
return None
def parseUrlEncoded(inputStream=None, headers=None, environ=os.environ):
"""Parse x-www-form-urlencoded form data and return a list of fields.
    Subparts are not supported."""
# setup header if not given
if headers is None:
headers = {}
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
elif 'HTTP_CONTENT_LENGTH' in environ:
headers['content-length'] = environ['HTTP_CONTENT_LENGTH']
clen = -1
if 'content-length' in headers:
try:
clen = int(headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
# TODO: implement maxlen support via zope.conf or os.environ?
raise ValueError, 'Maximum content length exceeded'
qs = inputStream.read(clen)
if qs is not None:
# parse query string and return a simple field storage
return [SimpleField(key, value) for key, value
in parseQueryString(qs)]
return None
def parseMultiParts(inputStream=None, headers=None, boundary="",
environ=os.environ, tmpFileFactory=None, tmpFileFactoryArguments=None):
"""Parse multipart form data and return a list of fields.
Or called for a contained part (where content-disposition is ``form-data``
Or called for a separator part that get thrown away"""
#TODO: maybe separate the above 3 functions into 3 different methods
# that could ensure more robust content checking
fieldStorage = MultiPartField(inputStream, boundary, tmpFileFactory,
tmpFileFactoryArguments)
# setup header if not given
if headers is None:
headers = {}
headers['content-type'] = environ.get('CONTENT_TYPE', 'text/plain')
if 'CONTENT_LENGTH' in environ:
headers['content-length'] = environ['CONTENT_LENGTH']
elif 'HTTP_CONTENT_LENGTH' in environ:
headers['content-length'] = environ['HTTP_CONTENT_LENGTH']
fieldStorage.headers = headers
# Process content-disposition header
cdisp, pdict = "", {}
if 'content-disposition' in fieldStorage.headers:
cdisp, pdict = parseHeader(fieldStorage.headers['content-disposition'])
fieldStorage.disposition = cdisp
fieldStorage.disposition_options = pdict
if 'name' in pdict:
fieldStorage.name = pdict['name']
if 'filename' in pdict:
fieldStorage.filename = pdict['filename']
# Process content-type header
if 'content-type' in fieldStorage.headers:
ctype, pdict = parseHeader(fieldStorage.headers['content-type'])
else:
ctype, pdict = "text/plain", {}
fieldStorage.type = ctype
fieldStorage.type_options = pdict
fieldStorage.innerboundary = ""
if 'boundary' in pdict:
fieldStorage.innerboundary = pdict['boundary']
clen = -1
if 'content-length' in fieldStorage.headers:
try:
clen = int(fieldStorage.headers['content-length'])
except ValueError:
pass
if maxlen and clen > maxlen:
# TODO: implement maxlen support via zope.conf or os.environ?
raise ValueError, 'Maximum content length exceeded'
fieldStorage.length = clen
if ctype.startswith('multipart/'):
fieldStorage.readMulti(environ)
else:
fieldStorage.readSingle()
return fieldStorage
def validBoundary(s):
return re.match("^[ -~]{0,200}[!-~]$", s)
def parseHeader(line):
"""Returns the main content-type and a dictionary of options."""
plist = [x.strip() for x in line.split(';')]
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
def parseQueryString(qs):
"""Parse a URL-encoded query string into a list of key, value pair."""
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for kv in pairs:
if not kv:
continue
nv = kv.split('=', 1)
if len(nv) != 2:
# ensure an empty string as value for a given key
nv.append('')
if len(nv[1]):
name = urllib.unquote(nv[0].replace('+', ' '))
value = urllib.unquote(nv[1].replace('+', ' '))
r.append((name, value))
return r
class MultiPartField:
"""Store a sequence of fields, reading multipart/form-data."""
zope.interface.implements(interfaces.IMultiPartField)
headers = None
disposition = None
disposition_options = None
type = None
type_options = None
def __init__(self, inputStream=None, boundary="", tmpFileFactory=None,
tmpFileFactoryArguments=None):
"""MultiPartField used for multipart content."""
self.inputStream = inputStream
self.outerboundary = boundary
self.innerboundary = ""
self.bufsize = 8*1024
self.length = -1
self.done = 0
self.name = None
self.filename = None
self.list = None
self.file = None
if tmpFileFactory is None:
self.tmpFileFactory = tempfile.TemporaryFile
else:
self.tmpFileFactory = tmpFileFactory
self.tmpFileFactoryArguments = tmpFileFactoryArguments
@property
def value(self):
if self.file and self.filename is None:
# this will only return the file data as value if the filename is
# not None. A real file upload value must get accessed via self.file
# because it returns None as value.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
return value
elif self.list is not None:
return self.list
return None
def addPart(self, part):
self.list.append(part)
def readMulti(self, environ):
"""Read a part that is itself multipart."""
ib = self.innerboundary
if not validBoundary(ib):
raise ValueError, 'Invalid boundary in multipart form: %r' % (ib,)
self.list = []
# consume first part
part = parseMultiParts(self.inputStream, {}, ib, environ,
self.tmpFileFactory, self.tmpFileFactoryArguments)
# and throw it away
while not part.done:
headers = rfc822.Message(self.inputStream)
part = parseMultiParts(self.inputStream, headers, ib, environ,
self.tmpFileFactory, self.tmpFileFactoryArguments)
self.addPart(part)
self.skipLines()
def readSingle(self):
"""Read an atomic part."""
if self.length >= 0:
self.readBinary()
self.skipLines()
else:
self.readLines()
self.file.seek(0)
def readBinary(self):
"""Read binary data."""
self.file = self.makeTMPFile()
todo = self.length
if todo >= 0:
while todo > 0:
data = self.inputStream.read(min(todo, self.bufsize))
if not data:
self.done = -1
break
self.file.write(data)
todo = todo - len(data)
def readLines(self):
"""Read lines until EOF or outerboundary."""
if self.filename is not None:
# if we have a file upload (known by a filename) we use our
# tmpFileFactory
self.file = self.makeTMPFile()
self.__file = None
else:
            # if we have no file upload we start with a StringIO; later we
            # move the data to a tempfile.TemporaryFile if the data becomes
            # too big
self.file = self.__file = cStringIO.StringIO()
if self.outerboundary:
self.readLinesToOuterboundary()
else:
self.readLinesToEOF()
def __write(self, line):
if self.__file is not None:
if self.__file.tell() + len(line) > OVERSIZE_FIELD_CONTENT:
                # copy data to tmp file if too big
self.file = self.makeTMPFile()
self.file.write(self.__file.getvalue())
self.__file = None
self.file.write(line)
def readLinesToEOF(self):
"""Read lines until EOF."""
while 1:
line = self.inputStream.readline(1<<16)
if not line:
self.done = -1
break
self.__write(line)
def readLinesToOuterboundary(self):
"""Read lines until outerboundary."""
next = "--" + self.outerboundary
last = next + "--"
delim = ""
last_line_lfend = True
while 1:
line = self.inputStream.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
odelim = delim
if line[-2:] == "\r\n":
delim = "\r\n"
line = line[:-2]
last_line_lfend = True
elif line[-1] == "\n":
delim = "\n"
line = line[:-1]
last_line_lfend = True
else:
delim = ""
last_line_lfend = False
self.__write(odelim + line)
def skipLines(self):
"""Skip lines until outer boundary is defined."""
if not self.outerboundary or self.done:
return
next = "--" + self.outerboundary
last = next + "--"
last_line_lfend = True
while 1:
line = self.inputStream.readline(1<<16)
if not line:
self.done = -1
break
if line[:2] == "--" and last_line_lfend:
strippedline = line.strip()
if strippedline == next:
break
if strippedline == last:
self.done = 1
break
last_line_lfend = line.endswith('\n')
def makeTMPFile(self):
if self.tmpFileFactoryArguments is not None:
return self.tmpFileFactory(**self.tmpFileFactoryArguments)
else:
return self.tmpFileFactory()
def __repr__(self):
if self.filename:
return "<%s, %r: %r>" % (self.__class__.__name__, self.name,
self.filename)
else:
return "<%s, %r>" % (self.__class__.__name__, self.name)
|
PypiClean
|
/automic_rest-0.0.6-py3-none-any.whl/automic_rest/createrepository.py
|
import os
import json
from automic_rest import config
import requests
from requests.exceptions import HTTPError
from requests.exceptions import Timeout
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class createRepository:
def __init__(self, client_id:int=0, body=None):
# Summary: Initializes the repository for the specified client.
self.response = None
self.body = None
self.url = None
self.headers = None
self.content = None
self.text = None
self.status = None
self.path = config().setArgs('/{client_id}/repositories', locals())
self.bodydata = body
self.request()
def request(self):
requests_headers = {
'Content-type': 'application/json',
'Accept': 'application/json',
'Authorization' : "Basic %s" % config().base64auth
}
try:
r = requests.post(
config().url+self.path,
headers=requests_headers,
data=json.dumps(self.bodydata),
verify=config().sslverify,
timeout=config().timeout
)
# request body
self.body = r.request.body
# request url
self.url = r.request.url
# response headers
self.headers = r.headers
# raw bytes
self.content = r.content
# converts bytes to string
self.text = r.text
# convert raw bytes to json_dict
self.response = r.json()
# http status_code
self.status = r.status_code
# If the response was successful, no Exception will be raised
r.raise_for_status()
        except Timeout:
            # Timeout must be caught before the generic Exception handler,
            # otherwise this branch is unreachable
            print('The request timed out')
        except HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}')  # Python 3.6
        except Exception as err:
            print(f'Other error occurred: {err}')  # Python 3.6
return self
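# A minimal usage sketch, not part of the original module; the client id and
# request body are hypothetical, and automic_rest's `config` is assumed to be
# initialized (URL, credentials, SSL settings) before the call:
#
#     result = createRepository(client_id=100, body={})
#     if result.status == 200:
#         print(result.response)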
|
PypiClean
|
/django-trojsten-submit-0.1.0.tar.gz/django-trojsten-submit-0.1.0/submit/defaults.py
|
from django.core.urlresolvers import reverse
from django.utils.html import format_html
from django.utils.translation import ugettext_lazy as _
import submit.settings as submit_settings
from submit.models import Submit
def is_submit_accepted(submit):
"""
This method defines which submits will be accepted, penalized or not accepted.
This method is called after the submit is created, but before it is saved in database.
e.g. submits after deadline are not accepted
"""
return Submit.ACCEPTED
def form_success_message(submit):
"""
    The returned message will be added to `messages` after a successful submit via the form.
"""
if submit.receiver.send_to_judge:
return format_html(
_('Submit successful. Testing protocol will be soon available <a href="{link}">here</a>.'),
link=reverse('view_submit', args=[submit.id])
)
return _('Submit successful.')
def prefetch_data_for_score_calculation(reviews_qs):
"""
Format of displayed score can depend on other models.
Modify a default queryset of `Review.objects` by pre-fetching data
so that the function `display_score` won't need any additional database queries.
"""
return reviews_qs
def display_score(review):
"""
This function is called when a score is displayed to the user - `review.display_score()`.
Since `review.score` holds "data without context", points that will be displayed to the user are expected to be
    calculated from `review.score`, e.g. the score may be a percentage value for one type of submit and an absolute
    value for another type.
"""
return str(review.score)
def render_review_comment(review):
"""
Allows tweaks such as markdown rendering.
"""
return review.comment
def submit_receiver_type(receiver):
if '.zip' in receiver.get_extensions() and receiver.send_to_judge:
return 'testable zip'
if receiver.send_to_judge:
return 'source'
if receiver.has_form:
return 'description'
if receiver.external_link or receiver.allow_external_submits:
return 'external'
return 'other'
def display_submit_receiver_name(receiver):
"""
Used in admin to allow better identification of type of receiver.
(Type is not a property of receiver, but can be determined from receiver attributes.)
"""
return '{} ({})'.format(receiver.id, submit_receiver_type(receiver))
def default_inputs_folder_at_judge(receiver):
"""
When a receiver is added to a task and `receiver.send_to_judge` is checked,
    this function will be used to automatically set the name of the folder with inputs at the judge server.
    When this function is called, the SubmitReceiver object is created but not yet saved in the database.
"""
return '{}-{}'.format(submit_settings.JUDGE_INTERFACE_IDENTITY, receiver.id)
def can_post_submit(receiver, user):
"""
Defines who and when can post submits.
e.g. some tasks may be hidden
"""
return True
def has_admin_privileges_for_receiver(receiver, user):
"""
Defines who can view all submits of this receiver.
    e.g. an organizer of a specific competition can access all submits of all users in that competition
"""
return user.is_staff
|
PypiClean
|
/airflow_code_editor-5.2.2-py3-none-any.whl/airflow_code_editor/static/addon/scroll/simplescrollbars.js
|
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
function Bar(cls, orientation, scroll) {
this.orientation = orientation;
this.scroll = scroll;
this.screen = this.total = this.size = 1;
this.pos = 0;
this.node = document.createElement("div");
this.node.className = cls + "-" + orientation;
this.inner = this.node.appendChild(document.createElement("div"));
var self = this;
CodeMirror.on(this.inner, "mousedown", function(e) {
if (e.which != 1) return;
CodeMirror.e_preventDefault(e);
var axis = self.orientation == "horizontal" ? "pageX" : "pageY";
var start = e[axis], startpos = self.pos;
function done() {
CodeMirror.off(document, "mousemove", move);
CodeMirror.off(document, "mouseup", done);
}
function move(e) {
if (e.which != 1) return done();
self.moveTo(startpos + (e[axis] - start) * (self.total / self.size));
}
CodeMirror.on(document, "mousemove", move);
CodeMirror.on(document, "mouseup", done);
});
CodeMirror.on(this.node, "click", function(e) {
CodeMirror.e_preventDefault(e);
var innerBox = self.inner.getBoundingClientRect(), where;
if (self.orientation == "horizontal")
where = e.clientX < innerBox.left ? -1 : e.clientX > innerBox.right ? 1 : 0;
else
where = e.clientY < innerBox.top ? -1 : e.clientY > innerBox.bottom ? 1 : 0;
self.moveTo(self.pos + where * self.screen);
});
function onWheel(e) {
var moved = CodeMirror.wheelEventPixels(e)[self.orientation == "horizontal" ? "x" : "y"];
var oldPos = self.pos;
self.moveTo(self.pos + moved);
if (self.pos != oldPos) CodeMirror.e_preventDefault(e);
}
CodeMirror.on(this.node, "mousewheel", onWheel);
CodeMirror.on(this.node, "DOMMouseScroll", onWheel);
}
Bar.prototype.setPos = function(pos, force) {
if (pos < 0) pos = 0;
if (pos > this.total - this.screen) pos = this.total - this.screen;
if (!force && pos == this.pos) return false;
this.pos = pos;
this.inner.style[this.orientation == "horizontal" ? "left" : "top"] =
(pos * (this.size / this.total)) + "px";
  return true;
};
Bar.prototype.moveTo = function(pos) {
if (this.setPos(pos)) this.scroll(pos, this.orientation);
}
var minButtonSize = 10;
Bar.prototype.update = function(scrollSize, clientSize, barSize) {
var sizeChanged = this.screen != clientSize || this.total != scrollSize || this.size != barSize
if (sizeChanged) {
this.screen = clientSize;
this.total = scrollSize;
this.size = barSize;
}
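    // The handle's length is the visible fraction of the content times the
    // bar's own length; enforce a minimum so it stays grabbable, shrinking
    // the usable track to keep the position mapping linear.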
var buttonSize = this.screen * (this.size / this.total);
if (buttonSize < minButtonSize) {
this.size -= minButtonSize - buttonSize;
buttonSize = minButtonSize;
}
this.inner.style[this.orientation == "horizontal" ? "width" : "height"] =
buttonSize + "px";
this.setPos(this.pos, sizeChanged);
};
function SimpleScrollbars(cls, place, scroll) {
this.addClass = cls;
this.horiz = new Bar(cls, "horizontal", scroll);
place(this.horiz.node);
this.vert = new Bar(cls, "vertical", scroll);
place(this.vert.node);
this.width = null;
}
SimpleScrollbars.prototype.update = function(measure) {
if (this.width == null) {
var style = window.getComputedStyle ? window.getComputedStyle(this.horiz.node) : this.horiz.node.currentStyle;
if (style) this.width = parseInt(style.height);
}
var width = this.width || 0;
var needsH = measure.scrollWidth > measure.clientWidth + 1;
var needsV = measure.scrollHeight > measure.clientHeight + 1;
this.vert.node.style.display = needsV ? "block" : "none";
this.horiz.node.style.display = needsH ? "block" : "none";
if (needsV) {
this.vert.update(measure.scrollHeight, measure.clientHeight,
measure.viewHeight - (needsH ? width : 0));
this.vert.node.style.bottom = needsH ? width + "px" : "0";
}
if (needsH) {
this.horiz.update(measure.scrollWidth, measure.clientWidth,
measure.viewWidth - (needsV ? width : 0) - measure.barLeft);
this.horiz.node.style.right = needsV ? width + "px" : "0";
this.horiz.node.style.left = measure.barLeft + "px";
}
return {right: needsV ? width : 0, bottom: needsH ? width : 0};
};
SimpleScrollbars.prototype.setScrollTop = function(pos) {
this.vert.setPos(pos);
};
SimpleScrollbars.prototype.setScrollLeft = function(pos) {
this.horiz.setPos(pos);
};
SimpleScrollbars.prototype.clear = function() {
var parent = this.horiz.node.parentNode;
parent.removeChild(this.horiz.node);
parent.removeChild(this.vert.node);
};
CodeMirror.scrollbarModel.simple = function(place, scroll) {
return new SimpleScrollbars("CodeMirror-simplescroll", place, scroll);
};
CodeMirror.scrollbarModel.overlay = function(place, scroll) {
return new SimpleScrollbars("CodeMirror-overlayscroll", place, scroll);
};
});
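// Usage sketch: with this addon (and its companion simplescrollbars.css)
// loaded, the registered models are selected through CodeMirror's standard
// `scrollbarStyle` option.
//
//   var editor = CodeMirror(document.body, {
//     scrollbarStyle: "simple"   // or "overlay"
//   });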
|
PypiClean
|
/range_set-0.3.1.tar.gz/range_set-0.3.1/README.rst
|
RangeSet
========
License: Your choice of MIT or Apache License 2.0
--------------------------------------------------
Python sets are nice to work with, but very memory-inefficient if you need a
large set of mostly-consecutive integers. The RangeSet class provides
efficient handling and storage for these sets, as the sketch below shows.
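The storage idea, in a minimal sketch (a toy illustration of the technique,
not the package's actual API)::

    def to_ranges(sorted_ints):
        """Collapse sorted integers into inclusive (start, end) pairs."""
        ranges = []
        for n in sorted_ints:
            if ranges and n == ranges[-1][1] + 1:
                ranges[-1] = (ranges[-1][0], n)  # extend the current run
            else:
                ranges.append((n, n))            # start a new run
        return ranges

    assert to_ranges([1, 2, 3, 7, 8, 10]) == [(1, 3), (7, 8), (10, 10)]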
Non-integers?
=============
RangeSet works with any class whose instances are
* comparable
* step-able, i.e. you can add 1 to them / subtract 1 from them.
* discrete, i.e. there is no value between ``n`` and ``n+1``.
RangeSet doesn't add or subtract any other values, nor does it try to
subtract two instances from each other.
The requirement to subtract 1 is an optimization that could be removed if
necessary.
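For instance, a hypothetical ``Day`` wrapper meets all three requirements::

    import datetime as dt
    import functools

    @functools.total_ordering
    class Day:
        """A calendar day where ``day + 1`` means the next day."""
        def __init__(self, date):
            self.date = date
        def __add__(self, n):
            return Day(self.date + dt.timedelta(days=n))
        def __sub__(self, n):
            return Day(self.date - dt.timedelta(days=n))
        def __eq__(self, other):
            return self.date == other.date
        def __lt__(self, other):
            return self.date < other.date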
|
PypiClean
|
/la_businesses-0.1.1-py3-none-any.whl/la_businesses/la_businesses.py
|
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
import ast  # safe parsing of "(lat, long)" tuple strings
import logging
import datetime as dt
import pytz # for timezone handling
import pandas as pd
import requests
import simplekml
# ----------------------------------------------------------------------------#
# CONSTANTS AND CONFIGURATION
# ----------------------------------------------------------------------------#
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
URL = "https://data.lacity.org/api/views/6rrh-rzua/rows.csv?accessType=DOWNLOAD"
COMPLETE_LIST = "all_businesses.csv" # full database of businesses
RECENT_LIST = "recent_businesses.csv" # subset of recent businesses
DEFAULT_NDAYS = 30 # select businesses opened since this many days ago
WRITE_CHUNK_SIZE = 1024 # bytes
OUTPUT_DIR = Path.cwd() / 'files' # save all files here
OUTPUT_DIR.mkdir(parents=True, exist_ok=True) # make the dir if not exist
# ----------------------------------------------------------------------------#
# CORE FUNCTIONS
# ----------------------------------------------------------------------------#
def get_business_list():
"""Download latest businesses database."""
response = requests.get(URL, stream=True)
# Throw an error for bad status codes
response.raise_for_status()
with open(OUTPUT_DIR / COMPLETE_LIST, "wb") as handle:
        for block in response.iter_content(WRITE_CHUNK_SIZE):  # stream in 1 KiB chunks
handle.write(block)
logging.info(f"Saved complete business list as {COMPLETE_LIST}.")
return OUTPUT_DIR / COMPLETE_LIST
def load_business_list(file=None, update=False):
"""Load (optionally identified) database from file or download it first."""
business_list_file = OUTPUT_DIR / COMPLETE_LIST
if file:
logging.info(f"Loading business list {file.name} ...")
df = pd.read_csv(file)
df["LOCATION START DATE"] = pd.to_datetime(df["LOCATION START DATE"])
df["LOCATION END DATE"] = pd.to_datetime(df["LOCATION END DATE"])
logging.debug("Converted dates")
return df
    if update or not business_list_file.exists():
        logging.info("Downloading database of businesses ...")
        get_business_list()
    else:
        logging.info(
            f"Loading all businesses ...\n"
            f"Using cached data from {last_mod(business_list_file)}. "
            f"Use -u flag to update.")
df = pd.read_csv(business_list_file)
df["LOCATION START DATE"] = pd.to_datetime(df["LOCATION START DATE"])
df["LOCATION END DATE"] = pd.to_datetime(df["LOCATION END DATE"])
logging.debug("Converted dates")
return df
def last_mod(file):
"""Returns a string of the last modified time of a Path() in local timezone"""
fmt = "%d %b %Y at %I:%M %p %Z" # time format
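    # st_mtime is a naive POSIX timestamp: read it as UTC, attach UTC tzinfo,
    # then convert to the local timezone before formatting.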
return pytz.utc.localize(dt.datetime.utcfromtimestamp(
file.stat().st_mtime)).astimezone().strftime(fmt)
def select_recent(df, outfile=None, ndays=DEFAULT_NDAYS):
logging.info(f"Selecting businesses starting {ndays} days ago or later ...")
cutoff_date = dt.datetime.now() - dt.timedelta(days=ndays)
df = df[df["LOCATION START DATE"] > cutoff_date]
logging.debug(f"Selected recent since {cutoff_date.date()}: {len(df)} items")
df = df.sort_values(by="LOCATION START DATE", ascending=False)
logging.debug("Sorted by start date")
output_filename = outfile or RECENT_LIST
output_file = OUTPUT_DIR / output_filename
df.to_csv(output_file, index=False)
logging.info(f"Saved {len(df)} recent businesses to {output_file}.")
return df
def df_to_kml(df, outfile=None):
"""Make a KML file from pd.DataFrame of addresses"""
df = df.dropna(subset=["LOCATION"])
df = df.reset_index(drop=True)
logging.debug("Ignoring places with no lat-long")
kml = simplekml.Kml()
    for _, row in df.iterrows():
        # LOCATION holds a "(lat, long)" tuple string; parse it without eval().
        lat, long = ast.literal_eval(row["LOCATION"])
        kml.newpoint(
            name=str(row["BUSINESS NAME"]) + "\n" + str(row["DBA NAME"]),
            description=", ".join(
                [str(row["STREET ADDRESS"]), str(row["CITY"]), str(row["ZIP CODE"])]
            ),
            coords=[(long, lat)],  # KML order is (longitude, latitude)
        )
output_filename = outfile or "recent_businesses"
output_file = OUTPUT_DIR / (output_filename + ".kml")
kml.save(output_file)
logging.debug("made points")
logging.info("Created KML file " + str(output_file))
# ----------------------------------------------------------------------------#
# USER INTERFACE FUNCTIONS
# ----------------------------------------------------------------------------#
def get_parser():
parser = ArgumentParser(
description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"-u", "--update", action="store_true", dest="update", help="update data"
)
parser.add_argument(
"-d",
"--days",
action="store",
dest="ndays",
type=int,
default=DEFAULT_NDAYS,
help="started since NDAYS days ago",
)
return parser
def main():
try:
args = get_parser().parse_args()
df = load_business_list(update=args.update)
df = select_recent(df, ndays=args.ndays)
df_to_kml(df)
except KeyboardInterrupt:
logging.error("!!! PROGRAM ABORTED WITH CTRL-C. !!!\n")
if __name__ == "__main__":
main()
|
PypiClean
|
/RsCMPX_LteMeas-4.0.185.tar.gz/RsCMPX_LteMeas-4.0.185/RsCMPX_LteMeas/Implementations/LteMeas/MultiEval/ListPy/EsFlatness/Maxr/Extreme.py
|
from typing import List
from .......Internal.Core import Core
from .......Internal.CommandsGroup import CommandsGroup
from .......Internal import Conversions
from .......Internal.ArgSingleSuppressed import ArgSingleSuppressed
from .......Internal.Types import DataType
from ....... import repcap
# noinspection PyPep8Naming,PyAttributeOutsideInit,SpellCheckingInspection
class ExtremeCls:
"""Extreme commands group definition. 2 total commands, 0 Subgroups, 2 group commands"""
def __init__(self, core: Core, parent):
self._core = core
self._cmd_group = CommandsGroup("extreme", core, parent)
def fetch(self, maxRange=repcap.MaxRange.Default) -> List[float]:
"""SCPI: FETCh:LTE:MEASurement<Instance>:MEValuation:LIST:ESFLatness:MAXR<nr>:EXTReme \n
Snippet: value: List[float] = driver.lteMeas.multiEval.listPy.esFlatness.maxr.extreme.fetch(maxRange = repcap.MaxRange.Default) \n
Return equalizer spectrum flatness single value results (maximum within a range) for all measured list mode segments. The
values described below are returned by FETCh commands. CALCulate commands return limit check results instead, one value
for each result listed below. \n
Suppressed linked return values: reliability \n
:param maxRange: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Maxr')
:return: maxr: Comma-separated list of values, one per measured segment."""
maxRange_cmd_val = self._cmd_group.get_repcap_cmd_value(maxRange, repcap.MaxRange)
suppressed = ArgSingleSuppressed(0, DataType.Integer, False, 1, 'Reliability')
response = self._core.io.query_bin_or_ascii_float_list_suppressed(f'FETCh:LTE:MEASurement<Instance>:MEValuation:LIST:ESFLatness:MAXR{maxRange_cmd_val}:EXTReme?', suppressed)
return response
def calculate(self, maxRange=repcap.MaxRange.Default) -> List[float or bool]:
"""SCPI: CALCulate:LTE:MEASurement<Instance>:MEValuation:LIST:ESFLatness:MAXR<nr>:EXTReme \n
Snippet: value: List[float or bool] = driver.lteMeas.multiEval.listPy.esFlatness.maxr.extreme.calculate(maxRange = repcap.MaxRange.Default) \n
Return equalizer spectrum flatness single value results (maximum within a range) for all measured list mode segments. The
values described below are returned by FETCh commands. CALCulate commands return limit check results instead, one value
for each result listed below. \n
Suppressed linked return values: reliability \n
:param maxRange: optional repeated capability selector. Default value: Nr1 (settable in the interface 'Maxr')
:return: maxr: (float or boolean items) Comma-separated list of values, one per measured segment."""
maxRange_cmd_val = self._cmd_group.get_repcap_cmd_value(maxRange, repcap.MaxRange)
suppressed = ArgSingleSuppressed(0, DataType.Integer, False, 1, 'Reliability')
response = self._core.io.query_str_suppressed(f'CALCulate:LTE:MEASurement<Instance>:MEValuation:LIST:ESFLatness:MAXR{maxRange_cmd_val}:EXTReme?', suppressed)
return Conversions.str_to_float_or_bool_list(response)
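# Usage sketch (hedged): the top-level session class and the VISA resource
# string below are assumptions modeled on other auto-generated Rohde & Schwarz
# drivers; only the fetch path is taken from the docstring snippets above.
#
#   from RsCMPX_LteMeas import RsCMPX_LteMeas, repcap
#   driver = RsCMPX_LteMeas('TCPIP::192.168.0.1::hislip0')
#   values = driver.lteMeas.multiEval.listPy.esFlatness.maxr.extreme.fetch(
#       maxRange=repcap.MaxRange.Nr1)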
|
PypiClean
|
/aim-ui-custom-3.15.2.post2.tar.gz/aim-ui-custom-3.15.2.post2/aim_ui/build/vs/editor/editor.main.nls.zh-cn.js
|
define("vs/editor/editor.main.nls.zh-cn",{"vs/base/browser/ui/actionbar/actionViewItems":["{0} ({1})"],"vs/base/browser/ui/findinput/findInput":["\u8F93\u5165"],"vs/base/browser/ui/findinput/findInputCheckboxes":["\u533A\u5206\u5927\u5C0F\u5199","\u5168\u5B57\u5339\u914D","\u4F7F\u7528\u6B63\u5219\u8868\u8FBE\u5F0F"],"vs/base/browser/ui/findinput/replaceInput":["\u8F93\u5165","\u4FDD\u7559\u5927\u5C0F\u5199"],"vs/base/browser/ui/iconLabel/iconLabelHover":["\u6B63\u5728\u52A0\u8F7D\u2026"],"vs/base/browser/ui/inputbox/inputBox":["\u9519\u8BEF: {0}","\u8B66\u544A: {0}","\u4FE1\u606F: {0}","\u5BF9\u4E8E\u5386\u53F2\u8BB0\u5F55"],"vs/base/browser/ui/keybindingLabel/keybindingLabel":["\u672A\u7ED1\u5B9A"],"vs/base/browser/ui/tree/abstractTree":["\u6E05\u9664","\u7981\u7528\u8F93\u5165\u65F6\u7B5B\u9009","\u542F\u7528\u8F93\u5165\u65F6\u7B5B\u9009","\u672A\u627E\u5230\u5143\u7D20","\u5DF2\u5339\u914D {0} \u4E2A\u5143\u7D20(\u5171 {1} \u4E2A)"],"vs/base/common/actions":["(\u7A7A)"],"vs/base/common/errorMessage":["{0}: {1}","\u53D1\u751F\u4E86\u7CFB\u7EDF\u9519\u8BEF ({0})","\u51FA\u73B0\u672A\u77E5\u9519\u8BEF\u3002\u6709\u5173\u8BE6\u7EC6\u4FE1\u606F\uFF0C\u8BF7\u53C2\u9605\u65E5\u5FD7\u3002","\u51FA\u73B0\u672A\u77E5\u9519\u8BEF\u3002\u6709\u5173\u8BE6\u7EC6\u4FE1\u606F\uFF0C\u8BF7\u53C2\u9605\u65E5\u5FD7\u3002","{0} \u4E2A(\u5171 {1} \u4E2A\u9519\u8BEF)","\u51FA\u73B0\u672A\u77E5\u9519\u8BEF\u3002\u6709\u5173\u8BE6\u7EC6\u4FE1\u606F\uFF0C\u8BF7\u53C2\u9605\u65E5\u5FD7\u3002"],"vs/base/common/keybindingLabels":["Ctrl","Shift","Alt","Windows","Ctrl","Shift","Alt","\u8D85\u952E","Control","Shift","\u9009\u9879","Command","Control","Shift","Alt","Windows","Control","Shift","Alt","\u8D85\u952E"],"vs/base/parts/quickinput/browser/quickInput":["\u4E0A\u4E00\u6B65",'\u6309 "Enter" \u4EE5\u786E\u8BA4\u6216\u6309 "Esc" \u4EE5\u53D6\u6D88',"{0}/{1}","\u5728\u6B64\u8F93\u5165\u53EF\u7F29\u5C0F\u7ED3\u679C\u8303\u56F4\u3002","{0} \u4E2A\u7ED3\u679C","\u5DF2\u9009 {0} \u9879","\u786E\u5B9A","\u81EA\u5B9A\u4E49","\u540E\u9000 ({0})","\u4E0A\u4E00\u6B65"],"vs/base/parts/quickinput/browser/quickInputList":["\u5FEB\u901F\u8F93\u5165"],"vs/editor/browser/controller/textAreaHandler":["\u7F16\u8F91\u5668","\u73B0\u5728\u65E0\u6CD5\u8BBF\u95EE\u7F16\u8F91\u5668\u3002\u6309 {0} \u83B7\u53D6\u9009\u9879\u3002"],"vs/editor/browser/coreCommands":["\u5373\u4F7F\u8F6C\u5230\u8F83\u957F\u7684\u884C\uFF0C\u4E5F\u4E00\u76F4\u5230\u672B\u5C3E","\u5373\u4F7F\u8F6C\u5230\u8F83\u957F\u7684\u884C\uFF0C\u4E5F\u4E00\u76F4\u5230\u672B\u5C3E","\u5DF2\u5220\u9664\u8F85\u52A9\u6E38\u6807"],"vs/editor/browser/editorExtensions":["\u64A4\u6D88(&&U)","\u64A4\u6D88","\u6062\u590D(&&R)","\u6062\u590D","\u5168\u9009(&&S)","\u9009\u62E9\u5168\u90E8"],"vs/editor/browser/widget/codeEditorWidget":["\u5149\u6807\u6570\u91CF\u88AB\u9650\u5236\u4E3A {0}\u3002"],"vs/editor/browser/widget/diffEditorWidget":["\u5DEE\u5F02\u7F16\u8F91\u5668\u4E2D\u63D2\u5165\u9879\u7684\u7EBF\u6761\u4FEE\u9970\u3002","\u5DEE\u5F02\u7F16\u8F91\u5668\u4E2D\u5220\u9664\u9879\u7684\u7EBF\u6761\u4FEE\u9970\u3002","\u6587\u4EF6\u8FC7\u5927\uFF0C\u65E0\u6CD5\u6BD4\u8F83\u3002"],"vs/editor/browser/widget/diffReview":["\u5DEE\u5F02\u8BC4\u5BA1\u4E2D\u7684\u201C\u63D2\u5165\u201D\u56FE\u6807\u3002","\u5DEE\u5F02\u8BC4\u5BA1\u4E2D\u7684\u201C\u5220\u9664\u201D\u56FE\u6807\u3002","\u5DEE\u5F02\u8BC4\u5BA1\u4E2D\u7684\u201C\u5173\u95ED\u201D\u56FE\u6807\u3002","\u5173\u95ED","\u672A\u66F4\u6539\u884C","\u66F4\u6539\u4E86 1 \u884C","\u66F4\u6539\u4E86 {0} \u884C","\u5DEE\u5F02 {0}/ 
{1}: \u539F\u59CB\u884C {2}\uFF0C{3}\uFF0C\u4FEE\u6539\u540E\u7684\u884C {4}\uFF0C{5}","\u7A7A\u767D","{0} \u672A\u66F4\u6539\u7684\u884C {1}","{0}\u539F\u59CB\u884C{1}\u4FEE\u6539\u7684\u884C{2}","+ {0}\u4FEE\u6539\u7684\u884C{1}","- {0}\u539F\u59CB\u884C{1}","\u8F6C\u81F3\u4E0B\u4E00\u4E2A\u5DEE\u5F02","\u8F6C\u81F3\u4E0A\u4E00\u4E2A\u5DEE\u5F02"],"vs/editor/browser/widget/inlineDiffMargin":["\u590D\u5236\u5DF2\u5220\u9664\u7684\u884C","\u590D\u5236\u5DF2\u5220\u9664\u7684\u884C","\u590D\u5236\u66F4\u6539\u7684\u884C","\u590D\u5236\u66F4\u6539\u7684\u884C","\u590D\u5236\u5DF2\u5220\u9664\u7684\u884C({0})","\u590D\u5236\u66F4\u6539\u7684\u884C({0})","\u8FD8\u539F\u6B64\u66F4\u6539","\u590D\u5236\u5DF2\u5220\u9664\u7684\u884C({0})","\u590D\u5236\u66F4\u6539\u7684\u884C({0})"],"vs/editor/common/config/editorConfigurationSchema":["\u7F16\u8F91\u5668","\u4E00\u4E2A\u5236\u8868\u7B26\u7B49\u4E8E\u7684\u7A7A\u683C\u6570\u3002\u5728 `#editor.detectIndentation#` \u542F\u7528\u65F6\uFF0C\u6839\u636E\u6587\u4EF6\u5185\u5BB9\uFF0C\u8BE5\u8BBE\u7F6E\u53EF\u80FD\u4F1A\u88AB\u8986\u76D6\u3002","\u6309 `Tab` \u952E\u65F6\u63D2\u5165\u7A7A\u683C\u3002\u8BE5\u8BBE\u7F6E\u5728 `#editor.detectIndentation#` \u542F\u7528\u65F6\u6839\u636E\u6587\u4EF6\u5185\u5BB9\u53EF\u80FD\u4F1A\u88AB\u8986\u76D6\u3002","\u63A7\u5236\u662F\u5426\u5728\u6253\u5F00\u6587\u4EF6\u65F6\uFF0C\u57FA\u4E8E\u6587\u4EF6\u5185\u5BB9\u81EA\u52A8\u68C0\u6D4B `#editor.tabSize#` \u548C `#editor.insertSpaces#`\u3002","\u5220\u9664\u81EA\u52A8\u63D2\u5165\u7684\u5C3E\u968F\u7A7A\u767D\u7B26\u53F7\u3002","\u5BF9\u5927\u578B\u6587\u4EF6\u8FDB\u884C\u7279\u6B8A\u5904\u7406\uFF0C\u7981\u7528\u67D0\u4E9B\u5185\u5B58\u5BC6\u96C6\u578B\u529F\u80FD\u3002","\u63A7\u5236\u662F\u5426\u6839\u636E\u6587\u6863\u4E2D\u7684\u6587\u5B57\u8BA1\u7B97\u81EA\u52A8\u5B8C\u6210\u5217\u8868\u3002","\u4EC5\u5EFA\u8BAE\u6D3B\u52A8\u6587\u6863\u4E2D\u7684\u5B57\u8BCD\u3002","\u5EFA\u8BAE\u4F7F\u7528\u540C\u4E00\u8BED\u8A00\u7684\u6240\u6709\u6253\u5F00\u7684\u6587\u6863\u4E2D\u7684\u5B57\u8BCD\u3002","\u5EFA\u8BAE\u6240\u6709\u6253\u5F00\u7684\u6587\u6863\u4E2D\u7684\u5B57\u8BCD\u3002","\u63A7\u5236\u901A\u8FC7\u54EA\u4E9B\u6587\u6863\u8BA1\u7B97\u57FA\u4E8E\u5B57\u8BCD\u7684\u8865\u5168\u3002","\u5BF9\u6240\u6709\u989C\u8272\u4E3B\u9898\u542F\u7528\u8BED\u4E49\u7A81\u51FA\u663E\u793A\u3002","\u5BF9\u6240\u6709\u989C\u8272\u4E3B\u9898\u7981\u7528\u8BED\u4E49\u7A81\u51FA\u663E\u793A\u3002",'\u8BED\u4E49\u7A81\u51FA\u663E\u793A\u662F\u7531\u5F53\u524D\u989C\u8272\u4E3B\u9898\u7684 "semanticHighlighting" \u8BBE\u7F6E\u914D\u7F6E\u7684\u3002',"\u63A7\u5236\u662F\u5426\u4E3A\u652F\u6301\u5B83\u7684\u8BED\u8A00\u663E\u793A\u8BED\u4E49\u7A81\u51FA\u663E\u793A\u3002","\u5728\u901F\u89C8\u7F16\u8F91\u5668\u4E2D\uFF0C\u5373\u4F7F\u53CC\u51FB\u5176\u4E2D\u7684\u5185\u5BB9\u6216\u8005\u6309 `Esc` 
\u952E\uFF0C\u4E5F\u4FDD\u6301\u5176\u6253\u5F00\u72B6\u6001\u3002","\u7531\u4E8E\u6027\u80FD\u539F\u56E0\uFF0C\u8D85\u8FC7\u8FD9\u4E2A\u957F\u5EA6\u7684\u884C\u5C06\u4E0D\u4F1A\u88AB\u6807\u8BB0","\u5B9A\u4E49\u589E\u52A0\u548C\u51CF\u5C11\u7F29\u8FDB\u7684\u62EC\u53F7\u3002","\u5DE6\u65B9\u62EC\u53F7\u5B57\u7B26\u6216\u5B57\u7B26\u4E32\u5E8F\u5217\u3002","\u53F3\u65B9\u62EC\u53F7\u5B57\u7B26\u6216\u5B57\u7B26\u4E32\u5E8F\u5217\u3002","\u5982\u679C\u542F\u7528\u65B9\u62EC\u53F7\u5BF9\u7740\u8272\uFF0C\u5219\u6309\u7167\u5176\u5D4C\u5957\u7EA7\u522B\u5B9A\u4E49\u5DF2\u7740\u8272\u7684\u65B9\u62EC\u53F7\u5BF9\u3002","\u5DE6\u65B9\u62EC\u53F7\u5B57\u7B26\u6216\u5B57\u7B26\u4E32\u5E8F\u5217\u3002","\u53F3\u65B9\u62EC\u53F7\u5B57\u7B26\u6216\u5B57\u7B26\u4E32\u5E8F\u5217\u3002","\u8D85\u65F6(\u4EE5\u6BEB\u79D2\u4E3A\u5355\u4F4D)\uFF0C\u4E4B\u540E\u5C06\u53D6\u6D88\u5DEE\u5F02\u8BA1\u7B97\u3002\u4F7F\u75280\u8868\u793A\u6CA1\u6709\u8D85\u65F6\u3002","\u8981\u4E3A\u5176\u8BA1\u7B97\u5DEE\u5F02\u7684\u6700\u5927\u6587\u4EF6\u5927\u5C0F(MB)\u3002\u4F7F\u7528 0 \u8868\u793A\u65E0\u9650\u5236\u3002","\u63A7\u5236\u5DEE\u5F02\u7F16\u8F91\u5668\u7684\u663E\u793A\u65B9\u5F0F\u662F\u5E76\u6392\u8FD8\u662F\u5185\u8054\u3002","\u542F\u7528\u540E\uFF0C\u5DEE\u5F02\u7F16\u8F91\u5668\u5C06\u5FFD\u7565\u524D\u5BFC\u7A7A\u683C\u6216\u5C3E\u968F\u7A7A\u683C\u4E2D\u7684\u66F4\u6539\u3002","\u63A7\u5236\u5DEE\u5F02\u7F16\u8F91\u5668\u662F\u5426\u4E3A\u6DFB\u52A0/\u5220\u9664\u7684\u66F4\u6539\u663E\u793A +/- \u6307\u793A\u7B26\u53F7\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u663E\u793A CodeLens\u3002","\u6C38\u4E0D\u6362\u884C\u3002","\u5C06\u5728\u89C6\u533A\u5BBD\u5EA6\u5904\u6362\u884C\u3002","\u5C06\u6839\u636E `#editor.wordWrap#` \u8BBE\u7F6E\u6362\u884C\u3002"],"vs/editor/common/config/editorOptions":["\u7F16\u8F91\u5668\u5C06\u4F7F\u7528\u5E73\u53F0 API 
\u4EE5\u68C0\u6D4B\u662F\u5426\u9644\u52A0\u4E86\u5C4F\u5E55\u9605\u8BFB\u5668\u3002","\u7F16\u8F91\u5668\u5C06\u9488\u5BF9\u4E0E\u5C4F\u5E55\u9605\u8BFB\u5668\u642D\u914D\u4F7F\u7528\u8FDB\u884C\u6C38\u4E45\u4F18\u5316\u3002\u5C06\u7981\u7528\u81EA\u52A8\u6362\u884C\u3002","\u7F16\u8F91\u5668\u5C06\u4E0D\u518D\u5BF9\u5C4F\u5E55\u9605\u8BFB\u5668\u7684\u4F7F\u7528\u8FDB\u884C\u4F18\u5316\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u5728\u5BF9\u5C4F\u5E55\u9605\u8BFB\u5668\u8FDB\u884C\u4E86\u4F18\u5316\u7684\u6A21\u5F0F\u4E0B\u8FD0\u884C\u3002\u8BBE\u7F6E\u4E3A\u201C\u5F00\u201D\u5C06\u7981\u7528\u81EA\u52A8\u6362\u884C\u3002","\u63A7\u5236\u5728\u6CE8\u91CA\u65F6\u662F\u5426\u63D2\u5165\u7A7A\u683C\u5B57\u7B26\u3002","\u63A7\u5236\u5728\u5BF9\u884C\u6CE8\u91CA\u6267\u884C\u5207\u6362\u3001\u6DFB\u52A0\u6216\u5220\u9664\u64CD\u4F5C\u65F6\uFF0C\u662F\u5426\u5E94\u5FFD\u7565\u7A7A\u884C\u3002","\u63A7\u5236\u5728\u6CA1\u6709\u9009\u62E9\u5185\u5BB9\u65F6\u8FDB\u884C\u590D\u5236\u662F\u5426\u590D\u5236\u5F53\u524D\u884C\u3002","\u63A7\u5236\u5728\u952E\u5165\u65F6\u5149\u6807\u662F\u5426\u5E94\u8DF3\u8F6C\u4EE5\u67E5\u627E\u5339\u914D\u9879\u3002","\u5207\u52FF\u4E3A\u7F16\u8F91\u5668\u9009\u62E9\u4E2D\u7684\u641C\u7D22\u5B57\u7B26\u4E32\u8BBE\u5B9A\u79CD\u5B50\u3002","\u59CB\u7EC8\u4E3A\u7F16\u8F91\u5668\u9009\u62E9\u4E2D\u7684\u641C\u7D22\u5B57\u7B26\u4E32\u8BBE\u5B9A\u79CD\u5B50\uFF0C\u5305\u62EC\u5149\u6807\u4F4D\u7F6E\u7684\u5B57\u8BCD\u3002","\u4EC5\u4E3A\u7F16\u8F91\u5668\u9009\u62E9\u4E2D\u7684\u641C\u7D22\u5B57\u7B26\u4E32\u8BBE\u5B9A\u79CD\u5B50\u3002","\u63A7\u5236\u662F\u5426\u5C06\u7F16\u8F91\u5668\u9009\u4E2D\u5185\u5BB9\u4F5C\u4E3A\u641C\u7D22\u8BCD\u586B\u5165\u5230\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u3002","\u4ECE\u4E0D\u81EA\u52A8\u6253\u5F00\u201C\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E\u201D(\u9ED8\u8BA4)\u3002","\u59CB\u7EC8\u81EA\u52A8\u6253\u5F00\u201C\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E\u201D\u3002","\u9009\u62E9\u591A\u884C\u5185\u5BB9\u65F6\uFF0C\u81EA\u52A8\u6253\u5F00\u201C\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E\u201D\u3002","\u63A7\u5236\u81EA\u52A8\u6253\u5F00\u201C\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E\u201D\u7684\u6761\u4EF6\u3002","\u63A7\u5236\u201C\u67E5\u627E\u201D\u5C0F\u7EC4\u4EF6\u662F\u5426\u8BFB\u53D6\u6216\u4FEE\u6539 macOS \u7684\u5171\u4EAB\u67E5\u627E\u526A\u8D34\u677F\u3002",'\u63A7\u5236 "\u67E5\u627E\u5C0F\u90E8\u4EF6" \u662F\u5426\u5E94\u5728\u7F16\u8F91\u5668\u9876\u90E8\u6DFB\u52A0\u989D\u5916\u7684\u884C\u3002\u5982\u679C\u4E3A true, \u5219\u53EF\u4EE5\u5728 "\u67E5\u627E\u5C0F\u5DE5\u5177" \u53EF\u89C1\u65F6\u6EDA\u52A8\u5230\u7B2C\u4E00\u884C\u4E4B\u5916\u3002',"\u63A7\u5236\u5728\u627E\u4E0D\u5230\u5176\u4ED6\u5339\u914D\u9879\u65F6\uFF0C\u662F\u5426\u81EA\u52A8\u4ECE\u5F00\u5934(\u6216\u7ED3\u5C3E)\u91CD\u65B0\u5F00\u59CB\u641C\u7D22\u3002",'\u542F\u7528/\u7981\u7528\u5B57\u4F53\u8FDE\u5B57("calt" \u548C "liga" \u5B57\u4F53\u7279\u6027)\u3002\u5C06\u6B64\u66F4\u6539\u4E3A\u5B57\u7B26\u4E32\uFF0C\u53EF\u5BF9 "font-feature-settings" CSS \u5C5E\u6027\u8FDB\u884C\u7CBE\u7EC6\u63A7\u5236\u3002','\u663E\u5F0F "font-feature-settings" CSS \u5C5E\u6027\u3002\u5982\u679C\u53EA\u9700\u6253\u5F00/\u5173\u95ED\u8FDE\u5B57\uFF0C\u53EF\u4EE5\u6539\u4E3A\u4F20\u9012\u5E03\u5C14\u503C\u3002','\u914D\u7F6E\u5B57\u4F53\u8FDE\u5B57\u6216\u5B57\u4F53\u7279\u6027\u3002\u53EF\u4EE5\u662F\u7528\u4E8E\u542F\u7528/\u7981\u7528\u8FDE\u5B57\u7684\u5E03\u5C14\u503C\uFF0C\u6216\u7528\u4E8E\u8BBE\u7F6E CSS 
"font-feature-settings" \u5C5E\u6027\u503C\u7684\u5B57\u7B26\u4E32\u3002',"\u63A7\u5236\u5B57\u4F53\u5927\u5C0F(\u50CF\u7D20)\u3002","\u4EC5\u5141\u8BB8\u4F7F\u7528\u5173\u952E\u5B57\u201C\u6B63\u5E38\u201D\u548C\u201C\u52A0\u7C97\u201D\uFF0C\u6216\u4F7F\u7528\u4ECB\u4E8E 1 \u81F3 1000 \u4E4B\u95F4\u7684\u6570\u5B57\u3002","\u63A7\u5236\u5B57\u4F53\u7C97\u7EC6\u3002\u63A5\u53D7\u5173\u952E\u5B57\u201C\u6B63\u5E38\u201D\u548C\u201C\u52A0\u7C97\u201D\uFF0C\u6216\u8005\u63A5\u53D7\u4ECB\u4E8E 1 \u81F3 1000 \u4E4B\u95F4\u7684\u6570\u5B57\u3002","\u663E\u793A\u7ED3\u679C\u7684\u9884\u89C8\u89C6\u56FE (\u9ED8\u8BA4\u503C)","\u8F6C\u5230\u4E3B\u7ED3\u679C\u5E76\u663E\u793A\u9884\u89C8\u89C6\u56FE","\u8F6C\u5230\u4E3B\u7ED3\u679C\uFF0C\u5E76\u5BF9\u5176\u4ED6\u4EBA\u542F\u7528\u9632\u5077\u7AA5\u5BFC\u822A",'\u6B64\u8BBE\u7F6E\u5DF2\u5F03\u7528\uFF0C\u8BF7\u6539\u7528\u5355\u72EC\u7684\u8BBE\u7F6E\uFF0C\u5982"editor.editor.gotoLocation.multipleDefinitions"\u6216"editor.editor.gotoLocation.multipleImplementations"\u3002','\u63A7\u5236\u5B58\u5728\u591A\u4E2A\u76EE\u6807\u4F4D\u7F6E\u65F6"\u8F6C\u5230\u5B9A\u4E49"\u547D\u4EE4\u7684\u884C\u4E3A\u3002','\u63A7\u5236\u5B58\u5728\u591A\u4E2A\u76EE\u6807\u4F4D\u7F6E\u65F6"\u8F6C\u5230\u7C7B\u578B\u5B9A\u4E49"\u547D\u4EE4\u7684\u884C\u4E3A\u3002','\u63A7\u5236\u5B58\u5728\u591A\u4E2A\u76EE\u6807\u4F4D\u7F6E\u65F6"\u8F6C\u5230\u58F0\u660E"\u547D\u4EE4\u7684\u884C\u4E3A\u3002','\u63A7\u5236\u5B58\u5728\u591A\u4E2A\u76EE\u6807\u4F4D\u7F6E\u65F6"\u8F6C\u5230\u5B9E\u73B0"\u547D\u4EE4\u7684\u884C\u4E3A\u3002','\u63A7\u5236\u5B58\u5728\u591A\u4E2A\u76EE\u6807\u4F4D\u7F6E\u65F6"\u8F6C\u5230\u5F15\u7528"\u547D\u4EE4\u7684\u884C\u4E3A\u3002','\u5F53"\u8F6C\u5230\u5B9A\u4E49"\u7684\u7ED3\u679C\u4E3A\u5F53\u524D\u4F4D\u7F6E\u65F6\u5C06\u8981\u6267\u884C\u7684\u66FF\u4EE3\u547D\u4EE4\u7684 ID\u3002','\u5F53"\u8F6C\u5230\u7C7B\u578B\u5B9A\u4E49"\u7684\u7ED3\u679C\u662F\u5F53\u524D\u4F4D\u7F6E\u65F6\u6B63\u5728\u6267\u884C\u7684\u5907\u7528\u547D\u4EE4 ID\u3002','\u5F53"\u8F6C\u5230\u58F0\u660E"\u7684\u7ED3\u679C\u4E3A\u5F53\u524D\u4F4D\u7F6E\u65F6\u5C06\u8981\u6267\u884C\u7684\u66FF\u4EE3\u547D\u4EE4\u7684 ID\u3002','\u5F53"\u8F6C\u5230\u5B9E\u73B0"\u7684\u7ED3\u679C\u4E3A\u5F53\u524D\u4F4D\u7F6E\u65F6\u5C06\u8981\u6267\u884C\u7684\u66FF\u4EE3\u547D\u4EE4\u7684 ID\u3002','\u5F53"\u8F6C\u5230\u5F15\u7528"\u7684\u7ED3\u679C\u662F\u5F53\u524D\u4F4D\u7F6E\u65F6\u6B63\u5728\u6267\u884C\u7684\u66FF\u4EE3\u547D\u4EE4 ID\u3002',"\u63A7\u5236\u662F\u5426\u663E\u793A\u60AC\u505C\u63D0\u793A\u3002","\u63A7\u5236\u663E\u793A\u60AC\u505C\u63D0\u793A\u524D\u7684\u7B49\u5F85\u65F6\u95F4 (\u6BEB\u79D2)\u3002","\u63A7\u5236\u5F53\u9F20\u6807\u79FB\u52A8\u5230\u60AC\u505C\u63D0\u793A\u4E0A\u65F6\uFF0C\u5176\u662F\u5426\u4FDD\u6301\u53EF\u89C1\u3002","\u5982\u679C\u6709\u7A7A\u95F4\uFF0C\u9996\u9009\u5728\u7EBF\u6761\u4E0A\u65B9\u663E\u793A\u60AC\u505C\u3002","\u5728\u7F16\u8F91\u5668\u4E2D\u542F\u7528\u4EE3\u7801\u64CD\u4F5C\u5C0F\u706F\u6CE1\u63D0\u793A\u3002","\u5728\u7F16\u8F91\u5668\u4E2D\u542F\u7528\u5185\u8054\u63D0\u793A\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u4E2D\u5185\u5D4C\u63D0\u793A\u7684\u5B57\u53F7\u3002\u5F53\u914D\u7F6E\u7684\u503C\u5C0F\u4E8E `5` \u6216\u5927\u4E8E\u7F16\u8F91\u5668\u5B57\u53F7\u65F6\uFF0C\u9ED8\u8BA4\u4F7F\u7528 90% \u7684 `#editor.fontSize#`\u3002","\u5728\u7F16\u8F91\u5668\u4E2D\u63A7\u5236\u5185\u5D4C\u63D0\u793A\u7684\u5B57\u4F53\u7CFB\u5217\u3002\u8BBE\u7F6E\u4E3A\u7A7A\u65F6\uFF0C\u4F7F\u7528 
`#editor.fontFamily#`\u3002",`\u63A7\u5236\u884C\u9AD8\u3002\r
- \u4F7F\u7528 0 \u6839\u636E\u5B57\u53F7\u81EA\u52A8\u8BA1\u7B97\u884C\u9AD8\u3002\r
- \u4ECB\u4E8E 0 \u548C 8 \u4E4B\u95F4\u7684\u503C\u5C06\u7528\u4F5C\u5B57\u53F7\u7684\u4E58\u6570\u3002\r
- \u5927\u4E8E\u6216\u7B49\u4E8E 8 \u7684\u503C\u5C06\u7528\u4F5C\u6709\u6548\u503C\u3002`,"\u63A7\u5236\u662F\u5426\u663E\u793A\u7F29\u7565\u56FE\u3002","\u8FF7\u4F60\u5730\u56FE\u7684\u5927\u5C0F\u4E0E\u7F16\u8F91\u5668\u5185\u5BB9\u76F8\u540C(\u5E76\u4E14\u53EF\u80FD\u6EDA\u52A8)\u3002","\u8FF7\u4F60\u5730\u56FE\u5C06\u6839\u636E\u9700\u8981\u62C9\u4F38\u6216\u7F29\u5C0F\u4EE5\u586B\u5145\u7F16\u8F91\u5668\u7684\u9AD8\u5EA6(\u4E0D\u6EDA\u52A8)\u3002","\u8FF7\u4F60\u5730\u56FE\u5C06\u6839\u636E\u9700\u8981\u7F29\u5C0F\uFF0C\u6C38\u8FDC\u4E0D\u4F1A\u5927\u4E8E\u7F16\u8F91\u5668(\u4E0D\u6EDA\u52A8)\u3002","\u63A7\u5236\u8FF7\u4F60\u5730\u56FE\u7684\u5927\u5C0F\u3002","\u63A7\u5236\u5728\u54EA\u4E00\u4FA7\u663E\u793A\u7F29\u7565\u56FE\u3002","\u63A7\u5236\u4F55\u65F6\u663E\u793A\u8FF7\u4F60\u5730\u56FE\u6ED1\u5757\u3002","\u5728\u8FF7\u4F60\u5730\u56FE\u4E2D\u7ED8\u5236\u7684\u5185\u5BB9\u6BD4\u4F8B: 1\u30012 \u6216 3\u3002","\u6E32\u67D3\u6BCF\u884C\u7684\u5B9E\u9645\u5B57\u7B26\uFF0C\u800C\u4E0D\u662F\u8272\u5757\u3002","\u9650\u5236\u7F29\u7565\u56FE\u7684\u5BBD\u5EA6\uFF0C\u63A7\u5236\u5176\u6700\u591A\u663E\u793A\u7684\u5217\u6570\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u7684\u9876\u8FB9\u548C\u7B2C\u4E00\u884C\u4E4B\u95F4\u7684\u95F4\u8DDD\u91CF\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u7684\u5E95\u8FB9\u548C\u6700\u540E\u4E00\u884C\u4E4B\u95F4\u7684\u95F4\u8DDD\u91CF\u3002","\u5728\u8F93\u5165\u65F6\u663E\u793A\u542B\u6709\u53C2\u6570\u6587\u6863\u548C\u7C7B\u578B\u4FE1\u606F\u7684\u5C0F\u9762\u677F\u3002","\u63A7\u5236\u53C2\u6570\u63D0\u793A\u83DC\u5355\u5728\u5230\u8FBE\u5217\u8868\u672B\u5C3E\u65F6\u8FDB\u884C\u5FAA\u73AF\u8FD8\u662F\u5173\u95ED\u3002","\u5728\u5B57\u7B26\u4E32\u5185\u542F\u7528\u5FEB\u901F\u5EFA\u8BAE\u3002","\u5728\u6CE8\u91CA\u5185\u542F\u7528\u5FEB\u901F\u5EFA\u8BAE\u3002","\u5728\u5B57\u7B26\u4E32\u548C\u6CE8\u91CA\u5916\u542F\u7528\u5FEB\u901F\u5EFA\u8BAE\u3002","\u63A7\u5236\u662F\u5426\u5728\u952E\u5165\u65F6\u81EA\u52A8\u663E\u793A\u5EFA\u8BAE\u3002","\u4E0D\u663E\u793A\u884C\u53F7\u3002","\u5C06\u884C\u53F7\u663E\u793A\u4E3A\u7EDD\u5BF9\u884C\u6570\u3002","\u5C06\u884C\u53F7\u663E\u793A\u4E3A\u4E0E\u5149\u6807\u76F8\u9694\u7684\u884C\u6570\u3002","\u6BCF 10 
\u884C\u663E\u793A\u4E00\u6B21\u884C\u53F7\u3002","\u63A7\u5236\u884C\u53F7\u7684\u663E\u793A\u3002","\u6B64\u7F16\u8F91\u5668\u6807\u5C3A\u5C06\u6E32\u67D3\u7684\u7B49\u5BBD\u5B57\u7B26\u6570\u3002","\u6B64\u7F16\u8F91\u5668\u6807\u5C3A\u7684\u989C\u8272\u3002","\u5728\u4E00\u5B9A\u6570\u91CF\u7684\u7B49\u5BBD\u5B57\u7B26\u540E\u663E\u793A\u5782\u76F4\u6807\u5C3A\u3002\u8F93\u5165\u591A\u4E2A\u503C\uFF0C\u663E\u793A\u591A\u4E2A\u6807\u5C3A\u3002\u82E5\u6570\u7EC4\u4E3A\u7A7A\uFF0C\u5219\u4E0D\u7ED8\u5236\u6807\u5C3A\u3002","\u5782\u76F4\u6EDA\u52A8\u6761\u4EC5\u5728\u5FC5\u8981\u65F6\u53EF\u89C1\u3002","\u5782\u76F4\u6EDA\u52A8\u6761\u5C06\u59CB\u7EC8\u53EF\u89C1\u3002","\u5782\u76F4\u6EDA\u52A8\u6761\u5C06\u59CB\u7EC8\u9690\u85CF\u3002","\u63A7\u5236\u5782\u76F4\u6EDA\u52A8\u6761\u7684\u53EF\u89C1\u6027\u3002","\u6C34\u5E73\u6EDA\u52A8\u6761\u4EC5\u5728\u5FC5\u8981\u65F6\u53EF\u89C1\u3002","\u6C34\u5E73\u6EDA\u52A8\u6761\u5C06\u59CB\u7EC8\u53EF\u89C1\u3002","\u6C34\u5E73\u6EDA\u52A8\u6761\u5C06\u59CB\u7EC8\u9690\u85CF\u3002","\u63A7\u5236\u6C34\u5E73\u6EDA\u52A8\u6761\u7684\u53EF\u89C1\u6027\u3002","\u5782\u76F4\u6EDA\u52A8\u6761\u7684\u5BBD\u5EA6\u3002","\u6C34\u5E73\u6EDA\u52A8\u6761\u7684\u9AD8\u5EA6\u3002","\u63A7\u5236\u5355\u51FB\u6309\u9875\u6EDA\u52A8\u8FD8\u662F\u8DF3\u8F6C\u5230\u5355\u51FB\u4F4D\u7F6E\u3002","\u63A7\u5236\u662F\u5426\u7A81\u51FA\u663E\u793A\u6240\u6709\u975E\u57FA\u672C ASCII \u5B57\u7B26\u3002\u53EA\u6709\u4ECB\u4E8E U+0020 \u5230 U+007E \u4E4B\u95F4\u7684\u5B57\u7B26\u3001\u5236\u8868\u7B26\u3001\u6362\u884C\u7B26\u548C\u56DE\u8F66\u7B26\u624D\u88AB\u89C6\u4E3A\u57FA\u672C ASCII\u3002","\u63A7\u5236\u662F\u5426\u7A81\u51FA\u663E\u793A\u4EC5\u4FDD\u7559\u7A7A\u683C\u6216\u5B8C\u5168\u6CA1\u6709\u5BBD\u5EA6\u7684\u5B57\u7B26\u3002","\u63A7\u5236\u662F\u5426\u7A81\u51FA\u663E\u793A\u53EF\u80FD\u4E0E\u57FA\u672C ASCII \u5B57\u7B26\u6DF7\u6DC6\u7684\u5B57\u7B26\uFF0C\u4F46\u5F53\u524D\u7528\u6237\u533A\u57DF\u8BBE\u7F6E\u4E2D\u5E38\u89C1\u7684\u5B57\u7B26\u9664\u5916\u3002","\u63A7\u5236\u6CE8\u91CA\u4E2D\u7684\u5B57\u7B26\u662F\u5426\u4E5F\u5E94\u8FDB\u884C Unicode \u7A81\u51FA\u663E\u793A\u3002","\u63A7\u5236\u5B57\u7B26\u4E32\u4E2D\u7684\u5B57\u7B26\u662F\u5426\u4E5F\u5E94\u8FDB\u884C unicode \u7A81\u51FA\u663E\u793A\u3002","\u5B9A\u4E49\u672A\u7A81\u51FA\u663E\u793A\u7684\u5141\u8BB8\u5B57\u7B26\u3002","\u672A\u7A81\u51FA\u663E\u793A\u5728\u5141\u8BB8\u533A\u57DF\u8BBE\u7F6E\u4E2D\u5E38\u89C1\u7684 Unicode \u5B57\u7B26\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u81EA\u52A8\u663E\u793A\u5185\u8054\u5EFA\u8BAE\u3002","\u63A7\u5236\u662F\u5426\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002\u4F7F\u7528 \u201Cworkbench.colorCustomizations\u201D 
\u66FF\u4EE3\u62EC\u53F7\u7A81\u51FA\u663E\u793A\u989C\u8272\u3002","\u542F\u7528\u62EC\u53F7\u5BF9\u53C2\u8003\u7EBF\u3002","\u4EC5\u4E3A\u6D3B\u52A8\u62EC\u53F7\u5BF9\u542F\u7528\u62EC\u53F7\u5BF9\u53C2\u8003\u7EBF\u3002","\u7981\u7528\u62EC\u53F7\u5BF9\u53C2\u8003\u7EBF\u3002","\u63A7\u5236\u662F\u5426\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u542F\u7528\u6C34\u5E73\u53C2\u8003\u7EBF\u4F5C\u4E3A\u5782\u76F4\u62EC\u53F7\u5BF9\u53C2\u8003\u7EBF\u7684\u6DFB\u52A0\u9879\u3002","\u4EC5\u4E3A\u6D3B\u52A8\u62EC\u53F7\u5BF9\u542F\u7528\u6C34\u5E73\u53C2\u8003\u7EBF\u3002","\u7981\u7528\u6C34\u5E73\u62EC\u53F7\u5BF9\u53C2\u8003\u7EBF\u3002","\u63A7\u5236\u662F\u5426\u542F\u7528\u6C34\u5E73\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u7A81\u51FA\u663E\u793A\u6D3B\u52A8\u7684\u62EC\u53F7\u5BF9\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u663E\u793A\u7F29\u8FDB\u53C2\u8003\u7EBF\u3002","\u63A7\u5236\u662F\u5426\u7A81\u51FA\u663E\u793A\u7F16\u8F91\u5668\u4E2D\u6D3B\u52A8\u7684\u7F29\u8FDB\u53C2\u8003\u7EBF\u3002","\u63D2\u5165\u5EFA\u8BAE\u800C\u4E0D\u8986\u76D6\u5149\u6807\u53F3\u4FA7\u7684\u6587\u672C\u3002","\u63D2\u5165\u5EFA\u8BAE\u5E76\u8986\u76D6\u5149\u6807\u53F3\u4FA7\u7684\u6587\u672C\u3002","\u63A7\u5236\u63A5\u53D7\u8865\u5168\u65F6\u662F\u5426\u8986\u76D6\u5355\u8BCD\u3002\u8BF7\u6CE8\u610F\uFF0C\u8FD9\u53D6\u51B3\u4E8E\u6269\u5C55\u9009\u62E9\u4F7F\u7528\u6B64\u529F\u80FD\u3002","\u63A7\u5236\u5BF9\u5EFA\u8BAE\u7684\u7B5B\u9009\u548C\u6392\u5E8F\u662F\u5426\u8003\u8651\u5C0F\u7684\u62FC\u5199\u9519\u8BEF\u3002","\u63A7\u5236\u6392\u5E8F\u65F6\u662F\u5426\u9996\u9009\u5149\u6807\u9644\u8FD1\u7684\u5B57\u8BCD\u3002","\u63A7\u5236\u662F\u5426\u5728\u591A\u4E2A\u5DE5\u4F5C\u533A\u548C\u7A97\u53E3\u95F4\u5171\u4EAB\u8BB0\u5FC6\u7684\u5EFA\u8BAE\u9009\u9879(\u9700\u8981 `#editor.suggestSelection#`)\u3002","\u63A7\u5236\u6D3B\u52A8\u4EE3\u7801\u6BB5\u662F\u5426\u963B\u6B62\u5FEB\u901F\u5EFA\u8BAE\u3002","\u63A7\u5236\u662F\u5426\u5728\u5EFA\u8BAE\u4E2D\u663E\u793A\u6216\u9690\u85CF\u56FE\u6807\u3002","\u63A7\u5236\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u5E95\u90E8\u7684\u72B6\u6001\u680F\u7684\u53EF\u89C1\u6027\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u9884\u89C8\u5EFA\u8BAE\u7ED3\u679C\u3002","\u63A7\u5236\u5EFA\u8BAE\u8BE6\u7EC6\u4FE1\u606F\u662F\u968F\u6807\u7B7E\u4E00\u8D77\u663E\u793A\u8FD8\u662F\u4EC5\u663E\u793A\u5728\u8BE6\u7EC6\u4FE1\u606F\u5C0F\u7EC4\u4EF6\u4E2D","\u6B64\u8BBE\u7F6E\u5DF2\u5F03\u7528\u3002\u73B0\u5728\u53EF\u4EE5\u8C03\u6574\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u7684\u5927\u5C0F\u3002",'\u6B64\u8BBE\u7F6E\u5DF2\u5F03\u7528\uFF0C\u8BF7\u6539\u7528\u5355\u72EC\u7684\u8BBE\u7F6E\uFF0C\u5982"editor.suggest.showKeywords"\u6216"editor.suggest.showSnippets"\u3002',"\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u65B9\u6CD5\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u51FD\u6570\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u6784\u9020\u51FD\u6570\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5DF2\u542F\u7528\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5B57\u6BB5\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u53D8\u91CF\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u7C7B\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense 
\u5C06\u663E\u793A\u201C\u7ED3\u6784\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u63A5\u53E3\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u6A21\u5757\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5C5E\u6027\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u4E8B\u4EF6\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u64CD\u4F5C\u7B26\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5355\u4F4D\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u503C\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5E38\u91CF\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u679A\u4E3E\u201D\u5EFA\u8BAE\u3002",'\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A "enumMember" \u5EFA\u8BAE\u3002',"\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u5173\u952E\u5B57\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u6587\u672C\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u989C\u8272\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u6587\u4EF6\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u53C2\u8003\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u81EA\u5B9A\u4E49\u989C\u8272\u201D\u5EFA\u8BAE\u3002","\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u6587\u4EF6\u5939\u201D\u5EFA\u8BAE\u3002",'\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A "typeParameter" \u5EFA\u8BAE\u3002',"\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A\u201C\u7247\u6BB5\u201D\u5EFA\u8BAE\u3002",'\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A"\u7528\u6237"\u5EFA\u8BAE\u3002','\u542F\u7528\u540E\uFF0CIntelliSense \u5C06\u663E\u793A"\u95EE\u9898"\u5EFA\u8BAE\u3002',"\u662F\u5426\u5E94\u59CB\u7EC8\u9009\u62E9\u524D\u5BFC\u548C\u5C3E\u968F\u7A7A\u683C\u3002","\u63A7\u5236\u662F\u5426\u5E94\u5728\u9047\u5230\u63D0\u4EA4\u5B57\u7B26\u65F6\u63A5\u53D7\u5EFA\u8BAE\u3002\u4F8B\u5982\uFF0C\u5728 JavaScript \u4E2D\uFF0C\u534A\u89D2\u5206\u53F7 (`;`) \u53EF\u4EE5\u4E3A\u63D0\u4EA4\u5B57\u7B26\uFF0C\u80FD\u591F\u5728\u63A5\u53D7\u5EFA\u8BAE\u7684\u540C\u65F6\u952E\u5165\u8BE5\u5B57\u7B26\u3002","\u4EC5\u5F53\u5EFA\u8BAE\u5305\u542B\u6587\u672C\u6539\u52A8\u65F6\u624D\u53EF\u4F7F\u7528 `Enter` \u952E\u8FDB\u884C\u63A5\u53D7\u3002","\u63A7\u5236\u9664\u4E86 `Tab` \u952E\u4EE5\u5916\uFF0C `Enter` \u952E\u662F\u5426\u540C\u6837\u53EF\u4EE5\u63A5\u53D7\u5EFA\u8BAE\u3002\u8FD9\u80FD\u51CF\u5C11\u201C\u63D2\u5165\u65B0\u884C\u201D\u548C\u201C\u63A5\u53D7\u5EFA\u8BAE\u201D\u547D\u4EE4\u4E4B\u95F4\u7684\u6B67\u4E49\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u4E2D\u53EF\u7531\u5C4F\u5E55\u9605\u8BFB\u5668\u4E00\u6B21\u8BFB\u51FA\u7684\u884C\u6570\u3002\u6211\u4EEC\u68C0\u6D4B\u5230\u5C4F\u5E55\u9605\u8BFB\u5668\u65F6\uFF0C\u4F1A\u81EA\u52A8\u5C06\u9ED8\u8BA4\u503C\u8BBE\u7F6E\u4E3A 500\u3002\u8B66\u544A: 
\u5982\u679C\u884C\u6570\u5927\u4E8E\u9ED8\u8BA4\u503C\uFF0C\u53EF\u80FD\u4F1A\u5F71\u54CD\u6027\u80FD\u3002","\u7F16\u8F91\u5668\u5185\u5BB9","\u4F7F\u7528\u8BED\u8A00\u914D\u7F6E\u786E\u5B9A\u4F55\u65F6\u81EA\u52A8\u95ED\u5408\u62EC\u53F7\u3002","\u4EC5\u5F53\u5149\u6807\u4F4D\u4E8E\u7A7A\u767D\u5B57\u7B26\u5DE6\u4FA7\u65F6\uFF0C\u624D\u81EA\u52A8\u95ED\u5408\u62EC\u53F7\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5728\u5DE6\u62EC\u53F7\u540E\u81EA\u52A8\u63D2\u5165\u53F3\u62EC\u53F7\u3002","\u4EC5\u5728\u81EA\u52A8\u63D2\u5165\u65F6\u624D\u5220\u9664\u76F8\u90BB\u7684\u53F3\u5F15\u53F7\u6216\u53F3\u62EC\u53F7\u3002","\u63A7\u5236\u5728\u5220\u9664\u65F6\u7F16\u8F91\u5668\u662F\u5426\u5E94\u5220\u9664\u76F8\u90BB\u7684\u53F3\u5F15\u53F7\u6216\u53F3\u65B9\u62EC\u53F7\u3002","\u4EC5\u5728\u81EA\u52A8\u63D2\u5165\u65F6\u624D\u6539\u5199\u53F3\u5F15\u53F7\u6216\u53F3\u62EC\u53F7\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u6539\u5199\u53F3\u5F15\u53F7\u6216\u53F3\u62EC\u53F7\u3002","\u4F7F\u7528\u8BED\u8A00\u914D\u7F6E\u786E\u5B9A\u4F55\u65F6\u81EA\u52A8\u95ED\u5408\u5F15\u53F7\u3002","\u4EC5\u5F53\u5149\u6807\u4F4D\u4E8E\u7A7A\u767D\u5B57\u7B26\u5DE6\u4FA7\u65F6\uFF0C\u624D\u81EA\u52A8\u95ED\u5408\u5F15\u53F7\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5728\u5DE6\u5F15\u53F7\u540E\u81EA\u52A8\u63D2\u5165\u53F3\u5F15\u53F7\u3002","\u7F16\u8F91\u5668\u4E0D\u4F1A\u81EA\u52A8\u63D2\u5165\u7F29\u8FDB\u3002","\u7F16\u8F91\u5668\u5C06\u4FDD\u7559\u5F53\u524D\u884C\u7684\u7F29\u8FDB\u3002","\u7F16\u8F91\u5668\u5C06\u4FDD\u7559\u5F53\u524D\u884C\u7684\u7F29\u8FDB\u5E76\u9075\u5FAA\u8BED\u8A00\u5B9A\u4E49\u7684\u62EC\u53F7\u3002","\u7F16\u8F91\u5668\u5C06\u4FDD\u7559\u5F53\u524D\u884C\u7684\u7F29\u8FDB\u3001\u4F7F\u7528\u8BED\u8A00\u5B9A\u4E49\u7684\u62EC\u53F7\u5E76\u8C03\u7528\u8BED\u8A00\u5B9A\u4E49\u7684\u7279\u5B9A onEnterRules\u3002","\u7F16\u8F91\u5668\u5C06\u4FDD\u7559\u5F53\u524D\u884C\u7684\u7F29\u8FDB\uFF0C\u4F7F\u7528\u8BED\u8A00\u5B9A\u4E49\u7684\u62EC\u53F7\uFF0C\u8C03\u7528\u7531\u8BED\u8A00\u5B9A\u4E49\u7684\u7279\u6B8A\u8F93\u5165\u89C4\u5219\uFF0C\u5E76\u9075\u5FAA\u7531\u8BED\u8A00\u5B9A\u4E49\u7684\u7F29\u8FDB\u89C4\u5219\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u5728\u7528\u6237\u952E\u5165\u3001\u7C98\u8D34\u3001\u79FB\u52A8\u6216\u7F29\u8FDB\u884C\u65F6\u81EA\u52A8\u8C03\u6574\u7F29\u8FDB\u3002","\u4F7F\u7528\u8BED\u8A00\u914D\u7F6E\u786E\u5B9A\u4F55\u65F6\u81EA\u52A8\u5305\u4F4F\u6240\u9009\u5185\u5BB9\u3002","\u4F7F\u7528\u5F15\u53F7\u800C\u975E\u62EC\u53F7\u6765\u5305\u4F4F\u6240\u9009\u5185\u5BB9\u3002","\u4F7F\u7528\u62EC\u53F7\u800C\u975E\u5F15\u53F7\u6765\u5305\u4F4F\u6240\u9009\u5185\u5BB9\u3002","\u63A7\u5236\u5728\u952E\u5165\u5F15\u53F7\u6216\u65B9\u62EC\u53F7\u65F6\uFF0C\u7F16\u8F91\u5668\u662F\u5426\u5E94\u81EA\u52A8\u5C06\u6240\u9009\u5185\u5BB9\u62EC\u8D77\u6765\u3002","\u5728\u4F7F\u7528\u7A7A\u683C\u8FDB\u884C\u7F29\u8FDB\u65F6\u6A21\u62DF\u5236\u8868\u7B26\u7684\u9009\u62E9\u884C\u4E3A\u3002\u6240\u9009\u5185\u5BB9\u5C06\u59CB\u7EC8\u4F7F\u7528\u5236\u8868\u7B26\u505C\u6B62\u4F4D\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u663E\u793A CodeLens\u3002","\u63A7\u5236 CodeLens \u7684\u5B57\u4F53\u7CFB\u5217\u3002","\u63A7\u5236 CodeLens \u7684\u5B57\u53F7(\u4EE5\u50CF\u7D20\u4E3A\u5355\u4F4D)\u3002\u8BBE\u7F6E\u4E3A `0` \u65F6\uFF0C\u5C06\u4F7F\u7528 90% \u7684 
`#editor.fontSize#`\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u663E\u793A\u5185\u8054\u989C\u8272\u4FEE\u9970\u5668\u548C\u989C\u8272\u9009\u53D6\u5668\u3002","\u542F\u7528\u4F7F\u7528\u9F20\u6807\u548C\u952E\u8FDB\u884C\u5217\u9009\u62E9\u3002","\u63A7\u5236\u5728\u590D\u5236\u65F6\u662F\u5426\u540C\u65F6\u590D\u5236\u8BED\u6CD5\u9AD8\u4EAE\u3002","\u63A7\u5236\u5149\u6807\u7684\u52A8\u753B\u6837\u5F0F\u3002","\u63A7\u5236\u662F\u5426\u542F\u7528\u5E73\u6ED1\u63D2\u5165\u52A8\u753B\u3002","\u63A7\u5236\u5149\u6807\u6837\u5F0F\u3002",'\u63A7\u5236\u5149\u6807\u5468\u56F4\u53EF\u89C1\u7684\u524D\u7F6E\u884C\u548C\u5C3E\u968F\u884C\u7684\u6700\u5C0F\u6570\u76EE\u3002\u5728\u5176\u4ED6\u4E00\u4E9B\u7F16\u8F91\u5668\u4E2D\u79F0\u4E3A "scrollOff" \u6216 "scrollOffset"\u3002','\u4EC5\u5F53\u901A\u8FC7\u952E\u76D8\u6216 API \u89E6\u53D1\u65F6\uFF0C\u624D\u4F1A\u5F3A\u5236\u6267\u884C"\u5149\u6807\u73AF\u7ED5\u884C"\u3002','\u59CB\u7EC8\u5F3A\u5236\u6267\u884C "cursorSurroundingLines"','\u63A7\u5236\u4F55\u65F6\u5E94\u5F3A\u5236\u6267\u884C"\u5149\u6807\u73AF\u7ED5\u884C"\u3002',"\u5F53 `#editor.cursorStyle#` \u8BBE\u7F6E\u4E3A `line` \u65F6\uFF0C\u63A7\u5236\u5149\u6807\u7684\u5BBD\u5EA6\u3002","\u63A7\u5236\u5728\u7F16\u8F91\u5668\u4E2D\u662F\u5426\u5141\u8BB8\u901A\u8FC7\u62D6\u653E\u6765\u79FB\u52A8\u9009\u4E2D\u5185\u5BB9\u3002",'\u6309\u4E0B"Alt"\u65F6\u6EDA\u52A8\u901F\u5EA6\u500D\u589E\u3002',"\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u542F\u7528\u4E86\u4EE3\u7801\u6298\u53E0\u3002","\u4F7F\u7528\u7279\u5B9A\u4E8E\u8BED\u8A00\u7684\u6298\u53E0\u7B56\u7565(\u5982\u679C\u53EF\u7528)\uFF0C\u5426\u5219\u4F7F\u7528\u57FA\u4E8E\u7F29\u8FDB\u7684\u7B56\u7565\u3002","\u4F7F\u7528\u57FA\u4E8E\u7F29\u8FDB\u7684\u6298\u53E0\u7B56\u7565\u3002","\u63A7\u5236\u8BA1\u7B97\u6298\u53E0\u8303\u56F4\u7684\u7B56\u7565\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u7A81\u51FA\u663E\u793A\u6298\u53E0\u8303\u56F4\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u81EA\u52A8\u6298\u53E0\u5BFC\u5165\u8303\u56F4\u3002","\u53EF\u6298\u53E0\u533A\u57DF\u7684\u6700\u5927\u6570\u91CF\u3002\u5982\u679C\u5F53\u524D\u6E90\u5177\u6709\u5927\u91CF\u53EF\u6298\u53E0\u533A\u57DF\uFF0C\u90A3\u4E48\u589E\u52A0\u6B64\u503C\u53EF\u80FD\u4F1A\u5BFC\u81F4\u7F16\u8F91\u5668\u7684\u54CD\u5E94\u901F\u5EA6\u53D8\u6162\u3002","\u63A7\u5236\u5355\u51FB\u5DF2\u6298\u53E0\u7684\u884C\u540E\u9762\u7684\u7A7A\u5185\u5BB9\u662F\u5426\u4F1A\u5C55\u5F00\u8BE5\u884C\u3002","\u63A7\u5236\u5B57\u4F53\u7CFB\u5217\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u81EA\u52A8\u683C\u5F0F\u5316\u7C98\u8D34\u7684\u5185\u5BB9\u3002\u683C\u5F0F\u5316\u7A0B\u5E8F\u5FC5\u987B\u53EF\u7528\uFF0C\u5E76\u4E14\u80FD\u9488\u5BF9\u6587\u6863\u4E2D\u7684\u67D0\u4E00\u8303\u56F4\u8FDB\u884C\u683C\u5F0F\u5316\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u5728\u952E\u5165\u4E00\u884C\u540E\u662F\u5426\u81EA\u52A8\u683C\u5F0F\u5316\u8BE5\u884C\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u5448\u73B0\u5782\u76F4\u5B57\u5F62\u8FB9\u8DDD\u3002\u5B57\u5F62\u8FB9\u8DDD\u6700\u5E38\u7528\u4E8E\u8C03\u8BD5\u3002","\u63A7\u5236\u662F\u5426\u5728\u6982\u89C8\u6807\u5C3A\u4E2D\u9690\u85CF\u5149\u6807\u3002","\u63A7\u5236\u5B57\u6BCD\u95F4\u8DDD(\u50CF\u7D20)\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5DF2\u542F\u7528\u94FE\u63A5\u7F16\u8F91\u3002\u76F8\u5173\u7B26\u53F7(\u5982 HTML 
\u6807\u8BB0)\u5728\u7F16\u8F91\u65F6\u8FDB\u884C\u66F4\u65B0\uFF0C\u5177\u4F53\u7531\u8BED\u8A00\u800C\u5B9A\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u68C0\u6D4B\u94FE\u63A5\u5E76\u4F7F\u5176\u53EF\u88AB\u70B9\u51FB\u3002","\u7A81\u51FA\u663E\u793A\u5339\u914D\u7684\u62EC\u53F7\u3002","\u5BF9\u9F20\u6807\u6EDA\u8F6E\u6EDA\u52A8\u4E8B\u4EF6\u7684 `deltaX` \u548C `deltaY` \u4E58\u4E0A\u7684\u7CFB\u6570\u3002","\u6309\u4F4F `Ctrl` \u952E\u5E76\u6EDA\u52A8\u9F20\u6807\u6EDA\u8F6E\u65F6\u5BF9\u7F16\u8F91\u5668\u5B57\u4F53\u5927\u5C0F\u8FDB\u884C\u7F29\u653E\u3002","\u5F53\u591A\u4E2A\u5149\u6807\u91CD\u53E0\u65F6\u8FDB\u884C\u5408\u5E76\u3002","\u6620\u5C04\u4E3A `Ctrl` (Windows \u548C Linux) \u6216 `Command` (macOS)\u3002","\u6620\u5C04\u4E3A `Alt` (Windows \u548C Linux) \u6216 `Option` (macOS)\u3002","\u5728\u901A\u8FC7\u9F20\u6807\u6DFB\u52A0\u591A\u4E2A\u5149\u6807\u65F6\u4F7F\u7528\u7684\u4FEE\u6539\u952E\u3002\u201C\u8F6C\u5230\u5B9A\u4E49\u201D\u548C\u201C\u6253\u5F00\u94FE\u63A5\u201D\u529F\u80FD\u6240\u9700\u7684\u9F20\u6807\u52A8\u4F5C\u5C06\u4F1A\u76F8\u5E94\u8C03\u6574\uFF0C\u4E0D\u4E0E\u591A\u5149\u6807\u4FEE\u6539\u952E\u51B2\u7A81\u3002[\u9605\u8BFB\u8BE6\u7EC6\u4FE1\u606F](https://code.visualstudio.com/docs/editor/codebasics#_multicursor-modifier)\u3002","\u6BCF\u4E2A\u5149\u6807\u7C98\u8D34\u4E00\u884C\u6587\u672C\u3002","\u6BCF\u4E2A\u5149\u6807\u7C98\u8D34\u5168\u6587\u3002","\u63A7\u5236\u7C98\u8D34\u65F6\u7C98\u8D34\u6587\u672C\u7684\u884C\u8BA1\u6570\u4E0E\u5149\u6807\u8BA1\u6570\u76F8\u5339\u914D\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u7A81\u51FA\u663E\u793A\u8BED\u4E49\u7B26\u53F7\u7684\u5339\u914D\u9879\u3002","\u63A7\u5236\u662F\u5426\u5728\u6982\u89C8\u6807\u5C3A\u5468\u56F4\u7ED8\u5236\u8FB9\u6846\u3002","\u6253\u5F00\u901F\u89C8\u65F6\u805A\u7126\u6811","\u6253\u5F00\u9884\u89C8\u65F6\u5C06\u7126\u70B9\u653E\u5728\u7F16\u8F91\u5668\u4E0A","\u63A7\u5236\u662F\u5C06\u7126\u70B9\u653E\u5728\u5185\u8054\u7F16\u8F91\u5668\u4E0A\u8FD8\u662F\u653E\u5728\u9884\u89C8\u5C0F\u90E8\u4EF6\u4E2D\u7684\u6811\u4E0A\u3002",'\u63A7\u5236"\u8F6C\u5230\u5B9A\u4E49"\u9F20\u6807\u624B\u52BF\u662F\u5426\u59CB\u7EC8\u6253\u5F00\u9884\u89C8\u5C0F\u90E8\u4EF6\u3002',"\u63A7\u5236\u663E\u793A\u5FEB\u901F\u5EFA\u8BAE\u524D\u7684\u7B49\u5F85\u65F6\u95F4 (\u6BEB\u79D2)\u3002","\u63A7\u5236\u662F\u5426\u5728\u7F16\u8F91\u5668\u4E2D\u8F93\u5165\u65F6\u81EA\u52A8\u91CD\u547D\u540D\u3002",'\u5DF2\u5F03\u7528\uFF0C\u8BF7\u6539\u7528 "editor.linkedEditing"\u3002',"\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u663E\u793A\u63A7\u5236\u5B57\u7B26\u3002","\u5F53\u6587\u4EF6\u4EE5\u6362\u884C\u7B26\u7ED3\u675F\u65F6, 
\u5448\u73B0\u6700\u540E\u4E00\u884C\u7684\u884C\u53F7\u3002","\u540C\u65F6\u7A81\u51FA\u663E\u793A\u5BFC\u822A\u7EBF\u548C\u5F53\u524D\u884C\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u7684\u5F53\u524D\u884C\u8FDB\u884C\u9AD8\u4EAE\u663E\u793A\u7684\u65B9\u5F0F\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u4EC5\u5728\u7126\u70B9\u5728\u7F16\u8F91\u5668\u65F6\u7A81\u51FA\u663E\u793A\u5F53\u524D\u884C\u3002","\u5448\u73B0\u7A7A\u683C\u5B57\u7B26(\u5B57\u8BCD\u4E4B\u95F4\u7684\u5355\u4E2A\u7A7A\u683C\u9664\u5916)\u3002","\u4EC5\u5728\u9009\u5B9A\u6587\u672C\u4E0A\u5448\u73B0\u7A7A\u767D\u5B57\u7B26\u3002","\u4EC5\u5448\u73B0\u5C3E\u968F\u7A7A\u683C\u5B57\u7B26\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u5728\u7A7A\u767D\u5B57\u7B26\u4E0A\u663E\u793A\u7B26\u53F7\u7684\u65B9\u5F0F\u3002","\u63A7\u5236\u9009\u533A\u662F\u5426\u6709\u5706\u89D2\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u6C34\u5E73\u6EDA\u52A8\u65F6\u53EF\u4EE5\u8D85\u8FC7\u8303\u56F4\u7684\u5B57\u7B26\u6570\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u53EF\u4EE5\u6EDA\u52A8\u5230\u6700\u540E\u4E00\u884C\u4E4B\u540E\u3002","\u540C\u65F6\u5782\u76F4\u548C\u6C34\u5E73\u6EDA\u52A8\u65F6\uFF0C\u4EC5\u6CBF\u4E3B\u8F74\u6EDA\u52A8\u3002\u5728\u89E6\u63A7\u677F\u4E0A\u5782\u76F4\u6EDA\u52A8\u65F6\uFF0C\u53EF\u9632\u6B62\u6C34\u5E73\u6F02\u79FB\u3002","\u63A7\u5236\u662F\u5426\u652F\u6301 Linux \u4E3B\u526A\u8D34\u677F\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u5E94\u7A81\u51FA\u663E\u793A\u4E0E\u6240\u9009\u5185\u5BB9\u7C7B\u4F3C\u7684\u5339\u914D\u9879\u3002","\u59CB\u7EC8\u663E\u793A\u6298\u53E0\u63A7\u4EF6\u3002","\u4EC5\u5728\u9F20\u6807\u4F4D\u4E8E\u88C5\u8BA2\u7EBF\u4E0A\u65B9\u65F6\u663E\u793A\u6298\u53E0\u63A7\u4EF6\u3002","\u63A7\u5236\u4F55\u65F6\u663E\u793A\u884C\u53F7\u69FD\u4E0A\u7684\u6298\u53E0\u63A7\u4EF6\u3002","\u63A7\u5236\u662F\u5426\u6DE1\u5316\u672A\u4F7F\u7528\u7684\u4EE3\u7801\u3002","\u63A7\u5236\u52A0\u5220\u9664\u7EBF\u88AB\u5F03\u7528\u7684\u53D8\u91CF\u3002","\u5728\u5176\u4ED6\u5EFA\u8BAE\u4E0A\u65B9\u663E\u793A\u4EE3\u7801\u7247\u6BB5\u5EFA\u8BAE\u3002","\u5728\u5176\u4ED6\u5EFA\u8BAE\u4E0B\u65B9\u663E\u793A\u4EE3\u7801\u7247\u6BB5\u5EFA\u8BAE\u3002","\u5728\u5176\u4ED6\u5EFA\u8BAE\u4E2D\u7A7F\u63D2\u663E\u793A\u4EE3\u7801\u7247\u6BB5\u5EFA\u8BAE\u3002","\u4E0D\u663E\u793A\u4EE3\u7801\u7247\u6BB5\u5EFA\u8BAE\u3002","\u63A7\u5236\u4EE3\u7801\u7247\u6BB5\u662F\u5426\u4E0E\u5176\u4ED6\u5EFA\u8BAE\u4E00\u8D77\u663E\u793A\u53CA\u5176\u6392\u5217\u7684\u4F4D\u7F6E\u3002","\u63A7\u5236\u7F16\u8F91\u5668\u662F\u5426\u4F7F\u7528\u52A8\u753B\u6EDA\u52A8\u3002","\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u7684\u5B57\u53F7\u3002\u5982\u679C\u8BBE\u7F6E\u4E3A `0`\uFF0C\u5219\u4F7F\u7528 `#editor.fontSize#` \u7684\u503C\u3002","\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u7684\u884C\u9AD8\u3002\u5982\u679C\u8BBE\u7F6E\u4E3A `0`\uFF0C\u5219\u4F7F\u7528 `#editor.lineHeight#` \u7684\u503C\u3002\u6700\u5C0F\u503C\u4E3A 8\u3002","\u63A7\u5236\u5728\u952E\u5165\u89E6\u53D1\u5B57\u7B26\u540E\u662F\u5426\u81EA\u52A8\u663E\u793A\u5EFA\u8BAE\u3002","\u59CB\u7EC8\u9009\u62E9\u7B2C\u4E00\u4E2A\u5EFA\u8BAE\u3002","\u9009\u62E9\u6700\u8FD1\u7684\u5EFA\u8BAE\uFF0C\u9664\u975E\u8FDB\u4E00\u6B65\u952E\u5165\u9009\u62E9\u5176\u4ED6\u9879\u3002\u4F8B\u5982 `console. 
-> console.log`\uFF0C\u56E0\u4E3A\u6700\u8FD1\u8865\u5168\u8FC7 `log`\u3002","\u6839\u636E\u4E4B\u524D\u8865\u5168\u8FC7\u7684\u5EFA\u8BAE\u7684\u524D\u7F00\u6765\u8FDB\u884C\u9009\u62E9\u3002\u4F8B\u5982\uFF0C`co -> console`\u3001`con -> const`\u3002","\u63A7\u5236\u5728\u5EFA\u8BAE\u5217\u8868\u4E2D\u5982\u4F55\u9884\u5148\u9009\u62E9\u5EFA\u8BAE\u3002","\u5728\u6309\u4E0B Tab \u952E\u65F6\u8FDB\u884C Tab \u8865\u5168\uFF0C\u5C06\u63D2\u5165\u6700\u4F73\u5339\u914D\u5EFA\u8BAE\u3002","\u7981\u7528 Tab \u8865\u5168\u3002",'\u5728\u524D\u7F00\u5339\u914D\u65F6\u8FDB\u884C Tab \u8865\u5168\u3002\u5728 "quickSuggestions" \u672A\u542F\u7528\u65F6\u4F53\u9A8C\u6700\u597D\u3002',"\u542F\u7528 Tab \u8865\u5168\u3002","\u81EA\u52A8\u5220\u9664\u5F02\u5E38\u7684\u884C\u7EC8\u6B62\u7B26\u3002","\u5FFD\u7565\u5F02\u5E38\u7684\u884C\u7EC8\u6B62\u7B26\u3002","\u63D0\u793A\u5220\u9664\u5F02\u5E38\u7684\u884C\u7EC8\u6B62\u7B26\u3002","\u5220\u9664\u53EF\u80FD\u5BFC\u81F4\u95EE\u9898\u7684\u5F02\u5E38\u884C\u7EC8\u6B62\u7B26\u3002","\u6839\u636E\u5236\u8868\u4F4D\u63D2\u5165\u548C\u5220\u9664\u7A7A\u683C\u3002","\u6267\u884C\u5355\u8BCD\u76F8\u5173\u7684\u5BFC\u822A\u6216\u64CD\u4F5C\u65F6\u4F5C\u4E3A\u5355\u8BCD\u5206\u9694\u7B26\u7684\u5B57\u7B26\u3002","\u6C38\u4E0D\u6362\u884C\u3002","\u5C06\u5728\u89C6\u533A\u5BBD\u5EA6\u5904\u6362\u884C\u3002","\u5728 `#editor.wordWrapColumn#` \u5904\u6298\u884C\u3002","\u5728\u89C6\u533A\u5BBD\u5EA6\u548C `#editor.wordWrapColumn#` \u4E2D\u7684\u8F83\u5C0F\u503C\u5904\u6298\u884C\u3002","\u63A7\u5236\u6298\u884C\u7684\u65B9\u5F0F\u3002","\u5728 `#editor.wordWrap#` \u4E3A `wordWrapColumn` \u6216 `bounded` \u65F6\uFF0C\u63A7\u5236\u7F16\u8F91\u5668\u7684\u6298\u884C\u5217\u3002","\u6CA1\u6709\u7F29\u8FDB\u3002\u6298\u884C\u4ECE\u7B2C 1 \u5217\u5F00\u59CB\u3002","\u6298\u884C\u7684\u7F29\u8FDB\u91CF\u4E0E\u5176\u7236\u7EA7\u76F8\u540C\u3002","\u6298\u884C\u7684\u7F29\u8FDB\u91CF\u6BD4\u5176\u7236\u7EA7\u591A 1\u3002","\u6298\u884C\u7684\u7F29\u8FDB\u91CF\u6BD4\u5176\u7236\u7EA7\u591A 
2\u3002","\u63A7\u5236\u6298\u884C\u7684\u7F29\u8FDB\u3002","\u5047\u5B9A\u6240\u6709\u5B57\u7B26\u7684\u5BBD\u5EA6\u76F8\u540C\u3002\u8FD9\u662F\u4E00\u79CD\u5FEB\u901F\u7B97\u6CD5\uFF0C\u9002\u7528\u4E8E\u7B49\u5BBD\u5B57\u4F53\u548C\u67D0\u4E9B\u5B57\u5F62\u5BBD\u5EA6\u76F8\u7B49\u7684\u6587\u5B57(\u5982\u62C9\u4E01\u5B57\u7B26)\u3002","\u5C06\u5305\u88C5\u70B9\u8BA1\u7B97\u59D4\u6258\u7ED9\u6D4F\u89C8\u5668\u3002\u8FD9\u662F\u4E00\u4E2A\u7F13\u6162\u7B97\u6CD5\uFF0C\u53EF\u80FD\u4F1A\u5BFC\u81F4\u5927\u578B\u6587\u4EF6\u88AB\u51BB\u7ED3\uFF0C\u4F46\u5B83\u5728\u6240\u6709\u60C5\u51B5\u4E0B\u90FD\u6B63\u5E38\u5DE5\u4F5C\u3002","\u63A7\u5236\u8BA1\u7B97\u5305\u88F9\u70B9\u7684\u7B97\u6CD5\u3002"],"vs/editor/common/core/editorColorRegistry":["\u5149\u6807\u6240\u5728\u884C\u9AD8\u4EAE\u5185\u5BB9\u7684\u80CC\u666F\u989C\u8272\u3002","\u5149\u6807\u6240\u5728\u884C\u56DB\u5468\u8FB9\u6846\u7684\u80CC\u666F\u989C\u8272\u3002","\u80CC\u666F\u989C\u8272\u7684\u9AD8\u4EAE\u8303\u56F4\uFF0C\u559C\u6B22\u901A\u8FC7\u5FEB\u901F\u6253\u5F00\u548C\u67E5\u627E\u529F\u80FD\u3002\u989C\u8272\u4E0D\u80FD\u4E0D\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u5E95\u5C42\u88C5\u9970\u3002","\u9AD8\u4EAE\u533A\u57DF\u8FB9\u6846\u7684\u80CC\u666F\u989C\u8272\u3002","\u9AD8\u4EAE\u663E\u793A\u7B26\u53F7\u7684\u80CC\u666F\u989C\u8272\uFF0C\u4F8B\u5982\u8F6C\u5230\u5B9A\u4E49\u6216\u8F6C\u5230\u4E0B\u4E00\u4E2A/\u4E0A\u4E00\u4E2A\u7B26\u53F7\u3002\u989C\u8272\u4E0D\u80FD\u662F\u4E0D\u900F\u660E\u7684\uFF0C\u4EE5\u514D\u9690\u85CF\u5E95\u5C42\u88C5\u9970\u3002","\u9AD8\u4EAE\u663E\u793A\u7B26\u53F7\u5468\u56F4\u7684\u8FB9\u6846\u7684\u80CC\u666F\u989C\u8272\u3002","\u7F16\u8F91\u5668\u5149\u6807\u989C\u8272\u3002","\u7F16\u8F91\u5668\u5149\u6807\u7684\u80CC\u666F\u8272\u3002\u53EF\u4EE5\u81EA\u5B9A\u4E49\u5757\u578B\u5149\u6807\u8986\u76D6\u5B57\u7B26\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u4E2D\u7A7A\u767D\u5B57\u7B26\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u7F29\u8FDB\u53C2\u8003\u7EBF\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6D3B\u52A8\u7F29\u8FDB\u53C2\u8003\u7EBF\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u884C\u53F7\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6D3B\u52A8\u884C\u53F7\u7684\u989C\u8272",'"Id" \u5DF2\u88AB\u5F03\u7528\uFF0C\u8BF7\u6539\u7528 "editorLineNumber.activeForeground"\u3002',"\u7F16\u8F91\u5668\u6D3B\u52A8\u884C\u53F7\u7684\u989C\u8272","\u7F16\u8F91\u5668\u6807\u5C3A\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668 CodeLens \u7684\u524D\u666F\u8272","\u5339\u914D\u62EC\u53F7\u7684\u80CC\u666F\u8272","\u5339\u914D\u62EC\u53F7\u5916\u6846\u7684\u989C\u8272","\u6982\u89C8\u6807\u5C3A\u8FB9\u6846\u7684\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6982\u8FF0\u6807\u5C3A\u7684\u80CC\u666F\u8272\u3002\u4EC5\u5F53\u7F29\u7565\u56FE\u5DF2\u542F\u7528\u4E14\u7F6E\u4E8E\u7F16\u8F91\u5668\u53F3\u4FA7\u65F6\u624D\u4F7F\u7528\u3002","\u7F16\u8F91\u5668\u5BFC\u822A\u7EBF\u7684\u80CC\u666F\u8272\u3002\u5BFC\u822A\u7EBF\u5305\u62EC\u8FB9\u7F18\u7B26\u53F7\u548C\u884C\u53F7\u3002","\u7F16\u8F91\u5668\u4E2D\u4E0D\u5FC5\u8981(\u672A\u4F7F\u7528)\u7684\u6E90\u4EE3\u7801\u7684\u8FB9\u6846\u989C\u8272\u3002",'\u975E\u5FC5\u987B(\u672A\u4F7F\u7528)\u4EE3\u7801\u7684\u5728\u7F16\u8F91\u5668\u4E2D\u663E\u793A\u7684\u4E0D\u900F\u660E\u5EA6\u3002\u4F8B\u5982\uFF0C"#000000c0" \u5C06\u4EE5 75% \u7684\u4E0D\u900F\u660E\u5EA6\u663E\u793A\u4EE3\u7801\u3002\u5BF9\u4E8E\u9AD8\u5BF9\u6BD4\u5EA6\u4E3B\u9898\uFF0C\u8BF7\u4F7F\u7528 \u201DeditorUnnecessaryCode.border\u201C 
\u4E3B\u9898\u6765\u4E3A\u975E\u5FC5\u987B\u4EE3\u7801\u6DFB\u52A0\u4E0B\u5212\u7EBF\uFF0C\u4EE5\u907F\u514D\u989C\u8272\u6DE1\u5316\u3002',"\u7F16\u8F91\u5668\u4E2D\u865A\u5F71\u6587\u672C\u7684\u8FB9\u6846\u989C\u8272\u3002","\u7F16\u8F91\u5668\u4E2D\u865A\u5F71\u6587\u672C\u7684\u524D\u666F\u8272\u3002","\u7F16\u8F91\u5668\u4E2D\u865A\u5F71\u6587\u672C\u7684\u80CC\u666F\u8272\u3002","\u7528\u4E8E\u7A81\u51FA\u663E\u793A\u8303\u56F4\u7684\u6982\u8FF0\u6807\u5C3A\u6807\u8BB0\u989C\u8272\u3002\u989C\u8272\u5FC5\u987B\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u4E0B\u9762\u7684\u4FEE\u9970\u6548\u679C\u3002","\u6982\u89C8\u6807\u5C3A\u4E2D\u9519\u8BEF\u6807\u8BB0\u7684\u989C\u8272\u3002","\u6982\u89C8\u6807\u5C3A\u4E2D\u8B66\u544A\u6807\u8BB0\u7684\u989C\u8272\u3002","\u6982\u89C8\u6807\u5C3A\u4E2D\u4FE1\u606F\u6807\u8BB0\u7684\u989C\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(1)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(2)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(3)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(4)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(5)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u62EC\u53F7\u7684\u524D\u666F\u8272(6)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u7740\u8272\u3002","\u65B9\u62EC\u53F7\u51FA\u73B0\u610F\u5916\u7684\u524D\u666F\u8272\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(1)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(2)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(3)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(4)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(5)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u975E\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(6)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(1)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(2)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(3)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(4)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(5)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u6D3B\u52A8\u62EC\u53F7\u5BF9\u6307\u5357\u7684\u80CC\u666F\u8272(6)\u3002\u9700\u8981\u542F\u7528\u62EC\u53F7\u5BF9\u6307\u5357\u3002","\u7528\u4E8E\u7A81\u51FA\u663E\u793A Unicode 
\u5B57\u7B26\u7684\u8FB9\u6846\u989C\u8272\u3002"],"vs/editor/common/editorContextKeys":["\u7F16\u8F91\u5668\u6587\u672C\u662F\u5426\u5177\u6709\u7126\u70B9(\u5149\u6807\u662F\u5426\u95EA\u70C1)","\u7F16\u8F91\u5668\u6216\u7F16\u8F91\u5668\u5C0F\u7EC4\u4EF6\u662F\u5426\u5177\u6709\u7126\u70B9(\u4F8B\u5982\u7126\u70B9\u5728\u201C\u67E5\u627E\u201D\u5C0F\u7EC4\u4EF6\u4E2D)","\u7F16\u8F91\u5668\u6216 RTF \u8F93\u5165\u662F\u5426\u6709\u7126\u70B9(\u5149\u6807\u662F\u5426\u95EA\u70C1)","\u7F16\u8F91\u5668\u662F\u5426\u4E3A\u53EA\u8BFB","\u4E0A\u4E0B\u6587\u662F\u5426\u4E3A\u5DEE\u5F02\u7F16\u8F91\u5668",'\u662F\u5426\u5DF2\u542F\u7528 "editor.columnSelection"',"\u7F16\u8F91\u5668\u662F\u5426\u5DF2\u9009\u5B9A\u6587\u672C","\u7F16\u8F91\u5668\u662F\u5426\u6709\u591A\u4E2A\u9009\u62E9",'"Tab" \u662F\u5426\u5C06\u7126\u70B9\u79FB\u51FA\u7F16\u8F91\u5668',"\u7F16\u8F91\u5668\u8F6F\u952E\u76D8\u662F\u5426\u53EF\u89C1","\u8BE5\u7F16\u8F91\u5668\u662F\u5426\u662F\u66F4\u5927\u7684\u7F16\u8F91\u5668(\u4F8B\u5982\u7B14\u8BB0\u672C)\u7684\u4E00\u90E8\u5206","\u7F16\u8F91\u5668\u7684\u8BED\u8A00\u6807\u8BC6\u7B26","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u8865\u5168\u9879\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u4EE3\u7801\u64CD\u4F5C\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709 CodeLens \u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u5B9A\u4E49\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u58F0\u660E\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u5B9E\u73B0\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u7C7B\u578B\u5B9A\u4E49\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u60AC\u505C\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u6587\u6863\u7A81\u51FA\u663E\u793A\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u6587\u6863\u7B26\u53F7\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u5F15\u7528\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u91CD\u547D\u540D\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u7B7E\u540D\u5E2E\u52A9\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u5185\u8054\u63D0\u793A\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u6587\u6863\u683C\u5F0F\u8BBE\u7F6E\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u6587\u6863\u9009\u62E9\u683C\u5F0F\u8BBE\u7F6E\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u5177\u6709\u591A\u4E2A\u6587\u6863\u683C\u5F0F\u8BBE\u7F6E\u63D0\u4F9B\u7A0B\u5E8F","\u7F16\u8F91\u5668\u662F\u5426\u6709\u591A\u4E2A\u6587\u6863\u9009\u62E9\u683C\u5F0F\u8BBE\u7F6E\u63D0\u4F9B\u7A0B\u5E8F"],"vs/editor/common/languages/modesRegistry":["\u7EAF\u6587\u672C"],"vs/editor/common/model/editStack":["\u8F93\u5165"],"vs/editor/common/standaloneStrings":["\u65E0\u9009\u62E9","\u884C {0}, \u5217 {1} (\u9009\u4E2D {2})","\u884C {0}, \u5217 {1}","{0} \u9009\u62E9(\u5DF2\u9009\u62E9 {1} \u4E2A\u5B57\u7B26)","{0} \u9009\u62E9",'\u73B0\u5728\u5C06 "\u8F85\u52A9\u529F\u80FD\u652F\u6301" \u8BBE\u7F6E\u66F4\u6539\u4E3A 
"\u6253\u5F00"\u3002',"\u73B0\u5728\u6B63\u5728\u6253\u5F00\u201C\u7F16\u8F91\u5668\u8F85\u52A9\u529F\u80FD\u201D\u6587\u6863\u9875\u3002","\u5728\u5DEE\u5F02\u7F16\u8F91\u5668\u7684\u53EA\u8BFB\u7A97\u683C\u4E2D\u3002","\u5728\u4E00\u4E2A\u5DEE\u5F02\u7F16\u8F91\u5668\u7684\u7A97\u683C\u4E2D\u3002","\u5728\u53EA\u8BFB\u4EE3\u7801\u7F16\u8F91\u5668\u4E2D","\u5728\u4EE3\u7801\u7F16\u8F91\u5668\u4E2D","\u82E5\u8981\u914D\u7F6E\u7F16\u8F91\u5668\uFF0C\u5C06\u5176\u8FDB\u884C\u4F18\u5316\u4EE5\u6700\u597D\u5730\u914D\u5408\u5C4F\u5E55\u9605\u8BFB\u5668\u7684\u4F7F\u7528\uFF0C\u8BF7\u7ACB\u5373\u6309 Command+E\u3002","\u82E5\u8981\u914D\u7F6E\u7F16\u8F91\u5668\uFF0C\u5C06\u5176\u8FDB\u884C\u4F18\u5316\u4EE5\u6700\u9AD8\u6548\u5730\u914D\u5408\u5C4F\u5E55\u9605\u8BFB\u5668\u7684\u4F7F\u7528\uFF0C\u6309\u4E0B Ctrl+E\u3002","\u914D\u7F6E\u7F16\u8F91\u5668\uFF0C\u5C06\u5176\u8FDB\u884C\u4F18\u5316\u4EE5\u6700\u597D\u5730\u914D\u5408\u5C4F\u5E55\u8BFB\u53D6\u5668\u7684\u4F7F\u7528\u3002","\u7F16\u8F91\u5668\u88AB\u914D\u7F6E\u4E3A\u6C38\u8FDC\u4E0D\u8FDB\u884C\u4F18\u5316\u4EE5\u914D\u5408\u5C4F\u5E55\u8BFB\u53D6\u5668\u7684\u4F7F\u7528, \u800C\u5F53\u524D\u4E0D\u662F\u8FD9\u79CD\u60C5\u51B5\u3002","\u5728\u5F53\u524D\u7F16\u8F91\u5668\u4E2D\u6309 Tab \u4F1A\u5C06\u7126\u70B9\u79FB\u52A8\u5230\u4E0B\u4E00\u4E2A\u53EF\u805A\u7126\u7684\u5143\u7D20\u3002\u901A\u8FC7\u6309 {0} \u5207\u6362\u6B64\u884C\u4E3A\u3002","\u5728\u5F53\u524D\u7F16\u8F91\u5668\u4E2D\u6309 Tab \u4F1A\u5C06\u7126\u70B9\u79FB\u52A8\u5230\u4E0B\u4E00\u4E2A\u53EF\u805A\u7126\u7684\u5143\u7D20\u3002\u5F53\u524D\u65E0\u6CD5\u901A\u8FC7\u6309\u952E\u7ED1\u5B9A\u89E6\u53D1\u547D\u4EE4 {0}\u3002","\u5728\u5F53\u524D\u7F16\u8F91\u5668\u4E2D\u6309 Tab \u5C06\u63D2\u5165\u5236\u8868\u7B26\u3002\u901A\u8FC7\u6309 {0} \u5207\u6362\u6B64\u884C\u4E3A\u3002","\u5728\u5F53\u524D\u7F16\u8F91\u5668\u4E2D\u6309 Tab \u4F1A\u63D2\u5165\u5236\u8868\u7B26\u3002\u5F53\u524D\u65E0\u6CD5\u901A\u8FC7\u952E\u7ED1\u5B9A\u89E6\u53D1\u547D\u4EE4 {0}\u3002","\u73B0\u5728\u6309 Command+H \u6253\u5F00\u4E00\u4E2A\u6D4F\u89C8\u5668\u7A97\u53E3, \u5176\u4E2D\u5305\u542B\u6709\u5173\u7F16\u8F91\u5668\u8F85\u52A9\u529F\u80FD\u7684\u8BE6\u7EC6\u4FE1\u606F\u3002","\u73B0\u5728\u6309 Ctrl+H \u6253\u5F00\u4E00\u4E2A\u6D4F\u89C8\u5668\u7A97\u53E3, \u5176\u4E2D\u5305\u542B\u6709\u5173\u7F16\u8F91\u5668\u8F85\u52A9\u529F\u80FD\u7684\u66F4\u591A\u4FE1\u606F\u3002","\u4F60\u53EF\u4EE5\u6309 Esc \u6216 Shift+Esc \u6D88\u9664\u6B64\u5DE5\u5177\u63D0\u793A\u5E76\u8FD4\u56DE\u5230\u7F16\u8F91\u5668\u3002","\u663E\u793A\u8F85\u52A9\u529F\u80FD\u5E2E\u52A9","\u5F00\u53D1\u4EBA\u5458: \u68C0\u67E5\u4EE4\u724C","\u8F6C\u5230\u884C/\u5217...","\u663E\u793A\u6240\u6709\u5FEB\u901F\u8BBF\u95EE\u63D0\u4F9B\u7A0B\u5E8F","\u547D\u4EE4\u9762\u677F","\u663E\u793A\u5E76\u8FD0\u884C\u547D\u4EE4","\u8F6C\u5230\u7B26\u53F7...","\u6309\u7C7B\u522B\u8F6C\u5230\u7B26\u53F7...","\u7F16\u8F91\u5668\u5185\u5BB9","\u6309 Alt+F1 \u53EF\u6253\u5F00\u8F85\u52A9\u529F\u80FD\u9009\u9879\u3002","\u5207\u6362\u9AD8\u5BF9\u6BD4\u5EA6\u4E3B\u9898","\u5728 {1} \u4E2A\u6587\u4EF6\u4E2D\u8FDB\u884C\u4E86 {0} \u6B21\u7F16\u8F91"],"vs/editor/contrib/anchorSelect/browser/anchorSelect":["\u9009\u62E9\u5B9A\u4F4D\u70B9","\u5B9A\u4F4D\u70B9\u8BBE\u7F6E\u4E3A 
{0}:{1}","\u8BBE\u7F6E\u9009\u62E9\u5B9A\u4F4D\u70B9","\u8F6C\u5230\u9009\u62E9\u5B9A\u4F4D\u70B9","\u9009\u62E9\u4ECE\u5B9A\u4F4D\u70B9\u5230\u5149\u6807","\u53D6\u6D88\u9009\u62E9\u5B9A\u4F4D\u70B9"],"vs/editor/contrib/bracketMatching/browser/bracketMatching":["\u6982\u89C8\u6807\u5C3A\u4E0A\u8868\u793A\u5339\u914D\u62EC\u53F7\u7684\u6807\u8BB0\u989C\u8272\u3002","\u8F6C\u5230\u62EC\u53F7","\u9009\u62E9\u62EC\u53F7\u6240\u6709\u5185\u5BB9","\u8F6C\u5230\u62EC\u53F7(&&B)"],"vs/editor/contrib/caretOperations/browser/caretOperations":["\u5411\u5DE6\u79FB\u52A8\u6240\u9009\u6587\u672C","\u5411\u53F3\u79FB\u52A8\u6240\u9009\u6587\u672C"],"vs/editor/contrib/caretOperations/browser/transpose":["\u8F6C\u7F6E\u5B57\u6BCD"],"vs/editor/contrib/clipboard/browser/clipboard":["\u526A\u5207(&&T)","\u526A\u5207","\u526A\u5207","\u526A\u5207","\u590D\u5236(&&C)","\u590D\u5236","\u590D\u5236","\u590D\u5236","\u590D\u5236\u4E3A","\u590D\u5236\u4E3A","\u7C98\u8D34(&&P)","\u7C98\u8D34","\u7C98\u8D34","\u7C98\u8D34","\u590D\u5236\u5E76\u7A81\u51FA\u663E\u793A\u8BED\u6CD5"],"vs/editor/contrib/codeAction/browser/codeActionCommands":["\u8981\u8FD0\u884C\u7684\u4EE3\u7801\u64CD\u4F5C\u7684\u79CD\u7C7B\u3002","\u63A7\u5236\u4F55\u65F6\u5E94\u7528\u8FD4\u56DE\u7684\u64CD\u4F5C\u3002","\u59CB\u7EC8\u5E94\u7528\u7B2C\u4E00\u4E2A\u8FD4\u56DE\u7684\u4EE3\u7801\u64CD\u4F5C\u3002","\u5982\u679C\u4EC5\u8FD4\u56DE\u7684\u7B2C\u4E00\u4E2A\u4EE3\u7801\u64CD\u4F5C\uFF0C\u5219\u5E94\u7528\u8BE5\u64CD\u4F5C\u3002","\u4E0D\u8981\u5E94\u7528\u8FD4\u56DE\u7684\u4EE3\u7801\u64CD\u4F5C\u3002","\u5982\u679C\u53EA\u5E94\u8FD4\u56DE\u9996\u9009\u4EE3\u7801\u64CD\u4F5C\uFF0C\u5219\u5E94\u8FD4\u56DE\u63A7\u4EF6\u3002","\u5E94\u7528\u4EE3\u7801\u64CD\u4F5C\u65F6\u53D1\u751F\u672A\u77E5\u9519\u8BEF","\u5FEB\u901F\u4FEE\u590D...","\u6CA1\u6709\u53EF\u7528\u7684\u4EE3\u7801\u64CD\u4F5C",'\u6CA1\u6709\u9002\u7528\u4E8E"{0}"\u7684\u9996\u9009\u4EE3\u7801\u64CD\u4F5C','\u6CA1\u6709\u9002\u7528\u4E8E"{0}"\u7684\u4EE3\u7801\u64CD\u4F5C',"\u6CA1\u6709\u53EF\u7528\u7684\u9996\u9009\u4EE3\u7801\u64CD\u4F5C","\u6CA1\u6709\u53EF\u7528\u7684\u4EE3\u7801\u64CD\u4F5C","\u91CD\u6784...",'\u6CA1\u6709\u9002\u7528\u4E8E"{0}"\u7684\u9996\u9009\u91CD\u6784','\u6CA1\u6709\u53EF\u7528\u7684"{0}"\u91CD\u6784',"\u6CA1\u6709\u53EF\u7528\u7684\u9996\u9009\u91CD\u6784","\u6CA1\u6709\u53EF\u7528\u7684\u91CD\u6784\u64CD\u4F5C","\u6E90\u4EE3\u7801\u64CD\u4F5C...",'\u6CA1\u6709\u9002\u7528\u4E8E"{0}"\u7684\u9996\u9009\u6E90\u64CD\u4F5C',"\u6CA1\u6709\u9002\u7528\u4E8E\u201C {0}\u201D\u7684\u6E90\u64CD\u4F5C","\u6CA1\u6709\u53EF\u7528\u7684\u9996\u9009\u6E90\u64CD\u4F5C","\u6CA1\u6709\u53EF\u7528\u7684\u6E90\u4EE3\u7801\u64CD\u4F5C","\u6574\u7406 import \u8BED\u53E5","\u6CA1\u6709\u53EF\u7528\u7684\u6574\u7406 import \u8BED\u53E5\u64CD\u4F5C","\u5168\u90E8\u4FEE\u590D","\u6CA1\u6709\u53EF\u7528\u7684\u201C\u5168\u90E8\u4FEE\u590D\u201D\u64CD\u4F5C","\u81EA\u52A8\u4FEE\u590D...","\u6CA1\u6709\u53EF\u7528\u7684\u81EA\u52A8\u4FEE\u590D\u7A0B\u5E8F"],"vs/editor/contrib/codeAction/browser/lightBulbWidget":["\u663E\u793A\u4EE3\u7801\u64CD\u4F5C\u3002\u9996\u9009\u53EF\u7528\u7684\u5FEB\u901F\u4FEE\u590D({0})","\u663E\u793A\u4EE3\u7801\u64CD\u4F5C({0})","\u663E\u793A\u4EE3\u7801\u64CD\u4F5C"],"vs/editor/contrib/codelens/browser/codelensController":["\u663E\u793A\u5F53\u524D\u884C\u7684 Code Lens \u547D\u4EE4"],"vs/editor/contrib/colorPicker/browser/colorPickerWidget":["\u5355\u51FB\u4EE5\u5207\u6362\u989C\u8272\u9009\u9879 
(rgb/hsl/hex)"],"vs/editor/contrib/comment/browser/comment":["\u5207\u6362\u884C\u6CE8\u91CA","\u5207\u6362\u884C\u6CE8\u91CA(&&T)","\u6DFB\u52A0\u884C\u6CE8\u91CA","\u5220\u9664\u884C\u6CE8\u91CA","\u5207\u6362\u5757\u6CE8\u91CA","\u5207\u6362\u5757\u6CE8\u91CA(&&B)"],"vs/editor/contrib/contextmenu/browser/contextmenu":["\u663E\u793A\u7F16\u8F91\u5668\u4E0A\u4E0B\u6587\u83DC\u5355"],"vs/editor/contrib/cursorUndo/browser/cursorUndo":["\u5149\u6807\u64A4\u6D88","\u5149\u6807\u91CD\u505A"],"vs/editor/contrib/editorState/browser/keybindingCancellation":["\u7F16\u8F91\u5668\u662F\u5426\u8FD0\u884C\u53EF\u53D6\u6D88\u7684\u64CD\u4F5C\uFF0C\u4F8B\u5982\u201C\u9884\u89C8\u5F15\u7528\u201D"],"vs/editor/contrib/find/browser/findController":["\u67E5\u627E","\u67E5\u627E(&&F)",`\u91CD\u5199\u201C\u4F7F\u7528\u6B63\u5219\u8868\u8FBE\u5F0F\u201D\u6807\u8BB0\u3002\r
\u5C06\u4E0D\u4F1A\u4FDD\u7559\u8BE5\u6807\u8BB0\u4F9B\u5C06\u6765\u4F7F\u7528\u3002\r
0: \u4E0D\u6267\u884C\u4EFB\u4F55\u64CD\u4F5C\r
1: True\r
2: False`,`\u91CD\u5199\u201C\u5339\u914D\u6574\u4E2A\u5B57\u8BCD\u201D\u6807\u8BB0\u3002\r
\u5C06\u4E0D\u4F1A\u4FDD\u7559\u8BE5\u6807\u8BB0\u4F9B\u5C06\u6765\u4F7F\u7528\u3002\r
0: \u4E0D\u6267\u884C\u4EFB\u4F55\u64CD\u4F5C\r
1: True\r
2: False`,`\u91CD\u5199\u201C\u6570\u5B66\u6848\u4F8B\u201D\u6807\u8BB0\u3002\r
\u5C06\u4E0D\u4F1A\u4FDD\u7559\u8BE5\u6807\u8BB0\u4F9B\u5C06\u6765\u4F7F\u7528\u3002\r
0: \u4E0D\u6267\u884C\u4EFB\u4F55\u64CD\u4F5C\r
1: True\r
2: False`,`\u91CD\u5199\u201C\u4FDD\u7559\u670D\u52A1\u6848\u4F8B\u201D\u6807\u8BB0\u3002\r
\u5C06\u4E0D\u4F1A\u4FDD\u7559\u8BE5\u6807\u8BB0\u4F9B\u5C06\u6765\u4F7F\u7528\u3002\r
0: \u4E0D\u6267\u884C\u4EFB\u4F55\u64CD\u4F5C\r
1: True\r
2: False`,"\u4F7F\u7528\u53C2\u6570\u67E5\u627E","\u67E5\u627E\u9009\u5B9A\u5185\u5BB9","\u67E5\u627E\u4E0B\u4E00\u4E2A","\u67E5\u627E\u4E0A\u4E00\u4E2A","\u67E5\u627E\u4E0B\u4E00\u4E2A\u9009\u62E9","\u67E5\u627E\u4E0A\u4E00\u4E2A\u9009\u62E9","\u66FF\u6362","\u66FF\u6362(&&R)"],"vs/editor/contrib/find/browser/findWidget":["\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u201C\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E\u201D\u56FE\u6807\u3002","\u7528\u4E8E\u6307\u793A\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u5DF2\u6298\u53E0\u7684\u56FE\u6807\u3002","\u7528\u4E8E\u6307\u793A\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u5DF2\u5C55\u5F00\u7684\u56FE\u6807\u3002","\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u201C\u66FF\u6362\u201D\u56FE\u6807\u3002","\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u201C\u5168\u90E8\u66FF\u6362\u201D\u56FE\u6807\u3002","\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u201C\u67E5\u627E\u4E0A\u4E00\u4E2A\u201D\u56FE\u6807\u3002","\u7F16\u8F91\u5668\u67E5\u627E\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u201C\u67E5\u627E\u4E0B\u4E00\u4E2A\u201D\u56FE\u6807\u3002","\u67E5\u627E","\u67E5\u627E","\u4E0A\u4E00\u4E2A\u5339\u914D\u9879","\u4E0B\u4E00\u4E2A\u5339\u914D\u9879","\u5728\u9009\u5B9A\u5185\u5BB9\u4E2D\u67E5\u627E","\u5173\u95ED","\u66FF\u6362","\u66FF\u6362","\u66FF\u6362","\u5168\u90E8\u66FF\u6362","\u5207\u6362\u66FF\u6362","\u4EC5\u9AD8\u4EAE\u4E86\u524D {0} \u4E2A\u7ED3\u679C\uFF0C\u4F46\u6240\u6709\u67E5\u627E\u64CD\u4F5C\u5747\u9488\u5BF9\u5168\u6587\u3002","{1} \u4E2D\u7684 {0}","\u65E0\u7ED3\u679C","\u627E\u5230 {0}","\u4E3A\u201C{1}\u201D\u627E\u5230 {0}","\u5728 {2} \u5904\u627E\u5230\u201C{1}\u201D\u7684 {0}","\u4E3A\u201C{1}\u201D\u627E\u5230 {0}","Ctrl+Enter \u73B0\u5728\u7531\u5168\u90E8\u66FF\u6362\u6539\u4E3A\u63D2\u5165\u6362\u884C\u3002\u4F60\u53EF\u4EE5\u4FEE\u6539editor.action.replaceAll \u7684\u6309\u952E\u7ED1\u5B9A\u4EE5\u8986\u76D6\u6B64\u884C\u4E3A\u3002"],"vs/editor/contrib/folding/browser/folding":['\u53EF\u6298\u53E0\u533A\u57DF\u7684\u6570\u91CF\u9650\u5236\u4E3A\u6700\u591A {0} \u4E2A\u3002\u589E\u52A0\u914D\u7F6E\u9009\u9879[\u201C\u6700\u5927\u6298\u53E0\u533A\u57DF\u6570\u201D](command:workbench.action.openSettings?["editor.foldingMaximumRegions"])\u4EE5\u542F\u7528\u66F4\u591A\u529F\u80FD\u3002',"\u5C55\u5F00","\u4EE5\u9012\u5F52\u65B9\u5F0F\u5C55\u5F00","\u6298\u53E0","\u5207\u6362\u6298\u53E0","\u4EE5\u9012\u5F52\u65B9\u5F0F\u6298\u53E0","\u6298\u53E0\u6240\u6709\u5757\u6CE8\u91CA","\u6298\u53E0\u6240\u6709\u533A\u57DF","\u5C55\u5F00\u6240\u6709\u533A\u57DF","\u6298\u53E0\u9664\u6240\u9009\u533A\u57DF\u4E4B\u5916\u7684\u6240\u6709\u533A\u57DF","\u5C55\u5F00\u9664\u6240\u9009\u533A\u57DF\u4E4B\u5916\u7684\u6240\u6709\u533A\u57DF","\u5168\u90E8\u6298\u53E0","\u5168\u90E8\u5C55\u5F00","\u8DF3\u8F6C\u5230\u7236\u7EA7\u6298\u53E0","\u8F6C\u5230\u4E0A\u4E00\u4E2A\u6298\u53E0\u8303\u56F4","\u8F6C\u5230\u4E0B\u4E00\u4E2A\u6298\u53E0\u8303\u56F4","\u6298\u53E0\u7EA7\u522B 
{0}","\u6298\u53E0\u8303\u56F4\u540E\u9762\u7684\u80CC\u666F\u989C\u8272\u3002\u989C\u8272\u5FC5\u987B\u8BBE\u4E3A\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u5E95\u5C42\u88C5\u9970\u3002","\u7F16\u8F91\u5668\u88C5\u8BA2\u7EBF\u4E2D\u6298\u53E0\u63A7\u4EF6\u7684\u989C\u8272\u3002"],"vs/editor/contrib/folding/browser/foldingDecorations":["\u7F16\u8F91\u5668\u5B57\u5F62\u8FB9\u8DDD\u4E2D\u5DF2\u5C55\u5F00\u7684\u8303\u56F4\u7684\u56FE\u6807\u3002","\u7F16\u8F91\u5668\u5B57\u5F62\u8FB9\u8DDD\u4E2D\u5DF2\u6298\u53E0\u7684\u8303\u56F4\u7684\u56FE\u6807\u3002"],"vs/editor/contrib/fontZoom/browser/fontZoom":["\u653E\u5927\u7F16\u8F91\u5668\u5B57\u4F53","\u7F29\u5C0F\u7F16\u8F91\u5668\u5B57\u4F53","\u91CD\u7F6E\u7F16\u8F91\u5668\u5B57\u4F53\u5927\u5C0F"],"vs/editor/contrib/format/browser/format":["\u5728\u7B2C {0} \u884C\u8FDB\u884C\u4E86 1 \u6B21\u683C\u5F0F\u7F16\u8F91","\u5728\u7B2C {1} \u884C\u8FDB\u884C\u4E86 {0} \u6B21\u683C\u5F0F\u7F16\u8F91","\u7B2C {0} \u884C\u5230\u7B2C {1} \u884C\u95F4\u8FDB\u884C\u4E86 1 \u6B21\u683C\u5F0F\u7F16\u8F91","\u7B2C {1} \u884C\u5230\u7B2C {2} \u884C\u95F4\u8FDB\u884C\u4E86 {0} \u6B21\u683C\u5F0F\u7F16\u8F91"],"vs/editor/contrib/format/browser/formatActions":["\u683C\u5F0F\u5316\u6587\u6863","\u683C\u5F0F\u5316\u9009\u5B9A\u5185\u5BB9"],"vs/editor/contrib/gotoError/browser/gotoError":["\u8F6C\u5230\u4E0B\u4E00\u4E2A\u95EE\u9898 (\u9519\u8BEF\u3001\u8B66\u544A\u3001\u4FE1\u606F)","\u201C\u8F6C\u5230\u4E0B\u4E00\u4E2A\u201D\u6807\u8BB0\u7684\u56FE\u6807\u3002","\u8F6C\u5230\u4E0A\u4E00\u4E2A\u95EE\u9898 (\u9519\u8BEF\u3001\u8B66\u544A\u3001\u4FE1\u606F)","\u201C\u8F6C\u5230\u4E0A\u4E00\u4E2A\u201D\u6807\u8BB0\u7684\u56FE\u6807\u3002","\u8F6C\u5230\u6587\u4EF6\u4E2D\u7684\u4E0B\u4E00\u4E2A\u95EE\u9898 (\u9519\u8BEF\u3001\u8B66\u544A\u3001\u4FE1\u606F)","\u4E0B\u4E00\u4E2A\u95EE\u9898(&&P)","\u8F6C\u5230\u6587\u4EF6\u4E2D\u7684\u4E0A\u4E00\u4E2A\u95EE\u9898 (\u9519\u8BEF\u3001\u8B66\u544A\u3001\u4FE1\u606F)","\u4E0A\u4E00\u4E2A\u95EE\u9898(&&P)"],"vs/editor/contrib/gotoError/browser/gotoErrorWidget":["\u9519\u8BEF","\u8B66\u544A","\u4FE1\u606F","\u63D0\u793A","{1} \u4E2D\u7684 {0}","{0} \u4E2A\u95EE\u9898(\u5171 {1} \u4E2A)","{0} \u4E2A\u95EE\u9898(\u5171 {1} 
\u4E2A)","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u9519\u8BEF\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u9519\u8BEF\u6807\u9898\u80CC\u666F\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u8B66\u544A\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u8B66\u544A\u6807\u9898\u80CC\u666F\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u4FE1\u606F\u989C\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u4FE1\u606F\u6807\u9898\u80CC\u666F\u8272\u3002","\u7F16\u8F91\u5668\u6807\u8BB0\u5BFC\u822A\u5C0F\u7EC4\u4EF6\u80CC\u666F\u8272\u3002"],"vs/editor/contrib/gotoSymbol/browser/goToCommands":["\u5FEB\u901F\u67E5\u770B","\u5B9A\u4E49","\u672A\u627E\u5230\u201C{0}\u201D\u7684\u4EFB\u4F55\u5B9A\u4E49","\u627E\u4E0D\u5230\u5B9A\u4E49","\u8F6C\u5230\u5B9A\u4E49","\u6253\u5F00\u4FA7\u8FB9\u7684\u5B9A\u4E49","\u901F\u89C8\u5B9A\u4E49","\u58F0\u660E","\u672A\u627E\u5230\u201C{0}\u201D\u7684\u58F0\u660E","\u672A\u627E\u5230\u58F0\u660E","\u8F6C\u5230\u58F0\u660E","\u672A\u627E\u5230\u201C{0}\u201D\u7684\u58F0\u660E","\u672A\u627E\u5230\u58F0\u660E","\u67E5\u770B\u58F0\u660E","\u7C7B\u578B\u5B9A\u4E49","\u672A\u627E\u5230\u201C{0}\u201D\u7684\u7C7B\u578B\u5B9A\u4E49","\u672A\u627E\u5230\u7C7B\u578B\u5B9A\u4E49","\u8F6C\u5230\u7C7B\u578B\u5B9A\u4E49","\u5FEB\u901F\u67E5\u770B\u7C7B\u578B\u5B9A\u4E49","\u5B9E\u73B0","\u672A\u627E\u5230\u201C{0}\u201D\u7684\u5B9E\u73B0","\u672A\u627E\u5230\u5B9E\u73B0","\u8F6C\u5230\u5B9E\u73B0","\u67E5\u770B\u5B9E\u73B0",'\u672A\u627E\u5230"{0}"\u7684\u5F15\u7528',"\u672A\u627E\u5230\u5F15\u7528","\u8F6C\u5230\u5F15\u7528","\u5F15\u7528","\u67E5\u770B\u5F15\u7528","\u5F15\u7528","\u8F6C\u5230\u4EFB\u4F55\u7B26\u53F7","\u4F4D\u7F6E","\u65E0\u201C{0}\u201D\u7684\u7ED3\u679C","\u5F15\u7528","\u8F6C\u5230\u5B9A\u4E49(&&D)","\u8F6C\u5230\u58F0\u660E(&&D)","\u8F6C\u5230\u7C7B\u578B\u5B9A\u4E49(&&T)","\u8F6C\u5230\u5B9E\u73B0(&&I)","\u8F6C\u5230\u5F15\u7528(&&R)"],"vs/editor/contrib/gotoSymbol/browser/link/goToDefinitionAtPosition":["\u5355\u51FB\u663E\u793A {0} \u4E2A\u5B9A\u4E49\u3002"],"vs/editor/contrib/gotoSymbol/browser/peek/referencesController":["\u5F15\u7528\u901F\u89C8\u662F\u5426\u53EF\u89C1\uFF0C\u4F8B\u5982\u201C\u901F\u89C8\u5F15\u7528\u201D\u6216\u201C\u901F\u89C8\u5B9A\u4E49\u201D","\u6B63\u5728\u52A0\u8F7D...","{0} ({1})"],"vs/editor/contrib/gotoSymbol/browser/peek/referencesTree":["{0} \u4E2A\u5F15\u7528","{0} \u4E2A\u5F15\u7528","\u5F15\u7528"],"vs/editor/contrib/gotoSymbol/browser/peek/referencesWidget":["\u65E0\u53EF\u7528\u9884\u89C8","\u65E0\u7ED3\u679C","\u5F15\u7528"],"vs/editor/contrib/gotoSymbol/browser/referencesModel":["\u5728\u6587\u4EF6 {0} \u7684 {1} \u884C {2} \u5217\u7684\u7B26\u53F7","{0} \u4E2D {1} \u884C {2} \u5217\u7684\u7B26\u53F7\uFF0C{3}","{0} \u4E2D\u6709 1 \u4E2A\u7B26\u53F7\uFF0C\u5B8C\u6574\u8DEF\u5F84: {1}","{1} \u4E2D\u6709 {0} \u4E2A\u7B26\u53F7\uFF0C\u5B8C\u6574\u8DEF\u5F84: {2}","\u672A\u627E\u5230\u7ED3\u679C","\u5728 {0} \u4E2D\u627E\u5230 1 \u4E2A\u7B26\u53F7","\u5728 {1} \u4E2D\u627E\u5230 {0} \u4E2A\u7B26\u53F7","\u5728 {1} \u4E2A\u6587\u4EF6\u4E2D\u627E\u5230 {0} \u4E2A\u7B26\u53F7"],"vs/editor/contrib/gotoSymbol/browser/symbolNavigation":["\u662F\u5426\u5B58\u5728\u53EA\u80FD\u901A\u8FC7\u952E\u76D8\u5BFC\u822A\u7684\u7B26\u53F7\u4F4D\u7F6E\u3002","{1} \u7684\u7B26\u53F7 {0}\uFF0C\u4E0B\u4E00\u4E2A\u4F7F\u7528 {2}","{1} \u7684\u7B26\u53F7 
{0}"],"vs/editor/contrib/hover/browser/hover":["\u663E\u793A\u60AC\u505C","\u663E\u793A\u5B9A\u4E49\u9884\u89C8\u60AC\u505C"],"vs/editor/contrib/hover/browser/markdownHoverParticipant":["\u6B63\u5728\u52A0\u8F7D...","\u51FA\u4E8E\u6027\u80FD\u539F\u56E0\uFF0C\u672A\u5BF9\u957F\u884C\u8FDB\u884C\u89E3\u6790\u3002\u89E3\u6790\u957F\u5EA6\u9608\u503C\u53EF\u901A\u8FC7\u201Ceditor.maxTokenizationLineLength\u201D\u8FDB\u884C\u914D\u7F6E\u3002"],"vs/editor/contrib/hover/browser/markerHoverParticipant":["\u67E5\u770B\u95EE\u9898","\u6CA1\u6709\u53EF\u7528\u7684\u5FEB\u901F\u4FEE\u590D","\u6B63\u5728\u68C0\u67E5\u5FEB\u901F\u4FEE\u590D...","\u6CA1\u6709\u53EF\u7528\u7684\u5FEB\u901F\u4FEE\u590D","\u5FEB\u901F\u4FEE\u590D..."],"vs/editor/contrib/inPlaceReplace/browser/inPlaceReplace":["\u66FF\u6362\u4E3A\u4E0A\u4E00\u4E2A\u503C","\u66FF\u6362\u4E3A\u4E0B\u4E00\u4E2A\u503C"],"vs/editor/contrib/indentation/browser/indentation":["\u5C06\u7F29\u8FDB\u8F6C\u6362\u4E3A\u7A7A\u683C","\u5C06\u7F29\u8FDB\u8F6C\u6362\u4E3A\u5236\u8868\u7B26","\u5DF2\u914D\u7F6E\u5236\u8868\u7B26\u5927\u5C0F","\u9009\u62E9\u5F53\u524D\u6587\u4EF6\u7684\u5236\u8868\u7B26\u5927\u5C0F",'\u4F7F\u7528 "Tab" \u7F29\u8FDB',"\u4F7F\u7528\u7A7A\u683C\u7F29\u8FDB","\u4ECE\u5185\u5BB9\u4E2D\u68C0\u6D4B\u7F29\u8FDB\u65B9\u5F0F","\u91CD\u65B0\u7F29\u8FDB\u884C","\u91CD\u65B0\u7F29\u8FDB\u6240\u9009\u884C"],"vs/editor/contrib/inlayHints/browser/inlayHintsHover":["cmd + \u70B9\u51FB","ctrl + \u70B9\u51FB","option + \u70B9\u51FB","alt + \u70B9\u51FB","\u8F6C\u5230\u5B9A\u4E49 ({0})\uFF0C\u70B9\u51FB\u53F3\u952E\u4EE5\u67E5\u770B\u8BE6\u7EC6\u4FE1\u606F","\u8F6C\u5230\u5B9A\u4E49\uFF08{0}\uFF09","\u6267\u884C\u547D\u4EE4"],"vs/editor/contrib/inlineCompletions/browser/ghostTextController":["\u5185\u8054\u5EFA\u8BAE\u662F\u5426\u53EF\u89C1","\u5185\u8054\u5EFA\u8BAE\u662F\u5426\u4EE5\u7A7A\u767D\u5F00\u5934","\u5185\u8054\u5EFA\u8BAE\u662F\u5426\u4EE5\u5C0F\u4E8E\u9009\u9879\u5361\u63D2\u5165\u5185\u5BB9\u7684\u7A7A\u683C\u5F00\u5934","\u663E\u793A\u4E0B\u4E00\u4E2A\u5185\u8054\u5EFA\u8BAE","\u663E\u793A\u4E0A\u4E00\u4E2A\u5185\u8054\u5EFA\u8BAE","\u89E6\u53D1\u5185\u8054\u5EFA\u8BAE"],"vs/editor/contrib/inlineCompletions/browser/inlineCompletionsHoverParticipant":["\u4E0B\u4E00\u4E2A","\u4E0A\u4E00\u4E2A","\u63A5\u53D7","\u5EFA\u8BAE:"],"vs/editor/contrib/lineSelection/browser/lineSelection":["\u5C55\u5F00\u884C\u9009\u62E9"],"vs/editor/contrib/linesOperations/browser/linesOperations":["\u5411\u4E0A\u590D\u5236\u884C","\u5411\u4E0A\u590D\u5236\u4E00\u884C(&&C)","\u5411\u4E0B\u590D\u5236\u884C","\u5411\u4E0B\u590D\u5236\u4E00\u884C(&&P)","\u91CD\u590D\u9009\u62E9","\u91CD\u590D\u9009\u62E9(&&D)","\u5411\u4E0A\u79FB\u52A8\u884C","\u5411\u4E0A\u79FB\u52A8\u4E00\u884C(&&V)","\u5411\u4E0B\u79FB\u52A8\u884C","\u5411\u4E0B\u79FB\u52A8\u4E00\u884C(&&L)","\u6309\u5347\u5E8F\u6392\u5217\u884C","\u6309\u964D\u5E8F\u6392\u5217\u884C","\u5220\u9664\u91CD\u590D\u884C","\u88C1\u526A\u5C3E\u968F\u7A7A\u683C","\u5220\u9664\u884C","\u884C\u7F29\u8FDB","\u884C\u51CF\u5C11\u7F29\u8FDB","\u5728\u4E0A\u9762\u63D2\u5165\u884C","\u5728\u4E0B\u9762\u63D2\u5165\u884C","\u5220\u9664\u5DE6\u4FA7\u6240\u6709\u5185\u5BB9","\u5220\u9664\u53F3\u4FA7\u6240\u6709\u5185\u5BB9","\u5408\u5E76\u884C","\u8F6C\u7F6E\u5149\u6807\u5904\u7684\u5B57\u7B26","\u8F6C\u6362\u4E3A\u5927\u5199","\u8F6C\u6362\u4E3A\u5C0F\u5199","\u8F6C\u6362\u4E3A\u8BCD\u9996\u5B57\u6BCD\u5927\u5199","\u8F6C\u6362\u4E3A\u86C7\u5F62\u547D\u540D\u6CD5"],"vs/editor/contrib/linkedEditing/browser/linkedEditing":["\u
542F\u52A8\u94FE\u63A5\u7F16\u8F91","\u7F16\u8F91\u5668\u6839\u636E\u7C7B\u578B\u81EA\u52A8\u91CD\u547D\u540D\u65F6\u7684\u80CC\u666F\u8272\u3002"],"vs/editor/contrib/links/browser/links":["\u6B64\u94FE\u63A5\u683C\u5F0F\u4E0D\u6B63\u786E\uFF0C\u65E0\u6CD5\u6253\u5F00: {0}","\u6B64\u94FE\u63A5\u76EE\u6807\u5DF2\u4E22\u5931\uFF0C\u65E0\u6CD5\u6253\u5F00\u3002","\u6267\u884C\u547D\u4EE4","\u6253\u5F00\u94FE\u63A5","cmd + \u5355\u51FB","ctrl + \u5355\u51FB","option + \u5355\u51FB","alt + \u5355\u51FB","\u6267\u884C\u547D\u4EE4 {0}","\u6253\u5F00\u94FE\u63A5"],"vs/editor/contrib/message/browser/messageController":["\u7F16\u8F91\u5668\u5F53\u524D\u662F\u5426\u6B63\u5728\u663E\u793A\u5185\u8054\u6D88\u606F","\u65E0\u6CD5\u5728\u53EA\u8BFB\u7F16\u8F91\u5668\u4E2D\u7F16\u8F91"],"vs/editor/contrib/multicursor/browser/multicursor":["\u6DFB\u52A0\u7684\u5149\u6807: {0}","\u6DFB\u52A0\u7684\u6E38\u6807: {0}","\u5728\u4E0A\u9762\u6DFB\u52A0\u5149\u6807","\u5728\u4E0A\u9762\u6DFB\u52A0\u5149\u6807(&&A)","\u5728\u4E0B\u9762\u6DFB\u52A0\u5149\u6807","\u5728\u4E0B\u9762\u6DFB\u52A0\u5149\u6807(&&D)","\u5728\u884C\u5C3E\u6DFB\u52A0\u5149\u6807","\u5728\u884C\u5C3E\u6DFB\u52A0\u5149\u6807(&&U)","\u5728\u5E95\u90E8\u6DFB\u52A0\u5149\u6807","\u5728\u9876\u90E8\u6DFB\u52A0\u5149\u6807","\u5C06\u4E0B\u4E00\u4E2A\u67E5\u627E\u5339\u914D\u9879\u6DFB\u52A0\u5230\u9009\u62E9","\u6DFB\u52A0\u4E0B\u4E00\u4E2A\u5339\u914D\u9879(&&N)","\u5C06\u9009\u62E9\u5185\u5BB9\u6DFB\u52A0\u5230\u4E0A\u4E00\u67E5\u627E\u5339\u914D\u9879","\u6DFB\u52A0\u4E0A\u4E00\u4E2A\u5339\u914D\u9879(&&R)","\u5C06\u4E0A\u6B21\u9009\u62E9\u79FB\u52A8\u5230\u4E0B\u4E00\u4E2A\u67E5\u627E\u5339\u914D\u9879","\u5C06\u4E0A\u4E2A\u9009\u62E9\u5185\u5BB9\u79FB\u52A8\u5230\u4E0A\u4E00\u67E5\u627E\u5339\u914D\u9879","\u9009\u62E9\u6240\u6709\u627E\u5230\u7684\u67E5\u627E\u5339\u914D\u9879","\u9009\u62E9\u6240\u6709\u5339\u914D\u9879(&&O)","\u66F4\u6539\u6240\u6709\u5339\u914D\u9879"],"vs/editor/contrib/parameterHints/browser/parameterHints":["\u89E6\u53D1\u53C2\u6570\u63D0\u793A"],"vs/editor/contrib/parameterHints/browser/parameterHintsWidget":["\u201C\u663E\u793A\u4E0B\u4E00\u4E2A\u53C2\u6570\u201D\u63D0\u793A\u7684\u56FE\u6807\u3002","\u201C\u663E\u793A\u4E0A\u4E00\u4E2A\u53C2\u6570\u201D\u63D0\u793A\u7684\u56FE\u6807\u3002","{0}\uFF0C\u63D0\u793A","\u53C2\u6570\u63D0\u793A\u4E2D\u6D3B\u52A8\u9879\u7684\u524D\u666F\u8272\u3002"],"vs/editor/contrib/peekView/browser/peekView":["\u901F\u89C8\u4E2D\u662F\u5426\u5D4C\u5165\u4E86\u5F53\u524D\u4EE3\u7801\u7F16\u8F91\u5668","\u5173\u95ED","\u901F\u89C8\u89C6\u56FE\u6807\u9898\u533A\u57DF\u80CC\u666F\u989C\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u6807\u9898\u989C\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u6807\u9898\u4FE1\u606F\u989C\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u8FB9\u6846\u548C\u7BAD\u5934\u989C\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7ED3\u679C\u5217\u8868\u80CC\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7ED3\u679C\u5217\u8868\u4E2D\u884C\u8282\u70B9\u7684\u524D\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7ED3\u679C\u5217\u8868\u4E2D\u6587\u4EF6\u8282\u70B9\u7684\u524D\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7ED3\u679C\u5217\u8868\u4E2D\u6240\u9009\u6761\u76EE\u7684\u80CC\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7ED3\u679C\u5217\u8868\u4E2D\u6240\u9009\u6761\u76EE\u7684\u524D\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7F16\u8F91\u5668\u80CC\u666F\u8272\u3002","\u901F\u89C8\u89C6\u56FE\u7F16\u8F91\u5668\u4E2D\u88C5\u8BA2\u7EBF\u7684\u80CC\u666F\u8272\u3002","\u5728\u901F\u89C8\u89C6\u56
FE\u7ED3\u679C\u5217\u8868\u4E2D\u5339\u914D\u7A81\u51FA\u663E\u793A\u989C\u8272\u3002","\u5728\u901F\u89C8\u89C6\u56FE\u7F16\u8F91\u5668\u4E2D\u5339\u914D\u7A81\u51FA\u663E\u793A\u989C\u8272\u3002","\u5728\u901F\u89C8\u89C6\u56FE\u7F16\u8F91\u5668\u4E2D\u5339\u914D\u9879\u7684\u7A81\u51FA\u663E\u793A\u8FB9\u6846\u3002"],"vs/editor/contrib/quickAccess/browser/gotoLineQuickAccess":["\u5148\u6253\u5F00\u6587\u672C\u7F16\u8F91\u5668\u7136\u540E\u8DF3\u8F6C\u5230\u884C\u3002","\u8F6C\u5230\u7B2C {0} \u884C\u7B2C {1} \u4E2A\u5B57\u7B26\u3002","\u8F6C\u5230\u884C {0}\u3002","\u5F53\u524D\u884C: {0}\uFF0C\u5B57\u7B26: {1}\u3002\u952E\u5165\u8981\u5BFC\u822A\u5230\u7684\u884C\u53F7(\u4ECB\u4E8E 1 \u81F3 {2} \u4E4B\u95F4)\u3002","\u5F53\u524D\u884C: {0}\uFF0C\u5B57\u7B26: {1}\u3002 \u952E\u5165\u8981\u5BFC\u822A\u5230\u7684\u884C\u53F7\u3002"],"vs/editor/contrib/quickAccess/browser/gotoSymbolQuickAccess":["\u8981\u8F6C\u5230\u7B26\u53F7\uFF0C\u9996\u5148\u6253\u5F00\u5177\u6709\u7B26\u53F7\u4FE1\u606F\u7684\u6587\u672C\u7F16\u8F91\u5668\u3002","\u6D3B\u52A8\u6587\u672C\u7F16\u8F91\u5668\u4E0D\u63D0\u4F9B\u7B26\u53F7\u4FE1\u606F\u3002","\u6CA1\u6709\u5339\u914D\u7684\u7F16\u8F91\u5668\u7B26\u53F7","\u6CA1\u6709\u7F16\u8F91\u5668\u7B26\u53F7","\u5728\u4FA7\u8FB9\u6253\u5F00","\u5728\u5E95\u90E8\u6253\u5F00","\u7B26\u53F7({0})","\u5C5E\u6027({0})","\u65B9\u6CD5({0})","\u51FD\u6570({0})","\u6784\u9020\u51FD\u6570 ({0})","\u53D8\u91CF({0})","\u7C7B({0})","\u7ED3\u6784({0})","\u4E8B\u4EF6({0})","\u8FD0\u7B97\u7B26({0})","\u63A5\u53E3({0})","\u547D\u540D\u7A7A\u95F4({0})","\u5305({0})","\u7C7B\u578B\u53C2\u6570({0})","\u6A21\u5757({0})","\u5C5E\u6027({0})","\u679A\u4E3E({0})","\u679A\u4E3E\u6210\u5458({0})","\u5B57\u7B26\u4E32({0})","\u6587\u4EF6({0})","\u6570\u7EC4({0})","\u6570\u5B57({0})","\u5E03\u5C14\u503C({0})","\u5BF9\u8C61({0})","\u952E({0})","\u5B57\u6BB5({0})","\u5E38\u91CF({0})"],"vs/editor/contrib/rename/browser/rename":["\u65E0\u7ED3\u679C\u3002","\u89E3\u6790\u91CD\u547D\u540D\u4F4D\u7F6E\u65F6\u53D1\u751F\u672A\u77E5\u9519\u8BEF","\u6B63\u5728\u91CD\u547D\u540D\u201C{0}\u201D","\u91CD\u547D\u540D {0}","\u6210\u529F\u5C06\u201C{0}\u201D\u91CD\u547D\u540D\u4E3A\u201C{1}\u201D\u3002\u6458\u8981: {2}","\u91CD\u547D\u540D\u65E0\u6CD5\u5E94\u7528\u4FEE\u6539","\u91CD\u547D\u540D\u65E0\u6CD5\u8BA1\u7B97\u4FEE\u6539","\u91CD\u547D\u540D\u7B26\u53F7","\u542F\u7528/\u7981\u7528\u91CD\u547D\u540D\u4E4B\u524D\u9884\u89C8\u66F4\u6539\u7684\u529F\u80FD"],"vs/editor/contrib/rename/browser/renameInputField":["\u91CD\u547D\u540D\u8F93\u5165\u5C0F\u7EC4\u4EF6\u662F\u5426\u53EF\u89C1",'\u91CD\u547D\u540D\u8F93\u5165\u3002\u952E\u5165\u65B0\u540D\u79F0\u5E76\u6309 "Enter" \u63D0\u4EA4\u3002',"\u6309 {0} \u8FDB\u884C\u91CD\u547D\u540D\uFF0C\u6309 {1} 
\u8FDB\u884C\u9884\u89C8"],"vs/editor/contrib/smartSelect/browser/smartSelect":["\u5C55\u5F00\u9009\u62E9","\u6269\u5927\u9009\u533A(&&E)","\u6536\u8D77\u9009\u62E9","\u7F29\u5C0F\u9009\u533A(&&S)"],"vs/editor/contrib/snippet/browser/snippetController2":["\u7F16\u8F91\u5668\u76EE\u524D\u662F\u5426\u5728\u4EE3\u7801\u7247\u6BB5\u6A21\u5F0F\u4E0B","\u5728\u4EE3\u7801\u7247\u6BB5\u6A21\u5F0F\u4E0B\u65F6\u662F\u5426\u5B58\u5728\u4E0B\u4E00\u5236\u8868\u4F4D","\u5728\u4EE3\u7801\u7247\u6BB5\u6A21\u5F0F\u4E0B\u65F6\u662F\u5426\u5B58\u5728\u4E0A\u4E00\u5236\u8868\u4F4D"],"vs/editor/contrib/snippet/browser/snippetVariables":["\u661F\u671F\u5929","\u661F\u671F\u4E00","\u661F\u671F\u4E8C","\u661F\u671F\u4E09","\u661F\u671F\u56DB","\u661F\u671F\u4E94","\u661F\u671F\u516D","\u5468\u65E5","\u5468\u4E00","\u5468\u4E8C","\u5468\u4E09","\u5468\u56DB","\u5468\u4E94","\u5468\u516D","\u4E00\u6708","\u4E8C\u6708","\u4E09\u6708","\u56DB\u6708","5\u6708","\u516D\u6708","\u4E03\u6708","\u516B\u6708","\u4E5D\u6708","\u5341\u6708","\u5341\u4E00\u6708","\u5341\u4E8C\u6708","1\u6708","2\u6708","3\u6708","4\u6708","5\u6708","6\u6708","7\u6708","8\u6708","9\u6708","10\u6708","11 \u6708","12\u6708"],"vs/editor/contrib/suggest/browser/suggest":["\u5EFA\u8BAE\u8BE6\u7EC6\u4FE1\u606F\u662F\u5426\u53EF\u89C1","\u662F\u5426\u5B58\u5728\u591A\u6761\u5EFA\u8BAE\u53EF\u4F9B\u9009\u62E9","\u63D2\u5165\u5F53\u524D\u5EFA\u8BAE\u662F\u5426\u4F1A\u5BFC\u81F4\u66F4\u6539\u6216\u5BFC\u81F4\u5DF2\u952E\u5165\u6240\u6709\u5185\u5BB9","\u6309 Enter \u65F6\u662F\u5426\u4F1A\u63D2\u5165\u5EFA\u8BAE","\u5F53\u524D\u5EFA\u8BAE\u662F\u5426\u5177\u6709\u63D2\u5165\u548C\u66FF\u6362\u884C\u4E3A","\u9ED8\u8BA4\u884C\u4E3A\u662F\u5426\u662F\u63D2\u5165\u6216\u66FF\u6362","\u5F53\u524D\u5EFA\u8BAE\u662F\u5426\u652F\u6301\u89E3\u6790\u66F4\u591A\u8BE6\u7EC6\u4FE1\u606F"],"vs/editor/contrib/suggest/browser/suggestController":["\u9009\u62E9\u201C{0}\u201D\u540E\u8FDB\u884C\u4E86\u5176\u4ED6 {1} \u6B21\u7F16\u8F91","\u89E6\u53D1\u5EFA\u8BAE","\u63D2\u5165","\u63D2\u5165","\u66FF\u6362","\u66FF\u6362","\u63D2\u5165","\u663E\u793A\u66F4\u5C11","\u663E\u793A\u66F4\u591A","\u91CD\u7F6E\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u5927\u5C0F"],"vs/editor/contrib/suggest/browser/suggestWidget":["\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u7684\u80CC\u666F\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u7684\u8FB9\u6846\u989C\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u7684\u524D\u666F\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u6240\u9009\u6761\u76EE\u7684\u524D\u666F\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u6240\u9009\u6761\u76EE\u7684\u56FE\u6807\u524D\u666F\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u6240\u9009\u6761\u76EE\u7684\u80CC\u666F\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u5339\u914D\u5185\u5BB9\u7684\u9AD8\u4EAE\u989C\u8272\u3002","\u5F53\u67D0\u9879\u83B7\u5F97\u7126\u70B9\u65F6\uFF0C\u5728\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u7A81\u51FA\u663E\u793A\u7684\u5339\u914D\u9879\u7684\u989C\u8272\u3002","\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u72B6\u6001\u7684\u524D\u666F\u8272\u3002","\u6B63\u5728\u52A0\u8F7D...","\u65E0\u5EFA\u8BAE\u3002","\u5EFA\u8BAE","{0}{1}\uFF0C{2}","{0}{1}","{0}\uFF0C{1}","{0}\uFF0C\u6587\u6863: 
{1}"],"vs/editor/contrib/suggest/browser/suggestWidgetDetails":["\u5173\u95ED","\u6B63\u5728\u52A0\u8F7D\u2026"],"vs/editor/contrib/suggest/browser/suggestWidgetRenderer":["\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u7684\u8BE6\u7EC6\u4FE1\u606F\u7684\u56FE\u6807\u3002","\u4E86\u89E3\u8BE6\u7EC6\u4FE1\u606F"],"vs/editor/contrib/suggest/browser/suggestWidgetStatus":["{0} ({1})"],"vs/editor/contrib/symbolIcons/browser/symbolIcons":["\u6570\u7EC4\u7B26\u53F7\u7684\u524D\u666F\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u5C06\u663E\u793A\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u3002","\u5E03\u5C14\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u7C7B\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u989C\u8272\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5E38\u91CF\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6784\u9020\u51FD\u6570\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u679A\u4E3E\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u679A\u4E3E\u5668\u6210\u5458\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u4E8B\u4EF6\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5B57\u6BB5\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6587\u4EF6\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6587\u4EF6\u5939\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u51FD\u6570\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u63A5\u53E3\u7B26\u53F7\u7684\u524D\u666F\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u5C06\u663E\u793A\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u3002","\u952E\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5173\u952E\u5B57\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u5
3F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u65B9\u6CD5\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6A21\u5757\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u547D\u540D\u7A7A\u95F4\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u8F6E\u5ED3\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u7A7A\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6570\u5B57\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5BF9\u8C61\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u8FD0\u7B97\u7B26\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5305\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5C5E\u6027\u7B26\u53F7\u7684\u524D\u666F\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u7EC4\u4EF6\u4E2D\u3002","\u53C2\u8003\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u7247\u6BB5\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5B57\u7B26\u4E32\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u8F6E\u5ED3\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u7ED3\u6784\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u6587\u672C\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u7C7B\u578B\u53C2\u6570\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u5355\u4F4D\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002","\u53D8\u91CF\u7B26\u53F7\u7684\u524D\u666F\u989C\u8272\u3002\u8FD9\u4E9B\u7B26\u53F7\u51FA\u73B0\u5728\u5927\u7EB2\u3001\u75D5\u8FF9\u5BFC\u822A\u680F\u548C\u5EFA\u8BAE\u5C0F\u90E8\u4EF6\u4E2D\u3002"],
"vs/editor/contrib/toggleTabFocusMode/browser/toggleTabFocusMode":["\u5207\u6362 Tab \u952E\u79FB\u52A8\u7126\u70B9","Tab \u952E\u5C06\u79FB\u52A8\u5230\u4E0B\u4E00\u53EF\u805A\u7126\u7684\u5143\u7D20","Tab \u952E\u5C06\u63D2\u5165\u5236\u8868\u7B26"],"vs/editor/contrib/tokenization/browser/tokenization":["\u5F00\u53D1\u4EBA\u5458: \u5F3A\u5236\u91CD\u65B0\u8FDB\u884C\u6807\u8BB0"],"vs/editor/contrib/unicodeHighlighter/browser/unicodeHighlighter":["\u6269\u5C55\u7F16\u8F91\u5668\u4E2D\u968F\u8B66\u544A\u6D88\u606F\u4E00\u540C\u663E\u793A\u7684\u56FE\u6807\u3002","\u672C\u6587\u6863\u5305\u542B\u8BB8\u591A\u975E\u57FA\u672C ASCII unicode \u5B57\u7B26","\u672C\u6587\u6863\u5305\u542B\u8BB8\u591A\u4E0D\u660E\u786E\u7684 unicode \u5B57\u7B26","\u672C\u6587\u6863\u5305\u542B\u8BB8\u591A\u4E0D\u53EF\u89C1\u7684 unicode \u5B57\u7B26","\u5B57\u7B26 {0} \u53EF\u80FD\u4F1A\u4E0E\u5B57\u7B26 {1} \u6DF7\u6DC6\uFF0C\u540E\u8005\u5728\u6E90\u4EE3\u7801\u4E2D\u66F4\u4E3A\u5E38\u89C1\u3002","\u5B57\u7B26 {0} \u4E0D\u53EF\u89C1\u3002","\u5B57\u7B26 {0} \u4E0D\u662F\u57FA\u672C ASCII \u5B57\u7B26\u3002","\u8C03\u6574\u8BBE\u7F6E","\u7981\u7528\u6279\u6CE8\u4E2D\u7684\u7A81\u51FA\u663E\u793A","\u7981\u7528\u6279\u6CE8\u4E2D\u5B57\u7B26\u7684\u7A81\u51FA\u663E\u793A","\u7981\u7528\u5B57\u7B26\u4E32\u4E2D\u7684\u7A81\u51FA\u663E\u793A","\u7981\u7528\u5B57\u7B26\u4E32\u4E2D\u5B57\u7B26\u7684\u7A81\u51FA\u663E\u793A","\u7981\u7528\u4E0D\u660E\u786E\u7684\u7A81\u51FA\u663E\u793A","\u7981\u6B62\u7A81\u51FA\u663E\u793A\u6B67\u4E49\u5B57\u7B26","\u7981\u7528\u4E0D\u53EF\u89C1\u7A81\u51FA\u663E\u793A","\u7981\u6B62\u7A81\u51FA\u663E\u793A\u4E0D\u53EF\u89C1\u5B57\u7B26","\u7981\u7528\u975E ASCII \u7A81\u51FA\u663E\u793A","\u7981\u6B62\u7A81\u51FA\u663E\u793A\u975E\u57FA\u672C ASCII \u5B57\u7B26","\u663E\u793A\u6392\u9664\u9009\u9879","\u4E0D\u7A81\u51FA\u663E\u793A {0} (\u4E0D\u53EF\u89C1\u5B57\u7B26)","\u5728\u7A81\u51FA\u663E\u793A\u5185\u5BB9\u4E2D\u6392\u9664{0}","\u5141\u8BB8\u8BED\u8A00\u201C{0}\u201D\u4E2D\u66F4\u5E38\u89C1\u7684 unicode \u5B57\u7B26\u3002","\u914D\u7F6E Unicode \u7A81\u51FA\u663E\u793A\u9009\u9879"],"vs/editor/contrib/unusualLineTerminators/browser/unusualLineTerminators":["\u5F02\u5E38\u884C\u7EC8\u6B62\u7B26","\u68C0\u6D4B\u5230\u5F02\u5E38\u884C\u7EC8\u6B62\u7B26",`\u6587\u4EF6\u201C{0}\u201D\u5305\u542B\u4E00\u4E2A\u6216\u591A\u4E2A\u5F02\u5E38\u7684\u884C\u7EC8\u6B62\u7B26\uFF0C\u4F8B\u5982\u884C\u5206\u9694\u7B26(LS)\u6216\u6BB5\u843D\u5206\u9694\u7B26(PS)\u3002\r
\r
\u5EFA\u8BAE\u4ECE\u6587\u4EF6\u4E2D\u5220\u9664\u5B83\u4EEC\u3002\u53EF\u901A\u8FC7\u201Ceditor.unusualLineTerminators\u201D\u8FDB\u884C\u914D\u7F6E\u3002`,"\u5220\u9664\u5F02\u5E38\u884C\u7EC8\u6B62\u7B26","\u5FFD\u7565"],"vs/editor/contrib/wordHighlighter/browser/wordHighlighter":["\u8BFB\u53D6\u8BBF\u95EE\u671F\u95F4\u7B26\u53F7\u7684\u80CC\u666F\u8272\uFF0C\u4F8B\u5982\u8BFB\u53D6\u53D8\u91CF\u65F6\u3002\u989C\u8272\u5FC5\u987B\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u4E0B\u9762\u7684\u4FEE\u9970\u6548\u679C\u3002","\u5199\u5165\u8BBF\u95EE\u8FC7\u7A0B\u4E2D\u7B26\u53F7\u7684\u80CC\u666F\u8272\uFF0C\u4F8B\u5982\u5199\u5165\u53D8\u91CF\u65F6\u3002\u989C\u8272\u5FC5\u987B\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u4E0B\u9762\u7684\u4FEE\u9970\u6548\u679C\u3002","\u7B26\u53F7\u5728\u8FDB\u884C\u8BFB\u53D6\u8BBF\u95EE\u64CD\u4F5C\u65F6\u7684\u8FB9\u6846\u989C\u8272\uFF0C\u4F8B\u5982\u8BFB\u53D6\u53D8\u91CF\u3002","\u7B26\u53F7\u5728\u8FDB\u884C\u5199\u5165\u8BBF\u95EE\u64CD\u4F5C\u65F6\u7684\u8FB9\u6846\u989C\u8272\uFF0C\u4F8B\u5982\u5199\u5165\u53D8\u91CF\u3002","\u7528\u4E8E\u7A81\u51FA\u663E\u793A\u7B26\u53F7\u7684\u6982\u8FF0\u6807\u5C3A\u6807\u8BB0\u989C\u8272\u3002\u989C\u8272\u5FC5\u987B\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u4E0B\u9762\u7684\u4FEE\u9970\u6548\u679C\u3002","\u7528\u4E8E\u7A81\u51FA\u663E\u793A\u5199\u6743\u9650\u7B26\u53F7\u7684\u6982\u8FF0\u6807\u5C3A\u6807\u8BB0\u989C\u8272\u3002\u989C\u8272\u5FC5\u987B\u900F\u660E\uFF0C\u4EE5\u514D\u9690\u85CF\u4E0B\u9762\u7684\u4FEE\u9970\u6548\u679C\u3002","\u8F6C\u5230\u4E0B\u4E00\u4E2A\u7A81\u51FA\u663E\u793A\u7684\u7B26\u53F7","\u8F6C\u5230\u4E0A\u4E00\u4E2A\u7A81\u51FA\u663E\u793A\u7684\u7B26\u53F7","\u89E6\u53D1\u7B26\u53F7\u9AD8\u4EAE"],"vs/editor/contrib/wordOperations/browser/wordOperations":["\u5220\u9664 Word"],"vs/platform/actions/browser/menuEntryActionViewItem":["{0} ({1})","{0} ({1})"],"vs/platform/configuration/common/configurationRegistry":["\u9ED8\u8BA4\u8BED\u8A00\u914D\u7F6E\u66FF\u4EE3","\u914D\u7F6E\u8981\u4E3A {0} \u8BED\u8A00\u66FF\u4EE3\u7684\u8BBE\u7F6E\u3002","\u9488\u5BF9\u67D0\u79CD\u8BED\u8A00\uFF0C\u914D\u7F6E\u66FF\u4EE3\u7F16\u8F91\u5668\u8BBE\u7F6E\u3002","\u6B64\u8BBE\u7F6E\u4E0D\u652F\u6301\u6309\u8BED\u8A00\u914D\u7F6E\u3002","\u9488\u5BF9\u67D0\u79CD\u8BED\u8A00\uFF0C\u914D\u7F6E\u66FF\u4EE3\u7F16\u8F91\u5668\u8BBE\u7F6E\u3002","\u6B64\u8BBE\u7F6E\u4E0D\u652F\u6301\u6309\u8BED\u8A00\u914D\u7F6E\u3002","\u65E0\u6CD5\u6CE8\u518C\u7A7A\u5C5E\u6027",'\u65E0\u6CD5\u6CE8\u518C\u201C{0}\u201D\u3002\u5176\u7B26\u5408\u63CF\u8FF0\u7279\u5B9A\u8BED\u8A00\u7F16\u8F91\u5668\u8BBE\u7F6E\u7684\u8868\u8FBE\u5F0F "\\\\[.*\\\\]$"\u3002\u8BF7\u4F7F\u7528 "configurationDefaults"\u3002',"\u65E0\u6CD5\u6CE8\u518C\u201C{0}\u201D\u3002\u6B64\u5C5E\u6027\u5DF2\u6CE8\u518C\u3002"],"vs/platform/contextkey/browser/contextKeyService":["\u7528\u4E8E\u8FD4\u56DE\u4E0A\u4E0B\u6587\u952E\u7684\u76F8\u5173\u4FE1\u606F\u7684\u547D\u4EE4"],"vs/platform/contextkey/common/contextkeys":["\u64CD\u4F5C\u7CFB\u7EDF\u662F\u5426 macOS","\u64CD\u4F5C\u7CFB\u7EDF\u662F\u5426\u4E3A Linux","\u64CD\u4F5C\u7CFB\u7EDF\u662F\u5426\u4E3A Windows","\u5E73\u53F0\u662F\u5426\u4E3A Web \u6D4F\u89C8\u5668","\u64CD\u4F5C\u7CFB\u7EDF\u662F\u5426\u662F\u975E\u6D4F\u89C8\u5668\u5E73\u53F0\u4E0A\u7684 macOS","\u64CD\u4F5C\u7CFB\u7EDF\u662F\u5426\u4E3A 
iOS","\u952E\u76D8\u7126\u70B9\u662F\u5426\u5728\u8F93\u5165\u6846\u4E2D"],"vs/platform/history/browser/contextScopedHistoryWidget":["\u5EFA\u8BAE\u662F\u5426\u53EF\u89C1"],"vs/platform/keybinding/common/abstractKeybindingService":["({0})\u5DF2\u6309\u4E0B\u3002\u6B63\u5728\u7B49\u5F85\u6309\u4E0B\u7B2C\u4E8C\u4E2A\u952E...","\u7EC4\u5408\u952E({0}\uFF0C{1})\u4E0D\u662F\u547D\u4EE4\u3002"],"vs/platform/list/browser/listService":["\u5DE5\u4F5C\u53F0","\u6620\u5C04\u4E3A `Ctrl` (Windows \u548C Linux) \u6216 `Command` (macOS)\u3002","\u6620\u5C04\u4E3A `Alt` (Windows \u548C Linux) \u6216 `Option` (macOS)\u3002","\u5728\u901A\u8FC7\u9F20\u6807\u591A\u9009\u6811\u548C\u5217\u8868\u6761\u76EE\u65F6\u4F7F\u7528\u7684\u4FEE\u6539\u952E (\u4F8B\u5982\u201C\u8D44\u6E90\u7BA1\u7406\u5668\u201D\u3001\u201C\u6253\u5F00\u7684\u7F16\u8F91\u5668\u201D\u548C\u201C\u6E90\u4EE3\u7801\u7BA1\u7406\u201D\u89C6\u56FE)\u3002\u201C\u5728\u4FA7\u8FB9\u6253\u5F00\u201D\u529F\u80FD\u6240\u9700\u7684\u9F20\u6807\u52A8\u4F5C (\u82E5\u53EF\u7528) \u5C06\u4F1A\u76F8\u5E94\u8C03\u6574\uFF0C\u4E0D\u4E0E\u591A\u9009\u4FEE\u6539\u952E\u51B2\u7A81\u3002","\u63A7\u5236\u5982\u4F55\u4F7F\u7528\u9F20\u6807\u6253\u5F00\u6811\u548C\u5217\u8868\u4E2D\u7684\u9879(\u82E5\u652F\u6301)\u3002\u8BF7\u6CE8\u610F\uFF0C\u5982\u679C\u6B64\u8BBE\u7F6E\u4E0D\u9002\u7528\uFF0C\u67D0\u4E9B\u6811\u548C\u5217\u8868\u53EF\u80FD\u4F1A\u9009\u62E9\u5FFD\u7565\u5B83\u3002","\u63A7\u5236\u5217\u8868\u548C\u6811\u662F\u5426\u652F\u6301\u5DE5\u4F5C\u53F0\u4E2D\u7684\u6C34\u5E73\u6EDA\u52A8\u3002\u8B66\u544A: \u6253\u5F00\u6B64\u8BBE\u7F6E\u5F71\u54CD\u4F1A\u5F71\u54CD\u6027\u80FD\u3002","\u63A7\u5236\u6811\u7F29\u8FDB(\u4EE5\u50CF\u7D20\u4E3A\u5355\u4F4D)\u3002","\u63A7\u5236\u6811\u662F\u5426\u5E94\u5448\u73B0\u7F29\u8FDB\u53C2\u8003\u7EBF\u3002","\u63A7\u5236\u5217\u8868\u548C\u6811\u662F\u5426\u5177\u6709\u5E73\u6ED1\u6EDA\u52A8\u6548\u679C\u3002","\u5BF9\u9F20\u6807\u6EDA\u8F6E\u6EDA\u52A8\u4E8B\u4EF6\u7684 `deltaX` \u548C `deltaY` \u4E58\u4E0A\u7684\u7CFB\u6570\u3002",'\u6309\u4E0B"Alt"\u65F6\u6EDA\u52A8\u901F\u5EA6\u500D\u589E\u3002',"\u7B80\u5355\u952E\u76D8\u5BFC\u822A\u805A\u7126\u4E0E\u952E\u76D8\u8F93\u5165\u76F8\u5339\u914D\u7684\u5143\u7D20\u3002\u4EC5\u5BF9\u524D\u7F00\u8FDB\u884C\u5339\u914D\u3002","\u9AD8\u4EAE\u952E\u76D8\u5BFC\u822A\u4F1A\u7A81\u51FA\u663E\u793A\u4E0E\u952E\u76D8\u8F93\u5165\u76F8\u5339\u914D\u7684\u5143\u7D20\u3002\u8FDB\u4E00\u6B65\u5411\u4E0A\u548C\u5411\u4E0B\u5BFC\u822A\u5C06\u4EC5\u904D\u5386\u7A81\u51FA\u663E\u793A\u7684\u5143\u7D20\u3002","\u7B5B\u9009\u5668\u952E\u76D8\u5BFC\u822A\u5C06\u7B5B\u9009\u51FA\u5E76\u9690\u85CF\u4E0E\u952E\u76D8\u8F93\u5165\u4E0D\u5339\u914D\u7684\u6240\u6709\u5143\u7D20\u3002","\u63A7\u5236\u5DE5\u4F5C\u53F0\u4E2D\u7684\u5217\u8868\u548C\u6811\u7684\u952E\u76D8\u5BFC\u822A\u6837\u5F0F\u3002\u5B83\u53EF\u4E3A\u201C\u7B80\u5355\u201D\u3001\u201C\u7A81\u51FA\u663E\u793A\u201D\u6216\u201C\u7B5B\u9009\u201D\u3002","\u63A7\u5236\u5217\u8868\u548C\u6811\u4E2D\u7684\u952E\u76D8\u5BFC\u822A\u662F\u5426\u4EC5\u901A\u8FC7\u952E\u5165\u81EA\u52A8\u89E6\u53D1\u3002\u5982\u679C\u8BBE\u7F6E\u4E3A `false` \uFF0C\u952E\u76D8\u5BFC\u822A\u53EA\u5728\u6267\u884C `list.toggleKeyboardNavigation` 
//# sourceMappingURL=../../../min-maps/vs/editor/editor.main.nls.zh-cn.js.map
|
PypiClean
|
/pyqode3.core-3.2.35.tar.gz/pyqode3.core-3.2.35/pyqode/core/modes/outline.py
|
import logging
from pyqode.core.api import Mode
from pyqode.core.api import DelayJobRunner
from pyqode.core.backend import NotRunning
from pyqode.core.share import Definition
from pyqode.qt import QtCore
def _logger():
return logging.getLogger(__name__)
class OutlineMode(Mode, QtCore.QObject):
"""
Generic mode that provides outline information through the
document_changed signal and a specialised worker function.
To use this mode, you need to write a worker function that returns a list
of pyqode.core.share.Definition (see
pyqode.python.backend.workers.defined_names() for an example of how to
implement the worker function).
"""
#: Signal emitted when the document structure changed.
document_changed = QtCore.Signal()
@property
def definitions(self):
"""
Gets the list of top level definitions.
"""
return self._results
def __init__(self, worker, delay=1000):
Mode.__init__(self)
QtCore.QObject.__init__(self)
self._worker = worker
self._jobRunner = DelayJobRunner(delay=delay)
#: The list of definitions found in the file, each item is a
#: pyqode.core.share.Definition.
self._results = []
def on_state_changed(self, state):
if state:
self.editor.new_text_set.connect(self._run_analysis)
self.editor.textChanged.connect(self._request_analysis)
else:
self.editor.textChanged.disconnect(self._request_analysis)
self.editor.new_text_set.disconnect(self._run_analysis)
self._jobRunner.cancel_requests()
def _request_analysis(self):
self._jobRunner.request_job(self._run_analysis)
def _run_analysis(self):
try:
self.editor.file
self.editor.toPlainText()
except (RuntimeError, AttributeError):
# called by the timer after the editor got deleted
return
if self.enabled:
request_data = {
'code': self.editor.toPlainText(),
'path': self.editor.file.path,
'encoding': self.editor.file.encoding
}
try:
self.editor.backend.send_request(
self._worker, request_data,
on_receive=self._on_results_available)
except NotRunning:
QtCore.QTimer.singleShot(100, self._run_analysis)
else:
self._results = []
self.document_changed.emit()
def _on_results_available(self, results):
if results:
results = [Definition.from_dict(ddict) for ddict in results]
self._results = results
if self._results is not None:
_logger().log(5, "Document structure changed")
self.document_changed.emit()
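# A minimal sketch of a worker function usable with OutlineMode. It runs in
# the backend process and receives the request_data dictionary built by
# _run_analysis() above. The Definition(name, line) constructor signature and
# the to_dict() serializer are assumptions mirroring the from_dict() call in
# _on_results_available(); see pyqode.python.backend.workers.defined_names()
# for a real implementation.
def toy_outline_worker(request_data):
    definitions = []
    for line_nbr, line in enumerate(request_data['code'].splitlines()):
        stripped = line.strip()
        if stripped.startswith('def ') or stripped.startswith('class '):
            # crude name extraction: token after the keyword, minus args/colon
            name = stripped.split()[1].split('(')[0].rstrip(':')
            definitions.append(Definition(name, line_nbr).to_dict())
    return definitions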
|
PypiClean
|
/snowcross-0.3.0.tar.gz/snowcross-0.3.0/README.md
|
# snowcross-python
Adaptors for tools and services in a Snowflake-centric data platform.
> **WARNING** While this repository is private, releases are published _publicly_ to
> [PyPI](https://pypi.org/project/snowcross/)! All commits, pull requests and this readme are
> hidden; however, the source code is easily extracted from published artifacts. Refrain from
> including any references to proprietary code, infrastructure or architecture. This package is
> for generic utilities for tools and services only.
## Requirements
Requires Python 3.10 or above.
## Usage
You can install from [PyPI](https://pypi.org/project/snowcross/):
```shell
pip install snowcross
```
When using the `locator` functionality, install the package with its extra dependencies, as shown below.
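```shell
pip install "snowcross[locator]"
```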
|
PypiClean
|
/django-cms-4.1.0rc4.tar.gz/django-cms-4.1.0rc4/cms/app_registration.py
|
import inspect
from functools import lru_cache
from importlib import import_module
from django.apps import apps
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import module_has_submodule
from cms.app_base import CMSAppConfig, CMSAppExtension
from cms.constants import CMS_CONFIG_NAME
def _find_subclasses(module, klass):
"""
Helper function.
Returns a list of classes in module which inherit from klass.
"""
classes = []
# Find all classes that inherit from klass
for name, obj in inspect.getmembers(module):
is_subclass = (
# Ignore the import of klass itself
inspect.isclass(obj) and issubclass(obj, klass) and obj != klass
)
if is_subclass:
classes.append(obj)
return classes
def _find_config(cms_module):
"""
Helper function.
Returns the class inheriting from CMSAppConfig in the given module.
If no such class exists in the module, returns None.
If multiple classes inherit from CMSAppConfig, raises
ImproperlyConfigured exception.
"""
cms_config_classes = _find_subclasses(cms_module, CMSAppConfig)
if len(cms_config_classes) == 1:
return cms_config_classes[0]
elif len(cms_config_classes) > 1:
raise ImproperlyConfigured(
"cms_config.py files can't define more than one "
"class which inherits from CMSAppConfig")
def _find_extension(cms_module):
"""
Helper function.
Returns the class inheriting from CMSAppExtension in the given module.
If no such class exists in the module, returns None.
If multiple classes inherit from CMSAppExtension, raises
ImproperlyConfigured exception.
"""
cms_extension_classes = _find_subclasses(cms_module, CMSAppExtension)
if len(cms_extension_classes) == 1:
return cms_extension_classes[0]
elif len(cms_extension_classes) > 1:
raise ImproperlyConfigured(
"cms_config.py files can't define more than one "
"class which inherits from CMSAppExtension")
def autodiscover_cms_configs():
"""
Find and import all cms_config.py files. Add a cms_app attribute
to django's app config with an instance of the cms config.
"""
for app_config in apps.get_app_configs():
try:
cms_module = import_module(
'%s.%s' % (app_config.name, CMS_CONFIG_NAME))
        except Exception: # NOQA
# If something in cms_config.py raises an exception let that
# exception bubble up. Only catch the exception if
# cms_config.py doesn't exist
if module_has_submodule(app_config.module, CMS_CONFIG_NAME):
raise
else:
config = _find_config(cms_module)
extension = _find_extension(cms_module)
# We are adding these attributes here rather than in
# django's app config definition because there are
# all kinds of limitations as to what can be imported
# in django's apps.py and leaving it to devs to define this
# there could cause issues
if config:
app_config.cms_config = config(app_config)
if extension:
app_config.cms_extension = extension()
if not config and not extension:
raise ImproperlyConfigured(
"cms_config.py files must define at least one "
"class which inherits from CMSAppConfig or "
"CMSAppExtension")
@lru_cache(maxsize=None)
def get_cms_extension_apps():
"""
Returns django app configs of apps with a cms extension
"""
# NOTE: The cms_extension attr is added by the autodiscover_cms_configs
# function if a cms_config.py file with a suitable class is found.
cms_apps = [app_config for app_config in apps.get_app_configs()
if hasattr(app_config, 'cms_extension')]
return cms_apps
@lru_cache(maxsize=None)
def get_cms_config_apps():
"""
Returns django app configs of apps with a cms config
"""
# NOTE: The cms_config attr is added by the autodiscover_cms_configs
# function if a cms_config.py file with a suitable class is found.
cms_apps = [app_config for app_config in apps.get_app_configs()
if hasattr(app_config, 'cms_config')]
return cms_apps
def configure_cms_apps(apps_with_features):
"""
Check installed apps for apps that are configured to use cms addons
and run code to register them with their config
"""
for app_with_feature in apps_with_features:
enabled_property = "{}_enabled".format(app_with_feature.label)
configure_app = app_with_feature.cms_extension.configure_app
for app_config in get_cms_config_apps():
if getattr(app_config.cms_config, enabled_property, False):
# Feature enabled for this app so configure
configure_app(app_config.cms_config)
def ready_cms_apps(apps_with_features):
"""Run ready() methods on every registered cms extension,
so that final checks can happen after all apps have been configured
"""
for app_with_feature in apps_with_features:
app_with_feature.cms_extension.ready()
# TODO: Remove this function once backwards compatibility is deprecated
def backwards_compatibility_config():
"""
Ensure that old ways of configuring and setting up things (plugin
pools etc.) still work.
NOTE: The autodiscover code has been copied over from
django.utils.module_loading.autodiscover_modules
This is because django's autodiscover function imports
django.apps.apps from within the body of the function, which
interferes with backwards compatibility testing. The testing is
non-trivial and requires the patching of django.apps.apps (which
is impossible when imports are done inside of functions).
"""
# The old pools defined various discover methods that looked for
# specific files and ran the register code in them. Listing the
# names of the files here.
modules_to_autodiscover = ['cms_wizards']
for module in modules_to_autodiscover:
for app_config in apps.get_app_configs():
try:
import_module('%s.%s' % (app_config.name, module))
except Exception: # NOQA
# Decide whether to bubble up this error. If the app just
# doesn't have the module in question, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module):
raise
|
PypiClean
|
/django-rflatpages-0.0.9.tar.gz/django-rflatpages-0.0.9/rflatpages/rflatpages-example/project/static/js/tiny_mce/themes/advanced/js/link.js
|
tinyMCEPopup.requireLangPack();
var LinkDialog = {
preInit : function() {
var url;
if (url = tinyMCEPopup.getParam("external_link_list_url"))
document.write('<script language="javascript" type="text/javascript" src="' + tinyMCEPopup.editor.documentBaseURI.toAbsolute(url) + '"></script>');
},
init : function() {
		var f = document.forms[0], ed = tinyMCEPopup.editor, e; // declare e to avoid an implicit global
// Setup browse button
document.getElementById('hrefbrowsercontainer').innerHTML = getBrowserHTML('hrefbrowser', 'href', 'file', 'theme_advanced_link');
if (isVisible('hrefbrowser'))
document.getElementById('href').style.width = '180px';
this.fillClassList('class_list');
this.fillFileList('link_list', 'tinyMCELinkList');
this.fillTargetList('target_list');
if (e = ed.dom.getParent(ed.selection.getNode(), 'A')) {
f.href.value = ed.dom.getAttrib(e, 'href');
f.linktitle.value = ed.dom.getAttrib(e, 'title');
f.insert.value = ed.getLang('update');
selectByValue(f, 'link_list', f.href.value);
selectByValue(f, 'target_list', ed.dom.getAttrib(e, 'target'));
selectByValue(f, 'class_list', ed.dom.getAttrib(e, 'class'));
}
},
update : function() {
var f = document.forms[0], ed = tinyMCEPopup.editor, e, b;
tinyMCEPopup.restoreSelection();
e = ed.dom.getParent(ed.selection.getNode(), 'A');
// Remove element if there is no href
if (!f.href.value) {
if (e) {
tinyMCEPopup.execCommand("mceBeginUndoLevel");
b = ed.selection.getBookmark();
ed.dom.remove(e, 1);
ed.selection.moveToBookmark(b);
tinyMCEPopup.execCommand("mceEndUndoLevel");
tinyMCEPopup.close();
return;
}
}
tinyMCEPopup.execCommand("mceBeginUndoLevel");
// Create new anchor elements
if (e == null) {
ed.getDoc().execCommand("unlink", false, null);
tinyMCEPopup.execCommand("CreateLink", false, "#mce_temp_url#", {skip_undo : 1});
tinymce.each(ed.dom.select("a"), function(n) {
if (ed.dom.getAttrib(n, 'href') == '#mce_temp_url#') {
e = n;
ed.dom.setAttribs(e, {
href : f.href.value,
title : f.linktitle.value,
target : f.target_list ? getSelectValue(f, "target_list") : null,
'class' : f.class_list ? getSelectValue(f, "class_list") : null
});
}
});
} else {
ed.dom.setAttribs(e, {
href : f.href.value,
title : f.linktitle.value,
target : f.target_list ? getSelectValue(f, "target_list") : null,
'class' : f.class_list ? getSelectValue(f, "class_list") : null
});
}
// Don't move caret if selection was image
if (e.childNodes.length != 1 || e.firstChild.nodeName != 'IMG') {
ed.focus();
ed.selection.select(e);
ed.selection.collapse(0);
tinyMCEPopup.storeSelection();
}
tinyMCEPopup.execCommand("mceEndUndoLevel");
tinyMCEPopup.close();
},
checkPrefix : function(n) {
if (n.value && Validator.isEmail(n) && !/^\s*mailto:/i.test(n.value) && confirm(tinyMCEPopup.getLang('advanced_dlg.link_is_email')))
n.value = 'mailto:' + n.value;
if (/^\s*www\./i.test(n.value) && confirm(tinyMCEPopup.getLang('advanced_dlg.link_is_external')))
n.value = 'http://' + n.value;
},
fillFileList : function(id, l) {
var dom = tinyMCEPopup.dom, lst = dom.get(id), v, cl;
l = window[l];
if (l && l.length > 0) {
lst.options[lst.options.length] = new Option('', '');
tinymce.each(l, function(o) {
lst.options[lst.options.length] = new Option(o[0], o[1]);
});
} else
dom.remove(dom.getParent(id, 'tr'));
},
fillClassList : function(id) {
var dom = tinyMCEPopup.dom, lst = dom.get(id), v, cl;
if (v = tinyMCEPopup.getParam('theme_advanced_styles')) {
cl = [];
tinymce.each(v.split(';'), function(v) {
var p = v.split('=');
cl.push({'title' : p[0], 'class' : p[1]});
});
} else
cl = tinyMCEPopup.editor.dom.getClasses();
if (cl.length > 0) {
lst.options[lst.options.length] = new Option(tinyMCEPopup.getLang('not_set'), '');
tinymce.each(cl, function(o) {
lst.options[lst.options.length] = new Option(o.title || o['class'], o['class']);
});
} else
dom.remove(dom.getParent(id, 'tr'));
},
fillTargetList : function(id) {
var dom = tinyMCEPopup.dom, lst = dom.get(id), v;
lst.options[lst.options.length] = new Option(tinyMCEPopup.getLang('not_set'), '');
lst.options[lst.options.length] = new Option(tinyMCEPopup.getLang('advanced_dlg.link_target_same'), '_self');
lst.options[lst.options.length] = new Option(tinyMCEPopup.getLang('advanced_dlg.link_target_blank'), '_blank');
if (v = tinyMCEPopup.getParam('theme_advanced_link_targets')) {
tinymce.each(v.split(','), function(v) {
v = v.split('=');
lst.options[lst.options.length] = new Option(v[0], v[1]);
});
}
}
};
LinkDialog.preInit();
tinyMCEPopup.onInit.add(LinkDialog.init, LinkDialog);
|
PypiClean
|
/mle_hyperopt-0.0.9-py3-none-any.whl/mle_hyperopt/strategies/grid.py
|
from typing import Optional, List
from ..strategy import Strategy
from ..spaces import GridSpace
from ..utils import visualize_2D_grid, print_grid_hello
class GridSearch(Strategy):
def __init__(
self,
real: Optional[dict] = None,
integer: Optional[dict] = None,
categorical: Optional[dict] = None,
search_config: Optional[dict] = None,
maximize_objective: bool = False,
fixed_params: Optional[dict] = None,
reload_path: Optional[str] = None,
reload_list: Optional[list] = None,
seed_id: int = 42,
verbose: bool = False,
):
"""Grid Search Strategy.
Args:
real (Optional[dict], optional):
Dictionary of real-valued search variables & their resolution.
E.g. {"lrate": {"begin": 0.1, "end": 0.5, "bins": 5}}
Defaults to None.
integer (Optional[dict], optional):
Dictionary of integer-valued search variables & their resolution.
E.g. {"batch_size": {"begin": 1, "end": 5, "bins": 5}}
Defaults to None.
categorical (Optional[dict], optional):
Dictionary of categorical-valued search variables.
E.g. {"arch": ["mlp", "cnn"]}
Defaults to None.
search_config (dict, optional): Grid search hyperparameters.
Defaults to None.
maximize_objective (bool, optional): Whether to maximize objective.
Defaults to False.
fixed_params (Optional[dict], optional):
Fixed parameters that will be added to all configurations.
Defaults to None.
reload_path (Optional[str], optional):
Path to load previous search log from. Defaults to None.
reload_list (Optional[list], optional):
List of previous results to reload. Defaults to None.
seed_id (int, optional):
Random seed for reproducibility. Defaults to 42.
verbose (bool, optional):
Option to print intermediate results. Defaults to False.
"""
self.search_name = "Grid"
self.space = GridSpace(real, integer, categorical)
Strategy.__init__(
self,
real,
integer,
categorical,
search_config,
maximize_objective,
fixed_params,
reload_path,
reload_list,
seed_id,
verbose,
)
# Generate all possible combinations of param configs in list & loop
# over the list when doing the grid search
self.num_param_configs = len(self.space)
self.grid_counter = self.eval_counter
# Add start-up message printing the search space
if self.verbose:
self.print_hello()
self.print_hello_strategy()
def ask_search(self, batch_size: int) -> List[dict]:
"""Get proposals to eval next (in batches) - Grid Search.
Args:
batch_size (int): Number of desired configurations
Returns:
List[dict]: List of configuration dictionaries
"""
        # Set the grid counter to the eval counter in order to ensure that
        # results for a grid configuration are collected before continuing
grid_counter = self.eval_counter
param_batch = []
# Sample a new configuration for each eval in the batch
while (
len(param_batch) < batch_size
and grid_counter < self.num_param_configs
):
            # Get the next parameter candidate from the grid
            proposal_params = self.space.sample(grid_counter)
if proposal_params not in (self.all_evaluated_params + param_batch):
# Add parameter proposal to the batch list
param_batch.append(proposal_params)
grid_counter += 1
else:
# Otherwise continue sampling proposals
continue
return param_batch
def update_search(self) -> None:
"""Update search log data - Grid Search"""
# Make sure that the grid_counter equals the eval_counter
# This is only relevant if we load in new log data mid-search
self.grid_counter = self.eval_counter
def plot_grid(
self,
fixed_params: Optional[dict] = None,
params_to_plot: List[str] = [],
target_to_plot: str = "objective",
plot_title: str = "Temp Title",
plot_subtitle: Optional[str] = None,
xy_labels: Optional[List[str]] = ["x-label", "y-label"],
variable_name: Optional[str] = "Var Label",
every_nth_tick: int = 1,
fname: Optional[str] = None,
):
"""Plot 2D heatmap of evaluations.
Args:
fixed_params (Optional[dict], optional):
Dict of parameter keys and values to fix for plot. Defaults to None.
params_to_plot (List[str], optional):
Parameter names to plot. Defaults to [].
target_to_plot (str, optional):
Name of variable to plot. Defaults to "objective".
plot_title (str, optional):
Title of figure plot. Defaults to "Temp Title".
plot_subtitle (Optional[str], optional):
Subtitle of figure plot. Defaults to None.
xy_labels (Optional[List[str]], optional):
Label names. Defaults to ["x-label", "y-label"].
variable_name (Optional[str], optional):
Name of variable in heatmap bar. Defaults to "Var Label".
every_nth_tick (int, optional):
Controls spacing between ticks. Defaults to 1.
fname (Optional[str], optional):
File name to save plot to. Defaults to None.
Returns:
[type]: Figure and axis matplotlib objects
"""
fig, ax = visualize_2D_grid(
self.df,
fixed_params,
params_to_plot,
target_to_plot,
plot_title,
plot_subtitle,
xy_labels,
variable_name,
every_nth_tick,
)
# Save the figure if a filename was provided
if fname is not None:
fig.savefig(fname, dpi=300)
else:
return fig, ax
def print_hello_strategy(self) -> None:
"""Hello message specific to grid search."""
print_grid_hello(self.num_param_configs, self.space.num_dims)
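# A minimal usage sketch (search-space values are illustrative and follow the
# dictionary format documented in the class docstring). ask_search() is called
# directly here for demonstration; normally the surrounding Strategy machinery
# drives it.
if __name__ == "__main__":
    search = GridSearch(
        real={"lrate": {"begin": 0.1, "end": 0.5, "bins": 5}},
        categorical={"arch": ["mlp", "cnn"]},
    )
    proposals = search.ask_search(batch_size=3)
    print(proposals)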
|
PypiClean
|
/change_detection_pytorch-0.1.4.tar.gz/change_detection_pytorch-0.1.4/change_detection_pytorch/unet/model.py
|
from typing import Optional, Union, List
from .decoder import UnetDecoder
from ..encoders import get_encoder
from ..base import SegmentationModel
from ..base import SegmentationHead, ClassificationHead
class Unet(SegmentationModel):
"""Unet_ is a fully convolution neural network for image semantic segmentation. Consist of *encoder*
and *decoder* parts connected with *skip connections*. Encoder extract features of different spatial
resolution (skip connections) which are used by decoder to define accurate segmentation mask. Use *concatenation*
for fusing decoder blocks with skip connections.
Args:
encoder_name: Name of the classification model that will be used as an encoder (a.k.a backbone)
to extract features of different spatial resolution
        encoder_depth: The number of stages used in the encoder, in range [3, 5]. Each stage generates features
            two times smaller in spatial dimensions than the previous one (e.g. for depth 0 we will have features
with shapes [(N, C, H, W),], for depth 1 - [(N, C, H, W), (N, C, H // 2, W // 2)] and so on).
Default is 5
encoder_weights: One of **None** (random initialization), **"imagenet"** (pre-training on ImageNet) and
other pretrained weights (see table with available weights for each encoder_name)
decoder_channels: List of integers which specify **in_channels** parameter for convolutions used in decoder.
Length of the list should be the same as **encoder_depth**
decoder_use_batchnorm: If **True**, BatchNorm2d layer between Conv2D and Activation layers
is used. If **"inplace"** InplaceABN will be used, allows to decrease memory consumption.
Available options are **True, False, "inplace"**
decoder_attention_type: Attention module used in decoder of the model. Available options are **None** and **scse**.
SCSE paper - https://arxiv.org/abs/1808.08127
in_channels: A number of input channels for the model, default is 3 (RGB images)
        classes: The number of classes for the output mask (you can also think of it as the number of channels of the output mask)
activation: An activation function to apply after the final convolution layer.
Available options are **"sigmoid"**, **"softmax"**, **"logsoftmax"**, **"tanh"**, **"identity"**, **callable** and **None**.
Default is **None**
        aux_params: Dictionary with parameters of the auxiliary output (classification head). The auxiliary output is built
            on top of the encoder if **aux_params** is not **None** (default). Supported params:
            - classes (int): The number of classes
- pooling (str): One of "max", "avg". Default is "avg"
- dropout (float): Dropout factor in [0, 1)
- activation (str): An activation function to apply "sigmoid"/"softmax" (could be **None** to return logits)
        siam_encoder: Whether to use a siamese encoder branch. Default is True
fusion_form: The form of fusing features from two branches. Available options are **"concat"**, **"sum"**, and **"diff"**.
Default is **concat**
Returns:
``torch.nn.Module``: Unet
.. _Unet:
https://arxiv.org/abs/1505.04597
"""
def __init__(
self,
encoder_name: str = "resnet34",
encoder_depth: int = 5,
encoder_weights: Optional[str] = "imagenet",
decoder_use_batchnorm: bool = True,
decoder_channels: List[int] = (256, 128, 64, 32, 16),
decoder_attention_type: Optional[str] = None,
in_channels: int = 3,
classes: int = 1,
activation: Optional[Union[str, callable]] = None,
aux_params: Optional[dict] = None,
siam_encoder: bool = True,
fusion_form: str = "concat",
**kwargs
):
super().__init__()
self.siam_encoder = siam_encoder
self.encoder = get_encoder(
encoder_name,
in_channels=in_channels,
depth=encoder_depth,
weights=encoder_weights,
)
if not self.siam_encoder:
self.encoder_non_siam = get_encoder(
encoder_name,
in_channels=in_channels,
depth=encoder_depth,
weights=encoder_weights,
)
self.decoder = UnetDecoder(
encoder_channels=self.encoder.out_channels,
decoder_channels=decoder_channels,
n_blocks=encoder_depth,
use_batchnorm=decoder_use_batchnorm,
center=True if encoder_name.startswith("vgg") else False,
attention_type=decoder_attention_type,
fusion_form=fusion_form,
)
self.segmentation_head = SegmentationHead(
in_channels=decoder_channels[-1],
out_channels=classes,
activation=activation,
kernel_size=3,
)
if aux_params is not None:
self.classification_head = ClassificationHead(
in_channels=self.encoder.out_channels[-1], **aux_params
)
else:
self.classification_head = None
self.name = "u-{}".format(encoder_name)
self.initialize()
if __name__ == "__main__":
import torch
device = 'cuda' if torch.cuda.is_available() else 'cpu'
input1 = torch.randn(1, 3, 256, 256).to(device)
input2 = torch.randn(1, 3, 256, 256).to(device)
    net = Unet().to(device)  # move the model to the same device as the inputs
res = net.forward(input1, input2)
print(res.shape)
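    # A hedged sketch of the same model with an auxiliary classification head;
    # the aux_params keys follow the class docstring and the values here are
    # illustrative.
    net_aux = Unet(aux_params={"classes": 2, "pooling": "avg", "dropout": 0.2}).to(device)
    print(net_aux.classification_head)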
|
PypiClean
|
/legend_daq2lh5-1.0.0-py3-none-any.whl/daq2lh5/data_streamer.py
|
from __future__ import annotations
import fnmatch
import logging
from abc import ABC, abstractmethod
from .raw_buffer import RawBuffer, RawBufferLibrary, RawBufferList
log = logging.getLogger(__name__)
class DataStreamer(ABC):
"""Base clase for data streams.
Provides a uniform interface for streaming, e.g.:
>>> header = ds.open_stream(stream_name)
>>> for chunk in ds: do_something(chunk)
Also provides default management of the :class:`.RawBufferLibrary` used for
data reading: allocation (if needed), configuration (to match the stream)
and fill level checking. Derived classes must define the functions
:meth:`.get_decoder_list`, :meth:`.open_stream`, and :meth:`.read_packet`;
see below.
"""
def __init__(self) -> None:
self.rb_lib = None
self.chunk_mode = None
self.n_bytes_read = 0
self.any_full = False
self.packet_id = 0
@abstractmethod
def open_stream(
self,
stream_name: str,
rb_lib: RawBufferLibrary = None,
buffer_size: int = 8192,
chunk_mode: str = "any_full",
out_stream: str = "",
) -> tuple[list[RawBuffer], int]:
r"""Open and initialize a data stream.
Open the stream, read in the header, set up the buffers.
        Call ``super().open_stream([args])`` from the derived class after loading
        header info to run this default version, which sets up buffers in
        `rb_lib` using the stream's decoders.
Notes
-----
this default version has no actual return value! You must overload this
function, set :attr:`self.n_bytes_read` to the header packet size, and
return the header data.
Parameters
----------
stream_name
typically a filename or e.g. a port for streaming.
rb_lib
a library of buffers for readout from the data stream. `rb_lib`
will have its LGDO's initialized during this function.
buffer_size
length of buffers to be read out in :meth:`read_chunk` (for buffers
with variable length).
chunk_mode : 'any_full', 'only_full' or 'single_packet'
sets the mode use for :meth:`read_chunk`.
out_stream
optional name of output stream for default `rb_lib` generation.
Returns
-------
header_data
header_data is a list of :class:`.RawBuffer`\ 's containing any
file header data, ready for writing to file or further processing.
It's not a :class:`.RawBufferList` since the buffers may have a
different format.
"""
        # call super().open_stream([args]) to run this default code
        # after loading header info, then follow it with the return call.
# store chunk mode
self.chunk_mode = chunk_mode
# prepare rb_lib -- its lgdo's should still be uninitialized
if rb_lib is None:
rb_lib = self.build_default_rb_lib(out_stream=out_stream)
self.rb_lib = rb_lib
# now initialize lgdo's for raw buffers
decoders = self.get_decoder_list()
dec_names = []
for decoder in decoders:
dec_name = type(decoder).__name__
# set up wildcard decoder buffers
if dec_name not in rb_lib:
if "*" not in rb_lib:
continue # user didn't want this decoder
rb_lib[dec_name] = RawBufferList()
dec_key = dec_name
if dec_key.endswith("Decoder"):
dec_key = dec_key.removesuffix("Decoder")
out_name = rb_lib["*"][0].out_name.format(name=dec_key)
out_stream = rb_lib["*"][0].out_stream.format(name=dec_key)
proc_spec = rb_lib["*"][0].proc_spec
key_lists = decoder.get_key_lists()
for ii, key_list in enumerate(key_lists):
this_name = out_name
if len(key_lists) > 1:
if len(key_list) == 1:
this_name = f"{out_name}_{key_list[0]}"
else:
this_name = f"{out_name}_{ii}"
rb = RawBuffer(
key_list=key_list,
out_stream=out_stream,
out_name=this_name,
proc_spec=proc_spec,
)
rb_lib[dec_name].append(rb)
# dec_name is in rb_lib: store the name, and initialize its buffer lgdos
dec_names.append(dec_name)
# set up wildcard key buffers
for rb in rb_lib[dec_name]:
if (
len(rb.key_list) == 1
and isinstance(rb.key_list[0], str)
and "*" in rb.key_list[0]
):
matched_key_lists = []
for key_list in decoder.get_key_lists():
# special case: decoders without keys
if rb.key_list[0] == "*" and key_list == [None]:
matched_key_lists.append(key_list)
continue
key_type = type(key_list[0])
for ik in range(len(key_list)):
key_list[ik] = str(key_list[ik])
matched_keys = fnmatch.filter(key_list, rb.key_list[0])
                        if len(matched_keys) > 0:
                            for ik in range(len(matched_keys)):
                                # convert the matched keys back to their original type
                                matched_keys[ik] = key_type(matched_keys[ik])
                            matched_key_lists.append(matched_keys)
if len(matched_key_lists) == 0:
log.warning(
f"no matched keys for key_list {rb.key_list[0]} in {dec_name}.{rb.out_name}"
)
continue
rb.key_list = sum(matched_key_lists, [])
keyed_name_rbs = []
ii = 0
while ii < len(rb_lib[dec_name]):
if "{key" in rb_lib[dec_name][ii].out_name:
keyed_name_rbs.append(rb_lib[dec_name].pop(ii))
else:
ii += 1
for rb in keyed_name_rbs:
for key in rb.key_list:
# keys can be strs or ints; try as-is, but can get a
# ValueError e.g. when using a wildcard with int keys. In
# that case, switch to the other type and try again
try:
expanded_name = rb.out_name.format(key=key)
except ValueError:
if isinstance(key, str):
key = int(key)
else:
key = str(key)
expanded_name = rb.out_name.format(key=key)
new_rb = RawBuffer(
key_list=[key],
out_stream=rb.out_stream,
out_name=expanded_name,
proc_spec=rb.proc_spec,
)
rb_lib[dec_name].append(new_rb)
for rb in rb_lib[dec_name]:
# use the first available key
key = rb.key_list[0] if len(rb.key_list) > 0 else None
rb.lgdo = decoder.make_lgdo(key=key, size=buffer_size)
# make sure there were no entries in rb_lib that weren't among the
# decoders. If so, just emit a warning and continue.
if "*" in rb_lib:
rb_lib.pop("*")
for dec_name in rb_lib.keys():
if dec_name not in dec_names:
log.warning(f"no decoder named '{dec_name}' requested by rb_lib")
@abstractmethod
def close_stream(self) -> None:
"""Close this data stream.
.. note::
Needs to be overloaded.
"""
pass
@abstractmethod
def read_packet(self) -> bool:
"""Reads a single packet's worth of data in to the :class:`.RawBufferLibrary`.
Needs to be overloaded. Gets called by :meth:`.read_chunk` Needs to
update :attr:`self.any_full` if any buffers would possibly over-fill on
the next read. Needs to update :attr:`self.n_bytes_read` too.
Returns
-------
still_has_data
returns `True` while there is still data to read.
"""
return True
def read_chunk(
self,
chunk_mode_override: str = None,
rp_max: int = 1000000,
clear_full_buffers: bool = True,
    ) -> list[RawBuffer]:
"""Reads a chunk of data into raw buffers.
Reads packets until at least one buffer is too full to perform another
read. Default version just calls :meth:`.read_packet` over and over.
Overload as necessary.
Notes
-----
        The user is responsible for resetting / clearing the raw buffers prior
        to calling :meth:`.read_chunk` again.
Parameters
----------
chunk_mode_override : 'any_full', 'only_full' or 'single_packet'
- ``None`` : do not override self.chunk_mode
- ``any_full`` : returns all raw buffers with data as soon as any one
buffer gets full
- ``only_full`` : returns only those raw buffers that became full (or
nearly full) during the read. This minimizes the number of write calls.
- ``single_packet`` : returns all raw buffers with data after a single
read is performed. This is useful for streaming data out as soon
as it is read in (e.g. for diagnostics or in-line analysis).
rp_max
maximum number of packets to read before returning anyway, even if
one of the other conditions is not met.
clear_full_buffers
automatically clear any buffers that report themselves as being
full prior to reading the chunk. Set to `False` if clearing
manually for a minor speed-up.
Returns
-------
        chunk_list : list of RawBuffer
            chunk_list is the list of RawBuffers with data ready for writing to
            file or further processing. The list contains all buffers with data
            or just the full buffers, depending on the chunk mode (only_full).
            Note chunk_list is not a RawBufferList since the RawBuffers inside
            may not all have the same structure
"""
if clear_full_buffers:
self.rb_lib.clear_full()
self.any_full = False
chunk_mode = (
self.chunk_mode if chunk_mode_override is None else chunk_mode_override
)
read_one_packet = chunk_mode == "single_packet"
only_full = chunk_mode == "only_full"
n_packets = 0
still_has_data = True
while True:
still_has_data = self.read_packet()
if not still_has_data:
break
n_packets += 1
if read_one_packet or n_packets > rp_max:
break
if self.any_full:
break
# send back all rb's with data if we finished reading
if not still_has_data:
only_full = False
list_of_rbs = []
for rb_list in self.rb_lib.values():
for rb in rb_list:
if not only_full: # any_full or read_one_packet
if rb.loc > 0:
list_of_rbs.append(rb)
elif rb.is_full():
list_of_rbs.append(rb)
if not still_has_data:
log.debug(f"decoding complete. flushing {len(list_of_rbs)} buffers")
return list_of_rbs
@abstractmethod
def get_decoder_list(self) -> list:
"""Returns a list of decoder objects for this data stream.
Notes
-----
Needs to be overloaded. Gets called during :meth:`.open_stream`.
"""
return []
def build_default_rb_lib(self, out_stream: str = "") -> RawBufferLibrary:
"""Build the most basic :class:`~.RawBufferLibrary` that will work for
this stream.
A :class:`.RawBufferList` containing a single :class:`.RawBuffer` is
built for each decoder name returned by :meth:`.get_decoder_list`. Each
buffer's `out_name` is set to the decoder name. The LGDO's do not get
initialized.
"""
rb_lib = RawBufferLibrary()
decoders = self.get_decoder_list()
if len(decoders) == 0:
log.warning(
f"no decoders returned by get_decoder_list() for {type(self).__name__}"
)
return rb_lib
for decoder in decoders:
dec_name = type(decoder).__name__
dec_key = dec_name
if dec_key.endswith("Decoder"):
dec_key = dec_key.removesuffix("Decoder")
key_lists = decoder.get_key_lists()
for ii, key_list in enumerate(key_lists):
this_name = dec_key
if len(key_lists) > 1:
if len(key_list) == 1:
this_name = f"{dec_key}_{key_list[0]}"
else:
this_name = f"{dec_key}_{ii}"
rb = RawBuffer(
key_list=key_list, out_stream=out_stream, out_name=this_name
)
if dec_name not in rb_lib:
rb_lib[dec_name] = RawBufferList()
rb_lib[dec_name].append(rb)
return rb_lib
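# A hedged sketch of a minimal concrete streamer built on this base class. The
# fixed-size toy packet format and attribute names below are illustrative; a
# real subclass wires read_packet() up to its decoders and raw buffers.
class _ToyStreamer(DataStreamer):
    def open_stream(self, stream_name, rb_lib=None, buffer_size=8192,
                    chunk_mode="any_full", out_stream=""):
        self.fobj = open(stream_name, "rb")  # e.g. a file-backed stream
        # run the default buffer setup implemented in DataStreamer.open_stream
        super().open_stream(stream_name, rb_lib=rb_lib, buffer_size=buffer_size,
                            chunk_mode=chunk_mode, out_stream=out_stream)
        self.n_bytes_read = 0  # this toy format has no header
        return []  # no header buffers to hand back
    def close_stream(self) -> None:
        self.fobj.close()
    def get_decoder_list(self) -> list:
        return []  # a real streamer returns its packet decoder instances
    def read_packet(self) -> bool:
        data = self.fobj.read(4)  # fixed-size 4-byte toy packets
        if len(data) < 4:
            return False  # stream exhausted
        self.packet_id += 1
        self.n_bytes_read += len(data)
        return True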
|
PypiClean
|
/ray_for_mars-1.12.1-cp38-cp38-manylinux2014_x86_64.whl/ray_for_mars-1.12.1.data/purelib/ray/serve/model_wrappers.py
|
from typing import Dict, Optional, Type, Union
from ray._private.utils import import_attr
from ray.ml.checkpoint import Checkpoint
from ray.ml.predictor import Predictor
from ray.serve.drivers import InputSchemaFn, SimpleSchemaIngress
import ray
from ray import serve
def _load_checkpoint(
checkpoint: Union[Checkpoint, Dict],
) -> Checkpoint:
if isinstance(checkpoint, dict):
user_keys = set(checkpoint.keys())
expected_keys = {"checkpoint_cls", "uri"}
if user_keys != expected_keys:
raise ValueError(
"The `checkpoint` dictionary is expects keys "
f"{expected_keys} but got {user_keys}"
)
checkpoint = import_attr(checkpoint["checkpoint_cls"]).from_uri(
checkpoint["uri"]
)
assert isinstance(checkpoint, Checkpoint)
return checkpoint
def _load_predictor_cls(
predictor_cls: Union[str, Type[Predictor]],
) -> Type[Predictor]:
if isinstance(predictor_cls, str):
predictor_cls = import_attr(predictor_cls)
if not issubclass(predictor_cls, Predictor):
raise ValueError(
f"{predictor_cls} class must be a subclass of ray.ml `Predictor`"
)
return predictor_cls
class ModelWrapper(SimpleSchemaIngress):
"""Serve any Ray AIR predictor from an AIR checkpoint.
Args:
        predictor_cls(str, Type[Predictor]): The class or import path for the predictor class.
            The type must be a subclass of :class:`ray.ml.predictor.Predictor`.
        checkpoint(Checkpoint, dict): The checkpoint object or a dictionary describing
            the object.
- The checkpoint object must be a subclass of
:class:`ray.ml.checkpoint.Checkpoint`.
- The dictionary should be in the form of
``{"checkpoint_cls": "import.path.MyCheckpoint",
"uri": "uri_to_load_from"}``.
Serve will then call ``MyCheckpoint.from_uri("uri_to_load_from")`` to
instantiate the object.
input_schema(str, InputSchemaFn, None): The FastAPI input conversion
function. By default, Serve will use the
:ref:`NdArray <serve-ndarray-schema>` schema and convert to numpy array.
You can pass in any FastAPI dependency resolver that returns
an array. When you pass in a string, Serve will import it.
            Please refer to the :ref:`Serve HTTP adapters <serve-http-adapters>`
            documentation to learn more.
        batching_params(dict, None, False): Override the default parameters to
            :func:`ray.serve.batch`. Pass ``False`` to disable batching.
"""
def __init__(
self,
predictor_cls: Union[str, Type[Predictor]],
checkpoint: Union[Checkpoint, Dict],
input_schema: Union[
str, InputSchemaFn
] = "ray.serve.http_adapters.json_to_ndarray",
batching_params: Optional[Union[Dict[str, int], bool]] = None,
):
predictor_cls = _load_predictor_cls(predictor_cls)
checkpoint = _load_checkpoint(checkpoint)
self.model = predictor_cls.from_checkpoint(checkpoint)
# Configure Batching
if batching_params is False:
# Inject noop decorator to disable batching
batching_decorator = lambda f: f # noqa: E731
else:
batching_params = batching_params or dict()
batching_decorator = serve.batch(**batching_params)
@batching_decorator
async def batched_predict(inp):
out = self.model.predict(inp)
if isinstance(out, ray.ObjectRef):
out = await out
return out
self.batched_predict = batched_predict
super().__init__(input_schema)
async def predict(self, inp):
"""Perform inference directly without HTTP."""
return await self.batched_predict(inp)
@serve.deployment
class ModelWrapperDeployment(ModelWrapper):
"""Ray Serve Deployment of the ModelWrapper class."""
|
PypiClean
|
/sdksio_verizon_apis_sdk-1.0.0-py3-none-any.whl/verizon/controllers/uicc_device_profile_management_controller.py
|
from verizon.api_helper import APIHelper
from verizon.configuration import Server
from verizon.http.api_response import ApiResponse
from verizon.controllers.base_controller import BaseController
from apimatic_core.request_builder import RequestBuilder
from apimatic_core.response_handler import ResponseHandler
from apimatic_core.types.parameter import Parameter
from verizon.http.http_method_enum import HttpMethodEnum
from apimatic_core.authentication.multiple.single_auth import Single
from apimatic_core.authentication.multiple.and_auth_group import And
from apimatic_core.authentication.multiple.or_auth_group import Or
from verizon.models.request_response import RequestResponse
from verizon.models.device_management_result import DeviceManagementResult
from verizon.exceptions.rest_error_response_exception import RestErrorResponseException
from verizon.exceptions.connectivity_management_result_exception import ConnectivityManagementResultException
class UICCDeviceProfileManagementController(BaseController):
"""A Controller to access Endpoints in the verizon API."""
def __init__(self, config):
super(UICCDeviceProfileManagementController, self).__init__(config)
def disable_local_profile(self,
body):
"""Does a POST request to /v1/devices/profile/actions/disable.
Disable a local profile on eUICC devices. The default or boot profile
will become the enabled profile.
Args:
body (ProfileChangeStateRequest): Update state
Returns:
            ApiResponse: An object with the response value as well as other
                useful information such as status codes and headers. Request
                ID received on a successful response.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.M2M)
.path('/v1/devices/profile/actions/disable')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(RequestResponse.from_dictionary)
.is_api_response(True)
.local_error('400', 'Error Response', RestErrorResponseException)
).execute()
def download_local_profile_to_disable(self,
body):
"""Does a POST request to /v1/devices/profile/actions/download_disable.
Downloads an eUICC local profile to devices and leaves the profile
disabled.
Args:
body (ProfileChangeStateRequest): Device Profile Query
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Request
ID received on a successful response.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.M2M)
.path('/v1/devices/profile/actions/download_disable')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(DeviceManagementResult.from_dictionary)
.is_api_response(True)
.local_error('400', 'Error response.', ConnectivityManagementResultException)
).execute()
def delete_local_profile(self,
body):
"""Does a POST request to /v1/devices/profile/actions/delete.
Delete a local profile from eUICC devices. If the local profile is
enabled, it will first be disabled and the boot or default profile
will be enabled.
Args:
body (ProfileChangeStateRequest): Update state
Returns:
            ApiResponse: An object with the response value as well as other
                useful information such as status codes and headers. Request
                ID received on a successful response.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.M2M)
.path('/v1/devices/profile/actions/delete')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(RequestResponse.from_dictionary)
.is_api_response(True)
.local_error('400', 'Error Response', RestErrorResponseException)
).execute()
def download_local_profile_to_enable(self,
body):
"""Does a POST request to /v1/devices/profile/actions/download_enable.
Downloads an eUICC local profile to devices and enables the profile.
Args:
body (ProfileChangeStateRequest): Device Profile Query
Returns:
ApiResponse: An object with the response value as well as other
useful information such as status codes and headers. Request
ID received on a successful response.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.M2M)
.path('/v1/devices/profile/actions/download_enable')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(DeviceManagementResult.from_dictionary)
.is_api_response(True)
.local_error('400', 'Error response.', ConnectivityManagementResultException)
).execute()
def enable_local_profile(self,
body):
"""Does a POST request to /v1/devices/profile/actions/enable.
Enable a local profile that has been downloaded to eUICC devices.
Args:
body (ProfileChangeStateRequest): Update state
Returns:
            ApiResponse: An object with the response value as well as other
                useful information such as status codes and headers. Request
                ID received on a successful response.
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
return super().new_api_call_builder.request(
RequestBuilder().server(Server.M2M)
.path('/v1/devices/profile/actions/enable')
.http_method(HttpMethodEnum.POST)
.header_param(Parameter()
.key('Content-Type')
.value('application/json'))
.body_param(Parameter()
.value(body))
.header_param(Parameter()
.key('accept')
.value('application/json'))
.body_serializer(APIHelper.json_serialize)
.auth(Single('global'))
).response(
ResponseHandler()
.deserializer(APIHelper.json_deserialize)
.deserialize_into(RequestResponse.from_dictionary)
.is_api_response(True)
.local_error('400', 'Error Response', RestErrorResponseException)
).execute()
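# --- usage sketch (illustrative, not from the original source) ---
# The controller is constructed with the SDK configuration, and each method
# takes a ProfileChangeStateRequest body, per the docstrings above. The
# objects below are placeholders, not working values:
#
#   controller = UICCDeviceProfileManagementController(my_config)
#   result = controller.enable_local_profile(body=my_change_state_request)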
|
PypiClean
|
/robotice-0.0.1.tar.gz/robotice-0.0.1/docs/source/support/cli.rst
|
.. _cli-test:
======================
Command Line Interface
======================
Robotice Welcome!
-----------------
.. code-block:: bash
(robotice)root@control-single:/srv/robotice/service# bin/robotice -h
.. code-block:: text
usage: robotice [-r ROLE] [--version] [-d] [-v]
______ _ _
(_____ \ | | _ (_)
_____) )___ | |__ ___ _| |_ _ ____ _____
| __ // _ \| _ \ / _ (_ _) |/ ___) ___ |
| | \ \ |_| | |_) ) |_| || |_| ( (___| ____|
|_| |_\___/|____/ \___/ \__)_|\____)_____) 0.2.54
Optional arguments:
-r ROLE, --role ROLE role [reactor,monitor, ..]
--version Shows the Robotice version.
-d, --debug Defaults to env[ROBOTICE_DEBUG].
-v, --verbose Print more verbose output.
See "robotice help COMMAND" for help on a specific command.
Robotice inspect
----------------
simply pings all workers
.. code-block:: bash
(robotice)root@control-single:/srv/robotice/service# bin/robotice reactor inspect -d
+------------------------------------------------------+--------+
| Worker | Status |
+------------------------------------------------------+--------+
| [email protected] | ok |
| [email protected] | ok |
| [email protected] | ok |
| [email protected] | ok |
+------------------------------------------------------+--------+
Check config
------------
.. code-block:: bash
robotice reactor config
.. code-block:: json
{
"system_name": "box",
"cpu_arch": "x86_64",
"name": "rabbitmq1.box.robotice.cz",
"database": {
"engine": "redis",
"host": "localhost",
"port": 6379
},
"broker": "amqp://robotice:robotice@localhost:5672//robotice",
"metering": {
"host": "localhost",
"sample_rate": 1,
"port": 8125
},
"environment": "dev",
"os_family": "Debian",
"debug": true,
"dsn": "http://##:##@host/number"
}
Robotice run
----------------
.. code-block:: bash
(robotice)root@control-single:/srv/robotice/service# bin/robotice run reactor
(robotice)root@control-single:/srv/robotice/service# bin/robotice run monitor -d
(robotice)root@control-single:/srv/robotice/service# bin/robotice run api
|
PypiClean
|
/vtex-client-0.1.0.tar.gz/vtex-client-0.1.0/vtex_client/base.py
|
from __future__ import unicode_literals
import json
import requests
from . import faults
class BaseClient(object):
"""Base client for VTEX webservice"""
api_url = "https://api.vtexpayments.com.br/{}"
def _get_headers(self):
return {'CONTENT-TYPE': 'application/json'}
def _handle_error(self, response):
status = response.status_code
response_data = response.json()
if 'error' in response_data:
error_message = response_data['error']['message']
error_code = response_data['error']['code']
elif 'Message' in response_data:
error_message = response_data['Message']
error_code = None
else:
raise KeyError("Response does not contain the expected errorkeys")
if status == 400:
raise faults.InvalidDataError(error_message, error_code)
elif status in (401, 403):
raise faults.AuthorizationError(error_message, error_code)
elif status == 500:
raise faults.GetewayError(error_message, error_code)
else:
raise ValueError("{} is a invalid status code".format(status))
def _make_url(self, url_sufix):
return self.api_url.format(url_sufix)
def _make_request(self, url_sufix, method, data=None):
"""Send a request to gateway and handles error responses.
:param url_sufix: Endpoint url
:param method: HTTP verb used in request
:param data: Data to be sent to gateway
:returns: Loaded JSON response of request
"""
if not data:
data = {}
url = self._make_url(url_sufix)
response = getattr(requests, method)(url,
data=json.dumps(data),
headers=self._get_headers())
if response.status_code != 200:
return self._handle_error(response)
return response.json() if response.text else {}
class BaseAuthenticatedClient(BaseClient):
"""Base authenticated client for VTEX webservice"""
api_url = "https://{}.vtexpayments.com.br/{}"
def __init__(self, api_store, api_key, api_token):
self.api_store = api_store
self.api_key = api_key
self.api_token = api_token
def _make_url(self, url_sufix):
return self.api_url.format(self.api_store, url_sufix)
def _get_headers(self):
headers = super(BaseAuthenticatedClient, self)._get_headers()
headers.update({'X-VTEX-API-APPKEY': self.api_key,
'X-VTEX-API-APPTOKEN': self.api_token})
return headers
|
PypiClean
|
/zanon-0.3.3.tar.gz/zanon-0.3.3/zanonymity/study_output.py
|
from collections import defaultdict
import numpy as np
import json
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator
MAX_GENERALIZATION = 20
stepsize = np.array([3000, 5000, 10000, 30000, 500000])
numeric_category = False
with open('output.json',"r") as f:
data = json.load(f)
labels = []
labels.append("Anonymized")
details = [x for x in data["all_details"] if not all(v == 0 for v in x)]
if numeric_category:
for i in reversed(stepsize):
labels.append(str(int(i/1000))+" km")
else:
for i in range(len(details)):
labels.append(str(i + 1) + "-detail")
to_plot = np.vstack((data["tot_anon"], details))
color = 'tab:red'
fig, ax_left = plt.subplots(figsize=(20, 10))
ax_right = ax_left.twinx()
ax_third = ax_left.twinx()
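# push the third y-axis spine outward so it does not overlap the second one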
ax_third.spines["right"].set_position(("axes", 1.1))
ax_left.plot(data["time"],data["z"], color=color, linewidth="5")
ax_right.plot(data["time"], data["kanon"], color='black', linewidth="5")
ax_third.stackplot(data["time"], to_plot,
                   labels=labels, alpha=0.4)
ax_third.legend(loc='upper left', prop={"size":20})
ax_left.set_xlabel('time', fontsize=20)
ax_left.set_ylabel('z', color=color, fontsize=20)
ax_left.autoscale()
ax_third.autoscale()
ax_third.set_ylabel('Tuple traffic', color = "blue", fontsize=20)
ax_third.tick_params(axis='y', labelcolor="blue", labelsize=20.0)
ax_right.set_ylim(bottom = 0.0, top = 1.0)
ax_left.tick_params(axis='y', labelcolor=color, labelsize=20.0)
ax_right.set_ylabel('pkanon', color='black', fontsize= 20)
ax_right.tick_params(axis='y', labelcolor='black', labelsize = 20.0)
ax_left.get_xaxis().set_major_locator(LinearLocator(numticks=20))
ax_left.tick_params(labelsize=20)
fig.autofmt_xdate(rotation = 45)
fig.tight_layout()
fig.savefig('z_tuning.pdf')
with open('trace.txt') as f:
rows = sum(1 for _ in f)
final_dataset = defaultdict(set)
gen = [0] * MAX_GENERALIZATION
tot = 0
with open('output.txt', 'r') as file:
    for line in file:
        tot += 1
        t, u, a = line.split("\t")
        t = float(t)
        a = a.strip()  # strip() returns a new string; reassign the result
        final_dataset[u].add(a)
        cat = a.split("*")
        gen[len(cat)] += 1
final_dataset_inv = defaultdict(list)
for k,v in final_dataset.items():
final_dataset_inv[str(v)].append(k)
ks = np.array([len(v) for v in final_dataset_inv.values()])
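# fraction of users whose attribute set is shared by at least k users (k-anonymity)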
for k in range(2,5):
print("Final " + str(k) + "-anonymization: " + str(sum(ks[ks >= k])/sum(ks)))
for index, i in enumerate(gen):
    if i == 0 and index == 0:
        continue
    elif i == 0:
        break
    print("Tuples passed with " + str(index) + "-detail level: " + str(i))
print("Tuples anonymized: " + str(rows - tot))
|
PypiClean
|
/apis_core-0.18.14.tar.gz/apis_core-0.18.14/apis_core/apis_metainfo/migrations/0001_initial.py
|
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Collection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Source',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('orig_filename', models.CharField(blank=True, max_length=255)),
('indexed', models.BooleanField(default=False)),
('pubinfo', models.CharField(blank=True, max_length=400)),
('author', models.CharField(blank=True, max_length=255)),
('orig_id', models.PositiveIntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='TempEntityClass',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255)),
('review', models.BooleanField(default=False, help_text='Should be set to True, if the data record holds up quality standards.')),
('start_date', models.DateField(blank=True, null=True)),
('start_start_date', models.DateField(blank=True, null=True)),
('start_end_date', models.DateField(blank=True, null=True)),
('end_date', models.DateField(blank=True, null=True)),
('end_start_date', models.DateField(blank=True, null=True)),
('end_end_date', models.DateField(blank=True, null=True)),
('start_date_written', models.CharField(blank=True, max_length=255, null=True, verbose_name='Start')),
('end_date_written', models.CharField(blank=True, max_length=255, null=True, verbose_name='End')),
('status', models.CharField(max_length=100)),
('references', models.TextField(blank=True, null=True)),
('notes', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Text',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Uri',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uri', models.URLField(blank=True, max_length=255, null=True, unique=True)),
('domain', models.CharField(blank=True, max_length=255)),
('rdf_link', models.URLField(blank=True)),
('loaded', models.BooleanField(default=False)),
('loaded_time', models.DateTimeField(blank=True, null=True)),
('entity', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='apis_metainfo.TempEntityClass')),
],
),
migrations.CreateModel(
name='UriCandidate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uri', models.URLField()),
('confidence', models.FloatField(blank=True, null=True)),
('responsible', models.CharField(max_length=255)),
('entity', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='apis_metainfo.TempEntityClass')),
],
),
]
|
PypiClean
|
/notion_cli_py-1.0.1-py3-none-any.whl/notion_cli_py/client/client.py
|
import toml
import os
import requests
import sys
from ..utils import logger
class Client:
def __init__(self, label):
self.logger = logger.init_logger(__name__)
try:
PATH = os.environ['HOME'] + "/.notion_cli/config.toml"
config = toml.load(open(PATH))
self.config = config[label]
except FileNotFoundError:
self.logger.error("[ERR] Not Found config file ({PATH}).".format(PATH=PATH))
self.logger.error(" You need to create config file before utilizing notion-cli operations.")
self.logger.error(" Please execute following command.")
self.logger.error(" * notion-cli configure set")
sys.exit(1)
except KeyError:
self.logger.error("[ERR] Not found label ({label}).".format(label=label))
self.logger.error(" To create label, please execute following command.")
self.logger.error(" * notion-cli configure set")
sys.exit(1)
def get_headers(self):
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"Notion-Version": self.config["notion_api_version"],
"Authorization": "Bearer {token}".format(token=self.config["token"])
}
return headers
def get_page(self, page_id):
""" get_page """
url = "https://api.notion.com/v1/pages/{page_id}".format(
page_id=page_id)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def get_page_property(self, page_id, property_id, page_size, start_cursor):
""" get page property """
url = "https://api.notion.com/v1/pages/{page_id}/properties/{property_id}?page_size={page_size}".format(
page_id=page_id, property_id=property_id, page_size=page_size)
if start_cursor:
url += "&start_cursor={start_cursor}".format(
start_cursor=start_cursor)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def create_page(self, payload):
url = "https://api.notion.com/v1/pages"
headers = self.get_headers()
response = requests.request("POST", url, json=payload, headers=headers)
return response.text
def update_page(self, page_id, payload):
url = "https://api.notion.com/v1/pages/{page_id}".format(
page_id=page_id)
headers = self.get_headers()
response = requests.request(
"PATCH", url, json=payload, headers=headers)
return response.text
def get_database(self, database_id):
url = "https://api.notion.com/v1/databases/{database_id}".format(
database_id=database_id)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def create_database(self, payload):
url = "https://api.notion.com/v1/databases"
headers = self.get_headers()
response = requests.request("POST", url, json=payload, headers=headers)
return response.text
def update_database(self, database_id, payload):
url = "https://api.notion.com/v1/databases/{database_id}".format(
database_id=database_id)
headers = self.get_headers()
response = requests.request(
"PATCH", url, json=payload, headers=headers)
return response.text
def query_database(self, database_id, payload):
url = "https://api.notion.com/v1/databases/{database_id}/query".format(
database_id=database_id)
headers = self.get_headers()
response = requests.request("POST", url, json=payload, headers=headers)
return response.text
def get_block(self, block_id):
url = "https://api.notion.com/v1/blocks/{block_id}".format(
block_id=block_id)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def get_block_children(self, block_id, page_size, start_cursor):
url = "https://api.notion.com/v1/blocks/{block_id}/children?page_size={page_size}".format(
block_id=block_id, page_size=page_size)
if start_cursor:
url += "&start_cursor={start_cursor}".format(
start_cursor=start_cursor)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def update_block(self, block_id, payload):
url = "https://api.notion.com/v1/blocks/{block_id}".format(
block_id=block_id)
headers = self.get_headers()
response = requests.request(
"PATCH", url, json=payload, headers=headers)
return response.text
def delete_block(self, block_id):
url = "https://api.notion.com/v1/blocks/{block_id}".format(
block_id=block_id)
headers = self.get_headers()
response = requests.request("DELETE", url, headers=headers)
return response.text
def append_block_children(self, block_id, payload):
url = "https://api.notion.com/v1/blocks/{block_id}/children".format(
block_id=block_id)
headers = self.get_headers()
response = requests.request(
"PATCH", url, json=payload, headers=headers)
return response.text
def get_user(self, user_id):
url = "https://api.notion.com/v1/users/{user_id}".format(
user_id=user_id)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def get_all_user(self, page_size, start_cursor):
url = "https://api.notion.com/v1/users?page_size={page_size}".format(
page_size=page_size)
if start_cursor:
url += "&start_cursor={start_cursor}".format(
start_cursor=start_cursor)
headers = self.get_headers()
response = requests.request("GET", url, headers=headers)
return response.text
def search(self, payload):
url = "https://api.notion.com/v1/search"
headers = self.get_headers()
response = requests.request("POST", url, json=payload, headers=headers)
return response.text
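# --- usage sketch (illustrative, not from the original source) ---
# A minimal sketch, assuming ~/.notion_cli/config.toml defines a "default"
# label with "token" and "notion_api_version"; the page id is a placeholder.
if __name__ == "__main__":
    client = Client("default")
    print(client.get_page("<page-id>"))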
|
PypiClean
|
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/lorem/en_US/__init__.py
|
from typing import Dict
from .. import Provider as LoremProvider
class Provider(LoremProvider):
"""Implement lorem provider for ``en_US`` locale.
Word list is based on the source(s) below, and some words have been removed
to make the word list appropriate for public testing.
Sources:
- https://www.educall.com.tr/blog/post/500-most-common-english-verbs
- http://www.ef.edu/english-resources/english-vocabulary/top-1000-words/
- https://www.talkenglish.com/vocabulary/top-1500-nouns.aspx
- https://www.talkenglish.com/vocabulary/top-250-adverbs.aspx
- https://www.talkenglish.com/vocabulary/top-500-adjectives.aspx
"""
word_list = (
"a",
"ability",
"able",
"about",
"above",
"accept",
"according",
"account",
"across",
"act",
"action",
"activity",
"actually",
"add",
"address",
"administration",
"admit",
"adult",
"affect",
"after",
"again",
"against",
"age",
"agency",
"agent",
"ago",
"agree",
"agreement",
"ahead",
"air",
"all",
"allow",
"almost",
"alone",
"along",
"already",
"also",
"although",
"always",
"American",
"among",
"amount",
"analysis",
"and",
"animal",
"another",
"answer",
"any",
"anyone",
"anything",
"appear",
"apply",
"approach",
"area",
"argue",
"arm",
"around",
"arrive",
"art",
"article",
"artist",
"as",
"ask",
"assume",
"at",
"attack",
"attention",
"attorney",
"audience",
"author",
"authority",
"available",
"avoid",
"away",
"baby",
"back",
"bad",
"bag",
"ball",
"bank",
"bar",
"base",
"be",
"beat",
"beautiful",
"because",
"become",
"bed",
"before",
"begin",
"behavior",
"behind",
"believe",
"benefit",
"best",
"better",
"between",
"beyond",
"big",
"bill",
"billion",
"bit",
"black",
"blood",
"blue",
"board",
"body",
"book",
"born",
"both",
"box",
"boy",
"break",
"bring",
"brother",
"budget",
"build",
"building",
"business",
"but",
"buy",
"by",
"call",
"camera",
"campaign",
"can",
"candidate",
"capital",
"car",
"card",
"care",
"career",
"carry",
"case",
"catch",
"cause",
"cell",
"center",
"central",
"century",
"certain",
"certainly",
"chair",
"challenge",
"chance",
"change",
"character",
"charge",
"check",
"child",
"choice",
"choose",
"church",
"citizen",
"city",
"civil",
"claim",
"class",
"clear",
"clearly",
"close",
"coach",
"cold",
"collection",
"college",
"color",
"commercial",
"common",
"community",
"company",
"compare",
"computer",
"concern",
"condition",
"conference",
"Congress",
"consider",
"consumer",
"contain",
"continue",
"control",
"cost",
"could",
"country",
"couple",
"course",
"court",
"cover",
"create",
"crime",
"cultural",
"culture",
"cup",
"current",
"customer",
"cut",
"dark",
"data",
"daughter",
"day",
"deal",
"debate",
"decade",
"decide",
"decision",
"deep",
"defense",
"degree",
"Democrat",
"democratic",
"describe",
"design",
"despite",
"detail",
"determine",
"develop",
"development",
"difference",
"different",
"difficult",
"dinner",
"direction",
"director",
"discover",
"discuss",
"discussion",
"do",
"doctor",
"dog",
"door",
"down",
"draw",
"dream",
"drive",
"drop",
"drug",
"during",
"each",
"early",
"east",
"easy",
"eat",
"economic",
"economy",
"edge",
"education",
"effect",
"effort",
"eight",
"either",
"election",
"else",
"employee",
"end",
"energy",
"enjoy",
"enough",
"enter",
"entire",
"environment",
"environmental",
"especially",
"establish",
"even",
"evening",
"event",
"ever",
"every",
"everybody",
"everyone",
"everything",
"evidence",
"exactly",
"example",
"executive",
"exist",
"expect",
"experience",
"expert",
"explain",
"eye",
"face",
"fact",
"factor",
"fall",
"family",
"far",
"fast",
"father",
"fear",
"federal",
"feel",
"feeling",
"few",
"field",
"fight",
"figure",
"fill",
"film",
"final",
"finally",
"financial",
"find",
"fine",
"finish",
"fire",
"firm",
"first",
"fish",
"five",
"floor",
"fly",
"focus",
"follow",
"food",
"foot",
"for",
"force",
"foreign",
"forget",
"form",
"former",
"forward",
"four",
"free",
"friend",
"from",
"front",
"full",
"fund",
"future",
"game",
"garden",
"gas",
"general",
"generation",
"get",
"girl",
"give",
"glass",
"go",
"goal",
"good",
"government",
"great",
"green",
"ground",
"group",
"grow",
"growth",
"guess",
"gun",
"guy",
"hair",
"half",
"hand",
"happen",
"happy",
"hard",
"have",
"he",
"head",
"health",
"hear",
"heart",
"heavy",
"help",
"her",
"here",
"herself",
"high",
"him",
"himself",
"his",
"history",
"hit",
"hold",
"home",
"hope",
"hospital",
"hot",
"hotel",
"hour",
"house",
"how",
"however",
"huge",
"human",
"hundred",
"husband",
"I",
"idea",
"identify",
"if",
"image",
"imagine",
"impact",
"important",
"improve",
"in",
"include",
"including",
"increase",
"indeed",
"indicate",
"individual",
"industry",
"information",
"inside",
"instead",
"institution",
"interest",
"interesting",
"international",
"interview",
"into",
"investment",
"involve",
"issue",
"it",
"item",
"its",
"itself",
"job",
"join",
"just",
"keep",
"key",
"kid",
"kind",
"kitchen",
"know",
"knowledge",
"land",
"language",
"large",
"last",
"late",
"later",
"laugh",
"law",
"lawyer",
"lay",
"lead",
"leader",
"learn",
"least",
"leave",
"left",
"leg",
"less",
"let",
"letter",
"level",
"life",
"light",
"like",
"likely",
"line",
"list",
"listen",
"little",
"live",
"local",
"long",
"look",
"lose",
"loss",
"lot",
"low",
"machine",
"magazine",
"main",
"maintain",
"major",
"majority",
"make",
"man",
"manage",
"management",
"manager",
"many",
"market",
"marriage",
"material",
"matter",
"may",
"maybe",
"me",
"mean",
"measure",
"media",
"medical",
"meet",
"meeting",
"member",
"memory",
"mention",
"message",
"method",
"middle",
"might",
"military",
"million",
"mind",
"minute",
"miss",
"mission",
"model",
"modern",
"moment",
"money",
"month",
"more",
"morning",
"most",
"mother",
"mouth",
"move",
"movement",
"movie",
"Mr",
"Mrs",
"much",
"music",
"must",
"my",
"myself",
"name",
"nation",
"national",
"natural",
"nature",
"near",
"nearly",
"necessary",
"need",
"network",
"never",
"new",
"news",
"newspaper",
"next",
"nice",
"night",
"no",
"none",
"nor",
"north",
"not",
"note",
"nothing",
"notice",
"now",
"number",
"occur",
"of",
"off",
"offer",
"office",
"officer",
"official",
"often",
"oil",
"ok",
"old",
"on",
"once",
"one",
"only",
"onto",
"open",
"operation",
"opportunity",
"option",
"or",
"order",
"organization",
"other",
"others",
"our",
"out",
"outside",
"over",
"own",
"owner",
"page",
"painting",
"paper",
"parent",
"part",
"participant",
"particular",
"particularly",
"partner",
"party",
"pass",
"past",
"pattern",
"pay",
"peace",
"people",
"per",
"perform",
"performance",
"perhaps",
"person",
"personal",
"phone",
"physical",
"pick",
"picture",
"piece",
"place",
"plan",
"plant",
"play",
"player",
"PM",
"point",
"police",
"policy",
"political",
"politics",
"poor",
"popular",
"population",
"position",
"positive",
"possible",
"power",
"practice",
"prepare",
"present",
"president",
"pressure",
"pretty",
"prevent",
"price",
"probably",
"process",
"produce",
"product",
"production",
"professional",
"professor",
"program",
"project",
"property",
"protect",
"prove",
"provide",
"public",
"pull",
"purpose",
"push",
"put",
"quality",
"question",
"quickly",
"quite",
"race",
"radio",
"raise",
"range",
"rate",
"rather",
"reach",
"read",
"ready",
"real",
"reality",
"realize",
"really",
"reason",
"receive",
"recent",
"recently",
"recognize",
"record",
"red",
"reduce",
"reflect",
"region",
"relate",
"relationship",
"religious",
"remain",
"remember",
"report",
"represent",
"Republican",
"require",
"research",
"resource",
"respond",
"response",
"responsibility",
"rest",
"result",
"return",
"reveal",
"rich",
"right",
"rise",
"risk",
"road",
"rock",
"role",
"room",
"rule",
"run",
"safe",
"same",
"save",
"say",
"scene",
"school",
"science",
"scientist",
"score",
"sea",
"season",
"seat",
"second",
"section",
"security",
"see",
"seek",
"seem",
"sell",
"send",
"senior",
"sense",
"series",
"serious",
"serve",
"service",
"set",
"seven",
"several",
"shake",
"share",
"she",
"short",
"should",
"shoulder",
"show",
"side",
"sign",
"significant",
"similar",
"simple",
"simply",
"since",
"sing",
"single",
"sister",
"sit",
"site",
"situation",
"six",
"size",
"skill",
"skin",
"small",
"smile",
"so",
"social",
"society",
"soldier",
"some",
"somebody",
"someone",
"something",
"sometimes",
"son",
"song",
"soon",
"sort",
"sound",
"source",
"south",
"southern",
"space",
"speak",
"special",
"specific",
"speech",
"spend",
"sport",
"spring",
"staff",
"stage",
"stand",
"standard",
"star",
"start",
"state",
"statement",
"station",
"stay",
"step",
"still",
"stock",
"stop",
"store",
"story",
"strategy",
"street",
"strong",
"structure",
"student",
"study",
"stuff",
"style",
"subject",
"success",
"successful",
"such",
"suddenly",
"suffer",
"suggest",
"summer",
"support",
"sure",
"surface",
"system",
"table",
"take",
"talk",
"task",
"tax",
"teach",
"teacher",
"team",
"technology",
"television",
"tell",
"ten",
"tend",
"term",
"test",
"than",
"thank",
"that",
"the",
"their",
"them",
"themselves",
"then",
"theory",
"there",
"these",
"they",
"thing",
"think",
"third",
"this",
"those",
"though",
"thought",
"thousand",
"threat",
"three",
"through",
"throughout",
"throw",
"thus",
"time",
"to",
"today",
"together",
"tonight",
"too",
"top",
"total",
"tough",
"toward",
"town",
"trade",
"traditional",
"training",
"travel",
"treat",
"treatment",
"tree",
"trial",
"trip",
"trouble",
"true",
"truth",
"try",
"turn",
"TV",
"two",
"type",
"under",
"understand",
"unit",
"until",
"up",
"upon",
"us",
"use",
"usually",
"value",
"various",
"very",
"view",
"visit",
"voice",
"vote",
"wait",
"walk",
"wall",
"want",
"war",
"watch",
"water",
"way",
"we",
"wear",
"week",
"weight",
"well",
"west",
"western",
"what",
"whatever",
"when",
"where",
"whether",
"which",
"while",
"white",
"who",
"whole",
"whom",
"whose",
"why",
"wide",
"wife",
"will",
"win",
"wind",
"window",
"wish",
"with",
"within",
"without",
"woman",
"wonder",
"word",
"work",
"worker",
"world",
"worry",
"would",
"write",
"writer",
"wrong",
"yard",
"yeah",
"year",
"yes",
"yet",
"you",
"young",
"your",
"yourself",
)
parts_of_speech: Dict[str, tuple] = {
"verb": (
"be",
"have",
"do",
"say",
"get",
"make",
"go",
"see",
"know",
"take",
"think",
"come",
"give",
"look",
"use",
"find",
"want",
"tell",
"put",
"mean",
"become",
"leave",
"work",
"need",
"feel",
"seem",
"ask",
"show",
"try",
"call",
"keep",
"provide",
"hold",
"turn",
"follow",
"begin",
"bring",
"like",
"going",
"help",
"start",
"run",
"write",
"set",
"move",
"play",
"pay",
"hear",
"include",
"believe",
"allow",
"meet",
"lead",
"live",
"stand",
"happen",
"carry",
"talk",
"appear",
"produce",
"sit",
"offer",
"consider",
"expect",
"let",
"read",
"require",
"continue",
"lose",
"add",
"change",
"fall",
"remain",
"remember",
"buy",
"speak",
"stop",
"send",
"receive",
"decide",
"win",
"understand",
"describe",
"develop",
"agree",
"open",
"reach",
"build",
"involve",
"spend",
"return",
"draw",
"die",
"hope",
"create",
"walk",
"sell",
"wait",
"cause",
"pass",
"lie",
"accept",
"watch",
"raise",
"base",
"apply",
"break",
"explain",
"learn",
"increase",
"cover",
"grow",
"claim",
"report",
"support",
"cut",
"form",
"stay",
"contain",
"reduce",
"establish",
"join",
"wish",
"seek",
"choose",
"deal",
"face",
"fail",
"serve",
"end",
"kill",
"occur",
"drive",
"represent",
"rise",
"discuss",
"love",
"pick",
"place",
"argue",
"prove",
"wear",
"catch",
"enjoy",
"eat",
"introduce",
"enter",
"present",
"arrive",
"ensure",
"point",
"plan",
"pull",
"refer",
"act",
"relate",
"affect",
"close",
"identify",
"manage",
"thank",
"compare",
"announce",
"obtain",
"note",
"forget",
"indicate",
"wonder",
"maintain",
"publish",
"suffer",
"avoid",
"express",
"suppose",
"finish",
"determine",
"design",
"listen",
"save",
"tend",
"treat",
"control",
"share",
"remove",
"throw",
"visit",
"exist",
"force",
"reflect",
"admit",
"assume",
"smile",
"prepare",
"replace",
"fill",
"improve",
"mention",
"fight",
"intend",
"miss",
"discover",
"drop",
"hit",
"push",
"prevent",
"refuse",
"regard",
"lay",
"reveal",
"teach",
"answer",
"operate",
"state",
"depend",
"enable",
"record",
"check",
"complete",
"cost",
"sound",
"laugh",
"realise",
"extend",
"arise",
"notice",
"define",
"examine",
"fit",
"study",
"bear",
"hang",
"recognise",
"shake",
"sign",
"attend",
"fly",
"gain",
"result",
"travel",
"adopt",
"confirm",
"protect",
"demand",
"stare",
"imagine",
"attempt",
"beat",
"born",
"associate",
"care",
"marry",
"collect",
"voice",
"employ",
"issue",
"release",
"emerge",
"mind",
"aim",
"deny",
"mark",
"shoot",
"appoint",
"order",
"supply",
"drink",
"observe",
"reply",
"ignore",
"link",
"propose",
"ring",
"settle",
"strike",
"press",
"respond",
"arrange",
"survive",
"concentrate",
"lift",
"approach",
"cross",
"test",
"charge",
"experience",
"touch",
"acquire",
"commit",
"demonstrate",
"grant",
"prefer",
"repeat",
"sleep",
"threaten",
"feed",
"insist",
"launch",
"limit",
"promote",
"deliver",
"measure",
"own",
"retain",
"attract",
"belong",
"consist",
"contribute",
"hide",
"promise",
"reject",
"cry",
"impose",
"invite",
"sing",
"vary",
"warn",
"address",
"declare",
"destroy",
"worry",
"divide",
"head",
"name",
"stick",
"nod",
"recognize",
"train",
"attack",
"clear",
"combine",
"handle",
"influence",
"realize",
"recommend",
"shout",
"spread",
"undertake",
"account",
"select",
"climb",
"contact",
"recall",
"secure",
"step",
"transfer",
"welcome",
"conclude",
"disappear",
"display",
"dress",
"illustrate",
"imply",
"organise",
"direct",
"escape",
"generate",
"remind",
"advise",
"afford",
"earn",
"hand",
"inform",
"rely",
"succeed",
"approve",
"burn",
"fear",
"vote",
"conduct",
"cope",
"derive",
"elect",
"gather",
"jump",
"last",
"match",
"matter",
"persuade",
"ride",
"shut",
"blow",
"estimate",
"recover",
"score",
"slip",
"count",
"hate",
"attach",
"exercise",
"house",
"lean",
"roll",
"wash",
"accuse",
"bind",
"judge",
"rest",
"steal",
"comment",
"exclude",
"focus",
"hurt",
"stretch",
"withdraw",
"back",
"fix",
"justify",
"knock",
"pursue",
"switch",
"benefit",
"lack",
"list",
"occupy",
"permit",
"surround",
"abandon",
"blame",
"complain",
"connect",
"construct",
"dominate",
"engage",
"paint",
"quote",
"view",
"incorporate",
"interpret",
"proceed",
"search",
"separate",
"stress",
"alter",
"analyse",
"arrest",
"bother",
"defend",
"expand",
"implement",
"possess",
"review",
"suit",
"tie",
"assist",
"calculate",
"glance",
"mix",
"question",
"resolve",
"rule",
"suspect",
"wake",
"appeal",
"challenge",
"clean",
"damage",
"guess",
"reckon",
"restore",
"restrict",
"specify",
"constitute",
"convert",
"distinguish",
"submit",
"trust",
"urge",
"feature",
"land",
"locate",
"predict",
"preserve",
"solve",
"sort",
"struggle",
"cast",
"cook",
"dance",
"invest",
"lock",
"owe",
"pour",
"shift",
"kick",
"kiss",
"light",
"purchase",
"race",
"retire",
),
"noun": (
"people",
"history",
"way",
"art",
"world",
"information",
"map",
"family",
"government",
"health",
"system",
"computer",
"meat",
"year",
"thanks",
"music",
"person",
"reading",
"method",
"data",
"food",
"understanding",
"theory",
"law",
"bird",
"literature",
"problem",
"software",
"control",
"knowledge",
"power",
"ability",
"economics",
"love",
"internet",
"television",
"science",
"library",
"nature",
"fact",
"product",
"idea",
"temperature",
"investment",
"area",
"society",
"activity",
"story",
"industry",
"media",
"thing",
"oven",
"community",
"definition",
"safety",
"quality",
"development",
"language",
"management",
"player",
"variety",
"video",
"week",
"security",
"country",
"exam",
"movie",
"organization",
"equipment",
"physics",
"analysis",
"policy",
"series",
"thought",
"basis",
"boyfriend",
"direction",
"strategy",
"technology",
"army",
"camera",
"freedom",
"paper",
"environment",
"child",
"instance",
"month",
"truth",
"marketing",
"university",
"writing",
"article",
"department",
"difference",
"goal",
"news",
"audience",
"fishing",
"growth",
"income",
"marriage",
"user",
"combination",
"failure",
"meaning",
"medicine",
"philosophy",
"teacher",
"communication",
"night",
"chemistry",
"disease",
"disk",
"energy",
"nation",
"road",
"role",
"soup",
"advertising",
"location",
"success",
"addition",
"apartment",
"education",
"math",
"moment",
"painting",
"politics",
"attention",
"decision",
"event",
"property",
"shopping",
"student",
"wood",
"competition",
"distribution",
"entertainment",
"office",
"population",
"president",
"unit",
"category",
"cigarette",
"context",
"introduction",
"opportunity",
"performance",
"driver",
"flight",
"length",
"magazine",
"newspaper",
"relationship",
"teaching",
"cell",
"dealer",
"finding",
"lake",
"member",
"message",
"phone",
"scene",
"appearance",
"association",
"concept",
"customer",
"death",
"discussion",
"housing",
"inflation",
"insurance",
"mood",
"woman",
"advice",
"blood",
"effort",
"expression",
"importance",
"opinion",
"payment",
"reality",
"responsibility",
"situation",
"skill",
"statement",
"wealth",
"application",
"city",
"county",
"depth",
"estate",
"foundation",
"grandmother",
"heart",
"perspective",
"photo",
"recipe",
"studio",
"topic",
"collection",
"depression",
"imagination",
"passion",
"percentage",
"resource",
"setting",
"ad",
"agency",
"college",
"connection",
"criticism",
"debt",
"description",
"memory",
"patience",
"secretary",
"solution",
"administration",
"aspect",
"attitude",
"director",
"personality",
"psychology",
"recommendation",
"response",
"selection",
"storage",
"version",
"alcohol",
"argument",
"complaint",
"contract",
"emphasis",
"highway",
"loss",
"membership",
"possession",
"preparation",
"steak",
"union",
"agreement",
"cancer",
"currency",
"employment",
"engineering",
"entry",
"interaction",
"mixture",
"preference",
"region",
"republic",
"tradition",
"virus",
"actor",
"classroom",
"delivery",
"device",
"difficulty",
"drama",
"election",
"engine",
"football",
"guidance",
"hotel",
"owner",
"priority",
"protection",
"suggestion",
"tension",
"variation",
"anxiety",
"atmosphere",
"awareness",
"bath",
"bread",
"candidate",
"climate",
"comparison",
"confusion",
"construction",
"elevator",
"emotion",
"employee",
"employer",
"guest",
"height",
"leadership",
"mall",
"manager",
"operation",
"recording",
"sample",
"transportation",
"charity",
"cousin",
"disaster",
"editor",
"efficiency",
"excitement",
"extent",
"feedback",
"guitar",
"homework",
"leader",
"mom",
"outcome",
"permission",
"presentation",
"promotion",
"reflection",
"refrigerator",
"resolution",
"revenue",
"session",
"singer",
"tennis",
"basket",
"bonus",
"cabinet",
"childhood",
"church",
"clothes",
"coffee",
"dinner",
"drawing",
"hair",
"hearing",
"initiative",
"judgment",
"lab",
"measurement",
"mode",
"mud",
"orange",
"poetry",
"police",
"possibility",
"procedure",
"queen",
"ratio",
"relation",
"restaurant",
"satisfaction",
"sector",
"signature",
"significance",
"song",
"tooth",
"town",
"vehicle",
"volume",
"wife",
"accident",
"airport",
"appointment",
"arrival",
"assumption",
"baseball",
"chapter",
"committee",
"conversation",
"database",
"enthusiasm",
"error",
"explanation",
"farmer",
"gate",
"girl",
"hall",
"historian",
"hospital",
"injury",
"instruction",
"maintenance",
"manufacturer",
"meal",
"perception",
"pie",
"poem",
"presence",
"proposal",
"reception",
"replacement",
"revolution",
"river",
"son",
"speech",
"tea",
"village",
"warning",
"winner",
"worker",
"writer",
"assistance",
"breath",
"buyer",
"chest",
"chocolate",
"conclusion",
"contribution",
"cookie",
"courage",
"desk",
"drawer",
"establishment",
"examination",
"garbage",
"grocery",
"honey",
"impression",
"improvement",
"independence",
"insect",
"inspection",
"inspector",
"king",
"ladder",
"menu",
"penalty",
"piano",
"potato",
"profession",
"professor",
"quantity",
"reaction",
"requirement",
"salad",
"sister",
"supermarket",
"tongue",
"weakness",
"wedding",
"affair",
"ambition",
"analyst",
"apple",
"assignment",
"assistant",
"bathroom",
"bedroom",
"beer",
"birthday",
"celebration",
"championship",
"cheek",
"client",
"consequence",
"departure",
"diamond",
"dirt",
"ear",
"fortune",
"friendship",
"funeral",
"gene",
"girlfriend",
"hat",
"indication",
"intention",
"lady",
"midnight",
"negotiation",
"obligation",
"passenger",
"pizza",
"platform",
"poet",
"pollution",
"recognition",
"reputation",
"shirt",
"sir",
"speaker",
"stranger",
"surgery",
"sympathy",
"tale",
"throat",
"trainer",
"uncle",
"youth",
"time",
"work",
"film",
"water",
"money",
"example",
"while",
"business",
"study",
"game",
"life",
"form",
"air",
"day",
"place",
"number",
"part",
"field",
"fish",
"back",
"process",
"heat",
"hand",
"experience",
"job",
"book",
"end",
"point",
"type",
"home",
"economy",
"value",
"body",
"market",
"guide",
"interest",
"state",
"radio",
"course",
"company",
"price",
"size",
"card",
"list",
"mind",
"trade",
"line",
"care",
"group",
"risk",
"word",
"fat",
"force",
"key",
"light",
"training",
"name",
"school",
"top",
"amount",
"level",
"order",
"practice",
"research",
"sense",
"service",
"piece",
"web",
"boss",
"sport",
"fun",
"house",
"page",
"term",
"test",
"answer",
"sound",
"focus",
"matter",
"kind",
"soil",
"board",
"oil",
"picture",
"access",
"garden",
"range",
"rate",
"reason",
"future",
"site",
"demand",
"exercise",
"image",
"case",
"cause",
"coast",
"action",
"age",
"bad",
"boat",
"record",
"result",
"section",
"building",
"mouse",
"cash",
"class",
"nothing",
"period",
"plan",
"store",
"tax",
"side",
"subject",
"space",
"rule",
"stock",
"weather",
"chance",
"figure",
"man",
"model",
"source",
"beginning",
"earth",
"program",
"chicken",
"design",
"feature",
"head",
"material",
"purpose",
"question",
"rock",
"salt",
"act",
"birth",
"car",
"dog",
"object",
"scale",
"sun",
"note",
"profit",
"rent",
"speed",
"style",
"war",
"bank",
"craft",
"half",
"inside",
"outside",
"standard",
"bus",
"exchange",
"eye",
"fire",
"position",
"pressure",
"stress",
"advantage",
"benefit",
"box",
"frame",
"issue",
"step",
"cycle",
"face",
"item",
"metal",
"paint",
"review",
"room",
"screen",
"structure",
"view",
"account",
"ball",
"discipline",
"medium",
"share",
"balance",
"bit",
"black",
"bottom",
"choice",
"gift",
"impact",
"machine",
"shape",
"tool",
"wind",
"address",
"average",
"career",
"culture",
"morning",
"pot",
"sign",
"table",
"task",
"condition",
"contact",
"credit",
"egg",
"hope",
"ice",
"network",
"north",
"square",
"attempt",
"date",
"effect",
"link",
"post",
"star",
"voice",
"capital",
"challenge",
"friend",
"self",
"shot",
"brush",
"couple",
"debate",
"exit",
"front",
"function",
"lack",
"living",
"plant",
"plastic",
"spot",
"summer",
"taste",
"theme",
"track",
"wing",
"brain",
"button",
"click",
"desire",
"foot",
"gas",
"influence",
"notice",
"rain",
"wall",
"base",
"damage",
"distance",
"feeling",
"pair",
"savings",
"staff",
"sugar",
"target",
"text",
"animal",
"author",
"budget",
"discount",
"file",
"ground",
"lesson",
"minute",
"officer",
"phase",
"reference",
"register",
"sky",
"stage",
"stick",
"title",
"trouble",
"bowl",
"bridge",
"campaign",
"character",
"club",
"edge",
"evidence",
"fan",
"letter",
"lock",
"maximum",
"novel",
"option",
"pack",
"park",
"plenty",
"quarter",
"skin",
"sort",
"weight",
"baby",
"background",
"carry",
"dish",
"factor",
"fruit",
"glass",
"joint",
"master",
"muscle",
"red",
"strength",
"traffic",
"trip",
"vegetable",
"appeal",
"chart",
"gear",
"ideal",
"kitchen",
"land",
"log",
"mother",
"net",
"party",
"principle",
"relative",
"sale",
"season",
"signal",
"spirit",
"street",
"tree",
"wave",
"belt",
"bench",
"commission",
"copy",
"drop",
"minimum",
"path",
"progress",
"project",
"sea",
"south",
"status",
"stuff",
"ticket",
"tour",
"angle",
"blue",
"breakfast",
"confidence",
"daughter",
"degree",
"doctor",
"dot",
"dream",
"duty",
"essay",
"father",
"fee",
"finance",
"hour",
"juice",
"limit",
"luck",
"milk",
"mouth",
"peace",
"pipe",
"seat",
"stable",
"storm",
"substance",
"team",
"trick",
"afternoon",
"bat",
"beach",
"blank",
"catch",
"chain",
"consideration",
"cream",
"crew",
"detail",
"gold",
"interview",
"kid",
"mark",
"match",
"mission",
"pain",
"pleasure",
"score",
"screw",
"sex",
"shop",
"shower",
"suit",
"tone",
"window",
"agent",
"band",
"block",
"bone",
"calendar",
"cap",
"coat",
"contest",
"corner",
"court",
"cup",
"district",
"door",
"east",
"finger",
"garage",
"guarantee",
"hole",
"hook",
"implement",
"layer",
"lecture",
"lie",
"manner",
"meeting",
"nose",
"parking",
"partner",
"profile",
"respect",
"rice",
"routine",
"schedule",
"swimming",
"telephone",
"tip",
"winter",
"airline",
"bag",
"battle",
"bed",
"bill",
"bother",
"cake",
"code",
"curve",
"designer",
"dimension",
"dress",
"ease",
"emergency",
"evening",
"extension",
"farm",
"fight",
"gap",
"grade",
"holiday",
"horror",
"horse",
"host",
"husband",
"loan",
"mistake",
"mountain",
"nail",
"noise",
"occasion",
"package",
"patient",
"pause",
"phrase",
"proof",
"race",
"relief",
"sand",
"sentence",
"shoulder",
"smoke",
"stomach",
"string",
"tourist",
"towel",
"vacation",
"west",
"wheel",
"wine",
"arm",
"aside",
"associate",
"bet",
"blow",
"border",
"branch",
"breast",
"brother",
"buddy",
"bunch",
"chip",
"coach",
"cross",
"document",
"draft",
"dust",
"expert",
"floor",
"god",
"golf",
"habit",
"iron",
"judge",
"knife",
"landscape",
"league",
"mail",
"mess",
"native",
"opening",
"parent",
"pattern",
"pin",
"pool",
"pound",
"request",
"salary",
"shame",
"shelter",
"shoe",
"silver",
"tackle",
"tank",
"trust",
"assist",
"bake",
"bar",
"bell",
"bike",
"blame",
"boy",
"brick",
"chair",
"closet",
"clue",
"collar",
"comment",
"conference",
"devil",
"diet",
"fear",
"fuel",
"glove",
"jacket",
"lunch",
"monitor",
"mortgage",
"nurse",
"pace",
"panic",
"peak",
"plane",
"reward",
"row",
"sandwich",
"shock",
"spite",
"spray",
"surprise",
"till",
"transition",
"weekend",
"welcome",
"yard",
"alarm",
"bend",
"bicycle",
"bite",
"blind",
"bottle",
"cable",
"candle",
"clerk",
"cloud",
"concert",
"counter",
"flower",
"grandfather",
"harm",
"knee",
"lawyer",
"leather",
"load",
"mirror",
"neck",
"pension",
"plate",
"purple",
"ruin",
"ship",
"skirt",
"slice",
"snow",
"specialist",
"stroke",
"switch",
"trash",
"tune",
"zone",
"anger",
"award",
"bid",
"bitter",
"boot",
"bug",
"camp",
"candy",
"carpet",
"cat",
"champion",
"channel",
"clock",
"comfort",
"cow",
"crack",
"engineer",
"entrance",
"fault",
"grass",
"guy",
"hell",
"highlight",
"incident",
"island",
"joke",
"jury",
"leg",
"lip",
"mate",
"motor",
"nerve",
"passage",
"pen",
"pride",
"priest",
"prize",
"promise",
"resident",
"resort",
"ring",
"roof",
"rope",
"sail",
"scheme",
"script",
"sock",
"station",
"toe",
"tower",
"truck",
"witness",
),
"adverb": (
"not",
"also",
"very",
"often",
"however",
"too",
"usually",
"really",
"early",
"never",
"always",
"sometimes",
"together",
"likely",
"simply",
"generally",
"instead",
"actually",
"again",
"rather",
"almost",
"especially",
"ever",
"quickly",
"probably",
"already",
"below",
"directly",
"therefore",
"else",
"thus",
"easily",
"eventually",
"exactly",
"certainly",
"normally",
"currently",
"extremely",
"finally",
"constantly",
"properly",
"soon",
"specifically",
"ahead",
"daily",
"highly",
"immediately",
"relatively",
"slowly",
"fairly",
"primarily",
"completely",
"ultimately",
"widely",
"recently",
"seriously",
"frequently",
"fully",
"mostly",
"naturally",
"nearly",
"occasionally",
"carefully",
"clearly",
"essentially",
"possibly",
"slightly",
"somewhat",
"equally",
"greatly",
"necessarily",
"personally",
"rarely",
"regularly",
"similarly",
"basically",
"closely",
"effectively",
"initially",
"literally",
"mainly",
"merely",
"gently",
"hopefully",
"originally",
"roughly",
"significantly",
"totally",
"twice",
"elsewhere",
"everywhere",
"obviously",
"perfectly",
"physically",
"successfully",
"suddenly",
"truly",
"virtually",
"altogether",
"anyway",
"automatically",
"deeply",
"definitely",
"deliberately",
"hardly",
"readily",
"terribly",
"unfortunately",
"forth",
"briefly",
"moreover",
"strongly",
"honestly",
"previously",
"as",
"there",
"when",
"how",
"so",
"up",
"out",
"only",
"well",
"then",
"first",
"where",
"why",
"now",
"around",
"once",
"down",
"off",
"here",
"tonight",
"away",
"today",
"far",
"quite",
"later",
"above",
"yet",
"maybe",
"otherwise",
"near",
"forward",
"somewhere",
"anywhere",
"please",
"forever",
"somehow",
"absolutely",
"abroad",
"yeah",
"nowhere",
"tomorrow",
"yesterday",
),
"adjective": (
"different",
"used",
"important",
"every",
"large",
"available",
"popular",
"able",
"basic",
"known",
"various",
"difficult",
"several",
"united",
"historical",
"hot",
"useful",
"mental",
"scared",
"additional",
"emotional",
"old",
"political",
"similar",
"healthy",
"financial",
"medical",
"traditional",
"federal",
"entire",
"strong",
"actual",
"significant",
"successful",
"electrical",
"expensive",
"pregnant",
"intelligent",
"interesting",
"poor",
"happy",
"responsible",
"cute",
"helpful",
"recent",
"willing",
"nice",
"wonderful",
"impossible",
"serious",
"huge",
"rare",
"technical",
"typical",
"competitive",
"critical",
"electronic",
"immediate",
"aware",
"educational",
"environmental",
"global",
"legal",
"relevant",
"accurate",
"capable",
"dangerous",
"dramatic",
"efficient",
"powerful",
"foreign",
"hungry",
"practical",
"psychological",
"severe",
"suitable",
"numerous",
"sufficient",
"unusual",
"consistent",
"cultural",
"existing",
"famous",
"pure",
"afraid",
"obvious",
"careful",
"latter",
"unhappy",
"acceptable",
"aggressive",
"boring",
"distinct",
"eastern",
"logical",
"reasonable",
"strict",
"administrative",
"automatic",
"civil",
"former",
"massive",
"southern",
"unfair",
"visible",
"alive",
"angry",
"desperate",
"exciting",
"friendly",
"lucky",
"realistic",
"sorry",
"ugly",
"unlikely",
"anxious",
"comprehensive",
"curious",
"impressive",
"informal",
"inner",
"pleasant",
"sexual",
"sudden",
"terrible",
"unable",
"weak",
"wooden",
"asleep",
"confident",
"conscious",
"decent",
"embarrassed",
"guilty",
"lonely",
"mad",
"nervous",
"odd",
"remarkable",
"substantial",
"suspicious",
"tall",
"tiny",
"more",
"some",
"all",
"many",
"most",
"other",
"such",
"even",
"new",
"just",
"good",
"any",
"each",
"much",
"own",
"great",
"another",
"same",
"few",
"free",
"right",
"still",
"best",
"public",
"human",
"both",
"local",
"sure",
"better",
"general",
"specific",
"enough",
"long",
"small",
"less",
"high",
"certain",
"little",
"common",
"next",
"simple",
"hard",
"past",
"big",
"possible",
"particular",
"real",
"major",
"personal",
"current",
"left",
"national",
"least",
"natural",
"physical",
"short",
"last",
"single",
"individual",
"main",
"potential",
"professional",
"international",
"lower",
"open",
"according",
"alternative",
"special",
"working",
"true",
"whole",
"clear",
"dry",
"easy",
"cold",
"commercial",
"full",
"low",
"primary",
"worth",
"necessary",
"positive",
"present",
"close",
"creative",
"green",
"late",
"fit",
"glad",
"proper",
"complex",
"content",
"due",
"effective",
"middle",
"regular",
"fast",
"independent",
"original",
"wide",
"beautiful",
"complete",
"active",
"negative",
"safe",
"visual",
"wrong",
"ago",
"quick",
"ready",
"straight",
"white",
"direct",
"excellent",
"extra",
"junior",
"pretty",
"unique",
"classic",
"final",
"overall",
"private",
"separate",
"western",
"alone",
"familiar",
"official",
"perfect",
"bright",
"broad",
"comfortable",
"flat",
"rich",
"warm",
"young",
"heavy",
"valuable",
"correct",
"leading",
"slow",
"clean",
"fresh",
"normal",
"secret",
"tough",
"brown",
"cheap",
"deep",
"objective",
"secure",
"thin",
"chemical",
"cool",
"extreme",
"exact",
"fair",
"fine",
"formal",
"opposite",
"remote",
"total",
"vast",
"lost",
"smooth",
"dark",
"double",
"equal",
"firm",
"frequent",
"internal",
"sensitive",
"constant",
"minor",
"previous",
"raw",
"soft",
"solid",
"weird",
"amazing",
"annual",
"busy",
"dead",
"false",
"round",
"sharp",
"thick",
"wise",
"equivalent",
"initial",
"narrow",
"nearby",
"proud",
"spiritual",
"wild",
"adult",
"apart",
"brief",
"crazy",
"prior",
"rough",
"sad",
"sick",
"strange",
"external",
"illegal",
"loud",
"mobile",
"nasty",
"ordinary",
"royal",
"senior",
"super",
"tight",
"upper",
"yellow",
"dependent",
"funny",
"gross",
"ill",
"spare",
"sweet",
"upstairs",
"usual",
"brave",
"calm",
"dirty",
"downtown",
"grand",
"honest",
"loose",
"male",
"quiet",
"brilliant",
"dear",
"drunk",
"empty",
"female",
"inevitable",
"neat",
"ok",
"representative",
"silly",
"slight",
"smart",
"stupid",
"temporary",
"weekly",
),
}
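# --- usage sketch (illustrative, not from the original source) ---
# A minimal sketch of how these word lists are consumed through the public
# lorem API; the part_of_speech argument selects from the dict above.
if __name__ == "__main__":
    from faker import Faker
    fake = Faker("en_US")
    print(fake.words(nb=3))                         # drawn from word_list
    print(fake.words(nb=3, part_of_speech="verb"))  # drawn from parts_of_speech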
|
PypiClean
|
/accel-brain-base-1.1.0.tar.gz/accel-brain-base-1.1.0/accelbrainbase/controllablemodel/_mxnet/transformer_controller.py
|
from accelbrainbase.controllable_model import ControllableModel
from accelbrainbase._mxnet._exception.init_deferred_error import InitDeferredError
from accelbrainbase.observabledata._mxnet.transformer_model import TransformerModel
from accelbrainbase.observabledata._mxnet.transformermodel.transformer_encoder import TransformerEncoder
from accelbrainbase.observabledata._mxnet.transformermodel.transformer_decoder import TransformerDecoder
from accelbrainbase.iteratabledata.transformer_iterator import TransformerIterator
from accelbrainbase.computable_loss import ComputableLoss
from accelbrainbase.regularizatable_data import RegularizatableData
from mxnet.gluon.block import HybridBlock
from mxnet import gluon
from mxnet import autograd
import numpy as np
import mxnet as mx
import mxnet.ndarray as nd
from mxnet import MXNetError
from logging import getLogger
class TransformerController(HybridBlock, ControllableModel):
'''
Transformer.
References:
- Bahdanau, D., Cho, K., & Bengio, Y. (2014). Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473.
- Floridi, L., & Chiriatti, M. (2020). GPT-3: Its nature, scope, limits, and consequences. Minds and Machines, 30(4), 681-694.
- Miller, A., Fisch, A., Dodge, J., Karimi, A. H., Bordes, A., & Weston, J. (2016). Key-value memory networks for directly reading documents. arXiv preprint arXiv:1606.03126.
- Radford, A., Narasimhan, K., Salimans, T., & Sutskever, I. (2018) Improving Language Understanding by Generative Pre-Training. OpenAI (URL: https://s3-us-west-2.amazonaws.com/openai-assets/research-covers/language-unsupervised/language_understanding_paper.pdf)
- Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., & Sutskever, I. (2019). Language models are unsupervised multitask learners. OpenAI blog, 1(8), 9.
- Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., & Polosukhin, I. (2017). Attention is all you need. arXiv preprint arXiv:1706.03762.
'''
# `bool` that means initialization in this class will be deferred or not.
__init_deferred_flag = False
def __init__(
self,
computable_loss=None,
encoder=None,
decoder=None,
layer_n=3,
head_n=3,
seq_len=5,
depth_dim=100,
hidden_dim=100,
self_attention_activation_list=[],
multi_head_attention_activation_list=[],
fc_activation_list=[],
optimizer_name="SGD",
learning_rate=1e-05,
learning_attenuate_rate=1.0,
attenuate_epoch=50,
dropout_rate=0.5,
hybridize_flag=True,
ctx=mx.gpu(),
initializer=None,
regularizatable_data_list=[],
weight_decay=0.01,
positional_embedding_weignt=1.0,
**kwargs
):
'''
Init.
Args:
computable_loss: is-a `ComputableLoss` or `gluon.loss`.
encoder: is-a `TransformerModel`.
decoder: is-a `TransformerModel`.
layer_n: `int` of the number of layers.
head_n: `int` of the number of heads for multi-head attention model.
seq_len: `int` of the length of sequences.
depth_dim: `int` of dimension of dense layer.
hidden_dim: `int` of dimension of hidden(encoder) layer.
self_attention_activation_list: `list` of `str` of activation function for self-attention model.
multi_head_attention_activation_list: `list` of `str` of activation function for multi-head attention model.
fc_activation_list: `list` of `str` of activation function in fully-connected layers.
learning_rate: `float` of learning rate.
learning_attenuate_rate: `float` of the factor by which `learning_rate` is attenuated every `attenuate_epoch`.
attenuate_epoch: `int` of the epoch interval at which `learning_rate` is attenuated by a factor of `learning_attenuate_rate`.
optimizer_name: `str` of name of optimizer.
hybridize_flag: Call `mxnet.gluon.HybridBlock.hybridize()` or not.
ctx: `mx.cpu()` or `mx.gpu()`.
initializer: is-a `mxnet.initializer` for parameters of model. If `None`, parameters are drawn from the Xavier distribution.
'''
super(TransformerController, self).__init__()
if computable_loss is None:
computable_loss = gluon.loss.SoftmaxCrossEntropyLoss(
axis=-1,
sparse_label=False,
from_logits=False,
weight=None,
batch_axis=0
)
self.__computable_loss = computable_loss
if encoder is None:
if hidden_dim is None or hidden_dim == depth_dim:
encoder = TransformerEncoder(
depth_dim=depth_dim,
layer_n=layer_n,
head_n=head_n,
self_attention_activation_list=self_attention_activation_list,
fc_activation_list=fc_activation_list,
computable_loss=computable_loss,
initializer=initializer,
not_init_flag=True,
hybridize_flag=hybridize_flag,
dropout_rate=dropout_rate
)
else:
encoder = TransformerEncoder(
depth_dim=hidden_dim,
layer_n=layer_n,
head_n=head_n,
self_attention_activation_list=self_attention_activation_list,
fc_activation_list=fc_activation_list,
computable_loss=computable_loss,
initializer=initializer,
not_init_flag=True,
hybridize_flag=hybridize_flag,
dropout_rate=dropout_rate
)
encoder.embedding_weignt = positional_embedding_weignt
else:
if isinstance(encoder, TransformerModel) is False:
raise TypeError("The type of `encoder` must be `TransformerModel`.")
if decoder is None:
if hidden_dim is None or hidden_dim == depth_dim:
decoder = TransformerDecoder(
head_n=head_n,
depth_dim=depth_dim,
layer_n=layer_n,
self_attention_activation_list=self_attention_activation_list,
multi_head_attention_activation_list=multi_head_attention_activation_list,
fc_activation_list=fc_activation_list,
computable_loss=computable_loss,
initializer=initializer,
not_init_flag=True,
hybridize_flag=hybridize_flag
)
else:
decoder = TransformerDecoder(
head_n=head_n,
depth_dim=hidden_dim,
output_dim=depth_dim,
layer_n=layer_n,
self_attention_activation_list=self_attention_activation_list,
multi_head_attention_activation_list=multi_head_attention_activation_list,
fc_activation_list=fc_activation_list,
computable_loss=computable_loss,
initializer=initializer,
not_init_flag=True,
hybridize_flag=hybridize_flag
)
decoder.embedding_weignt = positional_embedding_weignt
else:
if isinstance(decoder, TransformerModel) is False:
raise TypeError("The type of `decoder` must be `TransformerModel`.")
logger = getLogger("accelbrainbase")
self.logger = logger
if initializer is None:
self.initializer = mx.initializer.Xavier(
rnd_type="gaussian",
factor_type="in",
magnitude=2
)
else:
if isinstance(initializer, mx.initializer.Initializer) is False:
raise TypeError("The type of `initializer` must be `mxnet.initializer.Initializer`.")
self.initializer = initializer
self.encoder = encoder
self.decoder = decoder
with self.name_scope():
if hidden_dim is not None and hidden_dim != depth_dim:
self.encoder_hidden_fc = gluon.nn.Dense(
hidden_dim,
use_bias=True,
flatten=False,
)
self.register_child(self.encoder_hidden_fc)
self.decoder_hidden_fc = gluon.nn.Dense(
hidden_dim,
use_bias=True,
flatten=False,
)
self.register_child(self.decoder_hidden_fc)
else:
self.encoder_hidden_fc = None
self.decoder_hidden_fc = None
self.register_child(self.encoder)
self.register_child(self.decoder)
if self.init_deferred_flag is False:
try:
self.collect_params().initialize(self.initializer, force_reinit=True, ctx=ctx)
self.trainer = gluon.Trainer(
self.collect_params(),
optimizer_name,
{
"learning_rate": learning_rate,
"wd": weight_decay
}
)
if hybridize_flag is True:
self.encoder.hybridize()
self.decoder.hybridize()
except InitDeferredError:
self.logger.debug("The initialization should be deferred.")
for v in regularizatable_data_list:
if isinstance(v, RegularizatableData) is False:
raise TypeError("The type of values of `regularizatable_data_list` must be `RegularizatableData`.")
self.__regularizatable_data_list = regularizatable_data_list
self.__learning_rate = learning_rate
self.__learning_attenuate_rate = learning_attenuate_rate
self.__attenuate_epoch = attenuate_epoch
self.seq_len = seq_len
def collect_params(self, select=None):
'''
Overrides `collect_params` in `mxnet.gluon.HybridBlock`.
'''
params_dict = self.encoder.collect_params(select)
params_dict.update(self.decoder.collect_params(select))
if self.encoder_hidden_fc is not None:
params_dict.update(self.encoder_hidden_fc.collect_params(select))
if self.decoder_hidden_fc is not None:
params_dict.update(self.decoder_hidden_fc.collect_params(select))
return params_dict
def learn(self, iteratable_data):
'''
Learn samples drawn by `IteratableData.generate_learned_samples()`.
Args:
iteratable_data: is-a `TransformerIterator`.
'''
if isinstance(iteratable_data, TransformerIterator) is False:
raise TypeError("The type of `iteratable_data` must be `TransformerIterator`.")
self.__loss_list = []
learning_rate = self.__learning_rate
try:
epoch = 0
iter_n = 0
for encoded_observed_arr, decoded_observed_arr, encoded_mask_arr, decoded_mask_arr, test_encoded_observed_arr, test_decoded_observed_arr, test_encoded_mask_arr, test_decoded_mask_arr, training_target_arr, test_target_arr in iteratable_data.generate_learned_samples():
self.epoch = epoch
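# Attenuate the learning rate by a factor of `learning_attenuate_rate` every `attenuate_epoch` epochs.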
if ((epoch + 1) % self.__attenuate_epoch == 0):
learning_rate = learning_rate * self.__learning_attenuate_rate
self.trainer.set_learning_rate(learning_rate)
with autograd.record():
pred_arr = self.inference(
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr,
decoded_mask_arr
)
loss = self.compute_loss(
pred_arr,
training_target_arr
)
loss.backward()
self.trainer.step(encoded_observed_arr.shape[0])
self.regularize()
if (iter_n+1) % int(iteratable_data.iter_n / iteratable_data.epochs) == 0:
test_pred_arr = self.inference(
test_encoded_observed_arr,
test_decoded_observed_arr,
test_encoded_mask_arr,
test_decoded_mask_arr
)
test_loss = self.compute_loss(
test_pred_arr,
test_target_arr
)
self.__loss_list.append((loss.asnumpy().mean(), test_loss.asnumpy().mean()))
self.logger.debug("Epochs: " + str(epoch + 1) + " Train loss: " + str(loss.asnumpy().mean()) + " Test loss: " + str(test_loss.asnumpy().mean()))
epoch += 1
iter_n += 1
except KeyboardInterrupt:
self.logger.debug("Interrupt.")
self.logger.debug("end. ")
def inference(
self,
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr=None,
decoded_mask_arr=None,
):
'''
Infer from samples drawn by `IteratableData.generate_inferenced_samples()`.
Args:
encoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
encoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
Returns:
`mxnet.ndarray` of inferenced feature points.
'''
if encoded_mask_arr is None:
encoded_mask_arr = nd.ones(
shape=(encoded_observed_arr.shape[0], 1, 1, 1),
ctx=encoded_observed_arr.context
)
if decoded_mask_arr is None:
decoded_mask_arr = nd.ones(
shape=(decoded_observed_arr.shape[0], 1, 1, 1),
ctx=decoded_observed_arr.context
)
return self(
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr,
decoded_mask_arr,
)
def compute_loss(self, pred_arr, labeled_arr):
'''
Compute loss.
Args:
pred_arr: `mxnet.ndarray` or `mxnet.symbol`.
labeled_arr: `mxnet.ndarray` or `mxnet.symbol`.
Returns:
loss.
'''
return self.__computable_loss(pred_arr, labeled_arr)
def hybrid_forward(
self,
F,
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr=None,
decoded_mask_arr=None,
):
'''
Hybrid forward with Gluon API.
Args:
F: `mxnet.ndarray` or `mxnet.symbol`.
encoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
encoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
Returns:
`mxnet.ndarray` or `mxnet.symbol` of inferenced feature points.
'''
# rank-3
return self.forward_propagation(
F,
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr,
decoded_mask_arr,
)
def forward_propagation(
self,
F,
encoded_observed_arr,
decoded_observed_arr,
encoded_mask_arr=None,
decoded_mask_arr=None,
):
'''
Forward propagation with Gluon API.
Args:
F: `mxnet.ndarray` or `mxnet.symbol`.
encoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_observed_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
encoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
decoded_mask_arr: rank-3 Array like or sparse matrix as the observed data points.
The shape is: (batch size, the length of sequence, feature points)
Returns:
`mxnet.ndarray` or `mxnet.symbol` of inferenced feature points.
'''
if self.encoder_hidden_fc is not None:
encoded_observed_arr = self.encoder_hidden_fc(encoded_observed_arr)
if self.decoder_hidden_fc is not None:
decoded_observed_arr = self.decoder_hidden_fc(decoded_observed_arr)
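# Build a causal (look-ahead) mask: each decoder position may attend only to itself and earlier positions.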
steps_arr = F.arange(self.seq_len)
mask_arr = F.broadcast_lesser_equal(
steps_arr.reshape((1, -1)),
steps_arr.reshape((-1, 1))
)
ones_arr = F.ones_like(steps_arr)
seq_len_arr = ones_arr * self.seq_len
batch_mask_arr = F.broadcast_lesser(
steps_arr.reshape((1, -1)),
seq_len_arr.reshape((-1, 1))
)
_decoded_mask_arr = F.broadcast_mul(batch_mask_arr, F.expand_dims(mask_arr, 0))
_decoded_mask_arr = F.expand_dims(_decoded_mask_arr, 0)
_decoded_mask_arr = _decoded_mask_arr + 1e-08
if decoded_mask_arr is None:
decoded_mask_arr = _decoded_mask_arr
else:
decoded_mask_arr = F.broadcast_add(decoded_mask_arr, _decoded_mask_arr)
encoded_arr = self.encoder.inference(
encoded_observed_arr,
encoded_mask_arr
)
self.feature_points_arr = encoded_arr
decoded_arr = self.decoder.inference(
decoded_observed_arr,
encoded_arr,
decoded_mask_arr,
encoded_mask_arr,
)
return decoded_arr
def extract_learned_dict(self):
'''
Extract (pre-) learned parameters.
Returns:
`dict` of the parameters.
'''
params_dict = self.collect_params()
params_arr_dict = {}
for k in params_dict:
params_arr_dict.setdefault(k, params_dict[k].data())
return params_arr_dict
def regularize(self):
'''
Regularization.
'''
params_dict = self.extract_learned_dict()
for regularizatable in self.__regularizatable_data_list:
params_dict = regularizatable.regularize(params_dict)
for k, params in self.collect_params().items():
params.set_data(params_dict[k])
def __rename_file(self, filename):
filename_list = filename.split(".")
_format = filename_list[-1]
g_filename = filename.replace("." + _format, "_encoder." + _format)
d_filename = filename.replace("." + _format, "_decoder." + _format)
return g_filename, d_filename
def save_parameters(self, filename):
'''
Save parameters to files.
Args:
filename: File name.
'''
e_filename, d_filename = self.__rename_file(filename)
self.encoder.save_parameters(e_filename)
self.decoder.save_parameters(d_filename)
def load_parameters(self, filename, ctx=None, allow_missing=False, ignore_extra=False):
'''
Load parameters from files.
Args:
filename: File name.
ctx: `mx.cpu()` or `mx.gpu()`.
allow_missing: `bool` of whether to silently skip loading parameters not represented in the file.
ignore_extra: `bool` of whether to silently ignore parameters from the file that are not present in this `Block`.
'''
e_filename, d_filename = self.__rename_file(filename)
self.encoder.load_parameters(e_filename, ctx=ctx, allow_missing=allow_missing, ignore_extra=ignore_extra)
self.decoder.load_parameters(d_filename, ctx=ctx, allow_missing=allow_missing, ignore_extra=ignore_extra)
def set_readonly(self, value):
''' setter '''
raise TypeError("This property must be read-only.")
def get_init_deferred_flag(self):
''' getter for `bool` that means initialization in this class will be deferred or not.'''
return self.__init_deferred_flag
def set_init_deferred_flag(self, value):
''' setter for `bool` that means initialization in this class will be deferred or not.'''
self.__init_deferred_flag = value
init_deferred_flag = property(get_init_deferred_flag, set_init_deferred_flag)
def get_loss_arr(self):
''' getter '''
return np.array(self.__loss_list)
def set_loss_arr(self, value):
''' setter '''
raise TypeError("This property must be read-only.")
loss_arr = property(get_loss_arr, set_loss_arr)
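# ----------------------------------------------------------------------
# Usage sketch (illustrative only; the batch size, shapes, and
# hyperparameters below are assumptions, not values prescribed by this
# module):
#
#     controller = TransformerController(
#         seq_len=5,
#         depth_dim=100,
#         ctx=mx.cpu(),
#     )
#     encoded_observed_arr = nd.random.normal(shape=(2, 5, 100), ctx=mx.cpu())
#     decoded_observed_arr = nd.random.normal(shape=(2, 5, 100), ctx=mx.cpu())
#     pred_arr = controller.inference(encoded_observed_arr, decoded_observed_arr)
#     # `pred_arr` holds one feature map per decoder position:
#     # (batch size, seq_len, depth_dim).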
|
PypiClean
|
/hvl_ccb-0.14.1.tar.gz/hvl_ccb-0.14.1/docs/hvl_ccb.dev.rst
|
hvl\_ccb.dev
============
Subpackages
-----------
.. toctree::
:maxdepth: 4
hvl_ccb.dev.crylas
hvl_ccb.dev.cube
hvl_ccb.dev.ea_psi9000
hvl_ccb.dev.fluke884x
hvl_ccb.dev.fug
hvl_ccb.dev.heinzinger
hvl_ccb.dev.highland_t560
hvl_ccb.dev.labjack
hvl_ccb.dev.lauda
hvl_ccb.dev.mbw973
hvl_ccb.dev.newport
hvl_ccb.dev.pfeiffer_tpg
hvl_ccb.dev.picotech_pt104
hvl_ccb.dev.protocols
hvl_ccb.dev.rs_rto1024
hvl_ccb.dev.se_ils2t
hvl_ccb.dev.sst_luminox
hvl_ccb.dev.technix
hvl_ccb.dev.tiepie
Submodules
----------
.. toctree::
:maxdepth: 4
hvl_ccb.dev.base
hvl_ccb.dev.utils
hvl_ccb.dev.visa
Module contents
---------------
.. automodule:: hvl_ccb.dev
:members:
:undoc-members:
:show-inheritance:
|
PypiClean
|
/lx-pylib-0.3.0.tar.gz/lx-pylib-0.3.0/README.rst
|
lx-pylib
========
.. image:: https://travis-ci.org/hilarryxu/lx-pylib.svg?branch=master
:target: https://travis-ci.org/hilarryxu/lx-pylib
Some useful python utils.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
$ pip install lx-pylib
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Links
-----
* License: `BSD <https://github.com/hilarryxu/lx-pylib/blob/master/LICENSE>`_
* Code: https://github.com/hilarryxu/lx-pylib
* Issue tracker: https://github.com/hilarryxu/lx-pylib/issues
|
PypiClean
|
/collective.properties-1.0a2.tar.gz/collective.properties-1.0a2/collective/properties/converter.py
|
from zope.component import adapts
from zope.schema.interfaces import IBytesLine, IBytes, IChoice
from Products.CMFPlone.utils import safe_unicode, normalizeString
from z3c.form.interfaces import ITextAreaWidget, ITextWidget
from z3c.form.converter import BaseDataConverter, SequenceDataConverter
from collective.properties.interfaces import IBytesLineTuple, \
ISpecialSelectWidget, ISpecialOrderedSelectWidget
class BytesLineTupleTextAreaDataConverter(BaseDataConverter):
"""A special converter between BytesLineTuple field and text area widget.
The field is a tuple of utf-8 encoded strings, while the widget needs one utf-8
encoded string containing the above-mentioned strings joined by line breaks.
"""
adapts(IBytesLineTuple, ITextAreaWidget)
def toWidgetValue(self, value):
"""Convert from Python sequence (tuple or list) to HTML representation:
unicode string with line breaks between each sequence item.
"""
if value is self.field.missing_value:
return u''
# we keep value in bytes field to be compliant with OFS.PropertyManager
return u'\r\n'.join([safe_unicode(v) for v in value])
def toFieldValue(self, value):
"""See interfaces.IDataConverter"""
if value == u'':
return self.field.missing_value
collectionType = self.field._type
if isinstance(collectionType, tuple):
collectionType = collectionType[-1]
return collectionType([v.encode('utf-8') for v in value.split(u'\r\n')])
class BytesLineTextDataConverter(BaseDataConverter):
"""A special converter between bytes line field and text widget."""
adapts(IBytesLine, ITextWidget)
def toWidgetValue(self, value):
"""Converts string to unicode"""
if value is self.field.missing_value:
return u''
return safe_unicode(value)
def toFieldValue(self, value):
"""See interfaces.IDataConverter"""
if value == u'':
return self.field.missing_value
return value.encode('utf-8')
class BytesTextAreaDataConverter(BytesLineTextDataConverter):
"""A special converter between bytes field and text area widget."""
adapts(IBytes, ITextAreaWidget)
class ChoiceSpecialSelectDataConverter(SequenceDataConverter):
"""A special converter between choice field and special select widget.
It won't break if the current field value is not present inside the widget terms.
Here we also use only the term value, not the term token. The widget we adapt
here works only with term value and title. The token is not used because it
won't work for terms containing non-ascii characters in its value, while at
the same time staying compliant with the IPropertyManager form.
"""
adapts(IChoice, ISpecialSelectWidget)
def toWidgetValue(self, value):
widget = self.widget
# if the value is the missing value, then an empty list is produced.
if value is self.field.missing_value:
return []
# Look up the term in the terms
terms = widget.updateTerms()
if value not in terms:
return []
# ensure we get unicode for select widget, otherwise it breaks
return [value]
def toFieldValue(self, value):
widget = self.widget
if not len(value) or value[0] == widget.noValueToken:
return self.field.missing_value
# as we use a widget that only works with term values, the value
# is ready to use; we just ensure that it is within the
# vocabulary terms
terms = widget.updateTerms()
if value[0] not in terms:
return self.field.missing_value
return value[0]
class BytesLineTupleSpecialOrderedSelectDataConverter(BaseDataConverter):
"""A special converter between bytesline tuple field and special ordered
select widget.
We use it for the same purposes as above defined converter for choice field.
"""
adapts(IBytesLineTuple, ISpecialOrderedSelectWidget)
def toWidgetValue(self, value):
"""Convert from Python bool to HTML representation."""
widget = self.widget
if widget.terms is None:
widget.updateTerms()
terms = widget.terms
return [entry for entry in value if entry in terms]
def toFieldValue(self, value):
"""See interfaces.IDataConverter"""
widget = self.widget
if widget.terms is None:
widget.updateTerms()
collectionType = self.field._type
if isinstance(collectionType, tuple):
collectionType = collectionType[-1]
terms = widget.terms
return collectionType([entry for entry in value if entry in terms])
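# Usage sketch (illustrative only; `_DummyField` is a hypothetical stand-in
# for a real `IBytesLineTuple` field, not part of this package):
#
#     class _DummyField(object):
#         missing_value = None
#         _type = tuple
#
#     converter = BytesLineTupleTextAreaDataConverter(_DummyField(), None)
#     converter.toWidgetValue(('foo', 'bar'))  # -> u'foo\r\nbar'
#     converter.toFieldValue(u'foo\r\nbar')    # -> ('foo', 'bar') as utf-8 strings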
|
PypiClean
|
/hotelling-0.5.0.tar.gz/hotelling-0.5.0/CONTRIBUTING.rst
|
.. highlight:: shell
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every little bit
helps, and credit will always be given.
You can contribute in many ways:
Types of Contributions
----------------------
Report Bugs
~~~~~~~~~~~
Report bugs at https://github.com/fdion/hotelling/issues.
If you are reporting a bug, please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Fix Bugs
~~~~~~~~
Look through the GitHub issues for bugs. Anything tagged with "bug" and "help
wanted" is open to whoever wants to implement it.
Implement Features
~~~~~~~~~~~~~~~~~~
Look through the GitHub issues for features. Anything tagged with "enhancement"
and "help wanted" is open to whoever wants to implement it.
Write Documentation
~~~~~~~~~~~~~~~~~~~
Hotelling T2 could always use more documentation, whether as part of the
official Hotelling T2 docs, in docstrings, or even on the web in blog posts,
articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an issue at https://github.com/fdion/hotelling/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that contributions
are welcome :)
Get Started!
------------
Ready to contribute? Here's how to set up `hotelling` for local development.
1. Fork the `hotelling` repo on GitHub.
2. Clone your fork locally::
$ git clone [email protected]:your_name_here/hotelling.git
3. Install your local copy into a virtualenv. Assuming you have virtualenvwrapper installed, this is how you set up your fork for local development::
$ mkvirtualenv hotelling
$ cd hotelling/
$ python setup.py develop
4. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
5. When you're done making changes, check that your changes pass flake8 and the
tests, including testing other Python versions with tox::
$ flake8 hotelling tests
$ python setup.py test or pytest
$ tox
To get flake8 and tox, just pip install them into your virtualenv.
6. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
7. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated. Put
your new functionality into a function with a docstring, and add the
feature to the list in README.rst.
3. The pull request should work for Python 2.7, 3.5, 3.6, 3.7 and 3.8, and for PyPy. Check
https://travis-ci.org/fdion/hotelling/pull_requests
and make sure that the tests pass for all supported Python versions.
Tips
----
To run a subset of tests::
$ pytest tests.test_hotelling
Deploying
---------
A reminder for the maintainers on how to deploy.
Make sure all your changes are committed (including an entry in HISTORY.rst).
Then run::
$ bump2version patch # possible: major / minor / patch
$ git push
$ git push --tags
Travis will then deploy to PyPI if tests pass.
|
PypiClean
|
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/loader-utils/node_modules/json5/README.md
|
# JSON5 – JSON for Humans
The JSON5 Data Interchange Format (JSON5) is a superset of [JSON] that aims to
alleviate some of the limitations of JSON by expanding its syntax to include
some productions from [ECMAScript 5.1].
This JavaScript library is the official reference implementation for JSON5
parsing and serialization libraries.
[JSON]: https://tools.ietf.org/html/rfc7159
[ECMAScript 5.1]: https://www.ecma-international.org/ecma-262/5.1/
## Summary of Features
The following ECMAScript 5.1 features, which are not supported in JSON, have
been extended to JSON5.
### Objects
- Object keys may be an ECMAScript 5.1 _[IdentifierName]_.
- Objects may have a single trailing comma.
### Arrays
- Arrays may have a single trailing comma.
### Strings
- Strings may be single quoted.
- Strings may span multiple lines by escaping new line characters.
- Strings may include character escapes.
### Numbers
- Numbers may be hexadecimal.
- Numbers may have a leading or trailing decimal point.
- Numbers may be [IEEE 754] positive infinity, negative infinity, and NaN.
- Numbers may begin with an explicit plus sign.
### Comments
- Single and multi-line comments are allowed.
### White Space
- Additional white space characters are allowed.
[IdentifierName]: https://www.ecma-international.org/ecma-262/5.1/#sec-7.6
[IEEE 754]: http://ieeexplore.ieee.org/servlet/opac?punumber=4610933
## Short Example
```js
{
// comments
unquoted: 'and you can quote me on that',
singleQuotes: 'I can use "double quotes" here',
lineBreaks: "Look, Mom! \
No \\n's!",
hexadecimal: 0xdecaf,
leadingDecimalPoint: .8675309, andTrailing: 8675309.,
positiveSign: +1,
trailingComma: 'in objects', andIn: ['arrays',],
"backwardsCompatible": "with JSON",
}
```
## Specification
For a detailed explanation of the JSON5 format, please read the [official
specification](https://json5.github.io/json5-spec/).
## Installation
### Node.js
```sh
npm install json5
```
```js
const JSON5 = require('json5')
```
### Browsers
```html
<script src="https://unpkg.com/json5@^1.0.0"></script>
```
This will create a global `JSON5` variable.
## API
The JSON5 API is compatible with the [JSON API].
[JSON API]:
https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON
### JSON5.parse()
Parses a JSON5 string, constructing the JavaScript value or object described by
the string. An optional reviver function can be provided to perform a
transformation on the resulting object before it is returned.
#### Syntax
JSON5.parse(text[, reviver])
#### Parameters
- `text`: The string to parse as JSON5.
- `reviver`: If a function, this prescribes how the value originally produced by
parsing is transformed, before being returned.
#### Return value
The object corresponding to the given JSON5 text.
### JSON5.stringify()
Converts a JavaScript value to a JSON5 string, optionally replacing values if a
replacer function is specified, or optionally including only the specified
properties if a replacer array is specified.
#### Syntax
JSON5.stringify(value[, replacer[, space]])
JSON5.stringify(value[, options])
#### Parameters
- `value`: The value to convert to a JSON5 string.
- `replacer`: A function that alters the behavior of the stringification
process, or an array of String and Number objects that serve as a whitelist
for selecting/filtering the properties of the value object to be included in
the JSON5 string. If this value is null or not provided, all properties of the
object are included in the resulting JSON5 string.
- `space`: A String or Number object that's used to insert white space into the
output JSON5 string for readability purposes. If this is a Number, it
indicates the number of space characters to use as white space; this number is
capped at 10 (if it is greater, the value is just 10). Values less than 1
indicate that no space should be used. If this is a String, the string (or the
first 10 characters of the string, if it's longer than that) is used as white
space. If this parameter is not provided (or is null), no white space is used.
If white space is used, trailing commas will be used in objects and arrays.
- `options`: An object with the following properties:
- `replacer`: Same as the `replacer` parameter.
- `space`: Same as the `space` parameter.
- `quote`: A String representing the quote character to use when serializing
strings.
#### Return value
A JSON5 string representing the value.
### Node.js `require()` JSON5 files
When using Node.js, you can `require()` JSON5 files by adding the following
statement.
```js
require('json5/lib/register')
```
Then you can load a JSON5 file with a Node.js `require()` statement. For
example:
```js
const config = require('./config.json5')
```
## CLI
Since JSON is more widely used than JSON5, this package includes a CLI for
converting JSON5 to JSON and for validating the syntax of JSON5 documents.
### Installation
```sh
npm install --global json5
```
### Usage
```sh
json5 [options] <file>
```
If `<file>` is not provided, then STDIN is used.
#### Options:
- `-s`, `--space`: The number of spaces to indent or `t` for tabs
- `-o`, `--out-file [file]`: Output to the specified file, otherwise STDOUT
- `-v`, `--validate`: Validate JSON5 but do not output JSON
- `-V`, `--version`: Output the version number
- `-h`, `--help`: Output usage information
## Contributing
### Development
```sh
git clone https://github.com/json5/json5
cd json5
npm install
```
When contributing code, please write relevant tests and run `npm test` and `npm
run lint` before submitting pull requests. Please use an editor that supports
[EditorConfig](http://editorconfig.org/).
### Issues
To report bugs or request features regarding the JSON5 data format, please
submit an issue to the [official specification
repository](https://github.com/json5/json5-spec).
To report bugs or request features regarding the JavaScript implementation of
JSON5, please submit an issue to this repository.
## License
MIT. See [LICENSE.md](./LICENSE.md) for details.
## Credits
[Assem Kishore](https://github.com/aseemk) founded this project.
[Michael Bolin](http://bolinfest.com/) independently arrived at and published
some of these same ideas with awesome explanations and detail. Recommended
reading: [Suggested Improvements to JSON](http://bolinfest.com/essays/json.html)
[Douglas Crockford](http://www.crockford.com/) of course designed and built
JSON, but his state machine diagrams on the [JSON website](http://json.org/), as
cheesy as it may sound, gave us motivation and confidence that building a new
parser to implement these ideas was within reach! The original
implementation of JSON5 was also modeled directly off of Doug’s open-source
[json_parse.js] parser. We’re grateful for that clean and well-documented
code.
[json_parse.js]:
https://github.com/douglascrockford/JSON-js/blob/master/json_parse.js
[Max Nanasy](https://github.com/MaxNanasy) has been an early and prolific
supporter, contributing multiple patches and ideas.
[Andrew Eisenberg](https://github.com/aeisenberg) contributed the original
`stringify` method.
[Jordan Tucker](https://github.com/jordanbtucker) has aligned JSON5 more closely
with ES5, wrote the official JSON5 specification, completely rewrote the
codebase from the ground up, and is actively maintaining this project.
|
PypiClean
|
/kiwitcms-12.4.tar.gz/kiwitcms-12.4/tcms/node_modules/es6-set/node_modules/type/docs/plain-function.md
|
# Plain Function
A _Function_ instance that is not a _Class_
## `plain-function/is`
Confirms if given object is a _plain function_
```javascript
const isPlainFunction = require("type/plain-function/is");
isPlainFunction(function () {}); // true
isPlainFunction(() => {}); // true
isPlainFunction(class {}); // false
isPlainFunction("foo"); // false
```
## `plain-function/ensure`
If the given argument is a _plain function_, it is returned. Otherwise a `TypeError` is thrown.
```javascript
const ensurePlainFunction = require("type/plain-function/ensure");
const fn = function () {};
ensurePlainFunction(fn); // fn
ensurePlainFunction(class {}); // Thrown TypeError: class is not a plain function
```
|
PypiClean
|
/mkdocs_git_authors_plugin-0.7.2-py3-none-any.whl/mkdocs_git_authors_plugin/git/command.py
|
import subprocess
class GitCommandError(Exception):
"""
Exception thrown by a GitCommand.
"""
pass
class GitCommand(object):
"""
Wrapper around a Git command.
Instantiate with a command name and an optional args list.
These can later be modified with set_command() and set_args().
Execute the command with run()
If successful the results can be read as string lists with
- stdout()
- stderr()
In case of an error a verbose GitCommandError is raised.
"""
def __init__(self, command: str, args: list = None):
"""
Initialize the GitCommand.
Args:
command: a string ('git' will implicitly be prepended)
args: a string list with remaining command arguments.
Defaults to an empty list
"""
self.set_command(command)
self.set_args(args if args is not None else [])
self._stdout = None
self._stderr = None
self._completed = False
def run(self):
"""
Execute the configured Git command.
In case of success the results can be retrieved as string lists
with self.stdout() and self.stderr(), otherwise a GitCommandError
is raised.
Args:
Returns:
The process's return code.
Note: usually the results will be read through stdout() and stderr().
"""
args = ["git"]
args.append(self._command)
args.extend(self._args)
p = subprocess.run(
args,
# encoding='utf8', # Uncomment after dropping support for python 3.5
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
try:
p.check_returncode()
except subprocess.CalledProcessError:
msg = ["GitCommand error:"]
msg.append('Command "%s" failed' % " ".join(args))
msg.append("Return code: %s" % p.returncode)
msg.append("Output:")
msg.append(p.stdout.decode("utf-8"))
msg.append("Error messages:")
msg.append(p.stderr.decode("utf-8"))
raise GitCommandError("\n".join(msg))
self._stdout = p.stdout.decode("utf-8").strip("'\n").split("\n")
self._stderr = p.stderr.decode("utf-8").strip("'\n").split("\n")
self._completed = True
return p.returncode
def set_args(self, args: list):
"""
Change the command arguments.
Args:
args: list of process arguments
"""
self._args = args
def set_command(self, command: str):
"""
Change the Git command.
Args:
command: string with the git-NNN command name.
"""
self._command = command
def stderr(self):
"""
Return the stderr output of the command as a string list.
Args:
Returns:
string list
"""
if not self._completed:
raise GitCommandError("Trying to read from uncompleted GitCommand")
return self._stderr
def stdout(self):
"""
Return the stdout output of the command as a string list.
Args:
Returns:
string list
"""
if not self._completed:
raise GitCommandError("Trying to read from uncompleted GitCommand")
return self._stdout
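# Usage sketch (assumes it is run inside a Git repository):
#
#     cmd = GitCommand("log", ["--oneline", "-n", "3"])
#     try:
#         cmd.run()
#         for line in cmd.stdout():
#             print(line)
#     except GitCommandError as err:
#         print(err)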
|
PypiClean
|
/tincanradar-0.1.1.tar.gz/tincanradar-0.1.1/README.rst
|
.. image:: https://travis-ci.org/scivision/tincanradar.svg?branch=master
:target: https://travis-ci.org/scivision/tincanradar
.. image:: https://coveralls.io/repos/github/scivision/tincanradar/badge.svg?branch=master
:target: https://coveralls.io/github/scivision/tincanradar?branch=master
.. image:: https://api.codeclimate.com/v1/badges/c837e410c41e163d47bd/maintainability
:target: https://codeclimate.com/github/scivision/tincanradar/maintainability
:alt: Maintainability
=============
Tin Can Radar
=============
Utilities for designing, building, and using a $35 Tin Can Radar, from the original 2006 prototype
designed and built by Michael Hirsch and advised by Greg Charvat.
I include utilities for designing the Wilkinson power divider used to siphon off a sample
of the transmit waveform for the homodyne receiver.
I include design equations for using coffee cans for antennas,
as well as the more broadband Linear Tapered Slot Antenna.
If you need something more, start an issue or send a message.
.. contents::
=========================== ==========================================================
Function Description
=========================== ==========================================================
FS2dBm.py Convert field strength in dBuV/m or uV/m to 50 ohm dBm
ToneFinder.py Simulate noisy sinusoids from target returns
Friis.py Compute Free Space Path Loss (dB)
=========================== ==========================================================
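The ``Friis.py`` entry above computes the textbook free-space path loss; a
minimal sketch of the underlying formula (the module's own signature is not
shown here)::

    from math import log10, pi

    C = 299792458.0  # speed of light (m/s)

    def fspl_db(range_m, freq_hz):
        """Free Space Path Loss (dB) at distance range_m (m) and frequency freq_hz (Hz)."""
        return 20 * log10(4 * pi * range_m * freq_hz / C)

    fspl_db(100.0, 2.45e9)  # roughly 80 dB at 100 m and 2.45 GHz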
Forward Model
=============
A forward model of linear/non-linear FMCW chirp is provided in Python using an optional Fortran library for speed.
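For a linear sawtooth chirp of bandwidth B swept over T seconds, a point
target at range R returns a beat frequency f_b = 2*B*R / (c*T); a minimal
sketch, independent of the optional Fortran library::

    def beat_frequency(range_m, bandwidth_hz, sweep_s):
        """Beat frequency (Hz) of a linear FMCW chirp for a target at range_m."""
        c = 299792458.0  # speed of light (m/s)
        return 2.0 * bandwidth_hz * range_m / (c * sweep_s)

    beat_frequency(10.0, 50e6, 1e-3)  # ~3.3 kHz for a target at 10 m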
Build
=======
* Mac: ``brew install gcc``
* Linux: ``apt install gfortran``
* Windows: install `gfortran <https://www.scivision.co/install-latest-gfortran-on-ubuntu/>`_
Install
-------
::
pip install -e .
Matlab
======
Old scripts, with some duplication and perhaps not fully correct, are in the matlab directory.
Optional
========
Fortran
-------
::
cd bin
cmake ..
make
|
PypiClean
|
/django-lfc-1.2b2.tar.gz/django-lfc-1.2b2/lfc/static/lfc/js/lfc_tinymce.js
|
var editor;
function addEditor(selector, hide_save) {
if (hide_save == true) {
buttons = "bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,bullist,numlist,|,forecolor,backcolor,styleselect,formatselect,image,|,link,mylink,unlink,|,removeformat,code,|,fullscreen"
}
else {
buttons = "save,bold,italic,underline,strikethrough,|,justifyleft,justifycenter,justifyright,justifyfull,|,bullist,numlist,|,forecolor,backcolor,styleselect,formatselect,image,|,link,mylink,unlink,|,removeformat,code,|,fullscreen"
}
// Theme options
$(selector).tinymce({
// Location of TinyMCE script
script_url : '/static/lfc/tiny_mce/tiny_mce.js',
// General options
theme : "advanced",
plugins : "safari,save,iespell,directionality,fullscreen,xhtmlxtras",
theme_advanced_buttons1 : buttons,
theme_advanced_buttons2 : "",
theme_advanced_buttons3 : "",
theme_advanced_buttons4 : "",
theme_advanced_toolbar_location : "top",
theme_advanced_toolbar_align : "left",
save_onsavecallback : "save",
relative_urls : false,
height : "480",
cleanup : false,
content_css : "/static/lfc_theme/css/tiny.css",
// theme_advanced_statusbar_location : "bottom",
// theme_advanced_resizing : true,
setup : function(ed) {
ed.addButton('link', {
onclick : function(e) {
filebrowser(e, ed);
}
});
ed.addButton('image', {
onclick : function(e) {
imagebrowser(e, ed);
}
});
}
});
};
function insertHTML(html) {
editor.selection.setContent(html);
}
function getSelectedNode() {
return editor.selection.getNode();
}
function getSelectedText() {
content = editor.selection.getContent();
if (content.indexOf("<img") != -1) {
return content;
}
else {
return editor.selection.getContent({format : 'text'});
}
}
function update_editor() {
/* for each field first detach tinymce and then attach again */
$(".wysiwyginput").each(function(idx) {
var obj = $(this);
if (typeof(tinyMCE) != 'undefined' && obj.length > 0) {
obj.tinymce().remove();
}
addEditor(obj, false);
});
}
function save(ed) {
var form = $("#" + ed.id).parents("form:first");
$.ajax({
url: form.attr("action"),
type: form.attr("method"),
data: form.serializeArray(),
dataType: "json",
success : function(data) {
show_message(data["message"]);
for (var html in data["html"])
$(data["html"][html][0]).html(data["html"][html][1]);
}
})
}
function filebrowser(e, ed) {
editor = ed;
node = editor.selection.getNode();
url = node.href || "";
title = node.title || "";
target = node.target || "";
var id = $("#obj-id").attr("data");
$.get("/manage/filebrowser?obj_id=" + id + "&url=" + url + "&title=" + title + "&target=" + target, function(data) {
data = $.parseJSON(data);
$("#overlay .content").html(data["html"]);
switch (data["current_view"]) {
case "mail": display_mail(); break;
case "content": display_content(); break;
case "extern": display_extern(); break;
}
});
$("#overlay").dialog("open");
}
function imagebrowser(e, ed) {
editor = ed;
node = editor.selection.getNode();
url = node.src || "";
title = node.title || "";
klass = node.className || "";
var id = $("#obj-id").attr("data");
$.get("/manage/imagebrowser?obj_id=" + id + "&url=" + url + "&title=" + title + "&class=" + klass, function(data) {
data = $.parseJSON(data);
$("#overlay .content").html(data["html"]);
});
$("#overlay").dialog("open");
}
|
PypiClean
|
/baiduads-sdk-auto-snapshot-2022.2.1.5.tar.gz/baiduads-sdk-auto-snapshot-2022.2.1.5/baiduads/pricestrategy/model/add_price_strategy_response_wrapper.py
|
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
def lazy_import():
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.pricestrategy.model.add_price_strategy_response_wrapper_body import AddPriceStrategyResponseWrapperBody
globals()['AddPriceStrategyResponseWrapperBody'] = AddPriceStrategyResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
class AddPriceStrategyResponseWrapper(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'header': (ApiResponseHeader,), # noqa: E501
'body': (AddPriceStrategyResponseWrapperBody,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'header': 'header', # noqa: E501
'body': 'body', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""AddPriceStrategyResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (AddPriceStrategyResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""AddPriceStrategyResponseWrapper - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
header (ApiResponseHeader): [optional] # noqa: E501
body (AddPriceStrategyResponseWrapperBody): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/stac_nb-0.4.0.tar.gz/stac_nb-0.4.0/docs/installation.rst
|
.. highlight:: shell
============
Installation
============
Stable release
--------------
To install stac-nb, run this command in your terminal:
.. code-block:: console
$ pip install stac-nb
This is the preferred method to install stac-nb, as it will always install the most recent stable release.
If you don't have `pip`_ installed, this `Python installation guide`_ can guide
you through the process.
.. _pip: https://pip.pypa.io
.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
From sources
------------
The sources for stac-nb can be downloaded from the `Github repo`_.
You can either clone the public repository:
.. code-block:: console
$ git clone git://github.com/darrenwiens/stac-nb
Or download the `tarball`_:
.. code-block:: console
$ curl -OJL https://github.com/darrenwiens/stac-nb/tarball/master
Once you have a copy of the source, you can install it with:
.. code-block:: console
$ python setup.py install
.. _Github repo: https://github.com/darrenwiens/stac-nb
.. _tarball: https://github.com/darrenwiens/stac-nb/tarball/master
|
PypiClean
|
/Pyrseas-0.9.1.tar.gz/Pyrseas-0.9.1/docs/cfgobjects.rst
|
Augmenter Configuration Objects
===============================
These configuration objects are predefined in the Augmenter modules or
can be defined or overriden by configuration elements in the
``augmenter`` map. Please see also :doc:`configitems` and
:doc:`predefaug`.
.. module:: pyrseas.augment.function
Configuration Functions
-----------------------
A :class:`CfgFunction` class specifies a Postgres function to be used
by other augmenter objects. For example, this includes procedures to
be invoked by triggers used to maintain audit columns. The
:class:`CfgFunctionDict` class holds all the :class:`CfgFunction`
objects, indexed by the function name and its arguments. A
:class:`CfgFunctionSource` class represents the source code for a
function or part of that source code. A :class:`CfgFunctionTemplate`
class represents the source code for a function, which may include
other elements that can be substituted in the final result. The class
:class:`CfgFunctionSourceDict` holds all the templates currently
defined.
.. autoclass:: CfgFunction
.. automethod:: CfgFunction.apply
.. autoclass:: CfgFunctionDict
.. automethod:: CfgFunctionDict.from_map
.. autoclass:: CfgFunctionSource
.. autoclass:: CfgFunctionTemplate
.. autoclass:: CfgFunctionSourceDict
.. module:: pyrseas.augment.column
Configuration Columns
---------------------
A :class:`CfgColumn` class defines a column to be added to a table by
other augmenter objects. For example, this includes various columns
that serve to capture audit trail information. The columns can be
combined in various ways by the :class:`CfgAuditColumn` objects. The
:class:`CfgColumnDict` class holds all the :class:`CfgColumn` objects,
indexed by column name.
.. autoclass:: CfgColumn
.. automethod:: CfgColumn.apply
.. autoclass:: CfgColumnDict
.. automethod:: CfgColumnDict.from_map
.. module:: pyrseas.augment.trigger
Configuration Triggers
----------------------
A :class:`CfgTrigger` class defines a trigger to be added to a table
by other augmentation objects. For example, this includes triggers to
maintain audit trail columns. The :class:`CfgTriggerDict` class holds
all the :class:`CfgTrigger` objects, indexed by trigger name.
.. autoclass:: CfgTrigger
.. automethod:: CfgTrigger.apply
.. autoclass:: CfgTriggerDict
.. automethod:: CfgTriggerDict.from_map
.. module:: pyrseas.augment.audit
Configuration Audit Columns
---------------------------
A :class:`CfgAuditColumn` class defines a set of attributes (columns,
triggers) to be added to a table. The :class:`CfgAuditColumnDict`
class holds all the :class:`CfgAuditColumn` objects, indexed by
augmentation name.
.. autoclass:: CfgAuditColumn
.. automethod:: CfgAuditColumn.apply
.. autoclass:: CfgAuditColumnDict
.. automethod:: CfgAuditColumnDict.from_map
|
PypiClean
|
/azure_mgmt_storagesync-2.0.0b1-py3-none-any.whl/azure/mgmt/storagesync/operations/_operation_status_operations.py
|
import sys
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import MicrosoftStorageSyncMixinABC, _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
resource_group_name: str,
location_name: str,
workflow_id: str,
operation_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-06-01")) # type: Literal["2022-06-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/locations/{locationName}/workflows/{workflowId}/operations/{operationId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"resourceGroupName": _SERIALIZER.url(
"resource_group_name", resource_group_name, "str", max_length=90, min_length=1
),
"locationName": _SERIALIZER.url("location_name", location_name, "str"),
"workflowId": _SERIALIZER.url("workflow_id", workflow_id, "str"),
"operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class OperationStatusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storagesync.MicrosoftStorageSync`'s
:attr:`operation_status` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(
self, resource_group_name: str, location_name: str, workflow_id: str, operation_id: str, **kwargs: Any
) -> _models.OperationStatus:
"""Get Operation status.
:param resource_group_name: The name of the resource group. The name is case insensitive.
Required.
:type resource_group_name: str
:param location_name: The desired region to obtain information from. Required.
:type location_name: str
:param workflow_id: workflow Id. Required.
:type workflow_id: str
:param operation_id: operation Id. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: OperationStatus or the result of cls(response)
:rtype: ~azure.mgmt.storagesync.models.OperationStatus
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2022-06-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.OperationStatus]
request = build_get_request(
resource_group_name=resource_group_name,
location_name=location_name,
workflow_id=workflow_id,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageSyncError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers["x-ms-request-id"] = self._deserialize("str", response.headers.get("x-ms-request-id"))
response_headers["x-ms-correlation-request-id"] = self._deserialize(
"str", response.headers.get("x-ms-correlation-request-id")
)
deserialized = self._deserialize("OperationStatus", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.StorageSync/locations/{locationName}/workflows/{workflowId}/operations/{operationId}"} # type: ignore
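# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). The resource
# identifiers below are hypothetical placeholders, and the sketch assumes the
# `azure-identity` package is available for credential handling.
def _example_get_operation_status():  # pragma: no cover - documentation only
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.storagesync import MicrosoftStorageSync

    client = MicrosoftStorageSync(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",  # placeholder
    )
    # Mirrors OperationStatusOperations.get above; all arguments are placeholders.
    return client.operation_status.get(
        resource_group_name="example-rg",
        location_name="westus2",
        workflow_id="example-workflow",
        operation_id="example-operation",
    )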
|
PypiClean
|
/fds.sdk.CapitalStructureReportBuilder-1.0.6-py3-none-any.whl/fds/sdk/CapitalStructureReportBuilder/model_utils.py
|
from datetime import date, datetime # noqa: F401
from copy import deepcopy
import inspect
import io
import os
import pprint
import re
import tempfile
from dateutil.parser import parse
from fds.sdk.CapitalStructureReportBuilder.exceptions import (
ApiKeyError,
ApiAttributeError,
ApiTypeError,
ApiValueError,
)
none_type = type(None)
file_type = io.IOBase
def convert_js_args_to_python_args(fn):
from functools import wraps
@wraps(fn)
def wrapped_init(_self, *args, **kwargs):
"""
        An attribute named `self` received from the api would conflict with the
        reserved `self` parameter of a class method. During generation, `self`
        attributes are mapped to `_self` in models. Here, we use `_self` instead
        of `self` to avoid that conflict.
"""
spec_property_naming = kwargs.get('_spec_property_naming', False)
if spec_property_naming:
kwargs = change_keys_js_to_python(kwargs, _self if isinstance(_self, type) else _self.__class__)
return fn(_self, *args, **kwargs)
return wrapped_init
class cached_property(object):
# this caches the result of the function call for fn with no inputs
# use this as a decorator on function methods that you want converted
# into cached properties
result_key = '_results'
def __init__(self, fn):
self._fn = fn
def __get__(self, instance, cls=None):
if self.result_key in vars(self):
return vars(self)[self.result_key]
else:
result = self._fn()
setattr(self, self.result_key, result)
return result
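# Illustrative sketch (not part of the generated module): this cached_property
# calls the wrapped function with *no* arguments and caches the result on the
# descriptor itself, so it suits zero-argument, class-level lazy values (the
# generated models use it for attributes like `additional_properties_type`).
class _CachedPropertyExample(object):  # hypothetical demonstration class
    @cached_property
    def answer():  # note: no `self` -- the descriptor invokes fn()
        return 6 * 7
# _CachedPropertyExample().answer evaluates to 42; the function body runs only
# once, after which the cached value is returned for every access.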
PRIMITIVE_TYPES = (list, float, int, bool, datetime, date, str, file_type)
def allows_single_value_input(cls):
"""
This function returns True if the input composed schema model or any
descendant model allows a value only input
This is true for cases where oneOf contains items like:
oneOf:
- float
- NumberWithValidation
- StringEnum
- ArrayModel
- null
TODO: lru_cache this
"""
if (
issubclass(cls, ModelSimple) or
cls in PRIMITIVE_TYPES
):
return True
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return False
return any(allows_single_value_input(c) for c in cls._composed_schemas['oneOf'])
return False
def composed_model_input_classes(cls):
"""
This function returns a list of the possible models that can be accepted as
inputs.
TODO: lru_cache this
"""
if issubclass(cls, ModelSimple) or cls in PRIMITIVE_TYPES:
return [cls]
elif issubclass(cls, ModelNormal):
if cls.discriminator is None:
return [cls]
else:
return get_discriminated_classes(cls)
elif issubclass(cls, ModelComposed):
if not cls._composed_schemas['oneOf']:
return []
if cls.discriminator is None:
input_classes = []
for c in cls._composed_schemas['oneOf']:
input_classes.extend(composed_model_input_classes(c))
return input_classes
else:
return get_discriminated_classes(cls)
return []
class OpenApiModel(object):
"""The base class for all OpenAPIModels"""
def set_attribute(self, name, value):
# this is only used to set properties on self
path_to_item = []
if self._path_to_item:
path_to_item.extend(self._path_to_item)
path_to_item.append(name)
if name in self.openapi_types:
required_types_mixed = self.openapi_types[name]
elif self.additional_properties_type is None:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
path_to_item
)
elif self.additional_properties_type is not None:
required_types_mixed = self.additional_properties_type
if get_simple_class(name) != str:
error_msg = type_error_message(
var_name=name,
var_value=name,
valid_classes=(str,),
key_type=True
)
raise ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=(str,),
key_type=True
)
if self._check_type:
value = validate_and_convert_types(
value, required_types_mixed, path_to_item, self._spec_property_naming,
self._check_type, configuration=self._configuration)
if (name,) in self.allowed_values:
check_allowed_values(
self.allowed_values,
(name,),
value
)
if (name,) in self.validations:
check_validations(
self.validations,
(name,),
value,
self._configuration
)
self.__dict__['_data_store'][name] = value
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
def __setattr__(self, attr, value):
"""set the value of an attribute using dot notation: `instance.attr = val`"""
self[attr] = value
def __getattr__(self, attr):
"""get the value of an attribute using dot notation: `instance.attr`"""
return self.__getitem__(attr)
def __copy__(self):
cls = self.__class__
if self.get("_spec_property_naming", False):
return cls._new_from_openapi_data(**self.__dict__)
else:
            return cls.__new__(cls, **self.__dict__)
def __deepcopy__(self, memo):
cls = self.__class__
if self.get("_spec_property_naming", False):
new_inst = cls._new_from_openapi_data()
else:
new_inst = cls.__new__(cls)
for k, v in self.__dict__.items():
setattr(new_inst, k, deepcopy(v, memo))
return new_inst
def __new__(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
            # Then in the composed schema Dog, we will make an instance of the
            # Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return super(OpenApiModel, cls).__new__(cls)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
            # but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return super(OpenApiModel, cls).__new__(cls)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = super(OpenApiModel, cls).__new__(cls)
self_inst.__init__(*args, **kwargs)
if kwargs.get("_spec_property_naming", False):
# when true, implies new is from deserialization
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
else:
new_inst = new_cls.__new__(new_cls, *args, **kwargs)
new_inst.__init__(*args, **kwargs)
return new_inst
@classmethod
@convert_js_args_to_python_args
def _new_from_openapi_data(cls, *args, **kwargs):
# this function uses the discriminator to
# pick a new schema/class to instantiate because a discriminator
# propertyName value was passed in
if len(args) == 1:
arg = args[0]
if arg is None and is_type_nullable(cls):
# The input data is the 'null' value and the type is nullable.
return None
if issubclass(cls, ModelComposed) and allows_single_value_input(cls):
model_kwargs = {}
oneof_instance = get_oneof_instance(cls, model_kwargs, kwargs, model_arg=arg)
return oneof_instance
visited_composed_classes = kwargs.get('_visited_composed_classes', ())
if (
cls.discriminator is None or
cls in visited_composed_classes
):
# Use case 1: this openapi schema (cls) does not have a discriminator
# Use case 2: we have already visited this class before and are sure that we
# want to instantiate it this time. We have visited this class deserializing
# a payload with a discriminator. During that process we traveled through
# this class but did not make an instance of it. Now we are making an
# instance of a composed class which contains cls in it, so this time make an instance of cls.
#
# Here's an example of use case 2: If Animal has a discriminator
# petType and we pass in "Dog", and the class Dog
# allOf includes Animal, we move through Animal
# once using the discriminator, and pick Dog.
            # Then in the composed schema Dog, we will make an instance of the
            # Animal class (because Dog has allOf: Animal) but this time we won't travel
# through Animal's discriminator because we passed in
# _visited_composed_classes = (Animal,)
return cls._from_openapi_data(*args, **kwargs)
# Get the name and value of the discriminator property.
# The discriminator name is obtained from the discriminator meta-data
# and the discriminator value is obtained from the input data.
discr_propertyname_py = list(cls.discriminator.keys())[0]
discr_propertyname_js = cls.attribute_map[discr_propertyname_py]
if discr_propertyname_js in kwargs:
discr_value = kwargs[discr_propertyname_js]
elif discr_propertyname_py in kwargs:
discr_value = kwargs[discr_propertyname_py]
else:
# The input data does not contain the discriminator property.
path_to_item = kwargs.get('_path_to_item', ())
raise ApiValueError(
"Cannot deserialize input data due to missing discriminator. "
"The discriminator property '%s' is missing at path: %s" %
(discr_propertyname_js, path_to_item)
)
# Implementation note: the last argument to get_discriminator_class
# is a list of visited classes. get_discriminator_class may recursively
# call itself and update the list of visited classes, and the initial
# value must be an empty list. Hence not using 'visited_composed_classes'
new_cls = get_discriminator_class(
cls, discr_propertyname_py, discr_value, [])
if new_cls is None:
path_to_item = kwargs.get('_path_to_item', ())
disc_prop_value = kwargs.get(
discr_propertyname_js, kwargs.get(discr_propertyname_py))
raise ApiValueError(
"Cannot deserialize input data due to invalid discriminator "
"value. The OpenAPI document has no mapping for discriminator "
"property '%s'='%s' at path: %s" %
(discr_propertyname_js, disc_prop_value, path_to_item)
)
if new_cls in visited_composed_classes:
# if we are making an instance of a composed schema Descendent
# which allOf includes Ancestor, then Ancestor contains
# a discriminator that includes Descendent.
# So if we make an instance of Descendent, we have to make an
# instance of Ancestor to hold the allOf properties.
# This code detects that use case and makes the instance of Ancestor
# For example:
# When making an instance of Dog, _visited_composed_classes = (Dog,)
# then we make an instance of Animal to include in dog._composed_instances
# so when we are here, cls is Animal
# cls.discriminator != None
# cls not in _visited_composed_classes
# new_cls = Dog
            # but we know that we already have Dog
# because it is in visited_composed_classes
# so make Animal here
return cls._from_openapi_data(*args, **kwargs)
# Build a list containing all oneOf and anyOf descendants.
oneof_anyof_classes = None
if cls._composed_schemas is not None:
oneof_anyof_classes = (
cls._composed_schemas.get('oneOf', ()) +
cls._composed_schemas.get('anyOf', ()))
oneof_anyof_child = new_cls in oneof_anyof_classes
kwargs['_visited_composed_classes'] = visited_composed_classes + (cls,)
if cls._composed_schemas.get('allOf') and oneof_anyof_child:
# Validate that we can make self because when we make the
# new_cls it will not include the allOf validations in self
self_inst = cls._from_openapi_data(*args, **kwargs)
new_inst = new_cls._new_from_openapi_data(*args, **kwargs)
return new_inst
class ModelSimple(OpenApiModel):
"""the parent class of models whose type != object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_str(self):
"""Returns the string representation of the model"""
return str(self.value)
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
this_val = self._data_store['value']
that_val = other._data_store['value']
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
return vals_equal
class ModelNormal(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
self.set_attribute(name, value)
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
return self.__dict__['_data_store'].get(name, default)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
if name in self:
return self.get(name)
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
return name in self.__dict__['_data_store']
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
class ModelComposed(OpenApiModel):
"""the parent class of models whose type == object in their
swagger/openapi and have oneOf/allOf/anyOf
When one sets a property we use var_name_to_model_instances to store the value in
the correct class instances + run any type checking + validation code.
When one gets a property we use var_name_to_model_instances to get the value
from the correct class instances.
This allows multiple composed schemas to contain the same property with additive
constraints on the value.
_composed_schemas (dict) stores the anyOf/allOf/oneOf classes
key (str): allOf/oneOf/anyOf
value (list): the classes in the XOf definition.
Note: none_type can be included when the openapi document version >= 3.1.0
_composed_instances (list): stores a list of instances of the composed schemas
defined in _composed_schemas. When properties are accessed in the self instance,
they are returned from the self._data_store or the data stores in the instances
in self._composed_schemas
_var_name_to_model_instances (dict): maps between a variable name on self and
the composed instances (self included) which contain that data
key (str): property name
value (list): list of class instances, self or instances in _composed_instances
which contain the value that the key is referring to.
"""
def __setitem__(self, name, value):
"""set the value of an attribute using square-bracket notation: `instance[attr] = val`"""
if name in self.required_properties:
self.__dict__[name] = value
return
"""
Use cases:
1. additional_properties_type is None (additionalProperties == False in spec)
Check for property presence in self.openapi_types
if not present then throw an error
if present set in self, set attribute
always set on composed schemas
2. additional_properties_type exists
set attribute on self
always set on composed schemas
"""
if self.additional_properties_type is None:
"""
For an attribute to exist on a composed schema it must:
- fulfill schema_requirements in the self composed schema not considering oneOf/anyOf/allOf schemas AND
- fulfill schema_requirements in each oneOf/anyOf/allOf schemas
schema_requirements:
For an attribute to exist on a schema it must:
- be present in properties at the schema OR
- have additionalProperties unset (defaults additionalProperties = any type) OR
- have additionalProperties set
"""
if name not in self.openapi_types:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
# attribute must be set on self and composed instances
self.set_attribute(name, value)
for model_instance in self._composed_instances:
setattr(model_instance, name, value)
if name not in self._var_name_to_model_instances:
# we assigned an additional property
self.__dict__['_var_name_to_model_instances'][name] = self._composed_instances + [self]
return None
__unset_attribute_value__ = object()
def get(self, name, default=None):
"""returns the value of an attribute or some default value if the attribute was not set"""
if name in self.required_properties:
return self.__dict__[name]
# get the attribute from the correct instance
model_instances = self._var_name_to_model_instances.get(name)
values = []
# A composed model stores self and child (oneof/anyOf/allOf) models under
# self._var_name_to_model_instances.
# Any property must exist in self and all model instances
# The value stored in all model instances must be the same
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
v = model_instance._data_store[name]
if v not in values:
values.append(v)
len_values = len(values)
if len_values == 0:
return default
elif len_values == 1:
return values[0]
elif len_values > 1:
raise ApiValueError(
"Values stored for property {0} in {1} differ when looking "
"at self and self's composed instances. All values must be "
"the same".format(name, type(self).__name__),
[e for e in [self._path_to_item, name] if e]
)
def __getitem__(self, name):
"""get the value of an attribute using square-bracket notation: `instance[attr]`"""
value = self.get(name, self.__unset_attribute_value__)
if value is self.__unset_attribute_value__:
raise ApiAttributeError(
"{0} has no attribute '{1}'".format(
type(self).__name__, name),
[e for e in [self._path_to_item, name] if e]
)
return value
def __contains__(self, name):
"""used by `in` operator to check if an attribute value was set in an instance: `'attr' in instance`"""
if name in self.required_properties:
return name in self.__dict__
model_instances = self._var_name_to_model_instances.get(
name, self._additional_properties_model_instances)
if model_instances:
for model_instance in model_instances:
if name in model_instance._data_store:
return True
return False
def to_dict(self):
"""Returns the model properties as a dict"""
return model_to_dict(self, serialize=False)
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, self.__class__):
return False
if not set(self._data_store.keys()) == set(other._data_store.keys()):
return False
for _var_name, this_val in self._data_store.items():
that_val = other._data_store[_var_name]
types = set()
types.add(this_val.__class__)
types.add(that_val.__class__)
vals_equal = this_val == that_val
if not vals_equal:
return False
return True
COERCION_INDEX_BY_TYPE = {
ModelComposed: 0,
ModelNormal: 1,
ModelSimple: 2,
none_type: 3, # The type of 'None'.
list: 4,
dict: 5,
float: 6,
int: 7,
bool: 8,
datetime: 9,
date: 10,
str: 11,
file_type: 12, # 'file_type' is an alias for the built-in 'file' or 'io.IOBase' type.
}
# these are used to limit what type conversions we try to do
# when we have a valid type already and we want to try converting
# to another type
UPCONVERSION_TYPE_PAIRS = (
(str, datetime),
(str, date),
(int, float), # A float may be serialized as an integer, e.g. '3' is a valid serialized float.
(list, ModelComposed),
(dict, ModelComposed),
(str, ModelComposed),
(int, ModelComposed),
(float, ModelComposed),
(list, ModelComposed),
(list, ModelNormal),
(dict, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
)
COERCIBLE_TYPE_PAIRS = {
False: ( # client instantiation of a model with client data
# (dict, ModelComposed),
# (list, ModelComposed),
# (dict, ModelNormal),
# (list, ModelNormal),
# (str, ModelSimple),
# (int, ModelSimple),
# (float, ModelSimple),
# (list, ModelSimple),
# (str, int),
# (str, float),
# (str, datetime),
# (str, date),
# (int, str),
# (float, str),
),
True: ( # server -> client data
(dict, ModelComposed),
(list, ModelComposed),
(dict, ModelNormal),
(list, ModelNormal),
(str, ModelSimple),
(int, ModelSimple),
(float, ModelSimple),
(list, ModelSimple),
# (str, int),
# (str, float),
(str, datetime),
(str, date),
# (int, str),
# (float, str),
(str, file_type)
),
}
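# Illustrative reading of the two tables above: pairs in UPCONVERSION_TYPE_PAIRS
# may always be attempted (e.g. a str can become a datetime), while pairs in
# COERCIBLE_TYPE_PAIRS are only consulted when the input is of the wrong type
# and must be converted; the True key covers server -> client deserialization.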
def get_simple_class(input_value):
"""Returns an input_value's simple class that we will use for type checking
Python2:
float and int will return int, where int is the python3 int backport
str and unicode will return str, where str is the python3 str backport
Note: float and int ARE both instances of int backport
Note: str_py2 and unicode_py2 are NOT both instances of str backport
Args:
input_value (class/class_instance): the item for which we will return
the simple class
"""
if isinstance(input_value, type):
# input_value is a class
return input_value
elif isinstance(input_value, tuple):
return tuple
elif isinstance(input_value, list):
return list
elif isinstance(input_value, dict):
return dict
elif isinstance(input_value, none_type):
return none_type
elif isinstance(input_value, file_type):
return file_type
elif isinstance(input_value, bool):
# this must be higher than the int check because
# isinstance(True, int) == True
return bool
elif isinstance(input_value, int):
return int
elif isinstance(input_value, datetime):
# this must be higher than the date check because
# isinstance(datetime_instance, date) == True
return datetime
elif isinstance(input_value, date):
return date
elif isinstance(input_value, str):
return str
return type(input_value)
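# Illustrative results of the ordering above (hypothetical calls):
#   get_simple_class(True) is bool               -- bool is checked before int
#   get_simple_class(datetime.now()) is datetime -- datetime is checked before date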
def check_allowed_values(allowed_values, input_variable_path, input_values):
"""Raises an exception if the input_values are not allowed
Args:
allowed_values (dict): the allowed_values dict
input_variable_path (tuple): the path to the input variable
input_values (list/str/int/float/date/datetime): the values that we
are checking to see if they are in allowed_values
"""
these_allowed_values = list(allowed_values[input_variable_path].values())
if (isinstance(input_values, list)
and not set(input_values).issubset(
set(these_allowed_values))):
invalid_values = ", ".join(
            map(str, set(input_values) - set(these_allowed_values)))
raise ApiValueError(
"Invalid values for `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (isinstance(input_values, dict)
and not set(
input_values.keys()).issubset(set(these_allowed_values))):
invalid_values = ", ".join(
map(str, set(input_values.keys()) - set(these_allowed_values)))
raise ApiValueError(
"Invalid keys in `%s` [%s], must be a subset of [%s]" %
(
input_variable_path[0],
invalid_values,
", ".join(map(str, these_allowed_values))
)
)
elif (not isinstance(input_values, (list, dict))
and input_values not in these_allowed_values):
raise ApiValueError(
"Invalid value for `%s` (%s), must be one of %s" %
(
input_variable_path[0],
input_values,
these_allowed_values
)
)
def is_json_validation_enabled(schema_keyword, configuration=None):
"""Returns true if JSON schema validation is enabled for the specified
validation keyword. This can be used to skip JSON schema structural validation
as requested in the configuration.
Args:
schema_keyword (string): the name of a JSON schema validation keyword.
configuration (Configuration): the configuration class.
"""
return (configuration is None or
not hasattr(configuration, '_disabled_client_side_validations') or
schema_keyword not in configuration._disabled_client_side_validations)
def check_validations(
validations, input_variable_path, input_values,
configuration=None):
"""Raises an exception if the input_values are invalid
Args:
validations (dict): the validation dictionary.
input_variable_path (tuple): the path to the input variable.
input_values (list/str/int/float/date/datetime): the values that we
are checking.
configuration (Configuration): the configuration class.
"""
if input_values is None:
return
current_validations = validations[input_variable_path]
if (is_json_validation_enabled('multipleOf', configuration) and
'multiple_of' in current_validations and
isinstance(input_values, (int, float)) and
not (float(input_values) / current_validations['multiple_of']).is_integer()):
# Note 'multipleOf' will be as good as the floating point arithmetic.
raise ApiValueError(
"Invalid value for `%s`, value must be a multiple of "
"`%s`" % (
input_variable_path[0],
current_validations['multiple_of']
)
)
if (is_json_validation_enabled('maxLength', configuration) and
'max_length' in current_validations and
len(input_values) > current_validations['max_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['max_length']
)
)
if (is_json_validation_enabled('minLength', configuration) and
'min_length' in current_validations and
len(input_values) < current_validations['min_length']):
raise ApiValueError(
"Invalid value for `%s`, length must be greater than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['min_length']
)
)
if (is_json_validation_enabled('maxItems', configuration) and
'max_items' in current_validations and
len(input_values) > current_validations['max_items']):
raise ApiValueError(
"Invalid value for `%s`, number of items must be less than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['max_items']
)
)
if (is_json_validation_enabled('minItems', configuration) and
'min_items' in current_validations and
len(input_values) < current_validations['min_items']):
        raise ApiValueError(
"Invalid value for `%s`, number of items must be greater than or "
"equal to `%s`" % (
input_variable_path[0],
current_validations['min_items']
)
)
items = ('exclusive_maximum', 'inclusive_maximum', 'exclusive_minimum',
'inclusive_minimum')
if (any(item in current_validations for item in items)):
if isinstance(input_values, list):
max_val = max(input_values)
min_val = min(input_values)
elif isinstance(input_values, dict):
max_val = max(input_values.values())
min_val = min(input_values.values())
else:
max_val = input_values
min_val = input_values
if (is_json_validation_enabled('exclusiveMaximum', configuration) and
'exclusive_maximum' in current_validations and
max_val >= current_validations['exclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than `%s`" % (
input_variable_path[0],
current_validations['exclusive_maximum']
)
)
if (is_json_validation_enabled('maximum', configuration) and
'inclusive_maximum' in current_validations and
max_val > current_validations['inclusive_maximum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value less than or equal to "
"`%s`" % (
input_variable_path[0],
current_validations['inclusive_maximum']
)
)
if (is_json_validation_enabled('exclusiveMinimum', configuration) and
'exclusive_minimum' in current_validations and
min_val <= current_validations['exclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than `%s`" %
(
input_variable_path[0],
                    current_validations['exclusive_minimum']
)
)
if (is_json_validation_enabled('minimum', configuration) and
'inclusive_minimum' in current_validations and
min_val < current_validations['inclusive_minimum']):
raise ApiValueError(
"Invalid value for `%s`, must be a value greater than or equal "
"to `%s`" % (
input_variable_path[0],
current_validations['inclusive_minimum']
)
)
flags = current_validations.get('regex', {}).get('flags', 0)
if (is_json_validation_enabled('pattern', configuration) and
'regex' in current_validations and
not re.search(current_validations['regex']['pattern'],
input_values, flags=flags)):
err_msg = r"Invalid value for `%s`, must match regular expression `%s`" % (
input_variable_path[0],
current_validations['regex']['pattern']
)
if flags != 0:
# Don't print the regex flags if the flags are not
# specified in the OAS document.
err_msg = r"%s with flags=`%s`" % (err_msg, flags)
raise ApiValueError(err_msg)
def order_response_types(required_types):
"""Returns the required types sorted in coercion order
Args:
required_types (list/tuple): collection of classes or instance of
list or dict with class information inside it.
Returns:
(list): coercion order sorted collection of classes or instance
of list or dict with class information inside it.
"""
def index_getter(class_or_instance):
if isinstance(class_or_instance, list):
return COERCION_INDEX_BY_TYPE[list]
elif isinstance(class_or_instance, dict):
return COERCION_INDEX_BY_TYPE[dict]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelComposed)):
return COERCION_INDEX_BY_TYPE[ModelComposed]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelNormal)):
return COERCION_INDEX_BY_TYPE[ModelNormal]
elif (inspect.isclass(class_or_instance)
and issubclass(class_or_instance, ModelSimple)):
return COERCION_INDEX_BY_TYPE[ModelSimple]
elif class_or_instance in COERCION_INDEX_BY_TYPE:
return COERCION_INDEX_BY_TYPE[class_or_instance]
raise ApiValueError("Unsupported type: %s" % class_or_instance)
sorted_types = sorted(
required_types,
key=lambda class_or_instance: index_getter(class_or_instance)
)
return sorted_types
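# Illustrative example: order_response_types((str, none_type, ModelNormal))
# returns [ModelNormal, none_type, str], because model classes have the lowest
# coercion indices in COERCION_INDEX_BY_TYPE and plain str the highest of the three.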
def remove_uncoercible(required_types_classes, current_item, spec_property_naming,
must_convert=True):
"""Only keeps the type conversions that are possible
Args:
required_types_classes (tuple): tuple of classes that are required
these should be ordered by COERCION_INDEX_BY_TYPE
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
            False if the variable names in the input data are python
variable names in PEP-8 snake case.
current_item (any): the current item (input data) to be converted
Keyword Args:
must_convert (bool): if True the item to convert is of the wrong
type and we want a big list of coercibles
if False, we want a limited list of coercibles
Returns:
(list): the remaining coercible required types, classes only
"""
current_type_simple = get_simple_class(current_item)
results_classes = []
for required_type_class in required_types_classes:
# convert our models to OpenApiModel
required_type_class_simplified = required_type_class
if isinstance(required_type_class_simplified, type):
if issubclass(required_type_class_simplified, ModelComposed):
required_type_class_simplified = ModelComposed
elif issubclass(required_type_class_simplified, ModelNormal):
required_type_class_simplified = ModelNormal
elif issubclass(required_type_class_simplified, ModelSimple):
required_type_class_simplified = ModelSimple
if required_type_class_simplified == current_type_simple:
# don't consider converting to one's own class
continue
class_pair = (current_type_simple, required_type_class_simplified)
if must_convert and class_pair in COERCIBLE_TYPE_PAIRS[spec_property_naming]:
results_classes.append(required_type_class)
elif class_pair in UPCONVERSION_TYPE_PAIRS:
results_classes.append(required_type_class)
return results_classes
def get_discriminated_classes(cls):
"""
Returns all the classes that a discriminator converts to
TODO: lru_cache this
"""
possible_classes = []
key = list(cls.discriminator.keys())[0]
if is_type_nullable(cls):
possible_classes.append(cls)
for discr_cls in cls.discriminator[key].values():
if hasattr(discr_cls, 'discriminator') and discr_cls.discriminator is not None:
possible_classes.extend(get_discriminated_classes(discr_cls))
else:
possible_classes.append(discr_cls)
return possible_classes
def get_possible_classes(cls, from_server_context):
# TODO: lru_cache this
possible_classes = [cls]
if from_server_context:
return possible_classes
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
possible_classes = []
possible_classes.extend(get_discriminated_classes(cls))
elif issubclass(cls, ModelComposed):
possible_classes.extend(composed_model_input_classes(cls))
return possible_classes
def get_required_type_classes(required_types_mixed, spec_property_naming):
"""Converts the tuple required_types into a tuple and a dict described
below
Args:
required_types_mixed (tuple/list): will contain either classes or
instance of list or dict
spec_property_naming (bool): if True these values came from the
server, and we use the data types in our endpoints.
If False, we are client side and we need to include
oneOf and discriminator classes inside the data types in our endpoints
Returns:
(valid_classes, dict_valid_class_to_child_types_mixed):
valid_classes (tuple): the valid classes that the current item
should be
dict_valid_class_to_child_types_mixed (dict):
valid_class (class): this is the key
child_types_mixed (list/dict/tuple): describes the valid child
types
"""
valid_classes = []
child_req_types_by_current_type = {}
for required_type in required_types_mixed:
if isinstance(required_type, list):
valid_classes.append(list)
child_req_types_by_current_type[list] = required_type
elif isinstance(required_type, tuple):
valid_classes.append(tuple)
child_req_types_by_current_type[tuple] = required_type
elif isinstance(required_type, dict):
valid_classes.append(dict)
child_req_types_by_current_type[dict] = required_type[str]
else:
valid_classes.extend(get_possible_classes(required_type, spec_property_naming))
return tuple(valid_classes), child_req_types_by_current_type
def change_keys_js_to_python(input_dict, model_class):
"""
Converts from javascript_key keys in the input_dict to python_keys in
the output dict using the mapping in model_class.
    If the input_dict contains a key which is not declared in the model_class,
the key is added to the output dict as is. The assumption is the model_class
may have undeclared properties (additionalProperties attribute in the OAS
document).
"""
if getattr(model_class, 'attribute_map', None) is None:
return input_dict
output_dict = {}
reversed_attr_map = {value: key for key, value in
model_class.attribute_map.items()}
for javascript_key, value in input_dict.items():
python_key = reversed_attr_map.get(javascript_key)
if python_key is None:
# if the key is unknown, it is in error or it is an
# additionalProperties variable
python_key = javascript_key
output_dict[python_key] = value
return output_dict
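# Illustrative example with a hypothetical model class whose attribute_map is
# {'pet_type': 'petType'}:
#   change_keys_js_to_python({'petType': 'Dog', 'extra': 1}, ThatClass)
#   -> {'pet_type': 'Dog', 'extra': 1}
# Declared keys are renamed to their python names; unknown keys such as 'extra'
# pass through unchanged, on the assumption they are additionalProperties.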
def get_type_error(var_value, path_to_item, valid_classes, key_type=False):
error_msg = type_error_message(
var_name=path_to_item[-1],
var_value=var_value,
valid_classes=valid_classes,
key_type=key_type
)
return ApiTypeError(
error_msg,
path_to_item=path_to_item,
valid_classes=valid_classes,
key_type=key_type
)
def deserialize_primitive(data, klass, path_to_item):
"""Deserializes string to primitive type.
:param data: str/int/float
:param klass: str/class the class to convert to
:return: int, float, str, bool, date, datetime
"""
additional_message = ""
try:
if klass in {datetime, date}:
additional_message = (
"If you need your parameter to have a fallback "
"string value, please set its type as `type: {}` in your "
"spec. That allows the value to be any type. "
)
if klass == datetime:
if len(data) < 8:
raise ValueError("This is not a datetime")
# The string should be in iso8601 datetime format.
parsed_datetime = parse(data)
date_only = (
parsed_datetime.hour == 0 and
parsed_datetime.minute == 0 and
parsed_datetime.second == 0 and
parsed_datetime.tzinfo is None and
8 <= len(data) <= 10
)
if date_only:
raise ValueError("This is a date, not a datetime")
return parsed_datetime
elif klass == date:
if len(data) < 8:
raise ValueError("This is not a date")
return parse(data).date()
else:
converted_value = klass(data)
if isinstance(data, str) and klass == float:
if str(converted_value) != data:
# '7' -> 7.0 -> '7.0' != '7'
raise ValueError('This is not a float')
return converted_value
except (OverflowError, ValueError) as ex:
# parse can raise OverflowError
raise ApiValueError(
"{0}Failed to parse {1} as {2}".format(
additional_message, repr(data), klass.__name__
),
path_to_item=path_to_item
) from ex
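# Illustrative behaviour (hypothetical path argument):
#   deserialize_primitive("2021-01-02", date, ["d"]) -> datetime.date(2021, 1, 2)
#   deserialize_primitive("2021-01-02", datetime, ["d"]) raises ApiValueError,
#   because a string with no time component is treated as a date, not a datetime.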
def get_discriminator_class(model_class,
discr_name,
discr_value, cls_visited):
"""Returns the child class specified by the discriminator.
Args:
model_class (OpenApiModel): the model class.
discr_name (string): the name of the discriminator property.
discr_value (any): the discriminator value.
cls_visited (list): list of model classes that have been visited.
Used to determine the discriminator class without
visiting circular references indefinitely.
Returns:
used_model_class (class/None): the chosen child class that will be used
to deserialize the data, for example dog.Dog.
If a class is not found, None is returned.
"""
if model_class in cls_visited:
# The class has already been visited and no suitable class was found.
return None
cls_visited.append(model_class)
used_model_class = None
if discr_name in model_class.discriminator:
class_name_to_discr_class = model_class.discriminator[discr_name]
used_model_class = class_name_to_discr_class.get(discr_value)
if used_model_class is None:
# We didn't find a discriminated class in class_name_to_discr_class.
# So look in the ancestor or descendant discriminators
# The discriminator mapping may exist in a descendant (anyOf, oneOf)
# or ancestor (allOf).
# Ancestor example: in the GrandparentAnimal -> ParentPet -> ChildCat
# hierarchy, the discriminator mappings may be defined at any level
# in the hierarchy.
# Descendant example: mammal -> whale/zebra/Pig -> BasquePig/DanishPig
# if we try to make BasquePig from mammal, we need to travel through
# the oneOf descendant discriminators to find BasquePig
descendant_classes = model_class._composed_schemas.get('oneOf', ()) + \
model_class._composed_schemas.get('anyOf', ())
ancestor_classes = model_class._composed_schemas.get('allOf', ())
possible_classes = descendant_classes + ancestor_classes
for cls in possible_classes:
# Check if the schema has inherited discriminators.
if hasattr(cls, 'discriminator') and cls.discriminator is not None:
used_model_class = get_discriminator_class(
cls, discr_name, discr_value, cls_visited)
if used_model_class is not None:
return used_model_class
return used_model_class
def deserialize_model(model_data, model_class, path_to_item, check_type,
configuration, spec_property_naming):
"""Deserializes model_data to model instance.
Args:
model_data (int/str/float/bool/none_type/list/dict): data to instantiate the model
model_class (OpenApiModel): the model class
path_to_item (list): path to the model in the received data
        check_type (bool): whether to check the data type for the values in
the model
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
            False if the variable names in the input data are python
variable names in PEP-8 snake case.
Returns:
model instance
Raise:
ApiTypeError
ApiValueError
ApiKeyError
"""
kw_args = dict(_check_type=check_type,
_path_to_item=path_to_item,
_configuration=configuration,
_spec_property_naming=spec_property_naming)
if issubclass(model_class, ModelSimple):
return model_class._new_from_openapi_data(model_data, **kw_args)
if isinstance(model_data, dict):
kw_args.update(model_data)
return model_class._new_from_openapi_data(**kw_args)
elif isinstance(model_data, PRIMITIVE_TYPES):
return model_class._new_from_openapi_data(model_data, **kw_args)
def deserialize_file(response_data, configuration, content_disposition=None):
"""Deserializes body to file
Saves response body into a file in a temporary folder,
using the filename from the `Content-Disposition` header if provided.
Args:
param response_data (str): the file data to write
configuration (Configuration): the instance to use to convert files
Keyword Args:
content_disposition (str): the value of the Content-Disposition
header
Returns:
(file_type): the deserialized file which is open
The user is responsible for closing and reading the file
"""
fd, path = tempfile.mkstemp(dir=configuration.temp_folder_path)
os.close(fd)
os.remove(path)
if content_disposition:
filename = re.search(r'filename=[\'"]?([^\'"\s]+)[\'"]?',
content_disposition).group(1)
path = os.path.join(os.path.dirname(path), filename)
with open(path, "wb") as f:
if isinstance(response_data, str):
# change str to bytes so we can write it
response_data = response_data.encode('utf-8')
f.write(response_data)
f = open(path, "rb")
return f
def attempt_convert_item(input_value, valid_classes, path_to_item,
configuration, spec_property_naming, key_type=False,
must_convert=False, check_type=True):
"""
Args:
input_value (any): the data to convert
valid_classes (any): the classes that are valid
path_to_item (list): the path to the item to convert
configuration (Configuration): the instance to use to convert files
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
            False if the variable names in the input data are python
variable names in PEP-8 snake case.
key_type (bool): if True we need to convert a key type (not supported)
must_convert (bool): if True we must convert
check_type (bool): if True we check the type or the returned data in
ModelComposed/ModelNormal/ModelSimple instances
Returns:
instance (any) the fixed item
Raises:
ApiTypeError
ApiValueError
ApiKeyError
"""
valid_classes_ordered = order_response_types(valid_classes)
valid_classes_coercible = remove_uncoercible(
valid_classes_ordered, input_value, spec_property_naming)
if not valid_classes_coercible or key_type:
# we do not handle keytype errors, json will take care
# of this for us
if must_convert or configuration is None or not configuration.discard_unknown_keys:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=key_type)
for valid_class in valid_classes_coercible:
try:
if issubclass(valid_class, OpenApiModel):
return deserialize_model(input_value, valid_class,
path_to_item, check_type,
configuration, spec_property_naming)
elif valid_class == file_type:
return deserialize_file(input_value, configuration)
return deserialize_primitive(input_value, valid_class,
path_to_item)
except (ApiTypeError, ApiValueError, ApiKeyError) as conversion_exc:
if must_convert:
raise conversion_exc
# if we have conversion errors when must_convert == False
# we ignore the exception and move on to the next class
continue
# we were unable to convert, must_convert == False
return input_value
def is_type_nullable(input_type):
"""
Returns true if None is an allowed value for the specified input_type.
A type is nullable if at least one of the following conditions is true:
    1. The OAS 'nullable' attribute has been specified,
    2. The type is the 'null' type,
    3. The type is an anyOf/oneOf composed schema, and a child schema is
       the 'null' type.
Args:
input_type (type): the class of the input_value that we are
checking
Returns:
bool
"""
if input_type is none_type:
return True
if issubclass(input_type, OpenApiModel) and input_type._nullable:
return True
if issubclass(input_type, ModelComposed):
# If oneOf/anyOf, check if the 'null' type is one of the allowed types.
for t in input_type._composed_schemas.get('oneOf', ()):
if is_type_nullable(t): return True
for t in input_type._composed_schemas.get('anyOf', ()):
if is_type_nullable(t): return True
return False
def is_valid_type(input_class_simple, valid_classes):
"""
Args:
input_class_simple (class): the class of the input_value that we are
checking
valid_classes (tuple): the valid classes that the current item
should be
Returns:
bool
"""
if issubclass(input_class_simple, OpenApiModel) and \
valid_classes == (bool, date, datetime, dict, float, int, list, str, none_type,):
return True
valid_type = input_class_simple in valid_classes
if not valid_type and (
issubclass(input_class_simple, OpenApiModel) or
input_class_simple is none_type):
for valid_class in valid_classes:
if input_class_simple is none_type and is_type_nullable(valid_class):
# Schema is oneOf/anyOf and the 'null' type is one of the allowed types.
return True
if not (issubclass(valid_class, OpenApiModel) and valid_class.discriminator):
continue
discr_propertyname_py = list(valid_class.discriminator.keys())[0]
discriminator_classes = (
valid_class.discriminator[discr_propertyname_py].values()
)
valid_type = is_valid_type(input_class_simple, discriminator_classes)
if valid_type:
return True
return valid_type
def validate_and_convert_types(input_value, required_types_mixed, path_to_item,
spec_property_naming, _check_type, configuration=None):
"""Raises a TypeError is there is a problem, otherwise returns value
Args:
input_value (any): the data to validate/convert
required_types_mixed (list/dict/tuple): A list of
valid classes, or a list tuples of valid classes, or a dict where
the value is a tuple of value classes
path_to_item: (list) the path to the data being validated
this stores a list of keys or indices to get to the data being
validated
spec_property_naming (bool): True if the variable names in the input
data are serialized names as specified in the OpenAPI document.
            False if the variable names in the input data are python
variable names in PEP-8 snake case.
_check_type: (boolean) if true, type will be checked and conversion
will be attempted.
configuration: (Configuration): the configuration class to use
when converting file_type items.
If passed, conversion will be attempted when possible
If not passed, no conversions will be attempted and
exceptions will be raised
Returns:
the correctly typed value
Raises:
ApiTypeError
"""
results = get_required_type_classes(required_types_mixed, spec_property_naming)
valid_classes, child_req_types_by_current_type = results
input_class_simple = get_simple_class(input_value)
valid_type = is_valid_type(input_class_simple, valid_classes)
if not valid_type:
if configuration:
# if input_value is not valid_type try to convert it
converted_instance = attempt_convert_item(
input_value,
valid_classes,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=True,
check_type=_check_type
)
return converted_instance
else:
raise get_type_error(input_value, path_to_item, valid_classes,
key_type=False)
# input_value's type is in valid_classes
if len(valid_classes) > 1 and configuration:
# there are valid classes which are not the current class
valid_classes_coercible = remove_uncoercible(
valid_classes, input_value, spec_property_naming, must_convert=False)
if valid_classes_coercible:
converted_instance = attempt_convert_item(
input_value,
valid_classes_coercible,
path_to_item,
configuration,
spec_property_naming,
key_type=False,
must_convert=False,
check_type=_check_type
)
return converted_instance
if child_req_types_by_current_type == {}:
# all types are of the required types and there are no more inner
# variables left to look at
return input_value
inner_required_types = child_req_types_by_current_type.get(
type(input_value)
)
if inner_required_types is None:
# for this type, there are not more inner variables left to look at
return input_value
if isinstance(input_value, list):
if input_value == []:
# allow an empty list
return input_value
for index, inner_value in enumerate(input_value):
inner_path = list(path_to_item)
inner_path.append(index)
input_value[index] = validate_and_convert_types(
inner_value,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
elif isinstance(input_value, dict):
if input_value == {}:
# allow an empty dict
return input_value
for inner_key, inner_val in input_value.items():
inner_path = list(path_to_item)
inner_path.append(inner_key)
if get_simple_class(inner_key) != str:
raise get_type_error(inner_key, inner_path, valid_classes,
key_type=True)
input_value[inner_key] = validate_and_convert_types(
inner_val,
inner_required_types,
inner_path,
spec_property_naming,
_check_type,
configuration=configuration
)
return input_value
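# Illustrative calls (hypothetical path argument): for a field typed (str,),
#   validate_and_convert_types("abc", (str,), ["field"], True, True)
# returns "abc" unchanged, while passing 1 for the same field raises
# ApiTypeError when no configuration is supplied to attempt a conversion.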
def model_to_dict(model_instance, serialize=True):
"""Returns the model properties as a dict
Args:
model_instance (one of your model instances): the model instance that
will be converted to a dict.
Keyword Args:
serialize (bool): if True, the keys in the dict will be values from
attribute_map
"""
result = {}
extract_item = lambda item: (item[0], model_to_dict(item[1], serialize=serialize)) if hasattr(item[1], '_data_store') else item
model_instances = [model_instance]
if model_instance._composed_schemas:
model_instances.extend(model_instance._composed_instances)
seen_json_attribute_names = set()
used_fallback_python_attribute_names = set()
py_to_json_map = {}
for model_instance in model_instances:
for attr, value in model_instance._data_store.items():
if serialize:
# we use get here because additional property key names do not
# exist in attribute_map
try:
attr = model_instance.attribute_map[attr]
py_to_json_map.update(model_instance.attribute_map)
seen_json_attribute_names.add(attr)
except KeyError:
used_fallback_python_attribute_names.add(attr)
if isinstance(value, list):
if not value:
# allow an empty list
result[attr] = value
else:
res = []
for v in value:
if isinstance(v, PRIMITIVE_TYPES) or v is None:
res.append(v)
elif isinstance(v, ModelSimple):
res.append(v.value)
elif isinstance(v, dict):
res.append(dict(map(
extract_item,
v.items()
)))
else:
res.append(model_to_dict(v, serialize=serialize))
result[attr] = res
elif isinstance(value, dict):
result[attr] = dict(map(
extract_item,
value.items()
))
elif isinstance(value, ModelSimple):
result[attr] = value.value
elif hasattr(value, '_data_store'):
result[attr] = model_to_dict(value, serialize=serialize)
else:
result[attr] = value
if serialize:
for python_key in used_fallback_python_attribute_names:
json_key = py_to_json_map.get(python_key)
if json_key is None:
continue
if python_key == json_key:
continue
json_key_assigned_no_need_for_python_key = json_key in seen_json_attribute_names
if json_key_assigned_no_need_for_python_key:
del result[python_key]
return result
def type_error_message(var_value=None, var_name=None, valid_classes=None,
key_type=None):
"""
Keyword Args:
var_value (any): the variable which has the type_error
var_name (str): the name of the variable which has the type error
valid_classes (tuple): the accepted classes for current_item's
value
key_type (bool): False if our value is a value in a dict
True if it is a key in a dict
False if our item is an item in a list
"""
key_or_value = 'value'
if key_type:
key_or_value = 'key'
valid_classes_phrase = get_valid_classes_phrase(valid_classes)
msg = (
"Invalid type for variable '{0}'. Required {1} type {2} and "
"passed type was {3}".format(
var_name,
key_or_value,
valid_classes_phrase,
type(var_value).__name__,
)
)
return msg
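# Illustrative example (comment only, not part of the generated module):
#   type_error_message(var_value=1, var_name="id", valid_classes=(str,), key_type=False)
# returns:
#   "Invalid type for variable 'id'. Required value type is str and passed type was int"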
def get_valid_classes_phrase(input_classes):
"""Returns a string phrase describing what types are allowed
"""
all_classes = list(input_classes)
all_classes = sorted(all_classes, key=lambda cls: cls.__name__)
all_class_names = [cls.__name__ for cls in all_classes]
if len(all_class_names) == 1:
return 'is {0}'.format(all_class_names[0])
return "is one of [{0}]".format(", ".join(all_class_names))
def get_allof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
used to make instances
constant_args (dict):
metadata arguments:
_check_type
_path_to_item
_spec_property_naming
_configuration
_visited_composed_classes
Returns
composed_instances (list)
"""
composed_instances = []
for allof_class in self._composed_schemas['allOf']:
try:
if constant_args.get('_spec_property_naming'):
allof_instance = allof_class._from_openapi_data(**model_args, **constant_args)
else:
allof_instance = allof_class(**model_args, **constant_args)
composed_instances.append(allof_instance)
except Exception as ex:
raise ApiValueError(
"Invalid inputs given to generate an instance of '%s'. The "
"input data was invalid for the allOf schema '%s' in the composed "
"schema '%s'. Error=%s" % (
allof_class.__name__,
allof_class.__name__,
self.__class__.__name__,
str(ex)
)
) from ex
return composed_instances
def get_oneof_instance(cls, model_kwargs, constant_kwargs, model_arg=None):
"""
Find the oneOf schema that matches the input data (e.g. payload).
If exactly one schema matches the input data, an instance of that schema
is returned.
If zero or more than one schema matches the input data, an exception is raised.
In OAS 3.x, the payload MUST, by validation, match exactly one of the
schemas described by oneOf.
Args:
cls: the class we are handling
model_kwargs (dict): var_name to var_value
The input data, e.g. the payload that must match a oneOf schema
in the OpenAPI document.
constant_kwargs (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Kwargs:
model_arg: (int, float, bool, str, date, datetime, ModelSimple, None):
the value to assign to a primitive class or ModelSimple class
Notes:
- this is only passed in when oneOf includes types which are not object
- None is used to suppress handling of model_arg, nullable models are handled in __new__
Returns
oneof_instance (instance)
"""
if len(cls._composed_schemas['oneOf']) == 0:
return None
oneof_instances = []
# Iterate over each oneOf schema and determine if the input data
# matches the oneOf schemas.
for oneof_class in cls._composed_schemas['oneOf']:
# The composed oneOf schema allows the 'null' type and the input data
# is the null value. This is an OAS >= 3.1 feature.
if oneof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
single_value_input = allows_single_value_input(oneof_class)
try:
if not single_value_input:
if model_arg is not None:
continue
if constant_kwargs.get('_spec_property_naming'):
oneof_instance = oneof_class._new_from_openapi_data(**model_kwargs, **constant_kwargs)
else:
oneof_instance = oneof_class(**model_kwargs, **constant_kwargs)
else:
if issubclass(oneof_class, ModelSimple):
if constant_kwargs.get('_spec_property_naming'):
oneof_instance = oneof_class._new_from_openapi_data(model_arg, **constant_kwargs)
else:
oneof_instance = oneof_class(model_arg, **constant_kwargs)
elif oneof_class in PRIMITIVE_TYPES:
oneof_instance = validate_and_convert_types(
model_arg,
(oneof_class,),
constant_kwargs['_path_to_item'],
constant_kwargs['_spec_property_naming'],
constant_kwargs['_check_type'],
configuration=constant_kwargs['_configuration']
)
oneof_instances.append((oneof_class, oneof_instance))
except Exception:
pass
if len(oneof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None "
"of the oneOf schemas matched the input data." %
cls.__name__
)
elif len(oneof_instances) > 1:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. Multiple "
"oneOf schemas matched the inputs, but a max of one is allowed. "
"Candidates: %s" %
(cls.__name__, oneof_instances)
)
return oneof_instances[0][1]
def get_anyof_instances(self, model_args, constant_args):
"""
Args:
self: the class we are handling
model_args (dict): var_name to var_value
The input data, e.g. the payload that must match at least one
anyOf child schema in the OpenAPI document.
constant_args (dict): var_name to var_value
args that every model requires, including configuration, server
and path to item.
Returns
anyof_instances (list)
"""
anyof_instances = []
if len(self._composed_schemas['anyOf']) == 0:
return anyof_instances
for anyof_class in self._composed_schemas['anyOf']:
# The composed oneOf schema allows the 'null' type and the input data
# is the null value. This is an OAS >= 3.1 feature.
if anyof_class is none_type:
# skip none_types because we are deserializing dict data.
# none_type deserialization is handled in the __new__ method
continue
try:
if constant_args.get('_spec_property_naming'):
anyof_instance = anyof_class._new_from_openapi_data(**model_args, **constant_args)
else:
anyof_instance = anyof_class(**model_args, **constant_args)
anyof_instances.append(anyof_instance)
except Exception:
pass
if len(anyof_instances) == 0:
raise ApiValueError(
"Invalid inputs given to generate an instance of %s. None of the "
"anyOf schemas matched the inputs." %
self.__class__.__name__
)
return anyof_instances
def get_discarded_args(self, composed_instances, model_args):
"""
Gathers the args that were discarded by configuration.discard_unknown_keys
"""
model_arg_keys = model_args.keys()
discarded_args = set()
# arguments passed to self were already converted to python names
# before __init__ was called
for instance in composed_instances:
if instance.__class__ in self._composed_schemas['allOf']:
try:
keys = instance.to_dict().keys()
discarded_keys = model_arg_keys - keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
else:
try:
all_keys = set(model_to_dict(instance, serialize=False).keys())
js_keys = model_to_dict(instance, serialize=True).keys()
all_keys.update(js_keys)
discarded_keys = model_arg_keys - all_keys
discarded_args.update(discarded_keys)
except Exception:
# allOf integer schema will throw exception
pass
return discarded_args
def validate_get_composed_info(constant_args, model_args, self):
"""
For composed schemas, generate schema instances for
all schemas in the oneOf/anyOf/allOf definition. If additional
properties are allowed, also assign those properties on
all matched schemas that contain additionalProperties.
Openapi schemas are python classes.
Exceptions are raised if:
- 0 or > 1 oneOf schema matches the model_args input data
- no anyOf schema matches the model_args input data
- any of the allOf schemas do not match the model_args input data
Args:
constant_args (dict): these are the args that every model requires
model_args (dict): these are the required and optional spec args that
were passed in to make this model
self (class): the class that we are instantiating
This class contains self._composed_schemas
Returns:
composed_info (list): length four
composed_instances (list): the composed instances which are not
self
var_name_to_model_instances (dict): a dict going from var_name
to the model_instance which holds that var_name
the model_instance may be self or an instance of one of the
classes in self.composed_instances()
additional_properties_model_instances (list): a list of the
model instances which have the property
additional_properties_type. This list can include self
discarded_args (set): the input args that were discarded, e.g. by
configuration.discard_unknown_keys
"""
# create composed_instances
composed_instances = []
allof_instances = get_allof_instances(self, model_args, constant_args)
composed_instances.extend(allof_instances)
oneof_instance = get_oneof_instance(self.__class__, model_args, constant_args)
if oneof_instance is not None:
composed_instances.append(oneof_instance)
anyof_instances = get_anyof_instances(self, model_args, constant_args)
composed_instances.extend(anyof_instances)
"""
set additional_properties_model_instances
additional properties must be evaluated at the schema level
so self's additional properties are most important
If self is a composed schema with:
- no properties defined in self
- additionalProperties: False
Then for object payloads every property is an additional property
and they are not allowed, so only empty dict is allowed
Properties must be set on all matching schemas
so when a property is assigned to a composed instance, it must be set on all
composed instances regardless of additionalProperties presence
keeping it to prevent breaking changes in v5.0.1
TODO remove cls._additional_properties_model_instances in 6.0.0
"""
additional_properties_model_instances = []
if self.additional_properties_type is not None:
additional_properties_model_instances = [self]
"""
no need to set properties on self in here, they will be set in __init__
By here all composed schema oneOf/anyOf/allOf instances have their properties set using
model_args
"""
discarded_args = get_discarded_args(self, composed_instances, model_args)
# map variable names to composed_instances
var_name_to_model_instances = {}
for prop_name in model_args:
if prop_name not in discarded_args:
var_name_to_model_instances[prop_name] = [self] + composed_instances
return [
composed_instances,
var_name_to_model_instances,
additional_properties_model_instances,
discarded_args
]
|
PypiClean
|
/usbsecurity-server-4.9.1.tar.gz/usbsecurity-server-4.9.1/usbsecurity_server/static/admin/css/vendor/select2/LICENSE-SELECT2.md
|
The MIT License (MIT)
Copyright (c) 2012-2017 Kevin Brown, Igor Vaynberg, and Select2 contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
PypiClean
|
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/intl/locale-data/jsonp/en-GI.js
|
IntlPolyfill.__addLocaleData({locale:"en-GI",date:{ca:["gregory","buddhist","chinese","coptic","dangi","ethioaa","ethiopic","generic","hebrew","indian","islamic","islamicc","japanese","persian","roc"],hourNo0:true,hour12:false,formats:{short:"{1}, {0}",medium:"{1}, {0}",full:"{1} 'at' {0}",long:"{1} 'at' {0}",availableFormats:{"d":"d","E":"ccc",Ed:"E d",Ehm:"E h:mm a",EHm:"E HH:mm",Ehms:"E h:mm:ss a",EHms:"E HH:mm:ss",Gy:"y G",GyMMM:"MMM y G",GyMMMd:"d MMM y G",GyMMMEd:"E, d MMM y G","h":"h a","H":"HH",hm:"h:mm a",Hm:"HH:mm",hms:"h:mm:ss a",Hms:"HH:mm:ss",hmsv:"h:mm:ss a v",Hmsv:"HH:mm:ss v",hmv:"h:mm a v",Hmv:"HH:mm v","M":"L",Md:"dd/MM",MEd:"E, dd/MM",MMdd:"dd/MM",MMM:"LLL",MMMd:"d MMM",MMMEd:"E, d MMM",MMMMd:"d MMMM",ms:"mm:ss","y":"y",yM:"MM/y",yMd:"dd/MM/y",yMEd:"E, dd/MM/y",yMMM:"MMM y",yMMMd:"d MMM y",yMMMEd:"E, d MMM y",yMMMM:"MMMM y",yQQQ:"QQQ y",yQQQQ:"QQQQ y"},dateFormats:{yMMMMEEEEd:"EEEE, d MMMM y",yMMMMd:"d MMMM y",yMMMd:"d MMM y",yMd:"dd/MM/y"},timeFormats:{hmmsszzzz:"HH:mm:ss zzzz",hmsz:"HH:mm:ss z",hms:"HH:mm:ss",hm:"HH:mm"}},calendars:{buddhist:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["BE"],short:["BE"],long:["BE"]},dayPeriods:{am:"AM",pm:"PM"}},chinese:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Mo1","Mo2","Mo3","Mo4","Mo5","Mo6","Mo7","Mo8","Mo9","Mo10","Mo11","Mo12"],long:["Month1","Month2","Month3","Month4","Month5","Month6","Month7","Month8","Month9","Month10","Month11","Month12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},dayPeriods:{am:"AM",pm:"PM"}},coptic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Tout","Baba","Hator","Kiahk","Toba","Amshir","Baramhat","Baramouda","Bashans","Paona","Epep","Mesra","Nasie"],long:["Tout","Baba","Hator","Kiahk","Toba","Amshir","Baramhat","Baramouda","Bashans","Paona","Epep","Mesra","Nasie"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},dangi:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Mo1","Mo2","Mo3","Mo4","Mo5","Mo6","Mo7","Mo8","Mo9","Mo10","Mo11","Mo12"],long:["Month1","Month2","Month3","Month4","Month5","Month6","Month7","Month8","Month9","Month10","Month11","Month12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},dayPeriods:{am:"AM",pm:"PM"}},ethiopic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"],long:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","
Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},ethioaa:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13"],short:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"],long:["Meskerem","Tekemt","Hedar","Tahsas","Ter","Yekatit","Megabit","Miazia","Genbot","Sene","Hamle","Nehasse","Pagumen"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0"],short:["ERA0"],long:["ERA0"]},dayPeriods:{am:"AM",pm:"PM"}},generic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["M01","M02","M03","M04","M05","M06","M07","M08","M09","M10","M11","M12"],long:["M01","M02","M03","M04","M05","M06","M07","M08","M09","M10","M11","M12"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["ERA0","ERA1"],short:["ERA0","ERA1"],long:["ERA0","ERA1"]},dayPeriods:{am:"AM",pm:"PM"}},gregory:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["B","A","BCE","CE"],short:["BC","AD","BCE","CE"],long:["Before Christ","Anno Domini","Before Common Era","Common Era"]},dayPeriods:{am:"AM",pm:"PM"}},hebrew:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12","13","7"],short:["Tishri","Heshvan","Kislev","Tevet","Shevat","Adar I","Adar","Nisan","Iyar","Sivan","Tamuz","Av","Elul","Adar II"],long:["Tishri","Heshvan","Kislev","Tevet","Shevat","Adar I","Adar","Nisan","Iyar","Sivan","Tamuz","Av","Elul","Adar II"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AM"],short:["AM"],long:["AM"]},dayPeriods:{am:"AM",pm:"PM"}},indian:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Chaitra","Vaisakha","Jyaistha","Asadha","Sravana","Bhadra","Asvina","Kartika","Agrahayana","Pausa","Magha","Phalguna"],long:["Chaitra","Vaisakha","Jyaistha","Asadha","Sravana","Bhadra","Asvina","Kartika","Agrahayana","Pausa","Magha","Phalguna"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Saka"],short:["Saka"],long:["Saka"]},dayPeriods:{am:"AM",pm:"PM"}},islamic:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Muh.","Saf.","Rab. I","Rab. II","Jum. I","Jum. 
II","Raj.","Sha.","Ram.","Shaw.","Dhuʻl-Q.","Dhuʻl-H."],long:["Muharram","Safar","Rabiʻ I","Rabiʻ II","Jumada I","Jumada II","Rajab","Shaʻban","Ramadan","Shawwal","Dhuʻl-Qiʻdah","Dhuʻl-Hijjah"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AH"],short:["AH"],long:["AH"]},dayPeriods:{am:"AM",pm:"PM"}},islamicc:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Muh.","Saf.","Rab. I","Rab. II","Jum. I","Jum. II","Raj.","Sha.","Ram.","Shaw.","Dhuʻl-Q.","Dhuʻl-H."],long:["Muharram","Safar","Rabiʻ I","Rabiʻ II","Jumada I","Jumada II","Rajab","Shaʻban","Ramadan","Shawwal","Dhuʻl-Qiʻdah","Dhuʻl-Hijjah"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AH"],short:["AH"],long:["AH"]},dayPeriods:{am:"AM",pm:"PM"}},japanese:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku (990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei 
(1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei (1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei (1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","M","T","S","H"],short:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku 
(990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei (1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei (1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei 
(1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","Meiji","Taishō","Shōwa","Heisei"],long:["Taika (645–650)","Hakuchi (650–671)","Hakuhō (672–686)","Shuchō (686–701)","Taihō (701–704)","Keiun (704–708)","Wadō (708–715)","Reiki (715–717)","Yōrō (717–724)","Jinki (724–729)","Tenpyō (729–749)","Tenpyō-kampō (749-749)","Tenpyō-shōhō (749-757)","Tenpyō-hōji (757-765)","Tenpyō-jingo (765-767)","Jingo-keiun (767-770)","Hōki (770–780)","Ten-ō (781-782)","Enryaku (782–806)","Daidō (806–810)","Kōnin (810–824)","Tenchō (824–834)","Jōwa (834–848)","Kajō (848–851)","Ninju (851–854)","Saikō (854–857)","Ten-an (857-859)","Jōgan (859–877)","Gangyō (877–885)","Ninna (885–889)","Kanpyō (889–898)","Shōtai (898–901)","Engi (901–923)","Enchō (923–931)","Jōhei (931–938)","Tengyō (938–947)","Tenryaku (947–957)","Tentoku (957–961)","Ōwa (961–964)","Kōhō (964–968)","Anna (968–970)","Tenroku (970–973)","Ten’en (973–976)","Jōgen (976–978)","Tengen (978–983)","Eikan (983–985)","Kanna (985–987)","Eien (987–989)","Eiso (989–990)","Shōryaku (990–995)","Chōtoku (995–999)","Chōhō (999–1004)","Kankō (1004–1012)","Chōwa (1012–1017)","Kannin (1017–1021)","Jian (1021–1024)","Manju (1024–1028)","Chōgen (1028–1037)","Chōryaku (1037–1040)","Chōkyū (1040–1044)","Kantoku (1044–1046)","Eishō (1046–1053)","Tengi (1053–1058)","Kōhei (1058–1065)","Jiryaku (1065–1069)","Enkyū (1069–1074)","Shōho (1074–1077)","Shōryaku (1077–1081)","Eihō (1081–1084)","Ōtoku (1084–1087)","Kanji (1087–1094)","Kahō (1094–1096)","Eichō (1096–1097)","Jōtoku (1097–1099)","Kōwa (1099–1104)","Chōji (1104–1106)","Kashō (1106–1108)","Tennin (1108–1110)","Ten-ei (1110-1113)","Eikyū (1113–1118)","Gen’ei (1118–1120)","Hōan (1120–1124)","Tenji (1124–1126)","Daiji (1126–1131)","Tenshō (1131–1132)","Chōshō (1132–1135)","Hōen (1135–1141)","Eiji (1141–1142)","Kōji (1142–1144)","Ten’yō (1144–1145)","Kyūan (1145–1151)","Ninpei (1151–1154)","Kyūju (1154–1156)","Hōgen (1156–1159)","Heiji (1159–1160)","Eiryaku (1160–1161)","Ōho (1161–1163)","Chōkan (1163–1165)","Eiman (1165–1166)","Nin’an (1166–1169)","Kaō (1169–1171)","Shōan (1171–1175)","Angen (1175–1177)","Jishō (1177–1181)","Yōwa (1181–1182)","Juei (1182–1184)","Genryaku (1184–1185)","Bunji (1185–1190)","Kenkyū (1190–1199)","Shōji (1199–1201)","Kennin (1201–1204)","Genkyū (1204–1206)","Ken’ei (1206–1207)","Jōgen (1207–1211)","Kenryaku (1211–1213)","Kenpō (1213–1219)","Jōkyū (1219–1222)","Jōō (1222–1224)","Gennin (1224–1225)","Karoku (1225–1227)","Antei (1227–1229)","Kanki (1229–1232)","Jōei (1232–1233)","Tenpuku (1233–1234)","Bunryaku (1234–1235)","Katei (1235–1238)","Ryakunin (1238–1239)","En’ō (1239–1240)","Ninji (1240–1243)","Kangen (1243–1247)","Hōji (1247–1249)","Kenchō (1249–1256)","Kōgen (1256–1257)","Shōka (1257–1259)","Shōgen (1259–1260)","Bun’ō (1260–1261)","Kōchō (1261–1264)","Bun’ei (1264–1275)","Kenji (1275–1278)","Kōan (1278–1288)","Shōō (1288–1293)","Einin (1293–1299)","Shōan (1299–1302)","Kengen (1302–1303)","Kagen (1303–1306)","Tokuji (1306–1308)","Enkyō (1308–1311)","Ōchō (1311–1312)","Shōwa (1312–1317)","Bunpō (1317–1319)","Genō (1319–1321)","Genkō (1321–1324)","Shōchū (1324–1326)","Karyaku (1326–1329)","Gentoku (1329–1331)","Genkō (1331–1334)","Kenmu (1334–1336)","Engen (1336–1340)","Kōkoku (1340–1346)","Shōhei (1346–1370)","Kentoku (1370–1372)","Bunchū (1372–1375)","Tenju (1375–1379)","Kōryaku (1379–1381)","Kōwa (1381–1384)","Genchū (1384–1392)","Meitoku (1384–1387)","Kakei 
(1387–1389)","Kōō (1389–1390)","Meitoku (1390–1394)","Ōei (1394–1428)","Shōchō (1428–1429)","Eikyō (1429–1441)","Kakitsu (1441–1444)","Bun’an (1444–1449)","Hōtoku (1449–1452)","Kyōtoku (1452–1455)","Kōshō (1455–1457)","Chōroku (1457–1460)","Kanshō (1460–1466)","Bunshō (1466–1467)","Ōnin (1467–1469)","Bunmei (1469–1487)","Chōkyō (1487–1489)","Entoku (1489–1492)","Meiō (1492–1501)","Bunki (1501–1504)","Eishō (1504–1521)","Taiei (1521–1528)","Kyōroku (1528–1532)","Tenbun (1532–1555)","Kōji (1555–1558)","Eiroku (1558–1570)","Genki (1570–1573)","Tenshō (1573–1592)","Bunroku (1592–1596)","Keichō (1596–1615)","Genna (1615–1624)","Kan’ei (1624–1644)","Shōho (1644–1648)","Keian (1648–1652)","Jōō (1652–1655)","Meireki (1655–1658)","Manji (1658–1661)","Kanbun (1661–1673)","Enpō (1673–1681)","Tenna (1681–1684)","Jōkyō (1684–1688)","Genroku (1688–1704)","Hōei (1704–1711)","Shōtoku (1711–1716)","Kyōhō (1716–1736)","Genbun (1736–1741)","Kanpō (1741–1744)","Enkyō (1744–1748)","Kan’en (1748–1751)","Hōreki (1751–1764)","Meiwa (1764–1772)","An’ei (1772–1781)","Tenmei (1781–1789)","Kansei (1789–1801)","Kyōwa (1801–1804)","Bunka (1804–1818)","Bunsei (1818–1830)","Tenpō (1830–1844)","Kōka (1844–1848)","Kaei (1848–1854)","Ansei (1854–1860)","Man’en (1860–1861)","Bunkyū (1861–1864)","Genji (1864–1865)","Keiō (1865–1868)","Meiji","Taishō","Shōwa","Heisei"]},dayPeriods:{am:"AM",pm:"PM"}},persian:{months:{narrow:["1","2","3","4","5","6","7","8","9","10","11","12"],short:["Farvardin","Ordibehesht","Khordad","Tir","Mordad","Shahrivar","Mehr","Aban","Azar","Dey","Bahman","Esfand"],long:["Farvardin","Ordibehesht","Khordad","Tir","Mordad","Shahrivar","Mehr","Aban","Azar","Dey","Bahman","Esfand"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["AP"],short:["AP"],long:["AP"]},dayPeriods:{am:"AM",pm:"PM"}},roc:{months:{narrow:["J","F","M","A","M","J","J","A","S","O","N","D"],short:["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"],long:["January","February","March","April","May","June","July","August","September","October","November","December"]},days:{narrow:["S","M","T","W","T","F","S"],short:["Sun","Mon","Tue","Wed","Thu","Fri","Sat"],long:["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]},eras:{narrow:["Before R.O.C.","Minguo"],short:["Before R.O.C.","Minguo"],long:["Before R.O.C.","Minguo"]},dayPeriods:{am:"AM",pm:"PM"}}}},number:{nu:["latn"],patterns:{decimal:{positivePattern:"{number}",negativePattern:"{minusSign}{number}"},currency:{positivePattern:"{currency}{number}",negativePattern:"{minusSign}{currency}{number}"},percent:{positivePattern:"{number}{percentSign}",negativePattern:"{minusSign}{number}{percentSign}"}},symbols:{latn:{decimal:".",group:",",nan:"NaN",plusSign:"+",minusSign:"-",percentSign:"%",infinity:"∞"}},currencies:{AUD:"A$",BRL:"R$",CAD:"CA$",CNY:"CN¥",EUR:"€",GBP:"GB£",GIP:"£",HKD:"HK$",ILS:"₪",INR:"₹",JPY:"JP¥",KRW:"₩",MXN:"MX$",NZD:"NZ$",TWD:"NT$",USD:"US$",VND:"₫",XAF:"FCFA",XCD:"EC$",XOF:"CFA",XPF:"CFPF"}}});
|
PypiClean
|
/gladia_torchaudio-2.1.0a0-py3-none-any.whl/torchaudio/models/wav2vec2/utils/import_fairseq.py
|
import re
from torch.nn import Module
from ..model import wav2vec2_model, Wav2Vec2Model
def _parse_config(w2v_model):
encoder = w2v_model.encoder
conv_layers = w2v_model.feature_extractor.conv_layers
if "GroupNorm" in conv_layers[0][2].__class__.__name__:
extractor_mode = "group_norm"
else:
extractor_mode = "layer_norm"
conv_layer_config = [(l[0].out_channels, l[0].kernel_size[0], l[0].stride[0]) for l in conv_layers]
if all(l[0].bias is None for l in conv_layers):
conv_bias = False
elif all(l[0].bias is not None for l in conv_layers):
conv_bias = True
else:
raise ValueError("Either all the convolution layers should have a bias term or none of them should.")
config = {
"extractor_mode": extractor_mode,
"extractor_conv_layer_config": conv_layer_config,
"extractor_conv_bias": conv_bias,
"encoder_embed_dim": w2v_model.post_extract_proj.out_features,
"encoder_projection_dropout": w2v_model.dropout_input.p,
"encoder_pos_conv_kernel": encoder.pos_conv[0].kernel_size[0],
"encoder_pos_conv_groups": encoder.pos_conv[0].groups,
"encoder_num_layers": len(encoder.layers),
"encoder_num_heads": encoder.layers[0].self_attn.num_heads,
"encoder_attention_dropout": encoder.layers[0].self_attn.dropout_module.p,
"encoder_ff_interm_features": encoder.layers[0].fc1.out_features,
"encoder_ff_interm_dropout": encoder.layers[0].dropout2.p,
"encoder_dropout": encoder.layers[0].dropout3.p,
"encoder_layer_norm_first": encoder.layer_norm_first,
"encoder_layer_drop": encoder.layerdrop,
}
return config
def _map_key(key):
key_ = key
if key.startswith("w2v_model."):
key = key.replace("w2v_model.", "")
if re.match(r"(mask_emb|quantizer|project_q|final_proj)", key):
return None
# Feature Extractor
# Group norm when "extractor_mode" is "default".
# (Only the first layer)
# "conv_layers.0.2.weight" -> "conv_layers.0.layer_norm.weight"
# "conv_layers.0.2.bias" -> "conv_layers.0.layer_norm.bias"
match = re.match(r"feature_extractor\.conv_layers\.0\.2\.(weight|bias)", key)
if match:
return f"feature_extractor.conv_layers.0.layer_norm.{match.group(1)}"
# Convolutions
# "conv_layers.X.0.weight" -> "conv_layers.X.conv.weight"
# "conv_layers.X.0.bias" -> "conv_layers.X.conv.bias"
match = re.match(r"feature_extractor\.conv_layers\.(\d+)\.0\.(weight|bias)", key)
if match:
return f"feature_extractor.conv_layers.{match.group(1)}.conv.{match.group(2)}"
# Layer norm when "extractor_mode" is "layer_norm".
# "conv_layers.X.2.1.weight" -> "conv_layers.X.layer_norm.weight"
# "conv_layers.X.2.1.bias" -> "conv_layers.X.layer_norm.bias"
match = re.match(r"feature_extractor\.conv_layers\.(\d+)\.2\.1\.(weight|bias)", key)
if match:
return f"feature_extractor.conv_layers.{match.group(1)}.layer_norm.{match.group(2)}"
# Encoder - Feature projection
match = re.match(r"post_extract_proj\.(weight|bias)", key)
if match:
return f"encoder.feature_projection.projection.{match.group(1)}"
match = re.match(r"layer_norm\.(weight|bias)", key)
if match:
return f"encoder.feature_projection.layer_norm.{match.group(1)}"
# Encoder - Transformer - Convolutional positional embedding
match = re.match(r"encoder\.pos_conv\.0\.(bias|weight_g|weight_v)", key)
if match:
return f"encoder.transformer.pos_conv_embed.conv.{match.group(1)}"
match = re.match(r"encoder\.layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layer_norm.{match.group(1)}"
# Encoder - Transformer - Self attention layers
match = re.match(r"encoder\.layers\.(\d+)\.self_attn\.((k_|v_|q_|out_)proj\.(weight|bias))", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.attention.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.self_attn_layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.layer_norm.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.fc1\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.feed_forward.intermediate_dense.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.fc2\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.feed_forward.output_dense.{match.group(2)}"
match = re.match(r"encoder\.layers\.(\d+)\.final_layer_norm\.(weight|bias)", key)
if match:
return f"encoder.transformer.layers.{match.group(1)}.final_layer_norm.{match.group(2)}"
# Auxiliary Module
# Only relevant when loading fine-tuned models
match = re.match(r"proj\.(weight|bias)", key)
if match:
return f"aux.{match.group(1)}"
# HuBERT Extension
if key in ["label_embs_concat"]:
return key
raise ValueError(f"Unexpected key: {key_}")
def _convert_state_dict(state_dict):
converted = {}
for k, v in state_dict.items():
k = _map_key(k)
if k is not None:
converted[k] = v
return converted
def import_fairseq_model(original: Module) -> Wav2Vec2Model:
"""Builds :class:`Wav2Vec2Model` from the corresponding model object of
`fairseq <https://github.com/pytorch/fairseq>`_.
Args:
original (torch.nn.Module):
An instance of fairseq's Wav2Vec2.0 or HuBERT model.
One of ``fairseq.models.wav2vec.wav2vec2_asr.Wav2VecEncoder``,
``fairseq.models.wav2vec.wav2vec2.Wav2Vec2Model`` or
``fairseq.models.hubert.hubert_asr.HubertEncoder``.
Returns:
Wav2Vec2Model: Imported model.
Example - Loading pretrain-only model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
>>>
>>> # Load model using fairseq
>>> model_file = 'wav2vec_small.pt'
>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
>>> original = model[0]
>>> imported = import_fairseq_model(original)
>>>
>>> # Perform feature extraction
>>> waveform, _ = torchaudio.load('audio.wav')
>>> features, _ = imported.extract_features(waveform)
>>>
>>> # Compare result with the original model from fairseq
>>> reference = original.feature_extractor(waveform).transpose(1, 2)
>>> torch.testing.assert_allclose(features, reference)
Example - Fine-tuned model
>>> from torchaudio.models.wav2vec2.utils import import_fairseq_model
>>>
>>> # Load model using fairseq
>>> model_file = 'wav2vec_small_960h.pt'
>>> model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
>>> original = model[0]
>>> imported = import_fairseq_model(original.w2v_encoder)
>>>
>>> # Perform encoding
>>> waveform, _ = torchaudio.load('audio.wav')
>>> emission, _ = imported(waveform)
>>>
>>> # Compare result with the original model from fairseq
>>> mask = torch.zeros_like(waveform)
>>> reference = original(waveform, mask)['encoder_out'].transpose(0, 1)
>>> torch.testing.assert_allclose(emission, reference)
"""
class_ = original.__class__.__name__
if class_ == "Wav2Vec2Model":
return _import_wav2vec2_pretraining(original)
if class_ == "Wav2VecEncoder":
return _import_wav2vec2_finetuning(original)
if class_ == "HubertModel":
return _import_hubert_pretraining(original)
if class_ == "HubertEncoder":
return _import_hubert_finetuning(original)
raise ValueError(f"Expected an instance of `Wav2Vec2Model`, `Wav2VecEncoder`, `HubertModel` or `HubertEncoder`. Found: {class_}")
def _import_wav2vec2_finetuning(original: Module) -> Wav2Vec2Model:
config = _parse_config(original.w2v_model)
model = wav2vec2_model(**config, aux_num_out=original.proj.out_features)
model.load_state_dict(_convert_state_dict(original.state_dict()))
return model
def _import_wav2vec2_pretraining(original: Module) -> Wav2Vec2Model:
config = _parse_config(original)
model = wav2vec2_model(**config, aux_num_out=None)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
def _import_hubert_finetuning(original: Module) -> Wav2Vec2Model:
config = _parse_config(original.w2v_model)
model = wav2vec2_model(**config, aux_num_out=original.proj.out_features)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
def _import_hubert_pretraining(original: Module) -> Wav2Vec2Model:
config = _parse_config(original)
model = wav2vec2_model(**config, aux_num_out=None)
model.load_state_dict(_convert_state_dict(original.state_dict()), strict=False)
return model
|
PypiClean
|
/h2o_pysparkling_3.3-3.42.0.2.post1.tar.gz/h2o_pysparkling_3.3-3.42.0.2.post1/h2o/transforms/transform_base.py
|
from h2o.utils.compatibility import * # NOQA
from h2o import expr
from ..frame import H2OFrame
class TransformAttributeError(AttributeError):
def __init__(self, obj, method):
super(TransformAttributeError, self).__init__("No {} method for {}".format(method, obj.__class__.__name__))
class H2OTransformer(object):
"""
H2O Transforms.
H2O Transforms implement the following methods:
- fit
- transform
- fit_transform
- inverse_transform
- export
- to_rest
"""
def __init__(self):
pass
def fit(self, X, y=None, **params):
raise TransformAttributeError(self, "fit")
def transform(self, X, y=None, **params):
raise TransformAttributeError(self, "transform")
def inverse_transform(self, X, y=None, **params):
raise TransformAttributeError(self, "inverse_transform")
def export(self, X, y, **params):
raise TransformAttributeError(self, "export")
def fit_transform(self, X, y=None, **params):
return self.fit(X, y, **params).transform(X, **params)
def get_params(self, deep=True):
"""
Get parameters for this estimator.
:param bool deep: if True, return parameters of all subobjects that are estimators.
:returns: A dict of parameters.
"""
out = dict()
for key, value in self.parms.items():
if deep and isinstance(value, H2OTransformer):
deep_items = list(value.get_params().items())
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
self.parms.update(params)
return self
@staticmethod
def _dummy_frame():
fr = H2OFrame._expr(expr.ExprNode())
fr._is_frame = False
fr._ex._children = None
fr._ex._cache.dummy_fill()
return fr
def to_rest(self, args):
return "__".join(str(a) for a in args)
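# A minimal sketch (not part of the library) of how a concrete transformer
# could implement the interface above; the min-max scaling logic and the
# class name are hypothetical placeholders:
#
# class H2ORangeScaler(H2OTransformer):
#     def __init__(self):
#         self.parms = {}
#     def fit(self, X, y=None, **params):
#         # remember the observed range of the training frame
#         self._min, self._max = X.min(), X.max()
#         return self
#     def transform(self, X, y=None, **params):
#         # rescale values into [0, 1] using the fitted range
#         return (X - self._min) / (self._max - self._min)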
|
PypiClean
|
/latexcodec-2.0.1.tar.gz/latexcodec-2.0.1/README.rst
|
latexcodec
==========
|travis| |codecov|
A lexer and codec to work with LaTeX code in Python.
* Download: http://pypi.python.org/pypi/latexcodec/#downloads
* Documentation: http://latexcodec.readthedocs.org/
* Development: http://github.com/mcmtroffaes/latexcodec/
.. |travis| image:: https://travis-ci.org/mcmtroffaes/latexcodec.png?branch=develop
:target: https://travis-ci.org/mcmtroffaes/latexcodec
:alt: travis-ci
.. |codecov| image:: https://codecov.io/gh/mcmtroffaes/latexcodec/branch/develop/graph/badge.svg
:target: https://codecov.io/gh/mcmtroffaes/latexcodec
:alt: codecov
The codec provides a convenient way of going between text written in
LaTeX and unicode. Since it is not a LaTeX compiler, it is more
appropriate for short chunks of text, such as a paragraph or the
values of a BibTeX entry, and it is not appropriate for a full LaTeX
document. In particular, its behavior on the LaTeX commands that do
not simply select characters is intended to allow the unicode
representation to be understandable by a human reader, but is not
canonical and may require hand tuning to produce the desired effect.
The encoder does a best effort to replace unicode characters outside
of the range used as LaTeX input (ascii by default) with a LaTeX
command that selects the character. More technically, the unicode code
point is replaced by a LaTeX command that selects a glyph that
reasonably represents the code point. Unicode characters with special
uses in LaTeX are replaced by their LaTeX equivalents. For example,
====================== ===================
original text encoded LaTeX
====================== ===================
``¥`` ``\yen``
``ü`` ``\"u``
``\N{NO-BREAK SPACE}`` ``~``
``~`` ``\textasciitilde``
``%`` ``\%``
``#`` ``\#``
``\textbf{x}`` ``\textbf{x}``
====================== ===================
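For instance, a minimal encoding sketch (importing ``latexcodec`` registers
the ``latex`` codec as a side effect; the exact bytes produced may differ
slightly between versions)
::
import latexcodec  # noqa: registering the codec is the only side effect needed
print(u"¥100, ü".encode("latex"))  # roughly b'\\yen 100, \\"u'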
The decoder does a best effort to replace LaTeX commands that select
characters with the unicode for the character they are selecting. For
example,
===================== ======================
original LaTeX decoded unicode
===================== ======================
``\yen`` ``¥``
``\"u`` ``ü``
``~`` ``\N{NO-BREAK SPACE}``
``\textasciitilde`` ``~``
``\%`` ``%``
``\#`` ``#``
``\textbf{x}`` ``\textbf {x}``
``#`` ``#``
===================== ======================
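The reverse direction works the same way (again assuming ``import
latexcodec``; per the table above, commands such as ``\textbf`` that do not
select characters pass through)
::
import latexcodec  # noqa
print(b'\\yen, \\"u, \\textbf{x}'.decode("latex"))  # roughly: ¥, ü, \textbf {x}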
In addition, comments are dropped (including the final newline that
marks the end of a comment), paragraphs are canonicalized into double
newlines, and other newlines are left as is. Spacing after LaTeX
commands is also canonicalized.
For example,
::
hi % bye
there\par world
\textbf {awesome}
is decoded as
::
hi there
world
\textbf {awesome}
When decoding, LaTeX commands not directly selecting characters (for
example, macros and formatting commands) are passed through
unchanged. The same happens for LaTeX commands that select characters
but are not yet recognized by the codec. Either case can result in a
hybrid unicode string in which some characters are understood as
literally the character and others as parts of unexpanded commands.
Consequently, at times, backslashes will be left intact for denoting
the start of a potentially unrecognized control sequence.
Given the numerous and changing packages providing such LaTeX
commands, the codec will never be complete, and new translations of
unrecognized unicode or unrecognized LaTeX symbols are always welcome.
|
PypiClean
|
/intelcom_helper-0.5.0.tar.gz/intelcom_helper-0.5.0/intelcom_helper/snowflake.py
|
import os
import pandas as pd
import snowflake.connector
from sqlalchemy import create_engine
from snowflake.sqlalchemy import URL
from snowflake.connector.pandas_tools import write_pandas
def fetch_data(sql_query):
""" fetch data from snowflake tables """
ctx = snowflake.connector.connect(
user=os.environ['SK_USER'],
account=os.environ['SK_ACCOUNT'],
password=os.environ['SK_PASSWORD'],
role=os.environ['SK_ROLE'],
warehouse=os.environ['SK_WAREHOUSE'])
cur = ctx.cursor()
try:
cur.execute(sql_query)
dataframe = cur.fetch_pandas_all()
dataframe.columns = dataframe.columns.str.lower()
return dataframe
except Exception as ex:
print(ex)
finally:
cur.close()
ctx.close()
def fetch_data_sso(sql_query, email):
""" fetch data from snowflake tables using SSO authentication"""
conn = snowflake.connector.connect(
user=email.upper(),
account=os.environ['SK_ACCOUNT'],
authenticator='externalbrowser',
warehouse=os.environ['SK_WAREHOUSE'],
)
cur = conn.cursor()
dataframe = None
try:
cur.execute(sql_query)
dataframe = cur.fetch_pandas_all()
dataframe.columns = dataframe.columns.str.lower()
except Exception as ex:
print(ex)
finally:
cur.close()
conn.close()
return dataframe
def create_table(dataframe, sf_warehouse, sf_database, sf_schema, sf_table_name, overwrite=False):
""" create table and write data into snowflake """
# Create Snowflake engine
sf_table_name = sf_table_name.lower()
engine = create_engine(URL(
user=os.environ['SK_USER'],
account=os.environ['SK_ACCOUNT'],
password=os.environ['SK_PASSWORD'],
role=os.environ['SK_ROLE'],
warehouse=sf_warehouse,
database=sf_database,
schema=sf_schema))
print('Snowflake engine created')
overwrite_table = 'fail'
if overwrite is True:
overwrite_table = 'replace'
# Create Snowflake Connection
with engine.connect() as connection:
try:
# Save dataframe locally
print('Uploading dataset to Snowflake...')
filename = f"{sf_table_name}.csv"
file_path = os.path.abspath(filename)
dataframe.to_csv(file_path, header=False, index=False)
# Create table in Snowflake
dataframe.head(0).to_sql(name=sf_table_name, con=connection, if_exists=overwrite_table, index=False)
# Put the file into the table stage and copy it into the table
connection.execute(f"put file://{file_path}* @%{sf_table_name}")
connection.execute(f"copy into {sf_table_name}")
print('Successfully uploaded {} rows into : {}.{}.{}'.format(dataframe.shape[0], sf_database, sf_schema,
sf_table_name.upper()))
except Exception as ex:
print(ex)
finally:
os.remove(file_path)
def write_data(dataframe, sf_warehouse, sf_database, sf_schema, sf_table_name):
""" upload dataframe to snowflake table"""
cnx = snowflake.connector.connect(
user=os.environ['SK_USER'],
account=os.environ['SK_ACCOUNT'],
password=os.environ['SK_PASSWORD'],
role=os.environ['SK_ROLE'],
warehouse=sf_warehouse,
database=sf_database,
schema=sf_schema)
try:
print('Uploading dataset to Snowflake...')
dataframe.columns = dataframe.columns.str.upper()
success, nchunks, nrows, _ = write_pandas(cnx, dataframe, table_name=sf_table_name)
if success:
print('Successfully uploaded {} rows into : {}.{}.{}'.format(nrows, sf_database, sf_schema,
sf_table_name.upper()))
except Exception as ex:
print(ex)
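# Example usage sketch (the query and the warehouse/database/schema/table names
# below are hypothetical; the SK_* environment variables used above must be set):
#
# df = fetch_data("SELECT CURRENT_DATE() AS TODAY")
# write_data(df, sf_warehouse="MY_WH", sf_database="MY_DB",
#            sf_schema="PUBLIC", sf_table_name="TODAY_SNAPSHOT")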
|
PypiClean
|
/parquetranger-0.5.2.tar.gz/parquetranger-0.5.2/notebooks/doc-001-quickstart.ipynb
|
# Quickstart
```
import pandas as pd
from parquetranger import TableRepo
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6],
"B": ["x", "y", "z", "x1", "x2", "x3"],
"C": [1, 2, 1, 1, 1, 2],
"C2": ["a", "a", "b", "a", "c", "c"],
},
index=["a1", "a2", "a3", "a4", "a5", "a6"],
)
df
trepo = TableRepo("some_tmp_path", group_cols="C2") # this creates the directory
trepo.extend(df)
trepo.get_full_df()
df2 = pd.DataFrame(
{
"A": [21, 22, 23],
"B": ["X", "Y", "Z"],
"C": [10,20,1],
"C2": ["a", "b", "a"],
},
index=["a1", "a4", "a7"]
)
trepo.replace_records(df2) # replaces based on index
trepo.get_full_df()
trepo.replace_groups(df2)
trepo.get_full_df() # replaced the whole groups where C2==a and C2==b with the records that were present in df2
trepo.replace_all(df2) # erases everything and puts df2 in. all traces of df are lost
trepo.get_full_df()
trepo.replace_records(df, by_groups=True) # replaces records based on index, but only looks for indices within groups, so this way duplicate a4 index is possible
# as they are in different groups, with different values in C2
trepo.get_full_df()
trepo.purge() # deletes everything
```
|
PypiClean
|
/vnpy_extra-0.8.20211220.0-py3-none-any.whl/vnpy_extra/pm/by_json.py
|
import json
import logging
from tqdm import tqdm
from vnpy_extra.db.orm import StrategyBacktestStats, SymbolsInfo, set_account, StrategyBacktestStatusEnum, \
AccountStrategyMapping
from vnpy_extra.pm.plot import plot_with_base_position
from vnpy_extra.utils.symbol import get_main_contract
def get_id_name_symbol(name: str):
"""Split ``name`` into id_name and symbol (contract names only)."""
# The approach below had fairly serious flaws and was easily confused with parameters:
# idx = 0
# for _ in name.split('_')[::-1]:
# if PATTERN_INSTRUMENT_TYPE_RESTRICT.match(_):
# idx += len(_) + 1
# else:
# break
#
# if idx == 0:
# raise ValueError(f"{name} is not a valid name")
# id_name, symbol = name[:-idx], name[-idx + 1:]
idx = name[::-1].find('_')
id_name, symbol = name[:-idx - 1], name[-idx:]
return id_name, symbol
def get_merged_profit_by_json(json_file_path: str, use_json_setting=True, encoding='utf-8'):
"""Using the strategy names in the JSON file, find the corresponding strategy curves and fit a merged return curve."""
with open(json_file_path, encoding=encoding) as f:
name_setting_dic = json.load(f)
stats_list = []
tqdm_obj = tqdm(name_setting_dic.items(), total=len(name_setting_dic))
for name, setting_dic in tqdm_obj:
id_name, symbol_str = get_id_name_symbol(name)
vt_symbol = setting_dic.get('vt_symbol', None)
tqdm_obj.set_description(f"{id_name:150s}{vt_symbol}")
si_obj: SymbolsInfo = SymbolsInfo.get_instance(vt_symbol)
stats: StrategyBacktestStats = StrategyBacktestStats.get_by_id_name_and_symbol_info(id_name, si_obj)
if not stats:
main_vt_symbol = get_main_contract(si_obj.symbols)
si_main_obj = SymbolsInfo.get_instance(main_vt_symbol)
# si_obj = SymbolsInfo.get_by_symbol_str(symbol_str)
stats: StrategyBacktestStats = StrategyBacktestStats.get_by_id_name_and_symbol_info(id_name, si_main_obj)
logging.warning(f"\n{id_name}[{vt_symbol}] backtest data not found; trying main-contract backtest data for [{si_main_obj.symbols}]")
if use_json_setting:
stats.strategy_settings = setting_dic['setting']
stats_list.append(stats)
plot_with_base_position(stats_list=stats_list, log_weights=True, output_csv=True)
def _run_get_merged_profit_by_json():
json_file_path = r"F:\downloads\cta_strategy_setting.json"
get_merged_profit_by_json(json_file_path)
def add_stg_2_account_by_json(
user_name: str, broker_id: str, json_file_path: str, encoding='utf-8', clean_before_insert=True):
set_account(user_name, broker_id)
with open(json_file_path, encoding=encoding) as f:
name_setting_dic = json.load(f)
stg_short_num_dic = {}
if len(name_setting_dic) > 0 and clean_before_insert:
AccountStrategyMapping.delete_by_account()
tqdm_obj = tqdm(name_setting_dic.items(), total=len(name_setting_dic))
for name, setting_dic in tqdm_obj:
id_name, symbol_str = get_id_name_symbol(name)
vt_symbol = setting_dic.get('vt_symbol', None)
tqdm_obj.set_description(f"{id_name:130s}{vt_symbol}")
if not vt_symbol:
vt_symbol = setting_dic.get('vt_symbols', None)
if not vt_symbol:
raise ValueError("setting_dic 缺少 vt_symbol/vt_symbols 字段")
si_obj: SymbolsInfo = SymbolsInfo.get_instance(vt_symbol)
stats: StrategyBacktestStats = StrategyBacktestStats.get_by_id_name_and_symbol_info(id_name, si_obj)
main_vt_symbol = get_main_contract(si_obj.symbols)
si_main_obj = SymbolsInfo.get_instance(main_vt_symbol)
# si_obj = SymbolsInfo.get_by_symbol_str(symbol_str)
stats_main: StrategyBacktestStats = StrategyBacktestStats.get_by_id_name_and_symbol_info(id_name, si_main_obj)
if not stats and not stats_main:
logging.warning("")
logging.warning(f"{id_name}[{vt_symbol}] contract backtest data and [{si_main_obj.symbols}] main-contract backtest data were both not found")
continue
if not stats:
logging.info("")
logging.info(
f"{id_name}[{vt_symbol}] backtest data not found; using [{si_main_obj.symbols}] main-contract backtest data and creating the matching contract backtest record")
stats_main.apply_2_symbol(symbols_curr=vt_symbol, shown_name=name, short_name=name)
stats_main.update_backtest_status(
StrategyBacktestStatusEnum.QuasiOnline,
remark_4_log=si_main_obj.symbols,
stg_short_num_dic=stg_short_num_dic,
)
AccountStrategyMapping.add_2_account(
stg_info_id=stats_main.stg_info_id,
symbols_info_id=si_obj.id,
id_name=stats_main.id_name,
short_name=name,
shown_name=name,
strategy_settings={k: v for k, v in setting_dic['setting'].items() if k not in ('class_name',)},
)
count = AccountStrategyMapping.get_count_by_account()
print('\n')
logging.info(f"{user_name}[{broker_id}] {count} 条记录已经被创建。")
def _run_add_stg_2_account():
user_name, broker_id = "0", "0"
json_file_path = r"F:\downloads\cta_strategy_setting.json"
add_stg_2_account_by_json(user_name, broker_id, json_file_path)
if __name__ == "__main__":
# _run_get_merged_profit_by_json()
_run_add_stg_2_account()
|
PypiClean
|
/bf-nlp-package-13.3.tar.gz/bf-nlp-package-13.3/rasa/nlu/featurizers/dense_featurizer/mitie_featurizer.py
|
import numpy as np
import typing
from typing import Any, List, Text, Optional, Dict, Type
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.components import Component
from rasa.nlu.featurizers.featurizer import DenseFeaturizer
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.utils.mitie_utils import MitieNLP
from rasa.nlu.training_data import Message, TrainingData
from rasa.nlu.constants import (
TEXT,
TOKENS_NAMES,
DENSE_FEATURE_NAMES,
DENSE_FEATURIZABLE_ATTRIBUTES,
)
from rasa.utils.tensorflow.constants import MEAN_POOLING, POOLING
if typing.TYPE_CHECKING:
import mitie
class MitieFeaturizer(DenseFeaturizer):
@classmethod
def required_components(cls) -> List[Type[Component]]:
return [MitieNLP, Tokenizer]
defaults = {
# Specify what pooling operation should be used to calculate the vector of
# the CLS token. Available options: 'mean' and 'max'
POOLING: MEAN_POOLING
}
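# A minimal sketch of what the pooling choice means for a hypothetical
# two-token feature matrix (the CLS vector is computed from these rows):
#
# token_features = np.array([[1.0, 4.0],
#                            [3.0, 2.0]])
# 'mean' pooling -> np.mean(token_features, axis=0) == [2.0, 3.0]
# 'max' pooling -> np.max(token_features, axis=0) == [3.0, 4.0]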
def __init__(self, component_config: Optional[Dict[Text, Any]] = None) -> None:
super().__init__(component_config)
self.pooling_operation = self.component_config["pooling"]
@classmethod
def required_packages(cls) -> List[Text]:
return ["mitie", "numpy"]
def ndim(self, feature_extractor: "mitie.total_word_feature_extractor") -> int:
return feature_extractor.num_dimensions
def get_tokens_by_attribute(self, example: Message, attribute: Text) -> Any:
return example.get(TOKENS_NAMES[attribute])
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
mitie_feature_extractor = self._mitie_feature_extractor(**kwargs)
for example in training_data.intent_examples:
for attribute in DENSE_FEATURIZABLE_ATTRIBUTES:
self.process_training_example(
example, attribute, mitie_feature_extractor
)
def process_training_example(
self, example: Message, attribute: Text, mitie_feature_extractor: Any
):
attribute_tokens = self.get_tokens_by_attribute(example, attribute)
if attribute_tokens is not None:
features = self.features_for_tokens(
attribute_tokens, mitie_feature_extractor
)
example.set(
DENSE_FEATURE_NAMES[attribute],
self._combine_with_existing_dense_features(
example, features, DENSE_FEATURE_NAMES[attribute]
),
)
def process(self, message: Message, **kwargs: Any) -> None:
mitie_feature_extractor = self._mitie_feature_extractor(**kwargs)
features = self.features_for_tokens(
message.get(TOKENS_NAMES[TEXT]), mitie_feature_extractor
)
message.set(
DENSE_FEATURE_NAMES[TEXT],
self._combine_with_existing_dense_features(
message, features, DENSE_FEATURE_NAMES[TEXT]
),
)
def _mitie_feature_extractor(self, **kwargs) -> Any:
mitie_feature_extractor = kwargs.get("mitie_feature_extractor")
if not mitie_feature_extractor:
raise Exception(
"Failed to train 'MitieFeaturizer'. "
"Missing a proper MITIE feature extractor. "
"Make sure this component is preceded by "
"the 'MitieNLP' component in the pipeline "
"configuration."
)
return mitie_feature_extractor
def features_for_tokens(
self,
tokens: List[Token],
feature_extractor: "mitie.total_word_feature_extractor",
) -> np.ndarray:
# remove CLS token from tokens
tokens_without_cls = tokens[:-1]
# calculate features
features = []
for token in tokens_without_cls:
features.append(feature_extractor.get_feature_vector(token.text))
features = np.array(features)
cls_token_vec = self._calculate_cls_vector(features, self.pooling_operation)
features = np.concatenate([features, cls_token_vec])
return features
|
PypiClean
|
/PyrogramXd-2.0.64-py3-none-any.whl/pyrogram/methods/messages/send_audio.py
|
import os
import re
from datetime import datetime
from typing import Union, BinaryIO, List, Optional, Callable
import pyrogram
from pyrogram import StopTransmission, enums
from pyrogram import raw
from pyrogram import types
from pyrogram import utils
from pyrogram.errors import FilePartMissing
from pyrogram.file_id import FileType
class SendAudio:
async def send_audio(
self: "pyrogram.Client",
chat_id: Union[int, str],
audio: Union[str, BinaryIO],
caption: str = "",
parse_mode: Optional["enums.ParseMode"] = None,
caption_entities: List["types.MessageEntity"] = None,
duration: int = 0,
performer: str = None,
title: str = None,
thumb: Union[str, BinaryIO] = None,
file_name: str = None,
disable_notification: bool = None,
reply_to_message_id: int = None,
schedule_date: datetime = None,
protect_content: bool = None,
reply_markup: Union[
"types.InlineKeyboardMarkup",
"types.ReplyKeyboardMarkup",
"types.ReplyKeyboardRemove",
"types.ForceReply"
] = None,
progress: Callable = None,
progress_args: tuple = ()
) -> Optional["types.Message"]:
"""Send audio files.
For sending voice messages, use the :meth:`~pyrogram.Client.send_voice` method instead.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
For your personal cloud (Saved Messages) you can simply use "me" or "self".
For a contact that exists in your Telegram address book you can use his phone number (str).
audio (``str`` | ``BinaryIO``):
Audio file to send.
Pass a file_id as string to send an audio file that exists on the Telegram servers,
pass an HTTP URL as a string for Telegram to get an audio file from the Internet,
pass a file path as string to upload a new audio file that exists on your local machine, or
pass a binary file-like object with its attribute ".name" set for in-memory uploads.
caption (``str``, *optional*):
Audio caption, 0-1024 characters.
parse_mode (:obj:`~pyrogram.enums.ParseMode`, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
caption_entities (List of :obj:`~pyrogram.types.MessageEntity`):
List of special entities that appear in the caption, which can be specified instead of *parse_mode*.
duration (``int``, *optional*):
Duration of the audio in seconds.
performer (``str``, *optional*):
Performer.
title (``str``, *optional*):
Track name.
thumb (``str`` | ``BinaryIO``, *optional*):
Thumbnail of the music file album cover.
The thumbnail should be in JPEG format and less than 200 KB in size.
A thumbnail's width and height should not exceed 320 pixels.
Thumbnails can't be reused and can be only uploaded as a new file.
file_name (``str``, *optional*):
File name of the audio sent.
Defaults to file's path basename.
disable_notification (``bool``, *optional*):
Sends the message silently.
Users will receive a notification with no sound.
reply_to_message_id (``int``, *optional*):
If the message is a reply, ID of the original message.
schedule_date (:py:obj:`~datetime.datetime`, *optional*):
Date when the message will be automatically sent.
protect_content (``bool``, *optional*):
Protects the contents of the sent message from forwarding and saving.
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardMarkup` | :obj:`~pyrogram.types.ReplyKeyboardRemove` | :obj:`~pyrogram.types.ForceReply`, *optional*):
Additional interface options. An object for an inline keyboard, custom reply keyboard,
instructions to remove reply keyboard or to force a reply from the user.
progress (``Callable``, *optional*):
Pass a callback function to view the file transmission progress.
The function must take *(current, total)* as positional arguments (look at Other Parameters below for a
detailed description) and will be called back each time a new file chunk has been successfully
transmitted.
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function.
You can pass anything you need to be available in the progress callback scope; for example, a Message
object or a Client instance in order to edit the message with the updated progress status.
Other Parameters:
current (``int``):
The amount of bytes transmitted so far.
total (``int``):
The total size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the ``progress_args`` parameter.
You can either keep ``*args`` or add every single extra argument in your function signature.
Returns:
:obj:`~pyrogram.types.Message` | ``None``: On success, the sent audio message is returned, otherwise, in
case the upload is deliberately stopped with :meth:`~pyrogram.Client.stop_transmission`, None is returned.
Example:
.. code-block:: python
# Send audio file by uploading from file
await app.send_audio("me", "audio.mp3")
# Add caption to the audio
await app.send_audio("me", "audio.mp3", caption="audio caption")
# Set audio metadata
await app.send_audio(
"me", "audio.mp3",
title="Title", performer="Performer", duration=234)
# Keep track of the progress while uploading
async def progress(current, total):
print(f"{current * 100 / total:.1f}%")
await app.send_audio("me", "audio.mp3", progress=progress)
"""
file = None
try:
if isinstance(audio, str):
if os.path.isfile(audio):
thumb = await self.save_file(thumb)
file = await self.save_file(audio, progress=progress, progress_args=progress_args)
media = raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(audio) or "audio/mpeg",
file=file,
thumb=thumb,
attributes=[
raw.types.DocumentAttributeAudio(
duration=duration,
performer=performer,
title=title
),
raw.types.DocumentAttributeFilename(file_name=file_name or os.path.basename(audio))
]
)
elif re.match("^https?://", audio):
media = raw.types.InputMediaDocumentExternal(
url=audio
)
else:
media = utils.get_input_media_from_file_id(audio, FileType.AUDIO)
else:
thumb = await self.save_file(thumb)
file = await self.save_file(audio, progress=progress, progress_args=progress_args)
media = raw.types.InputMediaUploadedDocument(
mime_type=self.guess_mime_type(file_name or audio.name) or "audio/mpeg",
file=file,
thumb=thumb,
attributes=[
raw.types.DocumentAttributeAudio(
duration=duration,
performer=performer,
title=title
),
raw.types.DocumentAttributeFilename(file_name=file_name or audio.name)
]
)
while True:
try:
r = await self.invoke(
raw.functions.messages.SendMedia(
peer=await self.resolve_peer(chat_id),
media=media,
silent=disable_notification or None,
reply_to_msg_id=reply_to_message_id,
random_id=self.rnd_id(),
schedule_date=utils.datetime_to_timestamp(schedule_date),
noforwards=protect_content,
reply_markup=await reply_markup.write(self) if reply_markup else None,
**await utils.parse_text_entities(self, caption, parse_mode, caption_entities)
)
)
except FilePartMissing as e:
await self.save_file(audio, file_id=file.id, file_part=e.value)
else:
for i in r.updates:
if isinstance(i, (raw.types.UpdateNewMessage,
raw.types.UpdateNewChannelMessage,
raw.types.UpdateNewScheduledMessage)):
return await types.Message._parse(
self, i.message,
{i.id: i for i in r.users},
{i.id: i for i in r.chats},
is_scheduled=isinstance(i, raw.types.UpdateNewScheduledMessage)
)
except StopTransmission:
return None
|
PypiClean
|
/pulsar-odm-0.7.0.tar.gz/pulsar-odm-0.7.0/odm/dialects/postgresql/pool.py
|
from sqlalchemy import pool
from pulsar.apps.greenio import GreenLock
class GreenletPool(pool.Pool):
'''A Pool that maintains one connection per greenlet.
A connection is never moved to a greenlet other than the
one in which it was created.
'''
def __init__(self, creator, pool_size=10, timeout=30, **kw):
super().__init__(creator, **kw)
self._lock = GreenLock()
self._max_size = pool_size
self._timeout = timeout
self._connections = set()
self._available_connections = set()
def dispose(self):
for conn in self._connections:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "size: %d, available: %d" % (self.size(),
len(self._available_connections))
def size(self):
return len(self._connections)
def max_size(self):
return self._max_size
def timeout(self):
return self._timeout
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
pool_size=self._max_size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
dialect=self._dialect)
def _do_return_conn(self, conn):
self._available_connections.add(conn)
def _do_get(self):
try:
return self._available_connections.pop()
except KeyError:
pass
# Only create one connection at a time, otherwise psycopg2 blocks!
with self._lock:
conn = self._create_connection()
self._connections.add(conn)
return conn
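# A minimal usage sketch (the database URL is hypothetical); SQLAlchemy accepts
# a custom pool class via the poolclass argument of create_engine:
#
# from sqlalchemy import create_engine
# engine = create_engine('postgresql+psycopg2://localhost/mydb',
#                        poolclass=GreenletPool)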
|
PypiClean
|
/meta1-testnet-0.2.11.tar.gz/meta1-testnet-0.2.11/CODE_OF_CONDUCT.md
|
Code of Conduct
===============
Our Pledge
----------
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our
project and our community a harassment-free experience for everyone,
regardless of age, body size, disability, ethnicity, gender identity and
expression, level of experience, nationality, personal appearance, race,
religion, or sexual identity and orientation.
Our Standards
-------------
Examples of behavior that contributes to creating a positive environment
include:
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
- The use of sexualized language or imagery and unwelcome sexual
attention or advances
- Trolling, insulting/derogatory comments, and personal or political
attacks
- Public or private harassment
- Publishing others' private information, such as a physical or
electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in
a professional setting
Our Responsibilities
--------------------
Project maintainers are responsible for clarifying the standards of
acceptable behavior and are expected to take appropriate and fair
corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit,
or reject comments, commits, code, wiki edits, issues, and other
contributions that are not aligned to this Code of Conduct, or to ban
temporarily or permanently any contributor for other behaviors that they
deem inappropriate, threatening, offensive, or harmful.
Scope
-----
This Code of Conduct applies both within project spaces and in public
spaces when an individual is representing the project or its community.
Examples of representing a project or community include using an
official project e-mail address, posting via an official social media
account, or acting as an appointed representative at an online or
offline event. Representation of a project may be further defined and
clarified by project maintainers.
Enforcement
-----------
Instances of abusive, harassing, or otherwise unacceptable behavior may
be reported by contacting the project team at \[INSERT EMAIL ADDRESS\].
All complaints will be reviewed and investigated and will result in a
response that is deemed necessary and appropriate to the circumstances.
The project team is obligated to maintain confidentiality with regard to
the reporter of an incident. Further details of specific enforcement
policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in
good faith may face temporary or permanent repercussions as determined
by other members of the project's leadership.
Attribution
-----------
This Code of Conduct is adapted from the [Contributor
Covenant](http://contributor-covenant.org), version 1.4, available at
[<http://contributor-covenant.org/version/1/4>](http://contributor-covenant.org/version/1/4/)
|
PypiClean
|
/fds.sdk.OptimizationEngineAPIMultiperiod-0.21.7-py3-none-any.whl/fds/sdk/OptimizationEngineAPIMultiperiod/model/optimizer_inputs_mp_target_probability_term.py
|
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.OptimizationEngineAPIMultiperiod.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.OptimizationEngineAPIMultiperiod.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.OptimizationEngineAPIMultiperiod.model.optimizer_inputs_across_periods import OptimizerInputsAcrossPeriods
from fds.sdk.OptimizationEngineAPIMultiperiod.model.optimizer_inputs_bound_source_enum import OptimizerInputsBoundSourceEnum
from fds.sdk.OptimizationEngineAPIMultiperiod.model.optimizer_inputs_on_periods import OptimizerInputsOnPeriods
from fds.sdk.OptimizationEngineAPIMultiperiod.model.optimizer_inputs_target_probability_term import OptimizerInputsTargetProbabilityTerm
globals()['OptimizerInputsAcrossPeriods'] = OptimizerInputsAcrossPeriods
globals()['OptimizerInputsBoundSourceEnum'] = OptimizerInputsBoundSourceEnum
globals()['OptimizerInputsOnPeriods'] = OptimizerInputsOnPeriods
globals()['OptimizerInputsTargetProbabilityTerm'] = OptimizerInputsTargetProbabilityTerm
class OptimizerInputsMPTargetProbabilityTerm(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'term': (OptimizerInputsTargetProbabilityTerm,), # noqa: E501
'constrain_on': (OptimizerInputsBoundSourceEnum,), # noqa: E501
'on_periods': (OptimizerInputsOnPeriods,), # noqa: E501
'across_periods': (OptimizerInputsAcrossPeriods,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'term': 'term', # noqa: E501
'constrain_on': 'constrain_on', # noqa: E501
'on_periods': 'on_periods', # noqa: E501
'across_periods': 'across_periods', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""OptimizerInputsMPTargetProbabilityTerm - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
term (OptimizerInputsTargetProbabilityTerm): [optional] # noqa: E501
constrain_on (OptimizerInputsBoundSourceEnum): [optional] # noqa: E501
on_periods (OptimizerInputsOnPeriods): [optional] # noqa: E501
across_periods (OptimizerInputsAcrossPeriods): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""OptimizerInputsMPTargetProbabilityTerm - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
term (OptimizerInputsTargetProbabilityTerm): [optional] # noqa: E501
constrain_on (OptimizerInputsBoundSourceEnum): [optional] # noqa: E501
on_periods (OptimizerInputsOnPeriods): [optional] # noqa: E501
across_periods (OptimizerInputsAcrossPeriods): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
|
PypiClean
|
/black-it-0.2.1.tar.gz/black-it-0.2.1/black_it/samplers/halton.py
|
"""This module contains the implementation for the Halton sampler."""
import itertools
from typing import Iterator, List, Optional
import numpy as np
from numpy.random import default_rng
from numpy.typing import NDArray
from black_it.samplers.base import BaseSampler
from black_it.search_space import SearchSpace
from black_it.utils.base import check_arg, digitize_data
_MIN_SEQUENCE_START_INDEX = 20
_MAX_SEQUENCE_START_INDEX = 2**16
class HaltonSampler(BaseSampler):
"""
Halton low discrepancy sequence.
This snippet implements the Halton sequence following the generalization of
a sequence of *Van der Corput* in n-dimensions.
"""
def __init__(
self,
batch_size: int,
random_state: Optional[int] = None,
max_deduplication_passes: int = 5,
) -> None:
"""
Initialize the sampler.
Args:
batch_size: the number of points sampled every time the sampler is called
random_state: the random state of the sampler, fixing this number the sampler behaves deterministically
max_deduplication_passes: the maximum number of sample deduplication passes.
"""
super().__init__(batch_size, random_state, max_deduplication_passes)
self._prime_number_generator = _CachedPrimesCalculator()
# drop first N entries to avoid linear correlation
self._reset_sequence_index()
@property
def random_state(self) -> Optional[int]:
"""Get the random state."""
return self._random_state
@random_state.setter
def random_state(self, random_state: Optional[int]) -> None:
"""Set the random state."""
self._random_state = random_state
self._random_generator = default_rng(self.random_state)
self._reset_sequence_index()
def _reset_sequence_index(self) -> None:
"""Reset the sequence index pointer."""
self._sequence_index = self.random_generator.integers(
_MIN_SEQUENCE_START_INDEX, _MAX_SEQUENCE_START_INDEX
)
def sample_batch(
self,
batch_size: int,
search_space: SearchSpace,
existing_points: NDArray[np.float64],
existing_losses: NDArray[np.float64],
) -> NDArray[np.float64]:
"""
Sample points using Halton sequence.
Args:
batch_size: the number of samples
search_space: an object containing the details of the parameter search space
existing_points: the parameters already sampled (not used)
existing_losses: the loss corresponding to the sampled parameters (not used)
Returns:
the parameter sampled
"""
unit_cube_points: NDArray[np.float64] = self._halton(
batch_size, search_space.dims
)
p_bounds: NDArray[np.float64] = search_space.parameters_bounds
sampled_points = p_bounds[0] + unit_cube_points * (p_bounds[1] - p_bounds[0])
return digitize_data(sampled_points, search_space.param_grid)
def _halton(self, nb_samples: int, dims: int) -> NDArray[np.float64]:
"""
Get a Halton sequence.
It uses a simple prime number generator, which takes the first `dims` primes.
Args:
nb_samples: number of samples
dims: the number of dimensions of the space to sample from the unitary cube
Returns:
sequence of Halton.
"""
bases: NDArray[np.int64] = self._prime_number_generator.get_n_primes(dims)
# Generate a sample using a Halton sequence.
sample: NDArray[np.float64] = halton(
sample_size=nb_samples, bases=bases, n_start=self._sequence_index
)
# increment sequence start index for the next sampling
self._sequence_index += nb_samples
return sample
class _PrimesIterator:
"""
This class implements an iterator that iterates over all primes via an unbounded Sieve of Eratosthenes.
Adapted from:
https://wthwdik.wordpress.com/2007/08/30/an-unbounded-sieve-of-eratosthenes/
It caches the sequence of primes up to the highest n.
"""
def __init__(self) -> None:
"""Initialize the iterator."""
self._primes = [[2, 2]]
self._candidate = 2
def __iter__(self) -> Iterator:
"""Make the class iterable."""
return self
def __next__(self) -> int:
"""Get the next prime number."""
while True:
self._candidate = self._candidate + 1
for i in self._primes:
while self._candidate > i[1]:
i[1] = i[0] + i[1]
if self._candidate == i[1]:
break
else:
# if here, we have i == primes[-1]:
self._primes.append([self._candidate, self._candidate])
return self._candidate
class _CachedPrimesCalculator: # pylint: disable=too-few-public-methods
"""Utility class to compute and cache the first n prime numbers."""
def __init__(self) -> None:
"""Initialize the object."""
self._primes_iterator = _PrimesIterator()
self._cached_primes: List[int] = [2]
def get_n_primes(self, n: int) -> NDArray[np.int64]:
"""
Get the first n primes.
Args:
n: the number of primes.
Returns:
a list containing the first n primes.
"""
check_arg(n >= 1, "input must be greater than 0")
if n <= len(self._cached_primes):
return np.array(self._cached_primes[:n])
nb_next_primes = n - len(self._cached_primes)
next_primes = itertools.islice(self._primes_iterator, nb_next_primes)
self._cached_primes.extend(next_primes)
return np.array(self._cached_primes[:n])
def halton(
sample_size: int, bases: NDArray[np.int64], n_start: int
) -> NDArray[np.float64]:
"""
Van der Corput sequence, generalized as to accept a starting point in the sequence.
Args:
sample_size: number of element of the sequence
bases: bases of the sequence
n_start: starting point of the sequence
Returns:
sequence of Halton
"""
check_arg(sample_size > 0, "sample size must be greater than zero")
check_arg(bool((bases > 1).all()), "based must be greater than one")
check_arg(n_start >= 0, "n_start must be greater or equal zero")
nb_bases = len(bases)
sequence: NDArray[np.float64] = np.zeros(shape=(sample_size, nb_bases))
for index in range(n_start + 1, sample_size + n_start + 1):
n_th_numbers: NDArray[np.float64] = np.zeros(shape=nb_bases)
denoms: NDArray[np.float64] = np.ones(shape=nb_bases)
done: NDArray[np.bool8] = np.zeros(shape=nb_bases, dtype=np.bool8)
i = np.repeat(np.int64(index), repeats=nb_bases)
while (i > 0).any():
i, remainders = np.divmod(i, bases)
denoms *= bases
# mask remainders in case i = 0
remainders[done] = 0.0
n_th_numbers += remainders / denoms
done[i == 0] = True
sequence[index - 1 - n_start, :] = n_th_numbers
return sequence
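# A minimal sketch of the sequence itself: with bases (2, 3) and n_start=0 the
# first points are the classic Halton pairs.
#
# halton(sample_size=3, bases=np.array([2, 3]), n_start=0)
# -> [[0.5,  0.3333...],
#     [0.25, 0.6666...],
#     [0.75, 0.1111...]]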
|
PypiClean
|
/ruamel.windowsregistry-0.1.1.tar.gz/ruamel.windowsregistry-0.1.1/__init__.py
|
from typing import Dict, Any
_package_data: Dict[str, Any] = dict(
full_package_name='ruamel.windowsregistry',
version_info=(0, 1, 1),
__version__='0.1.1',
version_timestamp='2023-02-01 10:10:42',
author='Anthon van der Neut',
author_email='[email protected]',
description='get and set values on windows registry',
keywords='pypi statistics',
# entry_points='windowsregistry=ruamel.windowsregistry.__main__:main',
entry_points=None,
license='Copyright Ruamel bvba 2007-2023',
since=2023,
# status='α|β|stable', # the package status on PyPI
# data_files="",
# universal=True, # py2 + py3
# install_requires=['ruamel.std.pathlib', ],
tox=dict(env='3',), # *->all p->pypy
python_requires='>=3',
) # NOQA
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
####
import bz2
import pickle
import weakref
import winreg as _winreg
date_time_format = '%Y-%m-%d %H:%M:%S'
class WindowsRegistry:
"""keeps registry open until the WindowsRegistry object is deleted.
need to access including HKEY_LOCAL_MACHINE etc. prefix
"""
def __init__(self, log=None, verbose=0):
self._registries = {}
self.log = log if log else self.nolog
def get(self, fullpath, smart=True, readonly=False, default=None):
"""if key is not found, return default (i.e. None)
smart -> convert loaded type smartly
for the (Default) value of key make sure it ends with a /
"""
key, name = self._get_key(fullpath, readonly=readonly)
try:
val, type_id = _winreg.QueryValueEx(key, name)
_winreg.CloseKey(key)
if not smart:
return val
elif type_id == _winreg.REG_SZ:
return val
elif type_id == _winreg.REG_DWORD:
return int(val)
elif type_id == _winreg.REG_BINARY:
if val:
return pickle.loads(bz2.decompress(val))
return val
elif type_id == _winreg.REG_MULTI_SZ:
return val
except WindowsError as e:
self.log('WindowsError', e)
self.log('fullpath', fullpath)
return default
except TypeError:
return None
def set(self, fullpath, value):
key, name = self._get_key(fullpath)
try:
_winreg.SetValueEx(key, name, 0, _winreg.REG_SZ, value)
except Exception as e:
self.log(f'value not settable [{fullpath}] [{value}: {e}]')
raise
_winreg.CloseKey(key)
def setint(self, fullpath, value):
key, name = self._get_key(fullpath)
try:
_winreg.SetValueEx(key, name, 0, _winreg.REG_DWORD, value)
except Exception as e:
self.log(f'integer value not settable [{fullpath}] [{value}: {e}]')
raise
_winreg.CloseKey(key)
def set_datetime(self, fullpath, dts=None):
self.log('set_datetime')
key, name = self._get_key(fullpath)
try:
if dts is None:
import datetime
dts = datetime.datetime.now()
value = dts.strftime(date_time_format)
_winreg.SetValueEx(key, name, 0, _winreg.REG_SZ, value)
except Exception as e:
self.log(f'datetime value not settable [{fullpath}] [{value}: {e}]')
raise
_winreg.CloseKey(key)
def setbin(self, fullpath, value):
key, name = self._get_key(fullpath)
_winreg.SetValueEx(key, name, 0, _winreg.REG_BINARY, value)
_winreg.CloseKey(key)
def delete(self, fullpath):
self.log(f'deleting {fullpath}')
key, name = self._get_key(fullpath)
self.log(f'del {key} {name}')
_winreg.DeleteValue(key, name)
def delete_key(self, full_path):
self.log(f'deleting key {full_path}')
parent_key, name = self._get_key(full_path)
_winreg.DeleteKey(parent_key, name)
def subkeys(self, fullpath):
if fullpath[-1] == '/':
fullpath = fullpath[:-1]
regname, path = fullpath.replace('/', '\\').split('\\', 1)
wreg = _winreg.__dict__[regname]
h_key = _winreg.OpenKey(wreg, path)
result = []
try:
count = -1
while True:
count += 1
result.append(_winreg.EnumKey(h_key, count))
except WindowsError as e:
pass
return result
def subvalues(self, fullpath):
if fullpath[-1] == '/':
fullpath = fullpath[:-1]
regname, path = fullpath.replace('/', '\\').split('\\', 1)
wreg = _winreg.__dict__[regname]
h_key = _winreg.OpenKey(wreg, path)
result = []
try:
count = -1
while True:
count += 1
result.append(_winreg.EnumValue(h_key, count))
except WindowsError as e:
pass
return result
def _get_key(self, fullpath, readonly=False):
# e.g. HKEY_LOCAL_MACHINE/SYSTEM/CurrentControlSet/Control/
access_method = _winreg.KEY_ALL_ACCESS if not readonly else _winreg.KEY_READ
regname, path = fullpath.replace('/', '\\').split('\\', 1)
wreg = _winreg.__dict__[regname]
path, name = path.rsplit('\\', 1)
reg = self._registries.setdefault(regname, _winreg.ConnectRegistry(None, wreg))
try:
key = _winreg.OpenKey(reg, path, 0, access_method)
except WindowsError:
if readonly:
return None, None
try:
createkey = _winreg.CreateKey(reg, path)
key = _winreg.OpenKey(reg, path, 0, access_method)
except:
am = 'R'
if access_method == _winreg.KEY_ALL_ACCESS:
am = 'RW'
self.log(f'Error creating registry path {fullpath} {am}')
raise
return key, name
def close(self):
if _winreg is not None:
for k, v in self._registries.items():
_winreg.CloseKey(v)
self._registries.clear()
def __del__(self):
self.close()
@staticmethod
def nolog(*args, **kw):
pass
windows_registry = WindowsRegistry()
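# A minimal usage sketch (the key path is hypothetical); paths use forward or
# backward slashes and must start with the HKEY_* prefix:
#
# ver = windows_registry.get('HKEY_CURRENT_USER/Software/MyApp/Version', readonly=True)
# windows_registry.set('HKEY_CURRENT_USER/Software/MyApp/Version', '1.2.3')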
|
PypiClean
|
/anpl-0.0.7-py3-none-any.whl/anchor/token.py
|
import typing
__all__: typing.List[str] = list(['pmdict', 'kwdict', 'namedict', 'NAME',])
PERCENT: typing.Literal = 'PERCENT'
LPAR: typing.Literal = 'LPAR'
RPAR: typing.Literal = 'RPAR'
STAR: typing.Literal = 'STAR'
DOUBLESTAR: typing.Literal = 'DOUBLESTAR'
PLUS: typing.Literal = 'PLUS'
COMMA: typing.Literal = 'COMMA'
MINUS: typing.Literal = 'MINUS'
RARROW: typing.Literal = 'RARROW'
DOT: typing.Literal = 'DOT'
SLASH: typing.Literal = 'SLASH'
DOUBLESLASH: typing.Literal = 'DOUBLESLASH'
COLON: typing.Literal = 'COLON'
SEMI: typing.Literal = 'SEMI'
EQUAL: typing.Literal = 'EQUAL'
EQEQUAL: typing.Literal = 'EQEQUAL'
NOTEQUAL: typing.Literal = 'NOTEQUAL'
LESS: typing.Literal = 'LESS'
LESSEQUAL: typing.Literal = 'LESSEQUAL'
GREATER: typing.Literal = 'GREATER'
GREATEREQUAL: typing.Literal = 'GREATEREQUAL'
LSQB: typing.Literal = 'LSQB'
RSQB: typing.Literal = 'RSQB'
LBRACE: typing.Literal = 'LBRACE'
RBRACE: typing.Literal = 'RBRACE'
# tokens defined only for parsing
UPLUS: typing.Literal = 'UPLUS'
UMINUS: typing.Literal = 'UMINUS'
pmdict: typing.Dict[str, str] = {
COMMA : r',',
LPAR : r'\(',
RPAR : r'\)',
PLUS : r'\+',
MINUS : r'-',
STAR : r'\*',
DOUBLESTAR : r'\*\*',
SLASH : r'/',
DOUBLESLASH : r'//',
PERCENT : r'%',
RARROW : r'->',
DOT : r'\.',
COLON : r':',
SEMI : r';',
EQUAL : r'=',
EQEQUAL : r'==',
NOTEQUAL : r'!=',
LESS : r'<',
LESSEQUAL : r'<=',
GREATER : r'>',
GREATEREQUAL : r'>=',
LSQB : r'\[',
RSQB : r'\]',
LBRACE : r'{',
RBRACE : r'}',
}
BEGIN: typing.Literal = 'BEGIN'
END: typing.Literal = 'END'
TRUE: typing.Literal = 'TRUE'
FALSE: typing.Literal = 'FALSE'
NULL: typing.Literal = 'NULL'
OR: typing.Literal = 'OR'
AND: typing.Literal = 'AND'
NOT: typing.Literal = 'NOT'
CLASS: typing.Literal = 'CLASS'
PROPERTY: typing.Literal = 'PROPERTY'
METHOD: typing.Literal = 'METHOD'
FUNCTION: typing.Literal = 'FUNCTION'
RETURN: typing.Literal = 'RETURN'
IF: typing.Literal = 'IF'
THEN: typing.Literal = 'THEN'
ELIF: typing.Literal = 'ELIF'
ELSE: typing.Literal = 'ELSE'
ITERATE: typing.Literal = 'ITERATE'
FOR: typing.Literal = 'FOR'
LOOP: typing.Literal = 'LOOP'
CONTINUE: typing.Literal = 'CONTINUE'
BREAK: typing.Literal = 'BREAK'
PUBLIC: typing.Literal = 'PUBLIC'
PRIVATE: typing.Literal = 'PRIVATE'
PROTECTED: typing.Literal = 'PROTECTED'
FACTORY: typing.Literal = 'FACTORY'
GET: typing.Literal = 'GET'
SET: typing.Literal = 'SET'
REF: typing.Literal = 'REF'
VAL: typing.Literal = 'VAL'
kwdict: typing.Dict[str, str] = {
BEGIN : 'begin',
END : 'end',
TRUE : 'True',
FALSE : 'False',
NULL : 'Null',
OR : 'or',
AND : 'and',
NOT : 'not',
CLASS : 'class',
PROPERTY : 'property',
METHOD : 'method',
FUNCTION : 'function',
RETURN : 'return',
IF : 'if',
THEN : 'then',
ELIF : 'elif',
ELSE : 'else',
ITERATE : 'iterate',
FOR : 'for',
LOOP : 'loop',
CONTINUE : 'continue',
BREAK : 'break',
PUBLIC : 'public',
PRIVATE : 'private',
PROTECTED : 'protected',
FACTORY : 'factory',
GET : 'get',
SET : 'set',
REF : 'ref',
VAL : 'val',
}
__all__.extend(
[name for name, value in globals().items() if isinstance(value, str)]
)
namedict: typing.Dict[str, str] = dict(pmdict | kwdict)
NAME: typing.Dict[str, str] = dict({
value.replace('\\', ''): name
for name, value in namedict.items()
})
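# A minimal sketch of the lookup direction: namedict maps token names to their
# lexeme patterns, while NAME inverts it with the regex escapes stripped.
#
# namedict['PLUS'] -> r'\+'
# NAME['+'] -> 'PLUS'
# NAME['begin'] -> 'BEGIN'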
|
PypiClean
|
/ensmallen_graph-0.6.0-cp37-cp37m-manylinux2010_x86_64.whl/ensmallen_graph/datasets/string/lactobacillusgallinarum.py
|
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def LactobacillusGallinarum(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Lactobacillus gallinarum graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Lactobacillus gallinarum graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:51:03.463466
The undirected graph Lactobacillus gallinarum has 2076 nodes and 129309
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.06004 and has 12 connected components, where the component
with most nodes has 2043 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 90, the mean node degree is 124.58,
and the node degree mode is 2. The top 5 most central nodes are 1423748.BALB01000003_gene384
(degree 910), 1423748.BALB01000011_gene1149 (degree 750), 1423748.BALB01000008_gene953
(degree 680), 1423748.BALB01000004_gene637 (degree 593) and 1423748.BALB01000003_gene416
(degree 591).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LactobacillusGallinarum
# Then load the graph
graph = LactobacillusGallinarum()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="LactobacillusGallinarum",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
PypiClean
|
/all_params_env-0.0.1.tar.gz/all_params_env-0.0.1/all_params_env/opt_selfimple/deOpt.py
|
from opt_selfimple.opt import Optimization
from sko.DE import DE
import numpy as np
import scipy.stats as stats
import copy
hyperparams_DeOpt_default = {
'individual number': 50,
'mutation probability': 0.01,
'precision': 1e-7
}
conditions_DeOpt_default = {
'generate times': 20
}
optConfig_DeOpt_default = {
'init': {'init type': 'uniform'},
}
outConfig_DeOpt_default = {
}
class DeOpt(Optimization):
def __init__(self, object_fn, constraints, hyperparams=None, conditions=None, minimize=True, optConfig=None,
outConfig=None):
"""
Differential evolution implementation (scikit-opt)
Args:
object_fn:
constraints:
hyperparams:
conditions:
minimize:
optConfig:
outConfig:
"""
if hyperparams is None:
hyperparams = hyperparams_DeOpt_default
if conditions is None:
conditions = conditions_DeOpt_default
if optConfig is None:
optConfig = optConfig_DeOpt_default
if outConfig is None:
outConfig = outConfig_DeOpt_default
super().__init__(object_fn, constraints, hyperparams, conditions, minimize, outConfig)
self._lb = np.array([lu[0] for lu in self._constraints])
self._ub = np.array([lu[1] for lu in self._constraints])
# hyper parameters
self._num_indiv = self._hyperparams['individual number']
self._p_mut = self._hyperparams['mutation probability']
self._precision = self._hyperparams['precision']
self._optConfig = optConfig
self._dim = len(self._constraints)
if 'generate times' in self._conditions:
self._num_gener = self._conditions['generate times']
def _init_population(self):
"""初始化群体并返回,返回的是未经编码的群体"""
if self._optConfig['init']['init type'] == 'uniform': # uniform distribution
popul = np.random.uniform(low=self._lb, high=self._ub, size=(self._num_indiv, self._dim))
elif self._optConfig['init']['init type'] == 'normal': # normal distribution
mean = self._optConfig['init']['init info']['mean']
std = self._optConfig['init']['init info']['std']
popul = stats.truncnorm((self._lb - mean) / std, (self._ub - mean) / std, loc=mean, scale=std).rvs(
size=(self._num_indiv, self._dim))
elif self._optConfig['init']['init type'] == 'histogram': # custom histogram distribution
popul_raw = [stats.rv_histogram(histogram=hist).rvs(size=self._num_indiv) for hist in
self._optConfig['init']['init info']['histogram']]
popul_raw = np.stack(popul_raw, axis=-1)
popul = np.clip(popul_raw, self._lb, self._ub) # histogram bounds may differ from the constraints, so clip to correct
elif self._optConfig['init']['init type'] == 'given': # use the directly supplied initial population
popul = self._optConfig['init']['init info']['given data']
else:
raise Exception('No init settings given in _optConfig!')
return popul
def optimize(self):
"""进行优化"""
ga = GA(func=self._object,
n_dim=self._dim,
size_pop=self._num_indiv,
max_iter=self._num_gener,
prob_mut=self._p_mut,
lb=self._lb, ub=self._ub,
precision=self._precision
)
# region 初始化
popul = self._init_population()
ga.Chrom = ga.x2chrom(popul)
# endregion
best_x, best_y = ga.run()
return best_x
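# A minimal usage sketch (the objective below is a hypothetical sphere function;
# hyperparams/conditions fall back to the module defaults when omitted):
#
# opt = DeOpt(object_fn=lambda x: sum(v ** 2 for v in x),
#             constraints=[(-5, 5), (-5, 5)])
# best_x = opt.optimize()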
|
PypiClean
|
/pdpc-decisions-1.3.2.tar.gz/pdpc-decisions-1.3.2/pdpc_decisions/corpus_text/decision_v2.py
|
import logging
import re
from typing import List, Dict, Optional
from pdfminer.high_level import extract_pages, extract_text
from pdfminer.layout import LTTextContainer, LAParams, LTTextBoxHorizontal, LTTextLineHorizontal, LTChar
from pdpc_decisions.classes import PDPCDecisionItem, Options, PDFFile
from pdpc_decisions.corpus_text import common
from pdpc_decisions.corpus_text.common import BaseCorpusDocumentFactory
logger = logging.getLogger(__name__)
def check_next_footnote(footnotes, index):
if not len(footnotes) == index + 1:
next_footnote = footnotes[index + 1]
else:
return False
if re.match(r'\d+\s*', next_footnote.get_text().strip()):
if isinstance(next_footnote, LTTextBoxHorizontal):
first_char = next(iter(next(iter(next_footnote))))
elif isinstance(next_footnote, LTTextLineHorizontal):
first_char = next(iter(next_footnote))
else:
return False
if round(first_char.height) < common.get_main_text_size([next_footnote]):
return True
else:
return False
class DecisionV2Factory(BaseCorpusDocumentFactory):
def __init__(self, **kwargs):
super(DecisionV2Factory, self).__init__(laparams=LAParams(line_margin=0.5), **kwargs)
self._common_font = ''
self._paragraph_marks = []
self._text_margins = [72]
self._paragraph_strings: List[str] = []
self._current_paragraph_mark = ''
self._main_text_size = 12
self._footnotes: Dict[str, str] = dict()
def _extract_pages_and_text_containers(self, pdf):
self._pages = list(extract_pages(pdf, laparams=self.data.get('laparams', None)))[1:]
for page in self._pages:
self._text_containers.append([element for element in page if
isinstance(element, LTTextContainer) and
element.get_text() != '' and not
re.search(r'^\s+$', element.get_text())])
def pre_process(self):
BaseCorpusDocumentFactory.pre_process(self)
self._common_font = common.get_common_font_from_pages(self._pages)
margin_limit = 3 if len(self._pages) > 3 else len(self._pages)
self._text_margins = common.get_text_margins(list(self.get_text_containers()), margin_limit)
self._main_text_size = common.get_main_text_size(list(self.get_text_containers()))
for index, containers in enumerate(self._text_containers):
containers = common.split_joined_text_containers(containers)
containers = [container for container in containers if all([
container.get_text().strip() != '',
round(container.x0) in self._text_margins,
round(container.height) <= self._main_text_size + 1 or self._check_footnote(container)
])]
containers = sorted(containers, key=lambda item: item.x0)
containers = sorted(containers, key=lambda item: item.y0, reverse=True)
self._text_containers[index] = containers
self._footnotes = self._get_and_remove_footnotes()
logger.info("Pre process finished.")
def _get_and_remove_footnotes(self):
footnotes = []
for index, page in enumerate(self._pages):
new_containers, footnote_page = common.get_footnotes_using_separator(page,
self._text_containers[index],
self._text_margins[0],
self._main_text_size)
self._text_containers[index] = new_containers
if footnote_page:
footnotes.extend(footnote_page)
return common.construct_footnotes(footnotes)
def process_all(self):
for index, page_containers in enumerate(self._text_containers):
self.process_page(page_containers)
def process_paragraph(self, paragraph: LTTextContainer, index: int, page_containers: List[LTTextContainer]) -> None:
logger.info(f"New container: {paragraph}")
if self._check_skip_paragraph(paragraph, index):
return
container_string = self._replace_footnote(paragraph)
container_string = self._check_top_paragraph(container_string, paragraph)
self._paragraph_strings.append(container_string)
logger.info(f'Added to paragraph strings')
self._check_paragraph_end(index, page_containers)
logger.info("End of this container.")
def _check_footnote(self, paragraph):
result = []
char_text = []
if isinstance(paragraph, LTTextBoxHorizontal):
char_list = list(iter(next(iter(paragraph))))
elif isinstance(paragraph, LTTextLineHorizontal):
char_list = list(iter(paragraph))
else:
return None
for index, char in enumerate(char_list):
if isinstance(char, LTChar) and round(char.height) < self._main_text_size and \
re.match(r'\d', char.get_text()):
char_text.append(char.get_text())
if (index + 1 == len(char_list)) or (isinstance(char_list[index + 1], LTChar) and round(
char_list[index + 1].height) >= self._main_text_size):
footnote_mark = ''.join(char_text)
result.append(footnote_mark)
char_text.clear()
return result if len(result) > 0 else None
def _replace_footnote(self, paragraph):
result = paragraph.get_text().strip()
if footnotes_marks := self._check_footnote(paragraph):
for footnote_mark in footnotes_marks:
if self._footnotes.get(footnote_mark):
result = result.replace(footnote_mark, f" ({self._footnotes[footnote_mark]})", 1)
logger.info(f'Replaced a footnote: {footnote_mark}, {self._footnotes[footnote_mark]}')
else:
logger.warning(f'Footnote mark ({footnote_mark}) cannot be replaced as it is not in the footnotes.')
return result
def _check_top_paragraph(self, container_string, paragraph):
if all([
round(paragraph.x0) == self._text_margins[0],
match := re.match(r'\d+\.?\s*', paragraph.get_text()),
not self._current_paragraph_mark
]):
self._current_paragraph_mark = match.group(0).strip()
logger.info(f"Added a paragraph mark: {self._current_paragraph_mark}")
container_string = container_string.replace(self._current_paragraph_mark, '', 1)
logger.info(f"Adjust container: {container_string}")
return container_string
def _check_paragraph_end(self, index, page_containers):
if all([common.check_gap_before_after_container(page_containers, index, equal=True),
not common.check_common_font(page_containers[index], self._common_font),
not self._current_paragraph_mark
]):
self._result.add_paragraph(" ".join(self._paragraph_strings))
logger.info(f'Added a header-like paragraph: {self._result.paragraphs[-1].text}')
self._paragraph_strings.clear()
return
if re.search(r'[.?!][")]?\d*\s*$', page_containers[index].get_text()) and any([
len(self._paragraph_strings) == 1,
common.check_gap_before_after_container(page_containers, index)
]):
if self._current_paragraph_mark:
self._result.add_paragraph(" ".join(self._paragraph_strings), self._current_paragraph_mark)
else:
logger.warning(
f'No paragraph mark was found for ({self._paragraph_strings[0]}).')
if len(self._result.paragraphs) > 0:
logger.info('Adding to previous paragraph')
self._paragraph_strings.insert(0, self._result.paragraphs[-1].text)
self._result.paragraphs[-1].update_text(" ".join(self._paragraph_strings))
else:
logger.warning('Creating a new paragraph')
self._result.add_paragraph(" ".join(self._paragraph_strings))
logger.info(f'Added a paragraph: {self._result.paragraphs[-1]}')
self._paragraph_strings.clear()
self._current_paragraph_mark = ''
logger.info('Reset paragraph mark and string.')
def _check_skip_paragraph(self, paragraph, index):
paragraph_text = paragraph.get_text().strip()
if common.check_text_is_date(paragraph):
logger.info('Date found, skipping')
return True
if any([
re.match(r'[A-Z ]+\s*$', paragraph_text),
re.match(r'(\[\d{4}])\s+((?:\d\s+)?[A-Z|()]+)\s+\[?(\d+)\]?', paragraph_text),
re.match(r'Tan Kiat How', paragraph_text),
re.match(r'Yeong Zee Kin', paragraph_text)
]):
logger.info('Meta-info found, skipping')
return True
if index == 0 and len(self._pages) > 1:
if paragraph_text == self._text_containers[1][0].get_text().strip():
logger.info('Looks like a header. Skipping.')
return True
if round(paragraph.y0) > 700 and re.match(r'(\[\d{4}])\s+((?:\d\s+)?[A-Z|()]+)\s+\[?(\d+)\]?', paragraph_text):
logger.info('Looks like a header. Skipping.')
return True
if 284 < round(paragraph.x0) < 296 and re.match(r'\d+$', paragraph_text):
logger.info('Looks like a footer. Skipping.')
return True
if re.match(r'\d+ of \d+$', paragraph_text):
logger.info('Looks like a footer. Skipping.')
return True
return False
@classmethod
def check_decision(cls, item: Optional[PDPCDecisionItem] = None, options: Optional[Options] = None) -> bool:
with PDFFile(item, options) as pdf:
first_page = extract_pages(pdf, page_numbers=[0], laparams=LAParams(line_margin=0.1, char_margin=3.5))
text = extract_text(pdf, page_numbers=[0], laparams=LAParams(line_margin=0.1, char_margin=3.5))
containers = common.extract_text_containers(first_page)
if len(text.split()) <= 100:
for container in containers:
container_text = container.get_text().strip()
if container_text == 'DECISION' or container_text == 'GROUNDS OF DECISION':
return True
return False
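# Minimal usage sketch (hedged): the enclosing class name is not shown here,
# so `PDPCDecisionParser` below is an assumed placeholder; `item` and
# `options` are this package's PDPCDecisionItem/Options objects.
# item = PDPCDecisionItem(...)                     # hypothetical construction
# if PDPCDecisionParser.check_decision(item):
#     ...  # first page looks like a DECISION / GROUNDS OF DECISION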
|
PypiClean
|
/tensorflow_quantum-0.7.2-cp37-cp37m-macosx_10_11_x86_64.whl/tensorflow_quantum/core/ops/math_ops/fidelity_op.py
|
"""Module for tfq.math.fidelity op."""
import tensorflow as tf
from tensorflow_quantum.core.ops.math_ops import inner_product_op
@tf.function
@tf.custom_gradient
def fidelity(programs, symbol_names, symbol_values, other_programs):
"""Calculate the fidelity between circuits.
    Compute (potentially many) fidelities between the given circuits and
    the symbol-free comparison circuits.
    Calculates out[i][j] = $|\langle \psi_{\text{programs[i]}}(\text{symbol\_values[i]}) | \psi_{\text{other\_programs[j]}} \rangle|^2$
>>> symbols = sympy.symbols('alpha beta')
>>> qubits = cirq.GridQubit.rect(1, 2)
>>> reference_circuits = [
... cirq.Circuit((cirq.H**symbols[0]).on_each(qubits)),
... cirq.Circuit(
... cirq.X(qubits[0]) ** symbols[0],
... cirq.Y(qubits[1]) ** symbols[1])
... ]
>>> other_circuits = [
... cirq.Circuit(cirq.X.on_each(qubits)),
... cirq.Circuit((cirq.Y**0.125).on_each(qubits)),
... cirq.Circuit((cirq.X**0.5).on_each(qubits))
... ]
>>> reference_tensor = tfq.convert_to_tensor(reference_circuits)
>>> symbol_tensor = tf.convert_to_tensor([s.name for s in symbols])
>>> values_tensor = tf.convert_to_tensor(np.arange(4).reshape(2, 2))
>>> other_tensor = tfq.convert_to_tensor([other_circuits, other_circuits])
>>> fid = tfq.math.fidelity(reference_tensor, symbol_tensor,
... values_tensor, other_tensor)
>>> fid
tf.Tensor(
[[ 0., 0.925, 0.25],
     [ 0., 0.036, 0.25]], shape=(2, 3), dtype=float32)
Note: `other_programs` must not contain any free symbols. These can
be resolved beforehand with `tfq.resolve_parameters`.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
            into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
other_programs: `tf.Tensor` of strings with shape [batch_size, n_others]
containing the string representations of the circuits with which to
compute the overlap on `programs` with. Must not contain any free
symbols.
Returns:
`tf.Tensor` with shape [batch_size, n_others] where `out[i][j]` is equal
        to the fidelity of `programs[i]` (with `symbol_values[i]`
        resolved in) and `other_programs[i][j]`.
"""
f32_vals = tf.cast(symbol_values, tf.float32)
ip = inner_product_op.inner_product(programs, symbol_names, f32_vals,
other_programs)
def grad(dy):
ret_zero = tf.equal(tf.size(symbol_names), 0)
inner_prod_grad = tf.cond(
ret_zero, lambda: tf.zeros_like(symbol_values, dtype=tf.float32),
lambda: tf.math.real(2. * ip * inner_product_op._inner_product_grad(
programs, symbol_names, symbol_values, other_programs, dy)))
return [None, None, inner_prod_grad, None]
return tf.math.abs(ip)**2, grad
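# Hedged sketch: the custom gradient above flows only into `symbol_values`;
# a tf.GradientTape call makes that visible. Tensor names reuse the docstring
# example and are assumptions:
# values = tf.Variable(np.arange(4).reshape(2, 2).astype(np.float32))
# with tf.GradientTape() as tape:
#     fid = fidelity(reference_tensor, symbol_tensor, values, other_tensor)
# grads = tape.gradient(fid, values)  # d(fidelity)/d(symbol_values) only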
|
PypiClean
|
/gordo_components-0.47.0-py3-none-any.whl/gordo_components/machine/validators.py
|
import collections.abc
import copy
import re
import datetime
import pandas as pd
import dateutil.parser
import logging
from gordo_components.serializer import pipeline_from_definition
from gordo_components.machine.dataset.sensor_tag import SensorTag
logger = logging.getLogger(__name__)
class BaseDescriptor:
"""
Base descriptor class
New object should override __set__(self, instance, value) method to check
if 'value' meets required needs.
"""
def __get__(self, instance, owner):
return instance.__dict__[self.name]
def __set_name__(self, owner, name):
self.name = name
def __set__(self, instance, value):
raise NotImplementedError("Setting value not implemented for this Validator!")
class ValidDataset(BaseDescriptor):
"""
Descriptor for attributes requiring type :class:`gordo_components.workflow.config_elements.Dataset`
"""
def __set__(self, instance, value):
# Avoid circular dependency imports
from gordo_components.machine.dataset.base import GordoBaseDataset
if not isinstance(value, GordoBaseDataset):
raise TypeError(
f"Expected value to be an instance of GordoBaseDataset, found {value}"
)
instance.__dict__[self.name] = value
class ValidDatasetKwargs(BaseDescriptor):
"""
    Descriptor for dataset keyword arguments, e.g. validating that any
    'resolution' value is a pandas-supported resampling frequency
"""
def _verify_resolution(self, resolution: str):
"""
Verifies that a resolution string is supported in pandas
"""
try:
pd.tseries.frequencies.to_offset(resolution)
except ValueError:
raise ValueError(
'Values for "resolution" must match pandas frequency terms: '
"http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html"
)
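    # Hedged note: to_offset() accepts pandas frequency strings such as
    # "10T" (10 minutes) or "1H" (1 hour); anything it rejects surfaces as
    # the ValueError raised above.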
def __set__(self, instance, value):
if not isinstance(value, dict):
raise TypeError(f"Expected kwargs to be an instance of dict, found {value}")
# Check that if 'resolution' is defined, it's one of supported pandas resampling frequencies
if "resolution" in value:
self._verify_resolution(value["resolution"])
instance.__dict__[self.name] = value
class ValidModel(BaseDescriptor):
"""
Descriptor for attributes requiring type Union[dict, str]
"""
def __set__(self, instance, value):
if getattr(instance, "_strict", True):
try:
pipeline_from_definition(value)
except Exception as e:
raise ValueError(f"Pipeline from definition failed: {e}")
instance.__dict__[self.name] = value
class ValidMetadata(BaseDescriptor):
"""
Descriptor for attributes requiring type Optional[dict]
"""
def __set__(self, instance, value):
from gordo_components.machine.metadata import Metadata
if value is not None and not any(
isinstance(value, Obj) for Obj in (dict, Metadata)
):
raise ValueError(f"Can either be None or an instance of dict or Metadata")
instance.__dict__[self.name] = value
class ValidDataProvider(BaseDescriptor):
"""
Descriptor for DataProvider
"""
def __set__(self, instance, value):
# Avoid circular dependency imports
from gordo_components.machine.dataset.data_provider.base import (
GordoBaseDataProvider,
)
if not isinstance(value, GordoBaseDataProvider):
raise TypeError(
f"Expected value to be an instance of GordoBaseDataProvider, "
f"found {value} "
)
instance.__dict__[self.name] = value
class ValidMachineRuntime(BaseDescriptor):
"""
Descriptor for runtime dict in a machine object. Must be a valid runtime, but also
must contain server.resources.limits/requests.memory/cpu to be valid.
"""
def __set__(self, instance, value):
if not isinstance(value, dict):
raise ValueError(f"Runtime must be an instance of dict")
value = fix_runtime(value)
instance.__dict__[self.name] = value
def fix_runtime(runtime_dict):
"""A valid runtime description must satisfy that any resource
description must have that limit >= requests. This function will bump any limits
that is too low."""
runtime_dict = copy.deepcopy(runtime_dict)
    # We must also fix limit/request errors in nested resource descriptions
for key, val in runtime_dict.items():
        if isinstance(val, collections.abc.Mapping):
resource = val.get("resources")
if resource:
runtime_dict[key]["resources"] = fix_resource_limits(resource)
return runtime_dict
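# Example (hedged sketch, mirroring the fix_resource_limits doctest below):
# fix_runtime({"server": {"resources": {"requests": {"cpu": 10},
#                                       "limits": {"cpu": 9}}}})
# -> {'server': {'resources': {'requests': {'cpu': 10}, 'limits': {'cpu': 10}}}}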
def fix_resource_limits(resources: dict) -> dict:
"""
Resource limitations must be higher or equal to resource requests, if they are
both specified. This bumps any limits to the corresponding request if they are both
set.
Parameters
----------
resources: dict
Dictionary with possible requests/limits
Examples
--------
>>> fix_resource_limits({"requests": {"cpu": 10}, "limits":{"cpu":9}})
{'requests': {'cpu': 10}, 'limits': {'cpu': 10}}
>>> fix_resource_limits({"requests": {"cpu": 10}})
{'requests': {'cpu': 10}}
Returns
-------
dict:
        A copy of `resources` with any limits bumped to the corresponding
        request if they are both set.
"""
resources = copy.deepcopy(resources)
requests = resources.get("requests", dict())
limits = resources.get("limits", dict())
request_memory = requests.get("memory")
limits_memory = limits.get("memory")
requests_cpu = requests.get("cpu")
limits_cpu = limits.get("cpu")
for r in [request_memory, limits_memory, requests_cpu, limits_cpu]:
if r is not None and not isinstance(r, int):
raise ValueError(
f"Resource descriptions must be integers, and '{r}' is not."
)
if (
limits_memory is not None
and request_memory is not None
and request_memory > limits_memory
):
logger.warning(
f"Memory limit {limits_memory} can not be smaller than memory "
f"request {request_memory}, increasing memory limit to be equal"
f" to request. "
)
limits["memory"] = request_memory
if (
limits_cpu is not None
and requests_cpu is not None
and requests_cpu > limits_cpu
):
logger.warning(
f"CPU limit {limits.get('cpu')} can not be smaller than cpu request"
f" {requests.get('cpu')}, increasing cpu limit to be equal to request."
)
limits["cpu"] = requests_cpu
return resources
class ValidDatetime(BaseDescriptor):
"""
Descriptor for attributes requiring valid datetime.datetime attribute
"""
def __set__(self, instance, value):
datetime_value = None
if isinstance(value, datetime.datetime):
datetime_value = value
elif isinstance(value, str):
datetime_value = dateutil.parser.isoparse(value)
else:
raise ValueError(
f"'{value}' is not a valid datetime.datetime object or string!"
)
if datetime_value.tzinfo is None:
raise ValueError(f"Provide timezone to timestamp '{value}'")
instance.__dict__[self.name] = datetime_value
class ValidTagList(BaseDescriptor):
"""
Descriptor for attributes requiring a non-empty list of strings
"""
def __set__(self, instance, value):
        if (
            not isinstance(value, list)
            or len(value) == 0
            or not any(isinstance(value[0], inst) for inst in (str, dict, SensorTag))
        ):
            raise ValueError("Requires setting a non-empty list of strings")
instance.__dict__[self.name] = value
class ValidUrlString(BaseDescriptor):
"""
Descriptor for use in objects which require valid URL values.
    Where 'valid URL values' means Gordo's convention: lower-case alphanumeric with dashes.
Use:
class MySpecialClass:
url_attribute = ValidUrlString()
...
myspecialclass = MySpecialClass()
myspecialclass.url_attribute = 'this-is-ok'
myspecialclass.url_attribute = 'this will r@ise a ValueError'
"""
def __set__(self, instance, value):
if not self.valid_url_string(value):
raise ValueError(
f"'{value}' is not a valid Gordo url value. Only lower-case alphanumeric with dashes allowed.'"
)
if len(value) > 63:
raise ValueError(
f"'{value}' should be less than 63 chars, as required by Kubernetes/AKS DNS requirements."
)
instance.__dict__[self.name] = value
@staticmethod
def valid_url_string(string: str) -> bool:
"""
        What we (Gordo) deem to be a suitable URL matches the Kubernetes
        convention: lowercase alphanumeric with dashes, neither starting nor
        ending with a dash
Parameters
----------
string: str - String to check
Returns
-------
bool
"""
return bool(
re.match(
r"^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*)$",
string,
)
)
|
PypiClean
|
/TwistedSNMP-working-0.3.13.zip/TwistedSNMP-working-0.3.13/v4/agentproxy.py
|
__metaclass__ = type
from pysnmp.entity import config
from twisted.internet import defer, reactor
from pysnmp.entity.rfc3413 import cmdgen
DEFAULT_BULK_REPETITION_SIZE = 256
def targetNames( ):
"""Create an iterable producing monotonously increasing integers"""
i = 0
while 1:
i += 1
yield i
class BaseProxy:
"""Base class for all proxies based on SNMP v4.x"""
_targetCache = {}
_newTargetName = targetNames().next
def __init__( self, engine, targetName, snmpVersion='3' ):
"""Initialize the Proxy object's core parameters"""
self.engine = engine
self.targetName = targetName
self.snmpVersion = self.resolveVersion( snmpVersion )
def resolveVersion( self, versionString ):
"""Resolve to canonical format for version"""
if versionString in ('1','v1'):
return '1'
elif versionString in ('2','2c','v2','v2c'):
return '2c'
else:
return '3'
def _targetName( self, engine, ip, port, paramName ):
"""Get/create a target name for given target for given connection name"""
key = (ip,port,paramName)
targetName = self._targetCache.get( key )
if targetName is None:
nameID = self._newTargetName()
targetName = 'target-%s'%(nameID,)
config.addTargetAddr(
engine, targetName, config.snmpUDPDomain,
(ip, port), paramName
)
self._targetCache[ key ] = targetName
return targetName
def get(self, oids, timeout=2.0, retryCount=4):
"""Retrieve a single set of OIDs from the remote agent
oids -- list of dotted-numeric oids to retrieve
retryCount -- number of retries
        timeout -- initial timeout, is multiplied by 1.5 on each
            timeout iteration.
        return value is a Deferred for an { oid : value } mapping
for each oid in requested set
XXX Should be raising an error if the response has an
error message
"""
oids = [(oid,None) for oid in oids ]
df = defer.Deferred( )
cmdgen.GetCommandGenerator().sendReq(
self.engine,
self.targetName,
oids,
self._onGetResult,
df
)
return df
def _onGetResult(
self, sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBinds, df
):
"""Handle response from remote agent to our request"""
# If we have an error, call errback on df, otherwise
# call callback with the results
if errorIndication:
# XXX need differentiation and a common error type...
df.errback(
RuntimeError( errorIndication ),
)
else:
df.callback( dict([
(a,b)
for a,b in varBinds
#if not isinstance( b, v2c.EndOfMibView)
]))
def _onTimeout( self, df, timeout, retryCount ):
"""Implement our timeout handling algorithm
        Try up to retryCount times to retrieve; on failure,
        we abandon the attempt.
"""
# have to get support for this in pysnmp 4.x
def set( self, oids, timeout=2.0, retryCount=4):
"""Set a variable on our connected agent
oids -- dictionary of oid:value pairs, or a list of
(oid,value) tuples to be set on the agent
raises errors if the setting fails
"""
df = defer.Deferred( )
cmdgen.SetCommandGenerator().sendReq(
self.engine, self.targetName,
oids,
self._onSetResult,
df,
)
return df
def _onSetResult(
self, sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBinds,
df,
):
if errorIndication:
df.errback( RuntimeError( errorIndication, errorStatus, errorIndex ))
else:
df.callback( dict(varBinds))
def getTable(
self, roots, includeStart=0,
recordCallback=None,
retryCount=4, timeout= 2.0,
maxRepetitions= DEFAULT_BULK_REPETITION_SIZE,
startOIDs=None,
):
"""Convenience method for creating and running a TableRetriever
roots -- root OIDs to retrieve
includeStart -- whether to include the starting OID
in the set of results, by default, return the OID
*after* the root oids.
Note: Only implemented for v1 protocols, and likely
to be dropped eventually, as it seems somewhat
superfluous.
recordCallback -- called for each new record discovered
recordCallback( root, oid, value )
retryCount -- number of retries
        timeout -- initial timeout, is multiplied by 1.5 on each
timeout iteration.
maxRepetitions -- size for each block requested from the
server, i.e. how many records to download at a single
time
startOIDs -- optional OID markers to be used as starting point,
i.e. if passed in, we retrieve the table from startOIDs to
the end of the table excluding startOIDs themselves, rather
than from roots to the end of the table.
Will use bulk downloading when available (i.e. if
we have implementation v2c, not v1).
        return value is a Deferred for a { rootOID: { oid: value } } mapping
"""
df = defer.Deferred( )
result = {}
if startOIDs is None:
startOIDs = roots
def _onTabularResult(
sendRequestHandle,
errorIndication, errorStatus, errorIndex,
varBindTable, df
):
"""Process a (partial) tabular result"""
foundRoots = {}
foundNonNull = False
for row in varBindTable:
foundNonNull = False
for (key,value) in row:
if value is not None:
foundNonNull = True
for r in roots:
if key[:len(r)] == r:
tbl = result.get( r )
if tbl is None:
tbl = result[ r ] = {}
tbl[ key] = value
foundRoots[ r ] = key
if not foundRoots or not foundNonNull:
df.callback( result )
else:
roots[:] = foundRoots.keys()
if self.snmpVersion != '1':
cmdgen.BulkCommandGenerator().sendReq(
self.engine, self.targetName,
0, # nonRepeaters (count)
maxRepetitions,
[(r,None) for r in foundRoots.values()], # varBinds
_onTabularResult,
df,
)
else:
cmdgen.NextCommandGenerator().sendReq(
self.engine, self.targetName,
[(r,None) for r in foundRoots.values()],
_onTabularResult, df
)
if self.snmpVersion != '1':
cmdgen.BulkCommandGenerator().sendReq(
self.engine, self.targetName,
0, # nonRepeaters (count)
maxRepetitions,
[(r,None) for r in startOIDs], # varBinds
_onTabularResult,
df,
)
else:
cmdgen.NextCommandGenerator().sendReq(
self.engine, self.targetName,
[(r,None) for r in startOIDs],
_onTabularResult, df
)
return df
def listenTrap(
self, ipAddress=None, genericType=None, specificType=None,
community=None,
callback=None,
):
"""Listen for incoming traps, direct to given callback
ipAddress -- address from which to allow messages
genericType, specificType -- if present, only messages with the given
type are passed to the callback
community -- if present, only messages with this community string are
accepted/passed on to the callback
callback -- callable object to register, or None to deregister
"""
class AgentProxy(BaseProxy):
"""Proxy object for querying a remote agent"""
_v1ParamCache = {}
_newV1Name = targetNames().next
def __init__(
self, ip, port=161,
community='public', snmpVersion = '1',
engine=None, allowCache = False,
):
"""Initialize the SNMPProtocol object
ip -- ipAddress to which we connect
port -- remote port for the connection
community -- community to use for SNMP v1 or v2c conversations
snmpVersion -- '1', '2' or 3, indicating the supported SNMP version
engine -- configured PySNMP v4 engine
securityName -- name by which our connection parameters are known
if not provided, autogenerated
authProtocol -- authorisation protocol used to connect
authKey -- authorisation key used to connect
privProtocol -- protocol used to obscure requests from viewers
privKey -- key used to obscure requests
allowCache -- if True, we will optimise queries for the assumption
that we will be sending large numbers of identical queries
by caching every request we create and reusing it for all
identical queries. This means you cannot hold onto the
requests, which isn't a problem if you're just using the
proxy through the published interfaces.
"""
targetName = self.v1TargetName(
engine,
ip, port=port,
community=community,
snmpVersion=snmpVersion,
)
        super( AgentProxy, self ).__init__( engine, targetName, snmpVersion )
self.ip = str(ip)
self.port = int(port or 161)
self.community = str(community)
self.snmpVersion = snmpVersion
self.allowCache = allowCache
def v1TargetName(
self, engine,
ip, port=161,
community='public',
snmpVersion='2',
):
"""Find/create target name for v1/v2 connection to given agent"""
key = (community,snmpVersion=='1')
paramName = self._v1ParamCache.get( key )
if paramName is None:
nameID = self._newV1Name()
name = 'v1sys-%s'%(nameID)
config.addV1System(engine, name, community)
paramName = 'v1param-%s'%(nameID)
if snmpVersion == '1':
version = 0
else:
version = 1
config.addTargetParams(
engine, paramName, name, 'noAuthNoPriv', version
)
self._v1ParamCache[ key ] = paramName
return self._targetName( engine, ip, port, paramName )
class V3Proxy(BaseProxy):
"""Proxy object for querying a remote agent using SNMP version 3"""
AUTH_PROTOCOL_REGISTRY = {
'MD5': config.usmHMACMD5AuthProtocol,
'SHA': config.usmHMACSHAAuthProtocol,
None: config.usmNoAuthProtocol,
'': config.usmNoAuthProtocol,
False:config.usmNoAuthProtocol,
}
PRIV_PROTOCOL_REGISTRY = {
'DES': config.usmDESPrivProtocol,
None: config.usmNoPrivProtocol,
'': config.usmNoPrivProtocol,
False:config.usmNoPrivProtocol,
}
_v3paramCache = {}
_newV3Name = targetNames().next
def __init__(
self, ip, port=161,
engine=None,
authKey=None,
privKey=None,
authProtocol='MD5',
privProtocol='DES',
allowCache = False,
):
"""Initialize the Proxy object
ip -- ipAddress to which we connect
port -- remote port for the connection
engine -- configured PySNMP v4 engine
authKey -- authorisation key used to connect
privKey -- key used to obscure requests
authProtocol -- authorisation protocol used to connect
privProtocol -- protocol used to obscure requests from viewers
allowCache -- if True, we will optimise queries for the assumption
that we will be sending large numbers of identical queries
by caching every request we create and reusing it for all
identical queries. This means you cannot hold onto the
requests, which isn't a problem if you're just using the
proxy through the published interfaces.
"""
        targetName = self.v3TargetName(
            engine,
            ip, port=port,
authKey=authKey,
privKey=privKey,
authProtocol=authProtocol,
privProtocol=privProtocol,
)
super( V3Proxy, self ).__init__( engine, targetName, snmpVersion='3' )
self.ip = str(ip)
self.port = int(port or 161)
self.snmpVersion = '3'
self.allowCache = allowCache
def v3TargetName(
self, engine,
ip, port=161,
authKey=None,
privKey=None,
authProtocol='MD5',
privProtocol='DES',
):
"""Find/create target name for v1/v2 connection to given agent
authProtocol -- one of None, 'MD5' or 'SHA' determining the hashing
of the authorisation key (password)
privProtocol -- one of None or 'DES', determining encryption of the
messages sent to the agent
authKey -- authorisation key (password) for the agent
privKey -- key used to obscure requests from eavesdroppers
"""
if authKey is None:
authProtocol = None
if privKey is None:
privProtocol = None
authProtocol = self.AUTH_PROTOCOL_REGISTRY[ authProtocol ]
privProtocol = self.PRIV_PROTOCOL_REGISTRY[ privProtocol ]
key = ( authProtocol, authKey, privProtocol, privKey )
paramName = self._v3paramCache.get( key )
if paramName is None:
nameID = self._newV3Name()
name = 'v3user-%s'%(nameID)
config.addV3User(
engine, name,
authProtocol=authProtocol, authKey=authKey,
privProtocol=privProtocol, privKey=privKey
)
if authProtocol is config.usmNoAuthProtocol:
paramType = "noAuthNoPriv"
elif privProtocol is config.usmNoPrivProtocol:
paramType = "authNoPriv"
else:
paramType = "authPriv"
paramName = 'v3param-%s'%(nameID)
config.addTargetParams(
engine, paramName, name, paramType,
mpModel = 3
)
self._v3paramCache[ key ] = paramName
return self._targetName( engine, ip, port, paramName )
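# Minimal usage sketch (hedged): assumes a configured PySNMP v4 engine and a
# reachable SNMP v2c agent; engine construction is omitted.
# proxy = AgentProxy('192.0.2.1', 161, community='public',
#                    snmpVersion='2c', engine=engine)
# df = proxy.get(['1.3.6.1.2.1.1.1.0'])           # sysDescr.0
# df.addCallback(lambda oidMap: reactor.stop())
# reactor.run()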
|
PypiClean
|
/tencentcloud_iac_pulumi-0.1.5.tar.gz/tencentcloud_iac_pulumi-0.1.5/tencentcloud_iac_pulumi/instance/outputs.py
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'InstanceDataDisk',
'GetTypesFilterResult',
'GetTypesInstanceTypeResult',
]
@pulumi.output_type
class InstanceDataDisk(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dataDiskSize":
suggest = "data_disk_size"
elif key == "dataDiskType":
suggest = "data_disk_type"
elif key == "dataDiskId":
suggest = "data_disk_id"
elif key == "dataDiskSnapshotId":
suggest = "data_disk_snapshot_id"
elif key == "deleteWithInstance":
suggest = "delete_with_instance"
elif key == "throughputPerformance":
suggest = "throughput_performance"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in InstanceDataDisk. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
InstanceDataDisk.__key_warning(key)
return super().__getitem__(key)
    def get(self, key: str, default=None) -> Any:
InstanceDataDisk.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
data_disk_size: int,
data_disk_type: str,
data_disk_id: Optional[str] = None,
data_disk_snapshot_id: Optional[str] = None,
delete_with_instance: Optional[bool] = None,
encrypt: Optional[bool] = None,
throughput_performance: Optional[int] = None):
"""
:param int data_disk_size: Size of the data disk, and unit is GB.
:param str data_disk_type: Data disk type. For more information about limits on different data disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: LOCAL_BASIC: local disk, LOCAL_SSD: local SSD disk, LOCAL_NVME: local NVME disk, specified in the InstanceType, LOCAL_PRO: local HDD disk, specified in the InstanceType, CLOUD_BASIC: HDD cloud disk, CLOUD_PREMIUM: Premium Cloud Storage, CLOUD_SSD: SSD, CLOUD_HSSD: Enhanced SSD, CLOUD_TSSD: Tremendous SSD, CLOUD_BSSD: Balanced SSD.
:param str data_disk_id: Data disk ID used to initialize the data disk. When data disk type is `LOCAL_BASIC` and `LOCAL_SSD`, disk id is not supported.
:param str data_disk_snapshot_id: Snapshot ID of the data disk. The selected data disk snapshot size must be smaller than the data disk size.
        :param bool delete_with_instance: Decides whether the disk is deleted with the instance (only applied to `CLOUD_BASIC`, `CLOUD_SSD` and `CLOUD_PREMIUM` disk with `POSTPAID_BY_HOUR` instance), default is true.
:param bool encrypt: Decides whether the disk is encrypted. Default is `false`.
:param int throughput_performance: Add extra performance to the data disk. Only works when disk type is `CLOUD_TSSD` or `CLOUD_HSSD`.
"""
pulumi.set(__self__, "data_disk_size", data_disk_size)
pulumi.set(__self__, "data_disk_type", data_disk_type)
if data_disk_id is not None:
pulumi.set(__self__, "data_disk_id", data_disk_id)
if data_disk_snapshot_id is not None:
pulumi.set(__self__, "data_disk_snapshot_id", data_disk_snapshot_id)
if delete_with_instance is not None:
pulumi.set(__self__, "delete_with_instance", delete_with_instance)
if encrypt is not None:
pulumi.set(__self__, "encrypt", encrypt)
if throughput_performance is not None:
pulumi.set(__self__, "throughput_performance", throughput_performance)
@property
@pulumi.getter(name="dataDiskSize")
def data_disk_size(self) -> int:
"""
Size of the data disk, and unit is GB.
"""
return pulumi.get(self, "data_disk_size")
@property
@pulumi.getter(name="dataDiskType")
def data_disk_type(self) -> str:
"""
Data disk type. For more information about limits on different data disk types, see [Storage Overview](https://intl.cloud.tencent.com/document/product/213/4952). Valid values: LOCAL_BASIC: local disk, LOCAL_SSD: local SSD disk, LOCAL_NVME: local NVME disk, specified in the InstanceType, LOCAL_PRO: local HDD disk, specified in the InstanceType, CLOUD_BASIC: HDD cloud disk, CLOUD_PREMIUM: Premium Cloud Storage, CLOUD_SSD: SSD, CLOUD_HSSD: Enhanced SSD, CLOUD_TSSD: Tremendous SSD, CLOUD_BSSD: Balanced SSD.
"""
return pulumi.get(self, "data_disk_type")
@property
@pulumi.getter(name="dataDiskId")
def data_disk_id(self) -> Optional[str]:
"""
Data disk ID used to initialize the data disk. When data disk type is `LOCAL_BASIC` and `LOCAL_SSD`, disk id is not supported.
"""
return pulumi.get(self, "data_disk_id")
@property
@pulumi.getter(name="dataDiskSnapshotId")
def data_disk_snapshot_id(self) -> Optional[str]:
"""
Snapshot ID of the data disk. The selected data disk snapshot size must be smaller than the data disk size.
"""
return pulumi.get(self, "data_disk_snapshot_id")
@property
@pulumi.getter(name="deleteWithInstance")
def delete_with_instance(self) -> Optional[bool]:
"""
        Decides whether the disk is deleted with the instance (only applied to `CLOUD_BASIC`, `CLOUD_SSD` and `CLOUD_PREMIUM` disk with `POSTPAID_BY_HOUR` instance), default is true.
"""
return pulumi.get(self, "delete_with_instance")
@property
@pulumi.getter
def encrypt(self) -> Optional[bool]:
"""
Decides whether the disk is encrypted. Default is `false`.
"""
return pulumi.get(self, "encrypt")
@property
@pulumi.getter(name="throughputPerformance")
def throughput_performance(self) -> Optional[int]:
"""
Add extra performance to the data disk. Only works when disk type is `CLOUD_TSSD` or `CLOUD_HSSD`.
"""
return pulumi.get(self, "throughput_performance")
@pulumi.output_type
class GetTypesFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str]):
"""
:param str name: The filter name. Valid values: `zone`, `instance-family` and `instance-charge-type`.
:param Sequence[str] values: The filter values.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
The filter name. Valid values: `zone`, `instance-family` and `instance-charge-type`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
The filter values.
"""
return pulumi.get(self, "values")
@pulumi.output_type
class GetTypesInstanceTypeResult(dict):
def __init__(__self__, *,
availability_zone: str,
cpu_core_count: int,
family: str,
gpu_core_count: int,
instance_charge_type: str,
instance_type: str,
memory_size: int,
status: str):
"""
        :param str availability_zone: The availability zone where the CVM instance is located. This field conflicts with `filter`.
:param int cpu_core_count: The number of CPU cores of the instance.
:param str family: Type series of the instance.
:param int gpu_core_count: The number of GPU cores of the instance.
:param str instance_charge_type: Charge type of the instance.
:param str instance_type: Type of the instance.
:param int memory_size: Instance memory capacity, unit in GB.
:param str status: Sell status of the instance.
"""
pulumi.set(__self__, "availability_zone", availability_zone)
pulumi.set(__self__, "cpu_core_count", cpu_core_count)
pulumi.set(__self__, "family", family)
pulumi.set(__self__, "gpu_core_count", gpu_core_count)
pulumi.set(__self__, "instance_charge_type", instance_charge_type)
pulumi.set(__self__, "instance_type", instance_type)
pulumi.set(__self__, "memory_size", memory_size)
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
        The availability zone where the CVM instance is located. This field conflicts with `filter`.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter(name="cpuCoreCount")
def cpu_core_count(self) -> int:
"""
The number of CPU cores of the instance.
"""
return pulumi.get(self, "cpu_core_count")
@property
@pulumi.getter
def family(self) -> str:
"""
Type series of the instance.
"""
return pulumi.get(self, "family")
@property
@pulumi.getter(name="gpuCoreCount")
def gpu_core_count(self) -> int:
"""
The number of GPU cores of the instance.
"""
return pulumi.get(self, "gpu_core_count")
@property
@pulumi.getter(name="instanceChargeType")
def instance_charge_type(self) -> str:
"""
Charge type of the instance.
"""
return pulumi.get(self, "instance_charge_type")
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
Type of the instance.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="memorySize")
def memory_size(self) -> int:
"""
Instance memory capacity, unit in GB.
"""
return pulumi.get(self, "memory_size")
@property
@pulumi.getter
def status(self) -> str:
"""
Sell status of the instance.
"""
return pulumi.get(self, "status")
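# Minimal sketch (hedged): these output types are normally materialised by
# the Pulumi engine from provider responses, but direct construction shows
# the snake_case property mapping:
# disk = InstanceDataDisk(data_disk_size=50, data_disk_type='CLOUD_PREMIUM')
# disk.data_disk_size   # -> 50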
|
PypiClean
|