Dataset columns (string lengths are the min–max reported by the dataset viewer):

| column | type |
|---|---|
| repository_name | string (length 5–67) |
| func_path_in_repository | string (length 4–234) |
| func_name | string (length 0–314) |
| whole_func_string | string (length 52–3.87M) |
| language | string (6 classes) |
| func_code_string | string (length 52–3.87M) |
| func_documentation_string | string (length 1–47.2k) |
| func_code_url | string (length 85–339) |

Each record below is listed as repository | path | function | language, followed by the source URL and the function code. `func_code_string` is a verbatim copy of `whole_func_string`, and `func_documentation_string` repeats the docstring embedded in the code, so each record's code and docstring appear once.
MisterY/asset-allocation | asset_allocation/model.py | AssetAllocationModel.calculate_set_values | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/model.py#L229-L232

```python
def calculate_set_values(self):
    """ Calculate the expected totals based on set allocations """
    for ac in self.asset_classes:
        ac.alloc_value = self.total_amount * ac.allocation / Decimal(100)
```
MisterY/asset-allocation | asset_allocation/model.py | AssetAllocationModel.calculate_current_allocation | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/model.py#L234-L237

```python
def calculate_current_allocation(self):
    """ Calculates the current allocation % based on the value """
    for ac in self.asset_classes:
        ac.curr_alloc = ac.curr_value * 100 / self.total_amount
```
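To make the arithmetic in these two methods concrete, here is a minimal sketch with a stand-in object (the attribute names mirror the fields the methods touch; the figures are invented):

```python
from decimal import Decimal

class AC:
    """Stand-in for AssetClass; only the fields used by the two methods."""
    pass

total_amount = Decimal("10000")  # hypothetical portfolio total
ac = AC()
ac.allocation = Decimal("25")    # target allocation, in percent
ac.curr_value = Decimal("3000")  # current market value

# calculate_set_values: a 25% target of 10,000 is an expected value of 2,500
ac.alloc_value = total_amount * ac.allocation / Decimal(100)
assert ac.alloc_value == Decimal("2500")

# calculate_current_allocation: 3,000 out of 10,000 is a 30% current allocation
ac.curr_alloc = ac.curr_value * 100 / total_amount
assert ac.curr_alloc == Decimal("30")
```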
MisterY/asset-allocation | asset_allocation/model.py | AssetAllocationModel.calculate_current_value | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/model.py#L239-L246

```python
def calculate_current_value(self):
    """ Add all the stock values and assign to the asset classes """
    # must be recursive
    total = Decimal(0)
    for ac in self.classes:
        self.__calculate_current_value(ac)
        total += ac.curr_value
    self.total_amount = total
```
MisterY/asset-allocation | asset_allocation/model.py | AssetAllocationModel.__calculate_current_value | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/model.py#L248-L264

```python
def __calculate_current_value(self, asset_class: AssetClass):
    """ Calculate totals for asset class by adding all the children values """
    # Is this the final asset class, the one with stocks?
    if asset_class.stocks:
        # add all the stocks
        stocks_sum = Decimal(0)
        for stock in asset_class.stocks:
            # recalculate into base currency!
            stocks_sum += stock.value_in_base_currency
        asset_class.curr_value = stocks_sum
    if asset_class.classes:
        # load totals for child classes
        for child in asset_class.classes:
            self.__calculate_current_value(child)
            asset_class.curr_value += child.curr_value
```
MisterY/asset-allocation | asset_allocation/currency.py | CurrencyConverter.load_currency | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/currency.py#L12-L24

```python
def load_currency(self, mnemonic: str):
    """ load the latest rate for the given mnemonic; expressed in the base currency """
    # , base_currency: str <= ignored for now.
    if self.rate and self.rate.currency == mnemonic:
        # Already loaded.
        return
    app = PriceDbApplication()
    # TODO use the base_currency parameter for the query #33
    symbol = SecuritySymbol("CURRENCY", mnemonic)
    self.rate = app.get_latest_price(symbol)
    if not self.rate:
        raise ValueError(f"No rate found for {mnemonic}!")
```
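A usage sketch for the converter; it assumes a populated pricedb price database that contains a latest CURRENCY:AUD rate (the mnemonic is just an example):

```python
from asset_allocation.currency import CurrencyConverter

conv = CurrencyConverter()
conv.load_currency("AUD")   # queries pricedb for the latest CURRENCY:AUD price
print(conv.rate.value)      # Decimal rate, expressed in the base currency
conv.load_currency("AUD")   # no-op: the rate for AUD is already cached
```

Note the check at the top of the method: repeated calls for the same mnemonic skip the database query.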
MisterY/asset-allocation | asset_allocation/cli.py | show | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/cli.py#L30-L46

```python
def show(format, full):
    """ Print current allocation to the console. """
    # load asset allocation
    app = AppAggregate()
    app.logger = logger
    model = app.get_asset_allocation()
    if format == "ascii":
        formatter = AsciiFormatter()
    elif format == "html":
        formatter = HtmlFormatter()
    else:
        raise ValueError(f"Unknown formatter {format}")
    # formatters can display stock information with --full
    output = formatter.format(model, full=full)
    print(output)
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.load_cash_balances | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L29-L44

```python
def load_cash_balances(self):
    """ Loads cash balances from GnuCash book and recalculates into the default currency """
    from gnucash_portfolio.accounts import AccountsAggregate, AccountAggregate

    cfg = self.__get_config()
    cash_root_name = cfg.get(ConfigKeys.cash_root)
    # Load cash from all accounts under the root.
    gc_db = self.config.get(ConfigKeys.gnucash_book_path)
    with open_book(gc_db, open_if_lock=True) as book:
        svc = AccountsAggregate(book)
        root_account = svc.get_by_fullname(cash_root_name)
        acct_svc = AccountAggregate(book, root_account)
        cash_balances = acct_svc.load_cash_balances_with_children(cash_root_name)
        # Treat each sum per currency as a Stock, for display in full mode.
        self.__store_cash_balances_per_currency(cash_balances)
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.__store_cash_balances_per_currency | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L53-L67

```python
def __store_cash_balances_per_currency(self, cash_balances):
    """ Store balance per currency as Stock records under Cash class """
    cash = self.model.get_cash_asset_class()
    for cur_symbol in cash_balances:
        item = CashBalance(cur_symbol)
        item.parent = cash
        quantity = cash_balances[cur_symbol]["total"]
        item.value = Decimal(quantity)
        item.currency = cur_symbol
        # self.logger.debug(f"adding {item}")
        cash.stocks.append(item)
        self.model.stocks.append(item)
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.load_tree_from_db | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L69-L95

```python
def load_tree_from_db(self) -> AssetAllocationModel:
    """ Reads the asset allocation data only, and constructs the AA tree """
    self.model = AssetAllocationModel()
    # currency
    self.model.currency = self.__get_config().get(ConfigKeys.default_currency)
    # Asset Classes
    db = self.__get_session()
    first_level = (
        db.query(dal.AssetClass)
        .filter(dal.AssetClass.parentid == None)
        .order_by(dal.AssetClass.sortorder)
        .all()
    )
    # create tree
    for entity in first_level:
        ac = self.__map_entity(entity)
        self.model.classes.append(ac)
        # Add to index
        self.model.asset_classes.append(ac)
        # append child classes recursively
        self.__load_child_classes(ac)
    return self.model
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.load_stock_links | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L97-L110

```python
def load_stock_links(self):
    """ Read stock links into the model """
    links = self.__get_session().query(dal.AssetClassStock).all()
    for entity in links:
        # log(DEBUG, f"adding {entity.symbol} to {entity.assetclassid}")
        # mapping
        stock: Stock = Stock(entity.symbol)
        # find parent classes by id and assign children
        parent: AssetClass = self.model.get_class_by_id(entity.assetclassid)
        if parent:
            # Assign to parent.
            parent.stocks.append(stock)
        # Add to index for easy reference
        self.model.stocks.append(stock)
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.load_stock_quantity | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L112-L117

```python
def load_stock_quantity(self):
    """ Loads quantities for all stocks """
    info = StocksInfo(self.config)
    for stock in self.model.stocks:
        stock.quantity = info.load_stock_quantity(stock.symbol)
    info.gc_book.close()
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.load_stock_prices | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L119-L138

```python
def load_stock_prices(self):
    """ Load latest prices for securities """
    from pricedb import SecuritySymbol

    info = StocksInfo(self.config)
    for item in self.model.stocks:
        symbol = SecuritySymbol("", "")
        symbol.parse(item.symbol)
        price: PriceModel = info.load_latest_price(symbol)
        if not price:
            # Use a dummy price of 1, effectively keeping the original amount.
            price = PriceModel()
            price.currency = self.config.get(ConfigKeys.default_currency)
            price.value = Decimal(1)
        item.price = price.value
        if isinstance(item, Stock):
            item.currency = price.currency
        # Do not set currency for Cash balance records.
    info.close_databases()
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.recalculate_stock_values_into_base | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L140-L158

```python
def recalculate_stock_values_into_base(self):
    """ Loads the exchange rates and recalculates stock holding values into
    base currency """
    from .currency import CurrencyConverter

    conv = CurrencyConverter()
    cash = self.model.get_cash_asset_class()
    for stock in self.model.stocks:
        if stock.currency != self.base_currency:
            # Recalculate into base currency
            conv.load_currency(stock.currency)
            assert isinstance(stock.value, Decimal)
            val_base = stock.value * conv.rate.value
        else:
            # Already in base currency.
            val_base = stock.value
        stock.value_in_base_currency = val_base
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.__load_child_classes | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L161-L180

```python
def __load_child_classes(self, ac: AssetClass):
    """ Loads child classes/stocks """
    # load child classes for ac
    db = self.__get_session()
    entities = (
        db.query(dal.AssetClass)
        .filter(dal.AssetClass.parentid == ac.id)
        .order_by(dal.AssetClass.sortorder)
        .all()
    )
    # map
    for entity in entities:
        child_ac = self.__map_entity(entity)
        # depth
        child_ac.depth = ac.depth + 1
        ac.classes.append(child_ac)
        # Add to index
        self.model.asset_classes.append(child_ac)
        self.__load_child_classes(child_ac)
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.__map_entity | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L182-L186

```python
def __map_entity(self, entity: dal.AssetClass) -> AssetClass:
    """ maps the entity onto the model object """
    mapper = self.__get_mapper()
    ac = mapper.map_entity(entity)
    return ac
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.__get_session | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L194-L198

```python
def __get_session(self):
    """ Opens a db session """
    db_path = self.__get_config().get(ConfigKeys.asset_allocation_database_path)
    self.session = dal.get_session(db_path)
    return self.session
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationLoader.__load_asset_class | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L206-L211

```python
def __load_asset_class(self, ac_id: int):
    """ Loads Asset Class entity """
    # open database
    db = self.__get_session()
    entity = db.query(dal.AssetClass).filter(dal.AssetClass.id == ac_id).first()
    return entity
```
MisterY/asset-allocation | asset_allocation/loader.py | AssetAllocationAggregate.__get_by_fullname | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/loader.py#L422-L435

```python
def __get_by_fullname(self, asset_class, fullname: str):
    """ Recursive function """
    if asset_class.fullname == fullname:
        return asset_class
    if not hasattr(asset_class, "classes"):
        return None
    for child in asset_class.classes:
        found = self.__get_by_fullname(child, fullname)
        if found:
            return found
    return None
```
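A self-contained sketch of the same depth-first lookup, using throwaway nodes instead of the project's AssetClass objects (names are illustrative):

```python
from types import SimpleNamespace

def find_by_fullname(node, fullname):
    # Same shape as __get_by_fullname: match the node itself, then recurse.
    if node.fullname == fullname:
        return node
    if not hasattr(node, "classes"):
        return None
    for child in node.classes:
        found = find_by_fullname(child, fullname)
        if found:
            return found
    return None

tree = SimpleNamespace(
    fullname="Allocation",
    classes=[
        SimpleNamespace(fullname="Allocation:Equity", classes=[]),
        SimpleNamespace(fullname="Allocation:Fixed", classes=[]),
    ],
)
print(find_by_fullname(tree, "Allocation:Fixed").fullname)  # Allocation:Fixed
print(find_by_fullname(tree, "Allocation:Cash"))            # None
```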
MisterY/asset-allocation | asset_allocation/dal.py | get_session | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/dal.py#L53-L70

```python
def get_session(db_path: str):
    """ Creates and opens a database session """
    # cfg = Config()
    # db_path = cfg.get(ConfigKeys.asset_allocation_database_path)
    # connection
    con_str = "sqlite:///" + db_path
    # Display all SQLite info with echo.
    engine = create_engine(con_str, echo=False)
    # create metadata (?)
    Base.metadata.create_all(engine)
    # create session
    Session = sessionmaker(bind=engine)
    session = Session()
    return session
```
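A usage sketch; the path is hypothetical, and because of the `create_all` call the schema is created on first use, so this works against a fresh file:

```python
from asset_allocation import dal

session = dal.get_session("/tmp/asset_allocation.db")  # hypothetical path
print(session.query(dal.AssetClass).count())           # 0 on a brand-new database
session.close()
```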
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | add | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L28-L35

```python
def add(name):
    """ Add new Asset Class """
    item = AssetClass()
    item.name = name
    app = AppAggregate()
    app.create_asset_class(item)
    print(f"Asset class {name} created.")
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | edit | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L50-L79

```python
def edit(id: int, parent: int, alloc: Decimal):
    """ Edit asset class """
    saved = False
    # load
    app = AppAggregate()
    item = app.get(id)
    if not item:
        raise KeyError(f"Asset Class with id {id} not found.")
    if parent:
        assert parent != id, "Parent can not be set to self."
        # TODO check if parent exists?
        item.parentid = parent
        saved = True
        # click.echo(f"parent set to {parent}")
    if alloc:
        assert alloc != Decimal(0)
        item.allocation = alloc
        saved = True
    app.save()
    if saved:
        click.echo("Data saved.")
    else:
        click.echo("No data modified. Use --help to see possible parameters.")
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | my_list | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L83-L88

```python
def my_list():
    """ Lists all asset classes """
    session = AppAggregate().open_session()
    classes = session.query(AssetClass).all()
    for item in classes:
        print(item)
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | my_import | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L93-L121

```python
def my_import(file):
    """ Import Asset Class(es) from a .csv file """
    # , help="The path to the CSV file to import. The first row must contain column names."
    lines = None
    with open(file) as csv_file:
        lines = csv_file.readlines()
    # Header, the first line.
    header = lines[0]
    lines.remove(header)
    header = header.rstrip()
    # Parse records from a csv row.
    counter = 0
    app = AppAggregate()
    app.open_session()
    for line in lines:
        # Create insert statements
        line = line.rstrip()
        command = f"insert into AssetClass ({header}) values ({line});"
        # insert records
        app.session.execute(command)
        try:
            app.save()
        except:
            print("error:", sys.exc_info()[0])
            app.session.close()
        counter += 1
    print(f"Data imported. {counter} rows created.")
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | tree | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L126-L141

```python
def tree():
    """ Display a tree of asset classes """
    session = AppAggregate().open_session()
    classes = session.query(AssetClass).all()
    # Get the root classes
    root = []
    for ac in classes:
        if ac.parentid is None:
            root.append(ac)
            # logger.debug(ac.parentid)
    # header
    print_row("id", "asset class", "allocation", "level")
    print("-------------------------------")
    for ac in root:
        print_item_with_children(ac, classes, 0)
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | print_item_with_children | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L143-L146

```python
def print_item_with_children(ac, classes, level):
    """ Print the given item and all children items """
    print_row(ac.id, ac.name, f"{ac.allocation:,.2f}", level)
    print_children_recursively(classes, ac, level + 1)
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | print_children_recursively | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L148-L158

```python
def print_children_recursively(all_items, for_item, level):
    """ Print asset classes recursively """
    children = [child for child in all_items if child.parentid == for_item.id]
    for child in children:
        # message = f"{for_item.name}({for_item.id}) is a parent to {child.name}({child.id})"
        indent = " " * level * 2
        id_col = f"{indent} {child.id}"
        print_row(id_col, child.name, f"{child.allocation:,.2f}", level)
        # Process children.
        print_children_recursively(all_items, child, level + 1)
```
MisterY/asset-allocation | asset_allocation/assetclass_cli.py | print_row | python
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/assetclass_cli.py#L160-L175

```python
def print_row(*argv):
    """ Print one row of data """
    # for i in range(0, len(argv)):
    #     row += f"{argv[i]}"
    # columns
    row = ""
    # id
    row += f"{argv[0]:<3}"
    # name
    row += f" {argv[1]:<13}"
    # allocation
    row += f" {argv[2]:>5}"
    # level
    # row += f"{argv[3]}"
    print(row)
```
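Calling it with a header and two invented rows shows the fixed-width layout (id left-aligned in 3 columns, name in 13, allocation right-aligned in 5):

```python
print_row("id", "asset class", "allocation", 0)
print_row(1, "Equity", "55.00", 0)
print_row(2, "Fixed Income", "45.00", 0)
# id  asset class   allocation
# 1   Equity        55.00
# 2   Fixed Income  45.00
```

The fourth argument (level) is accepted but, as the commented-out line shows, not printed.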
dcwatson/bbcode | bbcode.py | render_html | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L604-L612

```python
def render_html(input_text, **context):
    """
    A module-level convenience method that creates a default bbcode parser,
    and renders the input string as HTML.
    """
    global g_parser
    if g_parser is None:
        g_parser = Parser()
    return g_parser.format(input_text, **context)
```
dcwatson/bbcode | bbcode.py | Parser.add_formatter | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L113-L136

```python
def add_formatter(self, tag_name, render_func, **kwargs):
    """
    Installs a render function for the specified tag name. The render function
    should have the following signature:

        def render(tag_name, value, options, parent, context)

    The arguments are as follows:

        tag_name
            The name of the tag being rendered.
        value
            The context between start and end tags, or None for standalone tags.
            Whether this has been rendered depends on render_embedded tag option.
        options
            A dictionary of options specified on the opening tag.
        parent
            The parent TagOptions, if the tag is being rendered inside another tag,
            otherwise None.
        context
            The keyword argument dictionary passed into the format call.
    """
    options = TagOptions(tag_name.strip().lower(), **kwargs)
    self.recognized_tags[options.tag_name] = (render_func, options)
```
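A sketch of a custom tag built on this API; the `[spoiler]` tag and its HTML are invented for illustration:

```python
import bbcode

parser = bbcode.Parser()

def render_spoiler(tag_name, value, options, parent, context):
    # value is the already-rendered text between [spoiler] and [/spoiler].
    title = options.get("spoiler", "Spoiler") if options else "Spoiler"
    return "<details><summary>%s</summary>%s</details>" % (title, value)

parser.add_formatter("spoiler", render_spoiler)
print(parser.format("[spoiler=Ending]the butler did it[/spoiler]"))
# <details><summary>Ending</summary>the butler did it</details>
```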
dcwatson/bbcode | bbcode.py | Parser.add_simple_formatter | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L138-L149

```python
def add_simple_formatter(self, tag_name, format_string, **kwargs):
    """
    Installs a formatter that takes the tag options dictionary, puts a value key
    in it, and uses it as a format dictionary to the given format string.
    """
    def _render(name, value, options, parent, context):
        fmt = {}
        if options:
            fmt.update(options)
        fmt.update({'value': value})
        return format_string % fmt
    self.add_formatter(tag_name, _render, **kwargs)
```
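For tags that simply wrap their contents in markup, this is all that is needed; the `[mono]` tag here is made up:

```python
import bbcode

parser = bbcode.Parser()
parser.add_simple_formatter("mono", "<tt>%(value)s</tt>")
print(parser.format("[mono]fixed width[/mono]"))
# <tt>fixed width</tt>
```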
dcwatson/bbcode | bbcode.py | Parser.install_default_formatters | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L151-L220

```python
def install_default_formatters(self):
    """
    Installs default formatters for the following tags:

        b, i, u, s, list (and \*), quote, code, center, color, url
    """
    self.add_simple_formatter('b', '<strong>%(value)s</strong>')
    self.add_simple_formatter('i', '<em>%(value)s</em>')
    self.add_simple_formatter('u', '<u>%(value)s</u>')
    self.add_simple_formatter('s', '<strike>%(value)s</strike>')
    self.add_simple_formatter('hr', '<hr />', standalone=True)
    self.add_simple_formatter('sub', '<sub>%(value)s</sub>')
    self.add_simple_formatter('sup', '<sup>%(value)s</sup>')

    def _render_list(name, value, options, parent, context):
        list_type = options['list'] if (options and 'list' in options) else '*'
        css_opts = {
            '1': 'decimal', '01': 'decimal-leading-zero',
            'a': 'lower-alpha', 'A': 'upper-alpha',
            'i': 'lower-roman', 'I': 'upper-roman',
        }
        tag = 'ol' if list_type in css_opts else 'ul'
        css = ' style="list-style-type:%s;"' % css_opts[list_type] if list_type in css_opts else ''
        return '<%s%s>%s</%s>' % (tag, css, value, tag)
    self.add_formatter('list', _render_list, transform_newlines=False, strip=True, swallow_trailing_newline=True)

    # Make sure transform_newlines = False for [*], so [code] tags can be embedded without transformation.
    def _render_list_item(name, value, options, parent, context):
        if not parent or parent.tag_name != 'list':
            return '[*]%s<br />' % value
        return '<li>%s</li>' % value
    self.add_formatter('*', _render_list_item, newline_closes=True, transform_newlines=False,
                       same_tag_closes=True, strip=True)

    self.add_simple_formatter('quote', '<blockquote>%(value)s</blockquote>', strip=True,
                              swallow_trailing_newline=True)
    self.add_simple_formatter('code', '<code>%(value)s</code>', render_embedded=False, transform_newlines=False,
                              swallow_trailing_newline=True, replace_cosmetic=False)
    self.add_simple_formatter('center', '<div style="text-align:center;">%(value)s</div>')

    def _render_color(name, value, options, parent, context):
        if 'color' in options:
            color = options['color'].strip()
        elif options:
            color = list(options.keys())[0].strip()
        else:
            return value
        match = re.match(r'^([a-z]+)|^(#[a-f0-9]{3,6})', color, re.I)
        color = match.group() if match else 'inherit'
        return '<span style="color:%(color)s;">%(value)s</span>' % {
            'color': color,
            'value': value,
        }
    self.add_formatter('color', _render_color)

    def _render_url(name, value, options, parent, context):
        if options and 'url' in options:
            # Option values are not escaped for HTML output.
            href = self._replace(options['url'], self.REPLACE_ESCAPE)
        else:
            href = value
        # Completely ignore javascript: and data: "links".
        if re.sub(r'[^a-z0-9+]', '', href.lower().split(':', 1)[0]) in ('javascript', 'data', 'vbscript'):
            return ''
        # Only add the missing http:// if it looks like it starts with a domain name.
        if '://' not in href and _domain_re.match(href):
            href = 'http://' + href
        return self.url_template.format(href=href.replace('"', '%22'), text=value)
    self.add_formatter('url', _render_url, replace_links=False, replace_cosmetic=False)
```
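With the defaults installed, a numbered list renders as an `<ol>` with the matching CSS list style; the output below is what the `_render_list`/`_render_list_item` pair above should produce for a one-line input (multi-line inputs differ slightly because of newline handling):

```python
import bbcode

parser = bbcode.Parser()  # default formatters are installed by the constructor
print(parser.format("[list=1][*]alpha[*]beta[/list]"))
# <ol style="list-style-type:decimal;"><li>alpha</li><li>beta</li></ol>
```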
dcwatson/bbcode | bbcode.py | Parser._replace | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L222-L229

```python
def _replace(self, data, replacements):
    """
    Given a list of 2-tuples (find, repl) this function performs all
    replacements on the input and returns the result.
    """
    for find, repl in replacements:
        data = data.replace(find, repl)
    return data
```
dcwatson/bbcode | bbcode.py | Parser._newline_tokenize | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L231-L244

```python
def _newline_tokenize(self, data):
    """
    Given a string that does not contain any tags, this function will
    return a list of NEWLINE and DATA tokens such that if you concatenate
    their data, you will have the original string.
    """
    parts = data.split('\n')
    tokens = []
    for num, part in enumerate(parts):
        if part:
            tokens.append((self.TOKEN_DATA, None, None, part))
        if num < (len(parts) - 1):
            tokens.append((self.TOKEN_NEWLINE, None, None, '\n'))
    return tokens
```
dcwatson/bbcode | bbcode.py | Parser._parse_opts | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L246-L324

```python
def _parse_opts(self, data):
    """
    Given a tag string, this function will parse any options out of it and
    return a tuple of (tag_name, options_dict). Options may be quoted in order
    to preserve spaces, and free-standing options are allowed. The tag name
    itself may also serve as an option if it is immediately followed by an equal
    sign. Here are some examples:

        quote author="Dan Watson"
            tag_name=quote, options={'author': 'Dan Watson'}
        url="http://test.com/s.php?a=bcd efg" popup
            tag_name=url, options={'url': 'http://test.com/s.php?a=bcd efg', 'popup': ''}
    """
    name = None
    try:
        # OrderedDict is only available for 2.7+, so leave regular unsorted dicts as a fallback.
        from collections import OrderedDict
        opts = OrderedDict()
    except ImportError:
        opts = {}
    in_value = False
    in_quote = False
    attr = ''
    value = ''
    attr_done = False
    stripped = data.strip()
    ls = len(stripped)
    pos = 0
    while pos < ls:
        ch = stripped[pos]
        if in_value:
            if in_quote:
                if ch == '\\' and ls > pos + 1 and stripped[pos + 1] in ('\\', '"', "'"):
                    value += stripped[pos + 1]
                    pos += 1
                elif ch == in_quote:
                    in_quote = False
                    in_value = False
                    if attr:
                        opts[attr.lower()] = value.strip()
                    attr = ''
                    value = ''
                else:
                    value += ch
            else:
                if ch in ('"', "'"):
                    in_quote = ch
                elif ch == ' ' and data.find('=', pos + 1) > 0:
                    # If there is no = after this, the value may accept spaces.
                    opts[attr.lower()] = value.strip()
                    attr = ''
                    value = ''
                    in_value = False
                else:
                    value += ch
        else:
            if ch == '=':
                in_value = True
                if name is None:
                    name = attr
            elif ch == ' ':
                attr_done = True
            else:
                if attr_done:
                    if attr:
                        if name is None:
                            name = attr
                        else:
                            opts[attr.lower()] = ''
                        attr = ''
                    attr_done = False
                attr += ch
        pos += 1
    if attr:
        if name is None:
            name = attr
        opts[attr.lower()] = value.strip()
    return name.lower(), opts
```
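The docstring examples, run through the method (`_parse_opts` is private, so this is a debugging sketch rather than public API; options come back as an OrderedDict, converted to a plain dict here for readability):

```python
import bbcode

parser = bbcode.Parser()

name, opts = parser._parse_opts('quote author="Dan Watson"')
print(name, dict(opts))
# quote {'author': 'Dan Watson'}

name, opts = parser._parse_opts('url="http://test.com/s.php?a=bcd efg" popup')
print(name, dict(opts))
# url {'url': 'http://test.com/s.php?a=bcd efg', 'popup': ''}
```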
dcwatson/bbcode | bbcode.py | Parser._parse_tag | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L326-L345

```python
def _parse_tag(self, tag):
    """
    Given a tag string (characters enclosed by []), this function will
    parse any options and return a tuple of the form:

        (valid, tag_name, closer, options)
    """
    if not tag.startswith(self.tag_opener) or not tag.endswith(self.tag_closer) or ('\n' in tag) or ('\r' in tag):
        return (False, tag, False, None)
    tag_name = tag[len(self.tag_opener):-len(self.tag_closer)].strip()
    if not tag_name:
        return (False, tag, False, None)
    closer = False
    opts = {}
    if tag_name[0] == '/':
        tag_name = tag_name[1:]
        closer = True
    # Parse options inside the opening tag, if needed.
    if (('=' in tag_name) or (' ' in tag_name)) and not closer:
        tag_name, opts = self._parse_opts(tag_name)
    return (True, tag_name.strip().lower(), closer, opts)
```
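What the tuple looks like for a few inputs (again a private-method sketch):

```python
import bbcode

parser = bbcode.Parser()
print(parser._parse_tag("[b]"))     # (True, 'b', False, {})
print(parser._parse_tag("[/b]"))    # (True, 'b', True, {})
print(parser._parse_tag("[not a"))  # (False, '[not a', False, None)
```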
dcwatson/bbcode | bbcode.py | Parser._tag_extent | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L347-L370

```python
def _tag_extent(self, data, start):
    """
    Finds the extent of a tag, accounting for option quoting and new tags starting before the current one closes.
    Returns (end_pos, found_close), where found_close is False if another tag started before this one closed.
    """
    in_quote = False
    quotable = False
    lto = len(self.tag_opener)
    ltc = len(self.tag_closer)
    for i in xrange(start + 1, len(data)):
        ch = data[i]
        if ch == '=':
            quotable = True
        if ch in ('"', "'"):
            if quotable and not in_quote:
                in_quote = ch
            elif in_quote == ch:
                in_quote = False
                quotable = False
        if not in_quote and data[i:i + lto] == self.tag_opener:
            return i, False
        if not in_quote and data[i:i + ltc] == self.tag_closer:
            return i + ltc, True
    return len(data), False
```
dcwatson/bbcode | bbcode.py | Parser.tokenize | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L372-L426

```python
def tokenize(self, data):
    """
    Tokenizes the given string. A token is a 4-tuple of the form:

        (token_type, tag_name, tag_options, token_text)

    token_type
        One of: TOKEN_TAG_START, TOKEN_TAG_END, TOKEN_NEWLINE, TOKEN_DATA
    tag_name
        The name of the tag if token_type=TOKEN_TAG_*, otherwise None
    tag_options
        A dictionary of options specified for TOKEN_TAG_START, otherwise None
    token_text
        The original token text
    """
    data = data.replace('\r\n', '\n').replace('\r', '\n')
    pos = start = end = 0
    ld = len(data)
    tokens = []
    while pos < ld:
        start = data.find(self.tag_opener, pos)
        if start >= pos:
            # Check to see if there was data between this start and the last end.
            if start > pos:
                tl = self._newline_tokenize(data[pos:start])
                tokens.extend(tl)
                pos = start
            # Find the extent of this tag, if it's ever closed.
            end, found_close = self._tag_extent(data, start)
            if found_close:
                tag = data[start:end]
                valid, tag_name, closer, opts = self._parse_tag(tag)
                # Make sure this is a well-formed, recognized tag, otherwise it's just data.
                if valid and tag_name in self.recognized_tags:
                    if closer:
                        tokens.append((self.TOKEN_TAG_END, tag_name, None, tag))
                    else:
                        tokens.append((self.TOKEN_TAG_START, tag_name, opts, tag))
                elif valid and self.drop_unrecognized and tag_name not in self.recognized_tags:
                    # If we found a valid (but unrecognized) tag and self.drop_unrecognized is True, just drop it.
                    pass
                else:
                    tokens.extend(self._newline_tokenize(tag))
            else:
                # We didn't find a closing tag, tack it on as text.
                tokens.extend(self._newline_tokenize(data[start:end]))
            pos = end
        else:
            # No more tags left to parse.
            break
    if pos < ld:
        tl = self._newline_tokenize(data[pos:])
        tokens.extend(tl)
    return tokens
```
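The token stream for a short input; the first element of each tuple is one of the integer TOKEN_* constants, shown here by name:

```python
import bbcode

parser = bbcode.Parser()
for token in parser.tokenize("[b]hello[/b]\nworld"):
    print(token)
# (TOKEN_TAG_START, 'b', {}, '[b]')
# (TOKEN_DATA, None, None, 'hello')
# (TOKEN_TAG_END, 'b', None, '[/b]')
# (TOKEN_NEWLINE, None, None, '\n')
# (TOKEN_DATA, None, None, 'world')
```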
dcwatson/bbcode | bbcode.py | Parser._find_closing_token | python
https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L428-L471

```python
def _find_closing_token(self, tag, tokens, pos):
    """
    Given the current tag options, a list of tokens, and the current position
    in the token list, this function will find the position of the closing token
    associated with the specified tag. This may be a closing tag, a newline, or
    simply the end of the list (to ensure tags are closed). This function should
    return a tuple of the form (end_pos, consume), where consume should indicate
    whether the ending token should be consumed or not.
    """
    embed_count = 0
    block_count = 0
    lt = len(tokens)
    while pos < lt:
        token_type, tag_name, tag_opts, token_text = tokens[pos]
        if token_type == self.TOKEN_DATA:
            # Short-circuit for performance.
            pos += 1
            continue
        if tag.newline_closes and token_type in (self.TOKEN_TAG_START, self.TOKEN_TAG_END):
            # If we're finding the closing token for a tag that is closed by newlines, but
            # there is an embedded tag that doesn't transform newlines (i.e. a code tag
            # that keeps newlines intact), we need to skip over that.
            inner_tag = self.recognized_tags[tag_name][1]
            if not inner_tag.transform_newlines:
                if token_type == self.TOKEN_TAG_START:
                    block_count += 1
                else:
                    block_count -= 1
        if token_type == self.TOKEN_NEWLINE and tag.newline_closes and block_count == 0:
            # If for some crazy reason there are embedded tags that both close on newline,
            # the first newline will automatically close all those nested tags.
            return pos, True
        elif token_type == self.TOKEN_TAG_START and tag_name == tag.tag_name:
            if tag.same_tag_closes:
                return pos, False
            if tag.render_embedded:
                embed_count += 1
        elif token_type == self.TOKEN_TAG_END and tag_name == tag.tag_name:
            if embed_count > 0:
                embed_count -= 1
            else:
                return pos, True
        pos += 1
    return pos, True
```
dcwatson/bbcode | bbcode.py | Parser._link_replace | def _link_replace(self, match, **context):
"""
Callback for re.sub to replace link text with markup. Turns out using a callback function
is actually faster than using backrefs, plus this lets us provide a hook for user customization.
linker_takes_context=True means that the linker gets passed context like a standard format function.
"""
url = match.group(0)
if self.linker:
if self.linker_takes_context:
return self.linker(url, context)
else:
return self.linker(url)
else:
href = url
if '://' not in href:
href = 'http://' + href
# Escape quotes to avoid XSS, let the browser escape the rest.
return self.url_template.format(href=href.replace('"', '%22'), text=url) | python | def _link_replace(self, match, **context):
"""
Callback for re.sub to replace link text with markup. Turns out using a callback function
is actually faster than using backrefs, plus this lets us provide a hook for user customization.
linker_takes_context=True means that the linker gets passed context like a standard format function.
"""
url = match.group(0)
if self.linker:
if self.linker_takes_context:
return self.linker(url, context)
else:
return self.linker(url)
else:
href = url
if '://' not in href:
href = 'http://' + href
# Escape quotes to avoid XSS, let the browser escape the rest.
return self.url_template.format(href=href.replace('"', '%22'), text=url) | Callback for re.sub to replace link text with markup. Turns out using a callback function
is actually faster than using backrefs, plus this lets us provide a hook for user customization.
linker_takes_context=True means that the linker gets passed context like a standard format function. | https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L473-L490 |
dcwatson/bbcode | bbcode.py | Parser._transform | def _transform(self, data, escape_html, replace_links, replace_cosmetic, transform_newlines, **context):
"""
Transforms the input string based on the options specified, taking into account
whether the option is enabled globally for this parser.
"""
url_matches = {}
if self.replace_links and replace_links:
# If we're replacing links in the text (i.e. not those in [url] tags) then we need to be
# careful to pull them out before doing any escaping or cosmetic replacement.
pos = 0
while True:
match = _url_re.search(data, pos)
if not match:
break
# Replace any link with a token that we can substitute back in after replacements.
token = '{{ bbcode-link-%s }}' % len(url_matches)
url_matches[token] = self._link_replace(match, **context)
start, end = match.span()
data = data[:start] + token + data[end:]
# To be perfectly accurate, this should probably be len(data[:start] + token), but
# start will work, because the token itself won't match as a URL.
pos = start
if escape_html:
data = self._replace(data, self.REPLACE_ESCAPE)
if replace_cosmetic:
data = self._replace(data, self.REPLACE_COSMETIC)
# Now put the replaced links back in the text.
for token, replacement in url_matches.items():
data = data.replace(token, replacement)
if transform_newlines:
data = data.replace('\n', '\r')
return data | python | def _transform(self, data, escape_html, replace_links, replace_cosmetic, transform_newlines, **context):
"""
Transforms the input string based on the options specified, taking into account
whether the option is enabled globally for this parser.
"""
url_matches = {}
if self.replace_links and replace_links:
# If we're replacing links in the text (i.e. not those in [url] tags) then we need to be
# careful to pull them out before doing any escaping or cosmetic replacement.
pos = 0
while True:
match = _url_re.search(data, pos)
if not match:
break
# Replace any link with a token that we can substitute back in after replacements.
token = '{{ bbcode-link-%s }}' % len(url_matches)
url_matches[token] = self._link_replace(match, **context)
start, end = match.span()
data = data[:start] + token + data[end:]
# To be perfectly accurate, this should probably be len(data[:start] + token), but
# start will work, because the token itself won't match as a URL.
pos = start
if escape_html:
data = self._replace(data, self.REPLACE_ESCAPE)
if replace_cosmetic:
data = self._replace(data, self.REPLACE_COSMETIC)
# Now put the replaced links back in the text.
for token, replacement in url_matches.items():
data = data.replace(token, replacement)
if transform_newlines:
data = data.replace('\n', '\r')
return data | Transforms the input string based on the options specified, taking into account
whether the option is enabled globally for this parser. | https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L492-L523 |
dcwatson/bbcode | bbcode.py | Parser.format | def format(self, data, **context):
"""
Formats the input text using any installed renderers. Any context keyword arguments
given here will be passed along to the render functions as a context dictionary.
"""
tokens = self.tokenize(data)
full_context = self.default_context.copy()
full_context.update(context)
return self._format_tokens(tokens, None, **full_context).replace('\r', self.newline) | python | def format(self, data, **context):
"""
Formats the input text using any installed renderers. Any context keyword arguments
given here will be passed along to the render functions as a context dictionary.
"""
tokens = self.tokenize(data)
full_context = self.default_context.copy()
full_context.update(context)
return self._format_tokens(tokens, None, **full_context).replace('\r', self.newline) | Formats the input text using any installed renderers. Any context keyword arguments
given here will be passed along to the render functions as a context dictionary. | https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L578-L586 |
dcwatson/bbcode | bbcode.py | Parser.strip | def strip(self, data, strip_newlines=False):
"""
Strips out any tags from the input text, using the same tokenization as the formatter.
"""
text = []
for token_type, tag_name, tag_opts, token_text in self.tokenize(data):
if token_type == self.TOKEN_DATA:
text.append(token_text)
elif token_type == self.TOKEN_NEWLINE and not strip_newlines:
text.append(token_text)
return ''.join(text) | python | def strip(self, data, strip_newlines=False):
"""
Strips out any tags from the input text, using the same tokenization as the formatter.
"""
text = []
for token_type, tag_name, tag_opts, token_text in self.tokenize(data):
if token_type == self.TOKEN_DATA:
text.append(token_text)
elif token_type == self.TOKEN_NEWLINE and not strip_newlines:
text.append(token_text)
return ''.join(text) | Strips out any tags from the input text, using the same tokenization as the formatter. | https://github.com/dcwatson/bbcode/blob/eb6f7ff140a78ddb1641102d7382479c4d7c1c78/bbcode.py#L588-L598 |
astroML/gatspy | gatspy/periodic/naive_multiband.py | mode_in_range | def mode_in_range(a, axis=0, tol=1E-3):
"""Find the mode of values to within a certain range"""
a_trunc = a // tol
vals, counts = mode(a_trunc, axis)
mask = (a_trunc == vals)
# mean of the values in the modal bin, taken along the given axis
return np.sum(a * mask, axis) / np.sum(mask, axis) | python | def mode_in_range(a, axis=0, tol=1E-3):
"""Find the mode of values to within a certain range"""
a_trunc = a // tol
vals, counts = mode(a_trunc, axis)
mask = (a_trunc == vals)
# mean of the values in the modal bin, taken along the given axis
return np.sum(a * mask, axis) / np.sum(mask, axis) | Find the mode of values to within a certain range | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/naive_multiband.py#L18-L24 |
astroML/gatspy | gatspy/periodic/naive_multiband.py | NaiveMultiband.scores | def scores(self, periods):
"""Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
scores : dict
Dictionary of scores. Dictionary keys are the unique filter names
passed to fit()
"""
return dict([(filt, model.score(periods))
for (filt, model) in self.models_.items()]) | python | def scores(self, periods):
"""Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
scores : dict
Dictionary of scores. Dictionary keys are the unique filter names
passed to fit()
"""
return dict([(filt, model.score(periods))
for (filt, model) in self.models_.items()]) | Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
scores : dict
Dictionary of scores. Dictionary keys are the unique filter names
passed to fit() | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/naive_multiband.py#L78-L93 |
astroML/gatspy | gatspy/periodic/naive_multiband.py | NaiveMultiband.best_periods | def best_periods(self):
"""Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
best_periods : dict
Dictionary of best periods. Dictionary keys are the unique filter
names passed to fit()
"""
for (key, model) in self.models_.items():
model.optimizer = self.optimizer
return dict((filt, model.best_period)
for (filt, model) in self.models_.items()) | python | def best_periods(self):
"""Compute the scores under the various models
Parameters
----------
periods : array_like
array of periods at which to compute scores
Returns
-------
best_periods : dict
Dictionary of best periods. Dictionary keys are the unique filter
names passed to fit()
"""
for (key, model) in self.models_.items():
model.optimizer = self.optimizer
return dict((filt, model.best_period)
for (filt, model) in self.models_.items()) | Find the best period for each of the single-band models
Returns
-------
best_periods : dict
Dictionary of best periods. Dictionary keys are the unique filter
names passed to fit() | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/naive_multiband.py#L95-L113 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.fit | def fit(self, t, y, dy=None):
"""Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values
"""
# For linear models, dy=1 is equivalent to no errors
if dy is None:
dy = 1
self.t, self.y, self.dy = np.broadcast_arrays(t, y, dy)
self._fit(self.t, self.y, self.dy)
self._best_period = None # reset best period in case of refitting
if self.fit_period:
self._best_period = self._calc_best_period()
return self | python | def fit(self, t, y, dy=None):
"""Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values
"""
# For linear models, dy=1 is equivalent to no errors
if dy is None:
dy = 1
self.t, self.y, self.dy = np.broadcast_arrays(t, y, dy)
self._fit(self.t, self.y, self.dy)
self._best_period = None # reset best period in case of refitting
if self.fit_period:
self._best_period = self._calc_best_period()
return self | Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L27-L51 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.predict | def predict(self, t, period=None):
"""Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t
"""
t = np.asarray(t)
if period is None:
period = self.best_period
result = self._predict(t.ravel(), period=period)
return result.reshape(t.shape) | python | def predict(self, t, period=None):
"""Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t
"""
t = np.asarray(t)
if period is None:
period = self.best_period
result = self._predict(t.ravel(), period=period)
return result.reshape(t.shape) | Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L53-L73 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.score_frequency_grid | def score_frequency_grid(self, f0, df, N):
"""Compute the score on a frequency grid.
Some models can compute results faster if the inputs are passed in this
manner.
Parameters
----------
f0, df, N : (float, float, int)
parameters describing the frequency grid freq = f0 + df * arange(N)
Note that these are frequencies, not angular frequencies.
Returns
-------
score : ndarray
the length-N array giving the score at each frequency
"""
return self._score_frequency_grid(f0, df, N) | python | def score_frequency_grid(self, f0, df, N):
"""Compute the score on a frequency grid.
Some models can compute results faster if the inputs are passed in this
manner.
Parameters
----------
f0, df, N : (float, float, int)
parameters describing the frequency grid freq = f0 + df * arange(N)
Note that these are frequencies, not angular frequencies.
Returns
-------
score : ndarray
the length-N array giving the score at each frequency
"""
return self._score_frequency_grid(f0, df, N) | Compute the score on a frequency grid.
Some models can compute results faster if the inputs are passed in this
manner.
Parameters
----------
f0, df, N : (float, float, int)
parameters describing the frequency grid freq = f0 + df * arange(N)
Note that these are frequencies, not angular frequencies.
Returns
-------
score : ndarray
the length-N array giving the score at each frequency | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L75-L92 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.periodogram_auto | def periodogram_auto(self, oversampling=5, nyquist_factor=3,
return_periods=True):
"""Compute the periodogram on an automatically-determined grid
This function uses heuristic arguments to choose a suitable frequency
grid for the data. Note that depending on the data window function,
the model may be sensitive to periodicity at higher frequencies than
this function returns!
The final number of frequencies will be
Nf = oversampling * nyquist_factor * len(t) / 2
Parameters
----------
oversampling : float
the number of samples per approximate peak width
nyquist_factor : float
the highest frequency, in units of the nyquist frequency for points
spread uniformly through the data range.
Returns
-------
period : ndarray
the grid of periods
power : ndarray
the power at each frequency
"""
N = len(self.t)
T = np.max(self.t) - np.min(self.t)
df = 1. / T / oversampling
f0 = df
Nf = int(0.5 * oversampling * nyquist_factor * N)
freq = f0 + df * np.arange(Nf)
return 1. / freq, self._score_frequency_grid(f0, df, Nf) | python | def periodogram_auto(self, oversampling=5, nyquist_factor=3,
return_periods=True):
"""Compute the periodogram on an automatically-determined grid
This function uses heuristic arguments to choose a suitable frequency
grid for the data. Note that depending on the data window function,
the model may be sensitive to periodicity at higher frequencies than
this function returns!
The final number of frequencies will be
Nf = oversampling * nyquist_factor * len(t) / 2
Parameters
----------
oversampling : float
the number of samples per approximate peak width
nyquist_factor : float
the highest frequency, in units of the nyquist frequency for points
spread uniformly through the data range.
Returns
-------
period : ndarray
the grid of periods
power : ndarray
the power at each frequency
"""
N = len(self.t)
T = np.max(self.t) - np.min(self.t)
df = 1. / T / oversampling
f0 = df
Nf = int(0.5 * oversampling * nyquist_factor * N)
freq = f0 + df * np.arange(Nf)
return 1. / freq, self._score_frequency_grid(f0, df, Nf) | Compute the periodogram on an automatically-determined grid
This function uses heuristic arguments to choose a suitable frequency
grid for the data. Note that depending on the data window function,
the model may be sensitive to periodicity at higher frequencies than
this function returns!
The final number of frequencies will be
Nf = oversampling * nyquist_factor * len(t) / 2
Parameters
----------
oversampling : float
the number of samples per approximate peak width
nyquist_factor : float
the highest frequency, in units of the nyquist frequency for points
spread uniformly through the data range.
Returns
-------
period : ndarray
the grid of periods
power : ndarray
the power at each frequency | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L94-L127 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.score | def score(self, periods=None):
"""Compute the periodogram for the given period or periods
Parameters
----------
periods : float or array_like
Array of periods at which to compute the periodogram.
Returns
-------
scores : np.ndarray
Array of normalized powers (between 0 and 1) for each period.
Shape of scores matches the shape of the provided periods.
"""
periods = np.asarray(periods)
return self._score(periods.ravel()).reshape(periods.shape) | python | def score(self, periods=None):
"""Compute the periodogram for the given period or periods
Parameters
----------
periods : float or array_like
Array of periods at which to compute the periodogram.
Returns
-------
scores : np.ndarray
Array of normalized powers (between 0 and 1) for each period.
Shape of scores matches the shape of the provided periods.
"""
periods = np.asarray(periods)
return self._score(periods.ravel()).reshape(periods.shape) | Compute the periodogram for the given period or periods
Parameters
----------
periods : float or array_like
Array of periods at which to compute the periodogram.
Returns
-------
scores : np.ndarray
Array of normalized powers (between 0 and 1) for each period.
Shape of scores matches the shape of the provided periods. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L129-L144 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.best_period | def best_period(self):
"""Lazy evaluation of the best period given the model"""
if self._best_period is None:
self._best_period = self._calc_best_period()
return self._best_period | python | def best_period(self):
"""Lazy evaluation of the best period given the model"""
if self._best_period is None:
self._best_period = self._calc_best_period()
return self._best_period | Lazy evaluation of the best period given the model | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L149-L153 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModeler.find_best_periods | def find_best_periods(self, n_periods=5, return_scores=False):
"""Find the top several best periods for the model"""
return self.optimizer.find_best_periods(self, n_periods,
return_scores=return_scores) | python | def find_best_periods(self, n_periods=5, return_scores=False):
"""Find the top several best periods for the model"""
return self.optimizer.find_best_periods(self, n_periods,
return_scores=return_scores) | Find the top several best periods for the model | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L155-L158 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModelerMultiband.fit | def fit(self, t, y, dy=None, filts=0):
"""Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values
filts : array_like (optional)
The array specifying the filter/bandpass for each observation.
"""
self.unique_filts_ = np.unique(filts)
# For linear models, dy=1 is equivalent to no errors
if dy is None:
dy = 1
all_data = np.broadcast_arrays(t, y, dy, filts)
self.t, self.y, self.dy, self.filts = map(np.ravel, all_data)
self._fit(self.t, self.y, self.dy, self.filts)
self._best_period = None # reset best period in case of refitting
if self.fit_period:
self._best_period = self._calc_best_period()
return self | python | def fit(self, t, y, dy=None, filts=0):
"""Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values
filts : array_like (optional)
The array specifying the filter/bandpass for each observation.
"""
self.unique_filts_ = np.unique(filts)
# For linear models, dy=1 is equivalent to no errors
if dy is None:
dy = 1
all_data = np.broadcast_arrays(t, y, dy, filts)
self.t, self.y, self.dy, self.filts = map(np.ravel, all_data)
self._fit(self.t, self.y, self.dy, self.filts)
self._best_period = None # reset best period in case of refitting
if self.fit_period:
self._best_period = self._calc_best_period()
return self | Fit the multiterm Periodogram model to the data.
Parameters
----------
t : array_like, one-dimensional
sequence of observation times
y : array_like, one-dimensional
sequence of observed values
dy : float or array_like (optional)
errors on observed values
filts : array_like (optional)
The array specifying the filter/bandpass for each observation. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L186-L214 |
astroML/gatspy | gatspy/periodic/modeler.py | PeriodicModelerMultiband.predict | def predict(self, t, filts, period=None):
"""Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
filts : array_like
the array specifying the filter/bandpass for each observation. This
is used only in multiband periodograms.
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t
"""
unique_filts = set(np.unique(filts))
if not unique_filts.issubset(self.unique_filts_):
raise ValueError("filts does not match training data: "
"input: {0} output: {1}"
"".format(set(self.unique_filts_),
set(unique_filts)))
t, filts = np.broadcast_arrays(t, filts)
if period is None:
period = self.best_period
result = self._predict(t.ravel(), filts=filts.ravel(), period=period)
return result.reshape(t.shape) | python | def predict(self, t, filts, period=None):
"""Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
filts : array_like
the array specifying the filter/bandpass for each observation. This
is used only in multiband periodograms.
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t
"""
unique_filts = set(np.unique(filts))
if not unique_filts.issubset(self.unique_filts_):
raise ValueError("filts does not match training data: "
"input: {0} output: {1}"
"".format(set(self.unique_filts_),
set(unique_filts)))
t, filts = np.broadcast_arrays(t, filts)
if period is None:
period = self.best_period
result = self._predict(t.ravel(), filts=filts.ravel(), period=period)
return result.reshape(t.shape) | Compute the best-fit model at ``t`` for a given period
Parameters
----------
t : float or array_like
times at which to predict
filts : array_like
the array specifying the filter/bandpass for each observation. This
is used only in multiband periodograms.
period : float (optional)
The period at which to compute the model. If not specified, it
will be computed via the optimizer provided at initialization.
Returns
-------
y : np.ndarray
predicted model values at times t | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/modeler.py#L216-L247 |
astroML/gatspy | gatspy/periodic/_least_squares_mixin.py | LeastSquaresMixin._construct_X_M | def _construct_X_M(self, omega, **kwargs):
"""Construct the weighted normal matrix of the problem"""
X = self._construct_X(omega, weighted=True, **kwargs)
M = np.dot(X.T, X)
if getattr(self, 'regularization', None) is not None:
diag = M.ravel(order='K')[::M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.sum() * np.asarray(self.regularization)
else:
diag += np.asarray(self.regularization)
return X, M | python | def _construct_X_M(self, omega, **kwargs):
"""Construct the weighted normal matrix of the problem"""
X = self._construct_X(omega, weighted=True, **kwargs)
M = np.dot(X.T, X)
if getattr(self, 'regularization', None) is not None:
diag = M.ravel(order='K')[::M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.sum() * np.asarray(self.regularization)
else:
diag += np.asarray(self.regularization)
return X, M | Construct the weighted normal matrix of the problem | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/_least_squares_mixin.py#L11-L23 |
astroML/gatspy | gatspy/periodic/_least_squares_mixin.py | LeastSquaresMixin._compute_ymean | def _compute_ymean(self, **kwargs):
"""Compute the (weighted) mean of the y data"""
y = np.asarray(kwargs.get('y', self.y))
dy = np.asarray(kwargs.get('dy', self.dy))
if dy.size == 1:
return np.mean(y)
else:
return np.average(y, weights=1 / dy ** 2) | python | def _compute_ymean(self, **kwargs):
"""Compute the (weighted) mean of the y data"""
y = np.asarray(kwargs.get('y', self.y))
dy = np.asarray(kwargs.get('dy', self.dy))
if dy.size == 1:
return np.mean(y)
else:
return np.average(y, weights=1 / dy ** 2) | Compute the (weighted) mean of the y data | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/_least_squares_mixin.py#L25-L33 |
astroML/gatspy | gatspy/periodic/lomb_scargle.py | LombScargle._construct_X | def _construct_X(self, omega, weighted=True, **kwargs):
"""Construct the design matrix for the problem"""
t = kwargs.get('t', self.t)
dy = kwargs.get('dy', self.dy)
fit_offset = kwargs.get('fit_offset', self.fit_offset)
if fit_offset:
offsets = [np.ones(len(t))]
else:
offsets = []
cols = sum(([np.sin((i + 1) * omega * t),
np.cos((i + 1) * omega * t)]
for i in range(self.Nterms)), offsets)
if weighted:
return np.transpose(np.vstack(cols) / dy)
else:
return np.transpose(np.vstack(cols)) | python | def _construct_X(self, omega, weighted=True, **kwargs):
"""Construct the design matrix for the problem"""
t = kwargs.get('t', self.t)
dy = kwargs.get('dy', self.dy)
fit_offset = kwargs.get('fit_offset', self.fit_offset)
if fit_offset:
offsets = [np.ones(len(t))]
else:
offsets = []
cols = sum(([np.sin((i + 1) * omega * t),
np.cos((i + 1) * omega * t)]
for i in range(self.Nterms)), offsets)
if weighted:
return np.transpose(np.vstack(cols) / dy)
else:
return np.transpose(np.vstack(cols)) | Construct the design matrix for the problem | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle.py#L92-L110 |
astroML/gatspy | gatspy/periodic/template_modeler.py | BaseTemplateModeler._interpolated_template | def _interpolated_template(self, templateid):
"""Return an interpolator for the given template"""
phase, y = self._get_template_by_id(templateid)
# double-check that phase ranges from 0 to 1
assert phase.min() >= 0
assert phase.max() <= 1
# at the start and end points, we need to add ~5 points to make sure
# the spline & derivatives wrap appropriately
phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y = np.concatenate([y[-5:], y, y[:5]])
# Univariate spline allows for derivatives; use this!
return UnivariateSpline(phase, y, s=0, k=5) | python | def _interpolated_template(self, templateid):
"""Return an interpolator for the given template"""
phase, y = self._get_template_by_id(templateid)
# double-check that phase ranges from 0 to 1
assert phase.min() >= 0
assert phase.max() <= 1
# at the start and end points, we need to add ~5 points to make sure
# the spline & derivatives wrap appropriately
phase = np.concatenate([phase[-5:] - 1, phase, phase[:5] + 1])
y = np.concatenate([y[-5:], y, y[:5]])
# Univariate spline allows for derivatives; use this!
return UnivariateSpline(phase, y, s=0, k=5) | Return an interpolator for the given template | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L39-L53 |
astroML/gatspy | gatspy/periodic/template_modeler.py | BaseTemplateModeler._eval_templates | def _eval_templates(self, period):
"""Evaluate the best template for the given period"""
theta_best = [self._optimize(period, tmpid)
for tmpid, _ in enumerate(self.templates)]
chi2 = [self._chi2(theta, period, tmpid)
for tmpid, theta in enumerate(theta_best)]
return theta_best, chi2 | python | def _eval_templates(self, period):
"""Evaluate the best template for the given period"""
theta_best = [self._optimize(period, tmpid)
for tmpid, _ in enumerate(self.templates)]
chi2 = [self._chi2(theta, period, tmpid)
for tmpid, theta in enumerate(theta_best)]
return theta_best, chi2 | Evaluate the best template for the given period | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L77-L84 |
astroML/gatspy | gatspy/periodic/template_modeler.py | BaseTemplateModeler._model | def _model(self, t, theta, period, tmpid):
"""Compute model at t for the given parameters, period, & template"""
template = self.templates[tmpid]
phase = (t / period - theta[2]) % 1
return theta[0] + theta[1] * template(phase) | python | def _model(self, t, theta, period, tmpid):
"""Compute model at t for the given parameters, period, & template"""
template = self.templates[tmpid]
phase = (t / period - theta[2]) % 1
return theta[0] + theta[1] * template(phase) | Compute model at t for the given parameters, period, & template | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L86-L90 |
astroML/gatspy | gatspy/periodic/template_modeler.py | BaseTemplateModeler._chi2 | def _chi2(self, theta, period, tmpid, return_gradient=False):
"""
Compute the chi2 for the given parameters, period, & template
Optionally return the gradient for faster optimization
"""
template = self.templates[tmpid]
phase = (self.t / period - theta[2]) % 1
model = theta[0] + theta[1] * template(phase)
chi2 = (((model - self.y) / self.dy) ** 2).sum()
if return_gradient:
grad = 2 * (model - self.y) / self.dy ** 2
gradient = np.array([np.sum(grad),
np.sum(grad * template(phase)),
-np.sum(grad * theta[1]
* template.derivative(1)(phase))])
return chi2, gradient
else:
return chi2 | python | def _chi2(self, theta, period, tmpid, return_gradient=False):
"""
Compute the chi2 for the given parameters, period, & template
Optionally return the gradient for faster optimization
"""
template = self.templates[tmpid]
phase = (self.t / period - theta[2]) % 1
model = theta[0] + theta[1] * template(phase)
chi2 = (((model - self.y) / self.dy) ** 2).sum()
if return_gradient:
grad = 2 * (model - self.y) / self.dy ** 2
gradient = np.array([np.sum(grad),
np.sum(grad * template(phase)),
-np.sum(grad * theta[1]
* template.derivative(1)(phase))])
return chi2, gradient
else:
return chi2 | Compute the chi2 for the given parameters, period, & template
Optionally return the gradient for faster optimization | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L92-L111 |
astroML/gatspy | gatspy/periodic/template_modeler.py | BaseTemplateModeler._optimize | def _optimize(self, period, tmpid, use_gradient=True):
"""Optimize the model for the given period & template"""
theta_0 = [self.y.min(), self.y.max() - self.y.min(), 0]
result = minimize(self._chi2, theta_0, jac=bool(use_gradient),
bounds=[(None, None), (0, None), (None, None)],
args=(period, tmpid, use_gradient))
return result.x | python | def _optimize(self, period, tmpid, use_gradient=True):
"""Optimize the model for the given period & template"""
theta_0 = [self.y.min(), self.y.max() - self.y.min(), 0]
result = minimize(self._chi2, theta_0, jac=bool(use_gradient),
bounds=[(None, None), (0, None), (None, None)],
args=(period, tmpid, use_gradient))
return result.x | Optimize the model for the given period & template | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/template_modeler.py#L113-L119 |
astroML/gatspy | gatspy/periodic/optimizer.py | LinearScanOptimizer.find_best_periods | def find_best_periods(self, model, n_periods=5, return_scores=False):
"""Find the `n_periods` best periods in the model"""
# compute the estimated peak width from the data range
tmin, tmax = np.min(model.t), np.max(model.t)
width = 2 * np.pi / (tmax - tmin)
# raise a ValueError if period limits are out of range
if tmax - tmin < np.max(self.period_range):
raise ValueError("The optimizer is not designed to search for "
"for periods larger than the data baseline. ")
# our candidate steps in omega is controlled by period_range & coverage
omega_step = width / self.first_pass_coverage
omega_min = 2 * np.pi / np.max(self.period_range)
omega_max = 2 * np.pi / np.min(self.period_range)
omegas = np.arange(omega_min, omega_max + omega_step, omega_step)
periods = 2 * np.pi / omegas
# print some updates if desired
if not self.quiet:
print("Finding optimal frequency:")
print(" - Estimated peak width = {0:.3g}".format(width))
print(" - Using {0} steps per peak; "
"omega_step = {1:.3g}".format(self.first_pass_coverage,
omega_step))
print(" - User-specified period range: "
" {0:.2g} to {1:.2g}".format(periods.min(), periods.max()))
print(" - Computing periods at {0:.0f} steps".format(len(periods)))
sys.stdout.flush()
# Compute the score on the initial grid
N = int(1 + width // omega_step)
score = model.score_frequency_grid(omega_min / (2 * np.pi),
omega_step / (2 * np.pi),
len(omegas))
# find initial candidates of unique peaks
minscore = score.min()
n_candidates = max(5, 2 * n_periods)
candidate_freqs = np.zeros(n_candidates)
candidate_scores = np.zeros(n_candidates)
for i in range(n_candidates):
j = np.argmax(score)
candidate_freqs[i] = omegas[j]
candidate_scores[i] = score[j]
score[max(0, j - N):(j + N)] = minscore
# If required, do a final pass on these unique peaks at higher resolution
if self.final_pass_coverage <= self.first_pass_coverage:
best_periods = 2 * np.pi / candidate_freqs[:n_periods]
best_scores = candidate_scores[:n_periods]
else:
f0 = -omega_step / (2 * np.pi)
df = width / self.final_pass_coverage / (2 * np.pi)
Nf = abs(2 * f0) // df
steps = f0 + df * np.arange(Nf)
candidate_freqs /= (2 * np.pi)
freqs = steps + candidate_freqs[:, np.newaxis]
periods = 1. / freqs
if not self.quiet:
print("Zooming-in on {0} candidate peaks:"
"".format(n_candidates))
print(" - Computing periods at {0:.0f} "
"steps".format(periods.size))
sys.stdout.flush()
#scores = model.score(periods)
scores = np.array([model.score_frequency_grid(c + f0, df, Nf)
for c in candidate_freqs])
best_scores = scores.max(1)
j = np.argmax(scores, 1)
i = np.argsort(best_scores)[::-1]
best_periods = periods[i, j[i]]
best_scores = best_scores[i]
if return_scores:
return best_periods[:n_periods], best_scores[:n_periods]
else:
return best_periods[:n_periods] | python | def find_best_periods(self, model, n_periods=5, return_scores=False):
"""Find the `n_periods` best periods in the model"""
# compute the estimated peak width from the data range
tmin, tmax = np.min(model.t), np.max(model.t)
width = 2 * np.pi / (tmax - tmin)
# raise a ValueError if period limits are out of range
if tmax - tmin < np.max(self.period_range):
raise ValueError("The optimizer is not designed to search for "
"for periods larger than the data baseline. ")
# our candidate steps in omega is controlled by period_range & coverage
omega_step = width / self.first_pass_coverage
omega_min = 2 * np.pi / np.max(self.period_range)
omega_max = 2 * np.pi / np.min(self.period_range)
omegas = np.arange(omega_min, omega_max + omega_step, omega_step)
periods = 2 * np.pi / omegas
# print some updates if desired
if not self.quiet:
print("Finding optimal frequency:")
print(" - Estimated peak width = {0:.3g}".format(width))
print(" - Using {0} steps per peak; "
"omega_step = {1:.3g}".format(self.first_pass_coverage,
omega_step))
print(" - User-specified period range: "
" {0:.2g} to {1:.2g}".format(periods.min(), periods.max()))
print(" - Computing periods at {0:.0f} steps".format(len(periods)))
sys.stdout.flush()
# Compute the score on the initial grid
N = int(1 + width // omega_step)
score = model.score_frequency_grid(omega_min / (2 * np.pi),
omega_step / (2 * np.pi),
len(omegas))
# find initial candidates of unique peaks
minscore = score.min()
n_candidates = max(5, 2 * n_periods)
candidate_freqs = np.zeros(n_candidates)
candidate_scores = np.zeros(n_candidates)
for i in range(n_candidates):
j = np.argmax(score)
candidate_freqs[i] = omegas[j]
candidate_scores[i] = score[j]
score[max(0, j - N):(j + N)] = minscore
# If required, do a final pass on these unique peaks at higher resolution
if self.final_pass_coverage <= self.first_pass_coverage:
best_periods = 2 * np.pi / candidate_freqs[:n_periods]
best_scores = candidate_scores[:n_periods]
else:
f0 = -omega_step / (2 * np.pi)
df = width / self.final_pass_coverage / (2 * np.pi)
Nf = abs(2 * f0) // df
steps = f0 + df * np.arange(Nf)
candidate_freqs /= (2 * np.pi)
freqs = steps + candidate_freqs[:, np.newaxis]
periods = 1. / freqs
if not self.quiet:
print("Zooming-in on {0} candidate peaks:"
"".format(n_candidates))
print(" - Computing periods at {0:.0f} "
"steps".format(periods.size))
sys.stdout.flush()
#scores = model.score(periods)
scores = np.array([model.score_frequency_grid(c + f0, df, Nf)
for c in candidate_freqs])
best_scores = scores.max(1)
j = np.argmax(scores, 1)
i = np.argsort(best_scores)[::-1]
best_periods = periods[i, j[i]]
best_scores = best_scores[i]
if return_scores:
return best_periods[:n_periods], best_scores[:n_periods]
else:
return best_periods[:n_periods] | Find the `n_periods` best periods in the model | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/optimizer.py#L74-L156 |
astroML/gatspy | gatspy/periodic/lomb_scargle_fast.py | factorial | def factorial(N):
"""Compute the factorial of N.
If N <= 10, use a fast lookup table; otherwise use scipy.special.factorial
"""
if N < len(FACTORIALS):
return FACTORIALS[N]
else:
from scipy import special
return int(special.factorial(N)) | python | def factorial(N):
"""Compute the factorial of N.
If N <= 10, use a fast lookup table; otherwise use scipy.special.factorial
"""
if N < len(FACTORIALS):
return FACTORIALS[N]
else:
from scipy import special
return int(special.factorial(N)) | Compute the factorial of N.
If N <= 10, use a fast lookup table; otherwise use scipy.special.factorial | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle_fast.py#L17-L25 |
astroML/gatspy | gatspy/periodic/lomb_scargle_fast.py | bitceil | def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
# Note: for Python 2.7 and 3.x, this is faster:
# return 1 << int(N - 1).bit_length()
N = int(N) - 1
for i in [1, 2, 4, 8, 16, 32]:
N |= N >> i
return N + 1 | python | def bitceil(N):
"""
Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N)))
"""
# Note: for Python 2.7 and 3.x, this is faster:
# return 1 << int(N - 1).bit_length()
N = int(N) - 1
for i in [1, 2, 4, 8, 16, 32]:
N |= N >> i
return N + 1 | Find the bit (i.e. power of 2) immediately greater than or equal to N
Note: this works for numbers up to 2 ** 64.
Roughly equivalent to int(2 ** np.ceil(np.log2(N))) | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle_fast.py#L28-L40 |
astroML/gatspy | gatspy/periodic/lomb_scargle_fast.py | extirpolate | def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using Lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result | python | def extirpolate(x, y, N=None, M=4):
"""
Extirpolate the values (x, y) onto an integer grid range(N),
using Lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583).
"""
x, y = map(np.ravel, np.broadcast_arrays(x, y))
if N is None:
N = int(np.max(x) + 0.5 * M + 1)
# Now use Lagrange polynomial weights to populate the results array;
# This is an efficient recursive implementation (See Press et al. 1989)
result = np.zeros(N, dtype=y.dtype)
# first take care of the easy cases where x is an integer
integers = (x % 1 == 0)
np.add.at(result, x[integers].astype(int), y[integers])
x, y = x[~integers], y[~integers]
# For each remaining x, find the index describing the extirpolation range.
# i.e. ilo[i] < x[i] < ilo[i] + M with x[i] in the center,
# adjusted so that the limits are within the range 0...N
ilo = np.clip((x - M // 2).astype(int), 0, N - M)
numerator = y * np.prod(x - ilo - np.arange(M)[:, np.newaxis], 0)
denominator = factorial(M - 1)
for j in range(M):
if j > 0:
denominator *= j / (j - M)
ind = ilo + (M - 1 - j)
np.add.at(result, ind, numerator / (denominator * (x - ind)))
return result | Extirpolate the values (x, y) onto an integer grid range(N),
using Lagrange polynomial weights on the M nearest points.
Parameters
----------
x : array_like
array of abscissas
y : array_like
array of ordinates
N : int
number of integer bins to use. For best performance, N should be larger
than the maximum of x
M : int
number of adjoining points on which to extirpolate.
Returns
-------
yN : ndarray
N extirpolated values associated with range(N)
Example
-------
>>> rng = np.random.RandomState(0)
>>> x = 100 * rng.rand(20)
>>> y = np.sin(x)
>>> y_hat = extirpolate(x, y)
>>> x_hat = np.arange(len(y_hat))
>>> f = lambda x: np.sin(x / 10)
>>> np.allclose(np.sum(y * f(x)), np.sum(y_hat * f(x_hat)))
True
Notes
-----
This code is based on the C implementation of spread() presented in
Numerical Recipes in C, Second Edition (Press et al. 1989; p.583). | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle_fast.py#L43-L107 |
astroML/gatspy | gatspy/periodic/lomb_scargle_fast.py | trig_sum | def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the tradeoff between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
summation arrays for frequencies f = f0 + df * np.arange(N)
"""
df *= freq_factor
f0 *= freq_factor
assert df > 0
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
assert(Mfft > 0)
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)
if t0 != 0:
f = f0 + df * np.arange(Nfft)
fftgrid *= np.exp(2j * np.pi * t0 * f)
fftgrid = fftgrid[:N]
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C | python | def trig_sum(t, h, df, N, f0=0, freq_factor=1,
oversampling=5, use_fft=True, Mfft=4):
"""Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the tradeoff between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
summation arrays for frequencies f = f0 + df * np.arange(N)
"""
df *= freq_factor
f0 *= freq_factor
assert df > 0
t, h = map(np.ravel, np.broadcast_arrays(t, h))
if use_fft:
Mfft = int(Mfft)
assert(Mfft > 0)
# required size of fft is the power of 2 above the oversampling rate
Nfft = bitceil(N * oversampling)
t0 = t.min()
if f0 > 0:
h = h * np.exp(2j * np.pi * f0 * (t - t0))
tnorm = ((t - t0) * Nfft * df) % Nfft
grid = extirpolate(tnorm, h, Nfft, Mfft)
fftgrid = np.fft.ifft(grid)
if t0 != 0:
f = f0 + df * np.arange(Nfft)
fftgrid *= np.exp(2j * np.pi * t0 * f)
fftgrid = fftgrid[:N]
C = Nfft * fftgrid.real
S = Nfft * fftgrid.imag
else:
f = f0 + df * np.arange(N)
C = np.dot(h, np.cos(2 * np.pi * f * t[:, np.newaxis]))
S = np.dot(h, np.sin(2 * np.pi * f * t[:, np.newaxis]))
return S, C | Compute (approximate) trigonometric sums for a number of frequencies
This routine computes weighted sine and cosine sums:
S_j = sum_i { h_i * sin(2 pi * f_j * t_i) }
C_j = sum_i { h_i * cos(2 pi * f_j * t_i) }
Where f_j = freq_factor * (f0 + j * df) for the values j in 0 ... N - 1.
The sums can be computed either by a brute force O[N^2] method, or
by an FFT-based O[Nlog(N)] method.
Parameters
----------
t : array_like
array of input times
h : array_like
array weights for the sum
df : float
frequency spacing
N : int
number of frequency bins to return
f0 : float (optional, default=0)
The low frequency to use
freq_factor : float (optional, default=1)
Factor which multiplies the frequency
use_fft : bool
if True, use the approximate FFT algorithm to compute the result.
This uses the FFT with Press & Rybicki's Lagrangian extirpolation.
oversampling : int (default = 5)
oversampling factor for the approximation; roughly the number of
time samples across the highest-frequency sinusoid. This parameter
contains the tradeoff between accuracy and speed. Not referenced
if use_fft is False.
Mfft : int
The number of adjacent points to use in the FFT approximation.
Not referenced if use_fft is False.
Returns
-------
S, C : ndarrays
summation arrays for frequencies f = f0 + df * np.arange(N) | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle_fast.py#L110-L187 |
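A sketch comparing the two code paths, assuming trig_sum and its helpers above are in scope; with the default oversampling the FFT path should track the direct sums closely, though how closely depends on the data.

import numpy as np

rng = np.random.RandomState(0)
t = 100 * rng.rand(50)
h = np.sin(t)
S_fft, C_fft = trig_sum(t, h, df=0.01, N=200, use_fft=True)
S_dir, C_dir = trig_sum(t, h, df=0.01, N=200, use_fft=False)
max_err = np.max(np.abs(S_fft - S_dir))   # small relative to np.abs(S_dir).max()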
astroML/gatspy | gatspy/periodic/lomb_scargle_fast.py | lomb_scargle_fast | def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None,
center_data=True, fit_offset=True,
use_fft=True, freq_oversampling=5, nyquist_factor=2,
trig_sum_kwds=None):
"""Compute a lomb-scargle periodogram for the given data
This implements both an O[N^2] method if use_fft==False, or an
O[NlogN] method if use_fft==True.
Parameters
----------
t, y, dy : array_like
times, values, and errors of the data points. These should be
broadcastable to the same shape. If dy is not specified, a
constant error will be used.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
Defaults, with T = t.max() - t.min():
- f0 = 0
- df is set such that there are ``freq_oversampling`` points per
peak width. ``freq_oversampling`` defaults to 5.
- Nf is set such that the highest frequency is ``nyquist_factor``
times the so-called "average Nyquist frequency".
``nyquist_factor`` defaults to 2.
Note that for unevenly-spaced data, the periodogram can be sensitive
to frequencies far higher than the average Nyquist frequency.
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_offset : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
Other Parameters
----------------
freq_oversampling : float (default=5)
Oversampling factor for the frequency bins. Only referenced if
``df`` is not specified
nyquist_factor : float (default=2)
Parameter controlling the highest probed frequency. Only referenced
if ``Nf`` is not specified.
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
of unevenly sampled data". ApJ 1:338, p277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
.. [3] W. Press et al, Numerical Recipies in C (2002)
"""
# Validate and setup input data
t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
w = 1. / (dy ** 2)
w /= w.sum()
# Validate and setup frequency grid
if df is None:
peak_width = 1. / (t.max() - t.min())
df = peak_width / freq_oversampling
if Nf is None:
avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
Nf = int(Nf)
assert(df > 0)
assert(Nf > 0)
freq = f0 + df * np.arange(Nf)
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_offset:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
#----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_offset:
S, C = trig_sum(t, w, **kwargs)
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if f0 == 0:
warnings.simplefilter("ignore")
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
# fix NaN at zero frequency
if np.isnan(tan_2omega_tau[0]):
tan_2omega_tau[0] = 0
else:
tan_2omega_tau = S2 / C2
# slower/less stable way: we'll use trig identities instead
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
#----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_offset:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if fit_offset and f0 == 0:
warnings.simplefilter("ignore")
power = (YC * YC / CC + YS * YS / SS) / YY
# fix NaN and INF at zero frequency
if np.isnan(power[0]) or np.isinf(power[0]):
power[0] = 0
return freq, power | python | def lomb_scargle_fast(t, y, dy=1, f0=0, df=None, Nf=None,
center_data=True, fit_offset=True,
use_fft=True, freq_oversampling=5, nyquist_factor=2,
trig_sum_kwds=None):
"""Compute a lomb-scargle periodogram for the given data
This implements both an O[N^2] method if use_fft==False, or an
O[NlogN] method if use_fft==True.
Parameters
----------
t, y, dy : array_like
times, values, and errors of the data points. These should be
broadcastable to the same shape. If dy is not specified, a
constant error will be used.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
Defaults, with T = t.max() - t.min():
- f0 = 0
- df is set such that there are ``freq_oversampling`` points per
peak width. ``freq_oversampling`` defaults to 5.
- Nf is set such that the highest frequency is ``nyquist_factor``
times the so-called "average Nyquist frequency".
``nyquist_factor`` defaults to 2.
Note that for unevenly-spaced data, the periodogram can be sensitive
to frequencies far higher than the average Nyquist frequency.
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_offset : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
Other Parameters
----------------
freq_oversampling : float (default=5)
Oversampling factor for the frequency bins. Only referenced if
``df`` is not specified
nyquist_factor : float (default=2)
Parameter controlling the highest probed frequency. Only referenced
if ``Nf`` is not specified.
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
           of unevenly sampled data". ApJ 338, p. 277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [3] W. Press et al, Numerical Recipes in C (2002)
"""
# Validate and setup input data
t, y, dy = map(np.ravel, np.broadcast_arrays(t, y, dy))
w = 1. / (dy ** 2)
w /= w.sum()
# Validate and setup frequency grid
if df is None:
peak_width = 1. / (t.max() - t.min())
df = peak_width / freq_oversampling
if Nf is None:
avg_Nyquist = 0.5 * len(t) / (t.max() - t.min())
Nf = max(16, (nyquist_factor * avg_Nyquist - f0) / df)
Nf = int(Nf)
assert(df > 0)
assert(Nf > 0)
freq = f0 + df * np.arange(Nf)
# Center the data. Even if we're fitting the offset,
# this step makes the expressions below more succinct
if center_data or fit_offset:
y = y - np.dot(w, y)
# set up arguments to trig_sum
kwargs = dict.copy(trig_sum_kwds or {})
kwargs.update(f0=f0, df=df, use_fft=use_fft, N=Nf)
#----------------------------------------------------------------------
# 1. compute functions of the time-shift tau at each frequency
Sh, Ch = trig_sum(t, w * y, **kwargs)
S2, C2 = trig_sum(t, w, freq_factor=2, **kwargs)
if fit_offset:
S, C = trig_sum(t, w, **kwargs)
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if f0 == 0:
warnings.simplefilter("ignore")
tan_2omega_tau = (S2 - 2 * S * C) / (C2 - (C * C - S * S))
# fix NaN at zero frequency
if np.isnan(tan_2omega_tau[0]):
tan_2omega_tau[0] = 0
else:
tan_2omega_tau = S2 / C2
# slower/less stable way: we'll use trig identities instead
# omega_tau = 0.5 * np.arctan(tan_2omega_tau)
# S2w, C2w = np.sin(2 * omega_tau), np.cos(2 * omega_tau)
# Sw, Cw = np.sin(omega_tau), np.cos(omega_tau)
S2w = tan_2omega_tau / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
C2w = 1 / np.sqrt(1 + tan_2omega_tau * tan_2omega_tau)
Cw = np.sqrt(0.5) * np.sqrt(1 + C2w)
Sw = np.sqrt(0.5) * np.sign(S2w) * np.sqrt(1 - C2w)
#----------------------------------------------------------------------
# 2. Compute the periodogram, following Zechmeister & Kurster
# and using tricks from Press & Rybicki.
YY = np.dot(w, y ** 2)
YC = Ch * Cw + Sh * Sw
YS = Sh * Cw - Ch * Sw
CC = 0.5 * (1 + C2 * C2w + S2 * S2w)
SS = 0.5 * (1 - C2 * C2w - S2 * S2w)
if fit_offset:
CC -= (C * Cw + S * Sw) ** 2
SS -= (S * Cw - C * Sw) ** 2
with warnings.catch_warnings():
# Filter "invalid value in divide" warnings for zero-frequency
if fit_offset and f0 == 0:
warnings.simplefilter("ignore")
power = (YC * YC / CC + YS * YS / SS) / YY
# fix NaN and INF at zero frequency
if np.isnan(power[0]) or np.isinf(power[0]):
power[0] = 0
return freq, power | Compute a lomb-scargle periodogram for the given data
This implements both an O[N^2] method if use_fft==False, or an
O[NlogN] method if use_fft==True.
Parameters
----------
t, y, dy : array_like
times, values, and errors of the data points. These should be
broadcastable to the same shape. If dy is not specified, a
constant error will be used.
f0, df, Nf : (float, float, int)
parameters describing the frequency grid, f = f0 + df * arange(Nf).
Defaults, with T = t.max() - t.min():
- f0 = 0
- df is set such that there are ``freq_oversampling`` points per
peak width. ``freq_oversampling`` defaults to 5.
- Nf is set such that the highest frequency is ``nyquist_factor``
times the so-called "average Nyquist frequency".
``nyquist_factor`` defaults to 2.
Note that for unevenly-spaced data, the periodogram can be sensitive
to frequencies far higher than the average Nyquist frequency.
center_data : bool (default=True)
Specify whether to subtract the mean of the data before the fit
fit_offset : bool (default=True)
If True, then compute the floating-mean periodogram; i.e. let the mean
vary with the fit.
use_fft : bool (default=True)
If True, then use the Press & Rybicki O[NlogN] algorithm to compute
the result. Otherwise, use a slower O[N^2] algorithm
Other Parameters
----------------
freq_oversampling : float (default=5)
Oversampling factor for the frequency bins. Only referenced if
``df`` is not specified
nyquist_factor : float (default=2)
Parameter controlling the highest probed frequency. Only referenced
if ``Nf`` is not specified.
trig_sum_kwds : dict or None (optional)
extra keyword arguments to pass to the ``trig_sum`` utility.
Options are ``oversampling`` and ``Mfft``. See documentation
of ``trig_sum`` for details.
Notes
-----
Note that the ``use_fft=True`` algorithm is an approximation to the true
Lomb-Scargle periodogram, and as the number of points grows this
approximation improves. On the other hand, for very small datasets
(<~50 points or so) this approximation may not be useful.
References
----------
.. [1] Press W.H. and Rybicki, G.B, "Fast algorithm for spectral analysis
           of unevenly sampled data". ApJ 338, p. 277, 1989
.. [2] M. Zechmeister and M. Kurster, A&A 496, 577-584 (2009)
    .. [3] W. Press et al, Numerical Recipes in C (2002) | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/periodic/lomb_scargle_fast.py#L190-L330 |
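A minimal usage sketch for the row above, assuming gatspy is installed. The import path is inferred from the file path given in the row, and the synthetic signal (true period 2.5) is illustrative only.

import numpy as np
from gatspy.periodic.lomb_scargle_fast import lomb_scargle_fast

rng = np.random.RandomState(0)
t = 100 * rng.rand(200)                                  # uneven sampling
y = np.sin(2 * np.pi * t / 2.5) + 0.1 * rng.randn(200)   # true period: 2.5
freq, power = lomb_scargle_fast(t, y, dy=0.1)
print(1.0 / freq[np.argmax(power)])                      # should be near 2.5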
astroML/gatspy | gatspy/datasets/rrlyrae_generated.py | RRLyraeGenerated.observed | def observed(self, band, corrected=True):
"""Return observed values in the given band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
corrected : bool (optional)
If true, correct for extinction
Returns
-------
t, mag, dmag : ndarrays
The times, magnitudes, and magnitude errors for the specified band.
"""
if band not in 'ugriz':
raise ValueError("band='{0}' not recognized".format(band))
i = 'ugriz'.find(band)
t, y, dy = self.lcdata.get_lightcurve(self.lcid, return_1d=False)
if corrected:
ext = self.obsmeta['rExt'] * self.ext_correction[band]
else:
ext = 0
return t[:, i], y[:, i] - ext, dy[:, i] | python | def observed(self, band, corrected=True):
"""Return observed values in the given band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
corrected : bool (optional)
If true, correct for extinction
Returns
-------
t, mag, dmag : ndarrays
The times, magnitudes, and magnitude errors for the specified band.
"""
if band not in 'ugriz':
raise ValueError("band='{0}' not recognized".format(band))
i = 'ugriz'.find(band)
t, y, dy = self.lcdata.get_lightcurve(self.lcid, return_1d=False)
if corrected:
ext = self.obsmeta['rExt'] * self.ext_correction[band]
else:
ext = 0
return t[:, i], y[:, i] - ext, dy[:, i] | Return observed values in the given band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
corrected : bool (optional)
If true, correct for extinction
Returns
-------
t, mag, dmag : ndarrays
The times, magnitudes, and magnitude errors for the specified band. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae_generated.py#L77-L102 |
astroML/gatspy | gatspy/datasets/rrlyrae_generated.py | RRLyraeGenerated.generated | def generated(self, band, t, err=None, corrected=True):
"""Return generated magnitudes in the specified band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
t : array_like
array of times (in days)
err : float or array_like
gaussian error in observations
corrected : bool (optional)
If true, correct for extinction
Returns
-------
mag : ndarray
magnitudes at the specified times under the generated model.
"""
t = np.asarray(t)
num = self.meta[band + 'T']
mu = self.meta[band + '0']
amp = self.meta[band + 'A']
t0 = self.meta[band + 'E']
# if there are nans or infinities, mask them
bad_vals = np.isnan(t) | np.isinf(t)
t[bad_vals] = t0
if corrected:
ext = 0
else:
ext = self.obsmeta['rExt'] * self.ext_correction[band]
func = self._template_func(num, band, mu + ext, amp)
mag = func(((t - t0) / self.period) % 1)
mag[bad_vals] = np.nan
if err is not None:
mag += self.rng.normal(0, err, t.shape)
return mag | python | def generated(self, band, t, err=None, corrected=True):
"""Return generated magnitudes in the specified band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
t : array_like
array of times (in days)
err : float or array_like
gaussian error in observations
corrected : bool (optional)
If true, correct for extinction
Returns
-------
mag : ndarray
magnitudes at the specified times under the generated model.
"""
t = np.asarray(t)
num = self.meta[band + 'T']
mu = self.meta[band + '0']
amp = self.meta[band + 'A']
t0 = self.meta[band + 'E']
# if there are nans or infinities, mask them
bad_vals = np.isnan(t) | np.isinf(t)
t[bad_vals] = t0
if corrected:
ext = 0
else:
ext = self.obsmeta['rExt'] * self.ext_correction[band]
func = self._template_func(num, band, mu + ext, amp)
mag = func(((t - t0) / self.period) % 1)
mag[bad_vals] = np.nan
if err is not None:
mag += self.rng.normal(0, err, t.shape)
return mag | Return generated magnitudes in the specified band
Parameters
----------
band : str
desired bandpass: should be one of ['u', 'g', 'r', 'i', 'z']
t : array_like
array of times (in days)
err : float or array_like
gaussian error in observations
corrected : bool (optional)
If true, correct for extinction
Returns
-------
mag : ndarray
magnitudes at the specified times under the generated model. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae_generated.py#L104-L146 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | _get_download_or_cache | def _get_download_or_cache(filename, data_home=None,
url=SESAR_RRLYRAE_URL,
force_download=False):
"""Private utility to download and/or load data from disk cache."""
# Import here so astroML is not required at package level
from astroML.datasets.tools import get_data_home
if data_home is None:
data_home = get_data_home(data_home)
data_home = os.path.join(data_home, 'Sesar2010')
if not os.path.exists(data_home):
os.makedirs(data_home)
    src_url = url + filename  # use the caller-supplied base URL
save_loc = os.path.join(data_home, filename)
if force_download or not os.path.exists(save_loc):
fhandle = urlopen(src_url)
with open(save_loc, 'wb') as cache:
cache.write(fhandle.read())
return save_loc | python | def _get_download_or_cache(filename, data_home=None,
url=SESAR_RRLYRAE_URL,
force_download=False):
"""Private utility to download and/or load data from disk cache."""
# Import here so astroML is not required at package level
from astroML.datasets.tools import get_data_home
if data_home is None:
data_home = get_data_home(data_home)
data_home = os.path.join(data_home, 'Sesar2010')
if not os.path.exists(data_home):
os.makedirs(data_home)
    src_url = url + filename  # use the caller-supplied base URL
save_loc = os.path.join(data_home, filename)
if force_download or not os.path.exists(save_loc):
fhandle = urlopen(src_url)
with open(save_loc, 'wb') as cache:
cache.write(fhandle.read())
return save_loc | Private utility to download and/or load data from disk cache. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L28-L48 |
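The download-or-cache pattern above, restated as a self-contained sketch using only the standard library; the URL, directory, and file name here are placeholders, not gatspy's.

import os
from urllib.request import urlopen

def download_or_cache(url, cache_dir, filename, force=False):
    os.makedirs(cache_dir, exist_ok=True)      # make sure the cache directory exists
    path = os.path.join(cache_dir, filename)
    if force or not os.path.exists(path):      # hit the network only on a cache miss
        with urlopen(url) as response, open(path, 'wb') as cache:
            cache.write(response.read())
    return path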
astroML/gatspy | gatspy/datasets/rrlyrae.py | fetch_rrlyrae | def fetch_rrlyrae(partial=False, **kwargs):
"""Fetch RR Lyrae light curves from Sesar 2010
Parameters
----------
partial : bool (optional)
If true, return the partial dataset (reduced to 1 band per night)
Returns
-------
rrlyrae : :class:`RRLyraeLC` object
This object contains pointers to the RR Lyrae data.
Other Parameters
----------------
data_home : str (optional)
Specify the local cache directory for the dataset. If not used, it
will default to the ``astroML`` default location.
url : str (optional)
Specify the URL of the datasets. Defaults to webpage associated with
Sesar 2010.
force_download : bool (optional)
If true, then force re-downloading data even if it is already cached
locally. Default is False.
Examples
--------
>>> rrlyrae = fetch_rrlyrae()
>>> rrlyrae.ids[:5]
[1013184, 1019544, 1027882, 1052471, 1056152]
>>> lcid = rrlyrae.ids[0]
>>> t, mag, dmag, bands = rrlyrae.get_lightcurve(lcid)
>>> t[:4]
array([ 51081.347856, 51081.349522, 51081.346189, 51081.347022])
>>> mag[:4]
array([ 18.702, 17.553, 17.236, 17.124])
>>> dmag[:4]
array([ 0.021, 0.005, 0.005, 0.006])
>>> list(bands[:4])
['u', 'g', 'r', 'i']
"""
if partial:
return PartialRRLyraeLC('table1.tar.gz',
cache_kwargs=kwargs)
else:
return RRLyraeLC('table1.tar.gz',
cache_kwargs=kwargs) | python | def fetch_rrlyrae(partial=False, **kwargs):
"""Fetch RR Lyrae light curves from Sesar 2010
Parameters
----------
partial : bool (optional)
If true, return the partial dataset (reduced to 1 band per night)
Returns
-------
rrlyrae : :class:`RRLyraeLC` object
This object contains pointers to the RR Lyrae data.
Other Parameters
----------------
data_home : str (optional)
Specify the local cache directory for the dataset. If not used, it
will default to the ``astroML`` default location.
url : str (optional)
Specify the URL of the datasets. Defaults to webpage associated with
Sesar 2010.
force_download : bool (optional)
If true, then force re-downloading data even if it is already cached
locally. Default is False.
Examples
--------
>>> rrlyrae = fetch_rrlyrae()
>>> rrlyrae.ids[:5]
[1013184, 1019544, 1027882, 1052471, 1056152]
>>> lcid = rrlyrae.ids[0]
>>> t, mag, dmag, bands = rrlyrae.get_lightcurve(lcid)
>>> t[:4]
array([ 51081.347856, 51081.349522, 51081.346189, 51081.347022])
>>> mag[:4]
array([ 18.702, 17.553, 17.236, 17.124])
>>> dmag[:4]
array([ 0.021, 0.005, 0.005, 0.006])
>>> list(bands[:4])
['u', 'g', 'r', 'i']
"""
if partial:
return PartialRRLyraeLC('table1.tar.gz',
cache_kwargs=kwargs)
else:
return RRLyraeLC('table1.tar.gz',
cache_kwargs=kwargs) | Fetch RR Lyrae light curves from Sesar 2010
Parameters
----------
partial : bool (optional)
If true, return the partial dataset (reduced to 1 band per night)
Returns
-------
rrlyrae : :class:`RRLyraeLC` object
This object contains pointers to the RR Lyrae data.
Other Parameters
----------------
data_home : str (optional)
Specify the local cache directory for the dataset. If not used, it
will default to the ``astroML`` default location.
url : str (optional)
Specify the URL of the datasets. Defaults to webpage associated with
Sesar 2010.
force_download : bool (optional)
If true, then force re-downloading data even if it is already cached
locally. Default is False.
Examples
--------
>>> rrlyrae = fetch_rrlyrae()
>>> rrlyrae.ids[:5]
[1013184, 1019544, 1027882, 1052471, 1056152]
>>> lcid = rrlyrae.ids[0]
>>> t, mag, dmag, bands = rrlyrae.get_lightcurve(lcid)
>>> t[:4]
array([ 51081.347856, 51081.349522, 51081.346189, 51081.347022])
>>> mag[:4]
array([ 18.702, 17.553, 17.236, 17.124])
>>> dmag[:4]
array([ 0.021, 0.005, 0.005, 0.006])
>>> list(bands[:4])
['u', 'g', 'r', 'i'] | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L343-L389 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | fetch_rrlyrae_lc_params | def fetch_rrlyrae_lc_params(**kwargs):
"""Fetch data from table 2 of Sesar 2010
This table includes observationally-derived parameters for all the
Sesar 2010 lightcurves.
"""
save_loc = _get_download_or_cache('table2.dat.gz', **kwargs)
dtype = [('id', 'i'), ('type', 'S2'), ('P', 'f'),
('uA', 'f'), ('u0', 'f'), ('uE', 'f'), ('uT', 'f'),
('gA', 'f'), ('g0', 'f'), ('gE', 'f'), ('gT', 'f'),
('rA', 'f'), ('r0', 'f'), ('rE', 'f'), ('rT', 'f'),
('iA', 'f'), ('i0', 'f'), ('iE', 'f'), ('iT', 'f'),
('zA', 'f'), ('z0', 'f'), ('zE', 'f'), ('zT', 'f')]
return np.loadtxt(save_loc, dtype=dtype) | python | def fetch_rrlyrae_lc_params(**kwargs):
"""Fetch data from table 2 of Sesar 2010
This table includes observationally-derived parameters for all the
Sesar 2010 lightcurves.
"""
save_loc = _get_download_or_cache('table2.dat.gz', **kwargs)
dtype = [('id', 'i'), ('type', 'S2'), ('P', 'f'),
('uA', 'f'), ('u0', 'f'), ('uE', 'f'), ('uT', 'f'),
('gA', 'f'), ('g0', 'f'), ('gE', 'f'), ('gT', 'f'),
('rA', 'f'), ('r0', 'f'), ('rE', 'f'), ('rT', 'f'),
('iA', 'f'), ('i0', 'f'), ('iE', 'f'), ('iT', 'f'),
('zA', 'f'), ('z0', 'f'), ('zE', 'f'), ('zT', 'f')]
return np.loadtxt(save_loc, dtype=dtype) | Fetch data from table 2 of Sesar 2010
This table includes observationally-derived parameters for all the
Sesar 2010 lightcurves. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L392-L407 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | fetch_rrlyrae_fitdata | def fetch_rrlyrae_fitdata(**kwargs):
"""Fetch data from table 3 of Sesar 2010
This table includes parameters derived from template fits to all the
Sesar 2010 lightcurves.
"""
save_loc = _get_download_or_cache('table3.dat.gz', **kwargs)
dtype = [('id', 'i'), ('RA', 'f'), ('DEC', 'f'), ('rExt', 'f'),
('d', 'f'), ('RGC', 'f'),
('u', 'f'), ('g', 'f'), ('r', 'f'),
('i', 'f'), ('z', 'f'), ('V', 'f'),
('ugmin', 'f'), ('ugmin_err', 'f'),
('grmin', 'f'), ('grmin_err', 'f')]
return np.loadtxt(save_loc, dtype=dtype) | python | def fetch_rrlyrae_fitdata(**kwargs):
"""Fetch data from table 3 of Sesar 2010
This table includes parameters derived from template fits to all the
Sesar 2010 lightcurves.
"""
save_loc = _get_download_or_cache('table3.dat.gz', **kwargs)
dtype = [('id', 'i'), ('RA', 'f'), ('DEC', 'f'), ('rExt', 'f'),
('d', 'f'), ('RGC', 'f'),
('u', 'f'), ('g', 'f'), ('r', 'f'),
('i', 'f'), ('z', 'f'), ('V', 'f'),
('ugmin', 'f'), ('ugmin_err', 'f'),
('grmin', 'f'), ('grmin_err', 'f')]
return np.loadtxt(save_loc, dtype=dtype) | Fetch data from table 3 of Sesar 2010
This table includes parameters derived from template fits to all the
Sesar 2010 lightcurves. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L410-L425 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | RRLyraeLC.get_lightcurve | def get_lightcurve(self, star_id, return_1d=True):
"""Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out.
"""
filename = '{0}/{1}.dat'.format(self.dirname, star_id)
try:
data = np.loadtxt(self.data.extractfile(filename))
except KeyError:
raise ValueError("invalid star id: {0}".format(star_id))
RA = data[:, 0]
DEC = data[:, 1]
t = data[:, 2::3]
y = data[:, 3::3]
dy = data[:, 4::3]
nans = (y == -99.99)
t[nans] = np.nan
y[nans] = np.nan
dy[nans] = np.nan
if return_1d:
t, y, dy, filts = np.broadcast_arrays(t, y, dy,
['u', 'g', 'r', 'i', 'z'])
good = ~np.isnan(t)
return t[good], y[good], dy[good], filts[good]
else:
return t, y, dy | python | def get_lightcurve(self, star_id, return_1d=True):
"""Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out.
"""
filename = '{0}/{1}.dat'.format(self.dirname, star_id)
try:
data = np.loadtxt(self.data.extractfile(filename))
except KeyError:
raise ValueError("invalid star id: {0}".format(star_id))
RA = data[:, 0]
DEC = data[:, 1]
t = data[:, 2::3]
y = data[:, 3::3]
dy = data[:, 4::3]
nans = (y == -99.99)
t[nans] = np.nan
y[nans] = np.nan
dy[nans] = np.nan
if return_1d:
t, y, dy, filts = np.broadcast_arrays(t, y, dy,
['u', 'g', 'r', 'i', 'z'])
good = ~np.isnan(t)
return t[good], y[good], dy[good], filts[good]
else:
return t, y, dy | Get the light curves for the given ID
Parameters
----------
star_id : int
A valid integer star id representing an object in the dataset
return_1d : boolean (default=True)
Specify whether to return 1D arrays of (t, y, dy, filts) or
2D arrays of (t, y, dy) where each column is a filter.
Returns
-------
t, y, dy : np.ndarrays (if return_1d == False)
Times, magnitudes, and magnitude errors.
The shape of each array is [Nobs, 5], where the columns refer
to [u,g,r,i,z] bands. Non-observations are indicated by NaN.
t, y, dy, filts : np.ndarrays (if return_1d == True)
Times, magnitudes, magnitude errors, and filters
The shape of each array is [Nobs], and non-observations are
filtered out. | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L125-L173 |
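A sketch of the two return modes documented above, assuming the Sesar 2010 archive can be fetched (the first call triggers a network download).

from gatspy.datasets import fetch_rrlyrae

rrlyrae = fetch_rrlyrae()
lcid = rrlyrae.ids[0]
t, y, dy, filts = rrlyrae.get_lightcurve(lcid)               # 1D: one row per observation
t2, y2, dy2 = rrlyrae.get_lightcurve(lcid, return_1d=False)  # 2D: one column per band
print(t.shape, t2.shape)                                     # (N,) versus (Nobs, 5)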
astroML/gatspy | gatspy/datasets/rrlyrae.py | RRLyraeLC.get_metadata | def get_metadata(self, lcid):
"""Get the parameters derived from the fit for the given id.
This is table 2 of Sesar 2010
"""
if self._metadata is None:
self._metadata = fetch_rrlyrae_lc_params()
i = np.where(self._metadata['id'] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._metadata[i[0]] | python | def get_metadata(self, lcid):
"""Get the parameters derived from the fit for the given id.
This is table 2 of Sesar 2010
"""
if self._metadata is None:
self._metadata = fetch_rrlyrae_lc_params()
i = np.where(self._metadata['id'] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._metadata[i[0]] | Get the parameters derived from the fit for the given id.
This is table 2 of Sesar 2010 | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L175-L184 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | RRLyraeLC.get_obsmeta | def get_obsmeta(self, lcid):
"""Get the observation metadata for the given id.
This is table 3 of Sesar 2010
"""
if self._obsdata is None:
self._obsdata = fetch_rrlyrae_fitdata()
i = np.where(self._obsdata['id'] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._obsdata[i[0]] | python | def get_obsmeta(self, lcid):
"""Get the observation metadata for the given id.
This is table 3 of Sesar 2010
"""
if self._obsdata is None:
self._obsdata = fetch_rrlyrae_fitdata()
i = np.where(self._obsdata['id'] == lcid)[0]
if len(i) == 0:
raise ValueError("invalid lcid: {0}".format(lcid))
return self._obsdata[i[0]] | Get the observation metadata for the given id.
This is table 3 of Sesar 2010 | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L186-L195 |
astroML/gatspy | gatspy/datasets/rrlyrae.py | RRLyraeTemplates.get_template | def get_template(self, template_id):
"""Get a particular lightcurve template
Parameters
----------
template_id : str
id of desired template
Returns
-------
phase : ndarray
array of phases
mag : ndarray
array of normalized magnitudes
"""
try:
data = np.loadtxt(self.data.extractfile(template_id + '.dat'))
except KeyError:
raise ValueError("invalid star id: {0}".format(template_id))
return data[:, 0], data[:, 1] | python | def get_template(self, template_id):
"""Get a particular lightcurve template
Parameters
----------
template_id : str
id of desired template
Returns
-------
phase : ndarray
array of phases
mag : ndarray
array of normalized magnitudes
"""
try:
data = np.loadtxt(self.data.extractfile(template_id + '.dat'))
except KeyError:
raise ValueError("invalid star id: {0}".format(template_id))
return data[:, 0], data[:, 1] | Get a particular lightcurve template
Parameters
----------
template_id : str
id of desired template
Returns
-------
phase : ndarray
array of phases
mag : ndarray
array of normalized magnitudes | https://github.com/astroML/gatspy/blob/a8f94082a3f27dfe9cb58165707b883bf28d9223/gatspy/datasets/rrlyrae.py#L322-L340 |
vkurup/python-tcxparser | tcxparser/tcxparser.py | TCXParser.hr_avg | def hr_avg(self):
"""Average heart rate of the workout"""
hr_data = self.hr_values()
return int(sum(hr_data) / len(hr_data)) | python | def hr_avg(self):
"""Average heart rate of the workout"""
hr_data = self.hr_values()
return int(sum(hr_data) / len(hr_data)) | Average heart rate of the workout | https://github.com/vkurup/python-tcxparser/blob/b5bdd86d1e76f842043f28717e261d25025b1a8e/tcxparser/tcxparser.py#L73-L76 |
vkurup/python-tcxparser | tcxparser/tcxparser.py | TCXParser.pace | def pace(self):
"""Average pace (mm:ss/km for the workout"""
secs_per_km = self.duration / (self.distance / 1000)
return time.strftime('%M:%S', time.gmtime(secs_per_km)) | python | def pace(self):
"""Average pace (mm:ss/km for the workout"""
secs_per_km = self.duration / (self.distance / 1000)
        return time.strftime('%M:%S', time.gmtime(secs_per_km)) | Average pace (mm:ss/km) for the workout | https://github.com/vkurup/python-tcxparser/blob/b5bdd86d1e76f842043f28717e261d25025b1a8e/tcxparser/tcxparser.py#L89-L92 |
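A worked check of the pace arithmetic above, independent of TCXParser; the duration and distance are made-up values.

import time

duration = 25 * 60    # 25 minutes, in seconds (hypothetical)
distance = 5000.0     # 5 km, in meters (hypothetical)
secs_per_km = duration / (distance / 1000)                # 300.0
print(time.strftime('%M:%S', time.gmtime(secs_per_km)))   # -> 05:00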
vkurup/python-tcxparser | tcxparser/tcxparser.py | TCXParser.ascent | def ascent(self):
"""Returns ascent of workout in meters"""
total_ascent = 0.0
altitude_data = self.altitude_points()
for i in range(len(altitude_data) - 1):
diff = altitude_data[i+1] - altitude_data[i]
if diff > 0.0:
total_ascent += diff
return total_ascent | python | def ascent(self):
"""Returns ascent of workout in meters"""
total_ascent = 0.0
altitude_data = self.altitude_points()
for i in range(len(altitude_data) - 1):
diff = altitude_data[i+1] - altitude_data[i]
if diff > 0.0:
total_ascent += diff
return total_ascent | Returns ascent of workout in meters | https://github.com/vkurup/python-tcxparser/blob/b5bdd86d1e76f842043f28717e261d25025b1a8e/tcxparser/tcxparser.py#L113-L121 |
vkurup/python-tcxparser | tcxparser/tcxparser.py | TCXParser.descent | def descent(self):
"""Returns descent of workout in meters"""
total_descent = 0.0
altitude_data = self.altitude_points()
for i in range(len(altitude_data) - 1):
diff = altitude_data[i+1] - altitude_data[i]
if diff < 0.0:
total_descent += abs(diff)
return total_descent | python | def descent(self):
"""Returns descent of workout in meters"""
total_descent = 0.0
altitude_data = self.altitude_points()
for i in range(len(altitude_data) - 1):
diff = altitude_data[i+1] - altitude_data[i]
if diff < 0.0:
total_descent += abs(diff)
return total_descent | Returns descent of workout in meters | https://github.com/vkurup/python-tcxparser/blob/b5bdd86d1e76f842043f28717e261d25025b1a8e/tcxparser/tcxparser.py#L124-L132 |
uktrade/directory-validators | directory_validators/company.py | keywords_special_characters | def keywords_special_characters(keywords):
"""
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
"""
invalid_chars = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n'
if any(char in invalid_chars for char in keywords):
raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS) | python | def keywords_special_characters(keywords):
"""
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
"""
invalid_chars = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n'
if any(char in invalid_chars for char in keywords):
raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS) | Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L45-L57 |
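The character test above, restated without the Django dependency so its behaviour can be checked directly:

INVALID_CHARS = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n'

def has_special_characters(keywords):
    # mirrors the any(...) check in the validator above
    return any(char in INVALID_CHARS for char in keywords)

print(has_special_characters('handmade, oak furniture'))   # False: commas and spaces pass
print(has_special_characters('50% off!'))                  # True: '%' and '!' are rejected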
uktrade/directory-validators | directory_validators/company.py | image_format | def image_format(value):
"""
Confirms that the uploaded image is of supported format.
Args:
value (File): The file with an `image` property containing the image
Raises:
django.forms.ValidationError
"""
if value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS:
raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT) | python | def image_format(value):
"""
Confirms that the uploaded image is of supported format.
Args:
value (File): The file with an `image` property containing the image
Raises:
django.forms.ValidationError
"""
if value.image.format.upper() not in constants.ALLOWED_IMAGE_FORMATS:
raise ValidationError(MESSAGE_INVALID_IMAGE_FORMAT) | Confirms that the uploaded image is of supported format.
Args:
value (File): The file with an `image` property containing the image
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L60-L73 |
uktrade/directory-validators | directory_validators/company.py | case_study_social_link_facebook | def case_study_social_link_facebook(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('facebook.com'):
raise ValidationError(MESSAGE_NOT_FACEBOOK) | python | def case_study_social_link_facebook(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('facebook.com'):
raise ValidationError(MESSAGE_NOT_FACEBOOK) | Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L108-L122 |
uktrade/directory-validators | directory_validators/company.py | case_study_social_link_twitter | def case_study_social_link_twitter(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('twitter.com'):
raise ValidationError(MESSAGE_NOT_TWITTER) | python | def case_study_social_link_twitter(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('twitter.com'):
raise ValidationError(MESSAGE_NOT_TWITTER) | Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L125-L139 |
uktrade/directory-validators | directory_validators/company.py | case_study_social_link_linkedin | def case_study_social_link_linkedin(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('linkedin.com'):
raise ValidationError(MESSAGE_NOT_LINKEDIN) | python | def case_study_social_link_linkedin(value):
"""
Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError
"""
parsed = parse.urlparse(value.lower())
if not parsed.netloc.endswith('linkedin.com'):
raise ValidationError(MESSAGE_NOT_LINKEDIN) | Confirms that the social media url is pointed at the correct domain.
Args:
value (string): The url to check.
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L142-L156 |
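The domain test shared by the three validators above, sketched standalone. Note that a bare endswith('facebook.com') would also accept a host such as notfacebook.com, so this sketch anchors the comparison on the dot; that stricter variant is an editorial variation, not the library's code.

from urllib.parse import urlparse

def is_social_domain(url, domain):
    netloc = urlparse(url.lower()).netloc
    # exact match, or a subdomain of the expected domain
    return netloc == domain or netloc.endswith('.' + domain)

print(is_social_domain('http://www.facebook.com/mycompany', 'facebook.com'))   # True
print(is_social_domain('http://notfacebook.com/mycompany', 'facebook.com'))    # False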
uktrade/directory-validators | directory_validators/company.py | no_company_with_insufficient_companies_house_data | def no_company_with_insufficient_companies_house_data(value):
"""
    Confirms that the company number is not for a company that
Companies House does not hold information on.
Args:
value (string): The company number to check.
Raises:
django.forms.ValidationError
"""
for prefix, name in company_types_with_insufficient_companies_house_data:
if value.upper().startswith(prefix):
raise ValidationError(
MESSAGE_INSUFFICIENT_DATA, params={'name': name}
) | python | def no_company_with_insufficient_companies_house_data(value):
"""
    Confirms that the company number is not for a company that
Companies House does not hold information on.
Args:
value (string): The company number to check.
Raises:
django.forms.ValidationError
"""
for prefix, name in company_types_with_insufficient_companies_house_data:
if value.upper().startswith(prefix):
raise ValidationError(
MESSAGE_INSUFFICIENT_DATA, params={'name': name}
        ) | Confirms that the company number is not for a company that
Companies House does not hold information on.
Args:
value (string): The company number to check.
Raises:
django.forms.ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/company.py#L179-L196 |
uktrade/directory-validators | directory_validators/enrolment.py | email_domain_free | def email_domain_free(value):
"""
Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
    @raises ValidationError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in free_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | python | def email_domain_free(value):
"""
Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
    @raises ValidationError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in free_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | Confirms that the email address is not using a free service.
@param {str} value
@returns {None}
    @raises ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/enrolment.py#L43-L54 |
uktrade/directory-validators | directory_validators/enrolment.py | email_domain_disposable | def email_domain_disposable(value):
"""
Confirms that the email address is not using a disposable service.
@param {str} value
@returns {None}
    @raises ValidationError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in disposable_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | python | def email_domain_disposable(value):
"""
Confirms that the email address is not using a disposable service.
@param {str} value
@returns {None}
    @raises ValidationError
"""
domain = helpers.get_domain_from_email_address(value)
if domain.lower() in disposable_domains:
raise ValidationError(MESSAGE_USE_COMPANY_EMAIL) | Confirms that the email address is not using a disposable service.
@param {str} value
@returns {None}
    @raises ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/enrolment.py#L57-L68 |
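The helpers.get_domain_from_email_address call used above is not shown in this excerpt; a plausible stand-in follows, with an illustrative free-domain set that is not the library's.

def get_domain_from_email_address(email_address):
    # a plausible stand-in: everything after the first '@'
    return email_address.split('@', 1)[1]

free_domains = {'gmail.com', 'yahoo.com', 'hotmail.com'}   # illustrative subset

domain = get_domain_from_email_address('jane@Gmail.com')
print(domain.lower() in free_domains)   # True -> the validator would reject this address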
uktrade/directory-validators | directory_validators/enrolment.py | domestic_mobile_phone_number | def domestic_mobile_phone_number(value):
"""
    Confirms that the phone number is a valid UK mobile phone number.
    @param {str} value
    @returns {None}
    @raises ValidationError
"""
try:
parsed = phonenumbers.parse(value, 'GB')
except NumberParseException:
pass
else:
is_mobile = carrier._is_mobile(number_type(parsed))
if is_mobile and phonenumbers.is_valid_number(parsed):
return None
raise ValidationError(MESSAGE_INVALID_PHONE_NUMBER) | python | def domestic_mobile_phone_number(value):
"""
    Confirms that the phone number is a valid UK mobile phone number.
    @param {str} value
    @returns {None}
    @raises ValidationError
"""
try:
parsed = phonenumbers.parse(value, 'GB')
except NumberParseException:
pass
else:
is_mobile = carrier._is_mobile(number_type(parsed))
if is_mobile and phonenumbers.is_valid_number(parsed):
return None
    raise ValidationError(MESSAGE_INVALID_PHONE_NUMBER) | Confirms that the phone number is a valid UK mobile phone number.
    @param {str} value
    @returns {None}
    @raises ValidationError | https://github.com/uktrade/directory-validators/blob/e01f9d2aec683e34d978e4f67ed383ea2f9b85a0/directory_validators/enrolment.py#L71-L88 |
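A standalone sketch of the same UK-mobile check using only the public phonenumbers API (the validator above also leans on the private carrier._is_mobile helper); the numbers used are Ofcom test ranges.

import phonenumbers
from phonenumbers import NumberParseException, PhoneNumberType

def is_uk_mobile(value):
    try:
        parsed = phonenumbers.parse(value, 'GB')
    except NumberParseException:
        return False
    # valid number whose type is specifically MOBILE
    return (phonenumbers.is_valid_number(parsed) and
            phonenumbers.number_type(parsed) == PhoneNumberType.MOBILE)

print(is_uk_mobile('07700 900123'))    # True: UK mobile test range
print(is_uk_mobile('020 7946 0018'))   # False: London fixed-line test range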
ruipgil/TrackToTrip | tracktotrip/segment.py | remove_liers | def remove_liers(points):
""" Removes obvious noise points
Checks time consistency, removing points that appear out of order
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`list` of :obj:`Point`
"""
result = [points[0]]
    # check every interior point against its immediate neighbours
    for i in range(1, len(points) - 1):
prv = points[i-1]
crr = points[i]
nxt = points[i+1]
if prv.time <= crr.time and crr.time <= nxt.time:
result.append(crr)
result.append(points[-1])
return result | python | def remove_liers(points):
""" Removes obvious noise points
Checks time consistency, removing points that appear out of order
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`list` of :obj:`Point`
"""
result = [points[0]]
    # check every interior point against its immediate neighbours
    for i in range(1, len(points) - 1):
prv = points[i-1]
crr = points[i]
nxt = points[i+1]
if prv.time <= crr.time and crr.time <= nxt.time:
result.append(crr)
result.append(points[-1])
return result | Removes obvious noise points
Checks time consistency, removing points that appear out of order
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`list` of :obj:`Point` | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L20-L39 |
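A behaviour sketch for remove_liers, meant to be pasted alongside the function above; Point is stood in by a namedtuple, since only the .time attribute is consulted. The test compares raw neighbours, so the point following a spike is discarded as well.

from collections import namedtuple

P = namedtuple('P', 'time')

track = [P(0), P(1), P(2), P(9), P(3), P(4)]   # P(9) arrived out of order
print([p.time for p in remove_liers(track)])   # [0, 1, 2, 4]: the spike goes,
                                               # and so does P(3), whose
                                               # predecessor was the spike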
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.bounds | def bounds(self, thr=0, lower_index=0, upper_index=-1):
""" Computes the bounds of the segment, or part of it
Args:
lower_index (int, optional): Start index. Defaults to 0
            upper_index (int, optional): End index. Defaults to -1
Returns:
:obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that
(min_lat, min_lon, max_lat, max_lon)
"""
points = self.points[lower_index:upper_index]
min_lat = float("inf")
min_lon = float("inf")
max_lat = -float("inf")
max_lon = -float("inf")
for point in points:
min_lat = min(min_lat, point.lat)
min_lon = min(min_lon, point.lon)
max_lat = max(max_lat, point.lat)
max_lon = max(max_lon, point.lon)
return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr) | python | def bounds(self, thr=0, lower_index=0, upper_index=-1):
""" Computes the bounds of the segment, or part of it
Args:
lower_index (int, optional): Start index. Defaults to 0
            upper_index (int, optional): End index. Defaults to -1
Returns:
:obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that
(min_lat, min_lon, max_lat, max_lon)
"""
points = self.points[lower_index:upper_index]
min_lat = float("inf")
min_lon = float("inf")
max_lat = -float("inf")
max_lon = -float("inf")
for point in points:
min_lat = min(min_lat, point.lat)
min_lon = min(min_lon, point.lon)
max_lat = max(max_lat, point.lat)
max_lon = max(max_lon, point.lon)
return (min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr) | Computes the bounds of the segment, or part of it
Args:
lower_index (int, optional): Start index. Defaults to 0
            upper_index (int, optional): End index. Defaults to -1
Returns:
:obj:`tuple` of :obj:`float`: Bounds of the (sub)segment, such that
(min_lat, min_lon, max_lat, max_lon) | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L65-L88 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.smooth | def smooth(self, noise, strategy=INVERSE_STRATEGY):
""" In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
            strategy (int): Strategy to use: smooth.INVERSE_STRATEGY,
                smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment`
"""
if strategy is INVERSE_STRATEGY:
self.points = with_inverse(self.points, noise)
elif strategy is EXTRAPOLATE_STRATEGY:
self.points = with_extrapolation(self.points, noise, 30)
elif strategy is NO_STRATEGY:
self.points = with_no_strategy(self.points, noise)
return self | python | def smooth(self, noise, strategy=INVERSE_STRATEGY):
""" In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
            strategy (int): Strategy to use: smooth.INVERSE_STRATEGY,
                smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment`
"""
if strategy is INVERSE_STRATEGY:
self.points = with_inverse(self.points, noise)
elif strategy is EXTRAPOLATE_STRATEGY:
self.points = with_extrapolation(self.points, noise, 30)
elif strategy is NO_STRATEGY:
self.points = with_no_strategy(self.points, noise)
return self | In-place smoothing
See smooth_segment function
Args:
noise (float): Noise expected
            strategy (int): Strategy to use: smooth.INVERSE_STRATEGY,
                smooth.EXTRAPOLATE_STRATEGY or smooth.NO_STRATEGY
Returns:
:obj:`Segment` | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L101-L119 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.simplify | def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
"""
if topology_only:
self.points = drp(self.points, eps)
else:
self.points = spt(self.points, max_dist_error, max_speed_error)
return self | python | def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment`
"""
if topology_only:
self.points = drp(self.points, eps)
else:
self.points = spt(self.points, max_dist_error, max_speed_error)
return self | In-place segment simplification
See `drp` and `compression` modules
Args:
eps (float): Distance threshold for the `drp` function
max_dist_error (float): Max distance error, in meters
max_speed_error (float): Max speed error, in km/h
topology_only (bool, optional): True to only keep topology, not considering
times when simplifying. Defaults to False.
Returns:
:obj:`Segment` | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L134-L152 |
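A self-contained Ramer-Douglas-Peucker sketch of the topology-only branch above; the real drp works on Point objects, so plain (x, y) tuples stand in here.

import math

def perp_distance(p, a, b):
    # perpendicular distance from p to the line through a and b
    if a == b:
        return math.dist(p, a)
    (x, y), (x1, y1), (x2, y2) = p, a, b
    return abs((x2 - x1) * (y1 - y) - (x1 - x) * (y2 - y1)) / math.dist(a, b)

def rdp(points, eps):
    # find the point farthest from the chord between the endpoints
    dmax, index = 0.0, 0
    for i in range(1, len(points) - 1):
        d = perp_distance(points[i], points[0], points[-1])
        if d > dmax:
            dmax, index = d, i
    if dmax > eps:   # keep it and simplify both halves recursively
        return rdp(points[:index + 1], eps)[:-1] + rdp(points[index:], eps)
    return [points[0], points[-1]]

line = [(0, 0), (1, 0.1), (2, -0.1), (3, 5), (4, 6.5), (5, 8)]
print(rdp(line, 1.0))   # corner points survive, near-collinear ones drop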
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.compute_metrics | def compute_metrics(self):
""" Computes metrics for each point
Returns:
:obj:`Segment`: self
"""
for prev, point in pairwise(self.points):
point.compute_metrics(prev)
return self | python | def compute_metrics(self):
""" Computes metrics for each point
Returns:
:obj:`Segment`: self
"""
for prev, point in pairwise(self.points):
point.compute_metrics(prev)
return self | Computes metrics for each point
Returns:
:obj:`Segment`: self | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L154-L162 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.infer_location | def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
"""In-place location inferring
See infer_location function
Returns:
:obj:`Segment`: self
"""
self.location_from = infer_location(
self.points[0],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
self.location_to = infer_location(
self.points[-1],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
return self | python | def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
"""In-place location inferring
See infer_location function
Returns:
:obj:`Segment`: self
"""
self.location_from = infer_location(
self.points[0],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
self.location_to = infer_location(
self.points[-1],
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
return self | In-place location inferring
See infer_location function
Returns:
:obj:`Segment`: self | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L164-L201 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.infer_transportation_mode | def infer_transportation_mode(self, clf, min_time):
"""In-place transportation mode inferring
See infer_transportation_mode function
Returns:
:obj:`Segment`: self
"""
self.transportation_modes = speed_clustering(clf, self.points, min_time)
return self | python | def infer_transportation_mode(self, clf, min_time):
"""In-place transportation mode inferring
See infer_transportation_mode function
Returns:
:obj:`Segment`: self
"""
self.transportation_modes = speed_clustering(clf, self.points, min_time)
return self | In-place transportation mode inferring
See infer_transportation_mode function
Returns:
:obj:`Segment`: self | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L203-L213 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.merge_and_fit | def merge_and_fit(self, segment):
""" Merges another segment with this one, ordering the points based on a
distance heuristic
Args:
segment (:obj:`Segment`): Segment to merge with
Returns:
:obj:`Segment`: self
"""
self.points = sort_segment_points(self.points, segment.points)
return self | python | def merge_and_fit(self, segment):
""" Merges another segment with this one, ordering the points based on a
distance heuristic
Args:
segment (:obj:`Segment`): Segment to merge with
Returns:
:obj:`Segment`: self
"""
self.points = sort_segment_points(self.points, segment.points)
return self | Merges another segment with this one, ordering the points based on a
distance heuristic
Args:
segment (:obj:`Segment`): Segment to merge with
Returns:
:obj:`Segment`: self | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L215-L225 |
ruipgil/TrackToTrip | tracktotrip/segment.py | Segment.closest_point_to | def closest_point_to(self, point, thr=20.0):
""" Finds the closest point in the segment to a given point
Args:
point (:obj:`Point`)
thr (float, optional): Distance threshold, in meters, to be considered
the same point. Defaults to 20.0
Returns:
            (int, Point): Index of the closest point and the point itself; (-1, None) if no point is within the threshold. The point may be interpolated along the segment rather than an existing vertex
"""
i = 0
point_arr = point.gen2arr()
def closest_in_line(pointA, pointB):
temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)
return Point(temp[1], temp[0], None)
for (p_a, p_b) in pairwise(self.points):
candidate = closest_in_line(p_a, p_b)
if candidate.distance(point) <= thr:
if p_a.distance(point) <= thr:
return i, p_a
elif p_b.distance(point) <= thr:
return i + 1, p_b
else:
return i, candidate
i = i + 1
return -1, None | python | def closest_point_to(self, point, thr=20.0):
""" Finds the closest point in the segment to a given point
Args:
point (:obj:`Point`)
thr (float, optional): Distance threshold, in meters, to be considered
the same point. Defaults to 20.0
Returns:
            (int, Point): Index of the closest point and the point itself; (-1, None) if no point is within the threshold. The point may be interpolated along the segment rather than an existing vertex
"""
i = 0
point_arr = point.gen2arr()
def closest_in_line(pointA, pointB):
temp = closest_point(pointA.gen2arr(), pointB.gen2arr(), point_arr)
return Point(temp[1], temp[0], None)
for (p_a, p_b) in pairwise(self.points):
candidate = closest_in_line(p_a, p_b)
if candidate.distance(point) <= thr:
if p_a.distance(point) <= thr:
return i, p_a
elif p_b.distance(point) <= thr:
return i + 1, p_b
else:
return i, candidate
i = i + 1
return -1, None | Finds the closest point in the segment to a given point
Args:
point (:obj:`Point`)
thr (float, optional): Distance threshold, in meters, to be considered
the same point. Defaults to 20.0
Returns:
            (int, Point): Index of the closest point and the point itself; (-1, None) if no point is within the threshold. The point may be interpolated along the segment rather than an existing vertex | https://github.com/ruipgil/TrackToTrip/blob/5537c14ee9748091b5255b658ab528e1d6227f99/tracktotrip/segment.py#L227-L255 |
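A NumPy sketch of the closest_point helper the method above relies on (the real implementation lives elsewhere in TrackToTrip): project p onto the segment a-b and clamp the projection to the segment's extent.

import numpy as np

def closest_point(a, b, p):
    a, b, p = np.asarray(a), np.asarray(b), np.asarray(p)
    ab = b - a
    denom = np.dot(ab, ab)
    if denom == 0:
        return a                                  # degenerate segment
    t = np.clip(np.dot(p - a, ab) / denom, 0.0, 1.0)
    return a + t * ab

print(closest_point([0, 0], [10, 0], [4, 3]))     # -> [4. 0.]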