repository_name | func_path_in_repository | func_name | whole_func_string | language | func_code_string | func_documentation_string | func_code_url
---|---|---|---|---|---|---|---|
spencerahill/aospy | aospy/calc.py | _add_metadata_as_attrs_da | def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
"""Add metadata attributes to DataArray"""
if dtype_out_vert == 'vert_int':
if units != '':
units = '(vertical integral of {0}: {0} kg m^-2)'.format(units)
else:
units = '(vertical integral of quantity with unspecified units)'
data.attrs['units'] = units
data.attrs['description'] = description
return data | python | def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):
"""Add metadata attributes to DataArray"""
if dtype_out_vert == 'vert_int':
if units != '':
units = '(vertical integral of {0}: {0} kg m^-2)'.format(units)
else:
units = '(vertical integral of quantity with unspecified units)'
data.attrs['units'] = units
data.attrs['description'] = description
return data | Add metadata attributes to DataArray | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L606-L615 |
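A minimal usage sketch for `_add_metadata_as_attrs_da` (toy data; the variable names and values here are illustrative, not from aospy):

```python
import xarray as xr

# Toy DataArray standing in for a computed aospy result.
data = xr.DataArray([1.0, 2.0], dims=['time'], name='precip')

# For a vertically integrated output, the units string gets wrapped.
out = _add_metadata_as_attrs_da(data, 'kg m^-2 s^-1',
                                'precipitation rate', 'vert_int')
print(out.attrs['units'])        # units string now notes the vertical integral
print(out.attrs['description'])  # 'precipitation rate'
```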
spencerahill/aospy | aospy/calc.py | Calc._dir_out | def _dir_out(self):
"""Create string of the data directory to save individual .nc files."""
return os.path.join(self.proj.direc_out, self.proj.name,
self.model.name, self.run.name, self.name) | python | def _dir_out(self):
"""Create string of the data directory to save individual .nc files."""
return os.path.join(self.proj.direc_out, self.proj.name,
self.model.name, self.run.name, self.name) | Create string of the data directory to save individual .nc files. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L67-L70 |
spencerahill/aospy | aospy/calc.py | Calc._dir_tar_out | def _dir_tar_out(self):
"""Create string of the data directory to store a tar file."""
return os.path.join(self.proj.tar_direc_out, self.proj.name,
self.model.name, self.run.name) | python | def _dir_tar_out(self):
"""Create string of the data directory to store a tar file."""
return os.path.join(self.proj.tar_direc_out, self.proj.name,
self.model.name, self.run.name) | Create string of the data directory to store a tar file. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L72-L75 |
spencerahill/aospy | aospy/calc.py | Calc._file_name | def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year = utils.times.infer_year(self.end_date)
yr_lbl = utils.io.yr_label((start_year, end_year))
return '.'.join(
[self.name, out_lbl, in_lbl, self.model.name,
self.run.name, yr_lbl, extension]
).replace('..', '.') | python | def _file_name(self, dtype_out_time, extension='nc'):
"""Create the name of the aospy file."""
if dtype_out_time is None:
dtype_out_time = ''
out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,
dtype_vert=self.dtype_out_vert)
in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,
self.dtype_in_vert)
start_year = utils.times.infer_year(self.start_date)
end_year = utils.times.infer_year(self.end_date)
yr_lbl = utils.io.yr_label((start_year, end_year))
return '.'.join(
[self.name, out_lbl, in_lbl, self.model.name,
self.run.name, yr_lbl, extension]
).replace('..', '.') | Create the name of the aospy file. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L77-L91 |
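For concreteness, the join-then-collapse idiom at the end of `_file_name` handles empty label components; a standalone sketch with made-up labels:

```python
# Empty components would otherwise leave consecutive dots in the name.
parts = ['precip', 'ann.av', '', 'model1', 'run1', '1979-1983', 'nc']
name = '.'.join(parts).replace('..', '.')
print(name)  # precip.ann.av.model1.run1.1979-1983.nc
```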
spencerahill/aospy | aospy/calc.py | Calc._print_verbose | def _print_verbose(*args):
"""Print diagnostic message."""
try:
return '{0} {1} ({2})'.format(args[0], args[1], ctime())
except IndexError:
return '{0} ({1})'.format(args[0], ctime()) | python | def _print_verbose(*args):
"""Print diagnostic message."""
try:
return '{0} {1} ({2})'.format(args[0], args[1], ctime())
except IndexError:
return '{0} ({1})'.format(args[0], ctime()) | Return a timestamped diagnostic message. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L100-L105 |
spencerahill/aospy | aospy/calc.py | Calc._to_desired_dates | def _to_desired_dates(self, arr):
"""Restrict the xarray DataArray or Dataset to the desired months."""
times = utils.times.extract_months(
arr[internal_names.TIME_STR], self.months
)
return arr.sel(time=times) | python | def _to_desired_dates(self, arr):
"""Restrict the xarray DataArray or Dataset to the desired months."""
times = utils.times.extract_months(
arr[internal_names.TIME_STR], self.months
)
return arr.sel(time=times) | Restrict the xarray DataArray or Dataset to the desired months. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L254-L259 |
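A sketch of the same month restriction using plain xarray datetime accessors instead of the aospy helper (illustrative only):

```python
import numpy as np
import pandas as pd
import xarray as xr

times = pd.date_range('2000-01-01', '2000-12-31', freq='MS')
arr = xr.DataArray(np.arange(12), coords={'time': times}, dims='time')

# Keep only June-July-August, analogous to _to_desired_dates.
mask = arr['time'].dt.month.isin([6, 7, 8])
jja = arr[mask]
print(jja['time'].dt.month.values)  # [6 7 8]
```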
spencerahill/aospy | aospy/calc.py | Calc._add_grid_attributes | def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds | python | def _add_grid_attributes(self, ds):
"""Add model grid attributes to a dataset"""
for name_int, names_ext in self._grid_attrs.items():
ds_coord_name = set(names_ext).intersection(set(ds.coords) |
set(ds.data_vars))
model_attr = getattr(self.model, name_int, None)
if ds_coord_name and (model_attr is not None):
# Force coords to have desired name.
ds = ds.rename({list(ds_coord_name)[0]: name_int})
ds = ds.set_coords(name_int)
if not np.array_equal(ds[name_int], model_attr):
if np.allclose(ds[name_int], model_attr):
msg = ("Values for '{0}' are nearly (but not exactly) "
"the same in the Run {1} and the Model {2}. "
"Therefore replacing Run's values with the "
"model's.".format(name_int, self.run,
self.model))
logging.info(msg)
ds[name_int].values = model_attr.values
else:
msg = ("Model coordinates for '{0}' do not match those"
" in Run: {1} vs. {2}"
"".format(name_int, ds[name_int], model_attr))
logging.info(msg)
else:
# Bring in coord from model object if it exists.
ds = ds.load()
if model_attr is not None:
ds[name_int] = model_attr
ds = ds.set_coords(name_int)
if (self.dtype_in_vert == 'pressure' and
internal_names.PLEVEL_STR in ds.coords):
self.pressure = ds.level
return ds | Add model grid attributes to a dataset | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L261-L295 |
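The coordinate-reconciliation rule above, isolated with plain numpy arrays (a sketch, not aospy code):

```python
import numpy as np

run_lat = np.array([0.0, 1.0, 2.0])
model_lat = run_lat + 1e-9  # nearly identical, e.g. differing precision

if not np.array_equal(run_lat, model_lat):
    if np.allclose(run_lat, model_lat):
        # Near match: prefer the Model's values, as above.
        run_lat = model_lat.copy()
    else:
        print('coordinate mismatch:', run_lat, model_lat)
```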
spencerahill/aospy | aospy/calc.py | Calc._get_input_data | def _get_input_data(self, var, start_date, end_date):
"""Get the data for a single variable over the desired date range."""
logging.info(self._print_verbose("Getting input data:", var))
if isinstance(var, (float, int)):
return var
else:
cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
and var.def_vert and
self.dtype_in_vert == internal_names.ETA_STR)
data = self.data_loader.recursively_compute_variable(
var, start_date, end_date, self.time_offset, self.model,
**self.data_loader_attrs)
name = data.name
data = self._add_grid_attributes(data.to_dataset(name=data.name))
data = data[name]
if cond_pfull:
try:
self.pfull_coord = data[internal_names.PFULL_STR]
except KeyError:
pass
# Force all data to be at full pressure levels, not half levels.
bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and
var.def_vert == internal_names.PHALF_STR)
if bool_to_pfull:
data = utils.vertcoord.to_pfull_from_phalf(data,
self.pfull_coord)
if var.def_time:
# Restrict to the desired dates within each year.
if self.dtype_in_time != 'av':
return self._to_desired_dates(data)
else:
return data | python | def _get_input_data(self, var, start_date, end_date):
"""Get the data for a single variable over the desired date range."""
logging.info(self._print_verbose("Getting input data:", var))
if isinstance(var, (float, int)):
return var
else:
cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))
and var.def_vert and
self.dtype_in_vert == internal_names.ETA_STR)
data = self.data_loader.recursively_compute_variable(
var, start_date, end_date, self.time_offset, self.model,
**self.data_loader_attrs)
name = data.name
data = self._add_grid_attributes(data.to_dataset(name=data.name))
data = data[name]
if cond_pfull:
try:
self.pfull_coord = data[internal_names.PFULL_STR]
except KeyError:
pass
# Force all data to be at full pressure levels, not half levels.
bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and
var.def_vert == internal_names.PHALF_STR)
if bool_to_pfull:
data = utils.vertcoord.to_pfull_from_phalf(data,
self.pfull_coord)
if var.def_time:
# Restrict to the desired dates within each year.
if self.dtype_in_time != 'av':
return self._to_desired_dates(data)
else:
return data | Get the data for a single variable over the desired date range. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L297-L329 |
spencerahill/aospy | aospy/calc.py | Calc._get_all_data | def _get_all_data(self, start_date, end_date):
"""Get the needed data from all of the vars in the calculation."""
return [self._get_input_data(var, start_date, end_date)
for var in _replace_pressure(self.variables,
self.dtype_in_vert)] | python | def _get_all_data(self, start_date, end_date):
"""Get the needed data from all of the vars in the calculation."""
return [self._get_input_data(var, start_date, end_date)
for var in _replace_pressure(self.variables,
self.dtype_in_vert)] | Get the needed data from all of the vars in the calculation. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L331-L335 |
spencerahill/aospy | aospy/calc.py | Calc._compute | def _compute(self, data):
"""Perform the calculation."""
local_ts = self._local_ts(*data)
dt = local_ts[internal_names.TIME_WEIGHTS_STR]
# Convert dt to units of days to prevent overflow
dt = dt / np.timedelta64(1, 'D')
return local_ts, dt | python | def _compute(self, data):
"""Perform the calculation."""
local_ts = self._local_ts(*data)
dt = local_ts[internal_names.TIME_WEIGHTS_STR]
# Convert dt to units of days to prevent overflow
dt = dt / np.timedelta64(1, 'D')
return local_ts, dt | Perform the calculation. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L341-L347 |
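The `dt` conversion above leans on numpy semantics: dividing a timedelta64 array by a unit timedelta yields plain floats in that unit. A standalone check:

```python
import numpy as np

dt = np.array([np.timedelta64(6, 'h'), np.timedelta64(36, 'h')])
days = dt / np.timedelta64(1, 'D')
print(days)  # [0.25 1.5]
```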
spencerahill/aospy | aospy/calc.py | Calc._compute_full_ts | def _compute_full_ts(self, data):
"""Perform calculation and create yearly timeseries at each point."""
# Get results at each desired timestep and spatial point.
full_ts, dt = self._compute(data)
# Vertically integrate.
vert_types = ('vert_int', 'vert_av')
if self.dtype_out_vert in vert_types and self.var.def_vert:
dp = self._get_input_data(_DP_VARS[self.dtype_in_vert],
self.start_date, self.end_date)
full_ts = utils.vertcoord.int_dp_g(full_ts, dp)
if self.dtype_out_vert == 'vert_av':
ps = self._get_input_data(utils.vertcoord.ps,
self.start_date, self.end_date)
full_ts *= (GRAV_EARTH / ps)
return full_ts, dt | python | def _compute_full_ts(self, data):
"""Perform calculation and create yearly timeseries at each point."""
# Get results at each desired timestep and spatial point.
full_ts, dt = self._compute(data)
# Vertically integrate.
vert_types = ('vert_int', 'vert_av')
if self.dtype_out_vert in vert_types and self.var.def_vert:
dp = self._get_input_data(_DP_VARS[self.dtype_in_vert],
self.start_date, self.end_date)
full_ts = utils.vertcoord.int_dp_g(full_ts, dp)
if self.dtype_out_vert == 'vert_av':
ps = self._get_input_data(utils.vertcoord.ps,
self.start_date, self.end_date)
full_ts *= (GRAV_EARTH / ps)
return full_ts, dt | Perform calculation and create yearly timeseries at each point. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L349-L363 |
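The vertical integral here is the mass-weighted column integral ∫ x dp / g; a minimal numpy sketch of what `int_dp_g` is assumed to compute (the real aospy helper handles xarray coordinates):

```python
import numpy as np

GRAV = 9.81  # m s^-2, standing in for aospy's GRAV_EARTH

def int_dp_g(arr, dp):
    """Column integral: sum over levels of arr * dp, divided by g (sketch)."""
    return (arr * dp).sum(axis=0) / GRAV

x = np.array([1.0, 2.0, 3.0])            # quantity on 3 model levels
dp = np.array([5000.0, 5000.0, 5000.0])  # layer thicknesses in Pa
print(int_dp_g(x, dp))  # ~3058.1, in the quantity's units times kg m^-2
```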
spencerahill/aospy | aospy/calc.py | Calc._full_to_yearly_ts | def _full_to_yearly_ts(self, arr, dt):
"""Average the full timeseries within each year."""
time_defined = self.def_time and not ('av' in self.dtype_in_time)
if time_defined:
arr = utils.times.yearly_average(arr, dt)
return arr | python | def _full_to_yearly_ts(self, arr, dt):
"""Average the full timeseries within each year."""
time_defined = self.def_time and not ('av' in self.dtype_in_time)
if time_defined:
arr = utils.times.yearly_average(arr, dt)
return arr | Average the full timeseries within each year. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L365-L370 |
spencerahill/aospy | aospy/calc.py | Calc._time_reduce | def _time_reduce(self, arr, reduction):
"""Perform the specified time reduction on a local time-series."""
if self.dtype_in_time == 'av' or not self.def_time:
return arr
reductions = {
'ts': lambda xarr: xarr,
'av': lambda xarr: xarr.mean(internal_names.YEAR_STR),
'std': lambda xarr: xarr.std(internal_names.YEAR_STR),
}
try:
return reductions[reduction](arr)
except KeyError:
raise ValueError("Specified time-reduction method '{}' is not "
"supported".format(reduction)) | python | def _time_reduce(self, arr, reduction):
"""Perform the specified time reduction on a local time-series."""
if self.dtype_in_time == 'av' or not self.def_time:
return arr
reductions = {
'ts': lambda xarr: xarr,
'av': lambda xarr: xarr.mean(internal_names.YEAR_STR),
'std': lambda xarr: xarr.std(internal_names.YEAR_STR),
}
try:
return reductions[reduction](arr)
except KeyError:
raise ValueError("Specified time-reduction method '{}' is not "
"supported".format(reduction)) | Perform the specified time reduction on a local time-series. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L372-L385 |
spencerahill/aospy | aospy/calc.py | Calc.region_calcs | def region_calcs(self, arr, func):
"""Perform a calculation for all regions."""
# Get pressure values for data output on hybrid vertical coordinates.
bool_pfull = (self.def_vert and self.dtype_in_vert ==
internal_names.ETA_STR and self.dtype_out_vert is False)
if bool_pfull:
pfull_data = self._get_input_data(_P_VARS[self.dtype_in_vert],
self.start_date,
self.end_date)
pfull = self._full_to_yearly_ts(
pfull_data, arr[internal_names.TIME_WEIGHTS_STR]
).rename('pressure')
# Loop over the regions, performing the calculation.
reg_dat = {}
for reg in self.region:
# Just pass along the data if averaged already.
if 'av' in self.dtype_in_time:
data_out = reg.ts(arr)
# Otherwise perform the calculation.
else:
method = getattr(reg, func)
data_out = method(arr)
if bool_pfull:
# Don't apply e.g. standard deviation to coordinates.
if func not in ['av', 'ts']:
method = reg.ts
# Convert Pa to hPa
coord = method(pfull) * 1e-2
data_out = data_out.assign_coords(
**{reg.name + '_pressure': coord}
)
reg_dat.update(**{reg.name: data_out})
return xr.Dataset(reg_dat) | python | def region_calcs(self, arr, func):
"""Perform a calculation for all regions."""
# Get pressure values for data output on hybrid vertical coordinates.
bool_pfull = (self.def_vert and self.dtype_in_vert ==
internal_names.ETA_STR and self.dtype_out_vert is False)
if bool_pfull:
pfull_data = self._get_input_data(_P_VARS[self.dtype_in_vert],
self.start_date,
self.end_date)
pfull = self._full_to_yearly_ts(
pfull_data, arr[internal_names.TIME_WEIGHTS_STR]
).rename('pressure')
# Loop over the regions, performing the calculation.
reg_dat = {}
for reg in self.region:
# Just pass along the data if averaged already.
if 'av' in self.dtype_in_time:
data_out = reg.ts(arr)
# Otherwise perform the calculation.
else:
method = getattr(reg, func)
data_out = method(arr)
if bool_pfull:
# Don't apply e.g. standard deviation to coordinates.
if func not in ['av', 'ts']:
method = reg.ts
# Convert Pa to hPa
coord = method(pfull) * 1e-2
data_out = data_out.assign_coords(
**{reg.name + '_pressure': coord}
)
reg_dat.update(**{reg.name: data_out})
return xr.Dataset(reg_dat) | Perform a calculation for all regions. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L387-L419 |
spencerahill/aospy | aospy/calc.py | Calc._apply_all_time_reductions | def _apply_all_time_reductions(self, data):
"""Apply all requested time reductions to the data."""
logging.info(self._print_verbose("Applying desired time-"
"reduction methods."))
reduc_specs = [r.split('.') for r in self.dtype_out_time]
reduced = {}
for reduc, specs in zip(self.dtype_out_time, reduc_specs):
func = specs[-1]
if 'reg' in specs:
reduced.update({reduc: self.region_calcs(data, func)})
else:
reduced.update({reduc: self._time_reduce(data, func)})
return OrderedDict(sorted(reduced.items(), key=lambda t: t[0])) | python | def _apply_all_time_reductions(self, data):
"""Apply all requested time reductions to the data."""
logging.info(self._print_verbose("Applying desired time-"
"reduction methods."))
reduc_specs = [r.split('.') for r in self.dtype_out_time]
reduced = {}
for reduc, specs in zip(self.dtype_out_time, reduc_specs):
func = specs[-1]
if 'reg' in specs:
reduced.update({reduc: self.region_calcs(data, func)})
else:
reduced.update({reduc: self._time_reduce(data, func)})
return OrderedDict(sorted(reduced.items(), key=lambda t: t[0])) | Apply all requested time reductions to the data. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L421-L433 |
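The reduction specs are dot-delimited strings; 'reg' anywhere in a spec routes to the regional path, and the final component names the reduction. For example:

```python
for reduc in ['av', 'reg.av', 'reg.std']:
    specs = reduc.split('.')
    func = specs[-1]
    print(reduc, '->', func, '(regional)' if 'reg' in specs else '(gridded)')
# av -> av (gridded)
# reg.av -> av (regional)
# reg.std -> std (regional)
```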
spencerahill/aospy | aospy/calc.py | Calc.compute | def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for dtype_time, data in reduced.items():
data = _add_metadata_as_attrs(data, self.var.units,
self.var.description,
self.dtype_out_vert)
self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
save_files=True, write_to_tar=write_to_tar)
return self | python | def compute(self, write_to_tar=True):
"""Perform all desired calculations on the data and save externally."""
data = self._get_all_data(self.start_date, self.end_date)
logging.info('Computing timeseries for {0} -- '
'{1}.'.format(self.start_date, self.end_date))
full, full_dt = self._compute_full_ts(data)
full_out = self._full_to_yearly_ts(full, full_dt)
reduced = self._apply_all_time_reductions(full_out)
logging.info("Writing desired gridded outputs to disk.")
for dtype_time, data in reduced.items():
data = _add_metadata_as_attrs(data, self.var.units,
self.var.description,
self.dtype_out_vert)
self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
save_files=True, write_to_tar=write_to_tar)
return self | Perform all desired calculations on the data and save externally. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L435-L450 |
spencerahill/aospy | aospy/calc.py | Calc._save_files | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') | python | def _save_files(self, data, dtype_out_time):
"""Save the data to netcdf files in direc_out."""
path = self.path_out[dtype_out_time]
if not os.path.isdir(self.dir_out):
os.makedirs(self.dir_out)
if 'reg' in dtype_out_time:
try:
reg_data = xr.open_dataset(path)
except (EOFError, RuntimeError, IOError):
reg_data = xr.Dataset()
reg_data.update(data)
data_out = reg_data
else:
data_out = data
if isinstance(data_out, xr.DataArray):
data_out = xr.Dataset({self.name: data_out})
data_out.to_netcdf(path, engine='netcdf4', format='NETCDF3_64BIT') | Save the data to netcdf files in direc_out. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L452-L468 |
spencerahill/aospy | aospy/calc.py | Calc._write_to_tar | def _write_to_tar(self, dtype_out_time):
"""Add the data to the tar file in tar_out_direc."""
# When submitted in parallel and the directory does not exist yet
# multiple processes may try to create a new directory; this leads
# to an OSError for all processes that tried to make the
# directory, but were later than the first.
try:
os.makedirs(self.dir_tar_out)
except OSError:
pass
# tarfile 'append' mode won't overwrite the old file, which is what we want to do.
# So open in 'read' mode, extract the file, and then delete it.
# But 'read' mode throws OSError if file doesn't exist: make it first.
utils.io.dmget([self.path_tar_out])
with tarfile.open(self.path_tar_out, 'a') as tar:
pass
with tarfile.open(self.path_tar_out, 'r') as tar:
old_data_path = os.path.join(self.dir_tar_out,
self.file_name[dtype_out_time])
try:
tar.extract(self.file_name[dtype_out_time],
path=old_data_path)
except KeyError:
pass
else:
# The os module treats files inside the archive as non-empty
# directories, so we can't use os.remove or os.rmdir.
shutil.rmtree(old_data_path)
retcode = subprocess.call([
"tar", "--delete", "--file={}".format(self.path_tar_out),
self.file_name[dtype_out_time]
])
if retcode:
msg = ("The 'tar' command to save your aospy output "
"exited with an error. Most likely, this is due "
"to using an old version of 'tar' (especially if "
"you are on a Mac). Consider installing a newer "
"version of 'tar' or disabling tar output by "
"setting `write_to_tar=False` in the "
"`calc_exec_options` argument of "
"`submit_mult_calcs`.")
logging.warn(msg)
with tarfile.open(self.path_tar_out, 'a') as tar:
tar.add(self.path_out[dtype_out_time],
arcname=self.file_name[dtype_out_time]) | python | def _write_to_tar(self, dtype_out_time):
"""Add the data to the tar file in tar_out_direc."""
# When submitted in parallel and the directory does not exist yet
# multiple processes may try to create a new directory; this leads
# to an OSError for all processes that tried to make the
# directory, but were later than the first.
try:
os.makedirs(self.dir_tar_out)
except OSError:
pass
# tarfile 'append' mode won't overwrite the old file, which is what we want to do.
# So open in 'read' mode, extract the file, and then delete it.
# But 'read' mode throws OSError if file doesn't exist: make it first.
utils.io.dmget([self.path_tar_out])
with tarfile.open(self.path_tar_out, 'a') as tar:
pass
with tarfile.open(self.path_tar_out, 'r') as tar:
old_data_path = os.path.join(self.dir_tar_out,
self.file_name[dtype_out_time])
try:
tar.extract(self.file_name[dtype_out_time],
path=old_data_path)
except KeyError:
pass
else:
# The os module treats files inside the archive as non-empty
# directories, so we can't use os.remove or os.rmdir.
shutil.rmtree(old_data_path)
retcode = subprocess.call([
"tar", "--delete", "--file={}".format(self.path_tar_out),
self.file_name[dtype_out_time]
])
if retcode:
msg = ("The 'tar' command to save your aospy output "
"exited with an error. Most likely, this is due "
"to using an old version of 'tar' (especially if "
"you are on a Mac). Consider installing a newer "
"version of 'tar' or disabling tar output by "
"setting `write_to_tar=False` in the "
"`calc_exec_options` argument of "
"`submit_mult_calcs`.")
logging.warn(msg)
with tarfile.open(self.path_tar_out, 'a') as tar:
tar.add(self.path_out[dtype_out_time],
arcname=self.file_name[dtype_out_time]) | Add the data to the tar file in tar_out_direc. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L470-L514 |
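The replace-a-member dance that `_write_to_tar` performs, reduced to its essentials (a sketch; it assumes GNU tar with `--delete` on the PATH, as the warning above notes):

```python
import subprocess
import tarfile

def replace_in_tar(tar_path, file_path, arcname):
    """Replace (or add) a single member of a tar archive (sketch)."""
    # Open in append mode first so the archive exists on disk.
    with tarfile.open(tar_path, 'a'):
        pass
    with tarfile.open(tar_path, 'r') as tar:
        if arcname in tar.getnames():
            # The tarfile module can't delete in place; shell out.
            subprocess.check_call(
                ['tar', '--delete', '--file={}'.format(tar_path), arcname])
    with tarfile.open(tar_path, 'a') as tar:
        tar.add(file_path, arcname=arcname)
```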
spencerahill/aospy | aospy/calc.py | Calc._update_data_out | def _update_data_out(self, data, dtype):
"""Append the data of the given dtype_out to the data_out attr."""
try:
self.data_out.update({dtype: data})
except AttributeError:
self.data_out = {dtype: data} | python | def _update_data_out(self, data, dtype):
"""Append the data of the given dtype_out to the data_out attr."""
try:
self.data_out.update({dtype: data})
except AttributeError:
self.data_out = {dtype: data} | Append the data of the given dtype_out to the data_out attr. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L516-L521 |
spencerahill/aospy | aospy/calc.py | Calc.save | def save(self, data, dtype_out_time, dtype_out_vert=False,
save_files=True, write_to_tar=False):
"""Save aospy data to data_out attr and to an external file."""
self._update_data_out(data, dtype_out_time)
if save_files:
self._save_files(data, dtype_out_time)
if write_to_tar and self.proj.tar_direc_out:
self._write_to_tar(dtype_out_time)
logging.info('\t{}'.format(self.path_out[dtype_out_time])) | python | def save(self, data, dtype_out_time, dtype_out_vert=False,
save_files=True, write_to_tar=False):
"""Save aospy data to data_out attr and to an external file."""
self._update_data_out(data, dtype_out_time)
if save_files:
self._save_files(data, dtype_out_time)
if write_to_tar and self.proj.tar_direc_out:
self._write_to_tar(dtype_out_time)
logging.info('\t{}'.format(self.path_out[dtype_out_time])) | Save aospy data to data_out attr and to an external file. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L523-L531 |
spencerahill/aospy | aospy/calc.py | Calc._load_from_disk | def _load_from_disk(self, dtype_out_time, dtype_out_vert=False,
region=False):
"""Load aospy data saved as netcdf files on the file system."""
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
# Use region-specific pressure values if available.
if (self.dtype_in_vert == internal_names.ETA_STR
and not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop([r for r in arr.coords.iterkeys()
if r not in (internal_names.PFULL_STR,
reg_pfull_str)])
# Rename pfull to pfull_ref always.
arr = arr.rename({internal_names.PFULL_STR:
internal_names.PFULL_STR + '_ref'})
# Rename region_pfull to pfull if it's there.
if hasattr(arr, reg_pfull_str):
return arr.rename({reg_pfull_str:
internal_names.PFULL_STR})
return arr
return arr
return ds[self.name] | python | def _load_from_disk(self, dtype_out_time, dtype_out_vert=False,
region=False):
"""Load aospy data saved as netcdf files on the file system."""
ds = xr.open_dataset(self.path_out[dtype_out_time])
if region:
arr = ds[region.name]
# Use region-specific pressure values if available.
if (self.dtype_in_vert == internal_names.ETA_STR
and not dtype_out_vert):
reg_pfull_str = region.name + '_pressure'
arr = arr.drop([r for r in arr.coords.iterkeys()
if r not in (internal_names.PFULL_STR,
reg_pfull_str)])
# Rename pfull to pfull_ref always.
arr = arr.rename({internal_names.PFULL_STR:
internal_names.PFULL_STR + '_ref'})
# Rename region_pfull to pfull if it's there.
if hasattr(arr, reg_pfull_str):
return arr.rename({reg_pfull_str:
internal_names.PFULL_STR})
return arr
return arr
return ds[self.name] | Load aospy data saved as netcdf files on the file system. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L533-L555 |
spencerahill/aospy | aospy/calc.py | Calc._load_from_tar | def _load_from_tar(self, dtype_out_time, dtype_out_vert=False):
"""Load data save in tarball form on the file system."""
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds = xr.open_dataset(
data_tar.extractfile(self.file_name[dtype_out_time])
)
return ds[self.name] | python | def _load_from_tar(self, dtype_out_time, dtype_out_vert=False):
"""Load data save in tarball form on the file system."""
path = os.path.join(self.dir_tar_out, 'data.tar')
utils.io.dmget([path])
with tarfile.open(path, 'r') as data_tar:
ds = xr.open_dataset(
data_tar.extractfile(self.file_name[dtype_out_time])
)
return ds[self.name] | Load data saved in tarball form on the file system. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L557-L565 |
spencerahill/aospy | aospy/calc.py | Calc.load | def load(self, dtype_out_time, dtype_out_vert=False, region=False,
plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
"dtype_out_vert={2}, and region="
"{3}".format(self, dtype_out_time, dtype_out_vert, region))
logging.info(msg + ' ({})'.format(ctime()))
# Grab from the object if it's there.
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
# Otherwise get from disk. Try scratch first, then archive.
try:
data = self._load_from_disk(dtype_out_time, dtype_out_vert,
region=region)
except IOError:
data = self._load_from_tar(dtype_out_time, dtype_out_vert)
# Copy the array to self.data_out for ease of future access.
self._update_data_out(data, dtype_out_time)
# Apply desired plotting/cleanup methods.
if mask_unphysical:
data = self.var.mask_unphysical(data)
if plot_units:
data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
return data | python | def load(self, dtype_out_time, dtype_out_vert=False, region=False,
plot_units=False, mask_unphysical=False):
"""Load the data from the object if possible or from disk."""
msg = ("Loading data from disk for object={0}, dtype_out_time={1}, "
"dtype_out_vert={2}, and region="
"{3}".format(self, dtype_out_time, dtype_out_vert, region))
logging.info(msg + ' ({})'.format(ctime()))
# Grab from the object if it's there.
try:
data = self.data_out[dtype_out_time]
except (AttributeError, KeyError):
# Otherwise get from disk. Try scratch first, then archive.
try:
data = self._load_from_disk(dtype_out_time, dtype_out_vert,
region=region)
except IOError:
data = self._load_from_tar(dtype_out_time, dtype_out_vert)
# Copy the array to self.data_out for ease of future access.
self._update_data_out(data, dtype_out_time)
# Apply desired plotting/cleanup methods.
if mask_unphysical:
data = self.var.mask_unphysical(data)
if plot_units:
data = self.var.to_plot_units(data, dtype_vert=dtype_out_vert)
return data | Load the data from the object if possible or from disk. | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/calc.py#L567-L591 |
spencerahill/aospy | aospy/examples/example_obj_lib.py | conv_precip_frac | def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total) | python | def conv_precip_frac(precip_largescale, precip_convective):
"""Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray
"""
total = total_precip(precip_largescale, precip_convective)
# Mask using xarray's `where` method to prevent divide-by-zero.
return precip_convective / total.where(total) | Fraction of total precip that is from convection parameterization.
Parameters
----------
precip_largescale, precip_convective : xarray.DataArrays
Precipitation from grid-scale condensation and from convective
parameterization, respectively.
Returns
-------
xarray.DataArray | https://github.com/spencerahill/aospy/blob/2f6e775b9b9956c54af117fdcdce2c87196afb6c/aospy/examples/example_obj_lib.py#L52-L67 |
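The `total.where(total)` trick above: `where` with a falsy (zero) value masks it to NaN, so the division yields NaN rather than raising or producing inf. A standalone check:

```python
import xarray as xr

conv = xr.DataArray([0.0, 1.0, 3.0])
ls = xr.DataArray([0.0, 1.0, 1.0])
total = conv + ls
frac = conv / total.where(total)  # zeros in total become NaN
print(frac.values)  # [ nan 0.5  0.75]
```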
aresch/rencode | rencode/rencode_orig.py | dumps | def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r) | python | def dumps(x, float_bits=DEFAULT_FLOAT_BITS):
"""
Dump data structure to str.
Here float_bits is either 32 or 64.
"""
with lock:
if float_bits == 32:
encode_func[float] = encode_float32
elif float_bits == 64:
encode_func[float] = encode_float64
else:
raise ValueError('Float bits (%d) is not 32 or 64' % float_bits)
r = []
encode_func[type(x)](x, r)
return b''.join(r) | Dump data structure to str.
Here float_bits is either 32 or 64. | https://github.com/aresch/rencode/blob/5c928f14567fabc9efb8bbb8ac5e0eef03c61541/rencode/rencode_orig.py#L404-L419 |
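A round-trip usage sketch for `dumps` (assuming the module's matching `loads` decoder):

```python
payload = [1, 2.5, u'spam', None]
blob = dumps(payload, float_bits=32)  # floats stored as 32 bits
print(loads(blob))                    # [1, 2.5, u'spam', None]
```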
pteichman/cobe | cobe/tokenizers.py | MegaHALTokenizer.join | def join(self, words):
"""Capitalize the first alpha character in the reply and the
first alpha character that follows one of [.?!] and a
space."""
chars = list(u"".join(words))
start = True
for i in xrange(len(chars)):
char = chars[i]
if char.isalpha():
if start:
chars[i] = char.upper()
else:
chars[i] = char.lower()
start = False
else:
if i > 2 and chars[i - 1] in ".?!" and char.isspace():
start = True
return u"".join(chars) | python | def join(self, words):
"""Capitalize the first alpha character in the reply and the
first alpha character that follows one of [.?!] and a
space."""
chars = list(u"".join(words))
start = True
for i in xrange(len(chars)):
char = chars[i]
if char.isalpha():
if start:
chars[i] = char.upper()
else:
chars[i] = char.lower()
start = False
else:
if i > 2 and chars[i - 1] in ".?!" and char.isspace():
start = True
return u"".join(chars) | Capitalize the first alpha character in the reply and the
first alpha character that follows one of [.?!] and a
space. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/tokenizers.py#L31-L51 |
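The capitalization rule above, reimplemented standalone (a Python 3 sketch, since the original uses `xrange`):

```python
def capitalize_reply(text):
    """Sketch of MegaHALTokenizer.join's casing rule on a plain string."""
    chars = list(text)
    start = True
    for i, char in enumerate(chars):
        if char.isalpha():
            chars[i] = char.upper() if start else char.lower()
            start = False
        elif i > 2 and chars[i - 1] in '.?!' and char.isspace():
            start = True
    return ''.join(chars)

print(capitalize_reply('hello THERE. how are you?'))
# Hello there. How are you?
```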
pteichman/cobe | cobe/brain.py | Brain.start_batch_learning | def start_batch_learning(self):
"""Begin a series of batch learn operations. Data will not be
committed to the database until stop_batch_learning is
called. Learn text using the normal learn(text) method."""
self._learning = True
self.graph.cursor().execute("PRAGMA journal_mode=memory")
self.graph.drop_reply_indexes() | python | def start_batch_learning(self):
"""Begin a series of batch learn operations. Data will not be
committed to the database until stop_batch_learning is
called. Learn text using the normal learn(text) method."""
self._learning = True
self.graph.cursor().execute("PRAGMA journal_mode=memory")
self.graph.drop_reply_indexes() | Begin a series of batch learn operations. Data will not be
committed to the database until stop_batch_learning is
called. Learn text using the normal learn(text) method. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L80-L87 |
pteichman/cobe | cobe/brain.py | Brain.stop_batch_learning | def stop_batch_learning(self):
"""Finish a series of batch learn operations."""
self._learning = False
self.graph.commit()
self.graph.cursor().execute("PRAGMA journal_mode=truncate")
self.graph.ensure_indexes() | python | def stop_batch_learning(self):
"""Finish a series of batch learn operations."""
self._learning = False
self.graph.commit()
self.graph.cursor().execute("PRAGMA journal_mode=truncate")
self.graph.ensure_indexes() | Finish a series of batch learn operations. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L89-L95 |
pteichman/cobe | cobe/brain.py | Brain.learn | def learn(self, text):
"""Learn a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
trace("Brain.learn_input_token_count", len(tokens))
self._learn_tokens(tokens) | python | def learn(self, text):
"""Learn a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
trace("Brain.learn_input_token_count", len(tokens))
self._learn_tokens(tokens) | Learn a string of text. If the input is not already
Unicode, it will be decoded as utf-8. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L114-L125 |
pteichman/cobe | cobe/brain.py | Brain._to_edges | def _to_edges(self, tokens):
"""This is an iterator that returns the nodes of our graph:
"This is a test" -> "None This" "This is" "is a" "a test" "test None"
Each is annotated with a boolean that tracks whether whitespace was
found between the two tokens."""
# prepend self.order Nones
chain = self._end_context + tokens + self._end_context
has_space = False
context = []
for i in xrange(len(chain)):
context.append(chain[i])
if len(context) == self.order:
if chain[i] == self.SPACE_TOKEN_ID:
context.pop()
has_space = True
continue
yield tuple(context), has_space
context.pop(0)
has_space = False | python | def _to_edges(self, tokens):
"""This is an iterator that returns the nodes of our graph:
"This is a test" -> "None This" "This is" "is a" "a test" "test None"
Each is annotated with a boolean that tracks whether whitespace was
found between the two tokens."""
# prepend self.order Nones
chain = self._end_context + tokens + self._end_context
has_space = False
context = []
for i in xrange(len(chain)):
context.append(chain[i])
if len(context) == self.order:
if chain[i] == self.SPACE_TOKEN_ID:
context.pop()
has_space = True
continue
yield tuple(context), has_space
context.pop(0)
has_space = False | This is an iterator that returns the nodes of our graph:
"This is a test" -> "None This" "This is" "is a" "a test" "test None"
Each is annotated with a boolean that tracks whether whitespace was
found between the two tokens. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L127-L152 |
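A Python 3 sketch of the sliding-window generator above, minus the space-token handling; the padding length is chosen to reproduce the docstring's example:

```python
def to_contexts(tokens, order=2):
    """Yield order-length context tuples, padded with None at both ends."""
    chain = [None] * (order - 1) + tokens + [None] * (order - 1)
    context = []
    for token in chain:
        context.append(token)
        if len(context) == order:
            yield tuple(context)
            context.pop(0)

print(list(to_contexts(['This', 'is', 'a', 'test'])))
# [(None, 'This'), ('This', 'is'), ('is', 'a'), ('a', 'test'), ('test', None)]
```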
pteichman/cobe | cobe/brain.py | Brain._to_graph | def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context | python | def _to_graph(self, contexts):
"""This is an iterator that returns each edge of our graph
with its two nodes"""
prev = None
for context in contexts:
if prev is None:
prev = context
continue
yield prev[0], context[1], context[0]
prev = context | This is an iterator that returns each edge of our graph
with its two nodes | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L154-L165 |
pteichman/cobe | cobe/brain.py | Brain.reply | def reply(self, text, loop_ms=500, max_len=None):
"""Reply to a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
input_ids = map(self.graph.get_token_by_text, tokens)
# filter out unknown words and non-words from the potential pivots
pivot_set = self._filter_pivots(input_ids)
# Conflate the known ids with the stems of their words
if self.stemmer is not None:
self._conflate_stems(pivot_set, tokens)
# If we didn't recognize any word tokens in the input, pick
# something random from the database and babble.
if len(pivot_set) == 0:
pivot_set = self._babble()
score_cache = {}
best_score = -1.0
best_reply = None
# Loop for approximately loop_ms milliseconds. This can either
# take more (if the first reply takes a long time to generate)
# or less (if the _generate_replies search ends early) time,
# but it should stay roughly accurate.
start = time.time()
end = start + loop_ms * 0.001
count = 0
all_replies = []
_start = time.time()
for edges, pivot_node in self._generate_replies(pivot_set):
reply = Reply(self.graph, tokens, input_ids, pivot_node, edges)
if max_len and self._too_long(max_len, reply):
continue
key = reply.edge_ids
if key not in score_cache:
with trace_us("Brain.evaluate_reply_us"):
score = self.scorer.score(reply)
score_cache[key] = score
else:
# skip scoring, we've already seen this reply
score = -1
if score > best_score:
best_reply = reply
best_score = score
# dump all replies to the console if debugging is enabled
if log.isEnabledFor(logging.DEBUG):
all_replies.append((score, reply))
count += 1
if time.time() > end:
break
if best_reply is None:
# we couldn't find any pivot words in _babble(), so we're
# working with an essentially empty brain. Use the classic
# MegaHAL reply:
return "I don't know enough to answer you yet!"
_time = time.time() - _start
self.scorer.end(best_reply)
if log.isEnabledFor(logging.DEBUG):
replies = [(score, reply.to_text())
for score, reply in all_replies]
replies.sort()
for score, text in replies:
log.debug("%f %s", score, text)
trace("Brain.reply_input_token_count", len(tokens))
trace("Brain.known_word_token_count", len(pivot_set))
trace("Brain.reply_us", _time)
trace("Brain.reply_count", count, _time)
trace("Brain.best_reply_score", int(best_score * 1000))
trace("Brain.best_reply_length", len(best_reply.edge_ids))
log.debug("made %d replies (%d unique) in %f seconds"
% (count, len(score_cache), _time))
if len(text) > 60:
msg = text[0:60] + "..."
else:
msg = text
log.info("[%s] %d %f", msg, count, best_score)
# look up the words for these tokens
with trace_us("Brain.reply_words_lookup_us"):
text = best_reply.to_text()
return text | python | def reply(self, text, loop_ms=500, max_len=None):
"""Reply to a string of text. If the input is not already
Unicode, it will be decoded as utf-8."""
if type(text) != types.UnicodeType:
# Assume that non-Unicode text is encoded as utf-8, which
# should be somewhat safe in the modern world.
text = text.decode("utf-8", "ignore")
tokens = self.tokenizer.split(text)
input_ids = map(self.graph.get_token_by_text, tokens)
# filter out unknown words and non-words from the potential pivots
pivot_set = self._filter_pivots(input_ids)
# Conflate the known ids with the stems of their words
if self.stemmer is not None:
self._conflate_stems(pivot_set, tokens)
# If we didn't recognize any word tokens in the input, pick
# something random from the database and babble.
if len(pivot_set) == 0:
pivot_set = self._babble()
score_cache = {}
best_score = -1.0
best_reply = None
# Loop for approximately loop_ms milliseconds. This can either
# take more (if the first reply takes a long time to generate)
# or less (if the _generate_replies search ends early) time,
# but it should stay roughly accurate.
start = time.time()
end = start + loop_ms * 0.001
count = 0
all_replies = []
_start = time.time()
for edges, pivot_node in self._generate_replies(pivot_set):
reply = Reply(self.graph, tokens, input_ids, pivot_node, edges)
if max_len and self._too_long(max_len, reply):
continue
key = reply.edge_ids
if key not in score_cache:
with trace_us("Brain.evaluate_reply_us"):
score = self.scorer.score(reply)
score_cache[key] = score
else:
# skip scoring, we've already seen this reply
score = -1
if score > best_score:
best_reply = reply
best_score = score
# dump all replies to the console if debugging is enabled
if log.isEnabledFor(logging.DEBUG):
all_replies.append((score, reply))
count += 1
if time.time() > end:
break
if best_reply is None:
# we couldn't find any pivot words in _babble(), so we're
# working with an essentially empty brain. Use the classic
# MegaHAL reply:
return "I don't know enough to answer you yet!"
_time = time.time() - _start
self.scorer.end(best_reply)
if log.isEnabledFor(logging.DEBUG):
replies = [(score, reply.to_text())
for score, reply in all_replies]
replies.sort()
for score, text in replies:
log.debug("%f %s", score, text)
trace("Brain.reply_input_token_count", len(tokens))
trace("Brain.known_word_token_count", len(pivot_set))
trace("Brain.reply_us", _time)
trace("Brain.reply_count", count, _time)
trace("Brain.best_reply_score", int(best_score * 1000))
trace("Brain.best_reply_length", len(best_reply.edge_ids))
log.debug("made %d replies (%d unique) in %f seconds"
% (count, len(score_cache), _time))
if len(text) > 60:
msg = text[0:60] + "..."
else:
msg = text
log.info("[%s] %d %f", msg, count, best_score)
# look up the words for these tokens
with trace_us("Brain.reply_words_lookup_us"):
text = best_reply.to_text()
return text | Reply to a string of text. If the input is not already
Unicode, it will be decoded as utf-8. | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L197-L303 |
pteichman/cobe | cobe/brain.py | Brain.init | def init(filename, order=3, tokenizer=None):
"""Initialize a brain. This brain's file must not already exist.
Keyword arguments:
order -- Order of the forward/reverse Markov chains (integer)
tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
for cobe.tokenizers for details. (string)"""
log.info("Initializing a cobe brain: %s" % filename)
if tokenizer is None:
tokenizer = "Cobe"
if tokenizer not in ("Cobe", "MegaHAL"):
log.info("Unknown tokenizer: %s. Using CobeTokenizer", tokenizer)
tokenizer = "Cobe"
graph = Graph(sqlite3.connect(filename))
with trace_us("Brain.init_time_us"):
graph.init(order, tokenizer) | python | def init(filename, order=3, tokenizer=None):
"""Initialize a brain. This brain's file must not already exist.
Keyword arguments:
order -- Order of the forward/reverse Markov chains (integer)
tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
for cobe.tokenizers for details. (string)"""
log.info("Initializing a cobe brain: %s" % filename)
if tokenizer is None:
tokenizer = "Cobe"
if tokenizer not in ("Cobe", "MegaHAL"):
log.info("Unknown tokenizer: %s. Using CobeTokenizer", tokenizer)
tokenizer = "Cobe"
graph = Graph(sqlite3.connect(filename))
with trace_us("Brain.init_time_us"):
graph.init(order, tokenizer) | Initialize a brain. This brain's file must not already exist.
Keyword arguments:
order -- Order of the forward/reverse Markov chains (integer)
tokenizer -- One of Cobe, MegaHAL (default Cobe). See documentation
for cobe.tokenizers for details. (string) | https://github.com/pteichman/cobe/blob/b0dc2a707035035b9a689105c8f833894fb59eb7/cobe/brain.py#L389-L408 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | connect_to_cli | def connect_to_cli(server=None, as_file=True, socket_timeout=1.0, attempts=5, retry_delay=0.3):
'''Returns connected CLI interface socket (as file object, unless as_file=False),
where one can send the same commands (as lines) as to the "pacmd" tool
or pulseaudio startup files (e.g. "default.pa").
"server" option can be specified to use non-standard unix socket path
(when passed absolute path string) or remote tcp socket,
when passed remote host address (to use default port) or (host, port) tuple.
Be sure to adjust "socket_timeout" option for tcp sockets over laggy internet.
Returned file object has line-buffered output,
so there should be no need to use flush() after every command.
Be sure to read from the socket line-by-line until
"### EOF" or timeout for commands that have output (e.g. "dump\\n").
If default server socket is used (i.e. not specified),
server pid will be signaled to load module-cli between connection attempts.
Completely separate protocol from the regular API, as wrapped by libpulse.
PulseError is raised on any failure.'''
import socket, errno, signal, time
s, n = None, attempts if attempts > 0 else None
try:
pid_path, sock_af, sock_t = None, socket.AF_UNIX, socket.SOCK_STREAM
if not server: server, pid_path = map(c.pa.runtime_path, ['cli', 'pid'])
else:
if not is_list(server):
server = c.force_str(server)
if not server.startswith('/'): server = server, 4712 # default port
if is_list(server):
try:
addrinfo = socket.getaddrinfo(
server[0], server[1], 0, sock_t, socket.IPPROTO_TCP )
if not addrinfo: raise socket.gaierror('No addrinfo for socket: {}'.format(server))
except (socket.gaierror, socket.error) as err:
raise PulseError( 'Failed to resolve socket parameters'
' (address, family) via getaddrinfo: {!r} - {} {}'.format(server, type(err), err) )
sock_af, sock_t, _, _, server = addrinfo[0]
s = socket.socket(sock_af, sock_t)
s.settimeout(socket_timeout)
while True:
ts = c.mono_time()
try: s.connect(server)
except socket.error as err:
if err.errno not in [errno.ECONNREFUSED, errno.ENOENT, errno.ECONNABORTED]: raise
else: break
if n:
n -= 1
if n <= 0: raise PulseError('Number of connection attempts ({}) exceeded'.format(attempts))
if pid_path:
with open(pid_path) as src: os.kill(int(src.read().strip()), signal.SIGUSR2)
time.sleep(max(0, retry_delay - (c.mono_time() - ts)))
if as_file: res = s.makefile('rw', 1)
else: res, s = s, None # to avoid closing this socket
return res
except Exception as err: # CallError, socket.error, IOError (pidfile), OSError (os.kill)
raise PulseError( 'Failed to connect to pulse'
' cli socket {!r}: {} {}'.format(server, type(err), err) )
finally:
if s: s.close() | python | def connect_to_cli(server=None, as_file=True, socket_timeout=1.0, attempts=5, retry_delay=0.3):
'''Returns connected CLI interface socket (as file object, unless as_file=False),
where one can send the same commands (as lines) as to the "pacmd" tool
or pulseaudio startup files (e.g. "default.pa").
"server" option can be specified to use non-standard unix socket path
(when passed absolute path string) or remote tcp socket,
when passed remote host address (to use default port) or (host, port) tuple.
Be sure to adjust "socket_timeout" option for tcp sockets over laggy internet.
Returned file object has line-buffered output,
so there should be no need to use flush() after every command.
Be sure to read from the socket line-by-line until
"### EOF" or timeout for commands that have output (e.g. "dump\\n").
If default server socket is used (i.e. not specified),
server pid will be signaled to load module-cli between connection attempts.
Completely separate protocol from the regular API, as wrapped by libpulse.
PulseError is raised on any failure.'''
import socket, errno, signal, time
s, n = None, attempts if attempts > 0 else None
try:
pid_path, sock_af, sock_t = None, socket.AF_UNIX, socket.SOCK_STREAM
if not server: server, pid_path = map(c.pa.runtime_path, ['cli', 'pid'])
else:
if not is_list(server):
server = c.force_str(server)
if not server.startswith('/'): server = server, 4712 # default port
if is_list(server):
try:
addrinfo = socket.getaddrinfo(
server[0], server[1], 0, sock_t, socket.IPPROTO_TCP )
if not addrinfo: raise socket.gaierror('No addrinfo for socket: {}'.format(server))
except (socket.gaierror, socket.error) as err:
raise PulseError( 'Failed to resolve socket parameters'
' (address, family) via getaddrinfo: {!r} - {} {}'.format(server, type(err), err) )
sock_af, sock_t, _, _, server = addrinfo[0]
s = socket.socket(sock_af, sock_t)
s.settimeout(socket_timeout)
while True:
ts = c.mono_time()
try: s.connect(server)
except socket.error as err:
if err.errno not in [errno.ECONNREFUSED, errno.ENOENT, errno.ECONNABORTED]: raise
else: break
if n:
n -= 1
if n <= 0: raise PulseError('Number of connection attempts ({}) exceeded'.format(attempts))
if pid_path:
with open(pid_path) as src: os.kill(int(src.read().strip()), signal.SIGUSR2)
time.sleep(max(0, retry_delay - (c.mono_time() - ts)))
if as_file: res = s.makefile('rw', 1)
else: res, s = s, None # to avoid closing this socket
return res
except Exception as err: # CallError, socket.error, IOError (pidfile), OSError (os.kill)
raise PulseError( 'Failed to connect to pulse'
' cli socket {!r}: {} {}'.format(server, type(err), err) )
finally:
if s: s.close() | Returns connected CLI interface socket (as file object, unless as_file=False),
where one can send the same commands (as lines) as to the "pacmd" tool
or pulseaudio startup files (e.g. "default.pa").
"server" option can be specified to use non-standard unix socket path
(when passed absolute path string) or remote tcp socket,
when passed remote host address (to use default port) or (host, port) tuple.
Be sure to adjust "socket_timeout" option for tcp sockets over laggy internet.
Returned file object has line-buffered output,
so there should be no need to use flush() after every command.
Be sure to read from the socket line-by-line until
"### EOF" or timeout for commands that have output (e.g. "dump\\n").
If default server socket is used (i.e. not specified),
server pid will be signaled to load module-cli between connection attempts.
Completely separate protocol from the regular API, as wrapped by libpulse.
PulseError is raised on any failure. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L818-L877 |
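A usage sketch for `connect_to_cli` ('dump' is a standard pacmd command; reading stops at the documented '### EOF' sentinel):

```python
cli = connect_to_cli()  # line-buffered file object on the cli socket
cli.write('dump\n')     # same command one would give to pacmd
for line in cli:
    if line.startswith('### EOF'):
        break
    print(line, end='')
cli.close()
```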
mk-fg/python-pulse-control | pulsectl/pulsectl.py | PulseExtStreamRestoreInfo.struct_from_value | def struct_from_value( cls, name, volume,
channel_list=None, mute=False, device=None ):
'Same arguments as with class instance init.'
chan_map = c.PA_CHANNEL_MAP()
if not channel_list: c.pa.channel_map_init_mono(chan_map)
else:
if not is_str(channel_list):
channel_list = b','.join(map(c.force_bytes, channel_list))
c.pa.channel_map_parse(chan_map, channel_list)
if not isinstance(volume, PulseVolumeInfo):
volume = PulseVolumeInfo(volume, chan_map.channels)
struct = c.PA_EXT_STREAM_RESTORE_INFO(
name=c.force_bytes(name),
mute=int(bool(mute)), device=c.force_bytes(device),
channel_map=chan_map, volume=volume.to_struct() )
return struct | python | def struct_from_value( cls, name, volume,
channel_list=None, mute=False, device=None ):
'Same arguments as with class instance init.'
chan_map = c.PA_CHANNEL_MAP()
if not channel_list: c.pa.channel_map_init_mono(chan_map)
else:
if not is_str(channel_list):
channel_list = b','.join(map(c.force_bytes, channel_list))
c.pa.channel_map_parse(chan_map, channel_list)
if not isinstance(volume, PulseVolumeInfo):
volume = PulseVolumeInfo(volume, chan_map.channels)
struct = c.PA_EXT_STREAM_RESTORE_INFO(
name=c.force_bytes(name),
mute=int(bool(mute)), device=c.force_bytes(device),
channel_map=chan_map, volume=volume.to_struct() )
return struct | Same arguments as with class instance init. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L287-L302 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.connect | def connect(self, autospawn=False, wait=False):
'''Connect to pulseaudio server.
"autospawn" option will start new pulse daemon, if necessary.
Specifying "wait" option will make function block until pulseaudio server appears.'''
if self._loop_closed:
raise PulseError('Eventloop object was already'
' destroyed and cannot be reused from this instance.')
if self.connected is not None: self._ctx_init()
flags, self.connected = 0, None
if not autospawn: flags |= c.PA_CONTEXT_NOAUTOSPAWN
if wait: flags |= c.PA_CONTEXT_NOFAIL
try: c.pa.context_connect(self._ctx, self.server, flags, None)
except c.pa.CallError: self.connected = False
while self.connected is None: self._pulse_iterate()
if self.connected is False: raise PulseError('Failed to connect to pulseaudio server') | python | def connect(self, autospawn=False, wait=False):
'''Connect to pulseaudio server.
"autospawn" option will start new pulse daemon, if necessary.
Specifying "wait" option will make function block until pulseaudio server appears.'''
if self._loop_closed:
raise PulseError('Eventloop object was already'
' destroyed and cannot be reused from this instance.')
if self.connected is not None: self._ctx_init()
flags, self.connected = 0, None
if not autospawn: flags |= c.PA_CONTEXT_NOAUTOSPAWN
if wait: flags |= c.PA_CONTEXT_NOFAIL
try: c.pa.context_connect(self._ctx, self.server, flags, None)
except c.pa.CallError: self.connected = False
while self.connected is None: self._pulse_iterate()
if self.connected is False: raise PulseError('Failed to connect to pulseaudio server') | Connect to pulseaudio server.
"autospawn" option will start new pulse daemon, if necessary.
Specifying "wait" option will make function block until pulseaudio server appears. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L387-L401 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse._pulse_poll | def _pulse_poll(self, timeout=None):
'''timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.'''
with self._pulse_loop() as loop:
ts = c.mono_time()
ts_deadline = timeout and (ts + timeout)
while True:
delay = max(0, int((ts_deadline - ts) * 1000000)) if ts_deadline else -1
c.pa.mainloop_prepare(loop, delay) # usec
c.pa.mainloop_poll(loop)
if self._loop_closed: break # interrupted by close() or such
c.pa.mainloop_dispatch(loop)
if self._loop_stop: break
ts = c.mono_time()
if ts_deadline and ts >= ts_deadline: break | python | def _pulse_poll(self, timeout=None):
'''timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.'''
with self._pulse_loop() as loop:
ts = c.mono_time()
ts_deadline = timeout and (ts + timeout)
while True:
delay = max(0, int((ts_deadline - ts) * 1000000)) if ts_deadline else -1
c.pa.mainloop_prepare(loop, delay) # usec
c.pa.mainloop_poll(loop)
if self._loop_closed: break # interrupted by close() or such
c.pa.mainloop_dispatch(loop)
if self._loop_stop: break
ts = c.mono_time()
if ts_deadline and ts >= ts_deadline: break | timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L484-L498 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse._pulse_method_call | def _pulse_method_call(pulse_op, func=None, index_arg=True):
'''Creates following synchronous wrapper for async pa_operation callable:
wrapper(index, ...) -> pulse_op(index, [*]args_func(...))
index_arg=False: wrapper(...) -> pulse_op([*]args_func(...))'''
def _wrapper(self, *args, **kws):
if index_arg:
if 'index' in kws: index = kws.pop('index')
else: index, args = args[0], args[1:]
pulse_args = func(*args, **kws) if func else list()
if not is_list(pulse_args): pulse_args = [pulse_args]
if index_arg: pulse_args = [index] + list(pulse_args)
with self._pulse_op_cb() as cb:
try: pulse_op(self._ctx, *(list(pulse_args) + [cb, None]))
except c.ArgumentError as err: raise TypeError(err.args)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
func_args = list(inspect.getargspec(func or (lambda: None)))
func_args[0] = list(func_args[0])
if index_arg: func_args[0] = ['index'] + func_args[0]
_wrapper.__name__ = '...'
_wrapper.__doc__ = 'Signature: func' + inspect.formatargspec(*func_args)
if func.__doc__: _wrapper.__doc__ += '\n\n' + func.__doc__
return _wrapper | python | def _pulse_method_call(pulse_op, func=None, index_arg=True):
'''Creates following synchronous wrapper for async pa_operation callable:
wrapper(index, ...) -> pulse_op(index, [*]args_func(...))
index_arg=False: wrapper(...) -> pulse_op([*]args_func(...))'''
def _wrapper(self, *args, **kws):
if index_arg:
if 'index' in kws: index = kws.pop('index')
else: index, args = args[0], args[1:]
pulse_args = func(*args, **kws) if func else list()
if not is_list(pulse_args): pulse_args = [pulse_args]
if index_arg: pulse_args = [index] + list(pulse_args)
with self._pulse_op_cb() as cb:
try: pulse_op(self._ctx, *(list(pulse_args) + [cb, None]))
except c.ArgumentError as err: raise TypeError(err.args)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
func_args = list(inspect.getargspec(func or (lambda: None)))
func_args[0] = list(func_args[0])
if index_arg: func_args[0] = ['index'] + func_args[0]
_wrapper.__name__ = '...'
_wrapper.__doc__ = 'Signature: func' + inspect.formatargspec(*func_args)
if func.__doc__: _wrapper.__doc__ += '\n\n' + func.__doc__
return _wrapper | Creates following synchronous wrapper for async pa_operation callable:
wrapper(index, ...) -> pulse_op(index, [*]args_func(...))
index_arg=False: wrapper(...) -> pulse_op([*]args_func(...)) | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L577-L598 |
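For illustration, a method defined through this factory could look like the sketch below; the libpulse binding name is an assumption, not taken from this record:
# Inside the Pulse class body: wraps the async pa_operation call into a
# synchronous method with signature (index, mute=True).
sink_input_mute = _pulse_method_call(
    c.pa.context_set_sink_input_mute, lambda mute=True: mute)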
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.stream_restore_write | def stream_restore_write( obj_name_or_list,
mode='merge', apply_immediately=False, **obj_kws ):
'''Update module-stream-restore db entry for specified name.
Can be passed PulseExtStreamRestoreInfo object or list of them as argument,
or name string there and object init keywords (e.g. volume, mute, channel_list, etc).
"mode" is PulseUpdateEnum value of
'merge' (default), 'replace' or 'set' (replaces ALL entries!!!).'''
mode = PulseUpdateEnum[mode]._c_val
if is_str(obj_name_or_list):
obj_name_or_list = PulseExtStreamRestoreInfo(obj_name_or_list, **obj_kws)
if isinstance(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
# obj_array is an array of structs, laid out contiguously in memory, not pointers
obj_array = (c.PA_EXT_STREAM_RESTORE_INFO * len(obj_name_or_list))()
for n, obj in enumerate(obj_name_or_list):
obj_struct, dst_struct = obj.to_struct(), obj_array[n]
for k,t in obj_struct._fields_: setattr(dst_struct, k, getattr(obj_struct, k))
return mode, obj_array, len(obj_array), int(bool(apply_immediately)) | python | def stream_restore_write( obj_name_or_list,
mode='merge', apply_immediately=False, **obj_kws ):
'''Update module-stream-restore db entry for specified name.
Can be passed PulseExtStreamRestoreInfo object or list of them as argument,
or name string there and object init keywords (e.g. volume, mute, channel_list, etc).
"mode" is PulseUpdateEnum value of
'merge' (default), 'replace' or 'set' (replaces ALL entries!!!).'''
mode = PulseUpdateEnum[mode]._c_val
if is_str(obj_name_or_list):
obj_name_or_list = PulseExtStreamRestoreInfo(obj_name_or_list, **obj_kws)
if isinstance(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
# obj_array is an array of structs, laid out contiguously in memory, not pointers
obj_array = (c.PA_EXT_STREAM_RESTORE_INFO * len(obj_name_or_list))()
for n, obj in enumerate(obj_name_or_list):
obj_struct, dst_struct = obj.to_struct(), obj_array[n]
for k,t in obj_struct._fields_: setattr(dst_struct, k, getattr(obj_struct, k))
return mode, obj_array, len(obj_array), int(bool(apply_immediately)) | Update module-stream-restore db entry for specified name.
Can be passed PulseExtStreamRestoreInfo object or list of them as argument,
or name string there and object init keywords (e.g. volume, mute, channel_list, etc).
"mode" is PulseUpdateEnum value of
'merge' (default), 'replace' or 'set' (replaces ALL entries!!!). | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L675-L692 |
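A hedged usage sketch, assuming a connected Pulse instance named pulse; the entry names follow module-stream-restore conventions:
# Store volume/mute for a role-based entry and apply it right away:
pulse.stream_restore_write(
    'sink-input-by-media-role:music',
    volume=0.5, mute=False, apply_immediately=True)
# Or pass prepared PulseExtStreamRestoreInfo objects with an explicit mode:
entry = pulsectl.PulseExtStreamRestoreInfo(
    'sink-input-by-application-name:mpv', volume=0.8)
pulse.stream_restore_write([entry], mode='replace')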
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.stream_restore_delete | def stream_restore_delete(obj_name_or_list):
'''Can be passed string name,
PulseExtStreamRestoreInfo object or a list of any of these.'''
if is_str(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
name_list = list((obj.name if isinstance( obj,
PulseExtStreamRestoreInfo ) else obj) for obj in obj_name_or_list)
name_struct = (c.c_char_p * len(name_list))()
name_struct[:] = list(map(c.force_bytes, name_list))
return [name_struct] | python | def stream_restore_delete(obj_name_or_list):
'''Can be passed string name,
PulseExtStreamRestoreInfo object or a list of any of these.'''
if is_str(obj_name_or_list, PulseExtStreamRestoreInfo):
obj_name_or_list = [obj_name_or_list]
name_list = list((obj.name if isinstance( obj,
PulseExtStreamRestoreInfo ) else obj) for obj in obj_name_or_list)
name_struct = (c.c_char_p * len(name_list))()
name_struct[:] = list(map(c.force_bytes, name_list))
return [name_struct] | Can be passed string name,
PulseExtStreamRestoreInfo object or a list of any of these. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L695-L704 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.default_set | def default_set(self, obj):
'Set passed sink or source to be used as default one by pulseaudio server.'
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_default_set,
PulseSourceInfo: self.source_default_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj) | python | def default_set(self, obj):
'Set passed sink or source to be used as default one by pulseaudio server.'
assert_pulse_object(obj)
method = {
PulseSinkInfo: self.sink_default_set,
PulseSourceInfo: self.source_default_set }.get(type(obj))
if not method: raise NotImplementedError(type(obj))
method(obj) | Set passed sink or source to be used as default one by pulseaudio server. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L707-L714 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.event_listen | def event_listen(self, timeout=None, raise_on_disconnect=True):
'''Does not return until PulseLoopStop
gets raised in event callback or timeout passes.
timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.
raise_on_disconnect causes PulseDisconnected exceptions by default.
Do not run any pulse operations from these callbacks.'''
assert self.event_callback
try: self._pulse_poll(timeout)
except c.pa.CallError: pass # e.g. from mainloop_dispatch() on disconnect
if raise_on_disconnect and not self.connected: raise PulseDisconnected() | python | def event_listen(self, timeout=None, raise_on_disconnect=True):
'''Does not return until PulseLoopStop
gets raised in event callback or timeout passes.
timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.
raise_on_disconnect causes PulseDisconnected exceptions by default.
Do not run any pulse operations from these callbacks.'''
assert self.event_callback
try: self._pulse_poll(timeout)
except c.pa.CallError: pass # e.g. from mainloop_dispatch() on disconnect
if raise_on_disconnect and not self.connected: raise PulseDisconnected() | Does not return until PulseLoopStop
gets raised in event callback or timeout passes.
timeout should be in seconds (float),
0 for non-blocking poll and None (default) for no timeout.
raise_on_disconnect causes PulseDisconnected exceptions by default.
Do not run any pulse operations from these callbacks. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L785-L795 |
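A hedged sketch of the listen loop around this method; event_mask_set() and event_callback_set() are assumed companion helpers on the same class:
import pulsectl
pulse = pulsectl.Pulse('event-demo')
def on_event(ev):
    print('event:', ev)
    raise pulsectl.PulseLoopStop  # breaks out of event_listen()
pulse.event_mask_set('sink', 'sink_input')
pulse.event_callback_set(on_event)
pulse.event_listen(timeout=10)  # returns after the first event or after 10s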
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.event_listen_stop | def event_listen_stop(self):
'''Stop event_listen() loop from e.g. another thread.
			Does nothing if libpulse poll is not running yet, so might be racy with
event_listen() - be sure to call it in a loop until event_listen returns or something.'''
self._loop_stop = True
c.pa.mainloop_wakeup(self._loop) | python | def event_listen_stop(self):
'''Stop event_listen() loop from e.g. another thread.
			Does nothing if libpulse poll is not running yet, so might be racy with
event_listen() - be sure to call it in a loop until event_listen returns or something.'''
self._loop_stop = True
c.pa.mainloop_wakeup(self._loop) | Stop event_listen() loop from e.g. another thread.
			Does nothing if libpulse poll is not running yet, so might be racy with
event_listen() - be sure to call it in a loop until event_listen returns or something. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L797-L802 |
mk-fg/python-pulse-control | pulsectl/pulsectl.py | Pulse.set_poll_func | def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) | python | def set_poll_func(self, func, func_err_handler=None):
'''Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that.'''
if not func_err_handler: func_err_handler = traceback.print_exception
self._pa_poll_cb = c.PA_POLL_FUNC_T(ft.partial(self._pulse_poll_cb, func, func_err_handler))
c.pa.mainloop_set_poll_func(self._loop, self._pa_poll_cb, None) | Can be used to integrate pulse client into existing eventloop.
Function will be passed a list of pollfd structs and timeout value (seconds, float),
which it is responsible to use and modify (set poll flags) accordingly,
returning int value >= 0 with number of fds that had any new events within timeout.
func_err_handler defaults to traceback.print_exception(),
and will be called on any exceptions from callback (to e.g. log these),
returning poll error code (-1) to libpulse after that. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/pulsectl.py#L805-L815 |
mk-fg/python-pulse-control | pulsectl/lookup.py | pulse_obj_lookup | def pulse_obj_lookup(pulse, obj_lookup, prop_default=None):
'''Return set of pulse object(s) with proplist values matching lookup-string.
Pattern syntax:
[ { 'sink' | 'source' | 'sink-input' | 'source-output' } [ / ... ] ':' ]
[ proplist-key-name (non-empty) [ / ... ] ':' ] [ ':' (for regexp match) ]
[ proplist-key-value ]
Examples:
- sink:alsa.driver_name:snd_hda_intel
Match sink(s) with alsa.driver_name=snd_hda_intel (exact match!).
- sink/source:device.bus:pci
Match all sinks and sources with device.bus=pci.
- myprop:somevalue
Match any object (of all 4 supported types) that has myprop=somevalue.
- mpv
Match any object with any of the "default lookup props" (!!!) being equal to "mpv".
"default lookup props" are specified per-type in lookup_key_defaults above.
For example, sink input will be looked-up by media.name, application.name, etc.
- sink-input/source-output:mpv
Same as above, but lookup streams only (not sinks/sources).
Note that "sink-input/source-output" matches type spec, and parsed as such, not as key.
- si/so:mpv
Same as above - see aliases for types in lookup_types.
- application.binary/application.icon:mpv
Lookup by multiple keys with "any match" logic, same as with multiple object types.
- key\/with\/slashes\:and\:colons:somevalue
Lookup by key that has slashes and colons in it.
"/" and ":" must only be escaped in the proplist key part, used as-is in values.
Backslash itself can be escaped as well, i.e. as "\\".
- module-stream-restore.id:sink-input-by-media-role:music
Value has ":" in it, but there's no need to escape it in any way.
- device.description::Analog
Value lookup starting with : is interpreted as a regexp,
i.e. any object with device.description *containing* "Analog" in this case.
- si/so:application.name::^mpv\b
Return all sink-inputs/source-outputs ("si/so") where
"application.name" proplist value matches regexp "^mpv\b".
- :^mpv\b
Regexp lookup (stuff starting with "mpv" word) without type or key specification.
For python2, lookup string should be unicode type.
"prop_default" keyword arg can be used to specify
default proplist value for when key is not found there.'''
# \ue000-\uf8ff - private use area, never assigned to symbols
obj_lookup = obj_lookup.replace('\\\\', '\ue000').replace('\\:', '\ue001')
obj_types_re = '({0})(/({0}))*'.format('|'.join(lookup_types))
m = re.search(
( r'^((?P<t>{}):)?'.format(obj_types_re) +
r'((?P<k>.+?):)?' r'(?P<v>.*)$' ), obj_lookup, re.IGNORECASE )
if not m: raise ValueError(obj_lookup)
lookup_type, lookup_keys, lookup_re = op.itemgetter('t', 'k', 'v')(m.groupdict())
if lookup_keys:
lookup_keys = list(
v.replace('\ue000', '\\\\').replace('\ue001', ':').replace('\ue002', '/')
for v in lookup_keys.replace('\\/', '\ue002').split('/') )
lookup_re = lookup_re.replace('\ue000', '\\\\').replace('\ue001', '\\:')
obj_list_res, lookup_re = list(), re.compile( lookup_re[1:]
if lookup_re.startswith(':') else '^{}$'.format(re.escape(lookup_re)) )
for k in set( lookup_types[k] for k in
(lookup_type.split('/') if lookup_type else lookup_types.keys()) ):
if not lookup_keys: lookup_keys = lookup_key_defaults.get(k)
if not lookup_keys: continue
obj_list = getattr(pulse, k)()
if not obj_list: continue
for obj, k in it.product(obj_list, lookup_keys):
v = obj.proplist.get(k, prop_default)
if v is None: continue
if lookup_re.search(v): obj_list_res.append(obj)
return set(obj_list_res) | python | def pulse_obj_lookup(pulse, obj_lookup, prop_default=None):
'''Return set of pulse object(s) with proplist values matching lookup-string.
Pattern syntax:
[ { 'sink' | 'source' | 'sink-input' | 'source-output' } [ / ... ] ':' ]
[ proplist-key-name (non-empty) [ / ... ] ':' ] [ ':' (for regexp match) ]
[ proplist-key-value ]
Examples:
- sink:alsa.driver_name:snd_hda_intel
Match sink(s) with alsa.driver_name=snd_hda_intel (exact match!).
- sink/source:device.bus:pci
Match all sinks and sources with device.bus=pci.
- myprop:somevalue
Match any object (of all 4 supported types) that has myprop=somevalue.
- mpv
Match any object with any of the "default lookup props" (!!!) being equal to "mpv".
"default lookup props" are specified per-type in lookup_key_defaults above.
For example, sink input will be looked-up by media.name, application.name, etc.
- sink-input/source-output:mpv
Same as above, but lookup streams only (not sinks/sources).
Note that "sink-input/source-output" matches type spec, and parsed as such, not as key.
- si/so:mpv
Same as above - see aliases for types in lookup_types.
- application.binary/application.icon:mpv
Lookup by multiple keys with "any match" logic, same as with multiple object types.
- key\/with\/slashes\:and\:colons:somevalue
Lookup by key that has slashes and colons in it.
"/" and ":" must only be escaped in the proplist key part, used as-is in values.
Backslash itself can be escaped as well, i.e. as "\\".
- module-stream-restore.id:sink-input-by-media-role:music
Value has ":" in it, but there's no need to escape it in any way.
- device.description::Analog
Value lookup starting with : is interpreted as a regexp,
i.e. any object with device.description *containing* "Analog" in this case.
- si/so:application.name::^mpv\b
Return all sink-inputs/source-outputs ("si/so") where
"application.name" proplist value matches regexp "^mpv\b".
- :^mpv\b
Regexp lookup (stuff starting with "mpv" word) without type or key specification.
For python2, lookup string should be unicode type.
"prop_default" keyword arg can be used to specify
default proplist value for when key is not found there.'''
# \ue000-\uf8ff - private use area, never assigned to symbols
obj_lookup = obj_lookup.replace('\\\\', '\ue000').replace('\\:', '\ue001')
obj_types_re = '({0})(/({0}))*'.format('|'.join(lookup_types))
m = re.search(
( r'^((?P<t>{}):)?'.format(obj_types_re) +
r'((?P<k>.+?):)?' r'(?P<v>.*)$' ), obj_lookup, re.IGNORECASE )
if not m: raise ValueError(obj_lookup)
lookup_type, lookup_keys, lookup_re = op.itemgetter('t', 'k', 'v')(m.groupdict())
if lookup_keys:
lookup_keys = list(
v.replace('\ue000', '\\\\').replace('\ue001', ':').replace('\ue002', '/')
for v in lookup_keys.replace('\\/', '\ue002').split('/') )
lookup_re = lookup_re.replace('\ue000', '\\\\').replace('\ue001', '\\:')
obj_list_res, lookup_re = list(), re.compile( lookup_re[1:]
if lookup_re.startswith(':') else '^{}$'.format(re.escape(lookup_re)) )
for k in set( lookup_types[k] for k in
(lookup_type.split('/') if lookup_type else lookup_types.keys()) ):
if not lookup_keys: lookup_keys = lookup_key_defaults.get(k)
if not lookup_keys: continue
obj_list = getattr(pulse, k)()
if not obj_list: continue
for obj, k in it.product(obj_list, lookup_keys):
v = obj.proplist.get(k, prop_default)
if v is None: continue
if lookup_re.search(v): obj_list_res.append(obj)
return set(obj_list_res) | Return set of pulse object(s) with proplist values matching lookup-string.
Pattern syntax:
[ { 'sink' | 'source' | 'sink-input' | 'source-output' } [ / ... ] ':' ]
[ proplist-key-name (non-empty) [ / ... ] ':' ] [ ':' (for regexp match) ]
[ proplist-key-value ]
Examples:
- sink:alsa.driver_name:snd_hda_intel
Match sink(s) with alsa.driver_name=snd_hda_intel (exact match!).
- sink/source:device.bus:pci
Match all sinks and sources with device.bus=pci.
- myprop:somevalue
Match any object (of all 4 supported types) that has myprop=somevalue.
- mpv
Match any object with any of the "default lookup props" (!!!) being equal to "mpv".
"default lookup props" are specified per-type in lookup_key_defaults above.
For example, sink input will be looked-up by media.name, application.name, etc.
- sink-input/source-output:mpv
Same as above, but lookup streams only (not sinks/sources).
Note that "sink-input/source-output" matches type spec, and parsed as such, not as key.
- si/so:mpv
Same as above - see aliases for types in lookup_types.
- application.binary/application.icon:mpv
Lookup by multiple keys with "any match" logic, same as with multiple object types.
- key\/with\/slashes\:and\:colons:somevalue
Lookup by key that has slashes and colons in it.
"/" and ":" must only be escaped in the proplist key part, used as-is in values.
Backslash itself can be escaped as well, i.e. as "\\".
- module-stream-restore.id:sink-input-by-media-role:music
Value has ":" in it, but there's no need to escape it in any way.
- device.description::Analog
Value lookup starting with : is interpreted as a regexp,
i.e. any object with device.description *containing* "Analog" in this case.
- si/so:application.name::^mpv\b
Return all sink-inputs/source-outputs ("si/so") where
"application.name" proplist value matches regexp "^mpv\b".
- :^mpv\b
Regexp lookup (stuff starting with "mpv" word) without type or key specification.
For python2, lookup string should be unicode type.
"prop_default" keyword arg can be used to specify
default proplist value for when key is not found there. | https://github.com/mk-fg/python-pulse-control/blob/902d5e9e5591b89c356e5194a370212e23fb0e93/pulsectl/lookup.py#L23-L93 |
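A usage sketch of the lookup patterns documented above; it assumes Pulse can be used as a context manager:
import pulsectl
from pulsectl.lookup import pulse_obj_lookup
with pulsectl.Pulse('lookup-demo') as pulse:
    sinks = pulse_obj_lookup(pulse, 'sink:device.bus:pci')  # exact match
    streams = pulse_obj_lookup(pulse, 'si/so::^mpv\\b')     # regexp match
    print(sinks, streams)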
bukun/TorCMS | torcms/model/link_model.py | MLink.update | def update(uid, post_data):
'''
        Update the link.
'''
entry = TabLink.update(
name=post_data['name'],
link=post_data['link'],
order=post_data['order'],
logo=post_data['logo'] if 'logo' in post_data else '',
).where(TabLink.uid == uid)
try:
entry.execute()
return True
except:
return False | python | def update(uid, post_data):
'''
        Update the link.
'''
entry = TabLink.update(
name=post_data['name'],
link=post_data['link'],
order=post_data['order'],
logo=post_data['logo'] if 'logo' in post_data else '',
).where(TabLink.uid == uid)
try:
entry.execute()
return True
except:
            return False | Update the link. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/link_model.py#L44-L58 |
bukun/TorCMS | torcms/model/link_model.py | MLink.create_link | def create_link(id_link, post_data):
'''
        Add a record to the link table.
'''
if MLink.get_by_uid(id_link):
return False
try:
the_order = int(post_data['order'])
except:
the_order = 999
TabLink.create(name=post_data['name'],
link=post_data['link'],
order=the_order,
logo=post_data['logo'] if 'logo' in post_data else '',
uid=id_link)
return id_link | python | def create_link(id_link, post_data):
'''
        Add a record to the link table.
'''
if MLink.get_by_uid(id_link):
return False
try:
the_order = int(post_data['order'])
except:
the_order = 999
TabLink.create(name=post_data['name'],
link=post_data['link'],
order=the_order,
logo=post_data['logo'] if 'logo' in post_data else '',
uid=id_link)
        return id_link | Add a record to the link table. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/link_model.py#L61-L76 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.recent | def recent(self):
'''
        List recent wikis.
'''
kwd = {
'pager': '',
'title': 'Recent Pages',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_recent(),
format_date=tools.format_date,
kwd=kwd,
userinfo=self.userinfo) | python | def recent(self):
'''
        List recent wikis.
'''
kwd = {
'pager': '',
'title': 'Recent Pages',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_recent(),
format_date=tools.format_date,
kwd=kwd,
                    userinfo=self.userinfo) | List recent wikis. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L61-L73 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.refresh | def refresh(self):
'''
        List the wikis ordered by date.
'''
kwd = {
'pager': '',
'title': '最近文档',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_dated(16),
format_date=tools.format_date,
kwd=kwd,
userinfo=self.userinfo) | python | def refresh(self):
'''
        List the wikis ordered by date.
'''
kwd = {
'pager': '',
'title': '最近文档',
}
self.render('wiki_page/wiki_list.html',
view=MWiki.query_dated(16),
format_date=tools.format_date,
kwd=kwd,
                    userinfo=self.userinfo) | List the wikis ordered by date. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L75-L87 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.view_or_add | def view_or_add(self, title):
'''
        Check whether a post with the given title exists.
        If it does, show it; otherwise, create it.
'''
postinfo = MWiki.get_by_wiki(title)
if postinfo:
if postinfo.kind == self.kind:
self.view(postinfo)
else:
return False
else:
self.to_add(title) | python | def view_or_add(self, title):
'''
        Check whether a post with the given title exists.
        If it does, show it; otherwise, create it.
'''
postinfo = MWiki.get_by_wiki(title)
if postinfo:
if postinfo.kind == self.kind:
self.view(postinfo)
else:
return False
else:
            self.to_add(title) | Check whether a post with the given title exists.
        If it does, show it; otherwise, create it. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L89-L101 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.update | def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | python | def update(self, uid):
'''
Update the wiki.
'''
postinfo = MWiki.get_by_uid(uid)
if self.check_post_role()['EDIT'] or postinfo.user_name == self.get_current_user():
pass
else:
return False
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
cnt_old = tornado.escape.xhtml_unescape(postinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
if cnt_old == cnt_new:
pass
else:
MWikiHist.create_wiki_history(postinfo)
MWiki.update(uid, post_data)
# cele_gen_whoosh.delay()
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | Update the wiki. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L105-L130 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.view | def view(self, view):
'''
View the wiki.
'''
kwd = {
'pager': '',
'editable': self.editable(),
}
self.render('wiki_page/wiki_view.html',
postinfo=view,
kwd=kwd,
userinfo=self.userinfo) | python | def view(self, view):
'''
View the wiki.
'''
kwd = {
'pager': '',
'editable': self.editable(),
}
self.render('wiki_page/wiki_view.html',
postinfo=view,
kwd=kwd,
userinfo=self.userinfo) | View the wiki. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L150-L162 |
bukun/TorCMS | torcms/handlers/wiki_handler.py | WikiHandler.add | def add(self, title=''):
'''
Add wiki
'''
post_data = self.get_post_data()
if title == '':
pass
else:
post_data['title'] = title
post_data['user_name'] = self.get_current_user()
if MWiki.get_by_wiki(post_data['title']):
pass
else:
MWiki.create_wiki(post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
# cele_gen_whoosh.delay()
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | python | def add(self, title=''):
'''
Add wiki
'''
post_data = self.get_post_data()
if title == '':
pass
else:
post_data['title'] = title
post_data['user_name'] = self.get_current_user()
if MWiki.get_by_wiki(post_data['title']):
pass
else:
MWiki.create_wiki(post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
# cele_gen_whoosh.delay()
self.redirect('/wiki/{0}'.format(tornado.escape.url_escape(post_data['title']))) | Add wiki | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/wiki_handler.py#L188-L208 |
bukun/TorCMS | torcms/handlers/filter_handler.py | echo_html_fenye_str | def echo_html_fenye_str(rec_num, fenye_num):
'''
    Generate the pagination navigation HTML.
'''
pagination_num = int(math.ceil(rec_num * 1.0 / 10))
if pagination_num == 1 or pagination_num == 0:
fenye_str = ''
elif pagination_num > 1:
pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', ''
fenye_str = '<ul class="pagination">'
if fenye_num > 1:
pager_home = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>First Page</a></li>'''.format('', 1)
pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>Previous Page</a></li>'''.format('', fenye_num - 1)
if fenye_num > 5:
cur_num = fenye_num - 4
else:
cur_num = 1
if pagination_num > 10 and cur_num < pagination_num - 10:
show_num = cur_num + 10
else:
show_num = pagination_num + 1
for num in range(cur_num, show_num):
if num == fenye_num:
checkstr = 'active'
else:
checkstr = ''
tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>{1}</a></li>'''.format(checkstr, num)
pager_mid += tmp_str_df
if fenye_num < pagination_num:
pager_next = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>Next Page</a></li>'''.format('', fenye_num + 1)
pager_last = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>End Page</a></li>'''.format('', pagination_num)
fenye_str += pager_home + pager_pre + pager_mid + pager_next + pager_last
fenye_str += '</ul>'
else:
return ''
return fenye_str | python | def echo_html_fenye_str(rec_num, fenye_num):
'''
    Generate the pagination navigation HTML.
'''
pagination_num = int(math.ceil(rec_num * 1.0 / 10))
if pagination_num == 1 or pagination_num == 0:
fenye_str = ''
elif pagination_num > 1:
pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', ''
fenye_str = '<ul class="pagination">'
if fenye_num > 1:
pager_home = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>First Page</a></li>'''.format('', 1)
pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>Previous Page</a></li>'''.format('', fenye_num - 1)
if fenye_num > 5:
cur_num = fenye_num - 4
else:
cur_num = 1
if pagination_num > 10 and cur_num < pagination_num - 10:
show_num = cur_num + 10
else:
show_num = pagination_num + 1
for num in range(cur_num, show_num):
if num == fenye_num:
checkstr = 'active'
else:
checkstr = ''
tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>{1}</a></li>'''.format(checkstr, num)
pager_mid += tmp_str_df
if fenye_num < pagination_num:
pager_next = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>Next Page</a></li>'''.format('', fenye_num + 1)
pager_last = '''<li class="{0}" name='fenye' onclick='change(this);'
value='{1}'><a>End Page</a></li>'''.format('', pagination_num)
fenye_str += pager_home + pager_pre + pager_mid + pager_next + pager_last
fenye_str += '</ul>'
else:
return ''
    return fenye_str | Generate the pagination navigation HTML. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/filter_handler.py#L17-L70 |
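A small sanity sketch of the pager output, assuming the helper is imported from torcms.handlers.filter_handler:
pager_html = echo_html_fenye_str(35, 2)  # 35 records -> 4 pages, page 2 active
assert 'pagination' in pager_html and 'Next Page' in pager_html
print(echo_html_fenye_str(8, 1))  # a single page of results -> ''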
bukun/TorCMS | torcms/handlers/filter_handler.py | FilterHandler.echo_html | def echo_html(self, url_str):
'''
Show the HTML
'''
logger.info('info echo html: {0}'.format(url_str))
condition = self.gen_redis_kw()
url_arr = self.parse_url(url_str)
sig = url_arr[0]
num = (len(url_arr) - 2) // 2
catinfo = MCategory.get_by_uid(sig)
if catinfo.pid == '0000':
condition['def_cat_pid'] = sig
else:
condition['def_cat_uid'] = sig
fenye_num = 1
for idx in range(num):
ckey = url_arr[idx * 2 + 2]
tval = url_arr[idx * 2 + 3]
if tval == '0':
continue
if ckey == 'fenye':
                # Pagination parameter; handled separately.
fenye_num = int(tval)
continue
else:
cval = tval
ckey = 'tag_' + ckey
condition[ckey] = cval
if url_arr[1] == 'con':
infos = MPost.query_list_pager(condition, fenye_num, kind=catinfo.kind)
self.echo_html_list_str(sig, infos)
elif url_arr[1] == 'num':
allinfos = MPost.query_under_condition(condition, kind=catinfo.kind)
self.write(
tornado.escape.xhtml_unescape(
echo_html_fenye_str(
allinfos.count(),
fenye_num
)
)
) | python | def echo_html(self, url_str):
'''
Show the HTML
'''
logger.info('info echo html: {0}'.format(url_str))
condition = self.gen_redis_kw()
url_arr = self.parse_url(url_str)
sig = url_arr[0]
num = (len(url_arr) - 2) // 2
catinfo = MCategory.get_by_uid(sig)
if catinfo.pid == '0000':
condition['def_cat_pid'] = sig
else:
condition['def_cat_uid'] = sig
fenye_num = 1
for idx in range(num):
ckey = url_arr[idx * 2 + 2]
tval = url_arr[idx * 2 + 3]
if tval == '0':
continue
if ckey == 'fenye':
                # Pagination parameter; handled separately.
fenye_num = int(tval)
continue
else:
cval = tval
ckey = 'tag_' + ckey
condition[ckey] = cval
if url_arr[1] == 'con':
infos = MPost.query_list_pager(condition, fenye_num, kind=catinfo.kind)
self.echo_html_list_str(sig, infos)
elif url_arr[1] == 'num':
allinfos = MPost.query_under_condition(condition, kind=catinfo.kind)
self.write(
tornado.escape.xhtml_unescape(
echo_html_fenye_str(
allinfos.count(),
fenye_num
)
)
) | Show the HTML | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/filter_handler.py#L110-L159 |
bukun/TorCMS | torcms/handlers/filter_handler.py | FilterHandler.echo_html_list_str | def echo_html_list_str(self, catid, infos):
'''
        Generate the HTML string of the list results.
'''
zhiding_str = ''
tuiguang_str = ''
imgname = 'fixed/zhanwei.png'
kwd = {
'imgname': imgname,
'zhiding': zhiding_str,
'tuiguang': tuiguang_str,
}
self.render('autogen/infolist/infolist_{0}.html'.format(catid),
userinfo=self.userinfo,
kwd=kwd,
html2text=html2text,
post_infos=infos,
widget_info=kwd) | python | def echo_html_list_str(self, catid, infos):
'''
        Generate the HTML string of the list results.
'''
zhiding_str = ''
tuiguang_str = ''
imgname = 'fixed/zhanwei.png'
kwd = {
'imgname': imgname,
'zhiding': zhiding_str,
'tuiguang': tuiguang_str,
}
self.render('autogen/infolist/infolist_{0}.html'.format(catid),
userinfo=self.userinfo,
kwd=kwd,
html2text=html2text,
post_infos=infos,
                    widget_info=kwd) | Generate the HTML string of the list results. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/filter_handler.py#L161-L180 |
bukun/TorCMS | torcms/handlers/filter_handler.py | FilterHandler.list | def list(self, catid):
'''
        Render method called when the page opens; it does not include the list query results or the pagination navigation.
'''
logger.info('Infocat input: {0}'.format(catid))
condition = self.gen_redis_kw()
sig = catid
bread_title = ''
bread_crumb_nav_str = '<li>当前位置:<a href="/">信息</a></li>'
_catinfo = MCategory.get_by_uid(catid)
logger.info('Infocat input: {0}'.format(_catinfo))
if _catinfo.pid == '0000':
pcatinfo = _catinfo
catinfo = None
parent_id = catid
parent_catname = MCategory.get_by_uid(parent_id).name
condition['parentid'] = [parent_id]
catname = MCategory.get_by_uid(sig).name
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(sig, catname)
bread_title = '{0}'.format(catname)
else:
catinfo = _catinfo
pcatinfo = MCategory.get_by_uid(_catinfo.pid)
condition['def_cat_uid'] = [sig]
parent_id = _catinfo.uid
parent_catname = MCategory.get_by_uid(parent_id).name
catname = MCategory.get_by_uid(sig).name
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(
parent_id,
parent_catname
)
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(sig, catname)
bread_title += '{0} - '.format(parent_catname)
bread_title += '{0}'.format(catname)
num = MPost.get_num_condition(condition)
kwd = {'catid': catid,
'daohangstr': bread_crumb_nav_str,
'breadtilte': bread_title,
'parentid': parent_id,
'parentlist': MCategory.get_parent_list(),
'condition': condition,
'catname': catname,
'rec_num': num}
# cat_rec = MCategory.get_by_uid(catid)
if self.get_current_user() and self.userinfo:
redis_kw = redisvr.smembers(CMS_CFG['redis_kw'] + self.userinfo.user_name)
else:
redis_kw = []
kw_condition_arr = []
for the_key in redis_kw:
kw_condition_arr.append(the_key.decode('utf-8'))
self.render('autogen/list/list_{0}.html'.format(catid),
userinfo=self.userinfo,
kwd=kwd,
widget_info=kwd,
condition_arr=kw_condition_arr,
cat_enum=MCategory.get_qian2(parent_id[:2]),
pcatinfo=pcatinfo,
catinfo=catinfo) | python | def list(self, catid):
'''
        Render method called when the page opens; it does not include the list query results or the pagination navigation.
'''
logger.info('Infocat input: {0}'.format(catid))
condition = self.gen_redis_kw()
sig = catid
bread_title = ''
bread_crumb_nav_str = '<li>当前位置:<a href="/">信息</a></li>'
_catinfo = MCategory.get_by_uid(catid)
logger.info('Infocat input: {0}'.format(_catinfo))
if _catinfo.pid == '0000':
pcatinfo = _catinfo
catinfo = None
parent_id = catid
parent_catname = MCategory.get_by_uid(parent_id).name
condition['parentid'] = [parent_id]
catname = MCategory.get_by_uid(sig).name
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(sig, catname)
bread_title = '{0}'.format(catname)
else:
catinfo = _catinfo
pcatinfo = MCategory.get_by_uid(_catinfo.pid)
condition['def_cat_uid'] = [sig]
parent_id = _catinfo.uid
parent_catname = MCategory.get_by_uid(parent_id).name
catname = MCategory.get_by_uid(sig).name
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(
parent_id,
parent_catname
)
bread_crumb_nav_str += '<li><a href="/list/{0}">{1}</a></li>'.format(sig, catname)
bread_title += '{0} - '.format(parent_catname)
bread_title += '{0}'.format(catname)
num = MPost.get_num_condition(condition)
kwd = {'catid': catid,
'daohangstr': bread_crumb_nav_str,
'breadtilte': bread_title,
'parentid': parent_id,
'parentlist': MCategory.get_parent_list(),
'condition': condition,
'catname': catname,
'rec_num': num}
# cat_rec = MCategory.get_by_uid(catid)
if self.get_current_user() and self.userinfo:
redis_kw = redisvr.smembers(CMS_CFG['redis_kw'] + self.userinfo.user_name)
else:
redis_kw = []
kw_condition_arr = []
for the_key in redis_kw:
kw_condition_arr.append(the_key.decode('utf-8'))
self.render('autogen/list/list_{0}.html'.format(catid),
userinfo=self.userinfo,
kwd=kwd,
widget_info=kwd,
condition_arr=kw_condition_arr,
cat_enum=MCategory.get_qian2(parent_id[:2]),
pcatinfo=pcatinfo,
                    catinfo=catinfo) | Render method called when the page opens; it does not include the list query results or the pagination navigation. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/filter_handler.py#L182-L246 |
bukun/TorCMS | torcms/script/autocrud/fetch_html_dic.py | gen_html_dic | def gen_html_dic():
'''
    Generate the filter dictionary.
'''
if WORK_BOOK:
pass
else:
return False
html_dics = {}
for wk_sheet in WORK_BOOK:
for column in FILTER_COLUMNS:
kkey, kval = __write_filter_dic(wk_sheet, column)
if kkey:
html_dics[kkey] = kval
return html_dics | python | def gen_html_dic():
'''
    Generate the filter dictionary.
'''
if WORK_BOOK:
pass
else:
return False
html_dics = {}
for wk_sheet in WORK_BOOK:
for column in FILTER_COLUMNS:
kkey, kval = __write_filter_dic(wk_sheet, column)
if kkey:
html_dics[kkey] = kval
    return html_dics | Generate the filter dictionary. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/autocrud/fetch_html_dic.py#L69-L86 |
bukun/TorCMS | torcms/script/autocrud/fetch_html_dic.py | gen_array_crud | def gen_array_crud():
'''
    Return the dictionary of the switchers from the XLSX file.
    If the value of the column of the row is `1`, it will be added to the array.
'''
if WORK_BOOK:
pass
else:
return False
papa_id = 0
switch_dics = {}
kind_dics = {}
for work_sheet in WORK_BOOK:
kind_sig = str(work_sheet['A1'].value).strip()
        # the number of the categories in a website won't be greater than 1000.
        for row_num in range(3, 1000):
            # parent category, column A
            a_cell_value = work_sheet['A{0}'.format(row_num)].value
            # child category, column B
b_cell_val = work_sheet['B{0}'.format(row_num)].value
if a_cell_value or b_cell_val:
pass
else:
break
if a_cell_value and a_cell_value != '':
papa_id = a_cell_value.strip()[1:]
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}00'.format(papa_id)] = u_dic
kind_dics['kind_{0}00'.format(papa_id)] = kind_sig
if b_cell_val and b_cell_val != '':
sun_id = b_cell_val.strip()[1:]
if len(sun_id) == 4:
app_uid = sun_id
else:
app_uid = '{0}{1}'.format(papa_id, sun_id)
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}'.format(app_uid)] = u_dic
kind_dics['kind_{0}'.format(app_uid)] = kind_sig
return (switch_dics, kind_dics) | python | def gen_array_crud():
'''
    Return the dictionary of the switchers from the XLSX file.
    If the value of the column of the row is `1`, it will be added to the array.
'''
if WORK_BOOK:
pass
else:
return False
papa_id = 0
switch_dics = {}
kind_dics = {}
for work_sheet in WORK_BOOK:
kind_sig = str(work_sheet['A1'].value).strip()
        # the number of the categories in a website won't be greater than 1000.
        for row_num in range(3, 1000):
            # parent category, column A
            a_cell_value = work_sheet['A{0}'.format(row_num)].value
            # child category, column B
b_cell_val = work_sheet['B{0}'.format(row_num)].value
if a_cell_value or b_cell_val:
pass
else:
break
if a_cell_value and a_cell_value != '':
papa_id = a_cell_value.strip()[1:]
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}00'.format(papa_id)] = u_dic
kind_dics['kind_{0}00'.format(papa_id)] = kind_sig
if b_cell_val and b_cell_val != '':
sun_id = b_cell_val.strip()[1:]
if len(sun_id) == 4:
app_uid = sun_id
else:
app_uid = '{0}{1}'.format(papa_id, sun_id)
u_dic = __get_switch_arr(work_sheet, row_num)
switch_dics['dic_{0}'.format(app_uid)] = u_dic
kind_dics['kind_{0}'.format(app_uid)] = kind_sig
    return (switch_dics, kind_dics) | Return the dictionary of the switchers from the XLSX file.
    If the value of the column of the row is `1`, it will be added to the array. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/autocrud/fetch_html_dic.py#L89-L132 |
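A hedged sketch of the workbook layout this parser expects; WORK_BOOK and FILTER_COLUMNS are module globals whose exact values are assumptions here:
# Inferred layout: cell A1 holds the kind signature; from row 3 on,
# column A has parent ids like 't01', column B child ids like 't01' or 't0101',
# and a FILTER_COLUMNS cell set to 1 enables that switch for the row.
switch_dics, kind_dics = gen_array_crud()
print(kind_dics.get('kind_0100'), switch_dics.get('dic_0100'))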
bukun/TorCMS | torcms/script/autocrud/fetch_html_dic.py | __get_switch_arr | def __get_switch_arr(work_sheet, row_num):
'''
    If the value of the column of the row is `1`, it will be added to the array.
'''
u_dic = []
for col_idx in FILTER_COLUMNS:
cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
if cell_val in [1, '1']:
# Appending the slug name of the switcher.
u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0])
return u_dic | python | def __get_switch_arr(work_sheet, row_num):
'''
    If the value of the column of the row is `1`, it will be added to the array.
'''
u_dic = []
for col_idx in FILTER_COLUMNS:
cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value
if cell_val in [1, '1']:
# Appending the slug name of the switcher.
u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0])
    return u_dic | If the value of the column of the row is `1`, it will be added to the array. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/autocrud/fetch_html_dic.py#L135-L146 |
bukun/TorCMS | torcms/model/usage_model.py | MUsage.add_or_update | def add_or_update(user_id, post_id, kind):
'''
Create the record if new, else update it.
'''
rec = MUsage.query_by_signature(user_id, post_id)
cate_rec = MInfor2Catalog.get_first_category(post_id)
if cate_rec:
cat_id = cate_rec.tag_id
else:
return False
if rec.count() > 0:
logger.info('Usage update: {uid}'.format(uid=post_id))
rec = rec.get()
query = TabUsage.update(kind=kind).where(TabUsage.uid == rec.uid)
query.execute()
MUsage.count_increate(rec.uid, cat_id, rec.count)
else:
logger.info('Usage create: {uid}'.format(uid=post_id))
TabUsage.create(
uid=tools.get_uuid(),
post_id=post_id,
user_id=user_id,
count=1,
tag_id=cat_id,
timestamp=int(time.time()),
kind=kind,
) | python | def add_or_update(user_id, post_id, kind):
'''
Create the record if new, else update it.
'''
rec = MUsage.query_by_signature(user_id, post_id)
cate_rec = MInfor2Catalog.get_first_category(post_id)
if cate_rec:
cat_id = cate_rec.tag_id
else:
return False
if rec.count() > 0:
logger.info('Usage update: {uid}'.format(uid=post_id))
rec = rec.get()
query = TabUsage.update(kind=kind).where(TabUsage.uid == rec.uid)
query.execute()
MUsage.count_increate(rec.uid, cat_id, rec.count)
else:
logger.info('Usage create: {uid}'.format(uid=post_id))
TabUsage.create(
uid=tools.get_uuid(),
post_id=post_id,
user_id=user_id,
count=1,
tag_id=cat_id,
timestamp=int(time.time()),
kind=kind,
) | Create the record if new, else update it. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/usage_model.py#L92-L120 |
bukun/TorCMS | torcms/script/script_sendemail_all.py | run_send_all | def run_send_all(*args):
'''
    Send email to all users.
'''
for user_rec in MUser.query_all():
email_add = user_rec.user_email
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
email_cfg['content']) | python | def run_send_all(*args):
'''
    Send email to all users.
'''
for user_rec in MUser.query_all():
email_add = user_rec.user_email
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
              email_cfg['content']) | Send email to all users. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_sendemail_all.py#L11-L19 |
bukun/TorCMS | torcms/script/script_sendemail_all.py | run_send_nologin | def run_send_nologin(*args):
'''
    Send email to users who have not logged in recently.
'''
for user_rec in MUser.query_nologin():
email_add = user_rec.user_email
print(email_add)
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
email_cfg['content'])
MUser.set_sendemail_time(user_rec.uid) | python | def run_send_nologin(*args):
'''
    Send email to users who have not logged in recently.
'''
for user_rec in MUser.query_nologin():
email_add = user_rec.user_email
print(email_add)
send_mail([email_add],
"{0}|{1}".format(SMTP_CFG['name'], email_cfg['title']),
email_cfg['content'])
    MUser.set_sendemail_time(user_rec.uid) | Send email to users who have not logged in recently. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_sendemail_all.py#L22-L32 |
bukun/TorCMS | torcms/script/script_gen_category.py | gen_xlsx_category | def gen_xlsx_category():
'''
    Generate categories from the XLSX file.
'''
if os.path.exists(XLSX_FILE):
pass
else:
return
    # ordering within the categories
order_index = 1
all_cate_arr = []
for sheet_ranges in load_workbook(filename=XLSX_FILE):
kind_sig = str(sheet_ranges['A1'].value).strip()
for row_num in range(3, 10000):
            # parent category
a_cell_val = sheet_ranges['A{0}'.format(row_num)].value
b_cell_val = sheet_ranges['B{0}'.format(row_num)].value
c_cell_val = sheet_ranges['C{0}'.format(row_num)].value
if a_cell_val or b_cell_val or c_cell_val:
pass
else:
break
if a_cell_val and a_cell_val != '':
cell_arr = a_cell_val.strip()
                p_uid = cell_arr[1:]  # all ids start with 't'
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
u_uid = p_uid + (4 - len(p_uid)) * '0'
pp_uid = '0000'
elif b_cell_val and b_cell_val != '':
cell_arr = b_cell_val
c_iud = cell_arr[1:]
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
if len(c_iud) == 4:
u_uid = c_iud
else:
u_uid = '{0}{1}'.format(p_uid, c_iud)
pp_uid = p_uid + (4 - len(p_uid)) * '0'
else:
continue
post_data = {
'name': t_title,
'slug': t_slug,
'order': order_index,
'uid': u_uid,
'pid': pp_uid,
'kind': kind_sig,
}
all_cate_arr.append(post_data)
MCategory.add_or_update(u_uid, post_data)
order_index += 1
return all_cate_arr | python | def gen_xlsx_category():
'''
    Generate categories from the XLSX file.
'''
if os.path.exists(XLSX_FILE):
pass
else:
return
    # ordering within the categories
order_index = 1
all_cate_arr = []
for sheet_ranges in load_workbook(filename=XLSX_FILE):
kind_sig = str(sheet_ranges['A1'].value).strip()
for row_num in range(3, 10000):
            # parent category
a_cell_val = sheet_ranges['A{0}'.format(row_num)].value
b_cell_val = sheet_ranges['B{0}'.format(row_num)].value
c_cell_val = sheet_ranges['C{0}'.format(row_num)].value
if a_cell_val or b_cell_val or c_cell_val:
pass
else:
break
if a_cell_val and a_cell_val != '':
cell_arr = a_cell_val.strip()
                p_uid = cell_arr[1:]  # all ids start with 't'
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
u_uid = p_uid + (4 - len(p_uid)) * '0'
pp_uid = '0000'
elif b_cell_val and b_cell_val != '':
cell_arr = b_cell_val
c_iud = cell_arr[1:]
t_slug = sheet_ranges['C{0}'.format(row_num)].value.strip()
t_title = sheet_ranges['D{0}'.format(row_num)].value.strip()
if len(c_iud) == 4:
u_uid = c_iud
else:
u_uid = '{0}{1}'.format(p_uid, c_iud)
pp_uid = p_uid + (4 - len(p_uid)) * '0'
else:
continue
post_data = {
'name': t_title,
'slug': t_slug,
'order': order_index,
'uid': u_uid,
'pid': pp_uid,
'kind': kind_sig,
}
all_cate_arr.append(post_data)
MCategory.add_or_update(u_uid, post_data)
order_index += 1
    return all_cate_arr | Generate categories from the XLSX file. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_gen_category.py#L14-L74 |
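A hedged sketch of the expected sheet layout and the returned records (inferred from the parser, not authoritative):
# Inferred layout: A1 kind signature; rows 3+: column A parent id ('t01'),
# column B child id ('t01' or a full 4-char uid), column C slug, column D name.
for cate in (gen_xlsx_category() or [])[:3]:
    print(cate['uid'], cate['pid'], cate['slug'], cate['name'])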
bukun/TorCMS | torcms/script/script_gen_category.py | gen_category | def gen_category(yaml_file, sig):
'''
    Generate categories from the YAML file.
'''
out_dic = yaml.load(open(yaml_file))
for key in out_dic:
if key.endswith('00'):
uid = key[1:]
cur_dic = out_dic[key]
porder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': '0000',
'order': porder * 100,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(uid, cat_dic)
else:
sub_arr = out_dic[key]
pid = key[1:3]
for sub_dic in sub_arr:
porder = out_dic['z' + pid + '00']['order']
for key2 in sub_dic:
uid = key2[1:]
cur_dic = sub_dic[key2]
sorder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': pid + '00',
'order': porder * 100 + sorder,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(pid + uid, cat_dic) | python | def gen_category(yaml_file, sig):
'''
    Generate categories from the YAML file.
'''
out_dic = yaml.load(open(yaml_file))
for key in out_dic:
if key.endswith('00'):
uid = key[1:]
cur_dic = out_dic[key]
porder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': '0000',
'order': porder * 100,
'kind': '{0}'.format(sig),
}
MCategory.add_or_update(uid, cat_dic)
else:
sub_arr = out_dic[key]
pid = key[1:3]
for sub_dic in sub_arr:
porder = out_dic['z' + pid + '00']['order']
for key2 in sub_dic:
uid = key2[1:]
cur_dic = sub_dic[key2]
sorder = cur_dic['order']
cat_dic = {
'uid': uid,
'slug': cur_dic['slug'],
'name': cur_dic['name'],
'count': 0,
'tmpl': 1,
'pid': pid + '00',
'order': porder * 100 + sorder,
'kind': '{0}'.format(sig),
}
                        MCategory.add_or_update(pid + uid, cat_dic) | Generate categories from the YAML file. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_gen_category.py#L76-L125 |
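A minimal YAML sketch this parser would accept; the structure is inferred from the code and the file name is hypothetical:
# ./database/meta/f_demo.yaml
#   z0100: {order: 1, slug: news, name: News}
#   z01:
#     - z01: {order: 1, slug: tech, name: Tech}
# 'z0100' becomes parent uid '0100'; the nested 'z01' entry becomes uid '0101'.
gen_category('./database/meta/f_demo.yaml', 'f')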
bukun/TorCMS | torcms/script/script_gen_category.py | gen_yaml_category | def gen_yaml_category():
'''
    Find YAML files and generate categories from each.
'''
for wroot, _, wfiles in os.walk('./database/meta'):
for wfile in wfiles:
if wfile.endswith('.yaml'):
gen_category(os.path.join(wroot, wfile), wfile[0]) | python | def gen_yaml_category():
'''
    Find YAML files and generate categories from each.
'''
for wroot, _, wfiles in os.walk('./database/meta'):
for wfile in wfiles:
if wfile.endswith('.yaml'):
                gen_category(os.path.join(wroot, wfile), wfile[0]) | Find YAML files and generate categories from each. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_gen_category.py#L128-L135 |
bukun/TorCMS | torcms/script/script_migrate.py | run_migrate | def run_migrate(*args):
'''
    Run some migrations.
'''
print('Begin migrate ...')
torcms_migrator = migrate.PostgresqlMigrator(config.DB_CON)
memo_field = migrate.TextField(null=False, default='', help_text='Memo', )
try:
migrate.migrate(torcms_migrator.add_column('tabpost', 'memo', memo_field))
except:
pass
desc_field = migrate.CharField(null=False, default='', max_length=255, help_text='')
try:
migrate.migrate(torcms_migrator.add_column('tabentity', 'desc', desc_field))
except:
pass
extinfo_field = BinaryJSONField(null=False, default={}, help_text='Extra data in JSON.')
try:
migrate.migrate(torcms_migrator.add_column('tabmember', 'extinfo', extinfo_field))
except:
pass
par_id_field = migrate.CharField(null=False, default='', max_length=4,
help_text='父类id,对于label,top_id为""')
try:
migrate.migrate(torcms_migrator.add_column('tabpost2tag', 'par_id', par_id_field))
except:
pass
print('Migration finished.') | python | def run_migrate(*args):
'''
    Run some migrations.
'''
print('Begin migrate ...')
torcms_migrator = migrate.PostgresqlMigrator(config.DB_CON)
memo_field = migrate.TextField(null=False, default='', help_text='Memo', )
try:
migrate.migrate(torcms_migrator.add_column('tabpost', 'memo', memo_field))
except:
pass
desc_field = migrate.CharField(null=False, default='', max_length=255, help_text='')
try:
migrate.migrate(torcms_migrator.add_column('tabentity', 'desc', desc_field))
except:
pass
extinfo_field = BinaryJSONField(null=False, default={}, help_text='Extra data in JSON.')
try:
migrate.migrate(torcms_migrator.add_column('tabmember', 'extinfo', extinfo_field))
except:
pass
par_id_field = migrate.CharField(null=False, default='', max_length=4,
help_text='父类id,对于label,top_id为""')
try:
migrate.migrate(torcms_migrator.add_column('tabpost2tag', 'par_id', par_id_field))
except:
pass
    print('Migration finished.') | Run some migrations. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/script/script_migrate.py#L15-L49 |
bukun/TorCMS | torcms/core/tools.py | diff_table | def diff_table(rawinfo, newinfo):
'''
    Generate the difference as an HTML table.
    :param rawinfo: the original text.
    :param newinfo: the new text.
    :return: an HTML table fragment showing the diff.
'''
return HtmlDiff.make_table(HtmlDiff(), rawinfo.split('\n'), newinfo.split('\n'),
context=True,
numlines=1) | python | def diff_table(rawinfo, newinfo):
'''
    Generate the difference as an HTML table.
    :param rawinfo: the original text.
    :param newinfo: the new text.
    :return: an HTML table fragment showing the diff.
'''
return HtmlDiff.make_table(HtmlDiff(), rawinfo.split('\n'), newinfo.split('\n'),
context=True,
                              numlines=1) | Generate the difference as an HTML table.
:param rawinfo:
:param newinfo:
:return: | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L89-L98 |
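A usage sketch with made-up sample strings; HtmlDiff comes from the standard-library difflib module, and context=True with numlines=1 limits the table to one line of context around each change.

from difflib import HtmlDiff

old_text = 'line one\nline two\nline three'
new_text = 'line one\nline 2\nline three'
# Same call pattern as diff_table above.
table_html = HtmlDiff().make_table(old_text.split('\n'),
                                   new_text.split('\n'),
                                   context=True,
                                   numlines=1)
print(table_html[:40])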
bukun/TorCMS | torcms/core/tools.py | get_uudd | def get_uudd(lenth):
'''
    Randomly generate an integer with the given number of digits.
'''
sel_arr = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
rarr = random.sample(sel_arr, lenth)
while rarr[0] == '0':
rarr = random.sample(sel_arr, lenth)
return int(''.join(rarr)) | python | def get_uudd(lenth):
'''
    Randomly generate an integer with the given number of digits.
'''
sel_arr = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
rarr = random.sample(sel_arr, lenth)
while rarr[0] == '0':
rarr = random.sample(sel_arr, lenth)
    return int(''.join(rarr)) | Randomly generate an integer with the given number of digits. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L209-L217 |
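Note that random.sample draws without replacement, so every digit of the result is distinct and lenth cannot exceed 10. A hypothetical variant that allows repeated digits (not the TorCMS implementation) could use random.choices instead:

import random

def rand_int_with_digits(length):
    # random.choices draws with replacement, so digits may repeat.
    digits = random.choices('0123456789', k=length)
    while digits[0] == '0':
        digits = random.choices('0123456789', k=length)
    return int(''.join(digits))

print(rand_int_with_digits(6))  # e.g. 440713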
bukun/TorCMS | torcms/core/tools.py | markdown2html | def markdown2html(markdown_text):
'''
    Convert markdown text to HTML, with extensions.
:param markdown_text: The markdown text.
:return: The HTML text.
'''
html = markdown.markdown(
markdown_text,
extensions=[
WikiLinkExtension(base_url='/wiki/', end_url=''),
'markdown.extensions.extra',
'markdown.extensions.toc',
'markdown.extensions.codehilite',
'markdown.extensions.meta'
]
)
han_biaodians = ['。', ',', ';', '、', '!', '?']
for han_biaodian in han_biaodians:
html = html.replace(han_biaodian + '\n', han_biaodian)
return tornado.escape.xhtml_escape(html) | python | def markdown2html(markdown_text):
'''
    Convert markdown text to HTML, with extensions.
:param markdown_text: The markdown text.
:return: The HTML text.
'''
html = markdown.markdown(
markdown_text,
extensions=[
WikiLinkExtension(base_url='/wiki/', end_url=''),
'markdown.extensions.extra',
'markdown.extensions.toc',
'markdown.extensions.codehilite',
'markdown.extensions.meta'
]
)
han_biaodians = ['。', ',', ';', '、', '!', '?']
for han_biaodian in han_biaodians:
html = html.replace(han_biaodian + '\n', han_biaodian)
    return tornado.escape.xhtml_escape(html) | Convert markdown text to HTML, with extensions.
:param markdown_text: The markdown text.
:return: The HTML text. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L228-L247 |
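Two details worth noting: the trailing loop removes line breaks after Chinese punctuation (so soft-wrapped source lines do not introduce stray spaces inside CJK text), and the final xhtml_escape means the function returns escaped markup, which callers are expected to unescape before rendering (compare the xhtml_unescape call in PostAjaxHandler.viewinfo later in this dump). A usage sketch of the underlying conversion, assuming the markdown package is installed:

import markdown

html = markdown.markdown(
    '# Title\n\nSome *emphasised* text.',
    extensions=['markdown.extensions.extra', 'markdown.extensions.toc']
)
print(html)  # <h1 id="title">Title</h1> ...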
bukun/TorCMS | torcms/core/tools.py | gen_pager_purecss | def gen_pager_purecss(cat_slug, page_num, current):
'''
    Generate a pager in Pure.css markup.
'''
if page_num == 1:
return ''
pager_shouye = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}"><< 首页</a></li>'''.format(
'hidden' if current <= 1 else '', cat_slug
)
pager_pre = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">< 前页</a>
</li>'''.format('hidden' if current <= 1 else '',
cat_slug,
current - 1)
pager_mid = ''
for ind in range(0, page_num):
tmp_mid = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">{2}</a></li>
'''.format('selected' if ind + 1 == current else '',
cat_slug,
ind + 1)
pager_mid += tmp_mid
pager_next = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">后页 ></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
current + 1)
pager_last = '''<li class="pure-menu-item {0}">
              <a class="pure-menu-link" href="{1}/{2}">末页
>></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
page_num)
pager = pager_shouye + pager_pre + pager_mid + pager_next + pager_last
return pager | python | def gen_pager_purecss(cat_slug, page_num, current):
'''
    Generate a pager in Pure.css markup.
'''
if page_num == 1:
return ''
pager_shouye = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}"><< 首页</a></li>'''.format(
'hidden' if current <= 1 else '', cat_slug
)
pager_pre = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">< 前页</a>
</li>'''.format('hidden' if current <= 1 else '',
cat_slug,
current - 1)
pager_mid = ''
for ind in range(0, page_num):
tmp_mid = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">{2}</a></li>
'''.format('selected' if ind + 1 == current else '',
cat_slug,
ind + 1)
pager_mid += tmp_mid
pager_next = '''<li class="pure-menu-item {0}">
<a class="pure-menu-link" href="{1}/{2}">后页 ></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
current + 1)
pager_last = '''<li class="pure-menu-item {0}">
              <a class="pure-menu-link" href="{1}/{2}">末页
>></a>
</li> '''.format('hidden' if current >= page_num else '',
cat_slug,
page_num)
pager = pager_shouye + pager_pre + pager_mid + pager_next + pager_last
    return pager | Generate a pager in Pure.css markup. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L251-L288 |
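Usage sketch for the function above: a five-page pager for a hypothetical /list/news category with page 2 selected. With five pages the output contains nine pure-menu-item entries (first, previous, five page links, next, last).

html = gen_pager_purecss('/list/news', 5, 2)
print(html.count('pure-menu-item'))  # 9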
bukun/TorCMS | torcms/core/tools.py | get_cfg | def get_cfg():
'''
    Get the configuration values.
'''
cfg_var = dir(cfg)
if 'DB_CFG' in cfg_var:
db_cfg = cfg.DB_CFG
else:
db_cfg = ConfigDefault.DB_CFG
if 'SMTP_CFG' in cfg_var:
smtp_cfg = cfg.SMTP_CFG
else:
smtp_cfg = ConfigDefault.SMTP_CFG
if 'SITE_CFG' in cfg_var:
site_cfg = cfg.SITE_CFG
else:
site_cfg = ConfigDefault.SITE_CFG
if 'ROLE_CFG' in cfg_var:
role_cfg = cfg.ROLE_CFG
else:
role_cfg = ConfigDefault.ROLE_CFG
role_cfg['view'] = role_cfg.get('view', '')
role_cfg['add'] = role_cfg.get('add', '1000')
role_cfg['edit'] = role_cfg.get('edit', '2000')
role_cfg['delete'] = role_cfg.get('delete', '3000')
role_cfg['admin'] = role_cfg.get('admin', '0300')
###################################################################
site_url = site_cfg['site_url'].strip('/')
site_cfg['site_url'] = site_url
infor = site_url.split(':')
if len(infor) == 1:
site_cfg['PORT'] = 8888
else:
site_cfg['PORT'] = infor[-1]
    site_cfg.setdefault('DEBUG', False)
db_con = PostgresqlExtDatabase(
db_cfg['db'],
user=db_cfg.get('user', db_cfg['db']),
password=db_cfg['pass'],
host='127.0.0.1',
port=db_cfg.get('port', '5432'),
autocommit=True,
autorollback=True)
return (db_con, smtp_cfg, site_cfg, role_cfg) | python | def get_cfg():
'''
    Get the configuration values.
'''
cfg_var = dir(cfg)
if 'DB_CFG' in cfg_var:
db_cfg = cfg.DB_CFG
else:
db_cfg = ConfigDefault.DB_CFG
if 'SMTP_CFG' in cfg_var:
smtp_cfg = cfg.SMTP_CFG
else:
smtp_cfg = ConfigDefault.SMTP_CFG
if 'SITE_CFG' in cfg_var:
site_cfg = cfg.SITE_CFG
else:
site_cfg = ConfigDefault.SITE_CFG
if 'ROLE_CFG' in cfg_var:
role_cfg = cfg.ROLE_CFG
else:
role_cfg = ConfigDefault.ROLE_CFG
role_cfg['view'] = role_cfg.get('view', '')
role_cfg['add'] = role_cfg.get('add', '1000')
role_cfg['edit'] = role_cfg.get('edit', '2000')
role_cfg['delete'] = role_cfg.get('delete', '3000')
role_cfg['admin'] = role_cfg.get('admin', '0300')
###################################################################
site_url = site_cfg['site_url'].strip('/')
site_cfg['site_url'] = site_url
infor = site_url.split(':')
if len(infor) == 1:
site_cfg['PORT'] = 8888
else:
site_cfg['PORT'] = infor[-1]
    site_cfg.setdefault('DEBUG', False)
db_con = PostgresqlExtDatabase(
db_cfg['db'],
user=db_cfg.get('user', db_cfg['db']),
password=db_cfg['pass'],
host='127.0.0.1',
port=db_cfg.get('port', '5432'),
autocommit=True,
autorollback=True)
    return (db_con, smtp_cfg, site_cfg, role_cfg) | Get the configuration values. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tools.py#L359-L416 |
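For reference, a hypothetical cfg.py shape that get_cfg expects; every value below is an illustrative placeholder rather than a TorCMS default. The SMTP_CFG keys follow what send_mail (shown later in this dump) reads.

# Hypothetical cfg.py; placeholders only.
DB_CFG = {'db': 'torcms_demo', 'user': 'torcms_demo',
          'pass': 'secret', 'port': '5432'}
SMTP_CFG = {'name': 'Demo Site', 'user': 'noreply@example.com',
            'pass': 'secret', 'host': 'smtp.example.com'}
SITE_CFG = {'site_url': 'http://demo.example.com:8888', 'DEBUG': False}
ROLE_CFG = {}  # get_cfg fills in view/add/edit/delete/admin defaults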
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.view_or_add | def view_or_add(self, slug):
'''
        When accessed via the slug, add the page if there is no record in the database.
'''
rec_page = MWiki.get_by_uid(slug)
if rec_page:
if rec_page.kind == self.kind:
self.view(rec_page)
else:
return False
else:
self.to_add(slug) | python | def view_or_add(self, slug):
'''
        When accessed via the slug, add the page if there is no record in the database.
'''
rec_page = MWiki.get_by_uid(slug)
if rec_page:
if rec_page.kind == self.kind:
self.view(rec_page)
else:
return False
else:
            self.to_add(slug) | When accessed via the slug, add the page if there is no record in the database. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L65-L77 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.to_add | def to_add(self, citiao):
'''
        Render the form to add a page.
'''
kwd = {
'cats': MCategory.query_all(),
'slug': citiao,
'pager': '',
}
self.render('wiki_page/page_add.html',
kwd=kwd,
userinfo=self.userinfo) | python | def to_add(self, citiao):
'''
        Render the form to add a page.
'''
kwd = {
'cats': MCategory.query_all(),
'slug': citiao,
'pager': '',
}
self.render('wiki_page/page_add.html',
kwd=kwd,
                    userinfo=self.userinfo) | Render the form to add a page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L81-L93 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.__could_edit | def __could_edit(self, slug):
'''
Test if the user could edit the page.
'''
page_rec = MWiki.get_by_uid(slug)
if not page_rec:
return False
if self.check_post_role()['EDIT']:
return True
elif page_rec.user_name == self.userinfo.user_name:
return True
else:
return False | python | def __could_edit(self, slug):
'''
Test if the user could edit the page.
'''
page_rec = MWiki.get_by_uid(slug)
if not page_rec:
return False
if self.check_post_role()['EDIT']:
return True
elif page_rec.user_name == self.userinfo.user_name:
return True
else:
return False | Test if the user could edit the page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L96-L108 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.update | def update(self, slug):
'''
Update the page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
pageinfo = MWiki.get_by_uid(slug)
cnt_old = tornado.escape.xhtml_unescape(pageinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
        if cnt_old != cnt_new:
            MWikiHist.create_wiki_history(MWiki.get_by_uid(slug))
            MWiki.update(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(post_data['slug'])) | python | def update(self, slug):
'''
Update the page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
pageinfo = MWiki.get_by_uid(slug)
cnt_old = tornado.escape.xhtml_unescape(pageinfo.cnt_md).strip()
cnt_new = post_data['cnt_md'].strip()
        if cnt_old != cnt_new:
            MWikiHist.create_wiki_history(MWiki.get_by_uid(slug))
            MWiki.update(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(post_data['slug'])) | Update the page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L112-L133 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.to_modify | def to_modify(self, uid):
'''
Try to modify the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_edit.html',
postinfo=MWiki.get_by_uid(uid),
kwd=kwd,
cfg=CMS_CFG,
userinfo=self.userinfo) | python | def to_modify(self, uid):
'''
Try to modify the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_edit.html',
postinfo=MWiki.get_by_uid(uid),
kwd=kwd,
cfg=CMS_CFG,
userinfo=self.userinfo) | Try to modify the page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L137-L150 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.view | def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | python | def view(self, rec):
'''
View the page.
'''
kwd = {
'pager': '',
}
self.render('wiki_page/page_view.html',
postinfo=rec,
kwd=kwd,
author=rec.user_name,
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | View the page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L153-L167 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.ajax_count_plus | def ajax_count_plus(self, slug):
'''
        Increase the post view count by one via AJAX.
'''
output = {
'status': 1 if MWiki.view_count_plus(slug) else 0,
}
return json.dump(output, self) | python | def ajax_count_plus(self, slug):
'''
        Increase the post view count by one via AJAX.
'''
output = {
'status': 1 if MWiki.view_count_plus(slug) else 0,
}
    return json.dump(output, self) | Increase the post view count by one via AJAX. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L169-L177 |
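The json.dump(output, self) call works because json.dump writes string chunks through the second argument's write() method, and a Tornado RequestHandler accepts chunks via write(). A self-contained sketch of the same trick with a stand-in handler:

import json

class FakeHandler:
    # Stands in for tornado.web.RequestHandler here.
    def write(self, chunk):
        print(chunk, end='')

json.dump({'status': 1}, FakeHandler())  # prints {"status": 1}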
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.list | def list(self):
'''
View the list of the pages.
'''
kwd = {
'pager': '',
'title': '单页列表',
}
self.render('wiki_page/page_list.html',
kwd=kwd,
view=MWiki.query_recent(),
view_all=MWiki.query_all(),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | python | def list(self):
'''
View the list of the pages.
'''
kwd = {
'pager': '',
'title': '单页列表',
}
self.render('wiki_page/page_list.html',
kwd=kwd,
view=MWiki.query_recent(),
view_all=MWiki.query_all(),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG) | View the list of the pages. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L179-L193 |
bukun/TorCMS | torcms/handlers/page_handler.py | PageHandler.add_page | def add_page(self, slug):
'''
Add new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
else:
MWiki.create_page(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(slug)) | python | def add_page(self, slug):
'''
Add new page.
'''
post_data = self.get_post_data()
post_data['user_name'] = self.userinfo.user_name
if MWiki.get_by_uid(slug):
self.set_status(400)
return False
else:
MWiki.create_page(slug, post_data)
tornado.ioloop.IOLoop.instance().add_callback(self.cele_gen_whoosh)
self.redirect('/page/{0}'.format(slug)) | Add new page. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/page_handler.py#L197-L212 |
bukun/TorCMS | torcms/handlers/log_handler.py | LogHandler.add | def add(self, **kwargs):
'''
        Insert the log info.
'''
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)[0]
MLog.add(post_data)
        kwargs.pop('uid', None)  # delete `uid` if it exists in kwargs
self.redirect('/log/') | python | def add(self, **kwargs):
'''
        Insert the log info.
'''
post_data = {}
for key in self.request.arguments:
post_data[key] = self.get_arguments(key)[0]
MLog.add(post_data)
        kwargs.pop('uid', None)  # delete `uid` if it exists in kwargs
        self.redirect('/log/') | Insert the log info. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L63-L76 |
bukun/TorCMS | torcms/handlers/log_handler.py | LogHandler.list | def list(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
if self.is_p:
self.render('admin/log_ajax/user_list.html',
kwd=kwd,
user_list=MLog.query_all_user(),
no_user_list=MLog.query_all(current_page_num=current_page_number),
format_date=tools.format_date,
userinfo=self.userinfo)
else:
self.render('misc/log/user_list.html',
kwd=kwd,
user_list=MLog.query_all_user(),
no_user_list=MLog.query_all(current_page_num=current_page_number),
format_date=tools.format_date,
userinfo=self.userinfo) | python | def list(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
if self.is_p:
self.render('admin/log_ajax/user_list.html',
kwd=kwd,
user_list=MLog.query_all_user(),
no_user_list=MLog.query_all(current_page_num=current_page_number),
format_date=tools.format_date,
userinfo=self.userinfo)
else:
self.render('misc/log/user_list.html',
kwd=kwd,
user_list=MLog.query_all_user(),
no_user_list=MLog.query_all(current_page_num=current_page_number),
format_date=tools.format_date,
userinfo=self.userinfo) | View the list of the Log. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L78-L110 |
bukun/TorCMS | torcms/handlers/log_handler.py | LogHandler.user_log_list | def user_log_list(self, userid, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
'user_id': userid,
}
if self.is_p:
self.render('admin/log_ajax/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo)
else:
self.render('misc/log/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo) | python | def user_log_list(self, userid, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
'user_id': userid,
}
if self.is_p:
self.render('admin/log_ajax/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo)
else:
self.render('misc/log/user_log_list.html',
kwd=kwd,
infos=MLog.query_pager_by_user(
userid,
current_page_num=current_page_number
),
format_date=tools.format_date,
userinfo=self.userinfo) | View the list of the Log. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L112-L149 |
bukun/TorCMS | torcms/handlers/log_handler.py | LogHandler.pageview | def pageview(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
arr_num = []
postinfo = MLog.query_all_current_url()
for i in postinfo:
postnum = MLog.count_of_current_url(i.current_url)
arr_num.append(postnum)
self.render('misc/log/pageview.html',
kwd=kwd,
infos=MLog.query_all_pageview(current_page_num=current_page_number),
postinfo=postinfo,
arr_num=arr_num,
format_date=tools.format_date,
userinfo=self.userinfo) | python | def pageview(self, cur_p=''):
'''
View the list of the Log.
'''
if cur_p == '':
current_page_number = 1
else:
current_page_number = int(cur_p)
current_page_number = 1 if current_page_number < 1 else current_page_number
pager_num = int(MLog.total_number() / CMS_CFG['list_num'])
kwd = {
'pager': '',
'title': '',
'current_page': current_page_number,
}
arr_num = []
postinfo = MLog.query_all_current_url()
for i in postinfo:
postnum = MLog.count_of_current_url(i.current_url)
arr_num.append(postnum)
self.render('misc/log/pageview.html',
kwd=kwd,
infos=MLog.query_all_pageview(current_page_num=current_page_number),
postinfo=postinfo,
arr_num=arr_num,
format_date=tools.format_date,
userinfo=self.userinfo) | View the list of the Log. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/log_handler.py#L151-L183 |
bukun/TorCMS | torcms/model/post_hist_model.py | MPostHist.delete | def delete(uid):
'''
Delete by uid
'''
del_count = TabPostHist.delete().where(TabPostHist.uid == uid)
        try:
            del_count.execute()
            # Note: returns False on success and True on failure.
            return False
        except Exception:
            return True | python | def delete(uid):
'''
Delete by uid
'''
del_count = TabPostHist.delete().where(TabPostHist.uid == uid)
        try:
            del_count.execute()
            # Note: returns False on success and True on failure.
            return False
        except Exception:
            return True | Delete by uid | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_hist_model.py#L25-L35 |
bukun/TorCMS | torcms/model/post_hist_model.py | MPostHist.update_cnt | def update_cnt(uid, post_data):
'''
Update the content by ID.
'''
entry = TabPostHist.update(
user_name=post_data['user_name'],
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),
time_update=tools.timestamp(),
).where(TabPostHist.uid == uid)
entry.execute() | python | def update_cnt(uid, post_data):
'''
Update the content by ID.
'''
entry = TabPostHist.update(
user_name=post_data['user_name'],
cnt_md=tornado.escape.xhtml_escape(post_data['cnt_md']),
time_update=tools.timestamp(),
).where(TabPostHist.uid == uid)
entry.execute() | Update the content by ID. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_hist_model.py#L38-L47 |
bukun/TorCMS | torcms/model/post_hist_model.py | MPostHist.query_by_postid | def query_by_postid(postid, limit=5):
'''
        Query the history of certain records.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(
TabPostHist.time_update.desc()
).limit(limit)
return recs | python | def query_by_postid(postid, limit=5):
'''
        Query the history of certain records.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(
TabPostHist.time_update.desc()
).limit(limit)
        return recs | Query the history of certain records. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_hist_model.py#L50-L59 |
bukun/TorCMS | torcms/model/post_hist_model.py | MPostHist.get_last | def get_last(postid, limit=10):
'''
Get the last one of the records.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(TabPostHist.time_update.desc()).limit(limit)
if recs.count():
return recs.get()
return None | python | def get_last(postid, limit=10):
'''
Get the last one of the records.
'''
recs = TabPostHist.select().where(
TabPostHist.post_id == postid
).order_by(TabPostHist.time_update.desc()).limit(limit)
if recs.count():
return recs.get()
return None | Get the last one of the records. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_hist_model.py#L62-L71 |
bukun/TorCMS | torcms/model/post_hist_model.py | MPostHist.create_post_history | def create_post_history(raw_data):
'''
        Create the history of a certain post.
'''
uid = tools.get_uuid()
TabPostHist.create(
uid=uid,
title=raw_data.title,
post_id=raw_data.uid,
user_name=raw_data.user_name,
cnt_md=raw_data.cnt_md,
time_update=tools.timestamp(),
logo=raw_data.logo,
)
return True | python | def create_post_history(raw_data):
'''
        Create the history of a certain post.
'''
uid = tools.get_uuid()
TabPostHist.create(
uid=uid,
title=raw_data.title,
post_id=raw_data.uid,
user_name=raw_data.user_name,
cnt_md=raw_data.cnt_md,
time_update=tools.timestamp(),
logo=raw_data.logo,
)
        return True | Create the history of a certain post. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_hist_model.py#L74-L88 |
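A usage sketch following the snapshot-before-update pattern seen in PageHandler.update earlier in this dump: take a history copy of the current record before overwriting it. MPost.get_by_uid and MPost.update are assumed here by analogy with MWiki and are not shown in this dump; check the actual model API before relying on them.

# Placeholder values; in a handler these come from the request.
post_uid = '1234'
post_data = {'cnt_md': 'new markdown body', 'user_name': 'demo'}

current = MPost.get_by_uid(post_uid)        # assumed analogue of MWiki.get_by_uid
if current.cnt_md.strip() != post_data['cnt_md'].strip():
    MPostHist.create_post_history(current)  # snapshot first
    MPost.update(post_uid, post_data)       # assumed analogue of MWiki.update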
bukun/TorCMS | torcms/handlers/list_handler.py | ListHandler.ajax_list_catalog | def ajax_list_catalog(self, catid):
'''
        Get posts of a certain catid, in JSON.
        Fetch the post info under the category identified by catid and return it as JSON.
'''
out_arr = {}
for catinfo in MPost2Catalog.query_postinfo_by_cat(catid):
out_arr[catinfo.uid] = catinfo.title
json.dump(out_arr, self) | python | def ajax_list_catalog(self, catid):
'''
        Get posts of a certain catid, in JSON.
        Fetch the post info under the category identified by catid and return it as JSON.
'''
out_arr = {}
for catinfo in MPost2Catalog.query_postinfo_by_cat(catid):
out_arr[catinfo.uid] = catinfo.title
    json.dump(out_arr, self) | Get posts of a certain catid, in JSON.
Fetch the post info under the category identified by catid and return it as JSON. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/list_handler.py#L57-L67 |
bukun/TorCMS | torcms/handlers/list_handler.py | ListHandler.ajax_subcat_arr | def ajax_subcat_arr(self, pid):
'''
Get the sub category.
        ToDo: The menu should display in order. Error found in DRR.
        Fetch the sub-categories by parent ID (pid) and return them as JSON.
'''
out_arr = {}
for catinfo in MCategory.query_sub_cat(pid):
out_arr[catinfo.uid] = catinfo.name
json.dump(out_arr, self) | python | def ajax_subcat_arr(self, pid):
'''
Get the sub category.
        ToDo: The menu should display in order. Error found in DRR.
        Fetch the sub-categories by parent ID (pid) and return them as JSON.
'''
out_arr = {}
for catinfo in MCategory.query_sub_cat(pid):
out_arr[catinfo.uid] = catinfo.name
json.dump(out_arr, self) | Get the sub category.
ToDo: The menu should display in order. Error found in DRR.
Fetch the sub-categories by parent ID (pid) and return them as JSON. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/list_handler.py#L69-L79 |
bukun/TorCMS | torcms/handlers/list_handler.py | ListHandler.ajax_kindcat_arr | def ajax_kindcat_arr(self, kind_sig):
'''
        Get the categories of a certain kind.
        Fetch the matching categories by kind value (kind_sig) and return them as JSON.
'''
out_arr = {}
for catinfo in MCategory.query_kind_cat(kind_sig):
out_arr[catinfo.uid] = catinfo.name
json.dump(out_arr, self) | python | def ajax_kindcat_arr(self, kind_sig):
'''
        Get the categories of a certain kind.
        Fetch the matching categories by kind value (kind_sig) and return them as JSON.
'''
out_arr = {}
for catinfo in MCategory.query_kind_cat(kind_sig):
out_arr[catinfo.uid] = catinfo.name
    json.dump(out_arr, self) | Get the categories of a certain kind.
Fetch the matching categories by kind value (kind_sig) and return them as JSON. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/list_handler.py#L81-L90 |
bukun/TorCMS | torcms/handlers/list_handler.py | ListHandler.list_catalog | def list_catalog(self, cat_slug, **kwargs):
'''
        List the posts of a category.
        Display the post list for the category identified by cat_slug.
'''
post_data = self.get_post_data()
tag = post_data.get('tag', '')
def get_pager_idx():
'''
Get the pager index.
'''
cur_p = kwargs.get('cur_p')
the_num = int(cur_p) if cur_p else 1
the_num = 1 if the_num < 1 else the_num
return the_num
current_page_num = get_pager_idx()
cat_rec = MCategory.get_by_slug(cat_slug)
if not cat_rec:
return False
num_of_cat = MPost2Catalog.count_of_certain_category(cat_rec.uid, tag=tag)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
cat_name = cat_rec.name
kwd = {'cat_name': cat_name,
'cat_slug': cat_slug,
'title': cat_name,
'router': router_post[cat_rec.kind],
'current_page': current_page_num,
'kind': cat_rec.kind,
'tag': tag}
    # Todo: review the following code.
if self.order:
tmpl = 'list/catalog_list.html'
else:
tmpl = 'list/category_list.html'
infos = MPost2Catalog.query_pager_by_slug(
cat_slug,
current_page_num,
tag=tag,
order=self.order
)
    # ToDo: `gen_pager_purecss` should not be used any more.
self.render(tmpl,
catinfo=cat_rec,
infos=infos,
pager=tools.gen_pager_purecss(
'/list/{0}'.format(cat_slug),
page_num,
current_page_num),
userinfo=self.userinfo,
html2text=html2text,
cfg=CMS_CFG,
kwd=kwd,
router=router_post[cat_rec.kind]) | python | def list_catalog(self, cat_slug, **kwargs):
'''
        List the posts of a category.
        Display the post list for the category identified by cat_slug.
'''
post_data = self.get_post_data()
tag = post_data.get('tag', '')
def get_pager_idx():
'''
Get the pager index.
'''
cur_p = kwargs.get('cur_p')
the_num = int(cur_p) if cur_p else 1
the_num = 1 if the_num < 1 else the_num
return the_num
current_page_num = get_pager_idx()
cat_rec = MCategory.get_by_slug(cat_slug)
if not cat_rec:
return False
num_of_cat = MPost2Catalog.count_of_certain_category(cat_rec.uid, tag=tag)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
cat_name = cat_rec.name
kwd = {'cat_name': cat_name,
'cat_slug': cat_slug,
'title': cat_name,
'router': router_post[cat_rec.kind],
'current_page': current_page_num,
'kind': cat_rec.kind,
'tag': tag}
    # Todo: review the following code.
if self.order:
tmpl = 'list/catalog_list.html'
else:
tmpl = 'list/category_list.html'
infos = MPost2Catalog.query_pager_by_slug(
cat_slug,
current_page_num,
tag=tag,
order=self.order
)
    # ToDo: `gen_pager_purecss` should not be used any more.
self.render(tmpl,
catinfo=cat_rec,
infos=infos,
pager=tools.gen_pager_purecss(
'/list/{0}'.format(cat_slug),
page_num,
current_page_num),
userinfo=self.userinfo,
html2text=html2text,
cfg=CMS_CFG,
kwd=kwd,
                router=router_post[cat_rec.kind]) | List the posts of a category.
Display the post list for the category identified by cat_slug. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/list_handler.py#L92-L155 |
bukun/TorCMS | torcms/core/tool/send_email.py | send_mail | def send_mail(to_list, sub, content, cc=None):
'''
Sending email via Python.
'''
sender = SMTP_CFG['name'] + "<" + SMTP_CFG['user'] + ">"
msg = MIMEText(content, _subtype='html', _charset='utf-8')
msg['Subject'] = sub
msg['From'] = sender
msg['To'] = ";".join(to_list)
if cc:
msg['cc'] = ';'.join(cc)
try:
        # Using SMTP_SSL. Aliyun ECS has blocked outbound port 25 since September 2016.
smtper = smtplib.SMTP_SSL(SMTP_CFG['host'], port=994)
smtper.login(SMTP_CFG['user'], SMTP_CFG['pass'])
smtper.sendmail(sender, to_list, msg.as_string())
smtper.close()
return True
    except Exception:
return False | python | def send_mail(to_list, sub, content, cc=None):
'''
Sending email via Python.
'''
sender = SMTP_CFG['name'] + "<" + SMTP_CFG['user'] + ">"
msg = MIMEText(content, _subtype='html', _charset='utf-8')
msg['Subject'] = sub
msg['From'] = sender
msg['To'] = ";".join(to_list)
if cc:
msg['cc'] = ';'.join(cc)
try:
        # Using SMTP_SSL. Aliyun ECS has blocked outbound port 25 since September 2016.
smtper = smtplib.SMTP_SSL(SMTP_CFG['host'], port=994)
smtper.login(SMTP_CFG['user'], SMTP_CFG['pass'])
smtper.sendmail(sender, to_list, msg.as_string())
smtper.close()
return True
    except Exception:
return False | Sending email via Python. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/core/tool/send_email.py#L12-L31 |
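Usage sketch with placeholder addresses; send_mail returns True on success and False on any SMTP failure.

ok = send_mail(['alice@example.com'],
               'Hello from TorCMS',
               '<p>Test message.</p>',
               cc=['bob@example.com'])
print('sent' if ok else 'failed')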
bukun/TorCMS | torcms/handlers/search_handler.py | gen_pager_bootstrap_url | def gen_pager_bootstrap_url(cat_slug, page_num, current):
'''
    Pager for search results.
'''
pager = ''
if page_num == 1 or page_num == 0:
pager = ''
elif page_num > 1:
pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', ''
pager = '<ul class="pagination">'
if current > 1:
pager_home = '''<li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">首页</a></li>'''.format('', cat_slug, 1)
pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">上一页</a></li>'''.format('', cat_slug, current - 1)
if current > 5:
cur_num = current - 4
else:
cur_num = 1
if page_num > 10 and cur_num < page_num - 10:
show_num = cur_num + 10
else:
show_num = page_num + 1
for num in range(cur_num, show_num):
if num == current:
checkstr = 'active'
else:
checkstr = ''
tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">{2}</a></li>'''.format(checkstr, cat_slug, num)
pager_mid += tmp_str_df
if current < page_num:
pager_next = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">下一页</a></li>'''.format('', cat_slug, current + 1)
pager_last = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">末页</a></li>'''.format('', cat_slug, page_num)
pager += pager_home + pager_pre + pager_mid + pager_next + pager_last
pager += '</ul>'
else:
pass
return pager | python | def gen_pager_bootstrap_url(cat_slug, page_num, current):
'''
    Pager for search results.
'''
pager = ''
if page_num == 1 or page_num == 0:
pager = ''
elif page_num > 1:
pager_mid, pager_pre, pager_next, pager_last, pager_home = '', '', '', '', ''
pager = '<ul class="pagination">'
if current > 1:
pager_home = '''<li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">首页</a></li>'''.format('', cat_slug, 1)
pager_pre = ''' <li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">上一页</a></li>'''.format('', cat_slug, current - 1)
if current > 5:
cur_num = current - 4
else:
cur_num = 1
if page_num > 10 and cur_num < page_num - 10:
show_num = cur_num + 10
else:
show_num = page_num + 1
for num in range(cur_num, show_num):
if num == current:
checkstr = 'active'
else:
checkstr = ''
tmp_str_df = '''<li class="{0}" name='fenye' onclick='change(this);'>
<a href="{1}/{2}">{2}</a></li>'''.format(checkstr, cat_slug, num)
pager_mid += tmp_str_df
if current < page_num:
pager_next = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">下一页</a></li>'''.format('', cat_slug, current + 1)
pager_last = '''
<li class="{0}" name='fenye' onclick='change(this);'
><a href="{1}/{2}">末页</a></li>'''.format('', cat_slug, page_num)
pager += pager_home + pager_pre + pager_mid + pager_next + pager_last
pager += '</ul>'
else:
pass
    return pager | Pager for search results. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/search_handler.py#L14-L68 |
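Usage sketch: page 3 of 12 result pages for a hypothetical keyword route. The function emits a Bootstrap <ul class="pagination"> block with home/previous links, up to ten numbered page links, and next/last links.

html = gen_pager_bootstrap_url('/search/tornado', 12, 3)
print('pagination' in html)  # True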
bukun/TorCMS | torcms/handlers/search_handler.py | SearchHandler.search | def search(self, keyword, p_index=''):
'''
        Perform the search.
'''
if p_index == '' or p_index == '-1':
current_page_number = 1
else:
current_page_number = int(p_index)
res_all = self.ysearch.get_all_num(keyword)
results = self.ysearch.search_pager(
keyword,
page_index=current_page_number,
doc_per_page=CMS_CFG['list_num']
)
page_num = int(res_all / CMS_CFG['list_num'])
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'keyword': keyword,
'catid': '',
'current_page': current_page_number}
self.render('misc/search/search_list.html',
kwd=kwd,
srecs=results,
pager=gen_pager_bootstrap_url(
'/search/{0}'.format(keyword),
page_num,
current_page_number
),
userinfo=self.userinfo,
cfg=CMS_CFG) | python | def search(self, keyword, p_index=''):
'''
        Perform the search.
'''
if p_index == '' or p_index == '-1':
current_page_number = 1
else:
current_page_number = int(p_index)
res_all = self.ysearch.get_all_num(keyword)
results = self.ysearch.search_pager(
keyword,
page_index=current_page_number,
doc_per_page=CMS_CFG['list_num']
)
page_num = int(res_all / CMS_CFG['list_num'])
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'keyword': keyword,
'catid': '',
'current_page': current_page_number}
self.render('misc/search/search_list.html',
kwd=kwd,
srecs=results,
pager=gen_pager_bootstrap_url(
'/search/{0}'.format(keyword),
page_num,
current_page_number
),
userinfo=self.userinfo,
                    cfg=CMS_CFG) | Perform the search. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/search_handler.py#L122-L152 |
bukun/TorCMS | torcms/handlers/search_handler.py | SearchHandler.search_cat | def search_cat(self, catid, keyword, p_index=1):
'''
        Search according to the kind.
'''
catid = 'sid' + catid
logger.info('-' * 20)
logger.info('search cat')
logger.info('catid: {0}'.format(catid))
logger.info('keyword: {0}'.format(keyword))
# catid = ''
if p_index == '' or p_index == '-1':
current_page_number = 1
else:
current_page_number = int(p_index)
res_all = self.ysearch.get_all_num(keyword, catid=catid)
results = self.ysearch.search_pager(
keyword,
catid=catid,
page_index=current_page_number,
doc_per_page=CMS_CFG['list_num']
)
page_num = int(res_all / CMS_CFG['list_num'])
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'current_page': current_page_number,
'catid': catid,
'keyword': keyword}
self.render('misc/search/search_list.html',
kwd=kwd,
srecs=results,
pager=gen_pager_bootstrap_url(
'/search/{0}/{1}'.format(catid, keyword),
page_num,
current_page_number
),
userinfo=self.userinfo,
cfg=CMS_CFG) | python | def search_cat(self, catid, keyword, p_index=1):
'''
        Search according to the kind.
'''
catid = 'sid' + catid
logger.info('-' * 20)
logger.info('search cat')
logger.info('catid: {0}'.format(catid))
logger.info('keyword: {0}'.format(keyword))
# catid = ''
if p_index == '' or p_index == '-1':
current_page_number = 1
else:
current_page_number = int(p_index)
res_all = self.ysearch.get_all_num(keyword, catid=catid)
results = self.ysearch.search_pager(
keyword,
catid=catid,
page_index=current_page_number,
doc_per_page=CMS_CFG['list_num']
)
page_num = int(res_all / CMS_CFG['list_num'])
kwd = {'title': '查找结果',
'pager': '',
'count': res_all,
'current_page': current_page_number,
'catid': catid,
'keyword': keyword}
self.render('misc/search/search_list.html',
kwd=kwd,
srecs=results,
pager=gen_pager_bootstrap_url(
'/search/{0}/{1}'.format(catid, keyword),
page_num,
current_page_number
),
userinfo=self.userinfo,
                    cfg=CMS_CFG) | Search according to the kind. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/search_handler.py#L154-L195 |
bukun/TorCMS | torcms/handlers/post_ajax_handler.py | PostAjaxHandler.viewinfo | def viewinfo(self, postinfo):
'''
View the info
'''
out_json = {
'uid': postinfo.uid,
'time_update': postinfo.time_update,
'title': postinfo.title,
'cnt_html': tornado.escape.xhtml_unescape(postinfo.cnt_html),
}
self.write(json.dumps(out_json)) | python | def viewinfo(self, postinfo):
'''
View the info
'''
out_json = {
'uid': postinfo.uid,
'time_update': postinfo.time_update,
'title': postinfo.title,
'cnt_html': tornado.escape.xhtml_unescape(postinfo.cnt_html),
}
self.write(json.dumps(out_json)) | View the info | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_ajax_handler.py#L42-L53 |
bukun/TorCMS | torcms/handlers/post_ajax_handler.py | PostAjaxHandler.count_plus | def count_plus(self, uid):
'''
        AJAX request: the view count will increase by 1.
'''
self.set_header("Content-Type", "application/json")
output = {
# ToDo: Test the following codes.
# MPost.__update_view_count_by_uid(uid) else 0,
'status': 1 if MPost.update_misc(uid, count=1) else 0
}
# return json.dump(output, self)
self.write(json.dumps(output)) | python | def count_plus(self, uid):
'''
        AJAX request: the view count will increase by 1.
'''
self.set_header("Content-Type", "application/json")
output = {
# ToDo: Test the following codes.
# MPost.__update_view_count_by_uid(uid) else 0,
'status': 1 if MPost.update_misc(uid, count=1) else 0
}
# return json.dump(output, self)
        self.write(json.dumps(output)) | AJAX request: the view count will increase by 1. | https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/handlers/post_ajax_handler.py#L55-L66 |