repository_name: string (length 5 to 67)
func_path_in_repository: string (length 4 to 234)
func_name: string (length 0 to 314)
whole_func_string: string (length 52 to 3.87M)
language: string (6 classes)
func_code_string: string (length 52 to 3.87M)
func_documentation_string: string (length 1 to 47.2k)
func_code_url: string (length 85 to 339)
maxcountryman/atomos
atomos/multiprocessing/atomic.py
AtomicNumber.get_and_subtract
def get_and_subtract(self, delta): ''' Atomically subtracts `delta` from the current value and returns the old value. :param delta: The delta to subtract. ''' with self._reference.get_lock(): oldval = self._reference.value self._reference.value -= delta return oldval
python
def get_and_subtract(self, delta): ''' Atomically subtracts `delta` from the current value and returns the old value. :param delta: The delta to subtract. ''' with self._reference.get_lock(): oldval = self._reference.value self._reference.value -= delta return oldval
Atomically subtracts `delta` from the current value and returns the old value. :param delta: The delta to subtract.
https://github.com/maxcountryman/atomos/blob/418746c69134efba3c4f999405afe9113dee4827/atomos/multiprocessing/atomic.py#L200-L210
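The record above is a lock-protected read-modify-write. A minimal standalone sketch of the same pattern using only the standard library's multiprocessing.Value; the class and method names below are illustrative, not the atomos API.

```python
# Sketch of an atomic get-and-subtract built on multiprocessing.Value.
import multiprocessing


class SharedCounter:
    def __init__(self, initial=0):
        # 'l' = signed long; get_lock() returns the lock guarding the value
        self._reference = multiprocessing.Value('l', initial)

    def get_and_subtract(self, delta):
        with self._reference.get_lock():
            oldval = self._reference.value
            self._reference.value -= delta
            return oldval


if __name__ == '__main__':
    counter = SharedCounter(10)
    print(counter.get_and_subtract(3))  # 10
    print(counter.get_and_subtract(3))  # 7
```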
Pierre-Sassoulas/django-zxcvbn-password-validator
django_zxcvbn_password_validator/translate_zxcvbn_text.py
translate_zxcvbn_text
def translate_zxcvbn_text(text): """ This PR would make it cleaner, but it will also be very slow to be integrated in python-zxcvbn and we want this to work now : https://github.com/dropbox/zxcvbn/pull/124 """ i18n = { "Use a few words, avoid common phrases": _( "Use a few words, avoid common phrases" ), "No need for symbols, digits, or uppercase letters": _( "No need for symbols, digits, or uppercase letters" ), "Add another word or two. Uncommon words are better.": _( "Add another word or two. Uncommon words are better." ), "Straight rows of keys are easy to guess": _( "Straight rows of keys are easy to guess" ), "Short keyboard patterns are easy to guess": _( "Short keyboard patterns are easy to guess" ), "Use a longer keyboard pattern with more turns": _( "Use a longer keyboard pattern with more turns" ), 'Repeats like "aaa" are easy to guess': _( 'Repeats like "aaa" are easy to guess' ), 'Repeats like "abcabcabc" are only slightly harder to guess than "abc"': _( 'Repeats like "abcabcabc" are only slightly harder to guess than "abc"' ), "Avoid repeated words and characters": _("Avoid repeated words and characters"), 'Sequences like "abc" or "6543" are easy to guess': _( 'Sequences like "abc" or "6543" are easy to guess' ), "Avoid sequences": _("Avoid sequences"), "Recent years are easy to guess": _("Recent years are easy to guess"), "Avoid recent years": _("Avoid recent years"), "Avoid years that are associated with you": _( "Avoid years that are associated with you" ), "Dates are often easy to guess": _("Dates are often easy to guess"), "Avoid dates and years that are associated with you": _( "Avoid dates and years that are associated with you" ), "This is a top-10 common password": _("This is a top-10 common password"), "This is a top-100 common password": _("This is a top-100 common password"), "This is a very common password": _("This is a very common password"), "This is similar to a commonly used password": _( "This is similar to a commonly used password" ), "A word by itself is easy to guess": _("A word by itself is easy to guess"), "Names and surnames by themselves are easy to guess": _( "Names and surnames by themselves are easy to guess" ), "Common names and surnames are easy to guess": _( "Common names and surnames are easy to guess" ), "Capitalization doesn't help very much": _( "Capitalization doesn't help very much" ), "All-uppercase is almost as easy to guess as all-lowercase": _( "All-uppercase is almost as easy to guess as all-lowercase" ), "Reversed words aren't much harder to guess": _( "Reversed words aren't much harder to guess" ), "Predictable substitutions like '@' instead of 'a' don't help very much": _( "Predictable substitutions like '@' instead of 'a' don't help very much" ), } translated_text = i18n.get(text) if translated_text is None: # zxcvbn is inconsistent, sometime there is a dot, sometime not translated_text = i18n.get(text[:-1]) if translated_text is None: LOGGER.warning( "No translation for '%s' or '%s', update the generatei18ndict command.", text, text[:-1], ) return text return translated_text
python
def translate_zxcvbn_text(text): """ This PR would make it cleaner, but it will also be very slow to be integrated in python-zxcvbn and we want this to work now : https://github.com/dropbox/zxcvbn/pull/124 """ i18n = { "Use a few words, avoid common phrases": _( "Use a few words, avoid common phrases" ), "No need for symbols, digits, or uppercase letters": _( "No need for symbols, digits, or uppercase letters" ), "Add another word or two. Uncommon words are better.": _( "Add another word or two. Uncommon words are better." ), "Straight rows of keys are easy to guess": _( "Straight rows of keys are easy to guess" ), "Short keyboard patterns are easy to guess": _( "Short keyboard patterns are easy to guess" ), "Use a longer keyboard pattern with more turns": _( "Use a longer keyboard pattern with more turns" ), 'Repeats like "aaa" are easy to guess': _( 'Repeats like "aaa" are easy to guess' ), 'Repeats like "abcabcabc" are only slightly harder to guess than "abc"': _( 'Repeats like "abcabcabc" are only slightly harder to guess than "abc"' ), "Avoid repeated words and characters": _("Avoid repeated words and characters"), 'Sequences like "abc" or "6543" are easy to guess': _( 'Sequences like "abc" or "6543" are easy to guess' ), "Avoid sequences": _("Avoid sequences"), "Recent years are easy to guess": _("Recent years are easy to guess"), "Avoid recent years": _("Avoid recent years"), "Avoid years that are associated with you": _( "Avoid years that are associated with you" ), "Dates are often easy to guess": _("Dates are often easy to guess"), "Avoid dates and years that are associated with you": _( "Avoid dates and years that are associated with you" ), "This is a top-10 common password": _("This is a top-10 common password"), "This is a top-100 common password": _("This is a top-100 common password"), "This is a very common password": _("This is a very common password"), "This is similar to a commonly used password": _( "This is similar to a commonly used password" ), "A word by itself is easy to guess": _("A word by itself is easy to guess"), "Names and surnames by themselves are easy to guess": _( "Names and surnames by themselves are easy to guess" ), "Common names and surnames are easy to guess": _( "Common names and surnames are easy to guess" ), "Capitalization doesn't help very much": _( "Capitalization doesn't help very much" ), "All-uppercase is almost as easy to guess as all-lowercase": _( "All-uppercase is almost as easy to guess as all-lowercase" ), "Reversed words aren't much harder to guess": _( "Reversed words aren't much harder to guess" ), "Predictable substitutions like '@' instead of 'a' don't help very much": _( "Predictable substitutions like '@' instead of 'a' don't help very much" ), } translated_text = i18n.get(text) if translated_text is None: # zxcvbn is inconsistent, sometime there is a dot, sometime not translated_text = i18n.get(text[:-1]) if translated_text is None: LOGGER.warning( "No translation for '%s' or '%s', update the generatei18ndict command.", text, text[:-1], ) return text return translated_text
This PR would make it cleaner, but it will also be very slow to be integrated in python-zxcvbn and we want this to work now : https://github.com/dropbox/zxcvbn/pull/124
https://github.com/Pierre-Sassoulas/django-zxcvbn-password-validator/blob/4b73e0fae430b9f93485b7fbd7a132a78c2d8f15/django_zxcvbn_password_validator/translate_zxcvbn_text.py#L8-L88
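The interesting part of the record above is the fallback lookup that retries without a trailing period. A small sketch of that lookup in isolation; the dictionary and the identity "translator" below are placeholders, not the real Django translation catalogue.

```python
# Sketch of the trailing-dot fallback lookup used by translate_zxcvbn_text.
import logging

LOGGER = logging.getLogger(__name__)


def _(s):
    return s  # stand-in for django.utils.translation.gettext


I18N = {"Avoid sequences": _("Avoid sequences")}


def translate(text):
    translated = I18N.get(text)
    if translated is None:
        # zxcvbn messages sometimes end with a dot, sometimes not
        translated = I18N.get(text[:-1])
    if translated is None:
        LOGGER.warning("No translation for %r", text)
        return text
    return translated


print(translate("Avoid sequences."))  # falls back to the dot-less key
```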
codelv/enaml-native-cli
enamlnativecli/main.py
find_conda
def find_conda(): """ Try to find conda on the system """ USER_HOME = os.path.expanduser('~') CONDA_HOME = os.environ.get('CONDA_HOME', '') PROGRAMDATA = os.environ.get('PROGRAMDATA', '') # Search common install paths and the PATH search_paths = [ # Windows join(PROGRAMDATA, 'miniconda2', 'scripts'), join(PROGRAMDATA, 'miniconda3', 'scripts'), join(USER_HOME, 'miniconda2', 'scripts'), join(USER_HOME, 'miniconda3', 'scripts'), join(CONDA_HOME, 'scripts'), # Linux join(USER_HOME, 'miniconda2', 'bin'), join(USER_HOME, 'miniconda3', 'bin'), join(CONDA_HOME, 'bin'), # TODO: OSX ] + os.environ.get("PATH", "").split(";" if 'win' in sys.platform else ":") cmd = 'conda.exe' if IS_WIN else 'conda' for conda_path in search_paths: conda = join(conda_path, cmd) if exists(conda): return sh.Command(conda) # Try to let the system find it return sh.conda
python
def find_conda(): """ Try to find conda on the system """ USER_HOME = os.path.expanduser('~') CONDA_HOME = os.environ.get('CONDA_HOME', '') PROGRAMDATA = os.environ.get('PROGRAMDATA', '') # Search common install paths and the PATH search_paths = [ # Windows join(PROGRAMDATA, 'miniconda2', 'scripts'), join(PROGRAMDATA, 'miniconda3', 'scripts'), join(USER_HOME, 'miniconda2', 'scripts'), join(USER_HOME, 'miniconda3', 'scripts'), join(CONDA_HOME, 'scripts'), # Linux join(USER_HOME, 'miniconda2', 'bin'), join(USER_HOME, 'miniconda3', 'bin'), join(CONDA_HOME, 'bin'), # TODO: OSX ] + os.environ.get("PATH", "").split(";" if 'win' in sys.platform else ":") cmd = 'conda.exe' if IS_WIN else 'conda' for conda_path in search_paths: conda = join(conda_path, cmd) if exists(conda): return sh.Command(conda) # Try to let the system find it return sh.conda
Try to find conda on the system
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L74-L103
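The PATH split in the record above keys the separator off the platform (fixed here from the original `'win' in sys.path` to `sys.platform`). A sketch of a portable variant, assuming nothing beyond the standard library: os.pathsep already encodes the ';' versus ':' difference.

```python
# Portable sketch of the PATH scan; os.pathsep removes the platform check.
import os
import sys

IS_WIN = sys.platform.startswith('win')


def find_executable(name):
    cmd = name + '.exe' if IS_WIN else name
    for folder in os.environ.get('PATH', '').split(os.pathsep):
        candidate = os.path.join(folder, cmd)
        if os.path.exists(candidate):
            return candidate
    return None


print(find_executable('conda'))  # None if conda is not on the PATH
```

For most cases the standard library's shutil.which performs this same lookup, including PATHEXT handling on Windows.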
codelv/enaml-native-cli
enamlnativecli/main.py
cp
def cp(src, dst): """ Like cp -R src dst """ print("[DEBUG]: -> copying {} to {}".format(src, dst)) if os.path.isfile(src): if not exists(dirname(dst)): os.makedirs(dirname(dst)) shutil.copy(src, dst) else: copy_tree(src, dst)
python
def cp(src, dst): """ Like cp -R src dst """ print("[DEBUG]: -> copying {} to {}".format(src, dst)) if os.path.isfile(src): if not exists(dirname(dst)): os.makedirs(dirname(dst)) shutil.copy(src, dst) else: copy_tree(src, dst)
Like cp -R src dst
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L128-L136
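The cp helper above leans on distutils' copy_tree for directories. A sketch of the same "cp -R src dst" behaviour without distutils (which is removed in Python 3.12); dirs_exist_ok requires Python 3.8+.

```python
# distutils-free sketch of the cp helper above.
import os
import shutil


def cp(src, dst):
    if os.path.isfile(src):
        parent = os.path.dirname(dst)
        if parent:
            os.makedirs(parent, exist_ok=True)
        shutil.copy(src, dst)
    else:
        shutil.copytree(src, dst, dirs_exist_ok=True)
```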
codelv/enaml-native-cli
enamlnativecli/main.py
find_commands
def find_commands(cls): """ Finds commands by finding the subclasses of Command""" cmds = [] for subclass in cls.__subclasses__(): cmds.append(subclass) cmds.extend(find_commands(subclass)) return cmds
python
def find_commands(cls): """ Finds commands by finding the subclasses of Command""" cmds = [] for subclass in cls.__subclasses__(): cmds.append(subclass) cmds.extend(find_commands(subclass)) return cmds
Finds commands by finding the subclasses of Command
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1617-L1623
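The recursion over __subclasses__ above picks up indirect subclasses as well as direct ones. A tiny self-contained demonstration with a made-up class hierarchy:

```python
# Recursive subclass discovery: GrandChild-style classes are found too.
class Command:
    pass


class Build(Command):
    pass


class BuildAndroid(Build):
    pass


def find_commands(cls):
    cmds = []
    for subclass in cls.__subclasses__():
        cmds.append(subclass)
        cmds.extend(find_commands(subclass))
    return cmds


print([c.__name__ for c in find_commands(Command)])  # ['Build', 'BuildAndroid']
```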
codelv/enaml-native-cli
enamlnativecli/main.py
Link.link
def link(self, path, pkg): """ Link the package in the current directory. """ # Check if a custom linker exists to handle linking this package #for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"): # if ep.name.replace("-", '_') == pkg.replace("-", '_'): # linker = ep.load() # print("Custom linker {} found for '{}'. Linking...".format( # linker, pkg)) # if linker(self.ctx, path): # return #: Use the default builtin linker script if exists(join(path, pkg, 'build.gradle')): print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format( pkg)+Colors.RESET) self.link_android(path, pkg) if exists(join(path, pkg, 'Podfile')): print(Colors.BLUE+"[INFO] Linking {}/Podfile".format( pkg)+Colors.RESET) self.link_ios(path, pkg)
python
def link(self, path, pkg): """ Link the package in the current directory. """ # Check if a custom linker exists to handle linking this package #for ep in pkg_resources.iter_entry_points(group="enaml_native_linker"): # if ep.name.replace("-", '_') == pkg.replace("-", '_'): # linker = ep.load() # print("Custom linker {} found for '{}'. Linking...".format( # linker, pkg)) # if linker(self.ctx, path): # return #: Use the default builtin linker script if exists(join(path, pkg, 'build.gradle')): print(Colors.BLUE+"[INFO] Linking {}/build.gradle".format( pkg)+Colors.RESET) self.link_android(path, pkg) if exists(join(path, pkg, 'Podfile')): print(Colors.BLUE+"[INFO] Linking {}/Podfile".format( pkg)+Colors.RESET) self.link_ios(path, pkg)
Link the package in the current directory.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L826-L846
codelv/enaml-native-cli
enamlnativecli/main.py
Link.is_settings_linked
def is_settings_linked(source, pkg): """ Returns true if the "include ':<project>'" line exists in the file """ for line in source.split("\n"): if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line): return True return False
python
def is_settings_linked(source, pkg): """ Returns true if the "include ':<project>'" line exists in the file """ for line in source.split("\n"): if re.search(r"include\s*['\"]:{}['\"]".format(pkg), line): return True return False
Returns true if the "include ':<project>'" line exists in the file
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L849-L855
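A quick check of the include-line regex above against a made-up settings.gradle; the sample content and package names are illustrative only, and re.escape is added to guard package names containing regex metacharacters.

```python
# Exercising the is_settings_linked regex on sample settings.gradle content.
import re

SETTINGS_GRADLE = """\
include ':app'
include ':enaml-native-maps'
project(':enaml-native-maps').projectDir = new File(rootProject.projectDir, '../packages/enaml-native-maps/android')
"""


def is_settings_linked(source, pkg):
    pattern = r"include\s*['\"]:{}['\"]".format(re.escape(pkg))
    return any(re.search(pattern, line) for line in source.split("\n"))


print(is_settings_linked(SETTINGS_GRADLE, "enaml-native-maps"))  # True
print(is_settings_linked(SETTINGS_GRADLE, "enaml-native-ads"))   # False
```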
codelv/enaml-native-cli
enamlnativecli/main.py
Link.is_build_linked
def is_build_linked(source, pkg): """ Returns true if the "compile project(':<project>')" line exists in the file """ for line in source.split("\n"): if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg), line): return True return False
python
def is_build_linked(source, pkg): """ Returns true if the "compile project(':<project>')" line exists in the file """ for line in source.split("\n"): if re.search(r"(api|compile)\s+project\(['\"]:{}['\"]\)".format(pkg), line): return True return False
Returns true if the "compile project(':<project>')" line exists in the file
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L858-L865
codelv/enaml-native-cli
enamlnativecli/main.py
Link.find_packages
def find_packages(path): """ Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path. """ matches = [] root = join(path, 'src', 'main', 'java') for folder, dirnames, filenames in os.walk(root): for filename in fnmatch.filter(filenames, '*Package.java'): #: Open and make sure it's an EnamlPackage somewhere with open(join(folder, filename)) as f: if "implements EnamlPackage" in f.read(): package = os.path.relpath(folder, root) matches.append(os.path.join(package, filename)) return matches
python
def find_packages(path): """ Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path. """ matches = [] root = join(path, 'src', 'main', 'java') for folder, dirnames, filenames in os.walk(root): for filename in fnmatch.filter(filenames, '*Package.java'): #: Open and make sure it's an EnamlPackage somewhere with open(join(folder, filename)) as f: if "implements EnamlPackage" in f.read(): package = os.path.relpath(folder, root) matches.append(os.path.join(package, filename)) return matches
Find all java files matching the "*Package.java" pattern within the given enaml package directory relative to the java source path.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L868-L881
codelv/enaml-native-cli
enamlnativecli/main.py
Link.is_app_linked
def is_app_linked(source, pkg, java_package): """ Returns true if the compile project line exists in the file """ for line in source.split("\n"): if java_package in line: return True return False
python
def is_app_linked(source, pkg, java_package): """ Returns true if the compile project line exists in the file """ for line in source.split("\n"): if java_package in line: return True return False
Returns true if the compile project line exists in the file
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L884-L891
codelv/enaml-native-cli
enamlnativecli/main.py
Link.link_android
def link_android(self, path, pkg): """ Links the android project to this library. 1. Includes this project's directory in the app's android/settings.gradle It adds: include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../packages/<project-name>/android') 2. Adds this project as a dependency to the android/app/build.gradle It adds: compile project(':<project-name>') to the dependencies. 3. If present, adds the import and package statement to the android/app/src/main/java/<bundle/id>/MainApplication.java """ bundle_id = self.ctx['bundle_id'] pkg_root = join(path, pkg) #: Check if it's already linked with open(join('android', 'settings.gradle')) as f: settings_gradle = f.read() with open(join('android', 'app', 'build.gradle')) as f: build_gradle = f.read() #: Find the MainApplication.java main_app_java_path = join('android', 'app', 'src', 'main', 'java', join(*bundle_id.split(".")), 'MainApplication.java') with open(main_app_java_path) as f: main_application_java = f.read() try: #: Now link all the EnamlPackages we can find in the new "package" new_packages = Link.find_packages(join(path, pkg)) if not new_packages: print("[Android] {} No EnamlPackages found to link!".format( pkg)) return #: Link settings.gradle if not Link.is_settings_linked(settings_gradle, pkg): #: Add two statements new_settings = settings_gradle.split("\n") new_settings.append("") # Blank line new_settings.append("include ':{name}'".format(name=pkg)) new_settings.append("project(':{name}').projectDir = " "new File(rootProject.projectDir, " "'../{path}/android/{name}')" .format(name=pkg, path=self.package_dir)) with open(join('android', 'settings.gradle'), 'w') as f: f.write("\n".join(new_settings)) print("[Android] {} linked in settings.gradle!".format(pkg)) else: print("[Android] {} was already linked in " "settings.gradle!".format(pkg)) #: Link app/build.gradle if not Link.is_build_linked(build_gradle, pkg): #: Add two statements new_build = build_gradle.split("\n") #: Find correct line number found = False for i, line in enumerate(new_build): if re.match(r"dependencies\s*{", line): found = True continue if found and "}" in line: #: Hackish way to find line of the closing bracket after #: the dependencies { block is found break if not found: raise ValueError("Unable to find dependencies in " "{pkg}/app/build.gradle!".format(pkg=pkg)) #: Insert before the closing bracket new_build.insert(i, " api project(':{name}')".format( name=pkg)) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write("\n".join(new_build)) print("[Android] {} linked in app/build.gradle!".format(pkg)) else: print("[Android] {} was already linked in " "app/build.gradle!".format(pkg)) new_app_java = [] for package in new_packages: #: Add our import statement javacls = os.path.splitext(package)[0].replace("/", ".") if not Link.is_app_linked(main_application_java, pkg, javacls): #: Reuse previous if available new_app_java = (new_app_java or main_application_java.split("\n")) #: Find last import statement j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line, "import *;"): j = i new_app_java.insert(j+1, "import {};".format(javacls)) #: Add the package statement j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line.strip(), "new *Package()"): j = i if j == 0: raise ValueError("Could not find the correct spot to " "add package {}".format(javacls)) else: #: Get indent and add to previous line #: Add comma to previous line new_app_java[j] = new_app_java[j]+ "," #: Insert new line new_app_java.insert(j+1, " new {}()" .format(javacls.split(".")[-1])) else: print("[Android] {} was already linked in {}!".format( pkg, main_app_java_path)) if new_app_java: with open(main_app_java_path, 'w') as f: f.write("\n".join(new_app_java)) print(Colors.GREEN+"[Android] {} linked successfully!".format( pkg)+Colors.RESET) except Exception as e: print(Colors.RED+"[Android] {} Failed to link. " "Reverting due to error: " "{}".format(pkg, e)+Colors.RESET) #: Undo any changes with open(join('android', 'settings.gradle'), 'w') as f: f.write(settings_gradle) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write(build_gradle) with open(main_app_java_path, 'w') as f: f.write(main_application_java) #: Now blow up raise
python
def link_android(self, path, pkg): """ Links the android project to this library. 1. Includes this project's directory in the app's android/settings.gradle It adds: include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../packages/<project-name>/android') 2. Adds this project as a dependency to the android/app/build.gradle It adds: compile project(':<project-name>') to the dependencies. 3. If present, adds the import and package statement to the android/app/src/main/java/<bundle/id>/MainApplication.java """ bundle_id = self.ctx['bundle_id'] pkg_root = join(path, pkg) #: Check if it's already linked with open(join('android', 'settings.gradle')) as f: settings_gradle = f.read() with open(join('android', 'app', 'build.gradle')) as f: build_gradle = f.read() #: Find the MainApplication.java main_app_java_path = join('android', 'app', 'src', 'main', 'java', join(*bundle_id.split(".")), 'MainApplication.java') with open(main_app_java_path) as f: main_application_java = f.read() try: #: Now link all the EnamlPackages we can find in the new "package" new_packages = Link.find_packages(join(path, pkg)) if not new_packages: print("[Android] {} No EnamlPackages found to link!".format( pkg)) return #: Link settings.gradle if not Link.is_settings_linked(settings_gradle, pkg): #: Add two statements new_settings = settings_gradle.split("\n") new_settings.append("") # Blank line new_settings.append("include ':{name}'".format(name=pkg)) new_settings.append("project(':{name}').projectDir = " "new File(rootProject.projectDir, " "'../{path}/android/{name}')" .format(name=pkg, path=self.package_dir)) with open(join('android', 'settings.gradle'), 'w') as f: f.write("\n".join(new_settings)) print("[Android] {} linked in settings.gradle!".format(pkg)) else: print("[Android] {} was already linked in " "settings.gradle!".format(pkg)) #: Link app/build.gradle if not Link.is_build_linked(build_gradle, pkg): #: Add two statements new_build = build_gradle.split("\n") #: Find correct line number found = False for i, line in enumerate(new_build): if re.match(r"dependencies\s*{", line): found = True continue if found and "}" in line: #: Hackish way to find line of the closing bracket after #: the dependencies { block is found break if not found: raise ValueError("Unable to find dependencies in " "{pkg}/app/build.gradle!".format(pkg=pkg)) #: Insert before the closing bracket new_build.insert(i, " api project(':{name}')".format( name=pkg)) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write("\n".join(new_build)) print("[Android] {} linked in app/build.gradle!".format(pkg)) else: print("[Android] {} was already linked in " "app/build.gradle!".format(pkg)) new_app_java = [] for package in new_packages: #: Add our import statement javacls = os.path.splitext(package)[0].replace("/", ".") if not Link.is_app_linked(main_application_java, pkg, javacls): #: Reuse previous if available new_app_java = (new_app_java or main_application_java.split("\n")) #: Find last import statement j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line, "import *;"): j = i new_app_java.insert(j+1, "import {};".format(javacls)) #: Add the package statement j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line.strip(), "new *Package()"): j = i if j == 0: raise ValueError("Could not find the correct spot to " "add package {}".format(javacls)) else: #: Get indent and add to previous line #: Add comma to previous line new_app_java[j] = new_app_java[j]+ "," #: Insert new line new_app_java.insert(j+1, " new {}()" .format(javacls.split(".")[-1])) else: print("[Android] {} was already linked in {}!".format( pkg, main_app_java_path)) if new_app_java: with open(main_app_java_path, 'w') as f: f.write("\n".join(new_app_java)) print(Colors.GREEN+"[Android] {} linked successfully!".format( pkg)+Colors.RESET) except Exception as e: print(Colors.RED+"[Android] {} Failed to link. " "Reverting due to error: " "{}".format(pkg, e)+Colors.RESET) #: Undo any changes with open(join('android', 'settings.gradle'), 'w') as f: f.write(settings_gradle) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write(build_gradle) with open(main_app_java_path, 'w') as f: f.write(main_application_java) #: Now blow up raise
Links the android project to this library. 1. Includes this project's directory in the app's android/settings.gradle It adds: include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../packages/<project-name>/android') 2. Adds this project as a dependency to the android/app/build.gradle It adds: compile project(':<project-name>') to the dependencies. 3. If present, adds the import and package statement to the android/app/src/main/java/<bundle/id>/MainApplication.java
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L893-L1044
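The trickiest step in link_android above is inserting the `api project(':<name>')` line just before the closing brace of the `dependencies { ... }` block. That scan can be shown on its own; the build.gradle content below is sample data, not the real template.

```python
# The dependency-insertion step in isolation: find "dependencies {", then
# insert the new line just before the next line containing "}".
import re

BUILD_GRADLE = """\
apply plugin: 'com.android.application'

dependencies {
    api project(':enaml-native')
}
"""


def add_dependency(source, pkg):
    lines = source.split("\n")
    found = False
    for i, line in enumerate(lines):
        if re.match(r"dependencies\s*{", line):
            found = True
            continue
        if found and "}" in line:
            break
    if not found:
        raise ValueError("No dependencies block found")
    lines.insert(i, "    api project(':{}')".format(pkg))
    return "\n".join(lines)


print(add_dependency(BUILD_GRADLE, "my-package"))
```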
codelv/enaml-native-cli
enamlnativecli/main.py
Unlink.run
def run(self, args=None): """ The name IS required here. """ print(Colors.BLUE+"[INFO] Unlinking {}...".format( args.names)+Colors.RESET) for name in args.names: self.unlink(Link.package_dir, name)
python
def run(self, args=None): """ The name IS required here. """ print(Colors.BLUE+"[INFO] Unlinking {}...".format( args.names)+Colors.RESET) for name in args.names: self.unlink(Link.package_dir, name)
The name IS required here.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1073-L1078
codelv/enaml-native-cli
enamlnativecli/main.py
Unlink.unlink
def unlink(self, path, pkg): """ Unlink the package in the current directory. """ #: Check if a custom unlinker exists to handle unlinking this package for ep in pkg_resources.iter_entry_points( group="enaml_native_unlinker"): if ep.name.replace("-", '_') == pkg.replace("-", '_'): unlinker = ep.load() print("Custom unlinker {} found for '{}'. " "Unlinking...".format(unlinker, pkg)) if unlinker(self.ctx, path): return if exists(join(path, 'android', pkg, 'build.gradle')): print("[Android] unlinking {}".format(pkg)) self.unlink_android(path, pkg) for target in ['iphoneos', 'iphonesimulator']: if exists(join(path, target, pkg, 'Podfile')): print("[iOS] unlinking {}".format(pkg)) self.unlink_ios(path, pkg)
python
def unlink(self, path, pkg): """ Unlink the package in the current directory. """ #: Check if a custom unlinker exists to handle unlinking this package for ep in pkg_resources.iter_entry_points( group="enaml_native_unlinker"): if ep.name.replace("-", '_') == pkg.replace("-", '_'): unlinker = ep.load() print("Custom unlinker {} found for '{}'. " "Unlinking...".format(unlinker, pkg)) if unlinker(self.ctx, path): return if exists(join(path, 'android', pkg, 'build.gradle')): print("[Android] unlinking {}".format(pkg)) self.unlink_android(path, pkg) for target in ['iphoneos', 'iphonesimulator']: if exists(join(path, target, pkg, 'Podfile')): print("[iOS] unlinking {}".format(pkg)) self.unlink_ios(path, pkg)
Unlink the package in the current directory.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1080-L1100
codelv/enaml-native-cli
enamlnativecli/main.py
Unlink.unlink_android
def unlink_android(self, path, pkg): """ Unlinks the android project from this library. 1. In the app's android/settings.gradle, it removes the following lines (if they exist): include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../venv/packages/<project-name>/android') 2. In the app's android/app/build.gradle, it removes the following line (if present) compile project(':<project-name>') 3. In the app's android/app/src/main/java/<bundle/id>/MainApplication.java, it removes: import <package>.<Name>Package; new <Name>Package(), If no comma exists it will remove the comma from the previous line. """ bundle_id = self.ctx['bundle_id'] #: Check if it's already linked with open(join('android', 'settings.gradle')) as f: settings_gradle = f.read() with open(join('android', 'app', 'build.gradle')) as f: build_gradle = f.read() #: Find the MainApplication.java main_app_java_path = join('android', 'app', 'src', 'main', 'java', join(*bundle_id.split(".")), 'MainApplication.java') with open(main_app_java_path) as f: main_application_java = f.read() try: #: Now unlink all the EnamlPackages we can find in the "package" new_packages = Link.find_packages(join(path, 'android', pkg)) if not new_packages: print(Colors.RED+"\t[Android] {} No EnamlPackages found to " "unlink!".format(pkg)+Colors.RESET) return #: Unlink settings.gradle if Link.is_settings_linked(settings_gradle, pkg): #: Remove the two statements new_settings = [ line for line in settings_gradle.split("\n") if line.strip() not in [ "include ':{name}'".format(name=pkg), "project(':{name}').projectDir = " "new File(rootProject.projectDir, " "'../{path}/android/{name}')".format(path=path, name=pkg) ] ] with open(join('android', 'settings.gradle'), 'w') as f: f.write("\n".join(new_settings)) print("\t[Android] {} unlinked settings.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "settings.gradle!".format(pkg)) #: Unlink app/build.gradle if Link.is_build_linked(build_gradle, pkg): #: Add two statements new_build = [ line for line in build_gradle.split("\n") if line.strip() not in [ "compile project(':{name}')".format(name=pkg), "api project(':{name}')".format(name=pkg), ] ] with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write("\n".join(new_build)) print("\t[Android] {} unlinked in " "app/build.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "app/build.gradle!".format(pkg)) new_app_java = [] for package in new_packages: #: Add our import statement javacls = os.path.splitext(package)[0].replace("/", ".") if Link.is_app_linked(main_application_java, pkg, javacls): #: Reuse previous if available new_app_java = (new_app_java or main_application_java.split("\n")) new_app_java = [ line for line in new_app_java if line.strip() not in [ "import {};".format(javacls), "new {}()".format(javacls.split(".")[-1]), "new {}(),".format(javacls.split(".")[-1]), ] ] #: Now find the last package and remove the comma if it #: exists found = False j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line.strip(), "new *Package()"): found = True elif fnmatch.fnmatch(line.strip(), "new *Package(),"): j = i #: We removed the last package so strip the trailing comma if not found: #: This kills any whitespace... new_app_java[j] = new_app_java[j][ :new_app_java[j].rfind(',')] else: print("\t[Android] {} was not linked in {}!".format( pkg, main_app_java_path)) if new_app_java: with open(main_app_java_path, 'w') as f: f.write("\n".join(new_app_java)) print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format( pkg)+Colors.RESET) except Exception as e: print(Colors.RED+"\t[Android] {} Failed to unlink. " "Reverting due to error: {}".format(pkg, e)+Colors.RESET) #: Undo any changes with open(join('android', 'settings.gradle'), 'w') as f: f.write(settings_gradle) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write(build_gradle) with open(main_app_java_path, 'w') as f: f.write(main_application_java) #: Now blow up raise
python
def unlink_android(self, path, pkg): """ Unlinks the android project from this library. 1. In the app's android/settings.gradle, it removes the following lines (if they exist): include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../venv/packages/<project-name>/android') 2. In the app's android/app/build.gradle, it removes the following line (if present) compile project(':<project-name>') 3. In the app's android/app/src/main/java/<bundle/id>/MainApplication.java, it removes: import <package>.<Name>Package; new <Name>Package(), If no comma exists it will remove the comma from the previous line. """ bundle_id = self.ctx['bundle_id'] #: Check if it's already linked with open(join('android', 'settings.gradle')) as f: settings_gradle = f.read() with open(join('android', 'app', 'build.gradle')) as f: build_gradle = f.read() #: Find the MainApplication.java main_app_java_path = join('android', 'app', 'src', 'main', 'java', join(*bundle_id.split(".")), 'MainApplication.java') with open(main_app_java_path) as f: main_application_java = f.read() try: #: Now unlink all the EnamlPackages we can find in the "package" new_packages = Link.find_packages(join(path, 'android', pkg)) if not new_packages: print(Colors.RED+"\t[Android] {} No EnamlPackages found to " "unlink!".format(pkg)+Colors.RESET) return #: Unlink settings.gradle if Link.is_settings_linked(settings_gradle, pkg): #: Remove the two statements new_settings = [ line for line in settings_gradle.split("\n") if line.strip() not in [ "include ':{name}'".format(name=pkg), "project(':{name}').projectDir = " "new File(rootProject.projectDir, " "'../{path}/android/{name}')".format(path=path, name=pkg) ] ] with open(join('android', 'settings.gradle'), 'w') as f: f.write("\n".join(new_settings)) print("\t[Android] {} unlinked settings.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "settings.gradle!".format(pkg)) #: Unlink app/build.gradle if Link.is_build_linked(build_gradle, pkg): #: Add two statements new_build = [ line for line in build_gradle.split("\n") if line.strip() not in [ "compile project(':{name}')".format(name=pkg), "api project(':{name}')".format(name=pkg), ] ] with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write("\n".join(new_build)) print("\t[Android] {} unlinked in " "app/build.gradle!".format(pkg)) else: print("\t[Android] {} was not linked in " "app/build.gradle!".format(pkg)) new_app_java = [] for package in new_packages: #: Add our import statement javacls = os.path.splitext(package)[0].replace("/", ".") if Link.is_app_linked(main_application_java, pkg, javacls): #: Reuse previous if available new_app_java = (new_app_java or main_application_java.split("\n")) new_app_java = [ line for line in new_app_java if line.strip() not in [ "import {};".format(javacls), "new {}()".format(javacls.split(".")[-1]), "new {}(),".format(javacls.split(".")[-1]), ] ] #: Now find the last package and remove the comma if it #: exists found = False j = 0 for i, line in enumerate(new_app_java): if fnmatch.fnmatch(line.strip(), "new *Package()"): found = True elif fnmatch.fnmatch(line.strip(), "new *Package(),"): j = i #: We removed the last package so strip the trailing comma if not found: #: This kills any whitespace... new_app_java[j] = new_app_java[j][ :new_app_java[j].rfind(',')] else: print("\t[Android] {} was not linked in {}!".format( pkg, main_app_java_path)) if new_app_java: with open(main_app_java_path, 'w') as f: f.write("\n".join(new_app_java)) print(Colors.GREEN+"\t[Android] {} unlinked successfully!".format( pkg)+Colors.RESET) except Exception as e: print(Colors.RED+"\t[Android] {} Failed to unlink. " "Reverting due to error: {}".format(pkg, e)+Colors.RESET) #: Undo any changes with open(join('android', 'settings.gradle'), 'w') as f: f.write(settings_gradle) with open(join('android', 'app', 'build.gradle'), 'w') as f: f.write(build_gradle) with open(main_app_java_path, 'w') as f: f.write(main_application_java) #: Now blow up raise
Unlinks the android project from this library. 1. In the app's android/settings.gradle, it removes the following lines (if they exist): include ':<project-name>' project(':<project-name>').projectDir = new File( rootProject.projectDir, '../venv/packages/<project-name>/android') 2. In the app's android/app/build.gradle, it removes the following line (if present) compile project(':<project-name>') 3. In the app's android/app/src/main/java/<bundle/id>/MainApplication.java, it removes: import <package>.<Name>Package; new <Name>Package(), If no comma exists it will remove the comma from the previous line.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1102-L1249
codelv/enaml-native-cli
enamlnativecli/main.py
Server.run_tornado
def run_tornado(self, args): """ Tornado dev server implementation """ server = self import tornado.ioloop import tornado.web import tornado.websocket ioloop = tornado.ioloop.IOLoop.current() class DevWebSocketHandler(tornado.websocket.WebSocketHandler): def open(self): super(DevWebSocketHandler, self).open() server.on_open(self) def on_message(self, message): server.on_message(self, message) def on_close(self): super(DevWebSocketHandler, self).on_close() server.on_close(self) class MainHandler(tornado.web.RequestHandler): def get(self): self.write(server.index_page) #: Set the call later method server.call_later = ioloop.call_later server.add_callback = ioloop.add_callback app = tornado.web.Application([ (r"/", MainHandler), (r"/dev", DevWebSocketHandler), ]) app.listen(self.port) print("Tornado Dev server started on {}".format(self.port)) ioloop.start()
python
def run_tornado(self, args): """ Tornado dev server implementation """ server = self import tornado.ioloop import tornado.web import tornado.websocket ioloop = tornado.ioloop.IOLoop.current() class DevWebSocketHandler(tornado.websocket.WebSocketHandler): def open(self): super(DevWebSocketHandler, self).open() server.on_open(self) def on_message(self, message): server.on_message(self, message) def on_close(self): super(DevWebSocketHandler, self).on_close() server.on_close(self) class MainHandler(tornado.web.RequestHandler): def get(self): self.write(server.index_page) #: Set the call later method server.call_later = ioloop.call_later server.add_callback = ioloop.add_callback app = tornado.web.Application([ (r"/", MainHandler), (r"/dev", DevWebSocketHandler), ]) app.listen(self.port) print("Tornado Dev server started on {}".format(self.port)) ioloop.start()
Tornado dev server implementation
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1466-L1502
codelv/enaml-native-cli
enamlnativecli/main.py
Server.run_twisted
def run_twisted(self, args): """ Twisted dev server implementation """ server = self from twisted.internet import reactor from twisted.web import resource from twisted.web.static import File from twisted.web.server import Site from autobahn.twisted.websocket import (WebSocketServerFactory, WebSocketServerProtocol) from autobahn.twisted.resource import WebSocketResource class DevWebSocketHandler(WebSocketServerProtocol): def onConnect(self, request): super(DevWebSocketHandler, self).onConnect(request) server.on_open(self) def onMessage(self, payload, isBinary): server.on_message(self, payload) def onClose(self, wasClean, code, reason): super(DevWebSocketHandler,self).onClose(wasClean, code, reason) server.on_close(self) def write_message(self, message, binary=False): self.sendMessage(message, binary) #: Set the call later method server.call_later = reactor.callLater server.add_callback = reactor.callFromThread factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port)) factory.protocol = DevWebSocketHandler class MainHandler(resource.Resource): def render_GET(self, req): return str(server.index_page) root = resource.Resource() root.putChild("", MainHandler()) root.putChild("dev", WebSocketResource(factory)) reactor.listenTCP(self.port, Site(root)) print("Twisted Dev server started on {}".format(self.port)) reactor.run()
python
def run_twisted(self, args): """ Twisted dev server implementation """ server = self from twisted.internet import reactor from twisted.web import resource from twisted.web.static import File from twisted.web.server import Site from autobahn.twisted.websocket import (WebSocketServerFactory, WebSocketServerProtocol) from autobahn.twisted.resource import WebSocketResource class DevWebSocketHandler(WebSocketServerProtocol): def onConnect(self, request): super(DevWebSocketHandler, self).onConnect(request) server.on_open(self) def onMessage(self, payload, isBinary): server.on_message(self, payload) def onClose(self, wasClean, code, reason): super(DevWebSocketHandler,self).onClose(wasClean, code, reason) server.on_close(self) def write_message(self, message, binary=False): self.sendMessage(message, binary) #: Set the call later method server.call_later = reactor.callLater server.add_callback = reactor.callFromThread factory = WebSocketServerFactory(u"ws://0.0.0.0:{}".format(self.port)) factory.protocol = DevWebSocketHandler class MainHandler(resource.Resource): def render_GET(self, req): return str(server.index_page) root = resource.Resource() root.putChild("", MainHandler()) root.putChild("dev", WebSocketResource(factory)) reactor.listenTCP(self.port, Site(root)) print("Twisted Dev server started on {}".format(self.port)) reactor.run()
Twisted dev server implementation
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1504-L1547
codelv/enaml-native-cli
enamlnativecli/main.py
Server.on_message
def on_message(self, handler, msg): """ In remote debugging mode this simply acts as a forwarding proxy for the two clients. """ if self.remote_debugging: #: Forward to other clients for h in self.handlers: if h != handler: h.write_message(msg, True) else: print(msg)
python
def on_message(self, handler, msg): """ In remote debugging mode this simply acts as a forwarding proxy for the two clients. """ if self.remote_debugging: #: Forward to other clients for h in self.handlers: if h != handler: h.write_message(msg, True) else: print(msg)
In remote debugging mode this simply acts as a forwarding proxy for the two clients.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1557-L1567
codelv/enaml-native-cli
enamlnativecli/main.py
Server.send_message
def send_message(self, msg): """ Send a message to the client. This should not be used in remote debugging mode. """ if not self.handlers: return #: Client not connected for h in self.handlers: h.write_message(msg)
python
def send_message(self, msg): """ Send a message to the client. This should not be used in remote debugging mode. """ if not self.handlers: return #: Client not connected for h in self.handlers: h.write_message(msg)
Send a message to the client. This should not be used in remote debugging mode.
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1569-L1577
codelv/enaml-native-cli
enamlnativecli/main.py
EnamlNativeCli._default_commands
def _default_commands(self): """ Build the list of CLI commands by finding subclasses of the Command class Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass """ commands = [c() for c in find_commands(Command)] #: Get commands installed via entry points for ep in pkg_resources.iter_entry_points( group="enaml_native_command"): c = ep.load() if not issubclass(c, Command): print("Warning: entry point {} did not return a valid enaml " "cli command! This command will be ignored!".format( ep.name)) commands.append(c()) return commands
python
def _default_commands(self): """ Build the list of CLI commands by finding subclasses of the Command class Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass """ commands = [c() for c in find_commands(Command)] #: Get commands installed via entry points for ep in pkg_resources.iter_entry_points( group="enaml_native_command"): c = ep.load() if not issubclass(c, Command): print("Warning: entry point {} did not return a valid enaml " "cli command! This command will be ignored!".format( ep.name)) commands.append(c()) return commands
Build the list of CLI commands by finding subclasses of the Command class Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1648-L1668
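The record above loads third-party commands through the "enaml_native_command" entry point group. A sketch of what a plugin's setup.py might declare so that ep.load() returns a Command subclass; the package and module names here are hypothetical.

```python
# Hypothetical plugin setup.py registering a Command subclass through the
# "enaml_native_command" entry point group that _default_commands scans.
from setuptools import setup

setup(
    name="enaml-native-hello",          # hypothetical package name
    version="0.1.0",
    py_modules=["hello_command"],       # hypothetical module holding HelloCommand
    entry_points={
        "enaml_native_command": [
            # "name = module:attribute"; ep.load() returns the HelloCommand class
            "hello = hello_command:HelloCommand",
        ],
    },
)
```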
codelv/enaml-native-cli
enamlnativecli/main.py
EnamlNativeCli._default_ctx
def _default_ctx(self): """ Return the package config or context and normalize some of the values """ if not self.in_app_directory: print("Warning: {} does not exist. Using the default.".format( self.package)) ctx = {} else: with open(self.package) as f: ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader)) if self.in_app_directory: # Update the env for each platform excluded = list(ctx.get('excluded', [])) for env in [ctx['ios'], ctx['android']]: if 'python_build_dir' not in env: env['python_build_dir'] = expanduser(abspath('build/python')) if 'conda_prefix' not in env: env['conda_prefix'] = os.environ.get( 'CONDA_PREFIX', expanduser(abspath('venv'))) # Join the shared and local exclusions env['excluded'] = list(env.get('excluded', [])) + excluded return ctx
python
def _default_ctx(self): """ Return the package config or context and normalize some of the values """ if not self.in_app_directory: print("Warning: {} does not exist. Using the default.".format( self.package)) ctx = {} else: with open(self.package) as f: ctx = dict(yaml.load(f, Loader=yaml.RoundTripLoader)) if self.in_app_directory: # Update the env for each platform excluded = list(ctx.get('excluded', [])) for env in [ctx['ios'], ctx['android']]: if 'python_build_dir' not in env: env['python_build_dir'] = expanduser(abspath('build/python')) if 'conda_prefix' not in env: env['conda_prefix'] = os.environ.get( 'CONDA_PREFIX', expanduser(abspath('venv'))) # Join the shared and local exclusions env['excluded'] = list(env.get('excluded', [])) + excluded return ctx
Return the package config or context and normalize some of the values
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1678-L1706
codelv/enaml-native-cli
enamlnativecli/main.py
EnamlNativeCli._default_parser
def _default_parser(self): """ Generate a parser using the command list """ parser = ArgumentParser(prog='enaml-native') #: Build commands by name cmds = {c.title: c for c in self.commands} #: Build parser, prepare commands subparsers = parser.add_subparsers() for c in self.commands: p = subparsers.add_parser(c.title, help=c.help) c.parser = p for (flags, kwargs) in c.args: p.add_argument(*flags.split(), **kwargs) p.set_defaults(cmd=c) c.ctx = self.ctx c.cmds = cmds c.cli = self return parser
python
def _default_parser(self): """ Generate a parser using the command list """ parser = ArgumentParser(prog='enaml-native') #: Build commands by name cmds = {c.title: c for c in self.commands} #: Build parser, prepare commands subparsers = parser.add_subparsers() for c in self.commands: p = subparsers.add_parser(c.title, help=c.help) c.parser = p for (flags, kwargs) in c.args: p.add_argument(*flags.split(), **kwargs) p.set_defaults(cmd=c) c.ctx = self.ctx c.cmds = cmds c.cli = self return parser
Generate a parser using the command list
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1708-L1727
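The wiring above is the standard argparse pattern of one subparser per command plus set_defaults(cmd=...) to tie the parsed namespace back to the command object. A minimal self-contained version with a made-up command:

```python
# Subparser wiring sketch: args.cmd ends up pointing at the command object,
# so the caller can simply invoke args.cmd.run(args).
from argparse import ArgumentParser


class EchoCommand:
    title = "echo"
    help = "Print a message"
    args = [("-m --message", dict(default="hi"))]

    def run(self, args):
        print(args.message)


def build_parser(commands):
    parser = ArgumentParser(prog="demo-cli")
    subparsers = parser.add_subparsers()
    for c in commands:
        p = subparsers.add_parser(c.title, help=c.help)
        for flags, kwargs in c.args:
            p.add_argument(*flags.split(), **kwargs)
        p.set_defaults(cmd=c)
    return parser


args = build_parser([EchoCommand()]).parse_args(["echo", "-m", "hello"])
args.cmd.run(args)  # prints "hello"
```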
codelv/enaml-native-cli
enamlnativecli/main.py
EnamlNativeCli.start
def start(self): """ Run the commands""" self.check_dependencies() self.args = self.parser.parse_args() # Python 3 doesn't set the cmd if no args are given if not hasattr(self.args, 'cmd'): self.parser.print_help() return cmd = self.args.cmd try: if cmd.app_dir_required and not self.in_app_directory: raise EnvironmentError( "'enaml-native {}' must be run within an app root " "directory not: {}".format(cmd.title, os.getcwd())) cmd.run(self.args) except sh.ErrorReturnCode as e: raise
python
def start(self): """ Run the commands""" self.check_dependencies() self.args = self.parser.parse_args() # Python 3 doesn't set the cmd if no args are given if not hasattr(self.args, 'cmd'): self.parser.print_help() return cmd = self.args.cmd try: if cmd.app_dir_required and not self.in_app_directory: raise EnvironmentError( "'enaml-native {}' must be run within an app root " "directory not: {}".format(cmd.title, os.getcwd())) cmd.run(self.args) except sh.ErrorReturnCode as e: raise
Run the commands
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L1741-L1759
codelv/enaml-native-cli
setup.py
find_data
def find_data(folder): """ Include everything in the folder """ for (path, directories, filenames) in os.walk(folder): for filename in filenames: yield os.path.join('..', path, filename)
python
def find_data(folder): """ Include everything in the folder """ for (path, directories, filenames) in os.walk(folder): for filename in filenames: yield os.path.join('..', path, filename)
Include everything in the folder
https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/setup.py#L16-L20
glyph/horsephrase
horsephrase/_implementation.py
generate
def generate(number=4, choice=SystemRandom().choice, words=words, joiner=" "): """ Generate a random passphrase from the GSL. """ return joiner.join(choice(words) for each in range(number))
python
def generate(number=4, choice=SystemRandom().choice, words=words, joiner=" "): """ Generate a random passphrase from the GSL. """ return joiner.join(choice(words) for each in range(number))
Generate a random passphrase from the GSL.
https://github.com/glyph/horsephrase/blob/f646094a40d69e01012e57e29d5eabb50ae481e3/horsephrase/_implementation.py#L11-L15
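The same idea expressed with the modern secrets module, which also draws from the OS CSPRNG; the word list below is a tiny placeholder, not the GSL list horsephrase ships.

```python
# Passphrase generation sketch using secrets.choice.
import secrets

WORDS = ["correct", "horse", "battery", "staple", "orange", "purple"]


def generate(number=4, words=WORDS, joiner=" "):
    return joiner.join(secrets.choice(words) for _ in range(number))


print(generate())  # e.g. "staple orange horse correct"
```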
jtauber/sebastian
sebastian/midi/write_midi.py
Trk.write_meta_info
def write_meta_info(self, byte1, byte2, data): "Worker method for writing meta info" write_varlen(self.data, 0) # tick write_byte(self.data, byte1) write_byte(self.data, byte2) write_varlen(self.data, len(data)) write_chars(self.data, data)
python
def write_meta_info(self, byte1, byte2, data): "Worker method for writing meta info" write_varlen(self.data, 0) # tick write_byte(self.data, byte1) write_byte(self.data, byte2) write_varlen(self.data, len(data)) write_chars(self.data, data)
Worker method for writing meta info
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/midi/write_midi.py#L135-L141
glyph/horsephrase
horsephrase/_guess_guess.py
how_long
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000 * 1000, optimism=2): """ How long might it take to guess a password? @param length: the number of words that we're going to choose. @type length: L{int} @param choices: the number of words we might choose between. @type choices: L{int} @param speed: the speed of our hypothetical password guesser, in guesses per second. @type speed: L{int} @param optimism: When we start guessing all the options, we probably won't have to guess I{all} of them to get a hit. This assumes that the guesser will have to guess only C{1/optimism} of the total number of possible options before it finds a hit. """ return ((choices ** length) / (speed * optimism))
python
def how_long(length=4, choices=len(words), speed=1000 * 1000 * 1000 * 1000, optimism=2): """ How long might it take to guess a password? @param length: the number of words that we're going to choose. @type length: L{int} @param choices: the number of words we might choose between. @type choices: L{int} @param speed: the speed of our hypothetical password guesser, in guesses per second. @type speed: L{int} @param optimism: When we start guessing all the options, we probably won't have to guess I{all} of them to get a hit. This assumes that the guesser will have to guess only C{1/optimism} of the total number of possible options before it finds a hit. """ return ((choices ** length) / (speed * optimism))
How long might it take to guess a password? @param length: the number of words that we're going to choose. @type length: L{int} @param choices: the number of words we might choose between. @type choices: L{int} @param speed: the speed of our hypothetical password guesser, in guesses per second. @type speed: L{int} @param optimism: When we start guessing all the options, we probably won't have to guess I{all} of them to get a hit. This assumes that the guesser will have to guess only C{1/optimism} of the total number of possible options before it finds a hit.
https://github.com/glyph/horsephrase/blob/f646094a40d69e01012e57e29d5eabb50ae481e3/horsephrase/_guess_guess.py#L9-L29
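A worked example of the formula above, assuming a hypothetical 2,048-word list (the real horsephrase list size is not asserted here): guess time grows exponentially with the number of words.

```python
# choices ** length guesses, divided by guesses-per-second times optimism.
def how_long(length, choices, speed=10**12, optimism=2):
    return (choices ** length) / (speed * optimism)


print(how_long(4, 2048))  # 2048**4 / 2e12 ~= 8.8 seconds
print(how_long(6, 2048))  # 2048**6 / 2e12 ~= 3.7e7 seconds, roughly 427 days
```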
glyph/horsephrase
horsephrase/_guess_guess.py
redivmod
def redivmod(initial_value, factors): """ Chop up C{initial_value} according to the list of C{factors} and return a formatted string. """ result = [] value = initial_value for divisor, label in factors: if not divisor: remainder = value if not remainder: break else: value, remainder = divmod(value, divisor) if not value and not remainder: break if remainder == 1: # depluralize label = label[:-1] if six.PY2: addition = unicode(remainder) + ' ' + unicode(label) else: addition = str(remainder) + ' ' + str(label) result.insert(0, addition) if len(result) > 1: result[-1] = "and " + result[-1] if result: return ', '.join(result) else: return "instantly"
python
def redivmod(initial_value, factors): """ Chop up C{initial_value} according to the list of C{factors} and return a formatted string. """ result = [] value = initial_value for divisor, label in factors: if not divisor: remainder = value if not remainder: break else: value, remainder = divmod(value, divisor) if not value and not remainder: break if remainder == 1: # depluralize label = label[:-1] if six.PY2: addition = unicode(remainder) + ' ' + unicode(label) else: addition = str(remainder) + ' ' + str(label) result.insert(0, addition) if len(result) > 1: result[-1] = "and " + result[-1] if result: return ', '.join(result) else: return "instantly"
Chop up C{initial_value} according to the list of C{factors} and return a formatted string.
https://github.com/glyph/horsephrase/blob/f646094a40d69e01012e57e29d5eabb50ae481e3/horsephrase/_guess_guess.py#L33-L62
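The factors argument above pairs a divisor with a plural unit label, smallest unit first, with a falsy divisor marking the last, open-ended unit. A usage sketch with a trimmed Python-3-only copy of the function and an illustrative factor list (the real list lives in the module):

```python
# redivmod usage sketch: 90061 seconds broken into days/hours/minutes/seconds.
def redivmod(initial_value, factors):
    result = []
    value = initial_value
    for divisor, label in factors:
        if not divisor:
            remainder = value
            if not remainder:
                break
        else:
            value, remainder = divmod(value, divisor)
            if not value and not remainder:
                break
        if remainder == 1:
            label = label[:-1]  # depluralize
        result.insert(0, "{} {}".format(remainder, label))
    if len(result) > 1:
        result[-1] = "and " + result[-1]
    return ", ".join(result) if result else "instantly"


FACTORS = [(60, "seconds"), (60, "minutes"), (24, "hours"), (0, "days")]
print(redivmod(90061, FACTORS))  # "1 day, 1 hour, 1 minute, and 1 second"
```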
jtauber/sebastian
sebastian/core/elements.py
SeqBase.zip
def zip(self, other): """ zips two sequences unifying the corresponding points. """ return self.__class__(p1 % p2 for p1, p2 in zip(self, other))
python
def zip(self, other): """ zips two sequences unifying the corresponding points. """ return self.__class__(p1 % p2 for p1, p2 in zip(self, other))
zips two sequences unifying the corresponding points.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L75-L79
jtauber/sebastian
sebastian/core/elements.py
SeqBase.display
def display(self, format="png"): """ Return an object that can be used to display this sequence. This is used for IPython Notebook. :param format: "png" or "svg" """ from sebastian.core.transforms import lilypond seq = HSeq(self) | lilypond() lily_output = write_lilypond.lily_format(seq) if not lily_output.strip(): #In the case of empty lily outputs, return self to get a textual display return self if format == "png": suffix = ".preview.png" args = ["lilypond", "--png", "-dno-print-pages", "-dpreview"] elif format == "svg": suffix = ".preview.svg" args = ["lilypond", "-dbackend=svg", "-dno-print-pages", "-dpreview"] f = tempfile.NamedTemporaryFile(suffix=suffix) basename = f.name[:-len(suffix)] args.extend(["-o" + basename, "-"]) #Pass shell=True so that if your $PATH contains ~ it will #get expanded. This also changes the way the arguments get #passed in. To work correctly, pass them as a string p = sp.Popen(" ".join(args), stdin=sp.PIPE, shell=True) stdout, stderr = p.communicate("{ %s }" % lily_output) if p.returncode != 0: # there was an error #raise IOError("Lilypond execution failed: %s%s" % (stdout, stderr)) return None if not ipython: return f.read() if format == "png": return Image(data=f.read(), filename=f.name, format="png") else: return SVG(data=f.read(), filename=f.name)
python
def display(self, format="png"): """ Return an object that can be used to display this sequence. This is used for IPython Notebook. :param format: "png" or "svg" """ from sebastian.core.transforms import lilypond seq = HSeq(self) | lilypond() lily_output = write_lilypond.lily_format(seq) if not lily_output.strip(): #In the case of empty lily outputs, return self to get a textual display return self if format == "png": suffix = ".preview.png" args = ["lilypond", "--png", "-dno-print-pages", "-dpreview"] elif format == "svg": suffix = ".preview.svg" args = ["lilypond", "-dbackend=svg", "-dno-print-pages", "-dpreview"] f = tempfile.NamedTemporaryFile(suffix=suffix) basename = f.name[:-len(suffix)] args.extend(["-o" + basename, "-"]) #Pass shell=True so that if your $PATH contains ~ it will #get expanded. This also changes the way the arguments get #passed in. To work correctly, pass them as a string p = sp.Popen(" ".join(args), stdin=sp.PIPE, shell=True) stdout, stderr = p.communicate("{ %s }" % lily_output) if p.returncode != 0: # there was an error #raise IOError("Lilypond execution failed: %s%s" % (stdout, stderr)) return None if not ipython: return f.read() if format == "png": return Image(data=f.read(), filename=f.name, format="png") else: return SVG(data=f.read(), filename=f.name)
Return an object that can be used to display this sequence. This is used for IPython Notebook. :param format: "png" or "svg"
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L84-L125
jtauber/sebastian
sebastian/core/elements.py
HSeq.append
def append(self, point): """ appends a copy of the given point to this sequence """ point = Point(point) self._elements.append(point)
python
def append(self, point): """ appends a copy of the given point to this sequence """ point = Point(point) self._elements.append(point)
appends a copy of the given point to this sequence
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L226-L231
jtauber/sebastian
sebastian/core/elements.py
HSeq.repeat
def repeat(self, count): """ repeat sequence given number of times to produce a new sequence """ x = HSeq() for i in range(count): x = x.concatenate(self) return x
python
def repeat(self, count): """ repeat sequence given number of times to produce a new sequence """ x = HSeq() for i in range(count): x = x.concatenate(self) return x
repeat sequence given number of times to produce a new sequence
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L239-L246
jtauber/sebastian
sebastian/core/elements.py
HSeq.subseq
def subseq(self, start_offset=0, end_offset=None): """ Return a subset of the sequence starting at start_offset (defaulting to the beginning) ending at end_offset (None representing the end, which is the default) Raises ValueError if duration_64 is missing on any element """ from sebastian.core import DURATION_64 def subseq_iter(start_offset, end_offset): cur_offset = 0 for point in self._elements: try: cur_offset += point[DURATION_64] except KeyError: raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64) #Skip until start if cur_offset < start_offset: continue #Yield points start_offset <= point < end_offset if end_offset is None or cur_offset < end_offset: yield point else: return return HSeq(subseq_iter(start_offset, end_offset))
python
def subseq(self, start_offset=0, end_offset=None): """ Return a subset of the sequence starting at start_offset (defaulting to the beginning) ending at end_offset (None representing the end, which is the default) Raises ValueError if duration_64 is missing on any element """ from sebastian.core import DURATION_64 def subseq_iter(start_offset, end_offset): cur_offset = 0 for point in self._elements: try: cur_offset += point[DURATION_64] except KeyError: raise ValueError("HSeq.subseq requires all points to have a %s attribute" % DURATION_64) #Skip until start if cur_offset < start_offset: continue #Yield points start_offset <= point < end_offset if end_offset is None or cur_offset < end_offset: yield point else: return return HSeq(subseq_iter(start_offset, end_offset))
Return a subset of the sequence starting at start_offset (defaulting to the beginning) and ending at end_offset (None representing the end, which is the default). Raises ValueError if duration_64 is missing on any element.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/elements.py#L248-L273
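A minimal usage sketch, assuming HSeq, Point, and DURATION_64 are all importable from sebastian.core (the code above only confirms DURATION_64) and that Point accepts plain dict-style data the way the copies elsewhere in this file suggest. Offsets are in the same 64th-note units as DURATION_64, measured at the end of each point.

from sebastian.core import HSeq, Point, DURATION_64

# three points of 16/64 (a quarter note) each; running offsets after each point are 16, 32, 48
seq = HSeq([Point({DURATION_64: 16}), Point({DURATION_64: 16}), Point({DURATION_64: 16})])

seq.subseq(start_offset=16, end_offset=32)   # just the first point: its running offset 16 satisfies 16 <= offset < 32
seq.subseq()                                 # no arguments: the whole sequence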
jtauber/sebastian
projects/mozart_k545/first_movement.py
arpeggio
def arpeggio(pattern, point): """ turns each subsequence into an arpeggio matching the given ``pattern``. """ point['sequence'] = HSeq(point['sequence'][i] for i in pattern) return point
python
def arpeggio(pattern, point): """ turns each subsequence into an arpeggio matching the given ``pattern``. """ point['sequence'] = HSeq(point['sequence'][i] for i in pattern) return point
turns each subsequence into an arpeggio matching the given ``pattern``.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/projects/mozart_k545/first_movement.py#L26-L31
jtauber/sebastian
projects/mozart_k545/first_movement.py
fill
def fill(duration, point): """ fills the subsequence of the point with repetitions of its subsequence and sets the ``duration`` of each point. """ point['sequence'] = point['sequence'] * (point[DURATION_64] / (8 * duration)) | add({DURATION_64: duration}) return point
python
def fill(duration, point): """ fills the subsequence of the point with repetitions of its subsequence and sets the ``duration`` of each point. """ point['sequence'] = point['sequence'] * (point[DURATION_64] / (8 * duration)) | add({DURATION_64: duration}) return point
fills the subsequence of the point with repetitions of its subsequence and sets the ``duration`` of each point.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/projects/mozart_k545/first_movement.py#L35-L41
jtauber/sebastian
projects/mozart_k545/first_movement.py
expand
def expand(sequence): """ expands a tree of sequences into a single, flat sequence, recursively. """ expanse = [] for point in sequence: if 'sequence' in point: expanse.extend(expand(point['sequence'])) else: expanse.append(point) return sequence.__class__(expanse)
python
def expand(sequence): """ expands a tree of sequences into a single, flat sequence, recursively. """ expanse = [] for point in sequence: if 'sequence' in point: expanse.extend(expand(point['sequence'])) else: expanse.append(point) return sequence.__class__(expanse)
expands a tree of sequences into a single, flat sequence, recursively.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/projects/mozart_k545/first_movement.py#L44-L54
jtauber/sebastian
projects/mozart_k545/first_movement.py
debug
def debug(sequence): """ adds information to the sequence for better debugging, currently only an index property on each point in the sequence. """ points = [] for i, p in enumerate(sequence): copy = Point(p) copy['index'] = i points.append(copy) return sequence.__class__(points)
python
def debug(sequence): """ adds information to the sequence for better debugging, currently only an index property on each point in the sequence. """ points = [] for i, p in enumerate(sequence): copy = Point(p) copy['index'] = i points.append(copy) return sequence.__class__(points)
adds information to the sequence for better debugging, currently only an index property on each point in the sequence.
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/projects/mozart_k545/first_movement.py#L57-L67
jtauber/sebastian
sebastian/core/transforms.py
transform_sequence
def transform_sequence(f): """ A decorator to take a function operating on a point and turn it into a function returning a callable operating on a sequence. The functions passed to this decorator must define a kwarg called "point", or have point be the last positional argument """ @wraps(f) def wrapper(*args, **kwargs): #The arguments here are the arguments passed to the transform, #ie, there will be no "point" argument #Send a function to seq.map_points with all of its arguments applied except #point return lambda seq: seq.map_points(partial(f, *args, **kwargs)) return wrapper
python
def transform_sequence(f): """ A decorator to take a function operating on a point and turn it into a function returning a callable operating on a sequence. The functions passed to this decorator must define a kwarg called "point", or have point be the last positional argument """ @wraps(f) def wrapper(*args, **kwargs): #The arguments here are the arguments passed to the transform, #ie, there will be no "point" argument #Send a function to seq.map_points with all of its arguments applied except #point return lambda seq: seq.map_points(partial(f, *args, **kwargs)) return wrapper
A decorator to take a function operating on a point and turn it into a function returning a callable operating on a sequence. The functions passed to this decorator must define a kwarg called "point", or have point be the last positional argument
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/transforms.py#L8-L24
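A sketch of how the decorator is meant to be used; the transform name and the 'octave' key below are hypothetical, not part of sebastian. The decorated function receives each point as its last argument, and calling the transform returns a callable that can be piped onto a sequence (the same pipe style as HSeq(self) | lilypond() above).

@transform_sequence
def shift_octave(amount, point):
    # hypothetical transform: raise or lower the octave of every point that carries one
    if 'octave' in point:
        point['octave'] = point['octave'] + amount
    return point

# melody is assumed to be an existing HSeq; the pipe applies the returned callable to it
higher = melody | shift_octave(1)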
jtauber/sebastian
sebastian/core/transforms.py
subseq
def subseq(start_offset=0, end_offset=None): """ Return a portion of the input sequence """ def _(sequence): return sequence.subseq(start_offset, end_offset) return _
python
def subseq(start_offset=0, end_offset=None): """ Return a portion of the input sequence """ def _(sequence): return sequence.subseq(start_offset, end_offset) return _
Return a portion of the input sequence
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/transforms.py#L95-L101
jtauber/sebastian
sebastian/core/transforms.py
lilypond
def lilypond(point): """ Generate lilypond representation for a point """ #If lilypond already computed, leave as is if "lilypond" in point: return point #Defaults: pitch_string = "" octave_string = "" duration_string = "" preamble = "" dynamic_string = "" if "pitch" in point: octave = point["octave"] pitch = point["pitch"] if octave > 4: octave_string = "'" * (octave - 4) elif octave < 4: octave_string = "," * (4 - octave) else: octave_string = "" m = modifiers(pitch) if m > 0: modifier_string = "is" * m elif m < 0: modifier_string = "es" * -m else: modifier_string = "" pitch_string = letter(pitch).lower() + modifier_string if DURATION_64 in point: duration = point[DURATION_64] if duration > 0: if duration % 3 == 0: # dotted note duration_string = str(192 // (2 * duration)) + "." else: duration_string = str(64 // duration) #TODO: for now, if we have a duration but no pitch, show a 'c' with an x note if duration_string: if not pitch_string: pitch_string = "c" octave_string = "'" preamble = r'\xNote ' if "dynamic" in point: dynamic = point["dynamic"] if dynamic == "crescendo": dynamic_string = "\<" elif dynamic == "diminuendo": dynamic_string = "\>" else: dynamic_string = "\%s" % (dynamic,) point["lilypond"] = "%s%s%s%s%s" % (preamble, pitch_string, octave_string, duration_string, dynamic_string) return point
python
def lilypond(point): """ Generate lilypond representation for a point """ #If lilypond already computed, leave as is if "lilypond" in point: return point #Defaults: pitch_string = "" octave_string = "" duration_string = "" preamble = "" dynamic_string = "" if "pitch" in point: octave = point["octave"] pitch = point["pitch"] if octave > 4: octave_string = "'" * (octave - 4) elif octave < 4: octave_string = "," * (4 - octave) else: octave_string = "" m = modifiers(pitch) if m > 0: modifier_string = "is" * m elif m < 0: modifier_string = "es" * -m else: modifier_string = "" pitch_string = letter(pitch).lower() + modifier_string if DURATION_64 in point: duration = point[DURATION_64] if duration > 0: if duration % 3 == 0: # dotted note duration_string = str(192 // (2 * duration)) + "." else: duration_string = str(64 // duration) #TODO: for now, if we have a duration but no pitch, show a 'c' with an x note if duration_string: if not pitch_string: pitch_string = "c" octave_string = "'" preamble = r'\xNote ' if "dynamic" in point: dynamic = point["dynamic"] if dynamic == "crescendo": dynamic_string = "\<" elif dynamic == "diminuendo": dynamic_string = "\>" else: dynamic_string = "\%s" % (dynamic,) point["lilypond"] = "%s%s%s%s%s" % (preamble, pitch_string, octave_string, duration_string, dynamic_string) return point
Generate lilypond representation for a point
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/transforms.py#L131-L188
jtauber/sebastian
sebastian/core/transforms.py
dynamics
def dynamics(start, end=None): """ Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics. You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff'] Args: start: beginning dynamic marker, if no end is specified all notes will get this marker end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker Example usage: s1 | dynamics('p') # play a sequence in piano s2 | dynamics('p', 'ff') # crescendo from p to ff s3 | dynamics('ff', 'p') # diminuendo from ff to p """ def _(sequence): if start in _dynamic_markers_to_velocity: start_velocity = _dynamic_markers_to_velocity[start] start_marker = start else: raise ValueError("Unknown start dynamic: %s, must be in %s" % (start, _dynamic_markers_to_velocity.keys())) if end is None: end_velocity = start_velocity end_marker = start_marker elif end in _dynamic_markers_to_velocity: end_velocity = _dynamic_markers_to_velocity[end] end_marker = end else: raise ValueError("Unknown end dynamic: %s, must be in %s" % (end, _dynamic_markers_to_velocity.keys())) retval = sequence.__class__([Point(point) for point in sequence._elements]) velocity_interval = (float(end_velocity) - float(start_velocity)) / (len(retval) - 1) if len(retval) > 1 else 0 velocities = [int(start_velocity + velocity_interval * pos) for pos in range(len(retval))] # insert dynamics markers for lilypond if start_velocity > end_velocity: retval[0]["dynamic"] = "diminuendo" retval[-1]["dynamic"] = end_marker elif start_velocity < end_velocity: retval[0]["dynamic"] = "crescendo" retval[-1]["dynamic"] = end_marker else: retval[0]["dynamic"] = start_marker for point, velocity in zip(retval, velocities): point["velocity"] = velocity return retval return _
python
def dynamics(start, end=None): """ Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics. You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff'] Args: start: beginning dynamic marker, if no end is specified all notes will get this marker end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker Example usage: s1 | dynamics('p') # play a sequence in piano s2 | dynamics('p', 'ff') # crescendo from p to ff s3 | dynamics('ff', 'p') # diminuendo from ff to p """ def _(sequence): if start in _dynamic_markers_to_velocity: start_velocity = _dynamic_markers_to_velocity[start] start_marker = start else: raise ValueError("Unknown start dynamic: %s, must be in %s" % (start, _dynamic_markers_to_velocity.keys())) if end is None: end_velocity = start_velocity end_marker = start_marker elif end in _dynamic_markers_to_velocity: end_velocity = _dynamic_markers_to_velocity[end] end_marker = end else: raise ValueError("Unknown end dynamic: %s, must be in %s" % (end, _dynamic_markers_to_velocity.keys())) retval = sequence.__class__([Point(point) for point in sequence._elements]) velocity_interval = (float(end_velocity) - float(start_velocity)) / (len(retval) - 1) if len(retval) > 1 else 0 velocities = [int(start_velocity + velocity_interval * pos) for pos in range(len(retval))] # insert dynamics markers for lilypond if start_velocity > end_velocity: retval[0]["dynamic"] = "diminuendo" retval[-1]["dynamic"] = end_marker elif start_velocity < end_velocity: retval[0]["dynamic"] = "crescendo" retval[-1]["dynamic"] = end_marker else: retval[0]["dynamic"] = start_marker for point, velocity in zip(retval, velocities): point["velocity"] = velocity return retval return _
Apply dynamics to a sequence. If end is specified, it will crescendo or diminuendo linearly from start to end dynamics. You can pass any of these strings as dynamic markers: ['pppppp', 'ppppp', 'pppp', 'ppp', 'pp', 'p', 'mp', 'mf', 'f', 'ff', 'fff', 'ffff'] Args: start: beginning dynamic marker, if no end is specified all notes will get this marker end: ending dynamic marker, if unspecified the entire sequence will get the start dynamic marker Example usage: s1 | dynamics('p') # play a sequence in piano s2 | dynamics('p', 'ff') # crescendo from p to ff s3 | dynamics('ff', 'p') # diminuendo from ff to p
https://github.com/jtauber/sebastian/blob/4e460c3aeab332b45c74fe78e65e76ec87d5cfa8/sebastian/core/transforms.py#L206-L257
wikimedia/editquality
editquality/utilities/extract_damaging.py
user_blocks
def user_blocks(user_text, session): """ Returns a list of blocks for a single user """ logger.debug("Getting blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id', 'timestamp']) return [mwtypes.Timestamp(b['timestamp']) for b in doc['query']['blocks']]
python
def user_blocks(user_text, session): """ Returns a list of blocks for a single user """ logger.debug("Getting blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id', 'timestamp']) return [mwtypes.Timestamp(b['timestamp']) for b in doc['query']['blocks']]
Returns a list of blocks for a single user
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/utilities/extract_damaging.py#L139-L146
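A hedged usage sketch: the session.get(action='query', list='blocks', ...) call above implies an mwapi-style session object, so that is what is assumed here. The wiki URL, user-agent string, and username are placeholders.

import mwapi

session = mwapi.Session('https://en.wikipedia.org', user_agent='editquality example')
block_timestamps = user_blocks('Example user', session)
print(len(block_timestamps), "blocks found")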
wikimedia/editquality
editquality/utilities/autolabel.py
get_user_blocks
def get_user_blocks(session, user_text): """ Returns a list of blocks for a single user """ logger.debug("Getting user_blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id']) return doc['query']['blocks']
python
def get_user_blocks(session, user_text): """ Returns a list of blocks for a single user """ logger.debug("Getting user_blocks for {0}".format(user_text)) doc = session.get(action='query', list='blocks', bkusers=user_text, bkprop=['id']) return doc['query']['blocks']
Returns a list of blocks for a single user
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/utilities/autolabel.py#L306-L313
wikimedia/editquality
editquality/utilities/autolabel.py
query_revisions_by_revids
def query_revisions_by_revids(session, revids, **params): """ Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored. """ doc = session.get(action='query', prop='revisions', revids=revids, **params) for page_doc in doc['query'].get('pages', {}).values(): revisions = page_doc.get('revisions', []) if 'revisions' in page_doc: del page_doc['revisions'] for revision_doc in revisions: revision_doc['page'] = page_doc yield revision_doc
python
def query_revisions_by_revids(session, revids, **params): """ Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored. """ doc = session.get(action='query', prop='revisions', revids=revids, **params) for page_doc in doc['query'].get('pages', {}).values(): revisions = page_doc.get('revisions', []) if 'revisions' in page_doc: del page_doc['revisions'] for revision_doc in revisions: revision_doc['page'] = page_doc yield revision_doc
Gets a set of revisions by their IDs by repeatedly querying in batches. If an ID cannot be found, it is ignored.
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/utilities/autolabel.py#L316-L331
wikimedia/editquality
editquality/codegen/generate.py
generate
def generate(variables, templates_path, main_template): """ :Parameters: variables : dict Template parameters, passed through. templates_path : str Root directory for transclusions. main_template : str Contents of the main template. Returns the rendered output. """ env = jinja2.Environment( loader=jinja2.FileSystemLoader(templates_path), lstrip_blocks=True, trim_blocks=True ) def norm_alg_filename(alg_name): if alg_name in variables['globals']['algorithm_filename_parts']: return variables['globals']['algorithm_filename_parts'][alg_name] else: raise KeyError("{0} not found in globals.algorithm_filename_parts" .format(alg_name)) env.globals.update(norm_alg_filename=norm_alg_filename) template = env.from_string(main_template) return template.render(variables) + "\n"
python
def generate(variables, templates_path, main_template): """ :Parameters: variables : dict Template parameters, passed through. templates_path : str Root directory for transclusions. main_template : str Contents of the main template. Returns the rendered output. """ env = jinja2.Environment( loader=jinja2.FileSystemLoader(templates_path), lstrip_blocks=True, trim_blocks=True ) def norm_alg_filename(alg_name): if alg_name in variables['globals']['algorithm_filename_parts']: return variables['globals']['algorithm_filename_parts'][alg_name] else: raise KeyError("{0} not found in globals.algorithm_filename_parts" .format(alg_name)) env.globals.update(norm_alg_filename=norm_alg_filename) template = env.from_string(main_template) return template.render(variables) + "\n"
:Parameters: variables : dict Template parameters, passed through. templates_path : str Root directory for transclusions. main_template : str Contents of the main template. Returns the rendered output.
https://github.com/wikimedia/editquality/blob/73bab7bdd0ef3dba9a000f91f2fd810b1772d1f0/editquality/codegen/generate.py#L4-L33
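A minimal sketch of what generate expects: the variables dict must carry globals.algorithm_filename_parts for the norm_alg_filename helper, and templates_path only matters if the main template includes other files. All names and values below are made up for illustration.

variables = {
    "globals": {"algorithm_filename_parts": {"gradient_boosting": "gradient_boosting"}},
    "wiki": "enwiki",
}
main_template = "Model: {{ wiki }}.{{ norm_alg_filename('gradient_boosting') }}"

print(generate(variables, "templates/", main_template))
# -> "Model: enwiki.gradient_boosting" plus the trailing newline appended by generate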
PlaidWeb/Publ
publ/category.py
load_metafile
def load_metafile(filepath): """ Load a metadata file from the filesystem """ try: with open(filepath, 'r', encoding='utf-8') as file: return email.message_from_file(file) except FileNotFoundError: logger.warning("Category file %s not found", filepath) orm.delete(c for c in model.Category if c.file_path == filepath) orm.commit() return None
python
def load_metafile(filepath): """ Load a metadata file from the filesystem """ try: with open(filepath, 'r', encoding='utf-8') as file: return email.message_from_file(file) except FileNotFoundError: logger.warning("Category file %s not found", filepath) orm.delete(c for c in model.Category if c.file_path == filepath) orm.commit() return None
Load a metadata file from the filesystem
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L30-L40
PlaidWeb/Publ
publ/category.py
scan_file
def scan_file(fullpath, relpath): """ scan a file and put it into the index """ load_metafile.cache_clear() meta = load_metafile(fullpath) if not meta: return True # update the category meta file mapping category = meta.get('Category', utils.get_category(relpath)) values = { 'category': category, 'file_path': fullpath, 'sort_name': meta.get('Sort-Name', '') } logger.debug("setting category %s to metafile %s", category, fullpath) record = model.Category.get(category=category) if record: record.set(**values) else: record = model.Category(**values) # update other relationships to the index path_alias.remove_aliases(record) for alias in meta.get_all('Path-Alias', []): path_alias.set_alias(alias, category=record) orm.commit() return record
python
def scan_file(fullpath, relpath): """ scan a file and put it into the index """ load_metafile.cache_clear() meta = load_metafile(fullpath) if not meta: return True # update the category meta file mapping category = meta.get('Category', utils.get_category(relpath)) values = { 'category': category, 'file_path': fullpath, 'sort_name': meta.get('Sort-Name', '') } logger.debug("setting category %s to metafile %s", category, fullpath) record = model.Category.get(category=category) if record: record.set(**values) else: record = model.Category(**values) # update other relationships to the index path_alias.remove_aliases(record) for alias in meta.get_all('Path-Alias', []): path_alias.set_alias(alias, category=record) orm.commit() return record
scan a file and put it into the index
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L283-L314
PlaidWeb/Publ
publ/category.py
Category.name
def name(self): """ Get the display name of the category """ if self._meta and self._meta.get('name'): # get it from the meta file return self._meta.get('name') # infer it from the basename return self.basename.replace('_', ' ').title()
python
def name(self): """ Get the display name of the category """ if self._meta and self._meta.get('name'): # get it from the meta file return self._meta.get('name') # infer it from the basename return self.basename.replace('_', ' ').title()
Get the display name of the category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L118-L124
PlaidWeb/Publ
publ/category.py
Category.description
def description(self): """ Get the textual description of the category """ if self._meta and self._meta.get_payload(): return utils.TrueCallableProxy(self._description) return utils.CallableProxy(None)
python
def description(self): """ Get the textual description of the category """ if self._meta and self._meta.get_payload(): return utils.TrueCallableProxy(self._description) return utils.CallableProxy(None)
Get the textual description of the category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L127-L131
PlaidWeb/Publ
publ/category.py
Category.breadcrumb
def breadcrumb(self): """ Get the category hierarchy leading up to this category, including root and self. For example, path/to/long/category will return a list containing Category('path'), Category('path/to'), and Category('path/to/long'). """ ret = [] here = self while here: ret.append(here) here = here.parent return list(reversed(ret))
python
def breadcrumb(self): """ Get the category hierarchy leading up to this category, including root and self. For example, path/to/long/category will return a list containing Category('path'), Category('path/to'), and Category('path/to/long'). """ ret = [] here = self while here: ret.append(here) here = here.parent return list(reversed(ret))
Get the category hierarchy leading up to this category, including root and self. For example, path/to/long/category will return a list containing Category('path'), Category('path/to'), and Category('path/to/long').
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L139-L151
PlaidWeb/Publ
publ/category.py
Category.sort_name
def sort_name(self): """ Get the sorting name of this category """ if self._record and self._record.sort_name: return self._record.sort_name return self.name
python
def sort_name(self): """ Get the sorting name of this category """ if self._record and self._record.sort_name: return self._record.sort_name return self.name
Get the sorting name of this category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L154-L158
PlaidWeb/Publ
publ/category.py
Category.parent
def parent(self): """ Get the parent category """ if self.path: return Category(os.path.dirname(self.path)) return None
python
def parent(self): """ Get the parent category """ if self.path: return Category(os.path.dirname(self.path)) return None
Get the parent category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L224-L228
PlaidWeb/Publ
publ/category.py
Category._get_subcats
def _get_subcats(self, recurse=False): """ Get the subcategories of this category recurse -- whether to include their subcategories as well """ if recurse: # No need to filter return sorted([Category(e) for e in self._subcats_recursive], key=lambda c: c.sort_breadcrumb) # get all the subcategories, with only the first subdir added # number of path components to ingest parts = len(self.path.split('/')) + 1 if self.path else 1 # convert the subcategories into separated pathlists with only 'parts' # parts subcats = [c.split('/')[:parts] for c in self._subcats_recursive] # join them back into a path, and make unique subcats = {'/'.join(c) for c in subcats} # convert to a bunch of Category objects return sorted([Category(c) for c in subcats], key=lambda c: c.sort_name or c.name)
python
def _get_subcats(self, recurse=False): """ Get the subcategories of this category recurse -- whether to include their subcategories as well """ if recurse: # No need to filter return sorted([Category(e) for e in self._subcats_recursive], key=lambda c: c.sort_breadcrumb) # get all the subcategories, with only the first subdir added # number of path components to ingest parts = len(self.path.split('/')) + 1 if self.path else 1 # convert the subcategories into separated pathlists with only 'parts' # parts subcats = [c.split('/')[:parts] for c in self._subcats_recursive] # join them back into a path, and make unique subcats = {'/'.join(c) for c in subcats} # convert to a bunch of Category objects return sorted([Category(c) for c in subcats], key=lambda c: c.sort_name or c.name)
Get the subcategories of this category. recurse -- whether to include their subcategories as well
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L237-L261
PlaidWeb/Publ
publ/category.py
Category._first
def _first(self, **spec): """ Get the earliest entry in this category, optionally including subcategories """ for record in self._entries(spec).order_by(model.Entry.local_date, model.Entry.id)[:1]: return entry.Entry(record) return None
python
def _first(self, **spec): """ Get the earliest entry in this category, optionally including subcategories """ for record in self._entries(spec).order_by(model.Entry.local_date, model.Entry.id)[:1]: return entry.Entry(record) return None
Get the earliest entry in this category, optionally including subcategories
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L267-L272
PlaidWeb/Publ
publ/category.py
Category._last
def _last(self, **spec): """ Get the latest entry in this category, optionally including subcategories """ for record in self._entries(spec).order_by(orm.desc(model.Entry.local_date), orm.desc(model.Entry.id))[:1]: return entry.Entry(record) return None
python
def _last(self, **spec): """ Get the latest entry in this category, optionally including subcategories """ for record in self._entries(spec).order_by(orm.desc(model.Entry.local_date), orm.desc(model.Entry.id))[:1]: return entry.Entry(record) return None
Get the latest entry in this category, optionally including subcategories
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/category.py#L274-L279
PlaidWeb/Publ
publ/utils.py
parse_date
def parse_date(datestr): """ Parse a date expression into a tuple of: (start_date, span_type, span_format) Arguments: datestr -- A date specification, in the format of YYYY-MM-DD (dashes optional) """ match = re.match( r'([0-9]{4})(-?([0-9]{1,2}))?(-?([0-9]{1,2}))?(_w)?$', datestr) if not match: return (arrow.get(datestr, tzinfo=config.timezone).replace(tzinfo=config.timezone), 'day', 'YYYY-MM-DD') year, month, day, week = match.group(1, 3, 5, 6) start = arrow.Arrow(year=int(year), month=int( month or 1), day=int(day or 1), tzinfo=config.timezone) if week: return start.span('week')[0], 'week', WEEK_FORMAT if day: return start, 'day', DAY_FORMAT if month: return start, 'month', MONTH_FORMAT if year: return start, 'year', YEAR_FORMAT raise ValueError("Could not parse date: {}".format(datestr))
python
def parse_date(datestr): """ Parse a date expression into a tuple of: (start_date, span_type, span_format) Arguments: datestr -- A date specification, in the format of YYYY-MM-DD (dashes optional) """ match = re.match( r'([0-9]{4})(-?([0-9]{1,2}))?(-?([0-9]{1,2}))?(_w)?$', datestr) if not match: return (arrow.get(datestr, tzinfo=config.timezone).replace(tzinfo=config.timezone), 'day', 'YYYY-MM-DD') year, month, day, week = match.group(1, 3, 5, 6) start = arrow.Arrow(year=int(year), month=int( month or 1), day=int(day or 1), tzinfo=config.timezone) if week: return start.span('week')[0], 'week', WEEK_FORMAT if day: return start, 'day', DAY_FORMAT if month: return start, 'month', MONTH_FORMAT if year: return start, 'year', YEAR_FORMAT raise ValueError("Could not parse date: {}".format(datestr))
Parse a date expression into a tuple of: (start_date, span_type, span_format) Arguments: datestr -- A date specification, in the format of YYYY-MM-DD (dashes optional)
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L94-L123
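Some example inputs and the span each one selects, following the regular expression above (the dates are arbitrary; the exact display formats come from the WEEK_FORMAT/DAY_FORMAT/MONTH_FORMAT/YEAR_FORMAT constants defined elsewhere in utils):

# each call returns (start_timestamp, span_type, span_format)
parse_date("2019")                 # January 1 2019, 'year', YEAR_FORMAT
parse_date("2019-06")              # June 1 2019, 'month', MONTH_FORMAT
parse_date("20190630")             # dashes are optional: June 30 2019, 'day', DAY_FORMAT
parse_date("2019-06-30_w")         # start of the week containing June 30 2019, 'week', WEEK_FORMAT
parse_date("2019-06-30T12:00:00")  # no regex match, so it falls through to arrow's parser as a 'day'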
PlaidWeb/Publ
publ/utils.py
find_file
def find_file(path, search_path): """ Find a file by relative path. Arguments: path -- the image's filename search_path -- a list of directories to check in Returns: the resolved file path """ if isinstance(search_path, str): search_path = [search_path] for relative in search_path: candidate = os.path.normpath(os.path.join(relative, path)) if os.path.isfile(candidate): return candidate return None
python
def find_file(path, search_path): """ Find a file by relative path. Arguments: path -- the image's filename search_path -- a list of directories to check in Returns: the resolved file path """ if isinstance(search_path, str): search_path = [search_path] for relative in search_path: candidate = os.path.normpath(os.path.join(relative, path)) if os.path.isfile(candidate): return candidate return None
Find a file by relative path. Arguments: path -- the image's filename search_path -- a list of directories to check in Returns: the resolved file path
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L126-L142
PlaidWeb/Publ
publ/utils.py
find_entry
def find_entry(rel_path, search_path): """ Find an entry by relative path. Arguments: path -- the entry's filename (or entry ID) search_path -- a list of directories to check in Returns: the resolved Entry object """ from . import entry # pylint:disable=cyclic-import try: entry_id = int(rel_path) record = model.Entry.get(id=entry_id) if record: return entry.Entry(record) except ValueError: pass if rel_path.startswith('/'): search_path = [config.content_folder] rel_path = '.' + rel_path for where in search_path: abspath = os.path.normpath(os.path.join(where, rel_path)) record = model.Entry.get(file_path=abspath) if record: return entry.Entry(record) return None
python
def find_entry(rel_path, search_path): """ Find an entry by relative path. Arguments: path -- the entry's filename (or entry ID) search_path -- a list of directories to check in Returns: the resolved Entry object """ from . import entry # pylint:disable=cyclic-import try: entry_id = int(rel_path) record = model.Entry.get(id=entry_id) if record: return entry.Entry(record) except ValueError: pass if rel_path.startswith('/'): search_path = [config.content_folder] rel_path = '.' + rel_path for where in search_path: abspath = os.path.normpath(os.path.join(where, rel_path)) record = model.Entry.get(file_path=abspath) if record: return entry.Entry(record) return None
Find an entry by relative path. Arguments: path -- the entry's filename (or entry ID) search_path -- a list of directories to check in Returns: the resolved Entry object
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L145-L173
PlaidWeb/Publ
publ/utils.py
static_url
def static_url(path, absolute=False): """ Shorthand for returning a URL for the requested static file. Arguments: path -- the path to the file (relative to the static files directory) absolute -- whether the link should be absolute or relative """ if os.sep != '/': path = '/'.join(path.split(os.sep)) return flask.url_for('static', filename=path, _external=absolute)
python
def static_url(path, absolute=False): """ Shorthand for returning a URL for the requested static file. Arguments: path -- the path to the file (relative to the static files directory) absolute -- whether the link should be absolute or relative """ if os.sep != '/': path = '/'.join(path.split(os.sep)) return flask.url_for('static', filename=path, _external=absolute)
Shorthand for returning a URL for the requested static file. Arguments: path -- the path to the file (relative to the static files directory) absolute -- whether the link should be absolute or relative
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L181-L193
PlaidWeb/Publ
publ/utils.py
make_tag
def make_tag(name, attrs, start_end=False): """ Build an HTML tag from the given name and attributes. Arguments: name -- the name of the tag (p, div, etc.) attrs -- a dict of attributes to apply to the tag start_end -- whether this tag should be self-closing """ text = '<' + name if isinstance(attrs, dict): attr_list = attrs.items() elif isinstance(attrs, list): attr_list = attrs elif attrs is not None: raise TypeError("Unhandled attrs type " + str(type(attrs))) else: attr_list = [] for key, val in attr_list: if val is not None: escaped = html.escape(str(val), False).replace('"', '&#34;') text += ' {}="{}"'.format(key, escaped) if start_end: text += ' /' text += '>' return flask.Markup(text)
python
def make_tag(name, attrs, start_end=False): """ Build an HTML tag from the given name and attributes. Arguments: name -- the name of the tag (p, div, etc.) attrs -- a dict of attributes to apply to the tag start_end -- whether this tag should be self-closing """ text = '<' + name if isinstance(attrs, dict): attr_list = attrs.items() elif isinstance(attrs, list): attr_list = attrs elif attrs is not None: raise TypeError("Unhandled attrs type " + str(type(attrs))) else: attr_list = [] for key, val in attr_list: if val is not None: escaped = html.escape(str(val), False).replace('"', '&#34;') text += ' {}="{}"'.format(key, escaped) if start_end: text += ' /' text += '>' return flask.Markup(text)
Build an HTML tag from the given name and attributes. Arguments: name -- the name of the tag (p, div, etc.) attrs -- a dict of attributes to apply to the tag start_end -- whether this tag should be self-closing
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L196-L222
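A small worked example of the escaping behaviour (the attribute values are arbitrary): double quotes become &#34;, None-valued attributes are dropped, and start_end adds the self-closing slash.

tag = make_tag('img', {'src': '/images/a.jpg', 'alt': 'A "quoted" word', 'width': None}, start_end=True)
# -> <img src="/images/a.jpg" alt="A &#34;quoted&#34; word" />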
PlaidWeb/Publ
publ/utils.py
file_fingerprint
def file_fingerprint(fullpath): """ Get a metadata fingerprint for a file """ stat = os.stat(fullpath) return ','.join([str(value) for value in [stat.st_ino, stat.st_mtime, stat.st_size] if value])
python
def file_fingerprint(fullpath): """ Get a metadata fingerprint for a file """ stat = os.stat(fullpath) return ','.join([str(value) for value in [stat.st_ino, stat.st_mtime, stat.st_size] if value])
Get a metadata fingerprint for a file
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L225-L228
PlaidWeb/Publ
publ/utils.py
remap_args
def remap_args(input_args, remap): """ Generate a new argument list by remapping keys. The 'remap' dict maps from destination key -> priority list of source keys """ out_args = input_args for dest_key, src_keys in remap.items(): remap_value = None if isinstance(src_keys, str): src_keys = [src_keys] for key in src_keys: if key in input_args: remap_value = input_args[key] break if remap_value is not None: if out_args is input_args: out_args = {**input_args} out_args[dest_key] = remap_value return out_args
python
def remap_args(input_args, remap): """ Generate a new argument list by remapping keys. The 'remap' dict maps from destination key -> priority list of source keys """ out_args = input_args for dest_key, src_keys in remap.items(): remap_value = None if isinstance(src_keys, str): src_keys = [src_keys] for key in src_keys: if key in input_args: remap_value = input_args[key] break if remap_value is not None: if out_args is input_args: out_args = {**input_args} out_args[dest_key] = remap_value return out_args
Generate a new argument list by remapping keys. The 'remap' dict maps from destination key -> priority list of source keys
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L231-L251
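A quick sketch with made-up keys: each destination key takes the first source key that is present, and the input dict is only copied if something actually changes.

args = {'w': 320, 'quality': 60}
remapped = remap_args(args, {'width': ['width', 'w'], 'height': ['height', 'h']})
# -> {'w': 320, 'quality': 60, 'width': 320}; no 'height' because neither source key was present
remapped is args                                  # False: a copy was made when 'width' was added
remap_args(args, {'size': ['dimensions']}) is args  # True: nothing matched, the original dict is returned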
PlaidWeb/Publ
publ/utils.py
remap_link_target
def remap_link_target(path, absolute=False): """ remap a link target to a static URL if it's prefixed with @ """ if path.startswith('@'): # static resource return static_url(path[1:], absolute=absolute) if absolute: # absolute-ify whatever the URL is return urllib.parse.urljoin(flask.request.url, path) return path
python
def remap_link_target(path, absolute=False): """ remap a link target to a static URL if it's prefixed with @ """ if path.startswith('@'): # static resource return static_url(path[1:], absolute=absolute) if absolute: # absolute-ify whatever the URL is return urllib.parse.urljoin(flask.request.url, path) return path
remap a link target to a static URL if it's prefixed with @
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L254-L264
PlaidWeb/Publ
publ/utils.py
get_category
def get_category(filename): """ Get a default category name from a filename in a cross-platform manner """ return '/'.join(os.path.dirname(filename).split(os.sep))
python
def get_category(filename): """ Get a default category name from a filename in a cross-platform manner """ return '/'.join(os.path.dirname(filename).split(os.sep))
Get a default category name from a filename in a cross-platform manner
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L267-L269
PlaidWeb/Publ
publ/utils.py
CallableProxy._default
def _default(self): """ Get the default function return """ if self._default_args: return self._func( *self._default_args, **self._default_kwargs) return self._func(**self._default_kwargs)
python
def _default(self): """ Get the default function return """ if self._default_args: return self._func( *self._default_args, **self._default_kwargs) return self._func(**self._default_kwargs)
Get the default function return
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/utils.py#L35-L43
PlaidWeb/Publ
publ/model.py
setup
def setup(): """ Set up the database """ try: db.bind(**config.database_config) except OSError: # Attempted to connect to a file-based database where the file didn't # exist db.bind(**config.database_config, create_db=True) rebuild = True try: db.generate_mapping(create_tables=True) with orm.db_session: version = GlobalConfig.get(key='schema_version') if version and version.int_value != SCHEMA_VERSION: logger.info("Existing database has schema version %d", version.int_value) else: rebuild = False except: # pylint:disable=bare-except logger.exception("Error mapping schema") if rebuild: logger.info("Rebuilding schema") try: db.drop_all_tables(with_all_data=True) db.create_tables() except: raise RuntimeError("Unable to upgrade schema automatically; please " + "delete the existing database and try again.") with orm.db_session: if not GlobalConfig.get(key='schema_version'): logger.info("setting schema version to %d", SCHEMA_VERSION) GlobalConfig(key='schema_version', int_value=SCHEMA_VERSION) orm.commit()
python
def setup(): """ Set up the database """ try: db.bind(**config.database_config) except OSError: # Attempted to connect to a file-based database where the file didn't # exist db.bind(**config.database_config, create_db=True) rebuild = True try: db.generate_mapping(create_tables=True) with orm.db_session: version = GlobalConfig.get(key='schema_version') if version and version.int_value != SCHEMA_VERSION: logger.info("Existing database has schema version %d", version.int_value) else: rebuild = False except: # pylint:disable=bare-except logger.exception("Error mapping schema") if rebuild: logger.info("Rebuilding schema") try: db.drop_all_tables(with_all_data=True) db.create_tables() except: raise RuntimeError("Unable to upgrade schema automatically; please " + "delete the existing database and try again.") with orm.db_session: if not GlobalConfig.get(key='schema_version'): logger.info("setting schema version to %d", SCHEMA_VERSION) GlobalConfig(key='schema_version', int_value=SCHEMA_VERSION) orm.commit()
Set up the database
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/model.py#L126-L163
PlaidWeb/Publ
publ/model.py
Entry.visible
def visible(self): """ Returns true if the entry should be viewable """ return self.status not in (PublishStatus.DRAFT.value, PublishStatus.GONE.value)
python
def visible(self): """ Returns true if the entry should be viewable """ return self.status not in (PublishStatus.DRAFT.value, PublishStatus.GONE.value)
Returns true if the entry should be viewable
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/model.py#L78-L81
PlaidWeb/Publ
publ/cards.py
extract_card
def extract_card(text, config, image_search_path): """ Extract card data based on the provided texts. """ card = CardData() parser = CardParser(card, config, image_search_path) misaka.Markdown(parser, extensions=markdown.ENABLED_EXTENSIONS)(text) return card
python
def extract_card(text, config, image_search_path): """ Extract card data based on the provided texts. """ card = CardData() parser = CardParser(card, config, image_search_path) misaka.Markdown(parser, extensions=markdown.ENABLED_EXTENSIONS)(text) return card
Extract card data based on the provided texts.
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/cards.py#L90-L96
PlaidWeb/Publ
publ/cards.py
CardParser.paragraph
def paragraph(self, content): """ Turn the first paragraph of text into the summary text """ if not self._out.description: self._out.description = content return ' '
python
def paragraph(self, content): """ Turn the first paragraph of text into the summary text """ if not self._out.description: self._out.description = content return ' '
Turn the first paragraph of text into the summary text
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/cards.py#L33-L37
PlaidWeb/Publ
publ/cards.py
CardParser.image
def image(self, raw_url, title='', alt=''): ''' extract the images ''' max_images = self._config.get('count') if max_images is not None and len(self._out.images) >= max_images: # We already have enough images, so bail out return ' ' image_specs = raw_url if title: image_specs += ' "{}"'.format(title) alt, container_args = image.parse_alt_text(alt) spec_list, _ = image.get_spec_list(image_specs, container_args) for spec in spec_list: if not spec: continue self._out.images.append(self._render_image(spec, alt)) if max_images is not None and len(self._out.images) >= max_images: break return ' '
python
def image(self, raw_url, title='', alt=''): ''' extract the images ''' max_images = self._config.get('count') if max_images is not None and len(self._out.images) >= max_images: # We already have enough images, so bail out return ' ' image_specs = raw_url if title: image_specs += ' "{}"'.format(title) alt, container_args = image.parse_alt_text(alt) spec_list, _ = image.get_spec_list(image_specs, container_args) for spec in spec_list: if not spec: continue self._out.images.append(self._render_image(spec, alt)) if max_images is not None and len(self._out.images) >= max_images: break return ' '
extract the images
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/cards.py#L39-L62
PlaidWeb/Publ
publ/cards.py
CardParser._render_image
def _render_image(self, spec, alt=''): """ Given an image spec, try to turn it into a card image per the configuration """ # pylint: disable=unused-argument try: path, image_args, _ = image.parse_image_spec(spec) except Exception as err: # pylint: disable=broad-except # we tried™ logger.exception("Got error on spec %s: %s", spec, err) return None img = image.get_image(path, self._image_search_path) if img: image_config = {**image_args, **self._config, 'absolute': True} return img.get_rendition(1, **image_config)[0] return None
python
def _render_image(self, spec, alt=''): """ Given an image spec, try to turn it into a card image per the configuration """ # pylint: disable=unused-argument try: path, image_args, _ = image.parse_image_spec(spec) except Exception as err: # pylint: disable=broad-except # we tried™ logger.exception("Got error on spec %s: %s", spec, err) return None img = image.get_image(path, self._image_search_path) if img: image_config = {**image_args, **self._config, 'absolute': True} return img.get_rendition(1, **image_config)[0] return None
Given an image spec, try to turn it into a card image per the configuration
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/cards.py#L71-L87
PlaidWeb/Publ
publ/__init__.py
publ
def publ(name, cfg): """ Create a Flask app and configure it for use with Publ """ config.setup(cfg) app = _PublApp(name, template_folder=config.template_folder, static_folder=config.static_folder, static_url_path=config.static_url_path) for route in [ '/', '/<path:category>/', '/<template>', '/<path:category>/<template>', ]: app.add_url_rule(route, 'category', rendering.render_category) for route in [ '/<int:entry_id>', '/<int:entry_id>-', '/<int:entry_id>-<slug_text>', '/<path:category>/<int:entry_id>', '/<path:category>/<int:entry_id>-', '/<path:category>/<int:entry_id>-<slug_text>', ]: app.add_url_rule(route, 'entry', rendering.render_entry) app.add_url_rule('/<path:path>.PUBL_PATHALIAS', 'path_alias', rendering.render_path_alias) app.add_url_rule('/_async/<path:filename>', 'async', image.get_async) app.add_url_rule('/_', 'chit', rendering.render_transparent_chit) app.add_url_rule('/_file/<path:filename>', 'asset', rendering.retrieve_asset) app.config['TRAP_HTTP_EXCEPTIONS'] = True app.register_error_handler( werkzeug.exceptions.HTTPException, rendering.render_exception) app.jinja_env.globals.update( # pylint: disable=no-member get_view=view.get_view, arrow=arrow, static=utils.static_url, get_template=rendering.get_template ) caching.init_app(app) maint = maintenance.Maintenance() if config.index_rescan_interval: maint.register(functools.partial(index.scan_index, config.content_folder), config.index_rescan_interval) if config.image_cache_interval and config.image_cache_age: maint.register(functools.partial(image.clean_cache, config.image_cache_age), config.image_cache_interval) app.before_request(maint.run) if 'CACHE_THRESHOLD' in config.cache: app.after_request(set_cache_expiry) if app.debug: # We're in debug mode so we don't want to scan until everything's up # and running app.before_first_request(startup) else: # In production, register the exception handler and scan the index # immediately app.register_error_handler(Exception, rendering.render_exception) startup() return app
python
def publ(name, cfg): """ Create a Flask app and configure it for use with Publ """ config.setup(cfg) app = _PublApp(name, template_folder=config.template_folder, static_folder=config.static_folder, static_url_path=config.static_url_path) for route in [ '/', '/<path:category>/', '/<template>', '/<path:category>/<template>', ]: app.add_url_rule(route, 'category', rendering.render_category) for route in [ '/<int:entry_id>', '/<int:entry_id>-', '/<int:entry_id>-<slug_text>', '/<path:category>/<int:entry_id>', '/<path:category>/<int:entry_id>-', '/<path:category>/<int:entry_id>-<slug_text>', ]: app.add_url_rule(route, 'entry', rendering.render_entry) app.add_url_rule('/<path:path>.PUBL_PATHALIAS', 'path_alias', rendering.render_path_alias) app.add_url_rule('/_async/<path:filename>', 'async', image.get_async) app.add_url_rule('/_', 'chit', rendering.render_transparent_chit) app.add_url_rule('/_file/<path:filename>', 'asset', rendering.retrieve_asset) app.config['TRAP_HTTP_EXCEPTIONS'] = True app.register_error_handler( werkzeug.exceptions.HTTPException, rendering.render_exception) app.jinja_env.globals.update( # pylint: disable=no-member get_view=view.get_view, arrow=arrow, static=utils.static_url, get_template=rendering.get_template ) caching.init_app(app) maint = maintenance.Maintenance() if config.index_rescan_interval: maint.register(functools.partial(index.scan_index, config.content_folder), config.index_rescan_interval) if config.image_cache_interval and config.image_cache_age: maint.register(functools.partial(image.clean_cache, config.image_cache_age), config.image_cache_interval) app.before_request(maint.run) if 'CACHE_THRESHOLD' in config.cache: app.after_request(set_cache_expiry) if app.debug: # We're in debug mode so we don't want to scan until everything's up # and running app.before_first_request(startup) else: # In production, register the exception handler and scan the index # immediately app.register_error_handler(Exception, rendering.render_exception) startup() return app
Create a Flask app and configure it for use with Publ
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/__init__.py#L56-L135
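A minimal launch sketch, assuming the cfg dict keys mirror the config attributes referenced above (content_folder, template_folder, and so on); the key names and values here are illustrative, not a documented contract.

import publ

app = publ.publ(__name__, {
    'content_folder': 'content',      # assumed key, mirroring config.content_folder used above
    'template_folder': 'templates',   # assumed key, mirroring config.template_folder
})

if __name__ == '__main__':
    app.run(port=5000)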
PlaidWeb/Publ
publ/__init__.py
startup
def startup(): """ Startup routine for initiating the content indexer """ model.setup() index.scan_index(config.content_folder) index.background_scan(config.content_folder)
python
def startup(): """ Startup routine for initiating the content indexer """ model.setup() index.scan_index(config.content_folder) index.background_scan(config.content_folder)
Startup routine for initiating the content indexer
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/__init__.py#L141-L145
PlaidWeb/Publ
publ/__init__.py
set_cache_expiry
def set_cache_expiry(response): """ Set the cache control headers """ if response.cache_control.max_age is None and 'CACHE_DEFAULT_TIMEOUT' in config.cache: response.cache_control.max_age = config.cache['CACHE_DEFAULT_TIMEOUT'] return response
python
def set_cache_expiry(response): """ Set the cache control headers """ if response.cache_control.max_age is None and 'CACHE_DEFAULT_TIMEOUT' in config.cache: response.cache_control.max_age = config.cache['CACHE_DEFAULT_TIMEOUT'] return response
Set the cache control headers
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/__init__.py#L148-L152
PlaidWeb/Publ
publ/__init__.py
_PublApp.path_alias_regex
def path_alias_regex(self, regex): """ A decorator that adds a path-alias regular expression; calls add_path_regex """ def decorator(func): """ Adds the function to the regular expression alias list """ self.add_path_regex(regex, func) return decorator
python
def path_alias_regex(self, regex): """ A decorator that adds a path-alias regular expression; calls add_path_regex """ def decorator(func): """ Adds the function to the regular expression alias list """ self.add_path_regex(regex, func) return decorator
A decorator that adds a path-alias regular expression; calls add_path_regex
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/__init__.py#L24-L30
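A hedged sketch of registering a regex-based alias. The two-item return shape is inferred from get_path_regex below, whose default is (None, None); what each slot means is an assumption here, and the URL pattern and handler name are invented.

@app.path_alias_regex(r'^/archive/(\d{4})/?$')
def old_archive_link(match):
    # hypothetical handler: send old /archive/YYYY URLs somewhere sensible
    return '/' + match.group(1), True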
PlaidWeb/Publ
publ/__init__.py
_PublApp.get_path_regex
def get_path_regex(self, path): """ Evaluate the registered path-alias regular expressions """ for regex, func in self._regex_map: match = re.match(regex, path) if match: return func(match) return None, None
python
def get_path_regex(self, path): """ Evaluate the registered path-alias regular expressions """ for regex, func in self._regex_map: match = re.match(regex, path) if match: return func(match) return None, None
Evaluate the registered path-alias regular expressions
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/__init__.py#L46-L53
PlaidWeb/Publ
publ/entry.py
guess_title
def guess_title(basename): """ Attempt to guess the title from the filename """ base, _ = os.path.splitext(basename) return re.sub(r'[ _-]+', r' ', base).title()
python
def guess_title(basename): """ Attempt to guess the title from the filename """ base, _ = os.path.splitext(basename) return re.sub(r'[ _-]+', r' ', base).title()
Attempt to guess the title from the filename
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L374-L378
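For example (filename invented):

guess_title("my_first-post.md")   # -> "My First Post"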
PlaidWeb/Publ
publ/entry.py
get_entry_id
def get_entry_id(entry, fullpath, assign_id):
    """ Get or generate an entry ID for an entry """
    warn_duplicate = False

    if 'Entry-ID' in entry:
        entry_id = int(entry['Entry-ID'])
    else:
        entry_id = None

    # See if we've inadvertently duplicated an entry ID
    if entry_id:
        try:
            other_entry = model.Entry.get(id=entry_id)
            if (other_entry
                    and os.path.isfile(other_entry.file_path)
                    and not os.path.samefile(other_entry.file_path, fullpath)):
                warn_duplicate = entry_id
                entry_id = None
        except FileNotFoundError:
            # the other file doesn't exist, so just let it go
            pass

    # Do we need to assign a new ID?
    if not entry_id and not assign_id:
        # We're not assigning IDs yet
        return None

    if not entry_id:
        # See if we already have an entry with this file path
        by_filepath = model.Entry.get(file_path=fullpath)
        if by_filepath:
            entry_id = by_filepath.id

    if not entry_id:
        # We still don't have an ID; generate one pseudo-randomly, based on the
        # entry file path. This approach averages around 0.25 collisions per ID
        # generated while keeping the entry ID reasonably short. In general,
        # count*N averages 1/(N-1) collisions per ID.
        limit = max(10, orm.get(orm.count(e) for e in model.Entry) * 5)
        attempt = 0
        while not entry_id or model.Entry.get(id=entry_id):
            # Stably generate a quasi-random entry ID from the file path
            md5 = hashlib.md5()
            md5.update("{} {}".format(fullpath, attempt).encode('utf-8'))
            entry_id = int.from_bytes(md5.digest(), byteorder='big') % limit
            attempt = attempt + 1

    if warn_duplicate is not False:
        logger.warning("Entry '%s' had ID %d, which belongs to '%s'. Reassigned to %d",
                       fullpath, warn_duplicate, other_entry.file_path, entry_id)

    return entry_id
python
def get_entry_id(entry, fullpath, assign_id):
    """ Get or generate an entry ID for an entry """
    warn_duplicate = False

    if 'Entry-ID' in entry:
        entry_id = int(entry['Entry-ID'])
    else:
        entry_id = None

    # See if we've inadvertently duplicated an entry ID
    if entry_id:
        try:
            other_entry = model.Entry.get(id=entry_id)
            if (other_entry
                    and os.path.isfile(other_entry.file_path)
                    and not os.path.samefile(other_entry.file_path, fullpath)):
                warn_duplicate = entry_id
                entry_id = None
        except FileNotFoundError:
            # the other file doesn't exist, so just let it go
            pass

    # Do we need to assign a new ID?
    if not entry_id and not assign_id:
        # We're not assigning IDs yet
        return None

    if not entry_id:
        # See if we already have an entry with this file path
        by_filepath = model.Entry.get(file_path=fullpath)
        if by_filepath:
            entry_id = by_filepath.id

    if not entry_id:
        # We still don't have an ID; generate one pseudo-randomly, based on the
        # entry file path. This approach averages around 0.25 collisions per ID
        # generated while keeping the entry ID reasonably short. In general,
        # count*N averages 1/(N-1) collisions per ID.
        limit = max(10, orm.get(orm.count(e) for e in model.Entry) * 5)
        attempt = 0
        while not entry_id or model.Entry.get(id=entry_id):
            # Stably generate a quasi-random entry ID from the file path
            md5 = hashlib.md5()
            md5.update("{} {}".format(fullpath, attempt).encode('utf-8'))
            entry_id = int.from_bytes(md5.digest(), byteorder='big') % limit
            attempt = attempt + 1

    if warn_duplicate is not False:
        logger.warning("Entry '%s' had ID %d, which belongs to '%s'. Reassigned to %d",
                       fullpath, warn_duplicate, other_entry.file_path, entry_id)

    return entry_id
Get or generate an entry ID for an entry
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L381-L434
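The interesting step in get_entry_id is the fallback ID generator: hash the file path plus an attempt counter, reduce modulo a limit of roughly five times the entry count, and retry on collision. Below is a standalone sketch of just that step; the limit heuristic and the hashing mirror the code above, while the database lookup is replaced by an in-memory set so the sketch runs on its own, and the function name and example values are hypothetical.

import hashlib

def generate_entry_id(fullpath, existing_ids, entry_count):
    """ Derive a short, stable, collision-checked ID from a file path.

    Mirrors the fallback branch of get_entry_id above: hash the path plus an
    attempt counter, take it modulo a limit of about 5x the entry count, and
    retry until the ID is non-zero and unused.
    """
    limit = max(10, entry_count * 5)
    attempt = 0
    entry_id = None
    while not entry_id or entry_id in existing_ids:
        md5 = hashlib.md5()
        md5.update("{} {}".format(fullpath, attempt).encode('utf-8'))
        entry_id = int.from_bytes(md5.digest(), byteorder='big') % limit
        attempt += 1
    return entry_id

existing = {3, 17}
new_id = generate_entry_id('content/blog/hello.md', existing, entry_count=12)
print(new_id)  # deterministic for a given path, small, and not already taken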
PlaidWeb/Publ
publ/entry.py
save_file
def save_file(fullpath, entry):
    """ Save a message file out, without mangling the headers """
    with tempfile.NamedTemporaryFile('w', delete=False) as file:
        tmpfile = file.name
        # we can't just use file.write(str(entry)) because otherwise the
        # headers "helpfully" do MIME encoding normalization.
        # str(val) is necessary to get around email.header's encoding
        # shenanigans
        for key, val in entry.items():
            print('{}: {}'.format(key, str(val)), file=file)
        print('', file=file)
        file.write(entry.get_payload())
    shutil.move(tmpfile, fullpath)
python
def save_file(fullpath, entry):
    """ Save a message file out, without mangling the headers """
    with tempfile.NamedTemporaryFile('w', delete=False) as file:
        tmpfile = file.name
        # we can't just use file.write(str(entry)) because otherwise the
        # headers "helpfully" do MIME encoding normalization.
        # str(val) is necessary to get around email.header's encoding
        # shenanigans
        for key, val in entry.items():
            print('{}: {}'.format(key, str(val)), file=file)
        print('', file=file)
        file.write(entry.get_payload())
    shutil.move(tmpfile, fullpath)
Save a message file out, without mangling the headers
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L437-L449
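The notable details in save_file are that the headers are written out with plain print() calls rather than str(entry), so the email module never re-encodes them, and that the output is staged in a temporary file and then moved into place. A small sketch of the same write pattern, with a plain dict standing in for the message object; the function name, header keys, and file name here are illustrative.

import shutil
import tempfile

def save_headers_and_body(fullpath, headers, body):
    """ Write 'Key: value' headers, a blank line, then the body,
    staging the output in a temp file and moving it into place. """
    with tempfile.NamedTemporaryFile('w', delete=False) as file:
        tmpfile = file.name
        for key, val in headers.items():
            print('{}: {}'.format(key, val), file=file)
        print('', file=file)
        file.write(body)
    shutil.move(tmpfile, fullpath)

save_headers_and_body('example.md',
                      {'Title': 'Hello', 'Entry-ID': '123'},
                      'Entry body text\n')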
PlaidWeb/Publ
publ/entry.py
scan_file
def scan_file(fullpath, relpath, assign_id):
    """ scan a file and put it into the index """
    # pylint: disable=too-many-branches,too-many-statements,too-many-locals

    # Since a file has changed, the lrucache is invalid.
    load_message.cache_clear()

    try:
        entry = load_message(fullpath)
    except FileNotFoundError:
        # The file doesn't exist, so remove it from the index
        record = model.Entry.get(file_path=fullpath)
        if record:
            expire_record(record)
        return True

    entry_id = get_entry_id(entry, fullpath, assign_id)
    if entry_id is None:
        return False

    fixup_needed = False

    basename = os.path.basename(relpath)
    title = entry['title'] or guess_title(basename)

    values = {
        'file_path': fullpath,
        'category': entry.get('Category', utils.get_category(relpath)),
        'status': model.PublishStatus[entry.get('Status', 'SCHEDULED').upper()].value,
        'entry_type': entry.get('Entry-Type', ''),
        'slug_text': make_slug(entry.get('Slug-Text', title)),
        'redirect_url': entry.get('Redirect-To', ''),
        'title': title,
        'sort_title': entry.get('Sort-Title', title),
        'entry_template': entry.get('Entry-Template', '')
    }

    entry_date = None
    if 'Date' in entry:
        try:
            entry_date = arrow.get(entry['Date'], tzinfo=config.timezone)
        except arrow.parser.ParserError:
            entry_date = None
    if entry_date is None:
        del entry['Date']
        entry_date = arrow.get(
            os.stat(fullpath).st_ctime).to(config.timezone)
        entry['Date'] = entry_date.format()
        fixup_needed = True

    if 'Last-Modified' in entry:
        last_modified_str = entry['Last-Modified']
        try:
            last_modified = arrow.get(
                last_modified_str, tzinfo=config.timezone)
        except arrow.parser.ParserError:
            last_modified = arrow.get()
            del entry['Last-Modified']
            entry['Last-Modified'] = last_modified.format()
            fixup_needed = True

    values['display_date'] = entry_date.isoformat()
    values['utc_date'] = entry_date.to('utc').datetime
    values['local_date'] = entry_date.naive

    logger.debug("getting entry %s with id %d", fullpath, entry_id)
    record = model.Entry.get(id=entry_id)
    if record:
        logger.debug("Reusing existing entry %d", record.id)
        record.set(**values)
    else:
        record = model.Entry(id=entry_id, **values)

    # Update the entry ID
    if str(record.id) != entry['Entry-ID']:
        del entry['Entry-ID']
        entry['Entry-ID'] = str(record.id)
        fixup_needed = True

    if 'UUID' not in entry:
        entry['UUID'] = str(uuid.uuid5(
            uuid.NAMESPACE_URL, 'file://' + fullpath))
        fixup_needed = True

    # add other relationships to the index
    path_alias.remove_aliases(record)
    if record.visible:
        for alias in entry.get_all('Path-Alias', []):
            path_alias.set_alias(alias, entry=record)

    with orm.db_session:
        set_tags = {
            t.lower()
            for t in entry.get_all('Tag', []) + entry.get_all('Hidden-Tag', [])
        }

        for tag in record.tags:
            if tag.key in set_tags:
                set_tags.remove(tag.key)
            else:
                tag.delete()
        for tag in set_tags:
            model.EntryTag(entry=record, key=tag)
        orm.commit()

    if record.status == model.PublishStatus.DRAFT.value:
        logger.info("Not touching draft entry %s", fullpath)
    elif fixup_needed:
        logger.info("Fixing up entry %s", fullpath)
        save_file(fullpath, entry)

    return record
python
def scan_file(fullpath, relpath, assign_id):
    """ scan a file and put it into the index """
    # pylint: disable=too-many-branches,too-many-statements,too-many-locals

    # Since a file has changed, the lrucache is invalid.
    load_message.cache_clear()

    try:
        entry = load_message(fullpath)
    except FileNotFoundError:
        # The file doesn't exist, so remove it from the index
        record = model.Entry.get(file_path=fullpath)
        if record:
            expire_record(record)
        return True

    entry_id = get_entry_id(entry, fullpath, assign_id)
    if entry_id is None:
        return False

    fixup_needed = False

    basename = os.path.basename(relpath)
    title = entry['title'] or guess_title(basename)

    values = {
        'file_path': fullpath,
        'category': entry.get('Category', utils.get_category(relpath)),
        'status': model.PublishStatus[entry.get('Status', 'SCHEDULED').upper()].value,
        'entry_type': entry.get('Entry-Type', ''),
        'slug_text': make_slug(entry.get('Slug-Text', title)),
        'redirect_url': entry.get('Redirect-To', ''),
        'title': title,
        'sort_title': entry.get('Sort-Title', title),
        'entry_template': entry.get('Entry-Template', '')
    }

    entry_date = None
    if 'Date' in entry:
        try:
            entry_date = arrow.get(entry['Date'], tzinfo=config.timezone)
        except arrow.parser.ParserError:
            entry_date = None
    if entry_date is None:
        del entry['Date']
        entry_date = arrow.get(
            os.stat(fullpath).st_ctime).to(config.timezone)
        entry['Date'] = entry_date.format()
        fixup_needed = True

    if 'Last-Modified' in entry:
        last_modified_str = entry['Last-Modified']
        try:
            last_modified = arrow.get(
                last_modified_str, tzinfo=config.timezone)
        except arrow.parser.ParserError:
            last_modified = arrow.get()
            del entry['Last-Modified']
            entry['Last-Modified'] = last_modified.format()
            fixup_needed = True

    values['display_date'] = entry_date.isoformat()
    values['utc_date'] = entry_date.to('utc').datetime
    values['local_date'] = entry_date.naive

    logger.debug("getting entry %s with id %d", fullpath, entry_id)
    record = model.Entry.get(id=entry_id)
    if record:
        logger.debug("Reusing existing entry %d", record.id)
        record.set(**values)
    else:
        record = model.Entry(id=entry_id, **values)

    # Update the entry ID
    if str(record.id) != entry['Entry-ID']:
        del entry['Entry-ID']
        entry['Entry-ID'] = str(record.id)
        fixup_needed = True

    if 'UUID' not in entry:
        entry['UUID'] = str(uuid.uuid5(
            uuid.NAMESPACE_URL, 'file://' + fullpath))
        fixup_needed = True

    # add other relationships to the index
    path_alias.remove_aliases(record)
    if record.visible:
        for alias in entry.get_all('Path-Alias', []):
            path_alias.set_alias(alias, entry=record)

    with orm.db_session:
        set_tags = {
            t.lower()
            for t in entry.get_all('Tag', []) + entry.get_all('Hidden-Tag', [])
        }

        for tag in record.tags:
            if tag.key in set_tags:
                set_tags.remove(tag.key)
            else:
                tag.delete()
        for tag in set_tags:
            model.EntryTag(entry=record, key=tag)
        orm.commit()

    if record.status == model.PublishStatus.DRAFT.value:
        logger.info("Not touching draft entry %s", fullpath)
    elif fixup_needed:
        logger.info("Fixing up entry %s", fullpath)
        save_file(fullpath, entry)

    return record
scan a file and put it into the index
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L453-L565
PlaidWeb/Publ
publ/entry.py
expire_file
def expire_file(filepath):
    """ Expire a record for a missing file """
    load_message.cache_clear()

    orm.delete(pa for pa in model.PathAlias if pa.entry.file_path == filepath)
    orm.delete(item for item in model.Entry if item.file_path == filepath)
    orm.commit()
python
def expire_file(filepath):
    """ Expire a record for a missing file """
    load_message.cache_clear()

    orm.delete(pa for pa in model.PathAlias if pa.entry.file_path == filepath)
    orm.delete(item for item in model.Entry if item.file_path == filepath)
    orm.commit()
Expire a record for a missing file
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L569-L574
PlaidWeb/Publ
publ/entry.py
expire_record
def expire_record(record):
    """ Expire a record for a missing entry """
    load_message.cache_clear()

    # This entry no longer exists so delete it, and anything that references it
    # SQLite doesn't support cascading deletes so let's just clean up
    # manually
    orm.delete(pa for pa in model.PathAlias if pa.entry == record)
    record.delete()
    orm.commit()
python
def expire_record(record):
    """ Expire a record for a missing entry """
    load_message.cache_clear()

    # This entry no longer exists so delete it, and anything that references it
    # SQLite doesn't support cascading deletes so let's just clean up
    # manually
    orm.delete(pa for pa in model.PathAlias if pa.entry == record)
    record.delete()
    orm.commit()
Expire a record for a missing entry
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L578-L587
PlaidWeb/Publ
publ/entry.py
Entry._link
def _link(self, *args, **kwargs):
        """ Returns a link, potentially pre-redirected """
        if self._record.redirect_url:
            return links.resolve(self._record.redirect_url,
                                 self.search_path, kwargs.get('absolute'))

        return self._permalink(*args, **kwargs)
python
def _link(self, *args, **kwargs):
        """ Returns a link, potentially pre-redirected """
        if self._record.redirect_url:
            return links.resolve(self._record.redirect_url,
                                 self.search_path, kwargs.get('absolute'))

        return self._permalink(*args, **kwargs)
Returns a link, potentially pre-redirected
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L147-L153
PlaidWeb/Publ
publ/entry.py
Entry._permalink
def _permalink(self, absolute=False, expand=True, **kwargs):
        """ Returns a canonical URL for the item """
        return flask.url_for('entry',
                             entry_id=self._record.id,
                             category=self._record.category if expand else None,
                             slug_text=self._record.slug_text if expand else None,
                             _external=absolute,
                             **kwargs)
python
def _permalink(self, absolute=False, expand=True, **kwargs):
        """ Returns a canonical URL for the item """
        return flask.url_for('entry',
                             entry_id=self._record.id,
                             category=self._record.category if expand else None,
                             slug_text=self._record.slug_text if expand else None,
                             _external=absolute,
                             **kwargs)
Returns a canonical URL for the item
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L155-L162
PlaidWeb/Publ
publ/entry.py
Entry.search_path
def search_path(self):
        """ The relative image search path for this entry """
        return [os.path.dirname(self._record.file_path)] + self.category.search_path
python
def search_path(self):
        """ The relative image search path for this entry """
        return [os.path.dirname(self._record.file_path)] + self.category.search_path
The relative image search path for this entry
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L201-L203
PlaidWeb/Publ
publ/entry.py
Entry._message
def _message(self):
        """ get the message payload """
        filepath = self._record.file_path
        try:
            return load_message(filepath)
        except FileNotFoundError:
            expire_file(filepath)
            empty = email.message.Message()
            empty.set_payload('')
            return empty
python
def _message(self):
        """ get the message payload """
        filepath = self._record.file_path
        try:
            return load_message(filepath)
        except FileNotFoundError:
            expire_file(filepath)
            empty = email.message.Message()
            empty.set_payload('')
            return empty
get the message payload
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L206-L215
PlaidWeb/Publ
publ/entry.py
Entry.body
def body(self):
        """ Get the above-the-fold entry body text """
        body, _, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_markup,
            body,
            is_markdown) if body else CallableProxy(None)
python
def body(self):
        """ Get the above-the-fold entry body text """
        body, _, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_markup,
            body,
            is_markdown) if body else CallableProxy(None)
Get the above-the-fold entry body text
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L233-L239
PlaidWeb/Publ
publ/entry.py
Entry.more
def more(self):
        """ Get the below-the-fold entry body text """
        _, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_markup,
            more,
            is_markdown) if more else CallableProxy(None)
python
def more(self):
        """ Get the below-the-fold entry body text """
        _, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_markup,
            more,
            is_markdown) if more else CallableProxy(None)
Get the below-the-fold entry body text
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L242-L248
PlaidWeb/Publ
publ/entry.py
Entry.card
def card(self):
        """ Get the entry's OpenGraph card """
        body, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_card,
            body or more) if is_markdown else CallableProxy(None)
python
def card(self):
        """ Get the entry's OpenGraph card """
        body, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_card,
            body or more) if is_markdown else CallableProxy(None)
Get the entry's OpenGraph card
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L251-L256
PlaidWeb/Publ
publ/entry.py
Entry.summary
def summary(self):
        """ Get the entry's summary text """
        if self.get('Summary'):
            return self.get('Summary')

        body, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_summary,
            body or more) if is_markdown else CallableProxy(None)
python
def summary(self):
        """ Get the entry's summary text """
        if self.get('Summary'):
            return self.get('Summary')

        body, more, is_markdown = self._entry_content
        return TrueCallableProxy(
            self._get_summary,
            body or more) if is_markdown else CallableProxy(None)
Get the entry's summary text
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L259-L267
PlaidWeb/Publ
publ/entry.py
Entry.last_modified
def last_modified(self):
        """ Get the date of last file modification """
        if self.get('Last-Modified'):
            return arrow.get(self.get('Last-Modified'))
        return self.date
python
def last_modified(self):
        """ Get the date of last file modification """
        if self.get('Last-Modified'):
            return arrow.get(self.get('Last-Modified'))
        return self.date
Get the date of last file modification
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L270-L274
PlaidWeb/Publ
publ/entry.py
Entry._get_markup
def _get_markup(self, text, is_markdown, **kwargs):
        """ get the rendered markup for an entry

        is_markdown -- whether the entry is formatted as Markdown
        kwargs -- parameters to pass to the Markdown processor
        """
        if is_markdown:
            return markdown.to_html(
                text,
                config=kwargs,
                search_path=self.search_path)

        return html_entry.process(
            text,
            config=kwargs,
            search_path=self.search_path)
python
def _get_markup(self, text, is_markdown, **kwargs):
        """ get the rendered markup for an entry

        is_markdown -- whether the entry is formatted as Markdown
        kwargs -- parameters to pass to the Markdown processor
        """
        if is_markdown:
            return markdown.to_html(
                text,
                config=kwargs,
                search_path=self.search_path)

        return html_entry.process(
            text,
            config=kwargs,
            search_path=self.search_path)
get the rendered markup for an entry

is_markdown -- whether the entry is formatted as Markdown
kwargs -- parameters to pass to the Markdown processor
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L276-L291
PlaidWeb/Publ
publ/entry.py
Entry._get_card
def _get_card(self, text, **kwargs):
        """ Render out the tags for a Twitter/OpenGraph card for this entry. """

        def og_tag(key, val):
            """ produce an OpenGraph tag with the given key and value """
            return utils.make_tag('meta', {'property': key, 'content': val},
                                  start_end=True)

        tags = og_tag('og:title', self.title(markup=False))
        tags += og_tag('og:url', self.link(absolute=True))

        card = cards.extract_card(text, kwargs, self.search_path)
        for image in card.images:
            tags += og_tag('og:image', image)
        if card.description:
            tags += og_tag('og:description',
                           self.get('Summary', card.description))

        return flask.Markup(tags)
python
def _get_card(self, text, **kwargs):
        """ Render out the tags for a Twitter/OpenGraph card for this entry. """

        def og_tag(key, val):
            """ produce an OpenGraph tag with the given key and value """
            return utils.make_tag('meta', {'property': key, 'content': val},
                                  start_end=True)

        tags = og_tag('og:title', self.title(markup=False))
        tags += og_tag('og:url', self.link(absolute=True))

        card = cards.extract_card(text, kwargs, self.search_path)
        for image in card.images:
            tags += og_tag('og:image', image)
        if card.description:
            tags += og_tag('og:description',
                           self.get('Summary', card.description))

        return flask.Markup(tags)
Render out the tags for a Twitter/OpenGraph card for this entry.
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L293-L310
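_get_card builds its output by concatenating <meta property="og:..."> tags. Below is a self-contained sketch of that tag-building idea with a plain string-escaping helper standing in for utils.make_tag and no card extraction; the function names and example values are illustrative, not Publ's actual API.

import html

def og_tag(key, val):
    """ produce a self-closing OpenGraph meta tag """
    return '<meta property="{}" content="{}" />'.format(
        html.escape(key, quote=True), html.escape(str(val), quote=True))

def build_card(title, url, images=(), description=None):
    """ Concatenate og: tags in the same order _get_card does above """
    tags = og_tag('og:title', title)
    tags += og_tag('og:url', url)
    for image in images:
        tags += og_tag('og:image', image)
    if description:
        tags += og_tag('og:description', description)
    return tags

print(build_card('An entry', 'https://example.com/42',
                 images=['https://example.com/42/cover.jpg'],
                 description='A short summary.'))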
PlaidWeb/Publ
publ/entry.py
Entry._get_summary
def _get_summary(self, text, **kwargs):
        """ Render out just the summary """
        card = cards.extract_card(text, kwargs, self.search_path)
        return flask.Markup((card.description or '').strip())
python
def _get_summary(self, text, **kwargs):
        """ Render out just the summary """
        card = cards.extract_card(text, kwargs, self.search_path)
        return flask.Markup((card.description or '').strip())
Render out just the summary
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L312-L316
PlaidWeb/Publ
publ/entry.py
Entry._previous
def _previous(self, **kwargs):
        """ Get the previous item in any particular category """
        spec = self._pagination_default_spec(kwargs)
        spec.update(kwargs)

        query = queries.build_query(spec)
        query = queries.where_before_entry(query, self._record)

        for record in query.order_by(orm.desc(model.Entry.local_date),
                                     orm.desc(model.Entry.id))[:1]:
            return Entry(record)

        return None
python
def _previous(self, **kwargs):
        """ Get the previous item in any particular category """
        spec = self._pagination_default_spec(kwargs)
        spec.update(kwargs)

        query = queries.build_query(spec)
        query = queries.where_before_entry(query, self._record)

        for record in query.order_by(orm.desc(model.Entry.local_date),
                                     orm.desc(model.Entry.id))[:1]:
            return Entry(record)

        return None
Get the previous item in any particular category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L333-L344
PlaidWeb/Publ
publ/entry.py
Entry._next
def _next(self, **kwargs):
        """ Get the next item in any particular category """
        spec = self._pagination_default_spec(kwargs)
        spec.update(kwargs)

        query = queries.build_query(spec)
        query = queries.where_after_entry(query, self._record)

        for record in query.order_by(model.Entry.local_date,
                                     model.Entry.id)[:1]:
            return Entry(record)

        return None
python
def _next(self, **kwargs):
        """ Get the next item in any particular category """
        spec = self._pagination_default_spec(kwargs)
        spec.update(kwargs)

        query = queries.build_query(spec)
        query = queries.where_after_entry(query, self._record)

        for record in query.order_by(model.Entry.local_date,
                                     model.Entry.id)[:1]:
            return Entry(record)

        return None
Get the next item in any particular category
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/entry.py#L346-L357
PlaidWeb/Publ
publ/config.py
setup
def setup(cfg):
    """ set up the global configuration from an object """

    # copy the necessary configuration values over
    this_module = sys.modules[__name__]
    for name, value in cfg.items():
        if hasattr(this_module, name):
            setattr(this_module, name, value)
python
def setup(cfg):
    """ set up the global configuration from an object """

    # copy the necessary configuration values over
    this_module = sys.modules[__name__]
    for name, value in cfg.items():
        if hasattr(this_module, name):
            setattr(this_module, name, value)
set up the global configuration from an object
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/config.py#L26-L33
PlaidWeb/Publ
publ/rendering.py
mime_type
def mime_type(template):
    """ infer the content-type from the extension """
    _, ext = os.path.splitext(template.filename)
    return EXTENSION_MAP.get(ext, 'text/html; charset=utf-8')
python
def mime_type(template):
    """ infer the content-type from the extension """
    _, ext = os.path.splitext(template.filename)
    return EXTENSION_MAP.get(ext, 'text/html; charset=utf-8')
infer the content-type from the extension
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/rendering.py#L37-L40
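mime_type relies on a module-level EXTENSION_MAP that is not shown here. A plausible minimal sketch of the lookup follows; the map contents are invented for illustration (Publ's real EXTENSION_MAP may differ), and this version takes a filename directly rather than a Template object.

import os

# Hypothetical extension-to-content-type map; the real EXTENSION_MAP in Publ
# may contain different or additional entries.
EXTENSION_MAP = {
    '.xml': 'application/xml',
    '.json': 'application/json',
    '.txt': 'text/plain; charset=utf-8',
}

def mime_type(filename):
    """ infer the content-type from the extension """
    _, ext = os.path.splitext(filename)
    return EXTENSION_MAP.get(ext, 'text/html; charset=utf-8')

print(mime_type('feed.xml'))    # application/xml
print(mime_type('index.html'))  # falls back to text/html; charset=utf-8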
PlaidWeb/Publ
publ/rendering.py
map_template
def map_template(category, template_list):
    """ Given a file path and an acceptable list of templates, return the
    best-matching template's path relative to the configured template
    directory.

    Arguments:

    category -- The path to map
    template_list -- A template to look up (as a string), or a list of
        templates.
    """

    if isinstance(template_list, str):
        template_list = [template_list]

    for template in template_list:
        path = os.path.normpath(category)
        while path is not None:
            for extension in ['', '.html', '.htm', '.xml', '.json']:
                candidate = os.path.join(path, template + extension)
                file_path = os.path.join(config.template_folder, candidate)
                if os.path.isfile(file_path):
                    return Template(template, candidate, file_path)
            parent = os.path.dirname(path)
            if parent != path:
                path = parent
            else:
                path = None
python
def map_template(category, template_list):
    """ Given a file path and an acceptable list of templates, return the
    best-matching template's path relative to the configured template
    directory.

    Arguments:

    category -- The path to map
    template_list -- A template to look up (as a string), or a list of
        templates.
    """

    if isinstance(template_list, str):
        template_list = [template_list]

    for template in template_list:
        path = os.path.normpath(category)
        while path is not None:
            for extension in ['', '.html', '.htm', '.xml', '.json']:
                candidate = os.path.join(path, template + extension)
                file_path = os.path.join(config.template_folder, candidate)
                if os.path.isfile(file_path):
                    return Template(template, candidate, file_path)
            parent = os.path.dirname(path)
            if parent != path:
                path = parent
            else:
                path = None
Given a file path and an acceptable list of templates, return the
best-matching template's path relative to the configured template
directory.

Arguments:

category -- The path to map
template_list -- A template to look up (as a string), or a list of
    templates.
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/rendering.py#L44-L71
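The search order in map_template is: for each requested template name, start at the category directory and walk up toward the template root, trying every candidate extension at each level. A standalone sketch of that walk against an in-memory set of paths instead of the filesystem; the set of available templates and the example categories are made up.

import os

def map_template(category, template_list, available):
    """ Walk from the category directory up to the root, trying each
    extension at every level, and return the first candidate present
    in `available`. """
    if isinstance(template_list, str):
        template_list = [template_list]

    for template in template_list:
        path = os.path.normpath(category)
        while path is not None:
            for extension in ['', '.html', '.htm', '.xml', '.json']:
                candidate = os.path.join(path, template + extension)
                if candidate in available:
                    return candidate
            parent = os.path.dirname(path)
            path = parent if parent != path else None
    return None

available = {'index.html', os.path.join('blog', 'feed.xml')}
print(map_template(os.path.join('blog', '2019'), 'feed', available))  # -> blog/feed.xml
print(map_template('blog', ['entry', 'index'], available))            # -> index.html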
PlaidWeb/Publ
publ/rendering.py
get_template
def get_template(template, relation):
    """ Given an entry or a category, return the path to a related template """
    if isinstance(relation, Entry):
        path = relation.category.path
    elif isinstance(relation, Category):
        path = relation.path
    else:
        path = relation

    tmpl = map_template(path, template)
    return tmpl.filename if tmpl else None
python
def get_template(template, relation):
    """ Given an entry or a category, return the path to a related template """
    if isinstance(relation, Entry):
        path = relation.category.path
    elif isinstance(relation, Category):
        path = relation.path
    else:
        path = relation

    tmpl = map_template(path, template)
    return tmpl.filename if tmpl else None
Given an entry or a category, return the path to a related template
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/rendering.py#L74-L84
PlaidWeb/Publ
publ/rendering.py
image_function
def image_function(template=None, entry=None, category=None):
    """ Get a function that gets an image """
    path = []
    if entry is not None:
        path += entry.search_path
    if category is not None:
        # Since the category might be different than the entry's category we add
        # this too
        path += category.search_path
    if template is not None:
        path.append(os.path.join(
            config.content_folder,
            os.path.dirname(template.filename)))

    return lambda filename: image.get_image(filename, path)
python
def image_function(template=None, entry=None, category=None):
    """ Get a function that gets an image """
    path = []
    if entry is not None:
        path += entry.search_path
    if category is not None:
        # Since the category might be different than the entry's category we add
        # this too
        path += category.search_path
    if template is not None:
        path.append(os.path.join(
            config.content_folder,
            os.path.dirname(template.filename)))

    return lambda filename: image.get_image(filename, path)
Get a function that gets an image
https://github.com/PlaidWeb/Publ/blob/ce7893632ddc3cb70b4978a41ffd7dd06fa13565/publ/rendering.py#L96-L112