def parse(self, scope):
    """ Parse Node
    args:
        scope (Scope): Scope object
    raises:
        SyntaxError
    returns:
        str
    """
    assert len(self.tokens) == 3
    expr = self.process(self.tokens, scope)
    A, O, B = [
        e[0] if isinstance(e, tuple) else e for e in expr
        if str(e).strip()
    ]
    try:
        a, ua = utility.analyze_number(A, 'Illegal element in expression')
        b, ub = utility.analyze_number(B, 'Illegal element in expression')
    except SyntaxError:
        return ' '.join([str(A), str(O), str(B)])
    if (a is False or b is False):
        return ' '.join([str(A), str(O), str(B)])
    if ua == 'color' or ub == 'color':
        return color.Color().process((A, O, B))
    if a == 0 and O == '/':
        # NOTE(saschpe): The ugliest but valid CSS since sliced bread:
        # 'font: 0/1 a;'
        return ''.join([str(A), str(O), str(B), ' '])
    out = self.operate(a, b, O)
    if isinstance(out, bool):
        return out
    return self.with_units(out, ua, ub)
def with_units(self, val, ua, ub):
    """Return value with unit.
    args:
        val (mixed): result
        ua (str): 1st unit
        ub (str): 2nd unit
    raises:
        SyntaxError
    returns:
        str
    """
    if not val:
        return str(val)
    if ua or ub:
        if ua and ub:
            if ua == ub:
                return str(val) + ua
            else:
                # Nodejs version does not seem to mind mismatched
                # units within expressions. So we choose the first
                # as they do
                # raise SyntaxError("Error in expression %s != %s" % (ua, ub))
                return str(val) + ua
        elif ua:
            return str(val) + ua
        elif ub:
            return str(val) + ub
    return repr(val)
def operate(self, vala, valb, oper):
    """Perform operation
    args:
        vala (mixed): 1st value
        valb (mixed): 2nd value
        oper (str): operation
    returns:
        mixed
    """
    operation = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '=': operator.eq,
        '>': operator.gt,
        '<': operator.lt,
        '>=': operator.ge,
        '=<': operator.le,
    }.get(oper)
    if operation is None:
        raise SyntaxError("Unknown operation %s" % oper)
    ret = operation(vala, valb)
    if oper in '+-*/' and int(ret) == ret:
        ret = int(ret)
    return ret
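A minimal standalone sketch of the same table-driven dispatch (apply_op is a hypothetical name, not part of the source; it assumes plain numeric inputs):

import operator

def apply_op(a, b, op):
    # look the operator up in a dict instead of branching
    fn = {'+': operator.add, '-': operator.sub,
          '*': operator.mul, '/': operator.truediv}.get(op)
    if fn is None:
        raise SyntaxError("Unknown operation %s" % op)
    ret = fn(a, b)
    # collapse whole-number results to int, as operate() does
    return int(ret) if int(ret) == ret else ret

print(apply_op(3, 2, '/'))  # 1.5
print(apply_op(4, 2, '/'))  # 2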
def parse(self, filename=None, file=None, debuglevel=0):
    """ Parse file.
    kwargs:
        filename (str): File to parse
        debuglevel (int): Parser debuglevel
    """
    self.scope.push()
    if not file:
        # We use a path.
        file = filename
    else:
        # We use a stream and try to extract the name from the stream.
        if hasattr(file, 'name'):
            if filename is not None:
                raise AssertionError(
                    'names of file and filename are in conflict')
            filename = file.name
        else:
            filename = '(stream)'
    self.target = filename
    if self.verbose and not self.fail_with_exc:
        print('Compiling target: %s' % filename, file=sys.stderr)
    self.result = self.parser.parse(file, lexer=self.lex, debug=debuglevel)
    self.post_parse()
    self.register.close()
def post_parse(self):
    """ Post parse cycle. The nodejs version allows calls to mixins
    not yet defined or known to the parser. We defer all calls to
    mixins until after the first cycle, when all names are known.
    """
    if self.result:
        out = []
        for pu in self.result:
            try:
                out.append(pu.parse(self.scope))
            except SyntaxError as e:
                self.handle_error(e, 0)
        self.result = list(utility.flatten(out))
def p_unit_list(self, p):
    """ unit_list : unit_list unit
                  | unit
    """
    if isinstance(p[1], list):
        if len(p) >= 3:
            if isinstance(p[2], list):
                p[1].extend(p[2])
            else:
                p[1].append(p[2])
    else:
        p[1] = [p[1]]
    p[0] = p[1]
def p_statement_aux(self, p):
    """ statement : css_charset t_ws css_string t_semicolon
                  | css_namespace t_ws css_string t_semicolon
    """
    p[0] = Statement(list(p)[1:], p.lineno(1))
    p[0].parse(None)
def p_statement_namespace(self, p):
    """ statement : css_namespace t_ws word css_string t_semicolon
    """
    p[0] = Statement(list(p)[1:], p.lineno(1))
    p[0].parse(None)
def p_statement_import(self, p):
    """ import_statement : css_import t_ws string t_semicolon
                         | css_import t_ws css_string t_semicolon
                         | css_import t_ws css_string media_query_list t_semicolon
                         | css_import t_ws fcall t_semicolon
                         | css_import t_ws fcall media_query_list t_semicolon
    """
    if self.importlvl > 8:
        raise ImportError(
            'Recursive import level too deep > 8 (circular import?)')
    if isinstance(p[3], string_types):
        ipath = utility.destring(p[3])
    elif isinstance(p[3], list):
        p[3] = Import(p[3], p.lineno(4)).parse(self.scope)
        ipath = utility.destring(p[3])
    elif isinstance(p[3], Call):
        # NOTE(saschpe): Always in the form of 'url("...");', so parse it
        # and retrieve the inner css_string. This whole func is messy.
        p[3] = p[3].parse(
            self.scope)  # Store it as string, Statement.fmt expects it.
        ipath = utility.destring(p[3][4:-1])
    fn, fe = os.path.splitext(ipath)
    if not fe or fe.lower() == '.less':
        try:
            cpath = os.path.dirname(os.path.abspath(self.target))
            if not fe:
                ipath += '.less'
            filename = "%s%s%s" % (cpath, os.sep, ipath)
            if os.path.exists(filename):
                recurse = LessParser(
                    importlvl=self.importlvl + 1,
                    verbose=self.verbose,
                    scope=self.scope)
                recurse.parse(filename=filename, debuglevel=0)
                p[0] = recurse.result
            else:
                err = "Cannot import '%s', file not found" % filename
                self.handle_error(err, p.lineno(1), 'W')
                p[0] = None
        except ImportError as e:
            self.handle_error(e, p)
    else:
        p[0] = Statement(list(p)[1:], p.lineno(1))
        p[0].parse(None)
    sys.stdout.flush()
def p_block(self, p):
    """ block_decl : block_open declaration_list brace_close
    """
    p[0] = Block(list(p)[1:-1], p.lineno(3))
    self.scope.pop()
    self.scope.add_block(p[0])
def p_block_replace(self, p):
    """ block_decl : identifier t_semicolon
    """
    m = p[1].parse(None)
    block = self.scope.blocks(m.raw())
    if block:
        p[0] = block.copy_inner(self.scope)
    else:
        # fall back to mixin. Allow calls to mixins without parens
        p[0] = Deferred(p[1], None, p.lineno(2))
def p_block_open(self, p):
    """ block_open : identifier brace_open
    """
    try:
        p[1].parse(self.scope)
    except SyntaxError:
        pass
    p[0] = p[1]
    self.scope.current = p[1]
def p_font_face_open(self, p):
    """ block_open : css_font_face t_ws brace_open
    """
    p[0] = Identifier([p[1], p[2]]).parse(self.scope)
def p_mixin(self, p):
    """ mixin_decl : open_mixin declaration_list brace_close
    """
    self.scope.add_mixin(Mixin(list(p)[1:], p.lineno(3)).parse(self.scope))
    self.scope.pop()
    p[0] = None
def p_open_mixin(self, p):
    """ open_mixin : identifier t_popen mixin_args_list t_pclose brace_open
                   | identifier t_popen mixin_args_list t_pclose mixin_guard brace_open
    """
    p[1].parse(self.scope)
    self.scope.current = p[1]
    p[0] = [p[1], p[3]]
    if len(p) > 6:
        p[0].append(p[5])
    else:
        p[0].append(None)
def p_mixin_guard_cond_list_aux(self, p):
    """ mixin_guard_cond_list : mixin_guard_cond_list t_comma mixin_guard_cond
                              | mixin_guard_cond_list less_and mixin_guard_cond
    """
    p[1].append(p[2])
    p[1].append(p[3])
    p[0] = p[1]
def p_call_mixin(self, p):
    """ call_mixin : identifier t_popen mixin_args_list t_pclose t_semicolon
    """
    p[1].parse(None)
    p[0] = Deferred(p[1], p[3], p.lineno(4))
def p_declaration_list(self, p):
    """ declaration_list : declaration_list declaration
                         | declaration
                         | empty
    """
    if len(p) > 2:
        p[1].extend(p[2])
    p[0] = p[1]
def p_variable_decl(self, p):
    """ variable_decl : variable t_colon style_list t_semicolon
    """
    p[0] = Variable(list(p)[1:-1], p.lineno(4))
    p[0].parse(self.scope)
def p_property_decl(self, p):
    """ property_decl : prop_open style_list t_semicolon
                      | prop_open style_list css_important t_semicolon
                      | prop_open empty t_semicolon
    """
    l = len(p)
    p[0] = Property(list(p)[1:-1], p.lineno(l - 1))
def p_identifier_list_aux(self, p):
    """ identifier_list : identifier_list t_comma identifier_group
    """
    p[1].extend([p[2]])
    p[1].extend(p[3])
    p[0] = p[1]
def p_identifier_group_op(self, p):
    """ identifier_group : identifier_group child_selector ident_parts
                         | identifier_group '+' ident_parts
                         | identifier_group general_sibling_selector ident_parts
                         | identifier_group '*'
    """
    p[1].extend([p[2]])
    if len(p) > 3:
        p[1].extend(p[3])
    p[0] = p[1]
def p_ident_parts_aux(self, p):
    """ ident_parts : ident_parts ident_part
                    | ident_parts filter_group
    """
    if isinstance(p[2], list):
        p[1].extend(p[2])
    else:
        p[1].append(p[2])
    p[0] = p[1]
def p_ident_parts(self, p):
    """ ident_parts : ident_part
                    | selector
                    | filter_group
    """
    if not isinstance(p[1], list):
        p[1] = [p[1]]
    p[0] = p[1]
def p_media_query_value(self, p):
    """ media_query_value : number
                          | variable
                          | word
                          | color
                          | expression
    """
    if utility.is_variable(p[1]):
        var = self.scope.variables(''.join(p[1]))
        if var:
            value = var.value[0]
            if hasattr(value, 'parse'):
                p[1] = value.parse(self.scope)
            else:
                p[1] = value
    if isinstance(p[1], Expression):
        p[0] = p[1].parse(self.scope)
    else:
        p[0] = p[1]
def p_color(self, p):
    """ color : css_color
              | css_color t_ws
    """
    try:
        p[0] = Color().fmt(p[1])
        if len(p) > 2:
            p[0] = [p[0], p[2]]
    except ValueError:
        self.handle_error('Illegal color value `%s`' % p[1],
                          p.lineno(1), 'W')
        p[0] = p[1]
def p_error(self, t):
    """ Internal error handler
    args:
        t (Lex token): Error token
    """
    if t:
        error_msg = "E: %s line: %d, Syntax Error, token: `%s`, `%s`" % \
            (self.target, t.lineno, t.type, t.value)
        self.register.register(error_msg)
    while True:
        t = self.lex.token()
        if not t or t.value == '}':
            if len(self.scope) > 1:
                self.scope.pop()
            break
    self.parser.restart()
    return t
def handle_error(self, e, line, t='E'):
    """ Custom error handler
    args:
        e (Mixed): Exception or str
        line (int): line number
        t (str): Error type
    """
    self.register.register("%s: line: %d: %s\n" % (t, line, e))
def parse(self, scope):
    """Parse node
    args:
        scope (Scope): current scope
    raises:
        SyntaxError
    returns:
        parsed
    """
    if not self.parsed:
        self.parsed = ''.join(self.process(self.tokens, scope))
    return self.parsed
def NextPage(gh):
    """
    Checks if a GitHub call returned multiple pages of data.

    :param gh: GitHub() instance
    :rtype: int
    :return: number of next page or 0 if no next page
    """
    header = dict(gh.getheaders())
    if 'Link' in header:
        parts = header['Link'].split(',')
        for part in parts:
            subparts = part.split(';')
            sub = subparts[1].split('=')
            if sub[0].strip() == 'rel':
                if sub[1] == '"next"':
                    page = int(
                        re.match(
                            r'.*page=(\d+).*', subparts[0],
                            re.IGNORECASE | re.DOTALL | re.UNICODE
                        ).groups()[0]
                    )
                    return page
    return 0
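For reference, a GitHub Link header looks roughly like the illustrative value below; NextPage() walks its comma-separated parts and pulls the page number out of the rel="next" entry:

import re

# illustrative header value; the real one comes from gh.getheaders()
link = ('<https://api.github.com/repos/u/r/tags?page=2>; rel="next", '
        '<https://api.github.com/repos/u/r/tags?page=5>; rel="last"')
for part in link.split(','):
    url_part, rel_part = part.split(';')
    if rel_part.split('=')[1] == '"next"':
        print(re.match(r'.*page=(\d+).*', url_part).group(1))  # -> 2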
def fetch_github_token(self):
    """
    Fetch GitHub token. First try to use the value provided by the
    --token option, otherwise try to fetch it from git config, and
    finally from the CHANGELOG_GITHUB_TOKEN env variable.

    :returns: Nothing
    """
    if not self.options.token:
        try:
            for v in GH_CFG_VARS:
                cmd = ['git', 'config', '--get', '{0}'.format(v)]
                self.options.token = subprocess.Popen(
                    cmd, stdout=subprocess.PIPE).communicate()[0].strip()
                if self.options.token:
                    break
        except (subprocess.CalledProcessError, WindowsError):
            pass
    if not self.options.token:
        self.options.token = os.environ.get(CHANGELOG_GITHUB_TOKEN)
    if not self.options.token:
        print(NO_TOKEN_PROVIDED)
def get_all_tags(self):
    """
    Fetch all tags for repository from GitHub.

    :return: tags in repository
    :rtype: list
    """
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    if verbose:
        print("Fetching tags...")

    tags = []
    page = 1
    while page > 0:
        if verbose > 2:
            print(".", end="")
        rc, data = gh.repos[user][repo].tags.get(
            page=page, per_page=PER_PAGE_NUMBER)
        if rc == 200:
            tags.extend(data)
        else:
            self.raise_GitHubError(rc, data, gh.getheaders())
        page = NextPage(gh)
    if verbose > 2:
        print(".")

    if len(tags) == 0:
        if not self.options.quiet:
            print("Warning: Can't find any tags in repo. Make sure that "
                  "you push tags to the remote repo via "
                  "'git push --tags'.")
        exit()
    if verbose > 1:
        print("Found {} tag(s)".format(len(tags)))
    return tags
def fetch_closed_issues_and_pr(self):
    """
    Fetch all closed issues and separate them into pull requests and
    plain issues (a pull request is a kind of issue in GitHub terms).

    :rtype: list, list
    :return: issues, pull-requests
    """
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    if verbose:
        print("Fetching closed issues and pull requests...")

    issues = []
    data = []
    page = 1
    while page > 0:
        if verbose > 2:
            print(".", end="")
        rc, data = gh.repos[user][repo].issues.get(
            page=page, per_page=PER_PAGE_NUMBER,
            state='closed', filter='all'
        )
        if rc == 200:
            issues.extend(data)
        else:
            self.raise_GitHubError(rc, data, gh.getheaders())
        if len(issues) >= self.options.max_issues:
            break
        page = NextPage(gh)
    self.first_issue = data[-1] if len(data) > 0 else []
    if verbose > 2:
        print(".")

    # separate arrays of issues and pull requests:
    prs = []
    iss = []
    for i in issues:
        if "pull_request" in i:
            prs.append(i)
        else:
            iss.append(i)
    if verbose > 1:
        print("\treceived {} issues and {} pull requests.".format(
            len(iss), len(prs))
        )
    return iss, prs
def fetch_closed_pull_requests(self):
    """
    Fetch all closed pull requests. We need them to detect the
    "merged_at" parameter.

    :rtype: list
    :return: all pull requests
    """
    pull_requests = []
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    if verbose:
        print("Fetching closed pull requests...")
    page = 1
    while page > 0:
        if verbose > 2:
            print(".", end="")
        if self.options.release_branch:
            rc, data = gh.repos[user][repo].pulls.get(
                page=page, per_page=PER_PAGE_NUMBER, state='closed',
                base=self.options.release_branch
            )
        else:
            rc, data = gh.repos[user][repo].pulls.get(
                page=page, per_page=PER_PAGE_NUMBER, state='closed',
            )
        if rc == 200:
            pull_requests.extend(data)
        else:
            self.raise_GitHubError(rc, data, gh.getheaders())
        page = NextPage(gh)
    if verbose > 2:
        print(".")
    if verbose > 1:
        print("\tfetched {} closed pull requests.".format(
            len(pull_requests))
        )
    return pull_requests
def fetch_repo_creation_date(self):
    """
    Get the creation date of the repository from GitHub.

    :rtype: str, str
    :return: special tag name, creation date as ISO date string
    """
    gh = self.github
    user = self.options.user
    repo = self.options.project
    rc, data = gh.repos[user][repo].get()
    if rc == 200:
        return REPO_CREATED_TAG_NAME, data["created_at"]
    self.raise_GitHubError(rc, data, gh.getheaders())
    return None, None
def fetch_events_async(self, issues, tag_name):
    """
    Fetch events for all issues and add them to self.events

    :param list issues: all issues
    :param str tag_name: name of the tag to fetch events for
    :returns: Nothing
    """
    if not issues:
        return issues
    max_simultaneous_requests = self.options.max_simultaneous_requests
    verbose = self.options.verbose
    gh = self.github
    user = self.options.user
    repo = self.options.project
    self.events_cnt = 0
    if verbose:
        print("fetching events for {} {}... ".format(
            len(issues), tag_name)
        )

    def worker(issue):
        page = 1
        issue['events'] = []
        while page > 0:
            rc, data = gh.repos[user][repo].issues[
                issue['number']].events.get(
                page=page, per_page=PER_PAGE_NUMBER)
            if rc == 200:
                issue['events'].extend(data)
                self.events_cnt += len(data)
            else:
                self.raise_GitHubError(rc, data, gh.getheaders())
            page = NextPage(gh)

    threads = []
    cnt = len(issues)
    for i in range(0, (cnt // max_simultaneous_requests) + 1):
        for j in range(max_simultaneous_requests):
            idx = i * max_simultaneous_requests + j
            if idx == cnt:
                break
            t = threading.Thread(target=worker, args=(issues[idx],))
            threads.append(t)
            t.start()
            if verbose > 2:
                print(".", end="")
                if not idx % PER_PAGE_NUMBER:
                    print("")
        # wait for the current batch before starting the next one
        for t in threads:
            t.join()
    if verbose > 2:
        print(".")
def fetch_date_of_tag(self, tag):
    """
    Fetch time for tag from repository.

    :param dict tag: dictionary with tag information
    :rtype: str
    :return: time of specified tag as ISO date string
    """
    if self.options.verbose > 1:
        print("\tFetching date for tag {}".format(tag["name"]))
    gh = self.github
    user = self.options.user
    repo = self.options.project

    rc, data = gh.repos[user][repo].git.commits[
        tag["commit"]["sha"]].get()
    if rc == 200:
        return data["committer"]["date"]
    self.raise_GitHubError(rc, data, gh.getheaders())
def fetch_commit(self, event):
    """
    Fetch commit data for specified event.

    :param dict event: dictionary with event information
    :rtype: dict
    :return: dictionary with commit data
    """
    gh = self.github
    user = self.options.user
    repo = self.options.project

    rc, data = gh.repos[user][repo].git.commits[
        event["commit_id"]].get()
    if rc == 200:
        return data
    self.raise_GitHubError(rc, data, gh.getheaders())
def run(self):
    """
    The entry point of this script to generate the change log.

    'ChangelogGeneratorError' is thrown when one of the specified
    tags was not found in the list of tags.
    """
    if not self.options.project or not self.options.user:
        print("Project and/or user missing. "
              "For help run:\n  pygcgen --help")
        return

    if not self.options.quiet:
        print("Generating changelog...")

    log = None
    try:
        log = self.generator.compound_changelog()
    except ChangelogGeneratorError as err:
        print("\n\033[91m\033[1m{}\x1b[0m".format(err.args[0]))
        exit(1)
    if not log:
        if not self.options.quiet:
            print("Empty changelog generated. {} not written.".format(
                self.options.output)
            )
        return

    if self.options.no_overwrite:
        out = checkname(self.options.output)
    else:
        out = self.options.output

    with codecs.open(out, "w", "utf-8") as fh:
        fh.write(log)

    if not self.options.quiet:
        print("Done!")
        print("Generated changelog written to {}".format(out))
def parse_heading(heading):
    """
    Parse a single heading and return a Hash.
    The following heading structures are currently valid:
    - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1) (2015-03-24)
    - ## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1)
    - ## v1.0.2 (2015-03-24)
    - ## v1.0.2

    @param [String] heading Heading from the ChangeLog File
    @return [Hash] Returns a structured Hash with version, url and date
    """
    heading_structures = [
        r"^## \[(?P<version>.+?)\]\((?P<url>.+?)\)( \((?P<date>.+?)\))?$",
        r"^## (?P<version>.+?)( \((?P<date>.+?)\))?$",
    ]
    captures = {"version": None, "url": None, "date": None}
    for regexp in heading_structures:
        matches = re.match(regexp, heading)
        if matches:
            captures.update(matches.groupdict())
            break
    return captures
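A quick sanity check of parse_heading(), assuming it is defined as above (expected results shown in comments):

print(parse_heading(
    "## [v1.0.2](https://github.com/zanui/chef-thumbor/tree/v1.0.1)"
    " (2015-03-24)"))
# {'version': 'v1.0.2',
#  'url': 'https://github.com/zanui/chef-thumbor/tree/v1.0.1',
#  'date': '2015-03-24'}
print(parse_heading("## v1.0.2"))
# {'version': 'v1.0.2', 'url': None, 'date': None}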
def parse(data):
    """
    Parse the given ChangeLog data into a list of Hashes.

    @param [String] data File data from the ChangeLog.md
    @return [Array<Hash>] Parsed data, e.g.
        [{ 'version' => ..., 'url' => ..., 'date' => ..., 'content' => ...}, ...]
    """
    sections = re.compile("^## .+$", re.MULTILINE).split(data)
    headings = re.findall("^## .+?$", data, re.MULTILINE)
    sections.pop(0)
    parsed = []

    def func(h, s):
        p = parse_heading(h)
        p["content"] = s
        parsed.append(p)

    list(map(func, headings, sections))
    return parsed
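And end to end on a small illustrative fragment:

data = (
    "# Changelog\n\n"
    "## v1.0.2 (2015-03-24)\n- fix things\n\n"
    "## v1.0.1\n- initial release\n"
)
for entry in parse(data):
    print(entry["version"], entry["date"])
# v1.0.2 2015-03-24
# v1.0.1 None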
def _signal_handler_map(self):
    """
    Create the signal handler map:
    a dictionary with signal:handler mapping based on self.signal_map

    :return: dict
    """
    result = {}
    for signum, handler in self.signal_map.items():
        result[signum] = self._get_signal_handler(handler)
    return result
def open(self):
    """
    Daemonize this process.
    Do everything that is needed to become a Unix daemon.

    :return: None
    :raise: DaemonError
    """
    if self.is_open:
        return
    try:
        os.chdir(self.working_directory)
        if self.chroot_directory:
            os.chroot(self.chroot_directory)
        os.setgid(self.gid)
        os.setuid(self.uid)
        os.umask(self.umask)
    except OSError as err:
        raise DaemonError('Setting up Environment failed: {0}'
                          .format(err))

    if self.prevent_core:
        try:
            resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        except Exception as err:
            raise DaemonError('Could not disable core files: {0}'
                              .format(err))

    if self.detach_process:
        # double-fork so the daemon is re-parented to init and can
        # never reacquire a controlling terminal
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as err:
            raise DaemonError('First fork failed: {0}'.format(err))
        os.setsid()
        try:
            if os.fork() > 0:
                os._exit(0)
        except OSError as err:
            raise DaemonError('Second fork failed: {0}'.format(err))

    for (signal_number, handler) in self._signal_handler_map.items():
        signal.signal(signal_number, handler)

    close_filenos(self._files_preserve)
    redirect_stream(sys.stdin, self.stdin)
    redirect_stream(sys.stdout, self.stdout)
    redirect_stream(sys.stderr, self.stderr)
    if self.pidfile:
        self.pidfile.acquire()
    self._is_open = True
def user_and_project_from_git(self, options, arg0=None, arg1=None):
    """ Detects user and project from git. """
    user, project = self.user_project_from_option(options, arg0, arg1)
    if user and project:
        return user, project
    try:
        remote = subprocess.check_output(
            [
                'git', 'config', '--get',
                'remote.{0}.url'.format(options.git_remote)
            ]
        )
    except subprocess.CalledProcessError:
        return None, None
    except WindowsError:
        print("git binary not found.")
        exit(1)
    else:
        return self.user_project_from_remote(remote)
def user_project_from_option(options, arg0, arg1):
    """
    Try to find user and project name from the first command-line
    argument.

    @param [String] arg0 first command-line argument, if given
    @return [Array] user and project
    """
    site = options.github_site
    if arg0 and not arg1:
        # this match should parse strings such as
        # "https://github.com/skywinder/Github-Changelog-Generator"
        # or
        # "skywinder/Github-Changelog-Generator"
        # to user and project
        match = re.match(
            "(?:.+{site}/)?(.+)/(.+)".format(site=site),
            arg0
        )
        if not match:
            print("Can't detect user and name from first "
                  "parameter: '{arg0}' -> exit'".format(arg0=arg0))
            exit(1)
        return match.groups()
    return None, None
def user_project_from_remote(remote):
    """
    Try to find user and project name from git remote output.

    @param [String] remote output of git remote command
    @return [Array] user and project
    """
    # try to find repo in format:
    # origin git@github.com:skywinder/Github-Changelog-Generator.git (fetch)
    # git@github.com:skywinder/Github-Changelog-Generator.git
    regex1 = br".*(?:[:/])(?P<user>(-|\w|\.)*)/" \
             br"(?P<project>(-|\w|\.)*)(\.git).*"
    match = re.match(regex1, remote)
    if match:
        return match.group("user"), match.group("project")
    # try to find repo in format:
    # origin https://github.com/skywinder/ChangelogMerger (fetch)
    # https://github.com/skywinder/ChangelogMerger
    # (named groups and a bytes pattern, since `remote` is bytes and
    # the groups are read back by name)
    regex2 = br".*/(?P<user>(?:-|\w|\.)*)/(?P<project>(?:-|\w|\.)*).*"
    match = re.match(regex2, remote)
    if match:
        return match.group("user"), match.group("project")
    return None, None
def timestring_to_datetime(timestring):
    """
    Convert an ISO-formatted date and time string to a datetime object.

    :param str timestring: String with date and time in ISO format.
    :rtype: datetime
    :return: datetime object
    """
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", category=UnicodeWarning)
        result = dateutil_parser(timestring)
    return result
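A quick usage sketch, assuming dateutil_parser is an alias for dateutil.parser.parse:

from dateutil.parser import parse as dateutil_parser

dt = dateutil_parser("2015-03-24T10:11:12Z")
print(dt.isoformat())  # 2015-03-24T10:11:12+00:00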
def fetch_events_for_issues_and_pr(self):
    """
    Fetch events for issues and pull requests.

    @return [Array] array of fetched issues
    """
    # Async fetching events:
    self.fetcher.fetch_events_async(self.issues, "issues")
    self.fetcher.fetch_events_async(self.pull_requests, "pull requests")
def fetch_tags_dates(self):
    """ Async fetching of all tags dates. """
    if self.options.verbose:
        print(
            "Fetching dates for {} tags...".format(
                len(self.filtered_tags))
        )

    def worker(tag):
        self.get_time_of_tag(tag)

    # Async fetching tags:
    threads = []
    max_threads = 50
    cnt = len(self.filtered_tags)
    for i in range(0, (cnt // max_threads) + 1):
        for j in range(max_threads):
            idx = i * max_threads + j
            if idx == cnt:
                break
            t = threading.Thread(target=worker,
                                 args=(self.filtered_tags[idx],))
            threads.append(t)
            t.start()
            if self.options.verbose > 2:
                print(".", end="")
        # wait for the current batch before starting the next one
        for t in threads:
            t.join()
    if self.options.verbose > 2:
        print(".")
    if self.options.verbose > 1:
        print("Fetched dates for {} tags.".format(
            len(self.tag_times_dict))
        )
def detect_actual_closed_dates(self, issues, kind):
    """
    Find correct closed dates, if issues were closed by commits.

    :param list issues: issues to check
    :param str kind: either "issues" or "pull requests"
    :rtype: list
    :return: issues with updated closed dates
    """
    if self.options.verbose:
        print("Fetching closed dates for {} {}...".format(
            len(issues), kind)
        )
    all_issues = copy.deepcopy(issues)
    # iterate over a copy of the list, since entries may be removed
    for issue in list(all_issues):
        if self.options.verbose > 2:
            print(".", end="")
            if not issues.index(issue) % 30:
                print("")
        self.find_closed_date_by_commit(issue)
        if not issue.get('actual_date', False):
            if issue.get('closed_at', False):
                print("Skipping closed non-merged issue: #{0} {1}".format(
                    issue["number"], issue["title"]))
            all_issues.remove(issue)
    if self.options.verbose > 2:
        print(".")
    return all_issues
def find_closed_date_by_commit(self, issue):
    """
    Fill in the "actual_date" parameter of the specified issue with
    the closed date of the commit, if it was closed by a commit.

    :param dict issue: issue to edit
    """
    if not issue.get('events'):
        return
    # if it's a PR -> find the "merged" event; in case of a
    # usual issue -> find the closed date
    compare_string = "merged" if 'merged_at' in issue else "closed"
    # reverse! - to find the latest closed event (events go in date
    # order), in case it was reopened and closed again
    issue['events'].reverse()
    found_date = False
    for event in issue['events']:
        if event["event"] == compare_string:
            self.set_date_from_event(event, issue)
            found_date = True
            break
    if not found_date:
        # TODO: assert issues that remain without an
        # 'actual_date' for some reason.
        print("\nWARNING: Issue without 'actual_date':"
              " #{0} {1}".format(issue["number"], issue["title"]))
def set_date_from_event(self, event, issue):
    """
    Set the closed date of an issue from the given event.

    :param dict event: event data
    :param dict issue: issue data
    """
    if not event.get('commit_id', None):
        issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
        return
    try:
        commit = self.fetcher.fetch_commit(event)
        issue['actual_date'] = timestring_to_datetime(
            commit['author']['date']
        )
    except ValueError:
        print("WARNING: Can't fetch commit {0}. "
              "It is probably referenced from another repo.".
              format(event['commit_id']))
        issue['actual_date'] = timestring_to_datetime(issue['closed_at'])
def encapsulate_string(raw_string):
    """
    Escape characters to make the markdown output look as expected.

    :param str raw_string: string to encapsulate
    :rtype: str
    :return: encapsulated input string
    """
    # the result of str.replace must be kept; it does not modify in place
    raw_string = raw_string.replace('\\', '\\\\')
    enc_string = re.sub(r"([<>*_()\[\]#])", r"\\\1", raw_string)
    return enc_string
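For example, characters with markdown meaning come back escaped (assuming encapsulate_string() from above is in scope):

print(encapsulate_string("fix *bold* and [link] in #123"))
# fix \*bold\* and \[link\] in \#123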
def compound_changelog(self):
    """
    Main function to start change log generation.

    :rtype: str
    :return: Generated change log file
    """
    self.fetch_and_filter_tags()
    tags_sorted = self.sort_tags_by_date(self.filtered_tags)
    self.filtered_tags = tags_sorted
    self.fetch_and_filter_issues_and_pr()

    log = str(self.options.frontmatter) \
        if self.options.frontmatter else u""
    log += u"{0}\n\n".format(self.options.header)

    if self.options.unreleased_only:
        log += self.generate_unreleased_section()
    else:
        log += self.generate_log_for_all_tags()

    try:
        with open(self.options.base) as fh:
            log += fh.read()
    except (TypeError, IOError):
        pass
    return log
def generate_sub_section(self, issues, prefix):
    """
    Generate a formatted list of issues for the changelog.

    :param list issues: Issues to put in sub-section.
    :param str prefix: Title of sub-section.
    :rtype: str
    :return: Generated ready-to-add sub-section.
    """
    log = ""
    if issues:
        if not self.options.simple_list:
            log += u"{0}\n\n".format(prefix)
        for issue in issues:
            merge_string = self.get_string_for_issue(issue)
            log += u"- {0}\n".format(merge_string)
        log += "\n"
    return log
def generate_header(self, newer_tag_name, newer_tag_link,
                    newer_tag_time, older_tag_link, project_url):
    """
    Generate a header for a tag section with specific parameters.

    :param str newer_tag_name: Name (title) of newer tag.
    :param str newer_tag_link: Tag name of newer tag, used for links.
        Could be same as **newer_tag_name** or some specific value,
        like `HEAD`.
    :param datetime newer_tag_time: Date and time when newer tag
        was created.
    :param str older_tag_link: Tag name of older tag, used for links.
    :param str project_url: URL for current project.
    :rtype: str
    :return: Generated ready-to-add tag section.
    """
    log = ""
    # Generate date string:
    # noinspection PyUnresolvedReferences
    time_string = newer_tag_time.strftime(self.options.date_format)

    # Generate tag name and link
    if self.options.release_url:
        release_url = self.options.release_url.format(newer_tag_link)
    else:
        release_url = u"{project_url}/tree/{newer_tag_link}".format(
            project_url=project_url, newer_tag_link=newer_tag_link)
    if not self.options.unreleased_with_date and \
            newer_tag_name == self.options.unreleased_label:
        log += u"## [{newer_tag_name}]({release_url})\n\n".format(
            newer_tag_name=newer_tag_name, release_url=release_url)
    else:
        log += u"## [{newer_tag_name}]({release_url}) " \
               u"({time_string})\n".format(
                   newer_tag_name=newer_tag_name,
                   release_url=release_url,
                   time_string=time_string
               )

    if self.options.compare_link \
            and older_tag_link != REPO_CREATED_TAG_NAME:
        # Generate compare link
        log += u"[Full Changelog]"
        log += u"({project_url}/compare/{older_tag_link}".format(
            project_url=project_url,
            older_tag_link=older_tag_link,
        )
        log += u"...{newer_tag_link})\n\n".format(
            newer_tag_link=newer_tag_link
        )
    return log
def generate_log_between_tags(self, older_tag, newer_tag):
    """
    Generate log between 2 specified tags.

    :param dict older_tag: All issues before this tag's date will be
        excluded. May be special value, if new tag is the first tag.
        (Means **older_tag** is when the repo was created.)
    :param dict newer_tag: All issues after this tag's date will be
        excluded. May be title of unreleased section.
    :rtype: str
    :return: Generated ready-to-add tag section for newer tag.
    """
    filtered_issues, filtered_pull_requests = \
        self.filter_issues_for_tags(newer_tag, older_tag)

    older_tag_name = older_tag["name"] if older_tag \
        else self.detect_since_tag()

    if not filtered_issues and not filtered_pull_requests:
        # do not generate an unreleased section if it would be empty
        return ""
    return self.generate_log_for_tag(
        filtered_pull_requests, filtered_issues,
        newer_tag, older_tag_name)
def filter_issues_for_tags(self, newer_tag, older_tag):
    """
    Apply all filters to issues and pull requests.

    :param dict older_tag: All issues before this tag's date will be
        excluded. May be special value, if new tag is the first tag.
        (Means **older_tag** is when the repo was created.)
    :param dict newer_tag: All issues after this tag's date will be
        excluded. May be title of unreleased section.
    :rtype: list(dict), list(dict)
    :return: Filtered issues and pull requests.
    """
    filtered_pull_requests = self.delete_by_time(self.pull_requests,
                                                 older_tag, newer_tag)
    filtered_issues = self.delete_by_time(self.issues, older_tag,
                                          newer_tag)

    newer_tag_name = newer_tag["name"] if newer_tag else None
    if self.options.filter_issues_by_milestone:
        # delete excess irrelevant issues (according to milestones).
        # Issue #22.
        filtered_issues = self.filter_by_milestone(
            filtered_issues, newer_tag_name, self.issues
        )
        filtered_pull_requests = self.filter_by_milestone(
            filtered_pull_requests, newer_tag_name, self.pull_requests
        )
    return filtered_issues, filtered_pull_requests
def generate_log_for_all_tags(self):
    """
    The full cycle of generation for the whole project.

    :rtype: str
    :return: The complete change log for released tags.
    """
    if self.options.verbose:
        print("Generating log...")
    self.issues2 = copy.deepcopy(self.issues)

    log1 = ""
    if self.options.with_unreleased:
        log1 = self.generate_unreleased_section()

    log = ""
    for index in range(len(self.filtered_tags) - 1):
        log += self.do_generate_log_for_all_tags_part1(log, index)

    if self.options.tag_separator and log1:
        log = log1 + self.options.tag_separator + log
    else:
        log = log1 + log

    if len(self.filtered_tags) != 0:
        log += self.do_generate_log_for_all_tags_part2(log)
    return log
def generate_unreleased_section(self):
    """
    Generate log for unreleased closed issues.

    :rtype: str
    :return: Generated ready-to-add unreleased section.
    """
    if not self.filtered_tags:
        return ""
    now = datetime.datetime.utcnow()
    now = now.replace(tzinfo=dateutil.tz.tzutc())
    head_tag = {"name": self.options.unreleased_label}
    self.tag_times_dict[head_tag["name"]] = now
    unreleased_log = self.generate_log_between_tags(
        self.filtered_tags[0], head_tag)
    return unreleased_log
def get_string_for_issue(self, issue):
    """
    Parse an issue and generate a single formatted issue line.

    Example output:
    - Add coveralls integration [\#223](https://github.com/skywinder/github-changelog-generator/pull/223) ([skywinder](https://github.com/skywinder))
    - Add coveralls integration [\#223](https://github.com/skywinder/github-changelog-generator/pull/223) (@skywinder)

    :param dict issue: Fetched issue from GitHub.
    :rtype: str
    :return: Markdown-formatted single issue.
    """
    encapsulated_title = self.encapsulate_string(issue['title'])
    try:
        title_with_number = u"{0} [\\#{1}]({2})".format(
            encapsulated_title, issue["number"], issue["html_url"]
        )
    except UnicodeEncodeError:
        # TODO: why did i add this? Is it needed?
        title_with_number = "ERROR ERROR ERROR: #{0} {1}".format(
            issue["number"], issue['title']
        )
        print(title_with_number, '\n', issue["html_url"])
    return self.issue_line_with_user(title_with_number, issue)
def issue_line_with_user(self, line, issue):
    """
    If the author option is enabled, add a link to the profile of
    the author of the pull request to the issue line.

    :param str line: String containing a markdown-formatted single issue.
    :param dict issue: Fetched issue from GitHub.
    :rtype: str
    :return: Issue line with added author link.
    """
    if not issue.get("pull_request") or not self.options.author:
        return line

    if not issue.get("user"):
        line += u" (Null user)"
    elif self.options.username_as_tag:
        line += u" (@{0})".format(
            issue["user"]["login"]
        )
    else:
        line += u" ([{0}]({1}))".format(
            issue["user"]["login"], issue["user"]["html_url"]
        )
    return line
def generate_log_for_tag(self, pull_requests, issues,
                         newer_tag, older_tag_name):
    """
    Generate the log for a tag section, with header and body.

    :param list(dict) pull_requests: List of PR's in this tag section.
    :param list(dict) issues: List of issues in this tag section.
    :param dict newer_tag: Github data of tag for this section.
    :param str older_tag_name: Older tag, used for the links.
        May be special value, if **newer tag** is the first tag.
        (Means **older_tag** is when the repo was created.)
    :rtype: str
    :return: Ready-to-add and parsed tag section.
    """
    newer_tag_link, newer_tag_name, \
        newer_tag_time = self.detect_link_tag_time(newer_tag)

    # fall back to the public site if no custom endpoint is set
    github_site = self.options.github_endpoint or "https://github.com"
    project_url = "{0}/{1}/{2}".format(
        github_site, self.options.user, self.options.project)

    log = self.generate_header(newer_tag_name, newer_tag_link,
                               newer_tag_time, older_tag_name,
                               project_url)
    if self.options.issues:
        # Generate issues:
        log += self.issues_to_log(issues, pull_requests)
    if self.options.include_pull_request:
        # Generate pull requests:
        log += self.generate_sub_section(
            pull_requests, self.options.merge_prefix
        )
    return log
def issues_to_log(self, issues, pull_requests):
    """
    Generate ready-to-paste log from list of issues and pull requests.

    :param list(dict) issues: List of issues in this tag section.
    :param list(dict) pull_requests: List of PR's in this tag section.
    :rtype: str
    :return: Generated log for issues and pull requests.
    """
    log = ""
    sections_a, issues_a = self.parse_by_sections(
        issues, pull_requests)

    for section, s_issues in sections_a.items():
        log += self.generate_sub_section(s_issues, section)
    log += self.generate_sub_section(issues_a, self.options.issue_prefix)
    return log
def parse_by_sections(self, issues, pull_requests):
    """
    Sort issues by type (bugs, features, etc. or just closed issues)
    according to their labels.

    :param list(dict) issues: List of issues in this tag section.
    :param list(dict) pull_requests: List of PR's in this tag section.
    :rtype: dict(list(dict)), list(dict)
    :return: Issues and PR's sorted into sections.
    """
    issues_a = []
    sections_a = OrderedDict()

    if not self.options.sections:
        return [sections_a, issues]
    for key in self.options.sections:
        sections_a.update({key: []})

    self.parse_by_sections_for_issues(issues, sections_a, issues_a)
    self.parse_by_sections_for_pr(pull_requests, sections_a)
    return [sections_a, issues_a]
def exclude_issues_by_labels(self, issues):
    """
    Delete all issues with labels from the exclude-labels option.

    :param list(dict) issues: All issues for tag.
    :rtype: list(dict)
    :return: Filtered issues.
    """
    if not self.options.exclude_labels:
        return copy.deepcopy(issues)

    remove_issues = set()
    exclude_labels = self.options.exclude_labels
    include_issues = []
    for issue in issues:
        for label in issue["labels"]:
            if label["name"] in exclude_labels:
                remove_issues.add(issue["number"])
                break
    for issue in issues:
        if issue["number"] not in remove_issues:
            include_issues.append(issue)
    return include_issues
def filter_by_milestone(self, filtered_issues, tag_name, all_issues):
    """
    :param list(dict) filtered_issues: Filtered issues.
    :param str tag_name: Name (title) of tag.
    :param list(dict) all_issues: All issues.
    :rtype: list(dict)
    :return: Issues filtered according to milestone.
    """
    filtered_issues = self.remove_issues_in_milestones(filtered_issues)
    if tag_name:
        # add missed issues (according to milestones)
        issues_to_add = self.find_issues_to_add(all_issues, tag_name)
        filtered_issues.extend(issues_to_add)
    return filtered_issues
def find_issues_to_add(all_issues, tag_name):
    """
    Add all issues that should be in that tag according to milestone.

    :param list(dict) all_issues: All issues.
    :param str tag_name: Name (title) of tag.
    :rtype: List[dict]
    :return: Issues filtered by milestone.
    """
    filtered = []
    for issue in all_issues:
        if issue.get("milestone"):
            if issue["milestone"]["title"] == tag_name:
                iss = copy.deepcopy(issue)
                filtered.append(iss)
    return filtered
def remove_issues_in_milestones(self, filtered_issues):
    """
    :param list(dict) filtered_issues: Filtered issues.
    :rtype: list(dict)
    :return: List with issues removed that have a milestone with the
        same name as a tag.
    """
    # iterate over a copy, since entries are removed from the list
    for issue in list(filtered_issues):
        # leave issues without milestones
        if issue["milestone"]:
            # check, that this milestone is in tag list:
            for tag in self.filtered_tags:
                if tag["name"] == issue["milestone"]["title"]:
                    filtered_issues.remove(issue)
                    break
    return filtered_issues
def delete_by_time(self, issues, older_tag, newer_tag):
    """
    Filter issues that belong to the specified tag range.

    :param list(dict) issues: Issues to filter.
    :param dict older_tag: All issues before this tag's date will be
        excluded. May be special value, if **newer_tag** is the first
        tag. (Means **older_tag** is when the repo was created.)
    :param dict newer_tag: All issues after this tag's date will be
        excluded. May be title of unreleased section.
    :rtype: list(dict)
    :return: Filtered issues.
    """
    if not older_tag and not newer_tag:
        # if no tags are specified - return the array unchanged
        return copy.deepcopy(issues)

    newer_tag_time = self.get_time_of_tag(newer_tag)
    older_tag_time = self.get_time_of_tag(older_tag)
    filtered = []
    for issue in issues:
        if issue.get('actual_date'):
            rslt = older_tag_time < issue['actual_date'] <= newer_tag_time
            if rslt:
                filtered.append(copy.deepcopy(issue))
    return filtered
def include_issues_by_labels(self, all_issues):
    """
    Include issues with labels, specified in self.options.include_labels.

    :param list(dict) all_issues: All issues.
    :rtype: list(dict)
    :return: Filtered issues.
    """
    included_by_labels = self.filter_by_include_labels(all_issues)
    wo_labels = self.filter_wo_labels(all_issues)
    il = set([f["number"] for f in included_by_labels])
    wl = set([w["number"] for w in wo_labels])
    filtered_issues = []
    for issue in all_issues:
        if issue["number"] in il or issue["number"] in wl:
            filtered_issues.append(issue)
    return filtered_issues
def filter_wo_labels(self, all_issues):
    """
    Filter all issues that don't have a label.

    :rtype: list(dict)
    :return: Issues without labels.
    """
    issues_wo_labels = []
    if not self.options.add_issues_wo_labels:
        for issue in all_issues:
            if not issue['labels']:
                issues_wo_labels.append(issue)
    return issues_wo_labels
def filter_by_include_labels(self, issues):
    """
    Filter issues to include only issues with labels
    specified in include_labels.

    :param list(dict) issues: Pre-filtered issues.
    :rtype: list(dict)
    :return: Filtered issues.
    """
    if not self.options.include_labels:
        return copy.deepcopy(issues)
    filtered_issues = []
    include_labels = set(self.options.include_labels)
    for issue in issues:
        labels = [label["name"] for label in issue["labels"]]
        if include_labels.intersection(labels):
            filtered_issues.append(issue)
    return filtered_issues
def filter_by_labels(self, all_issues, kind):
    """
    Filter issues for include/exclude labels.

    :param list(dict) all_issues: All issues.
    :param str kind: Either "issues" or "pull requests".
    :rtype: list(dict)
    :return: Filtered issues.
    """
    filtered_issues = self.include_issues_by_labels(all_issues)
    filtered = self.exclude_issues_by_labels(filtered_issues)
    if self.options.verbose > 1:
        print("\tremaining {}: {}".format(kind, len(filtered)))
    return filtered
def get_filtered_pull_requests(self, pull_requests):
    """
    Fetch missing params for PR's and filter them by the specified
    options: include all PR's with labels from options.include_labels
    and exclude all with labels from options.exclude_labels.

    :param list(dict) pull_requests: All pull requests.
    :rtype: list(dict)
    :return: Filtered pull requests.
    """
    pull_requests = self.filter_by_labels(pull_requests, "pull requests")
    pull_requests = self.filter_merged_pull_requests(pull_requests)
    if self.options.verbose > 1:
        print("\tremaining pull requests: {}".format(len(pull_requests)))
    return pull_requests
def filter_merged_pull_requests(self, pull_requests):
    """
    Keep only merged PR's and fetch the missing required attributes
    for pull requests. Using the merged date is more correct than
    the closed date.

    :param list(dict) pull_requests: Pre-filtered pull requests.
    :rtype: list(dict)
    :return: Merged pull requests.
    """
    if self.options.verbose:
        print("Fetching merge date for pull requests...")
    closed_pull_requests = self.fetcher.fetch_closed_pull_requests()

    if not pull_requests:
        return []
    pulls = copy.deepcopy(pull_requests)
    for pr in pulls:
        fetched_pr = None
        for fpr in closed_pull_requests:
            if fpr['number'] == pr['number']:
                fetched_pr = fpr
        if fetched_pr:
            pr['merged_at'] = fetched_pr['merged_at']
            closed_pull_requests.remove(fetched_pr)

    # rebuild instead of removing while iterating:
    # drop PR's that were closed without being merged
    pulls = [pr for pr in pulls if pr.get('merged_at')]
    return pulls
def fetch_and_filter_tags(self):
    """
    Fetch and filter tags, fetch dates and sort them in time order.
    """
    self.all_tags = self.fetcher.get_all_tags()
    self.filtered_tags = self.get_filtered_tags(self.all_tags)
    self.fetch_tags_dates()
def sort_tags_by_date(self, tags):
    """
    Sort all tags by date.

    :param list(dict) tags: All tags.
    :rtype: list(dict)
    :return: Sorted list of tags.
    """
    if self.options.verbose:
        print("Sorting tags...")
    tags.sort(key=lambda x: self.get_time_of_tag(x))
    tags.reverse()
    return tags
def get_time_of_tag(self, tag):
    """
    Get date and time for tag, fetching it if not already cached.

    :param dict tag: Tag to get the datetime for.
    :rtype: datetime
    :return: datetime for specified tag.
    """
    if not tag:
        raise ChangelogGeneratorError("tag is nil")
    name_of_tag = tag["name"]
    time_for_name = self.tag_times_dict.get(name_of_tag, None)
    if time_for_name:
        return time_for_name
    time_string = self.fetcher.fetch_date_of_tag(tag)
    try:
        self.tag_times_dict[name_of_tag] = \
            timestring_to_datetime(time_string)
    except UnicodeWarning:
        print("ERROR ERROR:", tag)
        self.tag_times_dict[name_of_tag] = \
            timestring_to_datetime(time_string)
    return self.tag_times_dict[name_of_tag]
def detect_link_tag_time(self, tag):
    """
    Detect link, name and time for the specified tag.

    :param dict tag: Tag data.
    :rtype: str, str, datetime
    :return: Link, name and time of the tag.
    """
    # if tag is nil - set current time
    newer_tag_time = self.get_time_of_tag(tag) if tag \
        else datetime.datetime.now()

    # if it's a future release tag - use that value
    if tag["name"] == self.options.unreleased_label \
            and self.options.future_release:
        newer_tag_name = self.options.future_release
        newer_tag_link = self.options.future_release
    elif tag["name"] != self.options.unreleased_label:
        # use the tag name as the link
        newer_tag_name = tag["name"]
        newer_tag_link = newer_tag_name
    else:
        # put the unreleased label if there is no name for the tag
        newer_tag_name = self.options.unreleased_label
        newer_tag_link = "HEAD"
    return [newer_tag_link, newer_tag_name, newer_tag_time]
def version_of_first_item(self):
    """
    Try to detect the newest tag from self.options.base, otherwise
    return a special value indicating the creation of the repo.

    :rtype: str
    :return: Tag name to use as 'oldest' tag. May be special value,
        indicating the creation of the repo.
    """
    try:
        sections = read_changelog(self.options)
        return sections[0]["version"]
    except (IOError, TypeError):
        return self.get_temp_tag_for_repo_creation()
def get_temp_tag_for_repo_creation(self):
    """
    If not already cached, fetch the creation date of the repo, cache
    it and return the special value indicating the creation of the repo.

    :rtype: str
    :return: value indicating the creation
    """
    tag_date = self.tag_times_dict.get(REPO_CREATED_TAG_NAME, None)
    if not tag_date:
        tag_name, tag_date = self.fetcher.fetch_repo_creation_date()
        self.tag_times_dict[tag_name] = timestring_to_datetime(tag_date)
    return REPO_CREATED_TAG_NAME
def get_filtered_tags(self, all_tags):
    """
    Return tags after filtering by the lists provided by the
    --between-tags and --exclude-tags options.

    :param list(dict) all_tags: All tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    filtered_tags = self.filter_since_tag(all_tags)
    if self.options.between_tags:
        filtered_tags = self.filter_between_tags(filtered_tags)
    if self.options.due_tag:
        filtered_tags = self.filter_due_tag(filtered_tags)
    return self.filter_excluded_tags(filtered_tags)
def filter_since_tag(self, all_tags):
    """
    Filter tags according to the since_tag option.

    :param list(dict) all_tags: All tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    tag = self.detect_since_tag()
    if not tag or tag == REPO_CREATED_TAG_NAME:
        return copy.deepcopy(all_tags)

    filtered_tags = []
    tag_names = [t["name"] for t in all_tags]
    try:
        idx = tag_names.index(tag)
    except ValueError:
        self.warn_if_tag_not_found(tag, "since-tag")
        return copy.deepcopy(all_tags)

    since_tag = all_tags[idx]
    since_date = self.get_time_of_tag(since_tag)
    for t in all_tags:
        tag_date = self.get_time_of_tag(t)
        if since_date <= tag_date:
            filtered_tags.append(t)
    return filtered_tags
def filter_due_tag(self, all_tags):
    """
    Filter tags according to the due_tag option.

    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    filtered_tags = []
    tag = self.options.due_tag
    tag_names = [t["name"] for t in all_tags]
    try:
        idx = tag_names.index(tag)
    except ValueError:
        self.warn_if_tag_not_found(tag, "due-tag")
        return copy.deepcopy(all_tags)

    due_tag = all_tags[idx]
    due_date = self.get_time_of_tag(due_tag)
    for t in all_tags:
        tag_date = self.get_time_of_tag(t)
        if tag_date <= due_date:
            filtered_tags.append(t)
    return filtered_tags
def filter_between_tags(self, all_tags):
    """
    Filter tags according to the between_tags option.

    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    tag_names = [t["name"] for t in all_tags]
    between_tags = []
    for tag in self.options.between_tags:
        try:
            idx = tag_names.index(tag)
        except ValueError:
            raise ChangelogGeneratorError(
                "ERROR: can't find tag {0}, specified with "
                "--between-tags option.".format(tag))
        between_tags.append(all_tags[idx])

    between_tags = self.sort_tags_by_date(between_tags)

    if len(between_tags) == 1:
        # if only one tag was given with --between-tags, duplicate it
        # to generate the changelog only for that one tag
        between_tags.append(between_tags[0])

    older = self.get_time_of_tag(between_tags[1])
    newer = self.get_time_of_tag(between_tags[0])

    for tag in all_tags:
        if older < self.get_time_of_tag(tag) < newer:
            between_tags.append(tag)
    if older == newer:
        between_tags.pop(0)
    return between_tags
def filter_excluded_tags(self, all_tags):
    """
    Filter tags according to the exclude_tags and exclude_tags_regex
    options.

    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    filtered_tags = copy.deepcopy(all_tags)
    if self.options.exclude_tags:
        filtered_tags = self.apply_exclude_tags(filtered_tags)
    if self.options.exclude_tags_regex:
        filtered_tags = self.apply_exclude_tags_regex(filtered_tags)
    return filtered_tags
def apply_exclude_tags_regex(self, all_tags):
    """
    Filter tags according to the exclude_tags_regex option.

    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    filtered = []
    for tag in all_tags:
        if not re.match(self.options.exclude_tags_regex, tag["name"]):
            filtered.append(tag)
    if len(all_tags) == len(filtered):
        self.warn_if_nonmatching_regex()
    return filtered
def apply_exclude_tags(self, all_tags):
    """
    Filter tags according to the exclude_tags option.

    :param list(dict) all_tags: Pre-filtered tags.
    :rtype: list(dict)
    :return: Filtered tags.
    """
    filtered = copy.deepcopy(all_tags)
    tag_names = [t["name"] for t in all_tags]
    # warn about exclude-tags entries that match no existing tag
    for tag in self.options.exclude_tags:
        if tag not in tag_names:
            self.warn_if_tag_not_found(tag, "exclude-tags")
    for tag in all_tags:
        if tag["name"] in self.options.exclude_tags:
            filtered.remove(tag)
    return filtered
def random_chars(n):
    """
    Generate a random string from a-zA-Z0-9

    :param n: length of the string
    :return: the random string
    """
    return ''.join(random.SystemRandom().choice(
        string.ascii_letters + string.digits) for _ in range(n))
def random_letters(n):
    """
    Generate a random string from a-zA-Z

    :param n: length of the string
    :return: the random string
    """
    return ''.join(random.SystemRandom().choice(
        string.ascii_letters) for _ in range(n))
def random_numbers(n):
    """
    Generate a random string from 0-9

    :param n: length of the string
    :return: the random string
    """
    return ''.join(random.SystemRandom().choice(
        string.digits) for _ in range(n))
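Usage is straightforward; random.SystemRandom draws from the OS entropy pool, so the helpers are also usable for token-like strings (output differs per run, values below are illustrative):

import random
import string

print(random_chars(8))    # e.g. 'aZ3kQ9bX'
print(random_letters(5))  # e.g. 'QwErT'
print(random_numbers(6))  # e.g. '042917'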
def dict_search(d, k, v):
    """
    Search a dictionary list by key and value

    :param d: dictionary list
    :param k: key
    :param v: value
    :return: the index of the first dictionary in the list with the
        specified key / value, or None if there is no match
    """
    for i in range(len(d)):
        if d[i][k] == v:
            return i
    return None
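A small illustration of dict_search() (data invented for the example):

users = [{'id': 1, 'name': 'ann'}, {'id': 2, 'name': 'bob'}]
print(dict_search(users, 'name', 'bob'))  # 1
print(dict_search(users, 'name', 'eve'))  # None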
def dict_merge(a, b, k):
    """
    Merge two dictionary lists

    :param a: original list
    :param b: alternative list, whose elements replace the ones in
        the original list with the same key
    :param k: key
    :return: the merged list
    """
    c = a.copy()
    for j in range(len(b)):
        flag = False
        for i in range(len(c)):
            if c[i][k] == b[j][k]:
                c[i] = b[j].copy()
                flag = True
        if not flag:
            c.append(b[j].copy())
    return c
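Example merge, keying on 'id' (illustrative data): entries from b replace matching entries in a, unmatched ones are appended:

a = [{'id': 1, 'v': 'old'}, {'id': 2, 'v': 'keep'}]
b = [{'id': 1, 'v': 'new'}, {'id': 3, 'v': 'added'}]
print(dict_merge(a, b, 'id'))
# [{'id': 1, 'v': 'new'}, {'id': 2, 'v': 'keep'}, {'id': 3, 'v': 'added'}]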
def dict_sort(d, k):
    """
    Sort a dictionary list by key

    :param d: dictionary list
    :param k: key
    :return: sorted dictionary list
    """
    return sorted(d.copy(), key=lambda i: i[k])
def dict_top(d, k, n, reverse=False):
    """
    Return the top n entries of a dictionary list, sorted by key

    :param d: dictionary list
    :param k: key
    :param n: top n
    :param reverse: whether the sort order should be reversed
    :return: top n of the sorted dictionary list
    """
    h = list()
    for i in range(len(d)):
        heappush(h, (-d[i][k] if reverse else d[i][k], i))
    r = list()
    while len(r) < n and len(h) > 0:
        _, i = heappop(h)
        r.append(d[i].copy())
    return r
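For example, taking the two largest scores; with reverse=True the negated key makes the min-heap pop the largest first (illustrative data):

from heapq import heappush, heappop

scores = [{'n': 'a', 's': 3}, {'n': 'b', 's': 9}, {'n': 'c', 's': 5}]
print(dict_top(scores, 's', 2, reverse=True))
# [{'n': 'b', 's': 9}, {'n': 'c', 's': 5}]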
def dict_flatten(d):
    """
    Replace nested dict keys with underscore-connected keys

    :param d: the dictionary
    :return: flattened dictionary
    """
    if type(d) != dict:
        return d
    dd = dict()
    for key, value in d.items():
        if type(value) == dict:
            for k, v in value.items():
                dd[key + '_' + k] = dict_flatten(v)
        else:
            dd[key] = value
    return dd
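A one-level-nested example (illustrative config):

cfg = {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
print(dict_flatten(cfg))
# {'db_host': 'localhost', 'db_port': 5432, 'debug': True}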
def dict_format_type(d, source, formatter, include_list=True):
    """
    Replace the values of a dict with a certain type by other values

    :param d: the dictionary
    :param source: the source type, e.g., int
    :param formatter: the formatter method, e.g., return the string
        format of an int
    :param include_list: whether lists should be walked into;
        otherwise a list is treated like any other value
    :return: formatted dictionary
    """
    if not isinstance(d, dict):
        if isinstance(d, source):
            return formatter(d)
        return d
    dd = dict()
    for key, value in d.items():
        if include_list and isinstance(value, list):
            dd[key] = [dict_format_type(i, source, formatter)
                       for i in value]
        elif isinstance(value, dict):
            dd[key] = dict_format_type(value, source, formatter)
        elif isinstance(value, source):
            dd[key] = formatter(value)
        else:
            dd[key] = value
    return dd
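For instance, stringifying every int, including those inside lists and nested dicts (illustrative data):

data = {'a': 1, 'b': [2, 3], 'c': {'d': 4}, 'e': 'text'}
print(dict_format_type(data, int, str))
# {'a': '1', 'b': ['2', '3'], 'c': {'d': '4'}, 'e': 'text'}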
def dict_remove_key(d, k):
    """
    Recursively remove a key from a dict

    :param d: the dictionary
    :param k: key which should be removed
    :return: formatted dictionary
    """
    dd = dict()
    for key, value in d.items():
        if not key == k:
            if isinstance(value, dict):
                dd[key] = dict_remove_key(value, k)
            elif isinstance(value, list):
                # only recurse into dict items; keep scalars as-is
                dd[key] = [dict_remove_key(i, k) if isinstance(i, dict)
                           else i for i in value]
            else:
                dd[key] = value
    return dd
def dict_remove_value(d, v):
    """
    Recursively remove keys with a certain value from a dict

    :param d: the dictionary
    :param v: value which should be removed
    :return: formatted dictionary
    """
    dd = dict()
    for key, value in d.items():
        if not value == v:
            if isinstance(value, dict):
                dd[key] = dict_remove_value(value, v)
            elif isinstance(value, list):
                # only recurse into dict items; keep scalars as-is
                dd[key] = [dict_remove_value(i, v) if isinstance(i, dict)
                           else i for i in value]
            else:
                dd[key] = value
    return dd
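Both removers walk nested dicts and lists; a short illustration (data invented for the example):

record = {'id': 7, 'meta': {'id': 8, 'name': 'x'}, 'tags': [{'id': 9}]}
print(dict_remove_key(record, 'id'))
# {'meta': {'name': 'x'}, 'tags': [{}]}
print(dict_remove_value({'a': None, 'b': {'c': None, 'd': 1}}, None))
# {'b': {'d': 1}}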