message: string, 13 to 484 characters
diff: string, 38 to 4.63k characters
Change error msg for roll to use correct prefix. Previously used `!` as the prefix, while `.` is the correct one. Now imports prefix from bot.constants, so it'll always be up to date.
@@ -10,7 +10,7 @@ from discord.ext import commands from discord.ext.commands import BadArgument, Bot, Cog, Context, MessageConverter, clean_content from bot import utils -from bot.constants import Colours, Emojis +from bot.constants import Client, Colours, Emojis log = logging.getLogger(__name__) @@ -70,7 +70,7 @@ class Fun(Cog): dice = " ".join(self._get_random_die() for _ in range(num_rolls)) await ctx.send(dice) else: - raise BadArgument("`!roll` only supports between 1 and 6 rolls.") + raise BadArgument(f"`{Client.prefix}roll` only supports between 1 and 6 rolls.") @commands.command(name="uwu", aliases=("uwuwize", "uwuify",)) async def uwu_command(self, ctx: Context, *, text: clean_content(fix_channel_mentions=True)) -> None:
Inherit ReAgent optimizer from PyTorch optimizer Summary: Pull Request resolved: See title
@@ -53,7 +53,7 @@ from .utils import is_torch_optimizer @dataclass -class Optimizer: +class Optimizer(torch.optim.Optimizer): # This is the wrapper for optimizer + scheduler optimizer: torch.optim.Optimizer lr_schedulers: List[torch.optim.lr_scheduler._LRScheduler]
mitmdump: also set dumper_filter to default filter on startup Fixes
@@ -152,6 +152,7 @@ def mitmdump(args=None): # pragma: no cover return dict( save_stream_filter=v, readfile_filter=v, + dumper_filter=v, ) return {}
Update cubes.py: changed axes for specsum. Lines 311, 314 and 317: axis=(1,2) -> axis=1
@@ -308,13 +308,13 @@ def extract_aperture(cube, ap, r_mask=False, wcs=None, npixinmask = mask.sum() if method == 'mean': - specsum = nansum(cube[:, mask], axis=(1,2)) + specsum = nansum(cube[:, mask], axis=1) spec = specsum / npixinmask elif method == 'error': - specsum = nansum(cube[:, mask]**2, axis=(1,2)) + specsum = nansum(cube[:, mask]**2, axis=1) spec = (specsum)**0.5 / npixinmask else: - specsum = nansum(cube[:, mask], axis=(1,2)) + specsum = nansum(cube[:, mask], axis=1) if r_mask: return spec,mask
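A minimal sketch (not from the repository itself) of why `axis=1` is the right choice above: indexing a 3-D cube with a 2-D boolean mask collapses the two masked spatial axes into a single axis, so the result is 2-D and `axis=(1,2)` no longer exists.

```python
import numpy as np

cube = np.random.rand(5, 4, 3)        # (spectral, y, x)
mask = np.zeros((4, 3), dtype=bool)
mask[1:3, 1:] = True                  # select a few spatial pixels

masked = cube[:, mask]                # 2-D result: shape (5, mask.sum())
specsum = np.nansum(masked, axis=1)   # one value per spectral channel
assert specsum.shape == (5,)
# np.nansum(masked, axis=(1, 2)) would fail: axis 2 is out of bounds for a 2-D array
```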
Fixed incorrect file mode as pointed out in the GitHub issue. Changed file mode to 'a' instead of 'w'.
@@ -107,9 +107,9 @@ created is itself a group, in this case the `root group`, named ``/``: >>> f.name u'/' -Creating a subgroup is accomplished via the aptly-named ``create_group``. But we need to open the file in read/write mode first :: +Creating a subgroup is accomplished via the aptly-named ``create_group``. But we need to open the file in the "append" mode first (Read/write if exists, create otherwise) :: - >>> f = h5py.File('mydataset.hdf5', 'r+') + >>> f = h5py.File('mydataset.hdf5', 'a') >>> grp = f.create_group("subgroup") All ``Group`` objects also have the ``create_*`` methods like File::
Changed depot url template. Blizzard changed the depot url format. Updated with the new format.
@@ -20,7 +20,7 @@ class DepotFile(object): """ #: The url template for all DepotFiles - url_template = "http://{0}.depot.battle.net:1119/{1}.{2}" + url_template = "https://{0}-s2-depot.classic.blizzard.com/{1}.{2}" def __init__(self, bytes): #: The server the file is hosted on
Incidents: make crawl limit & sleep module-level constants Requested during review.
@@ -11,6 +11,14 @@ from bot.constants import Channels, Emojis, Roles, Webhooks log = logging.getLogger(__name__) +# Amount of messages for `crawl_task` to process at most on start-up - limited to 50 +# as in practice, there should never be this many messages, and if there are, +# something has likely gone very wrong +CRAWL_LIMIT = 50 + +# Seconds for `crawl_task` to sleep after adding reactions to a message +CRAWL_SLEEP = 2 + class Signal(Enum): """ @@ -114,19 +122,14 @@ class Incidents(Cog): Once this task is scheduled, listeners that change messages should await it. The crawl assumes that the channel history doesn't change as we go over it. + + Behaviour is configured by: `CRAWL_LIMIT`, `CRAWL_SLEEP`. """ await self.bot.wait_until_guild_available() incidents: discord.TextChannel = self.bot.get_channel(Channels.incidents) - # Limit the query at 50 as in practice, there should never be this many messages, - # and if there are, something has likely gone very wrong - limit = 50 - - # Seconds to sleep after adding reactions to a message - sleep = 2 - - log.debug(f"Crawling messages in #incidents: {limit=}, {sleep=}") - async for message in incidents.history(limit=limit): + log.debug(f"Crawling messages in #incidents: {CRAWL_LIMIT=}, {CRAWL_SLEEP=}") + async for message in incidents.history(limit=CRAWL_LIMIT): if not is_incident(message): log.trace(f"Skipping message {message.id}: not an incident") @@ -137,7 +140,7 @@ class Incidents(Cog): continue await add_signals(message) - await asyncio.sleep(sleep) + await asyncio.sleep(CRAWL_SLEEP) log.debug("Crawl task finished!")
MacOS is called "Macos" in settings, not "Macosx".
@@ -51,7 +51,7 @@ class NinjaConan(ConanFile): def package_info(self): # ensure ninja is executable - if str(self.settings.os_build) in ["Linux", "Macosx"]: + if str(self.settings.os_build) in ["Linux", "Macos"]: name = os.path.join(self.package_folder, "bin", "ninja") os.chmod(name, os.stat(name).st_mode | 0o111) self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
Change Supervisor._processes to a dict so access is more natural. This is just an implementation change and should not lead to any behavior change from what I've seen.
@@ -71,7 +71,7 @@ class Supervisor(NamedMixin): self._args = arguments if arguments is not None else () self._background = background - self._processes = [] + self._processes = {} self._terminating = False super(Supervisor, self).__init__() @@ -102,7 +102,15 @@ class Supervisor(NamedMixin): args=self._args ) child.start() - self._processes.append(child) + + # One might wonder if `child.pid` is guaranteed to be set at this + # point. I tried it experimentally, and read quickly the source + # at https://github.com/python/cpython/blob/2.7/Lib/multiprocessing/process.py + # which shows that `pid` ultimately translates to `os.getpid()` after the + # fork. So no big risk, but I add an assertion just in case anyway. + pid = child.pid + assert pid, "Cannot add process with pid={}: {}".format(pid, child) + self._processes[pid] = child def target(self): """ @@ -114,7 +122,7 @@ class Supervisor(NamedMixin): # protection against double use of ".start()" if len(self._processes) != 0: - raise Exception("Child processes list is not empty, already called .start() ?") + raise Exception("Child processes map is not empty, already called .start() ?") # start worker processes self._start_worker_processes() @@ -124,7 +132,7 @@ class Supervisor(NamedMixin): # if terminating, join all processes and exit the loop so we finish # the supervisor process if self._terminating: - for proc in self._processes: + for proc in self._processes.values(): proc.join() break @@ -169,12 +177,12 @@ class Supervisor(NamedMixin): logger.debug(" child: name=%s pid=%d status=%s" % (name, child.pid, status)) if status in (psutil.STATUS_ZOMBIE, "unknown"): logger.debug(" process {} is zombie, will cleanup".format(child.pid)) - to_clean = [p for p in self._processes if p.pid == child.pid] - for process in to_clean: + to_clean = self._processes.get(child.pid) + if to_clean: # join process to clean it up - process.join() + to_clean.join() # remove the process from self._processes so it will be replaced later - self._processes.remove(process) + del self._processes[child.pid] # compensate lost children here self._start_worker_processes() @@ -202,9 +210,9 @@ class Supervisor(NamedMixin): """ Sends a stop (SIGTERM) signal to all worker processes. """ - for child in self._processes: - logger.info("process: sending SIGTERM to pid={}".format(child.pid)) - os.kill(child.pid, signal.SIGTERM) + for pid in self._processes.keys(): + logger.info("process: sending SIGTERM to pid={}".format(pid)) + os.kill(pid, signal.SIGTERM) def payload_friendly_name(self): payload = self._payload
TypeRepo.root_node: assert result is not None and refer to CompileCtx TN:
@@ -2831,7 +2831,9 @@ class TypeRepo(object): Shortcut to get the root AST node. :rtype: ASTNodeType """ - return StructMetaclass.root_grammar_class + result = get_context().root_grammar_class + assert result + return result @property def defer_root_node(self):
Avoid trying to deactivate the restricted session. Calling logout when the session is already dead results in calling the deactivate method on the gate middleware, which does not exist.
@@ -99,6 +99,8 @@ class WorkerGate(): if resp.object['type'] == 'http': self.q_http_replies.broadcast(resp) if resp.object['type'] == 'terminate': + if self.session != self.gateway_middleware: + # Not the restricted session, we can disable it self.session.deactivate() if resp.object['type'] == 'restart-master': aj.restart()
lightbox: Fix alignment of x button in image view menu. Fixes
color: hsla(0, 0%, 100%, 0.8); font-size: 2rem; - margin: 24px 20px 0 0; + margin: 11px 20px 0 0; transform: scaleY(0.75); font-weight: 300;
Update Loops.md Added a missing semicolon at the end of the tutorial array. Added a working solution.
@@ -50,7 +50,7 @@ In this exercise, you will need to loop through and print out all even numbers f Tutorial Code ------------- - @NUMBERS = (951,402,984,651,360,69,408,319,601,485,980,507,725,547,544,615,83,165,141,501,263,617,865,575,219,390,237,412,566,826,248,866,950,626,949,687,217,815,67,104,58,512,24,892,894,767,553,81,379,843,831,445,742,717,958,609,842,451,688,753,854,685,93,857,440,380,126,721,328,753,470,743,527) + @NUMBERS = (951,402,984,651,360,69,408,319,601,485,980,507,725,547,544,615,83,165,141,501,263,617,865,575,219,390,237,412,566,826,248,866,950,626,949,687,217,815,67,104,58,512,24,892,894,767,553,81,379,843,831,445,742,717,958,609,842,451,688,753,854,685,93,857,440,380,126,721,328,753,470,743,527); # write your code below @@ -66,3 +66,10 @@ Expected Output Solution -------- + + @NUMBERS = (951,402,984,651,360,69,408,319,601,485,980,507,725,547,544,615,83,165,141,501,263,617,865,575,219,390,237,412,566,826,248,866,950,626,949,687,217,815,67,104,58,512,24,892,894,767,553,81,379,843,831,445,742,717,958,609,842,451,688,753,854,685,93,857,440,380,126,721,328,753,470,743,527); + + foreach (@NUMBERS) { + print $_ . "\n" if ($_ % 2 == 0); + exit if ($_ == 237); + }
renamed measure() to expectation(); fixed typo in call to backend.get_probabilities
@@ -151,7 +151,7 @@ class ProjectQDevice(Device): def execute(self): """ """ - #todo: I hope this function will become superflous, see https://github.com/XanaduAI/openqml/issues/18 + #todo: I hope this function will become superfluous, see https://github.com/XanaduAI/openqml/issues/18 self._out = self.execute_queued() def execute_queued(self): @@ -165,7 +165,7 @@ class ProjectQDevice(Device): #expectation_values[tuple(operation.wires)] = self.apply(operator_map[operation.name](*p), self.reg, operation.wires) self.apply(operation.name, operation.wires, *par) - result = self.measure(self._observe.name, self._observe.wires) + result = self.expectation(self._observe.name, self._observe.wires) self._deallocate() return result @@ -185,8 +185,8 @@ class ProjectQDevice(Device): else: gate | tuple([self.reg[i] for i in wires]) - def measure(self, observable, wires): - raise NotImplementedError("measure() is not yet implemented for this backend") + def expectation(self, observable, wires): + raise NotImplementedError("expectation() is not yet implemented for this backend") def shutdown(self): """Shutdown. @@ -268,7 +268,7 @@ class ProjectQSimulator(ProjectQDevice): super().reset() - def measure(self, observable, wires): + def expectation(self, observable, wires): self.eng.flush(deallocate_qubits=False) if observable == 'PauliX' or observable == 'PauliY' or observable == 'PauliZ': expectation_value = self.eng.backend.get_expectation_value(pq.ops.QubitOperator(str(observable)[-1]+'0'), self.reg) @@ -339,10 +339,10 @@ class ProjectQIBMBackend(ProjectQDevice): raise ValueError('An IBM Quantum Experience password specified via the "password" keyword argument is required') kwargs['backend'] = 'IBMBackend' - kwargs['verbose'] = True #todo: remove when done testing - kwargs['log'] = True #todo: remove when done testing - kwargs['use_hardware'] = True #todo: remove when done testing - kwargs['num_runs'] = 3 #todo: remove when done testing + #kwargs['verbose'] = True #todo: remove when done testing + #kwargs['log'] = True #todo: remove when done testing + #kwargs['use_hardware'] = False #todo: remove when done testing + #kwargs['num_runs'] = 3 #todo: remove when done testing super().__init__(wires, **kwargs) def reset(self): @@ -355,14 +355,14 @@ class ProjectQIBMBackend(ProjectQDevice): self.eng = pq.MainEngine(backend, engine_list=pq.setups.ibm.get_engine_list()) super().reset() - def measure(self, observable, wires): + def expectation(self, observable, wires): pq.ops.R(0) | self.reg[0]# todo:remove this once https://github.com/ProjectQ-Framework/ProjectQ/issues/259 is resolved pq.ops.All(pq.ops.Measure) | self.reg self.eng.flush() if observable == 'PauliZ': - probabilities = self.eng.backend.get_probabilities(self.reg[wires]) + probabilities = self.eng.backend.get_probabilities([self.reg[wires]]) #print("IBM probabilities="+str(probabilities)) if '1' in probabilities: expectation_value = 2*probabilities['1']-1
Docker: Add freetype-dev & replace libjpeg with libjpeg-turbo. freetype-dev is required for loading fonts with pillow. * Remove zlib as zlib-dev installs zlib anyway
@@ -12,8 +12,8 @@ RUN apk add --no-cache --update \ git \ libffi-dev \ # Pillow dependencies - jpeg-dev \ - zlib \ + freetype-dev \ + libjpeg-turbo-dev \ zlib-dev RUN mkdir /bot
fix(test.py): assert the returned path against pathlib.Path instead of pathlib.WindowsPath so the check passes on non-Windows platforms
@@ -26,7 +26,7 @@ def test_path_func(): :rtype: pathlib.Path """ temp_path = get_ths_js("ths.js") - assert isinstance(temp_path, pathlib.WindowsPath) + assert isinstance(temp_path, pathlib.Path) if __name__ == "__main__":
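A small illustration (not part of the patch) of why asserting against the concrete `WindowsPath` class fails off Windows, while checking the base `pathlib.Path` class is portable:

```python
import pathlib

p = pathlib.Path("ths.js")
# pathlib.Path() instantiates the flavour for the current OS:
# PosixPath on Linux/macOS, WindowsPath on Windows. Both subclass Path.
assert isinstance(p, pathlib.Path)           # passes on every platform
print(isinstance(p, pathlib.WindowsPath))    # False everywhere except Windows
```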
Multiparts working. Multiparts working for different quantities. Missing: grouping of the sub-quantities (change in the `kicost.py` file).
@@ -127,10 +127,17 @@ def subpart_qty(component): try: if logger.isEnabledFor(DEBUG_OBSESSIVE): print('Qty>>',component.refs,'>>', - component['manf#_subqty'], '*', len(component.refs)) - string = '=ceiling({{}}*{subqty}*{qty})'.format( - subqty=component['manf#_subqty'], + component.fields.get('manf#_subqty'), '*', + component.fields.get('manf#')) + subqty = component.fields.get('manf#_subqty') + print('>>>',subqty,'*',component.fields.get('manf#')) + string = '={{}}*{qty}'.format(qty=len(component.refs)) + if subqty != '1' and subqty != None: + string = '=CEILING({{}}*({subqty})*{qty})'.format( + subqty=subqty, qty=len(component.refs)) + else: + string = '={{}}*{qty}'.format(qty=len(component.refs)) except (KeyError, TypeError): if logger.isEnabledFor(DEBUG_OBSESSIVE): print('Qty>>',component.refs,'>>',len(component.refs))
Allow empty node_ids to override a manifest file. In importcontent, `node_ids is None` needs to be treated differently from `node_ids=[]`.
@@ -294,7 +294,7 @@ class Command(AsyncCommand): if manifest_file: content_manifest.read_file(manifest_file) use_content_manifest = True - elif path and detect_manifest and not (node_ids or exclude_node_ids): + elif path and detect_manifest and node_ids is None and exclude_node_ids is None: manifest_path = os.path.join(path, "content", "manifest.json") if content_manifest.read(manifest_path): use_content_manifest = True @@ -627,7 +627,9 @@ class Command(AsyncCommand): return FILE_TRANSFERRED, data_transferred def handle_async(self, *args, **options): - if options["manifest"] and (options["node_ids"] or options["exclude_node_ids"]): + if options["manifest"] and ( + options["node_ids"] is not None or options["exclude_node_ids"] is not None + ): raise CommandError( "The --manifest option must not be combined with --node_ids or --exclude_node_ids." )
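A minimal sketch (not Kolibri code) of the distinction the patch relies on: both `None` and `[]` are falsy, so the old truthiness test could not tell "option omitted" apart from "option passed an empty list".

```python
exclude_node_ids = None
for node_ids in (None, []):
    old_check = not (node_ids or exclude_node_ids)             # True for both cases
    new_check = node_ids is None and exclude_node_ids is None  # True only when omitted
    print(repr(node_ids), old_check, new_check)
# None True True
# []   True False
```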
Upgrade pandas version in CI Since pandas 1.1.5 was released, we should match the behavior to the latest version.
@@ -107,7 +107,7 @@ jobs: pyarrow-version: 1.0.1 - python-version: 3.8 spark-version: 3.0.1 - pandas-version: 1.1.4 + pandas-version: 1.1.5 pyarrow-version: 2.0.0 default-index-type: 'distributed-sequence' env:
Correct example configuration for databases. The loader module is common to databases, connectors and skills, so the configuration needs to follow the same format, i.e. each item under the databases 'sequence' needs a 'name' key and various other parameters.
@@ -92,7 +92,7 @@ connectors: ## Database modules (optional) # databases: -# mongo: +# - name: mongo # host: "my host" # (Optional) default "localhost" # port: "12345" # (Optional) default "27017" # database: "mydatabase" # (Optional) default "opsdroid"
Prevent artifactual "running from outside your current environment" error. Prevent warning when `shutil.which` returns a symlink.
@@ -299,10 +299,11 @@ def _check_environment_and_redirect(): If not, this utility tries to redirect the ``lightning`` call to the environment executable (prompting the user to install lightning for them there if needed). """ - env_executable = shutil.which("python") + env_executable = os.path.realpath(shutil.which("python")) + sys_executable = os.path.realpath(sys.executable) # on windows, the extension might be different, where one uses `.EXE` and the other `.exe` - if env_executable.lower() != sys.executable.lower(): + if env_executable.lower() != sys_executable.lower(): logger.info( "Lightning is running from outside your current environment. Switching to your current environment." )
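A minimal, hypothetical demonstration (POSIX-only, not Lightning code) of the symlink problem: the raw path of a symlink never string-compares equal to its target, but the `os.path.realpath()` of both sides does.

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    target = os.path.join(d, "python3.10")   # stand-in for the real interpreter
    open(target, "w").close()
    link = os.path.join(d, "python")         # stand-in for a virtualenv's symlink
    os.symlink(target, link)                 # assumes a POSIX host

    assert link != target                                       # raw strings differ
    assert os.path.realpath(link) == os.path.realpath(target)   # resolved paths match
```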
create volume type with repeated name Test that creating a volume type with a repeated name will fail
@@ -69,3 +69,13 @@ class VolumeTypesNegativeTest(base.BaseVolumeAdminTest): self.assertRaises( lib_exc.NotFound, self.create_encryption_type, **create_kwargs) + + @decorators.attr(type=['negative']) + @decorators.idempotent_id('969b10c7-3d77-4e1b-a4f2-2d265980f7e5') + def test_create_with_repeated_name(self): + """Test creating volume type with a repeated name will fail""" + volume_type_name = self.create_volume_type()['name'] + self.assertRaises( + lib_exc.Conflict, + self.admin_volume_types_client.create_volume_type, + name=volume_type_name)
Added a TODO comment. I find that it is easier to search for what to remove, once something happens, in code which has TODO comments. I added this one for good measure.
@@ -430,6 +430,7 @@ ctx_New(HPyContext ctx, HPy h_type, void **data) return HPy_NULL; #if PY_VERSION_HEX < 0x03080000 // Workaround for Python issue 35810; no longer necessary in Python 3.8 + // TODO: Remove this workaround once we no longer support Python versions older than 3.8 Py_INCREF(tp); #endif
gtk detailedlistrenderers: Python 3.5 compat. enum.auto() is only available in Python >= 3.6.
-from enum import Enum, auto +from enum import Enum import html from toga_gtk.icons import Icon @@ -33,9 +33,9 @@ class DetailedListRenderer: class IconTextRendererColumns(Enum): """ a single column contents""" - ICON = auto() - TITLE = auto() - TITLE_SUBTITLE = auto() + ICON = 1 + TITLE = 2 + TITLE_SUBTITLE = 3 class IconTextRenderer(DetailedListRenderer):
integrations-docs: Update text in `create-bot-construct-url.md`. Revises the text about including a stream and a topic in incoming webhook URLs in `create-bot-construct-url.md` for clarity and consistency.
@@ -6,16 +6,20 @@ bot using the bot's API key and the desired stream name: {!webhook-url.md!} Modify the parameters of the URL above, where `api_key` is the API key -of your Zulip bot, and `stream` is the [URL-encoded](https://www.urlencoder.org/) -stream name you want the notifications sent to. If you do not specify a -`stream`, the bot will send notifications via PMs to the creator of the bot. +of your Zulip bot, and `stream` is the [URL-encoded][url-encoder] +stream name you want the notifications sent to. If you don't specify a +`stream`, the bot will send notifications via private messages to the +creator of the bot. -If you'd like this integration to always send to a specific topic, -just include the (URL-encoded) topic as an additional parameter -(E.g. for `your topic`, append `&topic=your%20topic` to the URL). +If you'd like this integration to always send notifications to a +specific topic in the specified stream, just include the +[URL-encoded][url-encoder] topic as an additional parameter. E.g., +for `your topic`, append `&topic=your%20topic` to the URL. {% if all_event_types is defined %} {!event-filtering-instruction.md!} {% endif %} + +[url-encoder]: https://www.urlencoder.org/
'xfail' markers without a condition no longer rely on the underlying `Item` deriving from `PyobjMixin`
@@ -119,7 +119,6 @@ class MarkEvaluator: if hasattr(self, 'result'): return self.result if self.holder: - d = self._getglobals() if self.holder.args or 'condition' in self.holder.kwargs: self.result = False # "holder" might be a MarkInfo or a MarkDecorator; only @@ -135,6 +134,7 @@ class MarkEvaluator: for expr in args: self.expr = expr if isinstance(expr, py.builtin._basestring): + d = self._getglobals() result = cached_eval(self.item.config, expr, d) else: if "reason" not in kwargs:
Fix out-of-order group names. Non-matching order results in NaNs reported for S2.
@@ -68,7 +68,7 @@ def analyze(problem, Y, calc_second_order=True, num_resamples=100, if not groups: D = problem['num_vars'] else: - D = len(set(problem['groups'])) + _, D = extract_group_names(groups) if calc_second_order and Y.size % (2 * D + 2) == 0: N = int(Y.size / (2 * D + 2)) @@ -107,7 +107,6 @@ def analyze(problem, Y, calc_second_order=True, num_resamples=100, S['S2_conf'][j, k] = Z * second_order(A[r], AB[r, j], AB[r, k], BA[r, j], B[r]).std(ddof=1) - else: tasks, n_processors = create_task_list( D, calc_second_order, n_processors) @@ -161,9 +160,9 @@ def create_Si_dict(D, calc_second_order): for k in ('S1', 'S1_conf', 'ST', 'ST_conf')) if calc_second_order: - S['S2'] = np.zeros((D, D)) + S['S2'] = np.empty((D, D)) S['S2'][:] = np.nan - S['S2_conf'] = np.zeros((D, D)) + S['S2_conf'] = np.empty((D, D)) S['S2_conf'][:] = np.nan return S @@ -302,7 +301,7 @@ def Si_to_pandas_dict(S_dict): if len(names) > 2: idx = list(combinations(names, 2)) else: - idx = (tuple(set(names)), ) + idx = (names, ) second_order = { 'S2': [S_dict['S2'][names.index(i[0]), names.index(i[1])]
Remove unnecessary SetDefaultDevice(nullptr) (the new context session resets the default device to nullptr)
@@ -27,20 +27,12 @@ namespace { class ArrayDeviceTest : public ::testing::Test { protected: - void SetUp() override { - context_session_.emplace(); - orig_ = internal::GetDefaultDeviceNoExcept(); - SetDefaultDevice(nullptr); - } + void SetUp() override { context_session_.emplace(); } - void TearDown() override { - SetDefaultDevice(orig_); - context_session_.reset(); - } + void TearDown() override { context_session_.reset(); } private: nonstd::optional<testing::ContextSession> context_session_; - Device* orig_; }; // Check that Array data exists on the specified device
Change openstack-dev to openstack-discuss Depends-On:
@@ -5,7 +5,7 @@ description = Service for storing sensitive client information for OpenStack description-file = README.md author = OpenStack -author-email = [email protected] +author-email = [email protected] home-page = https://docs.openstack.org/barbican/latest/ classifier = Environment :: OpenStack
test: string values inside qb objects. Case inside update queries needs a recursive call; the param wrapper got lost.
@@ -116,6 +116,7 @@ class TestParameterization(unittest.TestCase): Case() .when(DocType.search_fields == "value", "other_value") .when(Coalesce(DocType.search_fields == "subject_in_function"), "true_value") + .else_("Overdue") ) ) @@ -128,6 +129,32 @@ class TestParameterization(unittest.TestCase): self.assertEqual(params["param2"], "other_value") self.assertEqual(params["param3"], "subject_in_function") self.assertEqual(params["param4"], "true_value") + self.assertEqual(params["param5"], "Overdue") + + def test_case_in_update(self): + DocType = frappe.qb.DocType("DocType") + query = ( + frappe.qb.update(DocType) + .set( + "parent", + Case() + .when(DocType.search_fields == "value", "other_value") + .when(Coalesce(DocType.search_fields == "subject_in_function"), "true_value") + .else_("Overdue") + ) + ) + + self.assertTrue("walk" in dir(query)) + query, params = query.walk() + + self.assertIn("%(param1)s", query) + self.assertIn("param1", params) + self.assertEqual(params["param1"], "value") + self.assertEqual(params["param2"], "other_value") + self.assertEqual(params["param3"], "subject_in_function") + self.assertEqual(params["param4"], "true_value") + self.assertEqual(params["param5"], "Overdue") + @run_only_if(db_type_is.MARIADB)
The fix just casts the value to int, as there was already a check to ensure it will be an int.
@@ -249,7 +249,7 @@ def discretize_oversample_1D(model, x_range, factor=10): # Evaluate model on oversampled grid x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), - num = (x_range[1] - x_range[0]) * factor) + num=int((x_range[1] - x_range[0]) * factor)) values = model(x) @@ -265,10 +265,10 @@ def discretize_oversample_2D(model, x_range, y_range, factor=10): # Evaluate model on oversampled grid x = np.linspace(x_range[0] - 0.5 * (1 - 1 / factor), x_range[1] - 0.5 * (1 + 1 / factor), - num = (x_range[1] - x_range[0]) * factor) + num=int((x_range[1] - x_range[0]) * factor)) y = np.linspace(y_range[0] - 0.5 * (1 - 1 / factor), y_range[1] - 0.5 * (1 + 1 / factor), - num = (y_range[1] - y_range[0]) * factor) + num=int((y_range[1] - y_range[0]) * factor)) x_grid, y_grid = np.meshgrid(x, y) values = model(x_grid, y_grid)
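A short sketch of the failure the cast avoids: arithmetic on float range bounds yields a float `num`, and recent NumPy versions refuse a non-integer sample count (older releases only warned). The exact exception message varies by NumPy version.

```python
import numpy as np

x_range = (0.0, 4.0)
factor = 10
num = (x_range[1] - x_range[0]) * factor   # 40.0 -- a float, even though it is integral
try:
    np.linspace(0, 1, num=num)             # recent NumPy raises TypeError for float num
except TypeError as exc:
    print(type(exc).__name__, exc)
np.linspace(0, 1, num=int(num))            # fine once cast to int
```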
All trainees: factor out and improve the queryset Changes: * queryset "generation" is moved to a separate function * indentation was improved * is_*_instructor Sum(Case(When)) spaghetti was simplified using a Python sub-function * a new, general is_instructor annotation was added; it works with all instructor badges.
@@ -94,37 +94,50 @@ class TrainingProgressDelete(RedirectSupportMixin, OnlyForAdminsMixin, success_url = reverse_lazy('all_trainees') -@admin_required -def all_trainees(request): - filter = TraineeFilter( - request.GET, - queryset=Person.objects +def all_trainees_queryset(): + def has_badge(badge): + return Sum(Case(When(badges__name=badge, then=1), + default=0, + output_field=IntegerField())) + + return ( + Person.objects .annotate_with_instructor_eligibility() .prefetch_related( - Prefetch('task_set', + Prefetch( + 'task_set', to_attr='training_tasks', queryset=Task.objects.filter(role__name='learner', - event__tags__name='TTT')), + event__tags__name='TTT') + ), 'training_tasks__event', 'trainingrequest_set', 'trainingprogress_set', 'trainingprogress_set__requirement', 'trainingprogress_set__evaluated_by', ).annotate( - is_swc_instructor=Sum(Case(When(badges__name='swc-instructor', - then=1), - default=0, - output_field=IntegerField())), - is_dc_instructor=Sum(Case(When(badges__name='dc-instructor', - then=1), - default=0, - output_field=IntegerField())), - is_lc_instructor=Sum(Case(When(badges__name='lc-instructor', - then=1), + is_swc_instructor=has_badge('swc-instructor'), + is_dc_instructor=has_badge('dc-instructor'), + is_lc_instructor=has_badge('lc-instructor'), + is_instructor=Sum( + Case( + When( + badges__name__in=Badge.INSTRUCTOR_BADGES, + then=1 + ), default=0, - output_field=IntegerField())), + output_field=IntegerField() ) - .order_by('family', 'personal') + ), + ).order_by('family', 'personal') + ) + + +@admin_required +def all_trainees(request): + filter = TraineeFilter( + request.GET, + queryset=all_trainees_queryset(), ) trainees = get_pagination_items(request, filter.qs)
fw/exception: Add 'message' property to `SerializerSyntaxError`. Allow for the fact that Exceptions do not have a message attribute in Python 3, so mimic the functionality.
@@ -89,6 +89,11 @@ class SerializerSyntaxError(Exception): """ Error loading a serialized structure from/to a file handle. """ + @property + def message(self): + if self.args: + return self.args[0] + return '' def __init__(self, message, line=None, column=None): super(SerializerSyntaxError, self).__init__(message)
tests/llvm/random: Random seed should be int32. Otherwise it hides the next argument (number of requested threads). Fixes ("cuda: Allow configurable thread block size")
@@ -37,7 +37,7 @@ def test_random_int(benchmark, mode): init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.byref_arg_types[0]() gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state)) - init_fun.cuda_call(gpu_state, np.int64(SEED)) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32') out = np.asarray([0], dtype=np.int64) @@ -81,7 +81,7 @@ def test_random_float(benchmark, mode): init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.byref_arg_types[0]() gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state)) - init_fun.cuda_call(gpu_state, np.int64(SEED)) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') out = np.asfarray([0.0], dtype=np.float64) @@ -120,7 +120,7 @@ def test_random_normal(benchmark, mode): init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.byref_arg_types[0]() gpu_state = pnlvm.jit_engine.pycuda.driver.to_device(bytearray(state)) - init_fun.cuda_call(gpu_state, np.int64(SEED)) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') out = np.asfarray([0.0], dtype=np.float64)
logging: Organize logger configs for easier reference. This is a pure reordering.
@@ -1118,47 +1118,30 @@ LOGGING = { }, }, 'loggers': { + # root logger '': { 'handlers': DEFAULT_ZULIP_HANDLERS, 'filters': ['require_logging_enabled'], 'level': 'INFO', 'propagate': False, }, + + # Django, alphabetized 'django': { 'handlers': DEFAULT_ZULIP_HANDLERS, 'level': 'INFO', 'propagate': False, }, - 'zulip.requests': { - 'handlers': DEFAULT_ZULIP_HANDLERS, - 'level': 'INFO', - 'propagate': False, - }, - 'zulip.queue': { - 'handlers': DEFAULT_ZULIP_HANDLERS, - 'level': 'WARNING', - 'propagate': False, - }, - 'zulip.management': { - 'handlers': ['file', 'errors_file'], - 'level': 'INFO', - 'propagate': False, - }, - 'requests': { + 'django.request': { 'handlers': DEFAULT_ZULIP_HANDLERS, 'level': 'WARNING', 'propagate': False, + 'filters': ['skip_boring_404s'], }, 'django.security.DisallowedHost': { 'handlers': ['file'], 'propagate': False, }, - 'django.request': { - 'handlers': DEFAULT_ZULIP_HANDLERS, - 'level': 'WARNING', - 'propagate': False, - 'filters': ['skip_boring_404s'], - }, 'django.server': { 'handlers': ['console', 'file'], 'propagate': False, @@ -1170,22 +1153,47 @@ LOGGING = { 'level': 'DEBUG', 'propagate': False, }, - 'zulip.zerver.webhooks': { + + ## Uncomment the following to get all database queries logged to the console + # 'django.db': { + # 'handlers': ['console'], + # 'level': 'DEBUG', + # 'propagate': False, + # }, + + # other libraries, alphabetized + 'requests': { + 'handlers': DEFAULT_ZULIP_HANDLERS, + 'level': 'WARNING', + 'propagate': False, + }, + + # our own loggers, alphabetized + 'zulip.management': { 'handlers': ['file', 'errors_file'], 'level': 'INFO', 'propagate': False, }, + 'zulip.queue': { + 'handlers': DEFAULT_ZULIP_HANDLERS, + 'level': 'WARNING', + 'propagate': False, + }, + 'zulip.requests': { + 'handlers': DEFAULT_ZULIP_HANDLERS, + 'level': 'INFO', + 'propagate': False, + }, 'zulip.soft_deactivation': { 'handlers': ['file', 'errors_file'], 'level': 'INFO', 'propagate': False, - } - ## Uncomment the following to get all database queries logged to the console - # 'django.db': { - # 'handlers': ['console'], - # 'level': 'DEBUG', - # 'propagate': False, - # }, + }, + 'zulip.zerver.webhooks': { + 'handlers': ['file', 'errors_file'], + 'level': 'INFO', + 'propagate': False, + }, } }
added OOPSpam API * added OOPSpam API * added OOPSpam; removed "API" from the name
@@ -234,6 +234,7 @@ API | Description | Auth | HTTPS | CORS | Link | | License-API | Unofficial REST API for choosealicense.com | No | Yes | No | [Go!](https://github.com/cmccandless/license-api/blob/master/README.md) | | LiveEdu | Live Coding Streaming | `OAuth` | Yes | Unknown | [Go!](https://www.liveedu.tv/developer/applications/) | | Myjson | A simple JSON store for your web or mobile app | No | No | Unknown | [Go!](http://myjson.com/api) | +| OOPSpam | Multiple spam filtering service | No | Yes | Yes | [Go!](https://oopspam.com/) | | Plino | Spam filtering system | No | Yes | Unknown | [Go!](https://plino.herokuapp.com/) | | Public APIs | A collective list of free JSON APIs for use in web development | No | Yes | Unknown | [Go!](https://github.com/davemachado/public-api) | | QR code | Create an easy to read QR code and URL shortener | No | Yes | Yes | [Go!](http://qrtag.net/api/) |
fix PBI max per year in proforma. Because PBI max in Mosel uses pwf_prod_incent with PV degradation for PV techs, the max is lowered by degradation every year.
@@ -2583,7 +2583,7 @@ def generate_proforma(scenariomodel, output_file_path): for year in range(financial.analysis_years): hcs['{}{}'.format(upper_case_letters[year + 2], current_row)] = ( "=IF({year} < {pbi_year_limit}, " - "MIN({dol_per_kwh} * {pv_kwh}, {pbi_max}), 0)" + "MIN({dol_per_kwh} * {pv_kwh}, {pbi_max} * (1 - {pv_degradation_rate}/100)^{year}), 0)" ).format( year=year, pbi_year_limit=pv_cell_locations[idx]["pv_pbi_years_cell"],
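A tiny worked example (hypothetical numbers, not taken from the model) of the cap formula introduced above: the PBI maximum shrinks by the PV degradation factor each year.

```python
pbi_max = 1000.0         # hypothetical year-0 cap, $/year
degradation_rate = 0.5   # hypothetical PV degradation, %/year
for year in (0, 1, 10):
    cap = pbi_max * (1 - degradation_rate / 100) ** year
    print(year, round(cap, 2))
# 0 1000.0
# 1 995.0
# 10 951.11
```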
Support parsing of named Unicode escapes outside of the BMP in CPython versions with a 2-byte Unicode representation. Closes
@@ -9,16 +9,16 @@ from __future__ import absolute_import import cython cython.declare(Nodes=object, ExprNodes=object, EncodedString=object, bytes_literal=object, StringEncoding=object, - FileSourceDescriptor=object, lookup_unicodechar=object, + FileSourceDescriptor=object, lookup_unicodechar=object, unicode_category=object, Future=object, Options=object, error=object, warning=object, Builtin=object, ModuleNode=object, Utils=object, - re=object, _parse_escape_sequences=object, _unicode=object, _bytes=object, - partial=object, reduce=object, _IS_PY3=cython.bint) + re=object, sys=object, _parse_escape_sequences=object, _unicode=object, _bytes=object, + partial=object, reduce=object, _IS_PY3=cython.bint, _IS_2BYTE_UNICODE=cython.bint) from io import StringIO import re import sys -from unicodedata import lookup as lookup_unicodechar +from unicodedata import lookup as lookup_unicodechar, category as unicode_category from functools import partial, reduce from .Scanning import PyrexScanner, FileSourceDescriptor, StringSourceDescriptor @@ -34,6 +34,7 @@ from . import Future from . import Options _IS_PY3 = sys.version_info[0] >= 3 +_IS_2BYTE_UNICODE = sys.maxunicode == 0xffff class Ctx(object): @@ -974,11 +975,21 @@ def _append_escape_sequence(kind, builder, escape_sequence, s): elif c in u'NUu' and kind in ('u', 'f', ''): # \uxxxx, \Uxxxxxxxx, \N{...} chrval = -1 if c == u'N': + uchar = None try: - chrval = ord(lookup_unicodechar(escape_sequence[3:-1])) + uchar = lookup_unicodechar(escape_sequence[3:-1]) + chrval = ord(uchar) except KeyError: s.error("Unknown Unicode character name %s" % repr(escape_sequence[3:-1]).lstrip('u'), fatal=False) + except TypeError: + # 2-byte unicode build of CPython? + if (uchar is not None and _IS_2BYTE_UNICODE and len(uchar) == 2 and + unicode_category(uchar[0]) == 'Cs' and unicode_category(uchar[1]) == 'Cs'): + # surrogate pair instead of single character + chrval = 0x10000 + (ord(uchar[0]) - 0xd800) >> 10 + (ord(uchar[1]) - 0xdc00) + else: + raise elif len(escape_sequence) in (6, 10): chrval = int(escape_sequence[2:], 16) if chrval > 1114111: # sys.maxunicode:
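For reference, a minimal sketch of the standard UTF-16 surrogate-pair decoding that narrow (2-byte) CPython builds use for characters outside the BMP; this is textbook arithmetic for illustration, not code lifted from the patch above.

```python
# U+1F600 (GRINNING FACE) is represented on narrow builds as a surrogate pair.
hi, lo = 0xD83D, 0xDE00
codepoint = 0x10000 + ((hi - 0xD800) << 10) + (lo - 0xDC00)
assert codepoint == 0x1F600
```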
get_lldp_neighbors.py: Junos fix for chassis id when it is an IP. HG-- branch : pnpwin/get_lldp_neighborspy-junos-fix-chassis-i-1495805478736
@@ -14,6 +14,7 @@ from noc.sa.interfaces.base import (IntParameter, MACAddressParameter, InterfaceTypeError) from noc.sa.interfaces.igetlldpneighbors import IGetLLDPNeighbors +from noc.lib.validators import is_int, is_ipv4, is_ipv6 class Script(BaseScript): @@ -156,8 +157,7 @@ class Script(BaseScript): remote_port = match.get("p_id") n["remote_chassis_id"] = match.get("id") n["remote_port"] = str(remote_port) - if is_ipv4(n["remote_chassis_id"]) \ - or is_ipv6(n["remote_chassis_id"]): + if is_ipv4(n["remote_chassis_id"]) or is_ipv6(n["remote_chassis_id"]): n["remote_chassis_id_subtype"] = 5 i["neighbors"] += [n] r += [i]
Fix general_itest failures in cook_image. Until is merged, we're not running general_itests in GHA
# See the License for the specific language governing permissions and # limitations under the License. """Contains methods used by the paasta client to build a docker image.""" +import argparse import os import sys +from typing import Optional from paasta_tools.cli.cmds.check import makefile_responds_to from paasta_tools.cli.utils import validate_service_name @@ -61,18 +63,35 @@ def add_subparser(subparsers): list_parser.set_defaults(command=paasta_cook_image) -def paasta_cook_image(args, service=None, soa_dir=None): +def paasta_cook_image( + args: Optional[argparse.Namespace], + service: Optional[str] = None, + soa_dir: Optional[str] = None, +): """Build a docker image""" if not service: + if args is None: + print( + "ERROR: No arguments or service passed to cook-image - unable to determine what service to cook an image for", + file=sys.stderr, + ) + return 1 service = args.service - if service.startswith("services-"): + if service and service.startswith("services-"): service = service.split("services-", 1)[1] if not soa_dir: + if args is None: + print( + "ERROR: No arguments or soadir passed to cook-image - unable to determine where to look for soa-configs", + file=sys.stderr, + ) + return 1 soa_dir = args.yelpsoa_config_root + validate_service_name(service, soa_dir) run_env = os.environ.copy() - if args.commit is not None: + if args is not None and args.commit is not None: # if we're given a commit, we're likely being called by Jenkins or someone # trying to push the cooked image to our registry - as such, we should tag # the cooked image as `paasta itest` would.
Call cleanup after each test This improves the situation for I see far fewer peak ipengine processes, but still more than I would expect if cleanup was working as expected after each test.
@@ -130,7 +130,9 @@ def load_dfk(config): module.config['globals'] = {} module.config['globals']['runDir'] = get_rundir() # Give unique rundir; needed running with -n=X where X > 1. parsl.clear() - parsl.load(module.config) + dfk = parsl.load(module.config) + yield + parsl.dfk().cleanup() @pytest.fixture(autouse=True)
Problem: nginx_container document is incorrect. Solution: Fixed nginx_container volume path and environment name.
@@ -12,8 +12,8 @@ reflect any changes made to the container. ``` docker run \ --name=tendermint_instance_pub_key \ - --env TENDERMINT_PUB_KEY_ACCESS_PORT='' + --env TM_PUB_KEY_ACCESS_PORT='' --publish=<nginx port for external connections>:<corresponding host port> \ - --volume=<host dir with public key>:/tendermint_node_data \ + --volume=<host dir with public key>:/usr/share/nginx \ bigchaindb/nginx_pub_key_access:<version_number> ```
Fix (very weird) Travis / pandoc install issue. I really do not understand why there would be a difference between `sudo apt install pandoc` and `sudo apt-get install pandoc`. Weird!
@@ -6,7 +6,7 @@ language: python python: - "3.6" install: - - sudo apt install pandoc # pandoc for jupyter notebooks + - sudo apt-get install pandoc # pandoc for jupyter notebooks - sudo apt install graphviz # graphviz for class inheritance diagrams in docs - pip install tox-travis script:
Update README.md Spelling corrections
@@ -15,7 +15,7 @@ A FEDn network, as illustrated in the picture below, is made up of three key age A Client is a data node, holding private data and connecting to a Combiner to recieve model update requests and model validation requests. Clients need to be configured to be able to execute model training for the ML-model type used. #### Combiner -A combiner is an actor which orchestrates model updates from a number of attached clients. Each combiner aggregates model updates from its clients during a global trainign round. When and how to trigger such orchestration rounds are specified in the overall *compute plan* layed out by the Reducer. Each combiner in the network is an independent (and identical) gRPC Server, providing RPCs for interacting with the alliance subsystem it controls. Hence, the total number of clients that can be accomodated in a FEDn network is proportional to the number of active combiners. +A combiner is an actor which orchestrates model updates from a number of attached clients. Each combiner aggregates model updates from its clients during a global training round. When and how to trigger such orchestration rounds are specified in the overall *compute plan* laid out by the Reducer. Each combiner in the network is an independent (and identical) gRPC Server, providing RPCs for interacting with the alliance subsystem it controls. Hence, the total number of clients that can be accomodated in a FEDn network is proportional to the number of active combiners. #### Reducer The reducer fills three main roles in the network: 1.) To lay out and initialize the overall, global training strategy and to aggregate model updates from combiners into a global model, 2.) to handle global state and maintain the model ledger - an immutable trail of global model updates uniquely defining the FedML timeline, and 3) provide a discovery service, mediating connections between clients and combiners. For this purpose, the Reducer exposes a standard REST API.
target/assistant: Fix logcat poller. Rename the `start` method to `run`, as this is what is called by the threading module's `start` method; otherwise the polling is done in the main thread, blocking execution.
@@ -104,7 +104,7 @@ class LogcatPoller(threading.Thread): self.daemon = True self.exc = None - def start(self): + def run(self): self.logger.debug('starting polling') try: while True:
[dagit] Clean up /instance and /workspace redirects ### Summary & Motivation Cleaning up route redirects now that it's been a month or two. ### How I Tested These Changes Buildkite. Sanity check that Dagit routes are working as expected.
import {MainContent} from '@dagster-io/ui'; import * as React from 'react'; -import {Redirect, Route, Switch, useLocation} from 'react-router-dom'; +import {Route, Switch, useLocation} from 'react-router-dom'; const UserSettingsRoot = React.lazy(() => import('./UserSettingsRoot')); const WorkspaceRoot = React.lazy(() => import('../workspace/WorkspaceRoot')); @@ -29,25 +29,6 @@ export const ContentRoot = React.memo(() => { return ( <MainContent ref={main}> <Switch> - {/* todo dish: These /instance routes are for backward compatibility. Remove them - in November or December 2022. */} - <Route path="/instance" exact render={() => <Redirect to="/locations" />} /> - <Route - path="/instance/*" - exact - render={({match}) => { - const {url} = match; - return <Redirect to={url.replace('/instance', '')} />; - }} - /> - <Route - path="/workspace/*" - exact - render={({match}) => { - const {url} = match; - return <Redirect to={url.replace('/workspace', '/locations')} />; - }} - /> <Route path="/asset-groups(/?.*)"> <React.Suspense fallback={<div />}> <AssetsGroupsGlobalGraphRoot />
Removed README Requirement From Contributing Document * Removed README requirement * Generated READMEs from sources using Ronbun on-behalf-of:
# Sample Programs in Ruby -Welcome to Sample Programs in Ruby! +Welcome to Sample Programs in Ruby! To find documentation related to the Ruby code in this repo, look [here.](https://sample-programs.therenegadecoder.com/languages/ruby) ## Sample Programs List
Fix help message about which parameters should be used for creating a family file. Let's show where the file will be placed.
@@ -219,7 +219,7 @@ if __name__ == '__main__': print(""" Usage: {module} <url> <short name> Example: {module} https://www.mywiki.bogus/wiki/Main_Page mywiki -This will create the file families{sep}mywiki_family.py""" +This will create the file mywiki_family.py in pywikibot{sep}families""" .format(module=sys.argv[0].strip('.' + os.sep), sep=os.sep))
operation events: fetch retry count from the right place. It is in parameters directly, as stored here:
@@ -172,8 +172,8 @@ class OperationsId(SecuredResource): return if exception is not None: operation.parameters.setdefault('error', str(exception)) - current_retries = context.get('current_retries') or 0 - total_retries = context.get('total_retries') or 0 + current_retries = operation.parameters.get('current_retries') or 0 + total_retries = operation.parameters.get('total_retries') or 0 try: message = common_events.format_event_message(
Ensure file_random_seed is 0 when use_per_host_infeed is True. Otherwise it will cause every host to read the same data and reduce the effective batch size by n times, where n equals the number of workers.
@@ -229,6 +229,13 @@ class BaseInputGeneratorFromFiles(BaseInputGenerator): 'record batcher.') return p + @base_layer.initializer + def __init__(self, params): + super(BaseInputGeneratorFromFiles, self).__init__(params) + if self.params.use_per_host_infeed and self.params.file_random_seed != 0: + raise ValueError('file_random_seed needs to be 0 when ' + 'use_per_host_infeed == True.') + def CommonInputOpArgs(self): """Common input params.""" p = self.params
more logging fixes: call getLogger( "mininet" ) to set lg properly; support warning() as well as (deprecated) warn(); rearrange initialization slightly
'info': logging.INFO, 'output': OUTPUT, 'warning': logging.WARNING, + 'warn': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL } @@ -96,9 +97,9 @@ class MininetLogger( Logger, object ): __metaclass__ = Singleton - def __init__( self ): + def __init__( self, name="mininet" ): - Logger.__init__( self, "mininet" ) + Logger.__init__( self, name ) # create console handler ch = StreamHandlerNoNewline() @@ -116,6 +117,7 @@ def setLogLevel( self, levelname=None ): Convenience function to support lowercase names. levelName: level name from LEVELS""" if levelname and levelname not in LEVELS: + print(LEVELS) raise Exception( 'setLogLevel: unknown levelname %s' % levelname ) level = LEVELS.get( levelname, LOGLEVELDEFAULT ) self.setLevel( level ) @@ -135,8 +137,6 @@ def output( self, msg, *args, **kwargs ): self._log( OUTPUT, msg, args, kwargs ) -lg = MininetLogger() - # Make things a bit more convenient by adding aliases # (info, warn, error, debug) and allowing info( 'this', 'is', 'OK' ) # In the future we may wish to make things more efficient by only @@ -160,10 +160,13 @@ def newfn( *args ): return newfn -_loggers = lg.info, lg.output, lg.warn, lg.error, lg.debug -_loggers = tuple( makeListCompatible( logger ) - for logger in _loggers ) -lg.info, lg.output, lg.warn, lg.error, lg.debug = _loggers -info, output, warn, error, debug = _loggers +# Initialize logger and logging functions +logging.setLoggerClass( MininetLogger ) +lg = logging.getLogger( "mininet" ) +_loggers = lg.info, lg.output, lg.warning, lg.error, lg.debug +_loggers = tuple( makeListCompatible( logger ) for logger in _loggers ) +lg.info, lg.output, lg.warning, lg.error, lg.debug = _loggers +info, output, warning, error, debug = _loggers +warn = warning # alternate/old name setLogLevel = lg.setLogLevel
Get experiment object properties directly instead of using a dictionary.
@@ -5,6 +5,8 @@ import nacl.utils import base64 import json +import sys + from studio.payload_builder import PayloadBuilder from studio import logs from studio.unencrypted_payload_builder import UnencryptedPayloadBuilder @@ -110,18 +112,22 @@ class EncryptedPayloadBuilder(PayloadBuilder): enc_key, enc_payload = self._encrypt_str(json.dumps(unencrypted_payload)) encrypted_payload["message"]["experiment"]["status"] =\ - experiment["status"] + experiment.status encrypted_payload["message"]["experiment"]["pythonver"] =\ - experiment["pythonver"] + experiment.pythonver encrypted_payload["message"]["time_added"] =\ - experiment["time_added"] + experiment.time_added encrypted_payload["message"]["experiment_lifetime"] =\ - experiment["experiment_lifetime"] + experiment.max_duration encrypted_payload["message"]["resources_needed"] =\ - experiment["resources_needed"] + experiment.resources_needed encrypted_payload["message"]["payload"] =\ "{0},{1}".format(enc_key, enc_payload) + pretty_str = json.dumps(encrypted_payload, indent=4) + print(pretty_str) + sys.exit(0) + return encrypted_payload # def main():
issue clean up FDs on failure explicitly. The previous approach was crap since it left e.g. socketpair instances lying around for GC with their underlying FD already closed, which, coupled with FD number reuse, led to random madness when GC finally ran.
@@ -211,7 +211,7 @@ def create_socketpair(): return parentfp, childfp -def detach_popen(close_on_error=None, **kwargs): +def detach_popen(**kwargs): """ Use :class:`subprocess.Popen` to construct a child process, then hack the Popen so that it forgets the child it created, allowing it to survive a @@ -232,13 +232,7 @@ def detach_popen(close_on_error=None, **kwargs): # handling, without tying the surrounding code into managing a Popen # object, which isn't possible for at least :mod:`mitogen.fork`. This # should be replaced by a swappable helper class in a future version. - try: proc = subprocess.Popen(**kwargs) - except Exception: - for fd in close_on_error or (): - os.close(fd) - raise - proc._child_created = False return proc.pid @@ -279,15 +273,20 @@ def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): mitogen.core.set_cloexec(stderr_w) extra = {'stderr': stderr_w} + try: pid = detach_popen( args=args, stdin=childfp, stdout=childfp, close_fds=True, preexec_fn=preexec_fn, - close_on_error=[parentfp.fileno(), childfp.fileno()], **extra ) + except Exception: + childfp.close() + parentfp.close() + raise + if stderr_pipe: os.close(stderr_w) childfp.close() @@ -347,15 +346,19 @@ def tty_create_child(args): disable_echo(master_fd) disable_echo(slave_fd) + try: pid = detach_popen( args=args, stdin=slave_fd, stdout=slave_fd, stderr=slave_fd, preexec_fn=_acquire_controlling_tty, - close_on_error=[master_fd, slave_fd], close_fds=True, ) + except Exception: + os.close(master_fd) + os.close(slave_fd) + raise os.close(slave_fd) LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', @@ -383,6 +386,7 @@ def hybrid_tty_create_child(args): disable_echo(master_fd) disable_echo(slave_fd) + try: pid = detach_popen( args=args, stdin=childfp, @@ -390,8 +394,13 @@ def hybrid_tty_create_child(args): stderr=slave_fd, preexec_fn=_acquire_controlling_tty, close_fds=True, - close_on_error=[master_fd, slave_fd, parentfp.fileno(), childfp.fileno()], ) + except Exception: + os.close(master_fd) + os.close(slave_fd) + parentfp.close() + childfp.close() + raise os.close(slave_fd) childfp.close()
Log payday.sql during recreate-schema.sh. Took me a little longer to understand that this was happening while reviewing
@@ -24,6 +24,11 @@ echo "Applying sql/schema.sql ..." echo psql "$DATABASE_URL" < sql/schema.sql + + +echo "==============================================================================" +echo "Applying sql/payday.sql ..." +echo psql "$DATABASE_URL" < sql/payday.sql
No longer update InfluxDB IP in IP setter. Because we're either binding on `localhost` or an external endpoint, there's no point in replacing this IP with the private one.
@@ -9,7 +9,6 @@ function set_manager_ip() { echo "Updating cloudify-amqpinflux.." /usr/bin/sed -i -e "s/AMQP_HOST=.*/AMQP_HOST="'"'"${ip}"'"'"/" /etc/sysconfig/cloudify-amqpinflux - /usr/bin/sed -i -e "s/INFLUXDB_HOST=.*/INFLUXDB_HOST="'"'"${ip}"'"'"/" /etc/sysconfig/cloudify-amqpinflux echo "Updating cloudify-riemann.." /usr/bin/sed -i -e "s/RABBITMQ_HOST=.*/RABBITMQ_HOST="'"'"${ip}"'"'"/" /etc/sysconfig/cloudify-riemann
Update community-overview.rst small readability tweak
@@ -12,7 +12,7 @@ Mattermost Community Vision for Mattermost Community --------------------------------------------------------- -Increase popularity of Mattermost by increasing brand advocacy and meaningful product benefits that would otherwise not be offered through an engaged and empowered community contributing best practices, troubleshooting guidance, feature proposals, documentation, language translations, extensions, features and low priority bug fixes. +Increase the popularity of Mattermost by increasing brand advocacy and meaningful product benefits that would otherwise not be offered, through an engaged and empowered community contributing best practices, troubleshooting guidance, feature proposals, documentation, language translations, extensions, features and low priority bug fixes. Principles ---------------------------------------------------------
(snapshot-perf-1) More efficient implementation of has-snapshot-id Summary: Check for existence of a row without fetching and parsing the entire snapshot Test Plan: BK Reviewers: prha
@@ -418,7 +418,7 @@ def delete_run(self, run_id): def has_pipeline_snapshot(self, pipeline_snapshot_id): check.str_param(pipeline_snapshot_id, 'pipeline_snapshot_id') - return bool(self.get_pipeline_snapshot(pipeline_snapshot_id)) + return self._has_snapshot_id(pipeline_snapshot_id) def add_pipeline_snapshot(self, pipeline_snapshot): check.inst_param(pipeline_snapshot, 'pipeline_snapshot', PipelineSnapshot) @@ -463,6 +463,15 @@ def _add_snapshot(self, snapshot_id, snapshot_obj, snapshot_type): conn.execute(snapshot_insert) return snapshot_id + def _has_snapshot_id(self, snapshot_id): + query = db.select([SnapshotsTable.c.snapshot_id]).where( + SnapshotsTable.c.snapshot_id == snapshot_id + ) + + row = self.fetchone(query) + + return bool(row) + def _get_snapshot(self, snapshot_id): query = db.select([SnapshotsTable.c.snapshot_body]).where( SnapshotsTable.c.snapshot_id == snapshot_id
Trains ImageNet for 100 epochs by default. This is closer to the number of steps reported in the README's linked TensorBoard runs: 225_200 steps at batch_size=512; 56_250 steps at batch_size=2048.
-# Copyright 2020 The Flax Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - # Copyright 2020 The Flax Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -37,7 +23,7 @@ def get_config(): config.learning_rate = 0.1 config.momentum = 0.9 config.batch_size = 128 - config.num_epochs = 10 + config.num_epochs = 100 config.cache = False config.half_precision = False
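A back-of-the-envelope check (assuming the standard ImageNet-1k train split of 1,281,167 images) relating the step counts quoted in the message to epochs; both runs work out to roughly 90 epochs, so 100 is far closer than the old default of 10.

```python
train_images = 1_281_167                      # assumed ImageNet-1k train split size
for steps, batch_size in ((225_200, 512), (56_250, 2048)):
    epochs = steps * batch_size / train_images
    print(steps, batch_size, round(epochs, 1))
# 225200 512 90.0
# 56250 2048 89.9
```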
cleaned up data generation; graph with seaborn OK; graph with plotly ready (pending)
@@ -30,37 +30,81 @@ def volume_graph(l_args, s_ticker): if not ns_parser: return - opt = yf.Ticker(s_ticker).option_chain(ns_parser.s_expiry_date.strftime("%Y-%m-%d")) - # data available via: opt.calls, opt.puts + __get_volume_graph(s_ticker, ns_parser.s_expiry_date.strftime("%Y-%m-%d")) + + except SystemExit: + print("") + except Exception as e: + print(e) + + + +def __get_volume_graph(ticker_name, exp_date): + df = __get_volume_data(ticker_name, exp_date) + __generate_graph_sns(df, ticker_name, exp_date) + #__generate_graph_plotly(df, ticker_name, exp_date) + +def __pull_call_put_data(call_put, flag): + df = call_put.pivot_table( + index='strike', + values = ['volume','openInterest'], + aggfunc='sum') + + df.reindex() + + df['strike'] = df.index + df['type'] = flag + + return df + +def __get_volume_data(ticker_name, exp_date): + + option_chain = yf.Ticker(ticker_name).option_chain(exp_date) + + calls = __pull_call_put_data( + option_chain.calls, + 'calls' + ) + + puts = __pull_call_put_data( + option_chain.puts, + 'puts' + ) volume_data = pd.concat( [ - __volume_data(opt.calls, 'calls'), - __volume_data(opt.puts, 'puts') + calls, + puts ], axis = 0 ) + #dataframe + return volume_data + +def __generate_graph_plotly(df, ticker_name, exp_date): + #version with plotly express fig = px.line( - volume_data, + df, x="strike", y="volume", - title=f'{s_ticker} Volume for {ns_parser.s_expiry_date.strftime("%Y-%m-%d")}', + title=f'{ticker_name} options volume for {exp_date}', color= 'type' ) fig.show() - except SystemExit: - print("") - except Exception as e: - print(e) + return -def __volume_data(opt_data, flag): - # get option chain for specific expiration - df = opt_data.pivot_table( - index='strike', - values=['volume', 'openInterest'], - aggfunc='sum') - df.reindex() - df['strike'] = df.index - df['type'] = flag - return df +def __generate_graph_sns(df, ticker_name, exp_date): + #version with seaborn express + plt.figure(figsize=(12,6)) + fig = sns.lineplot( + data = df, + x = 'strike', + y = 'volume', + hue = 'type', + palette= ['limegreen', 'tomato']) + + plt.title(f'{ticker_name} options volume for {exp_date}') + + plt.show() + return
Fixed an uncaught bug A `break` was used instead of a `continue`, so it stopped mining instead of restarting it!
@@ -348,12 +348,12 @@ def AVRMine(): # Mining section debugOutput("Calculated hashrate ("+str(hashrate)+")") except: Connect() - break + continue try: soc.send(bytes(str(result[0]) + "," + str(hashrate) + ",Official AVR Miner v" + str(minerVersion), encoding="utf8")) # Send result back to the server except: Connect() - break + continue except: Connect() ConnectToAVR()
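
A minimal, self-contained sketch of the behaviour the fix above relies on; the ConnectionError raised here just stands in for a failed soc.send() and is not taken from the miner itself.

for attempt in range(3):
    try:
        raise ConnectionError("socket dropped")  # stand-in for a failed soc.send(...)
    except ConnectionError:
        print("reconnecting, then retrying attempt", attempt)
        continue  # a 'break' here would leave the loop, i.e. stop mining after the first failure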
Adds default initialization to 'uuid' and 'collisionAction' in DataSet.__setstate__ This should fix some un-pickling old DataSet problems.
@@ -1457,8 +1457,8 @@ class DataSet(object): self.timeType = state_dict['timeType'] self.repType = state_dict['repType'] - self.collisionAction = state_dict['collisionAction'] - self.uuid = state_dict['uuid'] + self.collisionAction = state_dict.get('collisionAction','aggregate') + self.uuid = state_dict.get('uuid',None)
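
A small illustration of why dict.get with a default survives old pickles; the old_state contents below are invented and are not pyGSTi data.

old_state = {"timeType": "int", "repType": "dense"}  # hypothetical state saved before the new fields existed

collision_action = old_state.get("collisionAction", "aggregate")  # falls back instead of raising KeyError
uuid = old_state.get("uuid", None)
print(collision_action, uuid)  # aggregate None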
Raise minimum version to 0.1.77 Previous version constraint fails due to missing omnistaging APIs.
@@ -26,7 +26,7 @@ except IOError: install_requires = [ "numpy>=1.12", - "jax>=0.1.59", + "jax>=0.1.77", "matplotlib", # only needed for tensorboard export "dataclasses;python_version<'3.7'", # will only install on py3.6 "msgpack",
Azure : Fix intermittent validation errors For some reason, in some situations, directories were listed with a trailing slash when reading the archive. This handles dir listing both with and without slashes.
@@ -80,9 +80,18 @@ for module in ( with tarfile.open( args.archive, "r:gz" ) as a: # getmember still reads the whole archive, so might as well grab them all - # as we go. We need to strip the first directory from all paths too as that - # contains the release name - archivePaths = { os.path.join( *m.name.split( os.sep )[1:] ) for m in a.getmembers() if os.sep in m.name } + # as we go. We need to strip the first directory from all paths as that + # contains the release name. + + archivePaths = set() + + for m in a.getmembers() : + # ignore anything not under the release directory + if os.sep not in m.name : + continue + # Strip the release dir and any empty components at the end + relPath = os.path.join( *m.name.split( os.sep )[1:] ) + archivePaths.add( os.path.normpath( relPath ) ) missing = [ p for p in requiredPaths if p not in archivePaths ] if missing :
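
A rough sketch of the path handling above, with an invented archive entry; posixpath stands in for os.path on a POSIX build machine. Joining the components of an entry listed with a trailing slash keeps that slash, and normpath drops it so both listing styles compare equal.

import posixpath

member = "release-1.2.3/python/IECore/"        # invented archive entry listed with a trailing slash
rel = posixpath.join(*member.split("/")[1:])   # 'python/IECore/' -- the trailing slash survives the join
print(posixpath.normpath(rel))                 # 'python/IECore'  -- normalised, so listings match either way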
Fix test_glu_old HealthCheck with smarter generation strategy. Summary: Pull Request resolved:
@@ -13,25 +13,30 @@ import numpy as np
 import unittest
 
 
+@st.composite
+def _glu_old_input(draw):
+    dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
+    axis = draw(st.integers(min_value=0, max_value=len(dims)))
+    # The axis dimension must be divisible by two
+    axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
+    dims.insert(axis, axis_dim)
+    X = draw(hu.arrays(dims, np.float32, None))
+    return (X, axis)
+
+
 class TestGlu(serial.SerializedTestCase):
-    # Suppress filter_too_much health check.
-    # Reproduce by commenting @settings and uncommenting @seed.
-    # @seed(302934307671667531413257853548643485645)
-    @settings(suppress_health_check=[HealthCheck.filter_too_much])
     @serial.given(
-        X=hu.tensor(),
-        axis=st.integers(min_value=0, max_value=3),
+        X_axis=_glu_old_input(),
         **hu.gcs
     )
-    def test_glu_old(self, X, axis, gc, dc):
+    def test_glu_old(self, X_axis, gc, dc):
+        X, axis = X_axis
+
         def glu_ref(X):
             x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
             Y = x1 * (1. / (1. + np.exp(-x2)))
             return [Y]
 
-        # Test only valid tensors.
-        assume(axis < X.ndim)
-        assume(X.shape[axis] % 2 == 0)
         op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
         self.assertReferenceChecks(gc, op, [X], glu_ref)
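
A stripped-down sketch of the same @st.composite pattern, with arbitrary bounds and no Caffe2 imports: the strategy constructs the axis dimension to be even up front, so no assume() filtering (and no filter_too_much health check) is needed.

from hypothesis import given, strategies as st

@st.composite
def even_axis_input(draw):
    dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
    axis = draw(st.integers(min_value=0, max_value=len(dims)))
    dims.insert(axis, 2 * draw(st.integers(min_value=1, max_value=2)))  # axis dim is always even
    return dims, axis

@given(even_axis_input())
def test_axis_dim_is_even(dims_axis):
    dims, axis = dims_axis
    assert dims[axis] % 2 == 0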
fix name bug in test_pass_inject_double_buffer Change the name of the 'C' parameter
@@ -7,7 +7,7 @@ def test_double_buffer(): tx = tvm.thread_axis("threadIdx.x") ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") - C = ib.pointer("float32", name="A") + C = ib.pointer("float32", name="C") ib.scope_attr(tx, "thread_extent", 1) with ib.for_range(0, n) as i: B = ib.allocate("float32", m, name="B", scope="shared")
Update quickfiles.rst change "from flask_appbuilder.model.mixins import ImageColumn" to "from flask_appbuilder.models.mixins import ImageColumn"
@@ -10,7 +10,7 @@ Define your model (models.py) :: from flask_appbuilder import Model - from flask_appbuilder.model.mixins import ImageColumn + from flask_appbuilder.models.mixins import ImageColumn class Person(Model): id = Column(Integer, primary_key=True)
Take quadrature with more contrast for Rabi analysis Same idea as for the parabola analysis, issue
@@ -482,15 +482,15 @@ class Transmon(Qubit):
         if np.size(amps) != 1:
             self.measure_rabi(amps, n=1, MC=MC, analyze=False)
             a = ma.Rabi_Analysis(close_fig=close_fig)
+            # Decide which quadrature to take by comparing the contrast
             if take_fit_I:
-                ampl = abs(a.fit_res[0].params['period'].value)/2
+                ampl = abs(a.fit_res[0].params['period'].value)/2.
                 print("taking I")
+            elif (np.abs(max(a.fit_res[0].data)-min(a.fit_res[0].data))) > \
+                (np.abs(max(a.fit_res[1].data)-min(a.fit_res[1].data))):
+                ampl = a.fit_res[0].params['period'].value/2.
             else:
-                if (a.fit_res[0].params['period'].stderr <=
-                    a.fit_res[1].params['period'].stderr):
-                    ampl = abs(a.fit_res[0].params['period'].value)/2
-                else:
-                    ampl = abs(a.fit_res[1].params['period'].value)/2
+                ampl = a.fit_res[1].params['period'].value/2.
         else:
             ampl = amps
         if verbose:
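
A toy numeric illustration of the selection rule above, with made-up I/Q traces: the quadrature whose peak-to-peak contrast is larger is the one whose fitted period gets used.

import numpy as np

i_trace = np.array([0.05, 0.95, 0.10, 0.90])  # made-up data with large contrast
q_trace = np.array([0.48, 0.52, 0.47, 0.53])  # made-up data with small contrast

contrast_i = np.abs(i_trace.max() - i_trace.min())
contrast_q = np.abs(q_trace.max() - q_trace.min())
print("use I" if contrast_i > contrast_q else "use Q")  # use I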
Fix get_permissions for small group chats Closes
@@ -1259,7 +1259,7 @@ class ChatMethods: if isinstance(entity, types.Channel): FullChat = await self(functions.channels.GetFullChannelRequest(entity)) elif isinstance(entity, types.Chat): - FullChat = await self(functions.messages.GetFullChatRequest(entity)) + FullChat = await self(functions.messages.GetFullChatRequest(entity.id)) else: return return FullChat.chats[0].default_banned_rights @@ -1276,7 +1276,7 @@ class ChatMethods: return custom.ParticipantPermissions(participant.participant, False) elif helpers._entity_type(entity) == helpers._EntityType.CHAT: chat = await self(functions.messages.GetFullChatRequest( - entity + entity.chat_id )) if isinstance(user, types.InputPeerSelf): user = await self.get_me(input_peer=True)
fix: deduplicate currencies manually on mariadb 10.3 `insert ignore` doesn't work
@@ -29,6 +29,8 @@ def get_countries_and_currencies(): countries = [] currencies = [] + added_currencies = set() + for name, country in data.items(): country = frappe._dict(country) countries.append( @@ -42,7 +44,9 @@ def get_countries_and_currencies(): time_zones="\n".join(country.timezones or []), ) ) - if country.currency: + if country.currency and country.currency not in added_currencies: + added_currencies.add(country.currency) + currencies.append( frappe.get_doc( doctype="Currency",
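
The fix above is the usual seen-set pattern; a generic, self-contained version with invented rows looks like this when insert ignore cannot be relied on to deduplicate.

rows = [
    {"country": "France", "currency": "EUR"},   # invented sample data
    {"country": "Germany", "currency": "EUR"},
    {"country": "Japan", "currency": "JPY"},
]

added_currencies = set()
currencies = []
for row in rows:
    if row["currency"] and row["currency"] not in added_currencies:
        added_currencies.add(row["currency"])
        currencies.append(row["currency"])
print(currencies)  # ['EUR', 'JPY']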
fix use-after-free bug Summary: Pull Request resolved: Test Plan: Imported from OSS
@@ -1990,7 +1990,9 @@ class ShapePropagator { if (inferred) { SHAPE_ASSERT(size_product != 0); size_t numel = 1; - for (int64_t s : tensor_types.at(0)->sizes().concrete_sizes().value()) + auto concrete_sizes = + tensor_types.at(0)->sizes().concrete_sizes().value(); + for (int64_t s : concrete_sizes) numel *= s; int64_t inferred_size = numel / size_product; sizes[inferred_idx] = inferred_size;
Add some files to the CODEOWNERS files for team-core to own And also fix a couple of bugs found when writing tests for the matching used by tamarack bot. This expands the files that the core team will be requested for on pull request reviews.
# This file uses an fnmatch-style matching pattern. # Team Boto -salt/**/*boto* @saltstack/team-boto +salt/*/*boto* @saltstack/team-boto # Team Core +requirements/* @saltstack/team-core salt/auth/* @saltstack/team-core salt/cache/* @saltstack/team-core salt/cli/* @saltstack/team-core @@ -24,14 +25,16 @@ salt/daemons/* @saltstack/team-core salt/pillar/* @saltstack/team-core salt/loader.py @saltstack/team-core salt/payload.py @saltstack/team-core -salt/**/master* @saltstack/team-core -salt/**/minion* @saltstack/team-core +salt/master.py @saltstack/team-core +salt/*/master* @saltstack/team-core +salt/minion.py @saltstack/team-core +salt/*/minion* @saltstack/team-core # Team Cloud salt/cloud/* @saltstack/team-cloud salt/utils/openstack/* @saltstack/team-cloud salt/utils/aws.py @saltstack/team-cloud -salt/**/*cloud* @saltstack/team-cloud +salt/*/*cloud* @saltstack/team-cloud # Team NetAPI salt/cli/api.py @saltstack/team-netapi @@ -50,18 +53,18 @@ salt/cli/ssh.py @saltstack/team-ssh salt/client/ssh/* @saltstack/team-ssh salt/roster/* @saltstack/team-ssh salt/runners/ssh.py @saltstack/team-ssh -salt/**/thin.py @saltstack/team-ssh +salt/*/thin.py @saltstack/team-ssh # Team State salt/state.py @saltstack/team-state # Team SUSE -salt/**/*btrfs* @saltstack/team-suse -salt/**/*kubernetes* @saltstack/team-suse -salt/**/*pkg* @saltstack/team-suse -salt/**/*snapper* @saltstack/team-suse -salt/**/*xfs* @saltstack/team-suse -salt/**/*zypper* @saltstack/team-suse +salt/*/*btrfs* @saltstack/team-suse +salt/*/*kubernetes* @saltstack/team-suse +salt/*/*pkg* @saltstack/team-suse +salt/*/*snapper* @saltstack/team-suse +salt/*/*xfs* @saltstack/team-suse +salt/*/*zypper* @saltstack/team-suse # Team Transport salt/transport/* @saltstack/team-transport
Handle application set status Because pylibjuju has an internal cache, very similar to how the juju model cache works, we need to correctly handle when the application status is set by the charm author, vs when it is derived via the unit status.
@@ -80,10 +80,12 @@ class Application(model.ModelEntity):
         workload status and highlight the most relevant (severity).
         """
         status = self.safe_data['status']['current']
-        unit_status = [status]
-        for unit in self.units:
-            unit_status.append(unit.workload_status)
-        return derive_status(unit_status)
+        if status == "unset":
+            unit_status = []
+            for unit in self.units:
+                unit_status.append(unit.workload_status)
+            return derive_status(unit_status)
+        return status
 
     @property
     def status_message(self):
pagegenerators.py: -linter option fails Fix parsing of the argument in handleArg(): -linter should select all categories but instead gives an error.
@@ -14,7 +14,7 @@ These parameters are supported to specify which pages titles to print: &params; """ # -# (C) Pywikibot team, 2008-2017 +# (C) Pywikibot team, 2008-2018 # # Distributed under the terms of the MIT license. # @@ -719,6 +719,7 @@ class GeneratorFactory(object): cats = self.site.siteinfo.get('linter') # Get linter categories. valid_cats = [c for _list in cats.values() for c in _list] + value = '' if value is None else value cat, sep, lint_from = value.partition('/') if not lint_from: lint_from = None
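
A short sketch of what the added guard buys (plain strings only, no pywikibot imports): a bare -linter hands None to the parser, and only str has partition; with the fallback the split degrades to empty strings and all categories get selected.

value = None                                # a bare "-linter" with no category given
value = '' if value is None else value      # the added guard
cat, sep, lint_from = value.partition('/')  # ('', '', '') rather than AttributeError on None
if not lint_from:
    lint_from = None                        # i.e. select all linter categories
print(repr(cat), repr(lint_from))           # '' None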
Remove panic from get_block All uses of get_block internally use the version that returns the result. This way, there can be no panics if the block is not found, but the appropriate handling of the error will occur.
@@ -172,7 +172,7 @@ impl SyncBlockPublisher { if self.is_building_block(state) { previous_block = state .get_previous_block_id() - .map(|block_id| self.get_block_checked(block_id.as_str()).ok()) + .map(|block_id| self.get_block(block_id.as_str()).ok()) .unwrap_or(None); self.cancel_block(state); } @@ -362,7 +362,7 @@ impl SyncBlockPublisher { } } - fn get_block_checked(&self, block_id: &str) -> Result<Block, BlockPublisherError> { + fn get_block(&self, block_id: &str) -> Result<Block, BlockPublisherError> { self.block_manager .get(&[block_id]) .next() @@ -370,19 +370,22 @@ impl SyncBlockPublisher { .ok_or_else(|| BlockPublisherError::UnknownBlock(block_id.to_string())) } - fn get_block(&self, block_id: &str) -> Block { - self.get_block_checked(block_id) - .expect("Unable to extract BlockWrapper") - } - fn restart_block(&self, state: &mut BlockPublisherState) { - if let Some(previous_block) = state + match state .get_previous_block_id() .map(|previous_block_id| self.get_block(previous_block_id.as_str())) { - state.candidate_block = None; - self.initialize_block(state, &previous_block) - .expect("Initialization failed unexpectedly"); + Some(Ok(previous_block)) => { + self.cancel_block(state); + + if let Err(err) = self.initialize_block(state, &previous_block) { + error!("Initialization failed unexpectedly: {:?}", err); + } + } + Some(Err(err)) => { + error!("Unable to read previous block on restart: {:?}", err); + } + None => (), } }
Update nicinfo only when mac match is found Fix an error where NicInfo was initialized when upper_nic was not set
@@ -333,9 +333,9 @@ class Nics(InitializableMixin): upper_nic_mac = ip.get_mac(nic_name) if upper_nic_mac == lower_nic_mac: upper_nic = nic_name - break nic_info = NicInfo(upper_nic, lower_nic, pci_slot) self.append(nic_info) + break # Collects NIC info for any unpaired NICS for nic_name in [
Update vr_logging.py Fixed small variable name bug in vr_logging.py
@@ -354,7 +354,7 @@ class VRLogWriter(): torso_data_list = [is_valid] torso_data_list.extend(list(torso_trans)) torso_data_list.extend(list(torso_rot)) - self.data_map['vr']['vr_device_data']['torso_tracker'][self.frame_counter, ...] = np.array(torso_data) + self.data_map['vr']['vr_device_data']['torso_tracker'][self.frame_counter, ...] = np.array(torso_data_list) vr_pos_data = [] vr_pos_data.extend(list(s.get_vr_pos()))
Fixed bug Now taking the most immediate subclasses of an invalid superclass for categorizing.
@@ -63,7 +63,10 @@ class Transformer(object): def categorize(self, ignore:List[str]=None): """ Attempts to find node categories and edge labels by following - subclass_of paths within the graph. + subclass_of paths within the graph. If a superclass is being ignored + then all of its children subclasses will be used unless they are also + being ignored. You can use the ignore feature in this way to get more + refined categories. Parameters ---------- @@ -74,21 +77,46 @@ class Transformer(object): if ignore is None: ignore = IGNORE_CLASSSES - superclasses = [] + superclasses = set() + with click.progressbar(self.graph.nodes(data='name'), label='Finding superclasses') as bar: for n, name in bar: if name is None: continue - c = bmt.get_class(name) - if c is not None: - superclasses.append(n) - else: + in_degree = sum(1 for _, _, edge_label in self.graph.in_edges(n, data='edge_label') if edge_label == 'subclass_of') out_degree = sum(1 for _, _, edge_label in self.graph.out_edges(n, data='edge_label') if edge_label == 'subclass_of') if out_degree == 0 and in_degree > 0: superclasses.append(n) - with click.progressbar(superclasses, label='Categorizing subclasses') as bar: + c = bmt.get_class(name) + if c is not None: + superclasses.add(n) + + c = bmt.get_by_mapping(n) + if c is not None: + superclasses.add(n) + + def get_valid_superclasses(superclasses): + """ + Returns a list of the most immediate valid subclasses of the given + superclasses (which will be a given superclass if it is valid). + """ + result = set() + for superclass in superclasses: + name = self.graph.node[superclass].get('name') + is_invalid = name is None or name in ignore + if is_invalid: + for subclass, edge_label in self.graph.in_nodes(data='edge_label'): + if edge_label == 'subclass_of': + result.update(get_valid_superclasses([subclass])) + else: + result.add(superclass) + return result + + superclasses = get_valid_superclasses(superclasses) + + with click.progressbar(superclasses, label='Categorizing nodes') as bar: for superclass in bar: name = self.graph.node[superclass].get('name') if name is None or name in ignore: @@ -103,7 +131,7 @@ class Transformer(object): memo = {} # Starts with each uncategorized ge and finds a superclass - with click.progressbar(self.graph.edges(data=True), label='categorizing edges') as bar: + with click.progressbar(self.graph.edges(data=True), label='Categorizing edges') as bar: for u, v, data in bar: if data.get('edge_label') is None or data['edge_label'] == 'related_to': relation = data.get('relation')
Resolve DeprecationWarning DeprecationWarning: Please use assertRaisesRegex instead.
@@ -99,17 +99,17 @@ class CasePropertyValidationTests(SimpleTestCase): CaseProperty.wrap({"case_property": "foo"}) def test_blank_case_property(self): - with self.assertRaisesRegexp(BadValueError, "Value cannot be blank."): + with self.assertRaisesRegex(BadValueError, "Value cannot be blank."): CaseProperty.wrap({"case_property": ""}) def test_missing_case_property(self): case_property = CaseProperty.wrap({}) - with self.assertRaisesRegexp(BadValueError, "Property case_property is required."): + with self.assertRaisesRegex(BadValueError, "Property case_property is required."): case_property.validate() def test_null_case_property(self): case_property = CaseProperty.wrap({"case_property": None}) - with self.assertRaisesRegexp(BadValueError, "Property case_property is required."): + with self.assertRaisesRegex(BadValueError, "Property case_property is required."): case_property.validate()
fix: ca reinstall test if still needed with latest rpi os
@@ -5,13 +5,13 @@ set -ex # Re-Install ARM/Raspberry Pi ca-certifcates # Which otherwise cause SSL Certificate Verification problems. -if $(arch | grep -q arm) -then - echo "Re-Installing ca-certifcates on Raspberry Pi / ARM CPU" - sudo apt-get remove -y ca-certificates - sudo apt-get update - sudo apt-get install -y ca-certificates -fi +# if $(arch | grep -q arm) +# then +# echo "Re-Installing ca-certifcates on Raspberry Pi / ARM CPU" +# sudo apt-get remove -y ca-certificates +# sudo apt-get update +# sudo apt-get install -y ca-certificates +# fi python3 -m peerjs.ext.http-proxy & python3 -m ambianic
[client] add comment about os.path.islink() in fs.islink() Add information about why and on where stdlib is broken.
@@ -180,6 +180,10 @@ if sys.platform == 'win32': The stdlib is broken. https://msdn.microsoft.com/library/windows/desktop/aa365682.aspx + os.path.islink() always returns false on WindowsNT/95 and OS/2 in python2, + https://github.com/python/cpython/blob/2.7/Lib/ntpath.py#L220 + and also for Windows prior to 6.0 in python3. + https://github.com/python/cpython/blob/3.9/Lib/ntpath.py#L228 """ res = GetFileAttributesW(extend(path)) if res == INVALID_FILE_ATTRIBUTES:
source: df: Allow for no_strict execution Fixes:
@@ -47,6 +47,9 @@ class DataFlowSourceConfig: "results of desired record on a call to record()", default=False, ) + no_strict: bool = field( + "Do not exit on operation exceptions, just log errors", default=False, + ) orchestrator: BaseOrchestrator = MemoryOrchestrator.withconfig({}) @@ -130,7 +133,8 @@ class DataFlowSourceContext(BaseSourceContext): { RecordInputSetContext(record): await self.input_set(record) async for record in [self.sctx.record(key)] - } + }, + strict=not self.parent.config.no_strict, ): if result: ctx.record.evaluated(result) @@ -141,7 +145,8 @@ class DataFlowSourceContext(BaseSourceContext): { RecordInputSetContext(record): await self.input_set(record) async for record in self.sctx.records() - } + }, + strict=not self.parent.config.no_strict, ): if result: ctx.record.evaluated(result)
package.base: dynamic_getattr_dict(): drop raising ignored exceptions Since we don't catch generic Exceptions anymore we don't need to check for ignored exceptions as they should be raised by default since they aren't caught.
@@ -10,7 +10,7 @@ Right now, doesn't provide much, need to change that down the line __all__ = ("base", "wrapper", "dynamic_getattr_dict") from snakeoil import klass -from snakeoil.compatibility import cmp, IGNORED_EXCEPTIONS +from snakeoil.compatibility import cmp from pkgcore import exceptions as base_errors from pkgcore.operations import format @@ -89,8 +89,6 @@ def dynamic_getattr_dict(self, attr): val = functor(self) object.__setattr__(self, attr, val) return val - except IGNORED_EXCEPTIONS: - raise except errors.MetadataException as e: if e.attr == attr: raise
Replace default strings ' ' with ... This fixes a problem with pytype.
@@ -238,7 +238,7 @@ class str(Sequence[str]): def capitalize(self) -> str: ... def casefold(self) -> str: ... - def center(self, width: int, fillchar: str = ' ') -> str: ... + def center(self, width: int, fillchar: str = ...) -> str: ... def count(self, x: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ... def encode(self, encoding: str = 'utf-8', errors: str = 'strict') -> bytes: ... def endswith(self, suffix: Union[str, Tuple[str, ...]], start: Optional[int] = None, @@ -260,14 +260,14 @@ class str(Sequence[str]): def istitle(self) -> bool: ... def isupper(self) -> bool: ... def join(self, iterable: Iterable[str]) -> str: ... - def ljust(self, width: int, fillchar: str = ' ') -> str: ... + def ljust(self, width: int, fillchar: str = ...) -> str: ... def lower(self) -> str: ... def lstrip(self, chars: Optional[str] = None) -> str: ... def partition(self, sep: str) -> Tuple[str, str, str]: ... def replace(self, old: str, new: str, count: int = -1) -> str: ... def rfind(self, sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ... def rindex(self, sub: str, __start: Optional[int] = ..., __end: Optional[int] = ...) -> int: ... - def rjust(self, width: int, fillchar: str = ' ') -> str: ... + def rjust(self, width: int, fillchar: str = ...) -> str: ... def rpartition(self, sep: str) -> Tuple[str, str, str]: ... def rsplit(self, sep: Optional[str] = None, maxsplit: int = -1) -> List[str]: ... def rstrip(self, chars: Optional[str] = None) -> str: ... @@ -782,7 +782,7 @@ def filter(function: Optional[Callable[[_T], Any]], iterable: Iterable[_T]) -> Iterator[_T]: ... @overload def filter(function: None, iterable: Iterable[Optional[_T]]) -> Iterator[_T]: ... -def format(o: object, format_spec: str = '') -> str: ... +def format(o: object, format_spec: str = ...) -> str: ... def getattr(o: Any, name: str, default: Any = ...) -> Any: ... def globals() -> Dict[str, Any]: ... def hasattr(o: Any, name: str) -> bool: ... @@ -834,7 +834,7 @@ else: def ord(c: Union[str, bytes, bytearray]) -> int: ... # TODO: in Python 3.2, print() does not support flush -def print(*values: Any, sep: str = ' ', end: str = '\n', file: Optional[IO[str]] = None, flush: bool = False) -> None: ... +def print(*values: Any, sep: str = ..., end: str = ..., file: Optional[IO[str]] = None, flush: bool = False) -> None: ... @overload def pow(x: int, y: int) -> Any: ... # The return type can be int or float, depending on y @overload
Refactor w to frequency We have established the use of frequency instead of w to these arguments.
@@ -36,32 +36,32 @@ bokeh_colors = bp.RdGy[11] class _Coefficient: - def __init__(self, coefficient, w=None, interpolated=None): + def __init__(self, coefficient, frequency=None, interpolated=None): if isinstance(coefficient, (int, float)): - if w is not None and type(w) != float: - coefficient = [coefficient for _ in range(len(w))] + if frequency is not None and type(frequency) != float: + coefficient = [coefficient for _ in range(len(frequency))] else: coefficient = [coefficient] self.coefficient = coefficient - self.w = w + self.frequency = frequency if len(self.coefficient) > 1: try: with warnings.catch_warnings(): warnings.simplefilter("ignore") self.interpolated = interpolate.UnivariateSpline( - self.w, self.coefficient + self.frequency, self.coefficient ) # dfitpack.error is not exposed by scipy # so a bare except is used except: try: - if len(self.w) in (2, 3): + if len(self.frequency) in (2, 3): self.interpolated = interpolate.interp1d( - self.w, + self.frequency, self.coefficient, - kind=len(self.w) - 1, + kind=len(self.frequency) - 1, fill_value="extrapolate", ) except: @@ -84,11 +84,14 @@ class _Coefficient: coef.append("{:.2e}".format(i)) return f"{coef}" + # def __getitem__(self, item): + # pass + def plot(self, ax=None, **kwargs): if ax is None: ax = plt.gca() - w_range = np.linspace(min(self.w), max(self.w), 30) + w_range = np.linspace(min(self.frequency), max(self.frequency), 30) ax.plot(w_range, self.interpolated(w_range), **kwargs) ax.set_xlabel("Speed (rad/s)") @@ -212,7 +215,7 @@ class BearingElement(Element): for arg in args: if arg[0] == "k": coefficients[arg] = _Stiffness_Coefficient( - coefficient=args_dict[arg], w=args_dict["frequency"] + coefficient=args_dict[arg], frequency=args_dict["frequency"] ) else: coefficients[arg] = _Damping_Coefficient( @@ -1700,7 +1703,7 @@ class BearingElement6DoF(BearingElement): for arg in new_args: if arg[0] == "k": coefficients[arg] = _Stiffness_Coefficient( - coefficient=args_dict[arg], w=None + coefficient=args_dict[arg], frequency=None ) else: coefficients[arg] = _Damping_Coefficient(args_dict[arg], None)
Update Jenkinsfile Reduce tests
@@ -23,11 +23,7 @@ pipeline { withPythonEnv('/home/jenkins/allvenvs/') { sh 'COVERAGE_FILE=report/cov/coverage1 mpirun -n 1 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report1.xml heat/' sh 'COVERAGE_FILE=report/cov/coverage2 mpirun -n 2 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report2.xml heat/' - sh 'COVERAGE_FILE=report/cov/coverage3 mpirun -n 3 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report3.xml heat/' - sh 'COVERAGE_FILE=report/cov/coverage4 mpirun -n 4 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report4.xml heat/' sh 'COVERAGE_FILE=report/cov/coverage5 mpirun -n 5 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report5.xml heat/' - sh 'COVERAGE_FILE=report/cov/coverage6 mpirun -n 6 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report6.xml heat/' - sh 'COVERAGE_FILE=report/cov/coverage7 mpirun -n 7 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report7.xml heat/' sh 'COVERAGE_FILE=report/cov/coverage8 mpirun -n 8 coverage run --source=heat --parallel-mode -m pytest --junitxml=report/test/report8.xml heat/' } }
Update ursnif.txt Minus what we already have.
@@ -3445,6 +3445,19 @@ l4fnses.com gstat.coneybucks.com +# Reference: https://twitter.com/reecdeep/status/1283675503552008192 + +gstat.secundato.com +gstat.secundamo.com +gstat.premiamo.com +gstat.securezzas.com +gstat.securanto.com +gstat.secundato.net +gstat.securezzis.net +gstat.securanto.net +gstat.premiamo.eu +gstat.securezal.xyz + # Generic trails /a.aspx?redir=1&clientUuid=
Fix code generation for ".domain" when there is no default logic binder TN:
@@ -283,12 +283,18 @@ class DomainExpr(ComputingExpr): static_type = T.EquationType def __init__(self, domain, logic_var_expr, abstract_expr=None): + from langkit.compile_context import get_context + self.domain = domain ":type: ResolvedExpression" self.logic_var_expr = logic_var_expr ":type: ResolvedExpression" + # Generated code relies on the instantiation of a logic binder package + # for the default case (no convertion nor equality properties). + get_context().do_generate_logic_binder() + super(DomainExpr, self).__init__('Domain_Equation', abstract_expr=abstract_expr)
GafferUI.showURL : Use "xdg-open" on linux (if available) This avoids a problem whereby the current release of PySide2 totally omits the binding for QDesktopServices.
import os import sys +import distutils.spawn import GafferUI @@ -45,7 +46,13 @@ QtGui = GafferUI._qtImport( "QtGui" ) def showURL( url ) : + opener = None if sys.platform == "darwin" : - os.system( "open \"" + url + "\"" ) + opener = "open" + elif "linux" in sys.platform : + opener = distutils.spawn.find_executable( "xdg-open" ) + + if opener : + os.system( "{0} \"{1}\"".format( opener, url ) ) else : QtGui.QDesktopServices.openUrl( QtCore.QUrl( url, QtCore.QUrl.TolerantMode ) )
Update test_dbscan.py added max_bytes_per_batch variable in the code
@@ -41,6 +41,7 @@ dataset_names = ['noisy_moons', 'varied', 'aniso', 'blobs',
                  'noisy_circles', 'no_structure']
 
 
+@pytest.mark.parametrize('max_bytes_per_batch',[1e9, 5e9])
 @pytest.mark.parametrize('datatype', [np.float32, np.float64])
 @pytest.mark.parametrize('input_type', ['ndarray'])
 @pytest.mark.parametrize('use_handle', [True, False])
@@ -49,7 +50,7 @@ dataset_names = ['noisy_moons', 'varied', 'aniso', 'blobs',
                             quality_param(100),
                             stress_param(1000)])
 def test_dbscan(datatype, input_type, use_handle,
-                nrows, ncols):
+                nrows, ncols, max_bytes_per_batch):
     n_samples = nrows
     n_feats = ncols
     X, y = make_blobs(n_samples=n_samples,
@@ -57,7 +58,7 @@ def test_dbscan(datatype, input_type, use_handle,
 
     handle, stream = get_handle(use_handle)
     cudbscan = cuDBSCAN(handle=handle, eps=0.5, min_samples=2,
-                        )
+                        max_bytes_per_batch=max_bytes_per_batch)
 
     if input_type == 'dataframe':
         X = pd.DataFrame(
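
A self-contained reminder of how stacked parametrize decorators combine (placeholder test body, not cuML code): each new decorator multiplies the existing case matrix, so the two max_bytes_per_batch values run against every prior combination.

import pytest

@pytest.mark.parametrize('max_bytes_per_batch', [1e9, 5e9])
@pytest.mark.parametrize('use_handle', [True, False])
def test_parametrize_sketch(max_bytes_per_batch, use_handle):
    # pytest generates 2 x 2 = 4 test cases from the stacked decorators
    assert max_bytes_per_batch > 0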
ebd/ebuild-daemon.bash: add quotes around unknown coms/lines To make empty data vs whitespace vs garbage easier to be identified.
@@ -263,7 +263,7 @@ __ebd_process_ebuild_phases() { __ebd_write_line "yep!" ;; *) - echo "received unknown com during phase processing: ${line}" >&2 + echo "received unknown com during phase processing: \"${line}\"" >&2 exit 1 ;; esac @@ -407,7 +407,7 @@ __ebd_main_loop() { __ebd_write_line "yep!" ;; *) - echo "received unknown com: ${com}" >&2 + echo "received unknown com: \"${com}\"" >&2 exit 1 ;; esac
PR tweaks per review Simplify the Py2 check for an existing-but-old cachedir by using any and a generator expression. Move an import to module scope.
@@ -35,6 +35,7 @@ import sys import SCons.Action import SCons.Warnings +from SCons.Util import PY3 cache_enabled = True cache_debug = False @@ -154,7 +155,6 @@ class CacheDir(object): if path is None: return - from SCons.Util import PY3 if PY3: self._readconfig3(path) else: @@ -204,9 +204,9 @@ class CacheDir(object): """ Python2 version of reading cache config. - See if there's a config file in the cache directory. If there is, + See if there is a config file in the cache directory. If there is, use it. If there isn't, and the directory exists and isn't empty, - produce a warning. If the directory doesn't exist or is empty, + produce a warning. If the directory does not exist or is empty, write a config file. :param path: path to the cache directory @@ -215,14 +215,13 @@ class CacheDir(object): if not os.path.exists(config_file): # A note: There is a race hazard here, if two processes start and # attempt to create the cache directory at the same time. However, - # python doesn't really give you the option to do exclusive file - # creation (it doesn't even give you the option to error on opening - # an existing file for writing...). The ordering of events here - # as an attempt to alleviate this, on the basis that it's a pretty - # unlikely occurence (it'd require two builds with a brand new cache + # Python 2.x does not give you the option to do exclusive file + # creation (not even the option to error on opening ad existing + # file for writing...). The ordering of events here as an attempt + # to alleviate this, on the basis that it's a pretty unlikely + # occurence (would require two builds with a brand new cache # directory) - #if os.path.isdir(path) and len(os.listdir(path)) != 0: - if os.path.isdir(path) and len([f for f in os.listdir(path) if os.path.basename(f) != "config"]) != 0: + if os.path.isdir(path) and any(f != "config" for f in os.listdir(path)): self.config['prefix_len'] = 1 # When building the project I was testing this on, the warning # was output over 20 times. That seems excessive
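
A side-by-side of the two checks being swapped above, on a made-up listing: the generator form short-circuits at the first non-config entry instead of building a full list just to test its length.

import os

entries = ["config", "A1", "B2"]  # made-up os.listdir() result

old_way = len([f for f in entries if os.path.basename(f) != "config"]) != 0
new_way = any(f != "config" for f in entries)  # stops scanning at "A1"
print(old_way, new_way)  # True True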
(antocuni) Cherry-pick commit from PR: Overwrite stub file for universal ABI extension. Added a comment to the template that the file is autogenerated
@@ -100,7 +100,11 @@ def handle_hpy_ext_modules(dist, attr, hpy_ext_modules): _HPY_UNIVERSAL_MODULE_STUB_TEMPLATE = """ +# DO NOT EDIT THIS FILE! +# This file is automatically generated by hpy + def __bootstrap__(): + import sys, pkg_resources from hpy.universal import load ext_filepath = pkg_resources.resource_filename(__name__, {ext_file!r}) @@ -258,8 +262,7 @@ class build_hpy_ext_mixin: log.info( "writing hpy universal stub loader for %s to %s", ext._full_name, stub_file) - if compile and os.path.exists(stub_file): - raise DistutilsError(stub_file + " already exists! Please delete.") + ext_file = os.path.basename(ext._file_name) module_name = ext_file.split(".")[0] if not self.dry_run:
Fix linter error and add comments: storing info in validator's own db
@@ -75,6 +75,7 @@ class Validator(BaseService): self.peer_pool = peer_pool self.privkey = privkey self.event_bus = event_bus + # TODO: `latest_proposed_epoch` should be written into/read from validator's own db self.latest_proposed_epoch = genesis_epoch self.slots_per_epoch = slots_per_epoch @@ -127,6 +128,9 @@ class Validator(BaseService): state: BeaconState, state_machine: BaseBeaconStateMachine, head_block: BaseBeaconBlock) -> BaseBeaconBlock: + # TODO: Proposed block should be written into validator's own db. + # Before proposing, validator should check it's own db if block has + # been proposed for this epoch. block = self._make_proposing_block(slot, state, state_machine, head_block) self.logger.debug( bold_green(f"proposing block, block={block}")
Removed incident tbd-tbd-2 It is identical to incident wa-seattle-12.
@@ -10,15 +10,3 @@ id: tbd-tbd-1 * https://twitter.com/perfectlyg0lden/status/1267014293628870656 - -### Officer beats a protestor while pinning him on the ground | - -At the beginning of this video, an officer can be seen punching a protestor in the head while pinning him to the ground. - -tags: punch, tackle - -id: tbd-tbd-2 - -**Links** - -* https://twitter.com/samjwc/status/1267355060666654720
Simplify travis build Should speed up build times as well.
@@ -4,17 +4,11 @@ env: HYPOTHESIS_PROFILE=ci matrix: include: - python: "3.6" - env: TEST_TYPE=check - - python: "3.6" - env: TEST_TYPE=typecheck - - python: "3.6" - env: TEST_TYPE=coverage - - python: "2.7" - env: TEST_TYPE=coverage + env: TEST_TYPE=prcheck - python: "2.7" - env: TEST_TYPE=check + env: TEST_TYPE=prcheck-py2 install: - - pip install -r requirements-dev.txt + - pip install -r requirements-dev.txt -r requirements-docs.txt - pip install -e . script: - make $TEST_TYPE
Add auto_requires and auto_optional for components. new_component_type uses auto_requires and auto_optional to extend the requires and optional specs for decorators it produces. These decorators give their components automatic dependencies that extend whatever is specified when they're used.
@@ -418,7 +418,12 @@ def register_component( register_consumer(component) -def new_component_type(name=None, shared=True, cluster=False, emitter=False): +def new_component_type(name=None, + auto_requires=[], + auto_optional=[], + shared=True, + cluster=False, + emitter=False): """ Factory that creates component decorators. @@ -426,17 +431,29 @@ def new_component_type(name=None, shared=True, cluster=False, emitter=False): rules, cluster rules, etc. They don't yet define mappers or cluster_mappers. Args: - name (str): the name of the type of component this decorator will define - shared (bool): should the component be used outside its defining module - cluster (bool): should the component run for multi-node archives - emitter (bool): the components returns make_response(...) + name (str): the name of the component type the produced decorator + will define + auto_requires (list): All decorated components automatically have + this requires spec. Anything specified when decorating a component + is added to this spec. + auto_optional (list): All decorated components automatically have + this optional spec. Anything specified when decorating a component + is added to this spec. + shared (bool): the component should be used outside its defining module? + cluster (bool): the component should be run for multi-node archives? + emitter (bool): the components returns make_response(...)? Returns: - A decorator function for a given type of component + A decorator function used to define components of the new type """ def decorator(requires=None, optional=None): is_shared = shared + requires = requires or [] + optional = optional or [] + + requires.extend(auto_requires) + optional.extend(auto_optional) def _f(func): @wraps(func)
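
A stripped-down, self-contained stand-in for the factory above (not the insights-core API; the names below are invented) showing how auto_requires extends whatever each decorated component asks for.

def make_component_type(auto_requires=()):
    def component(requires=()):
        merged = list(requires) + list(auto_requires)  # decoration-time merge of the specs
        def register(func):
            func.requires = merged
            return func
        return register
    return component

rule = make_component_type(auto_requires=["hostname"])  # invented component type

@rule(requires=["uptime"])
def my_rule():
    pass

print(my_rule.requires)  # ['uptime', 'hostname']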
TST: updated data variable unit test Updated data variable unit test to include four data variables.
@@ -433,13 +433,16 @@ class TestBasics(): self.testInst.load(self.ref_time.year, self.ref_doy) # Ensure the desired data variable is present and delete all others - self.testInst.data = self.testInst.data[['mlt']] + # 4-6 variables are needed to test all lines; choose the lesser limit + nvar = 4 + self.testInst.data = self.testInst.data[self.testInst.variables[:nvar]] # Test output with one data variable self.out = self.testInst.__str__() - assert self.out.find('Number of variables:') > 0 + assert self.out.find('Number of variables: 4') > 0 assert self.out.find('Variable Names') > 0 - assert self.out.find('mlt') > 0 + for n in range(nvar): + assert self.out.find(self.testInst.variables[n]) > 0 # ------------------------------------------------------------------------- #
llvm: Query and use local CPU features and name. Enables AVX instructions on CPUs that support them. No observable performance improvement (a few % degradation on skylake + llvm3.9)
@@ -159,10 +159,12 @@ binding.initialize_native_target() # but why? binding.initialize_native_asmprinter() +__features = binding.get_host_cpu_features().flatten() +__cpu_name = binding.get_host_cpu_name() # Create compilation target, use default triple __target = binding.Target.from_default_triple() -__target_machine = __target.create_target_machine(opt=3) +__target_machine = __target.create_target_machine(cpu = __cpu_name, features = __features, opt = 3) # And an execution engine with an empty backing module # TODO: why is empty backing mod necessary?
discoveryplus: fix detecting seasons fixes:
@@ -150,7 +150,7 @@ class Dplay(Service): showid = None for what in res.json()["included"]: - if "attributes" in what and "alias" in what["attributes"] and "season" in what["attributes"]["alias"]: + if "attributes" in what and "alias" in what["attributes"] and "grid" in what["attributes"]["alias"]: programid = what["id"] for ses in what["attributes"]["component"]["filters"]: if ses["id"] == "seasonNumber":
ebuild.ebd_ipc: try to follow the old behavior for dosym/dohard Mostly trying to approximate what `ln -snfT` would result in.
@@ -639,20 +639,22 @@ class _Symlink(_InstallWrapper): arg_parser.add_argument('target') def run(self, args): - source = pjoin(self.op.ED, args.source.lstrip(os.path.sep)) - target = pjoin(self.op.ED, args.target.lstrip(os.path.sep)) - dest_dir = args.target.rsplit(os.path.sep, 1)[0] + if dest_dir != args.target: self.install_dirs([dest_dir]) + target = pjoin(self.op.ED, args.target.lstrip(os.path.sep)) + with chdir(self.op.ED): try: try: - self._link(source, target) + self._link(args.source, target) except FileExistsError: + # overwrite target if it exists os.unlink(target) - self._link(source, target) + self._link(args.source, target) except OSError as e: - raise IpcCommandError(f'failed creating link: {target!r}: {e.strerror}') + raise IpcCommandError( + f'failed creating link: {args.source!r} -> {args.target!r}: {e.strerror}') class Dosym(_Symlink):