problem_id stringlengths 18-22 | source stringclasses 1 value | task_type stringclasses 1 value | in_source_id stringlengths 13-58 | prompt stringlengths 1.71k-18.9k | golden_diff stringlengths 145-5.13k | verification_info stringlengths 465-23.6k | num_tokens_prompt int64 556-4.1k | num_tokens_diff int64 47-1.02k |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_12820 | rasdani/github-patches | git_diff | ESMCI__cime-2508 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
settings from env_test are not being reapplied to tests
It seems that some recent change causes the settings from env_test.xml not to be applied when a test is resubmitted. This is supposed to be tested in scripts_regression_tests so a second question is - why is that test not failing?
</issue>
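The patch recorded for this problem (the diff further down this row) narrows the RUNDIR/CONTINUE_RUN consistency check so that it no longer blocks a resubmitted test job. A condensed, illustrative sketch of that guard, reusing the names from `case_submit.py` below (the helper function name is invented for illustration):
```python
import os
from CIME.utils import expect  # same import used in case_submit.py below

def _check_rundir_unless_test(case, job):
    # Illustrative helper only. Skipping the check for "case.test" lets a
    # resubmitted test reinitialise itself from env_test.xml instead of being
    # treated as a plain continuation run.
    if job != "case.test":
        rundir = case.get_value("RUNDIR")
        continue_run = case.get_value("CONTINUE_RUN")
        expect(os.path.isdir(rundir) or not continue_run,
               " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
```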
<code>
[start of scripts/lib/CIME/case/case_submit.py]
1 #!/usr/bin/env python
2
3 """
4 case.submit - Submit a cesm workflow to the queueing system or run it
5 if there is no queueing system. A cesm workflow may include multiple
6 jobs.
7 submit, check_case and check_da_settings are members of class Case in file case.py
8 """
9 import socket
10 from CIME.XML.standard_module_setup import *
11 from CIME.utils import expect, run_and_log_case_status, verbatim_success_msg
12 from CIME.locked_files import unlock_file, lock_file
13 from CIME.test_status import *
14
15 logger = logging.getLogger(__name__)
16
17 def _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,
18 skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
19 if job is None:
20 job = case.get_primary_job()
21
22 rundir = case.get_value("RUNDIR")
23 continue_run = case.get_value("CONTINUE_RUN")
24 expect(os.path.isdir(rundir) or not continue_run,
25 " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
26
27 # if case.submit is called with the no_batch flag then we assume that this
28 # flag will stay in effect for the duration of the RESUBMITs
29 env_batch = case.get_env("batch")
30 if resubmit:
31 if env_batch.get_batch_system_type() == "none":
32 no_batch = True
33
34 # This is a resubmission, do not reinitialize test values
35 if job == "case.test":
36 case.set_value("IS_FIRST_RUN", False)
37
38 resub = case.get_value("RESUBMIT")
39 logger.info("Submitting job '{}', resubmit={:d}".format(job, resub))
40 case.set_value("RESUBMIT", resub-1)
41 if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"):
42 case.set_value("CONTINUE_RUN", True)
43
44 else:
45 if job == "case.test":
46 case.set_value("IS_FIRST_RUN", True)
47
48 if no_batch:
49 batch_system = "none"
50 else:
51 batch_system = env_batch.get_batch_system_type()
52
53 case.set_value("BATCH_SYSTEM", batch_system)
54
55 env_batch_has_changed = False
56 try:
57 case.check_lockedfile(os.path.basename(env_batch.filename))
58 except SystemExit:
59 env_batch_has_changed = True
60
61 if env_batch.get_batch_system_type() != "none" and env_batch_has_changed:
62 # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc)
63 logger.warning(\
64 """
65 env_batch.xml appears to have changed, regenerating batch scripts
66 manual edits to these file will be lost!
67 """)
68 env_batch.make_all_batch_files(case)
69
70 unlock_file(os.path.basename(env_batch.filename))
71 lock_file(os.path.basename(env_batch.filename))
72
73 if job == case.get_primary_job():
74 case.check_case()
75 case.check_DA_settings()
76 if case.get_value("MACH") == "mira":
77 with open(".original_host", "w") as fd:
78 fd.write( socket.gethostname())
79
80 #Load Modules
81 case.load_env()
82
83 case.flush()
84
85 logger.warning("submit_jobs {}".format(job))
86 job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl,
87 prereq=prereq, mail_user=mail_user,
88 mail_type=mail_type, batch_args=batch_args)
89
90 xml_jobids = []
91 for jobname, jobid in job_ids.items():
92 logger.info("Submitted job {} with id {}".format(jobname, jobid))
93 if jobid:
94 xml_jobids.append("{}:{}".format(jobname, jobid))
95
96 xml_jobid_text = ", ".join(xml_jobids)
97 if xml_jobid_text:
98 case.set_value("JOB_IDS", xml_jobid_text)
99
100 return xml_jobid_text
101
102 def submit(self, job=None, no_batch=False, prereq=None, resubmit=False,
103 skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):
104 if self.get_value("TEST"):
105 caseroot = self.get_value("CASEROOT")
106 casebaseid = self.get_value("CASEBASEID")
107 # This should take care of the race condition where the submitted job
108 # begins immediately and tries to set RUN phase. We proactively assume
109 # a passed SUBMIT phase. If this state is already PASS, don't set it again
110 # because then we'll lose RUN phase info if it's there. This info is important
111 # for system_tests_common to know if it needs to reinitialize the test or not.
112 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
113 phase_status = ts.get_status(SUBMIT_PHASE)
114 if phase_status != TEST_PASS_STATUS:
115 ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)
116
117 try:
118 functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,
119 resubmit=resubmit, skip_pnl=skip_pnl,
120 mail_user=mail_user, mail_type=mail_type,
121 batch_args=batch_args)
122 run_and_log_case_status(functor, "case.submit", caseroot=self.get_value("CASEROOT"),
123 custom_success_msg_functor=verbatim_success_msg)
124 except:
125 # If something failed in the batch system, make sure to mark
126 # the test as failed if we are running a test.
127 if self.get_value("TEST"):
128 with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:
129 ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)
130
131 raise
132
133 def check_case(self):
134 self.check_lockedfiles()
135 self.create_namelists() # Must be called before check_all_input_data
136 logger.info("Checking that inputdata is available as part of case submission")
137 self.check_all_input_data()
138
139 expect(self.get_value("BUILD_COMPLETE"), "Build complete is "
140 "not True please rebuild the model by calling case.build")
141 logger.info("Check case OK")
142
143 def check_DA_settings(self):
144 script = self.get_value("DATA_ASSIMILATION_SCRIPT")
145 cycles = self.get_value("DATA_ASSIMILATION_CYCLES")
146 if len(script) > 0 and os.path.isfile(script) and cycles > 0:
147 logger.info("Data Assimilation enabled using script {} with {:d} cycles".format(script,cycles))
148
[end of scripts/lib/CIME/case/case_submit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py
--- a/scripts/lib/CIME/case/case_submit.py
+++ b/scripts/lib/CIME/case/case_submit.py
@@ -20,9 +20,10 @@
job = case.get_primary_job()
rundir = case.get_value("RUNDIR")
- continue_run = case.get_value("CONTINUE_RUN")
- expect(os.path.isdir(rundir) or not continue_run,
- " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
+ if job != "case.test":
+ continue_run = case.get_value("CONTINUE_RUN")
+ expect(os.path.isdir(rundir) or not continue_run,
+ " CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir))
# if case.submit is called with the no_batch flag then we assume that this
# flag will stay in effect for the duration of the RESUBMITs
| {"golden_diff": "diff --git a/scripts/lib/CIME/case/case_submit.py b/scripts/lib/CIME/case/case_submit.py\n--- a/scripts/lib/CIME/case/case_submit.py\n+++ b/scripts/lib/CIME/case/case_submit.py\n@@ -20,9 +20,10 @@\n job = case.get_primary_job()\n \n rundir = case.get_value(\"RUNDIR\")\n- continue_run = case.get_value(\"CONTINUE_RUN\")\n- expect(os.path.isdir(rundir) or not continue_run,\n- \" CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n+ if job != \"case.test\":\n+ continue_run = case.get_value(\"CONTINUE_RUN\")\n+ expect(os.path.isdir(rundir) or not continue_run,\n+ \" CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n \n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n", "issue": "settings from env_test are not being reapplied to tests\nIt seems that some recent change causes the settings from env_test.xml not to be applied when a test is resubmitted. This is supposed to be tested in scripts_regression_tests so a second question is - why is that test not failing?\n", "before_files": [{"content": "#!/usr/bin/env python\n\n\"\"\"\ncase.submit - Submit a cesm workflow to the queueing system or run it\nif there is no queueing system. A cesm workflow may include multiple\njobs.\nsubmit, check_case and check_da_settings are members of class Case in file case.py\n\"\"\"\nimport socket\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.utils import expect, run_and_log_case_status, verbatim_success_msg\nfrom CIME.locked_files import unlock_file, lock_file\nfrom CIME.test_status import *\n\nlogger = logging.getLogger(__name__)\n\ndef _submit(case, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if job is None:\n job = case.get_primary_job()\n\n rundir = case.get_value(\"RUNDIR\")\n continue_run = case.get_value(\"CONTINUE_RUN\")\n expect(os.path.isdir(rundir) or not continue_run,\n \" CONTINUE_RUN is true but RUNDIR {} does not exist\".format(rundir))\n\n # if case.submit is called with the no_batch flag then we assume that this\n # flag will stay in effect for the duration of the RESUBMITs\n env_batch = case.get_env(\"batch\")\n if resubmit:\n if env_batch.get_batch_system_type() == \"none\":\n no_batch = True\n\n # This is a resubmission, do not reinitialize test values\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", False)\n\n resub = case.get_value(\"RESUBMIT\")\n logger.info(\"Submitting job '{}', resubmit={:d}\".format(job, resub))\n case.set_value(\"RESUBMIT\", resub-1)\n if case.get_value(\"RESUBMIT_SETS_CONTINUE_RUN\"):\n case.set_value(\"CONTINUE_RUN\", True)\n\n else:\n if job == \"case.test\":\n case.set_value(\"IS_FIRST_RUN\", True)\n\n if no_batch:\n batch_system = \"none\"\n else:\n batch_system = env_batch.get_batch_system_type()\n\n case.set_value(\"BATCH_SYSTEM\", batch_system)\n\n env_batch_has_changed = False\n try:\n case.check_lockedfile(os.path.basename(env_batch.filename))\n except SystemExit:\n env_batch_has_changed = True\n\n if env_batch.get_batch_system_type() != \"none\" and env_batch_has_changed:\n # May need to regen batch files if user made batch setting changes (e.g. 
walltime, queue, etc)\n logger.warning(\\\n\"\"\"\nenv_batch.xml appears to have changed, regenerating batch scripts\nmanual edits to these file will be lost!\n\"\"\")\n env_batch.make_all_batch_files(case)\n\n unlock_file(os.path.basename(env_batch.filename))\n lock_file(os.path.basename(env_batch.filename))\n\n if job == case.get_primary_job():\n case.check_case()\n case.check_DA_settings()\n if case.get_value(\"MACH\") == \"mira\":\n with open(\".original_host\", \"w\") as fd:\n fd.write( socket.gethostname())\n\n #Load Modules\n case.load_env()\n\n case.flush()\n\n logger.warning(\"submit_jobs {}\".format(job))\n job_ids = case.submit_jobs(no_batch=no_batch, job=job, skip_pnl=skip_pnl,\n prereq=prereq, mail_user=mail_user,\n mail_type=mail_type, batch_args=batch_args)\n\n xml_jobids = []\n for jobname, jobid in job_ids.items():\n logger.info(\"Submitted job {} with id {}\".format(jobname, jobid))\n if jobid:\n xml_jobids.append(\"{}:{}\".format(jobname, jobid))\n\n xml_jobid_text = \", \".join(xml_jobids)\n if xml_jobid_text:\n case.set_value(\"JOB_IDS\", xml_jobid_text)\n\n return xml_jobid_text\n\ndef submit(self, job=None, no_batch=False, prereq=None, resubmit=False,\n skip_pnl=False, mail_user=None, mail_type=None, batch_args=None):\n if self.get_value(\"TEST\"):\n caseroot = self.get_value(\"CASEROOT\")\n casebaseid = self.get_value(\"CASEBASEID\")\n # This should take care of the race condition where the submitted job\n # begins immediately and tries to set RUN phase. We proactively assume\n # a passed SUBMIT phase. If this state is already PASS, don't set it again\n # because then we'll lose RUN phase info if it's there. This info is important\n # for system_tests_common to know if it needs to reinitialize the test or not.\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n phase_status = ts.get_status(SUBMIT_PHASE)\n if phase_status != TEST_PASS_STATUS:\n ts.set_status(SUBMIT_PHASE, TEST_PASS_STATUS)\n\n try:\n functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq,\n resubmit=resubmit, skip_pnl=skip_pnl,\n mail_user=mail_user, mail_type=mail_type,\n batch_args=batch_args)\n run_and_log_case_status(functor, \"case.submit\", caseroot=self.get_value(\"CASEROOT\"),\n custom_success_msg_functor=verbatim_success_msg)\n except:\n # If something failed in the batch system, make sure to mark\n # the test as failed if we are running a test.\n if self.get_value(\"TEST\"):\n with TestStatus(test_dir=caseroot, test_name=casebaseid) as ts:\n ts.set_status(SUBMIT_PHASE, TEST_FAIL_STATUS)\n\n raise\n\ndef check_case(self):\n self.check_lockedfiles()\n self.create_namelists() # Must be called before check_all_input_data\n logger.info(\"Checking that inputdata is available as part of case submission\")\n self.check_all_input_data()\n\n expect(self.get_value(\"BUILD_COMPLETE\"), \"Build complete is \"\n \"not True please rebuild the model by calling case.build\")\n logger.info(\"Check case OK\")\n\ndef check_DA_settings(self):\n script = self.get_value(\"DATA_ASSIMILATION_SCRIPT\")\n cycles = self.get_value(\"DATA_ASSIMILATION_CYCLES\")\n if len(script) > 0 and os.path.isfile(script) and cycles > 0:\n logger.info(\"Data Assimilation enabled using script {} with {:d} cycles\".format(script,cycles))\n", "path": "scripts/lib/CIME/case/case_submit.py"}]} | 2,357 | 228 |
gh_patches_debug_10696 | rasdani/github-patches | git_diff | Kinto__kinto-1138 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enforce the permission endpoint when the admin plugin is included.
Enforce the permission endpoint when the admin plugin is included.
</issue>
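The patch recorded further down ties this to the `includes` setting; a minimal sketch of that decision, using the same `settings` and `config` objects as `kinto/__init__.py` below (the standalone helper is only for illustration):
```python
from pyramid.settings import asbool  # same import used in kinto/__init__.py below

def _permissions_endpoint_enabled(settings, config):
    # Illustrative helper: the endpoint should be on when the admin plugin is
    # included or when the experimental flag is set, provided a permission
    # backend has been configured.
    is_admin_enabled = 'kinto.plugins.admin' in settings['includes']
    return ((is_admin_enabled or
             asbool(settings['experimental_permissions_endpoint'])) and
            hasattr(config.registry, 'permission'))
```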
<code>
[start of kinto/__init__.py]
1 import pkg_resources
2 import logging
3
4 import kinto.core
5 from pyramid.config import Configurator
6 from pyramid.settings import asbool
7 from pyramid.security import Authenticated, Everyone
8
9 from kinto.authorization import RouteFactory
10
11
12 # Module version, as defined in PEP-0396.
13 __version__ = pkg_resources.get_distribution(__package__).version
14
15 # Implemented HTTP API Version
16 HTTP_API_VERSION = '1.16'
17
18 # Main kinto logger
19 logger = logging.getLogger(__name__)
20
21
22 DEFAULT_SETTINGS = {
23 'flush_endpoint_enabled': False,
24 'retry_after_seconds': 3,
25 'cache_backend': 'kinto.core.cache.memory',
26 'permission_backend': 'kinto.core.permission.memory',
27 'storage_backend': 'kinto.core.storage.memory',
28 'project_docs': 'https://kinto.readthedocs.io/',
29 'bucket_create_principals': Authenticated,
30 'permissions_read_principals': Everyone,
31 'multiauth.authorization_policy': (
32 'kinto.authorization.AuthorizationPolicy'),
33 'experimental_collection_schema_validation': False,
34 'experimental_permissions_endpoint': False,
35 'http_api_version': HTTP_API_VERSION,
36 'bucket_id_generator': 'kinto.views.NameGenerator',
37 'collection_id_generator': 'kinto.views.NameGenerator',
38 'group_id_generator': 'kinto.views.NameGenerator',
39 'record_id_generator': 'kinto.views.RelaxedUUID'
40 }
41
42
43 def main(global_config, config=None, **settings):
44 if not config:
45 config = Configurator(settings=settings, root_factory=RouteFactory)
46
47 # Force project name, since it determines settings prefix.
48 config.add_settings({'kinto.project_name': 'kinto'})
49
50 kinto.core.initialize(config,
51 version=__version__,
52 default_settings=DEFAULT_SETTINGS)
53
54 settings = config.get_settings()
55
56 # Expose capability
57 schema_enabled = asbool(
58 settings['experimental_collection_schema_validation']
59 )
60 if schema_enabled:
61 config.add_api_capability(
62 "schema",
63 description="Validates collection records with JSON schemas.",
64 url="https://kinto.readthedocs.io/en/latest/api/1.x/"
65 "collections.html#collection-json-schema")
66
67 # Scan Kinto views.
68 kwargs = {}
69
70 flush_enabled = asbool(settings['flush_endpoint_enabled'])
71 if flush_enabled:
72 config.add_api_capability(
73 "flush_endpoint",
74 description="The __flush__ endpoint can be used to remove all "
75 "data from all backends.",
76 url="https://kinto.readthedocs.io/en/latest/configuration/"
77 "settings.html#activating-the-flush-endpoint")
78 else:
79 kwargs['ignore'] = ['kinto.views.flush']
80
81 # Permissions endpoint enabled if permission backend is setup.
82 permissions_endpoint_enabled = (
83 asbool(settings['experimental_permissions_endpoint']) and
84 hasattr(config.registry, 'permission'))
85 if permissions_endpoint_enabled:
86 config.add_api_capability(
87 "permissions_endpoint",
88 description="The permissions endpoint can be used to list all "
89 "user objects permissions.",
90 url="https://kinto.readthedocs.io/en/latest/configuration/"
91 "settings.html#activating-the-permissions-endpoint")
92 else:
93 kwargs.setdefault('ignore', []).append('kinto.views.permissions')
94
95 config.scan("kinto.views", **kwargs)
96
97 app = config.make_wsgi_app()
98
99 # Install middleware (no-op if disabled)
100 return kinto.core.install_middlewares(app, settings)
101
[end of kinto/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/__init__.py b/kinto/__init__.py
--- a/kinto/__init__.py
+++ b/kinto/__init__.py
@@ -79,8 +79,9 @@
kwargs['ignore'] = ['kinto.views.flush']
# Permissions endpoint enabled if permission backend is setup.
+ is_admin_enabled = 'kinto.plugins.admin' in settings['includes']
permissions_endpoint_enabled = (
- asbool(settings['experimental_permissions_endpoint']) and
+ (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and
hasattr(config.registry, 'permission'))
if permissions_endpoint_enabled:
config.add_api_capability(
| {"golden_diff": "diff --git a/kinto/__init__.py b/kinto/__init__.py\n--- a/kinto/__init__.py\n+++ b/kinto/__init__.py\n@@ -79,8 +79,9 @@\n kwargs['ignore'] = ['kinto.views.flush']\n \n # Permissions endpoint enabled if permission backend is setup.\n+ is_admin_enabled = 'kinto.plugins.admin' in settings['includes']\n permissions_endpoint_enabled = (\n- asbool(settings['experimental_permissions_endpoint']) and\n+ (is_admin_enabled or asbool(settings['experimental_permissions_endpoint'])) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n", "issue": "Enforce the permission endpoint when the admin plugin is included.\n\nEnforce the permission endpoint when the admin plugin is included.\n\n", "before_files": [{"content": "import pkg_resources\nimport logging\n\nimport kinto.core\nfrom pyramid.config import Configurator\nfrom pyramid.settings import asbool\nfrom pyramid.security import Authenticated, Everyone\n\nfrom kinto.authorization import RouteFactory\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution(__package__).version\n\n# Implemented HTTP API Version\nHTTP_API_VERSION = '1.16'\n\n# Main kinto logger\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_SETTINGS = {\n 'flush_endpoint_enabled': False,\n 'retry_after_seconds': 3,\n 'cache_backend': 'kinto.core.cache.memory',\n 'permission_backend': 'kinto.core.permission.memory',\n 'storage_backend': 'kinto.core.storage.memory',\n 'project_docs': 'https://kinto.readthedocs.io/',\n 'bucket_create_principals': Authenticated,\n 'permissions_read_principals': Everyone,\n 'multiauth.authorization_policy': (\n 'kinto.authorization.AuthorizationPolicy'),\n 'experimental_collection_schema_validation': False,\n 'experimental_permissions_endpoint': False,\n 'http_api_version': HTTP_API_VERSION,\n 'bucket_id_generator': 'kinto.views.NameGenerator',\n 'collection_id_generator': 'kinto.views.NameGenerator',\n 'group_id_generator': 'kinto.views.NameGenerator',\n 'record_id_generator': 'kinto.views.RelaxedUUID'\n}\n\n\ndef main(global_config, config=None, **settings):\n if not config:\n config = Configurator(settings=settings, root_factory=RouteFactory)\n\n # Force project name, since it determines settings prefix.\n config.add_settings({'kinto.project_name': 'kinto'})\n\n kinto.core.initialize(config,\n version=__version__,\n default_settings=DEFAULT_SETTINGS)\n\n settings = config.get_settings()\n\n # Expose capability\n schema_enabled = asbool(\n settings['experimental_collection_schema_validation']\n )\n if schema_enabled:\n config.add_api_capability(\n \"schema\",\n description=\"Validates collection records with JSON schemas.\",\n url=\"https://kinto.readthedocs.io/en/latest/api/1.x/\"\n \"collections.html#collection-json-schema\")\n\n # Scan Kinto views.\n kwargs = {}\n\n flush_enabled = asbool(settings['flush_endpoint_enabled'])\n if flush_enabled:\n config.add_api_capability(\n \"flush_endpoint\",\n description=\"The __flush__ endpoint can be used to remove all \"\n \"data from all backends.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-flush-endpoint\")\n else:\n kwargs['ignore'] = ['kinto.views.flush']\n\n # Permissions endpoint enabled if permission backend is setup.\n permissions_endpoint_enabled = (\n asbool(settings['experimental_permissions_endpoint']) and\n hasattr(config.registry, 'permission'))\n if permissions_endpoint_enabled:\n config.add_api_capability(\n 
\"permissions_endpoint\",\n description=\"The permissions endpoint can be used to list all \"\n \"user objects permissions.\",\n url=\"https://kinto.readthedocs.io/en/latest/configuration/\"\n \"settings.html#activating-the-permissions-endpoint\")\n else:\n kwargs.setdefault('ignore', []).append('kinto.views.permissions')\n\n config.scan(\"kinto.views\", **kwargs)\n\n app = config.make_wsgi_app()\n\n # Install middleware (no-op if disabled)\n return kinto.core.install_middlewares(app, settings)\n", "path": "kinto/__init__.py"}]} | 1,489 | 144 |
gh_patches_debug_6238 | rasdani/github-patches | git_diff | piskvorky__gensim-2106 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Turn off support of Google Style docstrings
**All docstrings should be refactored first**
To prevent contributors from using Google Style docstrings, we need to set
`napoleon_google_docstring = False`,
[like explained here](https://samnicholls.net/2016/06/15/how-to-sphinx-readthedocs/).
</issue>
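The change itself amounts to one napoleon setting in `docs/src/conf.py`; a sketch of the relevant lines (the explicit `napoleon_numpy_docstring = True` is shown only for clarity, since that is already the extension's default):
```python
# docs/src/conf.py: sphinxcontrib.napoleon configuration (sketch)
napoleon_google_docstring = False  # stop accepting Google-style docstrings
napoleon_numpy_docstring = True    # keep accepting numpy-style docstrings (default)
```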
<code>
[start of docs/src/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # gensim documentation build configuration file, created by
4 # sphinx-quickstart on Wed Mar 17 13:42:21 2010.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import os
15 import sys
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 sys.path.append(os.path.abspath('.'))
21
22 # -- General configuration -----------------------------------------------------
23
24 html_theme = 'gensim_theme'
25
26 # Add any Sphinx extension module names here, as strings. They can be extensions
27 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
28 extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
29 autoclass_content = "both"
30
31 # Add any paths that contain templates here, relative to this directory.
32 templates_path = ['_templates']
33
34 # The suffix of source filenames.
35 source_suffix = '.rst'
36
37 # The encoding of source files.
38 # source_encoding = 'utf-8'
39
40 # The master toctree document.
41 master_doc = 'indextoc'
42
43 # Additional templates that should be rendered to pages, maps page names to
44 # template names.
45 html_additional_pages = {'index': './_templates/indexcontent.html'}
46
47 # General information about the project.
48 project = u'gensim'
49 copyright = u'2009-now, Radim Řehůřek <me(at)radimrehurek.com>'
50
51 # The version info for the project you're documenting, acts as replacement for
52 # |version| and |release|, also used in various other places throughout the
53 # built documents.
54 #
55 # The short X.Y version.
56 version = '3.4'
57 # The full version, including alpha/beta/rc tags.
58 release = '3.4.0'
59
60 # The language for content autogenerated by Sphinx. Refer to documentation
61 # for a list of supported languages.
62 # language = None
63
64 # There are two options for replacing |today|: either, you set today to some
65 # non-false value, then it is used:
66 # today = ''
67 # Else, today_fmt is used as the format for a strftime call.
68 # today_fmt = '%B %d, %Y'
69
70 # List of documents that shouldn't be included in the build.
71 # unused_docs = []
72
73 # List of directories, relative to source directory, that shouldn't be searched
74 # for source files.
75 exclude_trees = ['_build']
76
77 # The reST default role (used for this markup: `text`) to use for all documents.
78 # default_role = None
79
80 # If true, '()' will be appended to :func: etc. cross-reference text.
81 # add_function_parentheses = True
82
83 # If true, the current module name will be prepended to all description
84 # unit titles (such as .. function::).
85 # add_module_names = True
86
87 # If true, sectionauthor and moduleauthor directives will be shown in the
88 # output. They are ignored by default.
89 # show_authors = False
90
91 # The name of the Pygments (syntax highlighting) style to use.
92 pygments_style = 'sphinx'
93
94 # A list of ignored prefixes for module index sorting.
95 # modindex_common_prefix = []
96
97
98 # -- Options for HTML output ---------------------------------------------------
99
100 # The theme to use for HTML and HTML Help pages. Major themes that come with
101 # Sphinx are currently 'default' and 'sphinxdoc'.
102 # html_theme = 'default'
103
104 # Theme options are theme-specific and customize the look and feel of a theme
105 # further. For a list of options available for each theme, see the
106 # documentation.
107 # main_colour = "#ffbbbb"
108
109 html_theme_options = {
110 # "rightsidebar": "false",
111 # "stickysidebar": "true",
112 # "bodyfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
113 # "headfont": "'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'",
114 # "sidebarbgcolor": "fuckyou",
115 # "footerbgcolor": "#771111",
116 # "relbarbgcolor": "#993333",
117 # "sidebartextcolor": "#000000",
118 # "sidebarlinkcolor": "#330000",
119 # "codebgcolor": "#fffff0",
120 # "headtextcolor": "#000080",
121 # "headbgcolor": "#f0f0ff",
122 # "bgcolor": "#ffffff",
123 }
124
125
126 # Add any paths that contain custom themes here, relative to this directory.
127 html_theme_path = ['.']
128
129 # The name for this set of Sphinx documents. If None, it defaults to
130 # "<project> v<release> documentation".
131 html_title = "gensim"
132
133 # A shorter title for the navigation bar. Default is the same as html_title.
134 # html_short_title = ''
135
136 # The name of an image file (relative to this directory) to place at the top
137 # of the sidebar.
138 # html_logo = None
139
140 # The name of an image file (within the static path) to use as favicon of the
141 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
142 # pixels large.
143 html_favicon = '_static/favicon.ico'
144
145 # Add any paths that contain custom static files (such as style sheets) here,
146 # relative to this directory. They are copied after the builtin static files,
147 # so a file named "default.css" will overwrite the builtin "default.css".
148 html_static_path = ['_static']
149
150 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
151 # using the given strftime format.
152 html_last_updated_fmt = '%b %d, %Y'
153
154 # If true, SmartyPants will be used to convert quotes and dashes to
155 # typographically correct entities.
156 # html_use_smartypants = True
157
158 # Custom sidebar templates, maps document names to template names.
159 html_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}
160 # html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}
161
162 # If false, no module index is generated.
163 # html_use_modindex = True
164
165 # If false, no index is generated.
166 # html_use_index = True
167
168 # If true, the index is split into individual pages for each letter.
169 html_split_index = False
170
171 # If true, links to the reST sources are added to the pages.
172 html_show_sourcelink = False
173
174 html_domain_indices = False
175
176 # If true, an OpenSearch description file will be output, and all pages will
177 # contain a <link> tag referring to it. The value of this option must be the
178 # base URL from which the finished HTML is served.
179 # html_use_opensearch = ''
180
181 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
182 # html_file_suffix = ''
183
184 # Output file base name for HTML help builder.
185 htmlhelp_basename = 'gensimdoc'
186
187 html_show_sphinx = False
188
189 # -- Options for LaTeX output --------------------------------------------------
190
191 # The paper size ('letter' or 'a4').
192 # latex_paper_size = 'letter'
193
194 # The font size ('10pt', '11pt' or '12pt').
195 # latex_font_size = '10pt'
196
197 # Grouping the document tree into LaTeX files. List of tuples
198 # (source start file, target name, title, author, documentclass [howto/manual]).
199 latex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim Řehůřek', 'manual')]
200
201 # The name of an image file (relative to this directory) to place at the top of
202 # the title page.
203 # latex_logo = None
204
205 # For "manual" documents, if this is true, then toplevel headings are parts,
206 # not chapters.
207 latex_use_parts = False
208
209 # Additional stuff for the LaTeX preamble.
210 # latex_preamble = ''
211
212 # Documents to append as an appendix to all manuals.
213 # latex_appendices = []
214
215 # If false, no module index is generated.
216 # latex_use_modindex = True
217
218 suppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']
219
[end of docs/src/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/docs/src/conf.py b/docs/src/conf.py
--- a/docs/src/conf.py
+++ b/docs/src/conf.py
@@ -28,6 +28,8 @@
extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']
autoclass_content = "both"
+napoleon_google_docstring = False # Disable support for google-style docstring
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
| {"golden_diff": "diff --git a/docs/src/conf.py b/docs/src/conf.py\n--- a/docs/src/conf.py\n+++ b/docs/src/conf.py\n@@ -28,6 +28,8 @@\n extensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']\n autoclass_content = \"both\"\n \n+napoleon_google_docstring = False # Disable support for google-style docstring\n+\n # Add any paths that contain templates here, relative to this directory.\n templates_path = ['_templates']\n", "issue": "Turn off support of Google Style docstrings\n**All docstrings should be refactored first**\r\n\r\nTo prevent contributors from using Google Style docstrings, we need to set\r\n\r\n`napoleon_google_docstring = False`,\r\n\r\n[like explained here](https://samnicholls.net/2016/06/15/how-to-sphinx-readthedocs/).\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# gensim documentation build configuration file, created by\n# sphinx-quickstart on Wed Mar 17 13:42:21 2010.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.append(os.path.abspath('.'))\n\n# -- General configuration -----------------------------------------------------\n\nhtml_theme = 'gensim_theme'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinxcontrib.napoleon', 'sphinx.ext.imgmath', 'sphinxcontrib.programoutput']\nautoclass_content = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n# source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'indextoc'\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\nhtml_additional_pages = {'index': './_templates/indexcontent.html'}\n\n# General information about the project.\nproject = u'gensim'\ncopyright = u'2009-now, Radim \u0158eh\u016f\u0159ek <me(at)radimrehurek.com>'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = '3.4'\n# The full version, including alpha/beta/rc tags.\nrelease = '3.4.0'\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n# language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n# unused_docs = []\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['_build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. Major themes that come with\n# Sphinx are currently 'default' and 'sphinxdoc'.\n# html_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n# main_colour = \"#ffbbbb\"\n\nhtml_theme_options = {\n# \"rightsidebar\": \"false\",\n# \"stickysidebar\": \"true\",\n# \"bodyfont\": \"'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'\",\n# \"headfont\": \"'Lucida Grande', 'Lucida Sans Unicode', 'Geneva', 'Verdana', 'sans-serif'\",\n# \"sidebarbgcolor\": \"fuckyou\",\n# \"footerbgcolor\": \"#771111\",\n# \"relbarbgcolor\": \"#993333\",\n# \"sidebartextcolor\": \"#000000\",\n# \"sidebarlinkcolor\": \"#330000\",\n# \"codebgcolor\": \"#fffff0\",\n# \"headtextcolor\": \"#000080\",\n# \"headbgcolor\": \"#f0f0ff\",\n# \"bgcolor\": \"#ffffff\",\n}\n\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['.']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\nhtml_title = \"gensim\"\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = ''\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n# html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\nhtml_favicon = '_static/favicon.ico'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\nhtml_sidebars = {} # {'index': ['download.html', 'globaltoc.html', 'searchbox.html', 'indexsidebar.html']}\n# html_sidebars = {'index': ['globaltoc.html', 'searchbox.html']}\n\n# If false, no module index is generated.\n# html_use_modindex = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\nhtml_split_index = False\n\n# If true, links to the reST sources are added to the pages.\nhtml_show_sourcelink = False\n\nhtml_domain_indices = False\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'gensimdoc'\n\nhtml_show_sphinx = False\n\n# -- Options for LaTeX output --------------------------------------------------\n\n# The paper size ('letter' or 'a4').\n# latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n# latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [('index', 'gensim.tex', u'gensim Documentation', u'Radim \u0158eh\u016f\u0159ek', 'manual')]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\nlatex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n# latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_use_modindex = True\n\nsuppress_warnings = ['image.nonlocal_uri', 'ref.citation', 'ref.footnote']\n", "path": "docs/src/conf.py"}]} | 3,070 | 120 |
gh_patches_debug_31308 | rasdani/github-patches | git_diff | dask__distributed-4984 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Drop down tile to reveal "secret" dashboards
We're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119
although most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. hence, it makes sense that they are not promoted as a top-level dashboard page.
However, at least for debugging purposes, I would really appreciate if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and requires me to type it into my browser.
I would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.
Disclaimer: I can't implement this. I barely know what bokeh is.
</issue>
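The patch recorded below approaches this by handing the template layer a list of the hidden per-plot routes; a condensed sketch of that idea, built on the `applications` route table defined in `scheduler.py` below (the helper name is invented for illustration):
```python
def hidden_plot_names(applications):
    # Illustrative helper: derive the hidden "individual-*" page names from the
    # existing route table so a template drop-down can link to them.
    return [route.replace("/", "") for route in applications if "individual" in route]

# e.g. template_variables["plots"] = hidden_plot_names(applications)
```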
<code>
[start of distributed/dashboard/scheduler.py]
1 from urllib.parse import urljoin
2
3 from tornado import web
4 from tornado.ioloop import IOLoop
5
6 try:
7 import numpy as np
8 except ImportError:
9 np = False
10
11 from .components.nvml import gpu_doc # noqa: 1708
12 from .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc
13 from .components.scheduler import (
14 AggregateAction,
15 BandwidthTypes,
16 BandwidthWorkers,
17 ComputePerKey,
18 CurrentLoad,
19 MemoryByKey,
20 NBytes,
21 NBytesCluster,
22 Occupancy,
23 SystemMonitor,
24 TaskGraph,
25 TaskGroupGraph,
26 TaskProgress,
27 TaskStream,
28 WorkerTable,
29 events_doc,
30 graph_doc,
31 individual_doc,
32 individual_profile_doc,
33 individual_profile_server_doc,
34 profile_doc,
35 profile_server_doc,
36 status_doc,
37 stealing_doc,
38 systemmonitor_doc,
39 tasks_doc,
40 tg_graph_doc,
41 workers_doc,
42 )
43 from .core import BokehApplication
44 from .worker import counters_doc
45
46 template_variables = {
47 "pages": [
48 "status",
49 "workers",
50 "tasks",
51 "system",
52 "profile",
53 "graph",
54 "groups",
55 "info",
56 ]
57 }
58
59 if NVML_ENABLED:
60 template_variables["pages"].insert(4, "gpu")
61
62
63 def connect(application, http_server, scheduler, prefix=""):
64 bokeh_app = BokehApplication(
65 applications, scheduler, prefix=prefix, template_variables=template_variables
66 )
67 application.add_application(bokeh_app)
68 bokeh_app.initialize(IOLoop.current())
69
70 bokeh_app.add_handlers(
71 r".*",
72 [
73 (
74 r"/",
75 web.RedirectHandler,
76 {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
77 )
78 ],
79 )
80
81
82 applications = {
83 "/system": systemmonitor_doc,
84 "/stealing": stealing_doc,
85 "/workers": workers_doc,
86 "/events": events_doc,
87 "/counters": counters_doc,
88 "/tasks": tasks_doc,
89 "/status": status_doc,
90 "/profile": profile_doc,
91 "/profile-server": profile_server_doc,
92 "/graph": graph_doc,
93 "/groups": tg_graph_doc,
94 "/gpu": gpu_doc,
95 "/individual-task-stream": individual_doc(
96 TaskStream, 100, n_rectangles=1000, clear_interval="10s"
97 ),
98 "/individual-progress": individual_doc(TaskProgress, 100, height=160),
99 "/individual-graph": individual_doc(TaskGraph, 200),
100 "/individual-groups": individual_doc(TaskGroupGraph, 200),
101 "/individual-nbytes": individual_doc(NBytes, 100),
102 "/individual-nbytes-cluster": individual_doc(NBytesCluster, 100),
103 "/individual-cpu": individual_doc(CurrentLoad, 100, fig_attr="cpu_figure"),
104 "/individual-nprocessing": individual_doc(
105 CurrentLoad, 100, fig_attr="processing_figure"
106 ),
107 "/individual-occupancy": individual_doc(Occupancy, 100),
108 "/individual-workers": individual_doc(WorkerTable, 500),
109 "/individual-bandwidth-types": individual_doc(BandwidthTypes, 500),
110 "/individual-bandwidth-workers": individual_doc(BandwidthWorkers, 500),
111 "/individual-memory-by-key": individual_doc(MemoryByKey, 500),
112 "/individual-compute-time-per-key": individual_doc(ComputePerKey, 500),
113 "/individual-aggregate-time-per-action": individual_doc(AggregateAction, 500),
114 "/individual-scheduler-system": individual_doc(SystemMonitor, 500),
115 "/individual-profile": individual_profile_doc,
116 "/individual-profile-server": individual_profile_server_doc,
117 "/individual-gpu-memory": gpu_memory_doc,
118 "/individual-gpu-utilization": gpu_utilization_doc,
119 }
120
[end of distributed/dashboard/scheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py
--- a/distributed/dashboard/scheduler.py
+++ b/distributed/dashboard/scheduler.py
@@ -43,42 +43,6 @@
from .core import BokehApplication
from .worker import counters_doc
-template_variables = {
- "pages": [
- "status",
- "workers",
- "tasks",
- "system",
- "profile",
- "graph",
- "groups",
- "info",
- ]
-}
-
-if NVML_ENABLED:
- template_variables["pages"].insert(4, "gpu")
-
-
-def connect(application, http_server, scheduler, prefix=""):
- bokeh_app = BokehApplication(
- applications, scheduler, prefix=prefix, template_variables=template_variables
- )
- application.add_application(bokeh_app)
- bokeh_app.initialize(IOLoop.current())
-
- bokeh_app.add_handlers(
- r".*",
- [
- (
- r"/",
- web.RedirectHandler,
- {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
- )
- ],
- )
-
-
applications = {
"/system": systemmonitor_doc,
"/stealing": stealing_doc,
@@ -117,3 +81,40 @@
"/individual-gpu-memory": gpu_memory_doc,
"/individual-gpu-utilization": gpu_utilization_doc,
}
+
+
+template_variables = {
+ "pages": [
+ "status",
+ "workers",
+ "tasks",
+ "system",
+ "profile",
+ "graph",
+ "groups",
+ "info",
+ ],
+ "plots": [x.replace("/", "") for x in applications if "individual" in x],
+}
+
+if NVML_ENABLED:
+ template_variables["pages"].insert(4, "gpu")
+
+
+def connect(application, http_server, scheduler, prefix=""):
+ bokeh_app = BokehApplication(
+ applications, scheduler, prefix=prefix, template_variables=template_variables
+ )
+ application.add_application(bokeh_app)
+ bokeh_app.initialize(IOLoop.current())
+
+ bokeh_app.add_handlers(
+ r".*",
+ [
+ (
+ r"/",
+ web.RedirectHandler,
+ {"url": urljoin((prefix or "").strip("/") + "/", r"status")},
+ )
+ ],
+ )
| {"golden_diff": "diff --git a/distributed/dashboard/scheduler.py b/distributed/dashboard/scheduler.py\n--- a/distributed/dashboard/scheduler.py\n+++ b/distributed/dashboard/scheduler.py\n@@ -43,42 +43,6 @@\n from .core import BokehApplication\n from .worker import counters_doc\n \n-template_variables = {\n- \"pages\": [\n- \"status\",\n- \"workers\",\n- \"tasks\",\n- \"system\",\n- \"profile\",\n- \"graph\",\n- \"groups\",\n- \"info\",\n- ]\n-}\n-\n-if NVML_ENABLED:\n- template_variables[\"pages\"].insert(4, \"gpu\")\n-\n-\n-def connect(application, http_server, scheduler, prefix=\"\"):\n- bokeh_app = BokehApplication(\n- applications, scheduler, prefix=prefix, template_variables=template_variables\n- )\n- application.add_application(bokeh_app)\n- bokeh_app.initialize(IOLoop.current())\n-\n- bokeh_app.add_handlers(\n- r\".*\",\n- [\n- (\n- r\"/\",\n- web.RedirectHandler,\n- {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n- )\n- ],\n- )\n-\n-\n applications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n@@ -117,3 +81,40 @@\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n }\n+\n+\n+template_variables = {\n+ \"pages\": [\n+ \"status\",\n+ \"workers\",\n+ \"tasks\",\n+ \"system\",\n+ \"profile\",\n+ \"graph\",\n+ \"groups\",\n+ \"info\",\n+ ],\n+ \"plots\": [x.replace(\"/\", \"\") for x in applications if \"individual\" in x],\n+}\n+\n+if NVML_ENABLED:\n+ template_variables[\"pages\"].insert(4, \"gpu\")\n+\n+\n+def connect(application, http_server, scheduler, prefix=\"\"):\n+ bokeh_app = BokehApplication(\n+ applications, scheduler, prefix=prefix, template_variables=template_variables\n+ )\n+ application.add_application(bokeh_app)\n+ bokeh_app.initialize(IOLoop.current())\n+\n+ bokeh_app.add_handlers(\n+ r\".*\",\n+ [\n+ (\n+ r\"/\",\n+ web.RedirectHandler,\n+ {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n+ )\n+ ],\n+ )\n", "issue": "Drop down tile to reveal \"secret\" dashboards\nWe're accumulating a lot of _secret_ dashboard pages https://github.com/dask/distributed/blob/c2557938e6c4175534031cba5ca5ac9d2cdc95f7/distributed/dashboard/scheduler.py#L82-L119\r\nalthough most are not easily accessible from the UI. Most of the pages are not useful for the ordinary user and are only relevant for specific edge cases or debugging. hence, it makes sense that they are not promoted as a top-level dashboard page.\r\n\r\nHowever, at least for debugging purposes, I would really appreciate if these pages were a bit easier to navigate. In particular I'm looking for a way which doesn't require me to know the exact endpoint for an individual plot and requires me to type it into my browser.\r\n\r\nI would propose to add a drop down menu / button which can be used to browse all _hidden_ dashboard pages.\r\n\r\nDisclaimer: I can't implement this. 
I barely know what bokeh is.\n", "before_files": [{"content": "from urllib.parse import urljoin\n\nfrom tornado import web\nfrom tornado.ioloop import IOLoop\n\ntry:\n import numpy as np\nexcept ImportError:\n np = False\n\nfrom .components.nvml import gpu_doc # noqa: 1708\nfrom .components.nvml import NVML_ENABLED, gpu_memory_doc, gpu_utilization_doc\nfrom .components.scheduler import (\n AggregateAction,\n BandwidthTypes,\n BandwidthWorkers,\n ComputePerKey,\n CurrentLoad,\n MemoryByKey,\n NBytes,\n NBytesCluster,\n Occupancy,\n SystemMonitor,\n TaskGraph,\n TaskGroupGraph,\n TaskProgress,\n TaskStream,\n WorkerTable,\n events_doc,\n graph_doc,\n individual_doc,\n individual_profile_doc,\n individual_profile_server_doc,\n profile_doc,\n profile_server_doc,\n status_doc,\n stealing_doc,\n systemmonitor_doc,\n tasks_doc,\n tg_graph_doc,\n workers_doc,\n)\nfrom .core import BokehApplication\nfrom .worker import counters_doc\n\ntemplate_variables = {\n \"pages\": [\n \"status\",\n \"workers\",\n \"tasks\",\n \"system\",\n \"profile\",\n \"graph\",\n \"groups\",\n \"info\",\n ]\n}\n\nif NVML_ENABLED:\n template_variables[\"pages\"].insert(4, \"gpu\")\n\n\ndef connect(application, http_server, scheduler, prefix=\"\"):\n bokeh_app = BokehApplication(\n applications, scheduler, prefix=prefix, template_variables=template_variables\n )\n application.add_application(bokeh_app)\n bokeh_app.initialize(IOLoop.current())\n\n bokeh_app.add_handlers(\n r\".*\",\n [\n (\n r\"/\",\n web.RedirectHandler,\n {\"url\": urljoin((prefix or \"\").strip(\"/\") + \"/\", r\"status\")},\n )\n ],\n )\n\n\napplications = {\n \"/system\": systemmonitor_doc,\n \"/stealing\": stealing_doc,\n \"/workers\": workers_doc,\n \"/events\": events_doc,\n \"/counters\": counters_doc,\n \"/tasks\": tasks_doc,\n \"/status\": status_doc,\n \"/profile\": profile_doc,\n \"/profile-server\": profile_server_doc,\n \"/graph\": graph_doc,\n \"/groups\": tg_graph_doc,\n \"/gpu\": gpu_doc,\n \"/individual-task-stream\": individual_doc(\n TaskStream, 100, n_rectangles=1000, clear_interval=\"10s\"\n ),\n \"/individual-progress\": individual_doc(TaskProgress, 100, height=160),\n \"/individual-graph\": individual_doc(TaskGraph, 200),\n \"/individual-groups\": individual_doc(TaskGroupGraph, 200),\n \"/individual-nbytes\": individual_doc(NBytes, 100),\n \"/individual-nbytes-cluster\": individual_doc(NBytesCluster, 100),\n \"/individual-cpu\": individual_doc(CurrentLoad, 100, fig_attr=\"cpu_figure\"),\n \"/individual-nprocessing\": individual_doc(\n CurrentLoad, 100, fig_attr=\"processing_figure\"\n ),\n \"/individual-occupancy\": individual_doc(Occupancy, 100),\n \"/individual-workers\": individual_doc(WorkerTable, 500),\n \"/individual-bandwidth-types\": individual_doc(BandwidthTypes, 500),\n \"/individual-bandwidth-workers\": individual_doc(BandwidthWorkers, 500),\n \"/individual-memory-by-key\": individual_doc(MemoryByKey, 500),\n \"/individual-compute-time-per-key\": individual_doc(ComputePerKey, 500),\n \"/individual-aggregate-time-per-action\": individual_doc(AggregateAction, 500),\n \"/individual-scheduler-system\": individual_doc(SystemMonitor, 500),\n \"/individual-profile\": individual_profile_doc,\n \"/individual-profile-server\": individual_profile_server_doc,\n \"/individual-gpu-memory\": gpu_memory_doc,\n \"/individual-gpu-utilization\": gpu_utilization_doc,\n}\n", "path": "distributed/dashboard/scheduler.py"}]} | 1,868 | 548 |
gh_patches_debug_24350 | rasdani/github-patches | git_diff | Qiskit__qiskit-1118 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot combine Result objects from the same backend (statevector)
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues -->
### Informations
- **Qiskit Terra version**: the master branch
- **Python version**: 3.6.5
- **Operating system**: macOS 10.13
### What is the current behavior?
It raises the following error:
```
Traceback (most recent call last):
File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 125, in __add__
copy_of_self += other
File "/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py", line 108, in __iadd__
raise QISKitError('Result objects from different backends cannot be combined.')
qiskit._qiskiterror.QISKitError: 'Result objects from different backends cannot be combined.'
```
### Steps to reproduce the problem
Code
```python
from qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister
import qiskit as qk
import numpy as np
num_qubits = 2
q = QuantumRegister(num_qubits, name='q')
c = ClassicalRegister(num_qubits, name='c')
circuits = QuantumCircuit(q, c)
param_idx = 0
for qubit in range(num_qubits):
circuits.u3(0.0, 0.0, 0.0, q[qubit])
circuits.u1(3.0, q[qubit])
# circuits.measure(q, c)
my_backend = qk.Aer.get_backend('statevector_simulator')
qobj = qk.compile(circuits=circuits, backend=my_backend)
job = my_backend.run(qobj)
result_a = job.result()
qobj = qk.compile(circuits=circuits, backend=my_backend)
job = my_backend.run(qobj)
result_b = job.result()
result = result_a + result_b
```
### What is the expected behavior?
Result objects are combined without error
### Suggested solutions
None
Note: If I change the backend to `qasm_simulator`, there is no error.
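For context, the traceback above points at an equality check on `backend_name` inside `Result.__iadd__`. A toy sketch of that kind of guard (illustrative only, simplified, and not the actual Qiskit source) is:

```python
class ToyResult:
    """Minimal stand-in for illustrating the combine guard from the traceback."""

    def __init__(self, backend_name, data):
        self.backend_name = backend_name
        self.data = data

    def __iadd__(self, other):
        # Combining only makes sense when both results report the same backend name.
        if self.backend_name != other.backend_name:
            raise ValueError(
                'Result objects from different backends cannot be combined.')
        self.data.update(other.data)
        return self


a = ToyResult('statevector_simulator', {'exp0': [1.0, 0.0]})
b = ToyResult('statevector_simulator', {'exp1': [0.0, 1.0]})
a += b  # succeeds only because the two backend names match exactly
```

The report suggests that for the statevector backends the two recorded names end up unequal even though the same backend produced both results.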
</issue>
<code>
[start of qiskit/backends/aer/statevector_simulator.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2017, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 # pylint: disable=invalid-name
9
10 """
11 Interface to C++ quantum circuit simulator with realistic noise.
12 """
13
14 import logging
15 import uuid
16
17 from qiskit.qobj import QobjInstruction
18 from .qasm_simulator import QasmSimulator
19 from ._simulatorerror import SimulatorError
20 from .aerjob import AerJob
21
22 logger = logging.getLogger(__name__)
23
24
25 class StatevectorSimulator(QasmSimulator):
26 """C++ statevector simulator"""
27
28 DEFAULT_CONFIGURATION = {
29 'name': 'statevector_simulator',
30 'url': 'https://github.com/QISKit/qiskit-terra/src/qasm-simulator-cpp',
31 'simulator': True,
32 'local': True,
33 'description': 'A C++ statevector simulator for qobj files',
34 'coupling_map': 'all-to-all',
35 'basis_gates': 'u1,u2,u3,cx,cz,id,x,y,z,h,s,sdg,t,tdg,rzz,load,save,snapshot'
36 }
37
38 def __init__(self, configuration=None, provider=None):
39 super().__init__(configuration=configuration or self.DEFAULT_CONFIGURATION.copy(),
40 provider=provider)
41
42 def run(self, qobj):
43         """Run a qobj on the backend."""
44 job_id = str(uuid.uuid4())
45 aer_job = AerJob(self, job_id, self._run_job, qobj)
46 aer_job.submit()
47 return aer_job
48
49 def _run_job(self, job_id, qobj):
50 """Run a Qobj on the backend."""
51 self._validate(qobj)
52 final_state_key = 32767 # Internal key for final state snapshot
53 # Add final snapshots to circuits
54 for experiment in qobj.experiments:
55 experiment.instructions.append(
56 QobjInstruction(name='snapshot', params=[final_state_key])
57 )
58 result = super()._run_job(job_id, qobj)
59 # Replace backend name with current backend
60 result.backend_name = self.name
61 # Extract final state snapshot and move to 'statevector' data field
62 for experiment_result in result.results.values():
63 snapshots = experiment_result.snapshots
64 if str(final_state_key) in snapshots:
65 final_state_key = str(final_state_key)
66 # Pop off final snapshot added above
67 final_state = snapshots.pop(final_state_key, None)
68 final_state = final_state['statevector'][0]
69 # Add final state to results data
70 experiment_result.data['statevector'] = final_state
71 # Remove snapshot dict if empty
72 if snapshots == {}:
73 experiment_result.data.pop('snapshots', None)
74 return result
75
76 def _validate(self, qobj):
77 """Semantic validations of the qobj which cannot be done via schemas.
78 Some of these may later move to backend schemas.
79
80 1. No shots
81 2. No measurements in the middle
82 """
83 if qobj.config.shots != 1:
84 logger.info("statevector simulator only supports 1 shot. "
85 "Setting shots=1.")
86 qobj.config.shots = 1
87 for experiment in qobj.experiments:
88 if getattr(experiment.config, 'shots', 1) != 1:
89 logger.info("statevector simulator only supports 1 shot. "
90 "Setting shots=1 for circuit %s.", experiment.name)
91 experiment.config.shots = 1
92 for op in experiment.instructions:
93 if op.name in ['measure', 'reset']:
94 raise SimulatorError(
95 "In circuit {}: statevector simulator does not support "
96 "measure or reset.".format(experiment.header.name))
97
[end of qiskit/backends/aer/statevector_simulator.py]
[start of qiskit/backends/aer/statevector_simulator_py.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2017, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 # pylint: disable=invalid-name
9
10 """Contains a (slow) python statevector simulator.
11
12 It simulates the statevector through a quantum circuit. It is exponential in
13 the number of qubits.
14
15 We advise using the c++ simulator or online simulator for larger size systems.
16
17 The input is a qobj dictionary and the output is a Result object.
18
19 The input qobj to this simulator has no shots, no measures, no reset, no noise.
20 """
21 import logging
22 import uuid
23
24 from qiskit.backends.aer.aerjob import AerJob
25 from qiskit.backends.aer._simulatorerror import SimulatorError
26 from qiskit.qobj import QobjInstruction
27 from .qasm_simulator_py import QasmSimulatorPy
28
29 logger = logging.getLogger(__name__)
30
31
32 class StatevectorSimulatorPy(QasmSimulatorPy):
33 """Python statevector simulator."""
34
35 DEFAULT_CONFIGURATION = {
36 'name': 'statevector_simulator_py',
37 'url': 'https://github.com/QISKit/qiskit-terra',
38 'simulator': True,
39 'local': True,
40 'description': 'A Python statevector simulator for qobj files',
41 'coupling_map': 'all-to-all',
42 'basis_gates': 'u1,u2,u3,cx,id,snapshot'
43 }
44
45 def __init__(self, configuration=None, provider=None):
46 super().__init__(configuration=configuration or self.DEFAULT_CONFIGURATION.copy(),
47 provider=provider)
48
49 def run(self, qobj):
50 """Run qobj asynchronously.
51
52 Args:
53 qobj (dict): job description
54
55 Returns:
56 AerJob: derived from BaseJob
57 """
58 job_id = str(uuid.uuid4())
59 aer_job = AerJob(self, job_id, self._run_job, qobj)
60 aer_job.submit()
61 return aer_job
62
63 def _run_job(self, job_id, qobj):
64 """Run a Qobj on the backend."""
65 self._validate(qobj)
66 final_state_key = 32767 # Internal key for final state snapshot
67 # Add final snapshots to circuits
68 for experiment in qobj.experiments:
69 experiment.instructions.append(
70 QobjInstruction(name='snapshot', params=[final_state_key])
71 )
72 result = super()._run_job(job_id, qobj)
73 # Replace backend name with current backend
74 result.backend_name = self.name
75 # Extract final state snapshot and move to 'statevector' data field
76 for experiment_result in result.results.values():
77 snapshots = experiment_result.snapshots
78 if str(final_state_key) in snapshots:
79 final_state_key = str(final_state_key)
80 # Pop off final snapshot added above
81 final_state = snapshots.pop(final_state_key, None)
82 final_state = final_state['statevector'][0]
83 # Add final state to results data
84 experiment_result.data['statevector'] = final_state
85 # Remove snapshot dict if empty
86 if snapshots == {}:
87 experiment_result.data.pop('snapshots', None)
88 return result
89
90 def _validate(self, qobj):
91 """Semantic validations of the qobj which cannot be done via schemas.
92 Some of these may later move to backend schemas.
93
94 1. No shots
95 2. No measurements in the middle
96 """
97 if qobj.config.shots != 1:
98 logger.info("statevector simulator only supports 1 shot. "
99 "Setting shots=1.")
100 qobj.config.shots = 1
101 for experiment in qobj.experiments:
102 if getattr(experiment.config, 'shots', 1) != 1:
103 logger.info("statevector simulator only supports 1 shot. "
104 "Setting shots=1 for circuit %s.", experiment.name)
105 experiment.config.shots = 1
106 for op in experiment.instructions:
107 if op.name in ['measure', 'reset']:
108 raise SimulatorError(
109 "In circuit {}: statevector simulator does not support "
110 "measure or reset.".format(experiment.header.name))
111
[end of qiskit/backends/aer/statevector_simulator_py.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/qiskit/backends/aer/statevector_simulator.py b/qiskit/backends/aer/statevector_simulator.py
--- a/qiskit/backends/aer/statevector_simulator.py
+++ b/qiskit/backends/aer/statevector_simulator.py
@@ -56,8 +56,6 @@
QobjInstruction(name='snapshot', params=[final_state_key])
)
result = super()._run_job(job_id, qobj)
- # Replace backend name with current backend
- result.backend_name = self.name
# Extract final state snapshot and move to 'statevector' data field
for experiment_result in result.results.values():
snapshots = experiment_result.snapshots
diff --git a/qiskit/backends/aer/statevector_simulator_py.py b/qiskit/backends/aer/statevector_simulator_py.py
--- a/qiskit/backends/aer/statevector_simulator_py.py
+++ b/qiskit/backends/aer/statevector_simulator_py.py
@@ -70,8 +70,6 @@
QobjInstruction(name='snapshot', params=[final_state_key])
)
result = super()._run_job(job_id, qobj)
- # Replace backend name with current backend
- result.backend_name = self.name
# Extract final state snapshot and move to 'statevector' data field
for experiment_result in result.results.values():
snapshots = experiment_result.snapshots
| {"golden_diff": "diff --git a/qiskit/backends/aer/statevector_simulator.py b/qiskit/backends/aer/statevector_simulator.py\n--- a/qiskit/backends/aer/statevector_simulator.py\n+++ b/qiskit/backends/aer/statevector_simulator.py\n@@ -56,8 +56,6 @@\n QobjInstruction(name='snapshot', params=[final_state_key])\n )\n result = super()._run_job(job_id, qobj)\n- # Replace backend name with current backend\n- result.backend_name = self.name\n # Extract final state snapshot and move to 'statevector' data field\n for experiment_result in result.results.values():\n snapshots = experiment_result.snapshots\ndiff --git a/qiskit/backends/aer/statevector_simulator_py.py b/qiskit/backends/aer/statevector_simulator_py.py\n--- a/qiskit/backends/aer/statevector_simulator_py.py\n+++ b/qiskit/backends/aer/statevector_simulator_py.py\n@@ -70,8 +70,6 @@\n QobjInstruction(name='snapshot', params=[final_state_key])\n )\n result = super()._run_job(job_id, qobj)\n- # Replace backend name with current backend\n- result.backend_name = self.name\n # Extract final state snapshot and move to 'statevector' data field\n for experiment_result in result.results.values():\n snapshots = experiment_result.snapshots\n", "issue": "Can not combine the Result object from the same backend (statevector)\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Informations\r\n\r\n- **Qiskit Terra version**: the master branch\r\n- **Python version**: 3.6.5\r\n- **Operating system**: macOS 10.13\r\n\r\n### What is the current behavior?\r\nraise error\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py\", line 125, in __add__\r\n copy_of_self += other\r\n File \"/Users/rchen/Developer/Quantum/qiskit-terra/qiskit/result/_result.py\", line 108, in __iadd__\r\n raise QISKitError('Result objects from different backends cannot be combined.')\r\nqiskit._qiskiterror.QISKitError: 'Result objects from different backends cannot be combined.'\r\n```\r\n\r\n### Steps to reproduce the problem\r\nCode\r\n```python\r\nfrom qiskit import QuantumRegister, QuantumCircuit, ClassicalRegister\r\nimport qiskit as qk\r\nimport numpy as np\r\n\r\nnum_qubits = 2\r\n\r\nq = QuantumRegister(num_qubits, name='q')\r\nc = ClassicalRegister(num_qubits, name='c')\r\ncircuits = QuantumCircuit(q, c)\r\nparam_idx = 0\r\nfor qubit in range(num_qubits):\r\n circuits.u3(0.0, 0.0, 0.0, q[qubit])\r\n circuits.u1(3.0, q[qubit])\r\n\r\n# circuits.measure(q, c)\r\n\r\n\r\nmy_backend = qk.Aer.get_backend('statevector_simulator')\r\nqobj = qk.compile(circuits=circuits, backend=my_backend)\r\njob = my_backend.run(qobj)\r\nresult_a = job.result()\r\n\r\nqobj = qk.compile(circuits=circuits, backend=my_backend)\r\njob = my_backend.run(qobj)\r\nresult_b = job.result()\r\n\r\nresult = result_a + result_b\r\n\r\n```\r\n\r\n\r\n### What is the expected behavior?\r\nResult objects are combined without error\r\n\r\n\r\n### Suggested solutions\r\nNone\r\n\r\nNote: If I change the backend to `qasm_simulator`, there is no error.\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name\n\n\"\"\"\nInterface to C++ quantum circuit simulator with realistic noise.\n\"\"\"\n\nimport logging\nimport 
uuid\n\nfrom qiskit.qobj import QobjInstruction\nfrom .qasm_simulator import QasmSimulator\nfrom ._simulatorerror import SimulatorError\nfrom .aerjob import AerJob\n\nlogger = logging.getLogger(__name__)\n\n\nclass StatevectorSimulator(QasmSimulator):\n \"\"\"C++ statevector simulator\"\"\"\n\n DEFAULT_CONFIGURATION = {\n 'name': 'statevector_simulator',\n 'url': 'https://github.com/QISKit/qiskit-terra/src/qasm-simulator-cpp',\n 'simulator': True,\n 'local': True,\n 'description': 'A C++ statevector simulator for qobj files',\n 'coupling_map': 'all-to-all',\n 'basis_gates': 'u1,u2,u3,cx,cz,id,x,y,z,h,s,sdg,t,tdg,rzz,load,save,snapshot'\n }\n\n def __init__(self, configuration=None, provider=None):\n super().__init__(configuration=configuration or self.DEFAULT_CONFIGURATION.copy(),\n provider=provider)\n\n def run(self, qobj):\n \"\"\"Run a qobj on the the backend.\"\"\"\n job_id = str(uuid.uuid4())\n aer_job = AerJob(self, job_id, self._run_job, qobj)\n aer_job.submit()\n return aer_job\n\n def _run_job(self, job_id, qobj):\n \"\"\"Run a Qobj on the backend.\"\"\"\n self._validate(qobj)\n final_state_key = 32767 # Internal key for final state snapshot\n # Add final snapshots to circuits\n for experiment in qobj.experiments:\n experiment.instructions.append(\n QobjInstruction(name='snapshot', params=[final_state_key])\n )\n result = super()._run_job(job_id, qobj)\n # Replace backend name with current backend\n result.backend_name = self.name\n # Extract final state snapshot and move to 'statevector' data field\n for experiment_result in result.results.values():\n snapshots = experiment_result.snapshots\n if str(final_state_key) in snapshots:\n final_state_key = str(final_state_key)\n # Pop off final snapshot added above\n final_state = snapshots.pop(final_state_key, None)\n final_state = final_state['statevector'][0]\n # Add final state to results data\n experiment_result.data['statevector'] = final_state\n # Remove snapshot dict if empty\n if snapshots == {}:\n experiment_result.data.pop('snapshots', None)\n return result\n\n def _validate(self, qobj):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\n Some of these may later move to backend schemas.\n\n 1. No shots\n 2. No measurements in the middle\n \"\"\"\n if qobj.config.shots != 1:\n logger.info(\"statevector simulator only supports 1 shot. \"\n \"Setting shots=1.\")\n qobj.config.shots = 1\n for experiment in qobj.experiments:\n if getattr(experiment.config, 'shots', 1) != 1:\n logger.info(\"statevector simulator only supports 1 shot. \"\n \"Setting shots=1 for circuit %s.\", experiment.name)\n experiment.config.shots = 1\n for op in experiment.instructions:\n if op.name in ['measure', 'reset']:\n raise SimulatorError(\n \"In circuit {}: statevector simulator does not support \"\n \"measure or reset.\".format(experiment.header.name))\n", "path": "qiskit/backends/aer/statevector_simulator.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n# pylint: disable=invalid-name\n\n\"\"\"Contains a (slow) python statevector simulator.\n\nIt simulates the statevector through a quantum circuit. 
It is exponential in\nthe number of qubits.\n\nWe advise using the c++ simulator or online simulator for larger size systems.\n\nThe input is a qobj dictionary and the output is a Result object.\n\nThe input qobj to this simulator has no shots, no measures, no reset, no noise.\n\"\"\"\nimport logging\nimport uuid\n\nfrom qiskit.backends.aer.aerjob import AerJob\nfrom qiskit.backends.aer._simulatorerror import SimulatorError\nfrom qiskit.qobj import QobjInstruction\nfrom .qasm_simulator_py import QasmSimulatorPy\n\nlogger = logging.getLogger(__name__)\n\n\nclass StatevectorSimulatorPy(QasmSimulatorPy):\n \"\"\"Python statevector simulator.\"\"\"\n\n DEFAULT_CONFIGURATION = {\n 'name': 'statevector_simulator_py',\n 'url': 'https://github.com/QISKit/qiskit-terra',\n 'simulator': True,\n 'local': True,\n 'description': 'A Python statevector simulator for qobj files',\n 'coupling_map': 'all-to-all',\n 'basis_gates': 'u1,u2,u3,cx,id,snapshot'\n }\n\n def __init__(self, configuration=None, provider=None):\n super().__init__(configuration=configuration or self.DEFAULT_CONFIGURATION.copy(),\n provider=provider)\n\n def run(self, qobj):\n \"\"\"Run qobj asynchronously.\n\n Args:\n qobj (dict): job description\n\n Returns:\n AerJob: derived from BaseJob\n \"\"\"\n job_id = str(uuid.uuid4())\n aer_job = AerJob(self, job_id, self._run_job, qobj)\n aer_job.submit()\n return aer_job\n\n def _run_job(self, job_id, qobj):\n \"\"\"Run a Qobj on the backend.\"\"\"\n self._validate(qobj)\n final_state_key = 32767 # Internal key for final state snapshot\n # Add final snapshots to circuits\n for experiment in qobj.experiments:\n experiment.instructions.append(\n QobjInstruction(name='snapshot', params=[final_state_key])\n )\n result = super()._run_job(job_id, qobj)\n # Replace backend name with current backend\n result.backend_name = self.name\n # Extract final state snapshot and move to 'statevector' data field\n for experiment_result in result.results.values():\n snapshots = experiment_result.snapshots\n if str(final_state_key) in snapshots:\n final_state_key = str(final_state_key)\n # Pop off final snapshot added above\n final_state = snapshots.pop(final_state_key, None)\n final_state = final_state['statevector'][0]\n # Add final state to results data\n experiment_result.data['statevector'] = final_state\n # Remove snapshot dict if empty\n if snapshots == {}:\n experiment_result.data.pop('snapshots', None)\n return result\n\n def _validate(self, qobj):\n \"\"\"Semantic validations of the qobj which cannot be done via schemas.\n Some of these may later move to backend schemas.\n\n 1. No shots\n 2. No measurements in the middle\n \"\"\"\n if qobj.config.shots != 1:\n logger.info(\"statevector simulator only supports 1 shot. \"\n \"Setting shots=1.\")\n qobj.config.shots = 1\n for experiment in qobj.experiments:\n if getattr(experiment.config, 'shots', 1) != 1:\n logger.info(\"statevector simulator only supports 1 shot. \"\n \"Setting shots=1 for circuit %s.\", experiment.name)\n experiment.config.shots = 1\n for op in experiment.instructions:\n if op.name in ['measure', 'reset']:\n raise SimulatorError(\n \"In circuit {}: statevector simulator does not support \"\n \"measure or reset.\".format(experiment.header.name))\n", "path": "qiskit/backends/aer/statevector_simulator_py.py"}]} | 3,232 | 310 |
gh_patches_debug_28338 | rasdani/github-patches | git_diff | chainer__chainer-1511 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get_cifar100() does not work
`get_cifar100()` causes the following error.
```
>>> chainer.datasets.get_cifar100()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py", line 84, in get_cifar100
raw = _retrieve_cifar('cifar-100')
File "/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py", line 145, in _retrieve_cifar
return download.cache_or_load_file(path, creator, numpy.load)
File "/usr/local/lib/python2.7/dist-packages/chainer/dataset/download.py", line 145, in cache_or_load_file
content = creator(temp_path)
File "/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py", line 127, in creator
d = _pickle_load(archive.extractfile(file_name))
File "/usr/lib/python2.7/tarfile.py", line 2143, in extractfile
tarinfo = self.getmember(member)
File "/usr/lib/python2.7/tarfile.py", line 1827, in getmember
raise KeyError("filename %r not found" % name)
KeyError: "filename 'cifar-100-batches-py/data_batch_1' not found"
```
cifar-100's directory structure seems to be different from cifar-10's.
```
$ tar xvzf cifar-100-python.tar.gz
cifar-100-python/
cifar-100-python/file.txt~
cifar-100-python/train
cifar-100-python/test
cifar-100-python/meta
$ tar xvzf cifar-10-python.tar.gz
cifar-10-batches-py/
cifar-10-batches-py/data_batch_4
cifar-10-batches-py/readme.html
cifar-10-batches-py/test_batch
cifar-10-batches-py/data_batch_3
cifar-10-batches-py/batches.meta
cifar-10-batches-py/data_batch_2
cifar-10-batches-py/data_batch_5
cifar-10-batches-py/data_batch_1
```
They should not be retrieved with the same logic.
https://github.com/pfnet/chainer/blob/master/chainer/datasets/cifar.py#L126
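A minimal sketch of reading the CIFAR-100 archive directly from its single `train`/`test` pickles (assuming the standard CIFAR-100 python format, where each pickle carries `data` and `fine_labels`; the archive path and the Python 3 `encoding` note are the only assumptions here):

```python
import pickle
import tarfile

import numpy


def load_cifar100_split(archive_path, split):
    """Load one split ('train' or 'test') from cifar-100-python.tar.gz."""
    with tarfile.open(archive_path, 'r:gz') as archive:
        member = archive.extractfile('cifar-100-python/{}'.format(split))
        d = pickle.load(member)  # on Python 3, pass encoding='latin-1'
    x = d['data'].reshape(-1, 3072)
    y = numpy.array(d['fine_labels'], dtype=numpy.uint8)
    return x, y


# train_x, train_y = load_cifar100_split('cifar-100-python.tar.gz', 'train')
# test_x, test_y = load_cifar100_split('cifar-100-python.tar.gz', 'test')
```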
</issue>
<code>
[start of chainer/datasets/cifar.py]
1 import os
2 import sys
3 import tarfile
4
5 import numpy
6 import six.moves.cPickle as pickle
7
8 from chainer.dataset import download
9 from chainer.datasets import tuple_dataset
10
11
12 def get_cifar10(withlabel=True, ndim=3, scale=1.):
13 """Gets the CIFAR-10 dataset.
14
15 `CIFAR-10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of small
16 natural images. Each example is an RGB color image of size 32x32,
17 classified into 10 groups. In the original images, each component of pixels
18 is represented by one-byte unsigned integer. This function scales the
19 components to floating point values in the interval ``[0, scale]``.
20
21 This function returns the training set and the test set of the official
22 CIFAR-10 dataset. If ``withlabel`` is ``True``, each dataset consists of
23 tuples of images and labels, otherwise it only consists of images.
24
25 Args:
26 withlabel (bool): If ``True``, it returns datasets with labels. In this
27 case, each example is a tuple of an image and a label. Otherwise,
28 the datasets only contain images.
29 ndim (int): Number of dimensions of each image. The shape of each image
30 is determined depending on ndim as follows:
31
32 - ``ndim == 1``: the shape is ``(3072,)``
33 - ``ndim == 3``: the shape is ``(3, 32, 32)``
34
35 scale (float): Pixel value scale. If it is 1 (default), pixels are
36 scaled to the interval ``[0, 1]``.
37
38 Returns:
39 A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
40 are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
41 datasets are arrays of images.
42
43 """
44 raw = _retrieve_cifar('cifar-10')
45 train = _preprocess_cifar(raw['train_x'], raw['train_y'],
46 withlabel, ndim, scale)
47 test = _preprocess_cifar(raw['test_x'], raw['test_y'],
48 withlabel, ndim, scale)
49 return train, test
50
51
52 def get_cifar100(withlabel=True, ndim=3, scale=1.):
53 """Gets the CIFAR-100 dataset.
54
55 `CIFAR-100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of
56 small natural images. Each example is an RGB color image of size 32x32,
57     classified into 100 groups. In the original images, each component of
58 pixels is represented by one-byte unsigned integer. This function scales
59 the components to floating point values in the interval ``[0, scale]``.
60
61 This function returns the training set and the test set of the official
62 CIFAR-100 dataset. If ``withlabel`` is ``True``, each dataset consists of
63 tuples of images and labels, otherwise it only consists of images.
64
65 Args:
66 withlabel (bool): If ``True``, it returns datasets with labels. In this
67 case, each example is a tuple of an image and a label. Otherwise,
68 the datasets only contain images.
69 ndim (int): Number of dimensions of each image. The shape of each image
70 is determined depending on ndim as follows:
71
72 - ``ndim == 1``: the shape is ``(3072,)``
73 - ``ndim == 3``: the shape is ``(3, 32, 32)``
74
75 scale (float): Pixel value scale. If it is 1 (default), pixels are
76 scaled to the interval ``[0, 1]``.
77
78 Returns:
79 A tuple of two datasets. If ``withlabel`` is ``True``, both
80 are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
81 datasets are arrays of images.
82
83 """
84 raw = _retrieve_cifar('cifar-100')
85 train = _preprocess_cifar(raw['train_x'], raw['train_y'],
86 withlabel, ndim, scale)
87 test = _preprocess_cifar(raw['test_x'], raw['test_y'],
88 withlabel, ndim, scale)
89 return train, test
90
91
92 def _preprocess_cifar(images, labels, withlabel, ndim, scale):
93 if ndim == 1:
94 images = images.reshape(-1, 3072)
95 elif ndim == 3:
96 images = images.reshape(-1, 3, 32, 32)
97 else:
98 raise ValueError('invalid ndim for CIFAR dataset')
99 images = images.astype(numpy.float32)
100 images *= scale / 255.
101
102 if withlabel:
103 labels = labels.astype(numpy.int32)
104 return tuple_dataset.TupleDataset(images, labels)
105 else:
106 return images
107
108
109 def _retrieve_cifar(name):
110 root = download.get_dataset_directory('pfnet/chainer/cifar')
111 path = os.path.join(root, '{}.npz'.format(name))
112 url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)
113
114 def creator(path):
115 archive_path = download.cached_download(url)
116
117 train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
118 train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
119 test_y = numpy.empty(10000, dtype=numpy.uint8)
120
121 dir_name = '{}-batches-py'.format(name)
122
123 with tarfile.open(archive_path, 'r:gz') as archive:
124 # training set
125 for i in range(5):
126 file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
127 d = _pickle_load(archive.extractfile(file_name))
128 train_x[i] = d['data']
129 train_y[i] = d['labels']
130
131 # test set
132 file_name = '{}/test_batch'.format(dir_name)
133 d = _pickle_load(archive.extractfile(file_name))
134 test_x = d['data']
135 test_y[...] = d['labels'] # copy to array
136
137 train_x = train_x.reshape(50000, 3072)
138 train_y = train_y.reshape(50000)
139
140 numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
141 test_x=test_x, test_y=test_y)
142 return {'train_x': train_x, 'train_y': train_y,
143 'test_x': test_x, 'test_y': test_y}
144
145 return download.cache_or_load_file(path, creator, numpy.load)
146
147
148 def _pickle_load(f):
149 if sys.version_info > (3, ):
150 # python3
151 return pickle.load(f, encoding='latin-1')
152 else:
153 # python2
154 return pickle.load(f)
155
[end of chainer/datasets/cifar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer/datasets/cifar.py b/chainer/datasets/cifar.py
--- a/chainer/datasets/cifar.py
+++ b/chainer/datasets/cifar.py
@@ -81,7 +81,7 @@
datasets are arrays of images.
"""
- raw = _retrieve_cifar('cifar-100')
+ raw = _retrieve_cifar_100()
train = _preprocess_cifar(raw['train_x'], raw['train_y'],
withlabel, ndim, scale)
test = _preprocess_cifar(raw['test_x'], raw['test_y'],
@@ -106,6 +106,32 @@
return images
+def _retrieve_cifar_100():
+ root = download.get_dataset_directory('pfnet/chainer/cifar')
+ path = os.path.join(root, 'cifar-100.npz')
+ url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
+
+ def creator(path):
+
+ def load(archive, file_name):
+ d = _pickle_load(archive.extractfile(file_name))
+ x = d['data'].reshape((-1, 3072))
+ y = numpy.array(d['fine_labels'], dtype=numpy.uint8)
+ return x, y
+
+ archive_path = download.cached_download(url)
+ with tarfile.open(archive_path, 'r:gz') as archive:
+ train_x, train_y = load(archive, 'cifar-100-python/train')
+ test_x, test_y = load(archive, 'cifar-100-python/test')
+
+ numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
+ test_x=test_x, test_y=test_y)
+ return {'train_x': train_x, 'train_y': train_y,
+ 'test_x': test_x, 'test_y': test_y}
+
+ return download.cache_or_load_file(path, creator, numpy.load)
+
+
def _retrieve_cifar(name):
root = download.get_dataset_directory('pfnet/chainer/cifar')
path = os.path.join(root, '{}.npz'.format(name))
| {"golden_diff": "diff --git a/chainer/datasets/cifar.py b/chainer/datasets/cifar.py\n--- a/chainer/datasets/cifar.py\n+++ b/chainer/datasets/cifar.py\n@@ -81,7 +81,7 @@\n datasets are arrays of images.\n \n \"\"\"\n- raw = _retrieve_cifar('cifar-100')\n+ raw = _retrieve_cifar_100()\n train = _preprocess_cifar(raw['train_x'], raw['train_y'],\n withlabel, ndim, scale)\n test = _preprocess_cifar(raw['test_x'], raw['test_y'],\n@@ -106,6 +106,32 @@\n return images\n \n \n+def _retrieve_cifar_100():\n+ root = download.get_dataset_directory('pfnet/chainer/cifar')\n+ path = os.path.join(root, 'cifar-100.npz')\n+ url = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n+\n+ def creator(path):\n+\n+ def load(archive, file_name):\n+ d = _pickle_load(archive.extractfile(file_name))\n+ x = d['data'].reshape((-1, 3072))\n+ y = numpy.array(d['fine_labels'], dtype=numpy.uint8)\n+ return x, y\n+\n+ archive_path = download.cached_download(url)\n+ with tarfile.open(archive_path, 'r:gz') as archive:\n+ train_x, train_y = load(archive, 'cifar-100-python/train')\n+ test_x, test_y = load(archive, 'cifar-100-python/test')\n+\n+ numpy.savez_compressed(path, train_x=train_x, train_y=train_y,\n+ test_x=test_x, test_y=test_y)\n+ return {'train_x': train_x, 'train_y': train_y,\n+ 'test_x': test_x, 'test_y': test_y}\n+\n+ return download.cache_or_load_file(path, creator, numpy.load)\n+\n+\n def _retrieve_cifar(name):\n root = download.get_dataset_directory('pfnet/chainer/cifar')\n path = os.path.join(root, '{}.npz'.format(name))\n", "issue": "get_cifar100() does not work\n`get_cifar100()` causes the following error.\n\n```\n>>> chainer.datasets.get_cifar100()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py\", line 84, in get_cifar100\n raw = _retrieve_cifar('cifar-100')\n File \"/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py\", line 145, in _retrieve_cifar\n return download.cache_or_load_file(path, creator, numpy.load)\n File \"/usr/local/lib/python2.7/dist-packages/chainer/dataset/download.py\", line 145, in cache_or_load_file\n content = creator(temp_path)\n File \"/usr/local/lib/python2.7/dist-packages/chainer/datasets/cifar.py\", line 127, in creator\n d = _pickle_load(archive.extractfile(file_name))\n File \"/usr/lib/python2.7/tarfile.py\", line 2143, in extractfile\n tarinfo = self.getmember(member)\n File \"/usr/lib/python2.7/tarfile.py\", line 1827, in getmember\n raise KeyError(\"filename %r not found\" % name)\nKeyError: \"filename 'cifar-100-batches-py/data_batch_1' not found\"\n```\n\ncifar-100's directory structure seems to be different from cifar-10's.\n\n```\n$ tar xvzf cifar-100-python.tar.gz \ncifar-100-python/\ncifar-100-python/file.txt~\ncifar-100-python/train\ncifar-100-python/test\ncifar-100-python/meta\n\n$ tar xvzf cifar-10-python.tar.gz \ncifar-10-batches-py/\ncifar-10-batches-py/data_batch_4\ncifar-10-batches-py/readme.html\ncifar-10-batches-py/test_batch\ncifar-10-batches-py/data_batch_3\ncifar-10-batches-py/batches.meta\ncifar-10-batches-py/data_batch_2\ncifar-10-batches-py/data_batch_5\ncifar-10-batches-py/data_batch_1\n```\n\nThey should not be retrieved with the same logic.\nhttps://github.com/pfnet/chainer/blob/master/chainer/datasets/cifar.py#L126\n\n", "before_files": [{"content": "import os\nimport sys\nimport tarfile\n\nimport numpy\nimport six.moves.cPickle as pickle\n\nfrom chainer.dataset import download\nfrom chainer.datasets import 
tuple_dataset\n\n\ndef get_cifar10(withlabel=True, ndim=3, scale=1.):\n \"\"\"Gets the CIFAR-10 dataset.\n\n `CIFAR-10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of small\n natural images. Each example is an RGB color image of size 32x32,\n classified into 10 groups. In the original images, each component of pixels\n is represented by one-byte unsigned integer. This function scales the\n components to floating point values in the interval ``[0, scale]``.\n\n This function returns the training set and the test set of the official\n CIFAR-10 dataset. If ``withlabel`` is ``True``, each dataset consists of\n tuples of images and labels, otherwise it only consists of images.\n\n Args:\n withlabel (bool): If ``True``, it returns datasets with labels. In this\n case, each example is a tuple of an image and a label. Otherwise,\n the datasets only contain images.\n ndim (int): Number of dimensions of each image. The shape of each image\n is determined depending on ndim as follows:\n\n - ``ndim == 1``: the shape is ``(3072,)``\n - ``ndim == 3``: the shape is ``(3, 32, 32)``\n\n scale (float): Pixel value scale. If it is 1 (default), pixels are\n scaled to the interval ``[0, 1]``.\n\n Returns:\n A tuple of two datasets. If ``withlabel`` is ``True``, both datasets\n are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both\n datasets are arrays of images.\n\n \"\"\"\n raw = _retrieve_cifar('cifar-10')\n train = _preprocess_cifar(raw['train_x'], raw['train_y'],\n withlabel, ndim, scale)\n test = _preprocess_cifar(raw['test_x'], raw['test_y'],\n withlabel, ndim, scale)\n return train, test\n\n\ndef get_cifar100(withlabel=True, ndim=3, scale=1.):\n \"\"\"Gets the CIFAR-100 dataset.\n\n `CIFAR-100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ is a set of\n small natural images. Each example is an RGB color image of size 32x32,\n classified into 100 groups. In the original images, each component\n pixels is represented by one-byte unsigned integer. This function scales\n the components to floating point values in the interval ``[0, scale]``.\n\n This function returns the training set and the test set of the official\n CIFAR-100 dataset. If ``withlabel`` is ``True``, each dataset consists of\n tuples of images and labels, otherwise it only consists of images.\n\n Args:\n withlabel (bool): If ``True``, it returns datasets with labels. In this\n case, each example is a tuple of an image and a label. Otherwise,\n the datasets only contain images.\n ndim (int): Number of dimensions of each image. The shape of each image\n is determined depending on ndim as follows:\n\n - ``ndim == 1``: the shape is ``(3072,)``\n - ``ndim == 3``: the shape is ``(3, 32, 32)``\n\n scale (float): Pixel value scale. If it is 1 (default), pixels are\n scaled to the interval ``[0, 1]``.\n\n Returns:\n A tuple of two datasets. If ``withlabel`` is ``True``, both\n are :class:`~chainer.datasets.TupleDataset` instances. 
Otherwise, both\n datasets are arrays of images.\n\n \"\"\"\n raw = _retrieve_cifar('cifar-100')\n train = _preprocess_cifar(raw['train_x'], raw['train_y'],\n withlabel, ndim, scale)\n test = _preprocess_cifar(raw['test_x'], raw['test_y'],\n withlabel, ndim, scale)\n return train, test\n\n\ndef _preprocess_cifar(images, labels, withlabel, ndim, scale):\n if ndim == 1:\n images = images.reshape(-1, 3072)\n elif ndim == 3:\n images = images.reshape(-1, 3, 32, 32)\n else:\n raise ValueError('invalid ndim for CIFAR dataset')\n images = images.astype(numpy.float32)\n images *= scale / 255.\n\n if withlabel:\n labels = labels.astype(numpy.int32)\n return tuple_dataset.TupleDataset(images, labels)\n else:\n return images\n\n\ndef _retrieve_cifar(name):\n root = download.get_dataset_directory('pfnet/chainer/cifar')\n path = os.path.join(root, '{}.npz'.format(name))\n url = 'https://www.cs.toronto.edu/~kriz/{}-python.tar.gz'.format(name)\n\n def creator(path):\n archive_path = download.cached_download(url)\n\n train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)\n train_y = numpy.empty((5, 10000), dtype=numpy.uint8)\n test_y = numpy.empty(10000, dtype=numpy.uint8)\n\n dir_name = '{}-batches-py'.format(name)\n\n with tarfile.open(archive_path, 'r:gz') as archive:\n # training set\n for i in range(5):\n file_name = '{}/data_batch_{}'.format(dir_name, i + 1)\n d = _pickle_load(archive.extractfile(file_name))\n train_x[i] = d['data']\n train_y[i] = d['labels']\n\n # test set\n file_name = '{}/test_batch'.format(dir_name)\n d = _pickle_load(archive.extractfile(file_name))\n test_x = d['data']\n test_y[...] = d['labels'] # copy to array\n\n train_x = train_x.reshape(50000, 3072)\n train_y = train_y.reshape(50000)\n\n numpy.savez_compressed(path, train_x=train_x, train_y=train_y,\n test_x=test_x, test_y=test_y)\n return {'train_x': train_x, 'train_y': train_y,\n 'test_x': test_x, 'test_y': test_y}\n\n return download.cache_or_load_file(path, creator, numpy.load)\n\n\ndef _pickle_load(f):\n if sys.version_info > (3, ):\n # python3\n return pickle.load(f, encoding='latin-1')\n else:\n # python2\n return pickle.load(f)\n", "path": "chainer/datasets/cifar.py"}]} | 3,057 | 497 |
gh_patches_debug_13027 | rasdani/github-patches | git_diff | Gallopsled__pwntools-282 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
2 doctests in proc fail on debian
We have some tests that assume PID 1 is init, and they fail on Debian versions that use systemd. I can't really think of tests that are less platform-specific; any suggestions?
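One way to make those doctests platform-agnostic is to anchor them on the test process itself instead of PID 1, along these lines (a sketch only; the exact expected values depend on how the doctest session is launched):

```python
>>> import os, sys
>>> from pwnlib.util.proc import pid_by_name, name
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
>>> name(os.getpid()) == os.path.basename(sys.argv[0])
True
```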
</issue>
<code>
[start of pwnlib/util/proc.py]
1 import time, errno, logging
2 from .. import tubes
3
4 try:
5 import psutil
6 _ok_import = True
7 except ImportError:
8 _ok_import = False
9
10 log = logging.getLogger(__name__)
11
12 if _ok_import:
13 all_pids = psutil.pids
14
15 def pidof(target):
16 """pidof(target) -> int list
17
18 Get PID(s) of `target`. The returned PID(s) depends on the type of `target`:
19
20 - :class:`str`: PIDs of all processes with a name matching `target`.
21
22 - :class:`pwnlib.tubes.process.process`: singleton list of the PID of `target`.
23
24 - :class:`pwnlib.tubes.sock.sock`: singleton list of the PID at the
25 remote end of `target` if it is running on the host. Otherwise an
26 empty list.
27
28 Args:
29 target(object): The target whose PID(s) to find.
30
31 Returns:
32 A list of found PIDs.
33 """
34 if isinstance(target, tubes.sock.sock):
35 local = target.sock.getsockname()
36 remote = target.sock.getpeername()
37
38 def match(p):
39 return (c.raddr, c.laddr, c.status) == (local, remote, 'ESTABLISHED')
40
41 return [c.pid for c in psutil.net_connections() if match(c)]
42
43 elif isinstance(target, tubes.process.process):
44 return [target.proc.pid]
45
46 else:
47 return pid_by_name(target)
48
49 def pid_by_name(name):
50 """pid_by_name(name) -> int list
51
52 Args:
53 name (str): Name of program.
54
55 Returns:
56 List of PIDs matching `name` sorted by lifetime, youngest to oldest.
57
58 Example:
59 >>> 1 in pid_by_name('init')
60 True
61 >>> os.getpid() in pid_by_name(name(os.getpid()))
62 True
63 """
64 def match(p):
65 if p.name() == name:
66 return True
67 try:
68 if p.exe() == name:
69 return True
70 except:
71 pass
72 return False
73
74 return [p.pid for p in psutil.process_iter() if match(p)]
75
76 def name(pid):
77 """name(pid) -> str
78
79 Args:
80 pid (int): PID of the process.
81
82 Returns:
83 Name of process as listed in ``/proc/<pid>/status``.
84
85 Example:
86 >>> name(1)
87 'init'
88 """
89 return psutil.Process(pid).name()
90
91 def parent(pid):
92 """parent(pid) -> int
93
94 Args:
95 pid (int): PID of the process.
96
97 Returns:
98 Parent PID as listed in ``/proc/<pid>/status`` under ``PPid``,
99             or 0 if there is no parent.
100 """
101 try:
102 return psutil.Process(pid).parent().pid
103 except:
104 return 0
105
106 def children(ppid):
107 """children(ppid) -> int list
108
109 Args:
110 pid (int): PID of the process.
111
112 Returns:
113         List of PIDs whose parent process is `pid`.
114 """
115 return [p.pid for p in psutil.Process(ppid).children()]
116
117 def ancestors(pid):
118 """ancestors(pid) -> int list
119
120 Args:
121 pid (int): PID of the process.
122
123 Returns:
124         List of PIDs whose parent process is `pid` or an ancestor of `pid`.
125 """
126 pids = []
127 while pid != 0:
128 pids.append(pid)
129 pid = parent(pid)
130 return pids
131
132 def descendants(pid):
133 """descendants(pid) -> dict
134
135 Args:
136 pid (int): PID of the process.
137
138 Returns:
139         Dictionary mapping the PID of each child of `pid` to its descendants.
140 """
141 this_pid = pid
142 allpids = all_pids()
143 ppids = {}
144 def _parent(pid):
145 if pid not in ppids:
146 ppids[pid] = parent(pid)
147 return ppids[pid]
148 def _children(ppid):
149 return [pid for pid in allpids if _parent(pid) == ppid]
150 def _loop(ppid):
151 return {pid: _loop(pid) for pid in _children(ppid)}
152 return _loop(pid)
153
154 def exe(pid):
155 """exe(pid) -> str
156
157 Args:
158 pid (int): PID of the process.
159
160 Returns:
161 The path of the binary of the process. I.e. what ``/proc/<pid>/exe`` points to.
162 """
163 return psutil.Process(pid).exe()
164
165 def cwd(pid):
166 """cwd(pid) -> str
167
168 Args:
169 pid (int): PID of the process.
170
171 Returns:
172 The path of the process's current working directory. I.e. what
173 ``/proc/<pid>/cwd`` points to.
174 """
175 return psutil.Process(pid).cwd()
176
177 def cmdline(pid):
178 """cmdline(pid) -> str list
179
180 Args:
181 pid (int): PID of the process.
182
183 Returns:
184 A list of the fields in ``/proc/<pid>/cmdline``.
185 """
186 return psutil.Process(pid).cmdline()
187
188 def stat(pid):
189 """stat(pid) -> str list
190
191 Args:
192 pid (int): PID of the process.
193
194 Returns:
195 A list of the values in ``/proc/<pid>/stat``, with the exception that ``(`` and ``)`` has been removed from around the process name.
196 """
197 with open('/proc/%d/stat' % pid) as fd:
198 s = fd.read()
199 # filenames can have ( and ) in them, dammit
200 i = s.find('(')
201 j = s.rfind(')')
202 name = s[i+1:j]
203 return s[:i].split() + [name] + s[j+1:].split()
204
205 def starttime(pid):
206 """starttime(pid) -> float
207
208 Args:
209 pid (int): PID of the process.
210
211 Returns:
212 The time (in seconds) the process started after system boot
213 """
214 return psutil.Process(pid).create_time() - psutil.boot_time()
215
216 def status(pid):
217 """status(pid) -> dict
218
219 Get the status of a process.
220
221 Args:
222 pid (int): PID of the process.
223
224 Returns:
225 The contents of ``/proc/<pid>/status`` as a dictionary.
226 """
227 out = {}
228 try:
229 with open('/proc/%d/status' % pid) as fd:
230 for line in fd:
231 i = line.index(':')
232 key = line[:i]
233 val = line[i + 2:-1] # initial :\t and trailing \n
234 out[key] = val
235 except OSError as e:
236 if e.errno == errno.ENOENT:
237 raise ValueError('No process with PID %d' % pid)
238 else:
239 raise
240 return out
241
242 def tracer(pid):
243 """tracer(pid) -> int
244
245 Args:
246 pid (int): PID of the process.
247
248 Returns:
249 PID of the process tracing `pid`, or None if no `pid` is not being traced.
250
251 Example:
252 >>> tracer(os.getpid()) is None
253 True
254 """
255 tpid = int(status(pid)['TracerPid'])
256 return tpid if tpid > 0 else None
257
258 def state(pid):
259 """state(pid) -> str
260
261 Args:
262 pid (int): PID of the process.
263
264 Returns:
265 State of the process as listed in ``/proc/<pid>/status``. See `proc(5)` for details.
266
267 Example:
268 >>> state(os.getpid())
269 'R (running)'
270 """
271 return status(pid)['State']
272
273 def wait_for_debugger(pid):
274 """wait_for_debugger(pid) -> None
275
276 Sleeps until the process with PID `pid` is being traced.
277
278 Args:
279 pid (int): PID of the process.
280
281 Returns:
282 None
283 """
284 with log.waitfor('Waiting for debugger') as l:
285 while tracer(pid) is None:
286 time.sleep(0.01)
287 l.success()
288
289 if not _ok_import:
290 def _make_stub(func):
291 func.__doc__ = 'Stubbed out function, because psutil is not available.'
292 return func
293
294 @_make_stub
295 def all_pids():
296 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
297
298 @_make_stub
299 def pidof(target):
300 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
301
302 @_make_stub
303 def pid_by_name(name):
304 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
305
306 @_make_stub
307 def name(pid):
308 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
309
310 @_make_stub
311 def parent(pid):
312 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
313
314 @_make_stub
315 def children(ppid):
316 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
317
318 @_make_stub
319 def ancestors(pid):
320 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
321
322 @_make_stub
323 def descendants(pid):
324 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
325
326 @_make_stub
327 def exe(pid):
328 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
329
330 @_make_stub
331 def cwd(pid):
332 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
333
334 @_make_stub
335 def cmdline(pid):
336 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
337
338 @_make_stub
339 def starttime(pid):
340 log.error("Called stubbed-out function. Get psutil to work on your platform, then come back.")
341
[end of pwnlib/util/proc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwnlib/util/proc.py b/pwnlib/util/proc.py
--- a/pwnlib/util/proc.py
+++ b/pwnlib/util/proc.py
@@ -56,8 +56,6 @@
List of PIDs matching `name` sorted by lifetime, youngest to oldest.
Example:
- >>> 1 in pid_by_name('init')
- True
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
"""
@@ -83,8 +81,8 @@
Name of process as listed in ``/proc/<pid>/status``.
Example:
- >>> name(1)
- 'init'
+ >>> name(os.getpid()) == os.path.basename(sys.argv[0])
+ True
"""
return psutil.Process(pid).name()
| {"golden_diff": "diff --git a/pwnlib/util/proc.py b/pwnlib/util/proc.py\n--- a/pwnlib/util/proc.py\n+++ b/pwnlib/util/proc.py\n@@ -56,8 +56,6 @@\n List of PIDs matching `name` sorted by lifetime, youngest to oldest.\n \n Example:\n- >>> 1 in pid_by_name('init')\n- True\n >>> os.getpid() in pid_by_name(name(os.getpid()))\n True\n \"\"\"\n@@ -83,8 +81,8 @@\n Name of process as listed in ``/proc/<pid>/status``.\n \n Example:\n- >>> name(1)\n- 'init'\n+ >>> name(os.getpid()) == os.path.basename(sys.argv[0])\n+ True\n \"\"\"\n return psutil.Process(pid).name()\n", "issue": "2 doctests in proc fail on debian\nWe have some tests that assume pid 1 is init , and they fail on debian versions that use systemd. I can't really think of tests that are less platform-specific, any suggestions?\n\n", "before_files": [{"content": "import time, errno, logging\nfrom .. import tubes\n\ntry:\n import psutil\n _ok_import = True\nexcept ImportError:\n _ok_import = False\n\nlog = logging.getLogger(__name__)\n\nif _ok_import:\n all_pids = psutil.pids\n\ndef pidof(target):\n \"\"\"pidof(target) -> int list\n\n Get PID(s) of `target`. The returned PID(s) depends on the type of `target`:\n\n - :class:`str`: PIDs of all processes with a name matching `target`.\n\n - :class:`pwnlib.tubes.process.process`: singleton list of the PID of `target`.\n\n - :class:`pwnlib.tubes.sock.sock`: singleton list of the PID at the\n remote end of `target` if it is running on the host. Otherwise an\n empty list.\n\n Args:\n target(object): The target whose PID(s) to find.\n\n Returns:\n A list of found PIDs.\n \"\"\"\n if isinstance(target, tubes.sock.sock):\n local = target.sock.getsockname()\n remote = target.sock.getpeername()\n\n def match(p):\n return (c.raddr, c.laddr, c.status) == (local, remote, 'ESTABLISHED')\n\n return [c.pid for c in psutil.net_connections() if match(c)]\n\n elif isinstance(target, tubes.process.process):\n return [target.proc.pid]\n\n else:\n return pid_by_name(target)\n\ndef pid_by_name(name):\n \"\"\"pid_by_name(name) -> int list\n\n Args:\n name (str): Name of program.\n\n Returns:\n List of PIDs matching `name` sorted by lifetime, youngest to oldest.\n\n Example:\n >>> 1 in pid_by_name('init')\n True\n >>> os.getpid() in pid_by_name(name(os.getpid()))\n True\n \"\"\"\n def match(p):\n if p.name() == name:\n return True\n try:\n if p.exe() == name:\n return True\n except:\n pass\n return False\n\n return [p.pid for p in psutil.process_iter() if match(p)]\n\ndef name(pid):\n \"\"\"name(pid) -> str\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n Name of process as listed in ``/proc/<pid>/status``.\n\n Example:\n >>> name(1)\n 'init'\n \"\"\"\n return psutil.Process(pid).name()\n\ndef parent(pid):\n \"\"\"parent(pid) -> int\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n Parent PID as listed in ``/proc/<pid>/status`` under ``PPid``,\n or 0 if there is not parent.\n \"\"\"\n try:\n return psutil.Process(pid).parent().pid\n except:\n return 0\n\ndef children(ppid):\n \"\"\"children(ppid) -> int list\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n List of PIDs of whose parent process is `pid`.\n \"\"\"\n return [p.pid for p in psutil.Process(ppid).children()]\n\ndef ancestors(pid):\n \"\"\"ancestors(pid) -> int list\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n List of PIDs of whose parent process is `pid` or an ancestor of `pid`.\n \"\"\"\n pids = []\n while pid != 0:\n pids.append(pid)\n pid = parent(pid)\n return pids\n\ndef descendants(pid):\n 
\"\"\"descendants(pid) -> dict\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n Dictionary mapping the PID of each child of `pid` to it's descendants.\n \"\"\"\n this_pid = pid\n allpids = all_pids()\n ppids = {}\n def _parent(pid):\n if pid not in ppids:\n ppids[pid] = parent(pid)\n return ppids[pid]\n def _children(ppid):\n return [pid for pid in allpids if _parent(pid) == ppid]\n def _loop(ppid):\n return {pid: _loop(pid) for pid in _children(ppid)}\n return _loop(pid)\n\ndef exe(pid):\n \"\"\"exe(pid) -> str\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n The path of the binary of the process. I.e. what ``/proc/<pid>/exe`` points to.\n \"\"\"\n return psutil.Process(pid).exe()\n\ndef cwd(pid):\n \"\"\"cwd(pid) -> str\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n The path of the process's current working directory. I.e. what\n ``/proc/<pid>/cwd`` points to.\n \"\"\"\n return psutil.Process(pid).cwd()\n\ndef cmdline(pid):\n \"\"\"cmdline(pid) -> str list\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n A list of the fields in ``/proc/<pid>/cmdline``.\n \"\"\"\n return psutil.Process(pid).cmdline()\n\ndef stat(pid):\n \"\"\"stat(pid) -> str list\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n A list of the values in ``/proc/<pid>/stat``, with the exception that ``(`` and ``)`` has been removed from around the process name.\n \"\"\"\n with open('/proc/%d/stat' % pid) as fd:\n s = fd.read()\n # filenames can have ( and ) in them, dammit\n i = s.find('(')\n j = s.rfind(')')\n name = s[i+1:j]\n return s[:i].split() + [name] + s[j+1:].split()\n\ndef starttime(pid):\n \"\"\"starttime(pid) -> float\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n The time (in seconds) the process started after system boot\n \"\"\"\n return psutil.Process(pid).create_time() - psutil.boot_time()\n\ndef status(pid):\n \"\"\"status(pid) -> dict\n\n Get the status of a process.\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n The contents of ``/proc/<pid>/status`` as a dictionary.\n \"\"\"\n out = {}\n try:\n with open('/proc/%d/status' % pid) as fd:\n for line in fd:\n i = line.index(':')\n key = line[:i]\n val = line[i + 2:-1] # initial :\\t and trailing \\n\n out[key] = val\n except OSError as e:\n if e.errno == errno.ENOENT:\n raise ValueError('No process with PID %d' % pid)\n else:\n raise\n return out\n\ndef tracer(pid):\n \"\"\"tracer(pid) -> int\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n PID of the process tracing `pid`, or None if no `pid` is not being traced.\n\n Example:\n >>> tracer(os.getpid()) is None\n True\n \"\"\"\n tpid = int(status(pid)['TracerPid'])\n return tpid if tpid > 0 else None\n\ndef state(pid):\n \"\"\"state(pid) -> str\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n State of the process as listed in ``/proc/<pid>/status``. See `proc(5)` for details.\n\n Example:\n >>> state(os.getpid())\n 'R (running)'\n \"\"\"\n return status(pid)['State']\n\ndef wait_for_debugger(pid):\n \"\"\"wait_for_debugger(pid) -> None\n\n Sleeps until the process with PID `pid` is being traced.\n\n Args:\n pid (int): PID of the process.\n\n Returns:\n None\n \"\"\"\n with log.waitfor('Waiting for debugger') as l:\n while tracer(pid) is None:\n time.sleep(0.01)\n l.success()\n\nif not _ok_import:\n def _make_stub(func):\n func.__doc__ = 'Stubbed out function, because psutil is not available.'\n return func\n\n @_make_stub\n def all_pids():\n log.error(\"Called stubbed-out function. 
Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def pidof(target):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def pid_by_name(name):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def name(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def parent(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def children(ppid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def ancestors(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def descendants(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def exe(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def cwd(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def cmdline(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n\n @_make_stub\n def starttime(pid):\n log.error(\"Called stubbed-out function. Get psutil to work on your platform, then come back.\")\n", "path": "pwnlib/util/proc.py"}]} | 3,744 | 185 |
gh_patches_debug_54796 | rasdani/github-patches | git_diff | encode__httpx-1357 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ASGITransport does not correctly simulate raw_path in the scope
I'm trying to switch Datasette's internal tests over to using `httpx` with `AsyncClient`.
This has almost worked perfectly, but I've run into one problem: it looks like the `ASGITransport` class used by the `AsyncClient(app=asgi_app)` mechanism does not correctly simulate the `raw_path` and `path` keys.
Here's the code in question: https://github.com/encode/httpx/blob/92ca4d0cc654859fc2257c492e55d8752370d427/httpx/_transports/asgi.py#L82-L97
As you can see, it's not populating `raw_path` even though that's part of the ASGI spec.
This matters for Datasette because it supports this URL: https://latest.datasette.io/fixtures/table%2Fwith%2Fslashes.csv - which refers to a SQLite database table called `table/with/slashes.csv` (a weird table name but that's test cases for you). The way it does this is through careful decoding of the `raw_path` ASGI scope variable.
Here are my notes when I first ran into this limitation of ASGITransport: https://github.com/simonw/datasette/pull/1000#issuecomment-705945591
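For illustration only (an editor's sketch, not part of the listing below): a minimal runnable example of the difference between the decoded `path` and the still-encoded `raw_path` bytes an ASGI scope may carry, using the Datasette URL above; variable names mirror the `arequest` code shown later.

```python
from urllib.parse import unquote

# Hypothetical values; in ASGITransport.arequest these come from the `url` tuple.
full_path = b"/fixtures/table%2Fwith%2Fslashes.csv"
path, _, query = full_path.partition(b"?")

scope = {
    "type": "http",
    "path": unquote(path.decode("ascii")),  # '/fixtures/table/with/slashes.csv'
    "raw_path": path,                       # b'/fixtures/table%2Fwith%2Fslashes.csv'
    "query_string": query,
}
print(scope["path"], scope["raw_path"])
```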
</issue>
<code>
[start of httpx/_transports/asgi.py]
1 from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union
2 from urllib.parse import unquote
3
4 import httpcore
5 import sniffio
6
7 if TYPE_CHECKING: # pragma: no cover
8 import asyncio
9
10 import trio
11
12 Event = Union[asyncio.Event, trio.Event]
13
14
15 def create_event() -> "Event":
16 if sniffio.current_async_library() == "trio":
17 import trio
18
19 return trio.Event()
20 else:
21 import asyncio
22
23 return asyncio.Event()
24
25
26 class ASGITransport(httpcore.AsyncHTTPTransport):
27 """
28 A custom AsyncTransport that handles sending requests directly to an ASGI app.
29 The simplest way to use this functionality is to use the `app` argument.
30
31 ```
32 client = httpx.AsyncClient(app=app)
33 ```
34
35 Alternatively, you can setup the transport instance explicitly.
36 This allows you to include any additional configuration arguments specific
37 to the ASGITransport class:
38
39 ```
40 transport = httpx.ASGITransport(
41 app=app,
42 root_path="/submount",
43 client=("1.2.3.4", 123)
44 )
45 client = httpx.AsyncClient(transport=transport)
46 ```
47
48 Arguments:
49
50 * `app` - The ASGI application.
51 * `raise_app_exceptions` - Boolean indicating if exceptions in the application
52 should be raised. Default to `True`. Can be set to `False` for use cases
53 such as testing the content of a client 500 response.
54 * `root_path` - The root path on which the ASGI application should be mounted.
55 * `client` - A two-tuple indicating the client IP and port of incoming requests.
56 ```
57 """
58
59 def __init__(
60 self,
61 app: Callable,
62 raise_app_exceptions: bool = True,
63 root_path: str = "",
64 client: Tuple[str, int] = ("127.0.0.1", 123),
65 ) -> None:
66 self.app = app
67 self.raise_app_exceptions = raise_app_exceptions
68 self.root_path = root_path
69 self.client = client
70
71 async def arequest(
72 self,
73 method: bytes,
74 url: Tuple[bytes, bytes, Optional[int], bytes],
75 headers: List[Tuple[bytes, bytes]] = None,
76 stream: httpcore.AsyncByteStream = None,
77 ext: dict = None,
78 ) -> Tuple[int, List[Tuple[bytes, bytes]], httpcore.AsyncByteStream, dict]:
79 headers = [] if headers is None else headers
80 stream = httpcore.PlainByteStream(content=b"") if stream is None else stream
81
82 # ASGI scope.
83 scheme, host, port, full_path = url
84 path, _, query = full_path.partition(b"?")
85 scope = {
86 "type": "http",
87 "asgi": {"version": "3.0"},
88 "http_version": "1.1",
89 "method": method.decode(),
90 "headers": [(k.lower(), v) for (k, v) in headers],
91 "scheme": scheme.decode("ascii"),
92 "path": unquote(path.decode("ascii")),
93 "query_string": query,
94 "server": (host.decode("ascii"), port),
95 "client": self.client,
96 "root_path": self.root_path,
97 }
98
99 # Request.
100 request_body_chunks = stream.__aiter__()
101 request_complete = False
102
103 # Response.
104 status_code = None
105 response_headers = None
106 body_parts = []
107 response_started = False
108 response_complete = create_event()
109
110 # ASGI callables.
111
112 async def receive() -> dict:
113 nonlocal request_complete
114
115 if request_complete:
116 await response_complete.wait()
117 return {"type": "http.disconnect"}
118
119 try:
120 body = await request_body_chunks.__anext__()
121 except StopAsyncIteration:
122 request_complete = True
123 return {"type": "http.request", "body": b"", "more_body": False}
124 return {"type": "http.request", "body": body, "more_body": True}
125
126 async def send(message: dict) -> None:
127 nonlocal status_code, response_headers, response_started
128
129 if message["type"] == "http.response.start":
130 assert not response_started
131
132 status_code = message["status"]
133 response_headers = message.get("headers", [])
134 response_started = True
135
136 elif message["type"] == "http.response.body":
137 assert not response_complete.is_set()
138 body = message.get("body", b"")
139 more_body = message.get("more_body", False)
140
141 if body and method != b"HEAD":
142 body_parts.append(body)
143
144 if not more_body:
145 response_complete.set()
146
147 try:
148 await self.app(scope, receive, send)
149 except Exception:
150 if self.raise_app_exceptions or not response_complete.is_set():
151 raise
152
153 assert response_complete.is_set()
154 assert status_code is not None
155 assert response_headers is not None
156
157 stream = httpcore.PlainByteStream(content=b"".join(body_parts))
158 ext = {}
159
160 return (status_code, response_headers, stream, ext)
161
[end of httpx/_transports/asgi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py
--- a/httpx/_transports/asgi.py
+++ b/httpx/_transports/asgi.py
@@ -90,6 +90,7 @@
"headers": [(k.lower(), v) for (k, v) in headers],
"scheme": scheme.decode("ascii"),
"path": unquote(path.decode("ascii")),
+ "raw_path": path,
"query_string": query,
"server": (host.decode("ascii"), port),
"client": self.client,
| {"golden_diff": "diff --git a/httpx/_transports/asgi.py b/httpx/_transports/asgi.py\n--- a/httpx/_transports/asgi.py\n+++ b/httpx/_transports/asgi.py\n@@ -90,6 +90,7 @@\n \"headers\": [(k.lower(), v) for (k, v) in headers],\n \"scheme\": scheme.decode(\"ascii\"),\n \"path\": unquote(path.decode(\"ascii\")),\n+ \"raw_path\": path,\n \"query_string\": query,\n \"server\": (host.decode(\"ascii\"), port),\n \"client\": self.client,\n", "issue": "ASGITransport does not correctly simulate raw_path in the scope\nI'm trying to switch Datasette's internal tests over to using `httpx` with `AsyncClient`.\r\n\r\nThis has almost worked perfectly, but I've run into one problem: it looks like the `ASGITransport` class used by the `AsyncClient(app=asgi_app)` mechanism does not correctly simulate the `raw_path` and `path` keys.\r\n\r\nHere's the code in question: https://github.com/encode/httpx/blob/92ca4d0cc654859fc2257c492e55d8752370d427/httpx/_transports/asgi.py#L82-L97\r\n\r\nAs you can see, it's not populating `raw_path` even though that's part of the ASGI spec.\r\n\r\nThis matters for Datasette because it supports this URL: https://latest.datasette.io/fixtures/table%2Fwith%2Fslashes.csv - which refers to a SQLite database table called `table/with/slashes.csv` (a weird table name but that's test cases for you). The way it does this is through careful decoding of the `raw_path` ASGI scope variable.\r\n\r\nHere are my notes when I first ran into this limitation of ASGITransport: https://github.com/simonw/datasette/pull/1000#issuecomment-705945591\nASGITransport does not correctly simulate raw_path in the scope\nI'm trying to switch Datasette's internal tests over to using `httpx` with `AsyncClient`.\r\n\r\nThis has almost worked perfectly, but I've run into one problem: it looks like the `ASGITransport` class used by the `AsyncClient(app=asgi_app)` mechanism does not correctly simulate the `raw_path` and `path` keys.\r\n\r\nHere's the code in question: https://github.com/encode/httpx/blob/92ca4d0cc654859fc2257c492e55d8752370d427/httpx/_transports/asgi.py#L82-L97\r\n\r\nAs you can see, it's not populating `raw_path` even though that's part of the ASGI spec.\r\n\r\nThis matters for Datasette because it supports this URL: https://latest.datasette.io/fixtures/table%2Fwith%2Fslashes.csv - which refers to a SQLite database table called `table/with/slashes.csv` (a weird table name but that's test cases for you). 
The way it does this is through careful decoding of the `raw_path` ASGI scope variable.\r\n\r\nHere are my notes when I first ran into this limitation of ASGITransport: https://github.com/simonw/datasette/pull/1000#issuecomment-705945591\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union\nfrom urllib.parse import unquote\n\nimport httpcore\nimport sniffio\n\nif TYPE_CHECKING: # pragma: no cover\n import asyncio\n\n import trio\n\n Event = Union[asyncio.Event, trio.Event]\n\n\ndef create_event() -> \"Event\":\n if sniffio.current_async_library() == \"trio\":\n import trio\n\n return trio.Event()\n else:\n import asyncio\n\n return asyncio.Event()\n\n\nclass ASGITransport(httpcore.AsyncHTTPTransport):\n \"\"\"\n A custom AsyncTransport that handles sending requests directly to an ASGI app.\n The simplest way to use this functionality is to use the `app` argument.\n\n ```\n client = httpx.AsyncClient(app=app)\n ```\n\n Alternatively, you can setup the transport instance explicitly.\n This allows you to include any additional configuration arguments specific\n to the ASGITransport class:\n\n ```\n transport = httpx.ASGITransport(\n app=app,\n root_path=\"/submount\",\n client=(\"1.2.3.4\", 123)\n )\n client = httpx.AsyncClient(transport=transport)\n ```\n\n Arguments:\n\n * `app` - The ASGI application.\n * `raise_app_exceptions` - Boolean indicating if exceptions in the application\n should be raised. Default to `True`. Can be set to `False` for use cases\n such as testing the content of a client 500 response.\n * `root_path` - The root path on which the ASGI application should be mounted.\n * `client` - A two-tuple indicating the client IP and port of incoming requests.\n ```\n \"\"\"\n\n def __init__(\n self,\n app: Callable,\n raise_app_exceptions: bool = True,\n root_path: str = \"\",\n client: Tuple[str, int] = (\"127.0.0.1\", 123),\n ) -> None:\n self.app = app\n self.raise_app_exceptions = raise_app_exceptions\n self.root_path = root_path\n self.client = client\n\n async def arequest(\n self,\n method: bytes,\n url: Tuple[bytes, bytes, Optional[int], bytes],\n headers: List[Tuple[bytes, bytes]] = None,\n stream: httpcore.AsyncByteStream = None,\n ext: dict = None,\n ) -> Tuple[int, List[Tuple[bytes, bytes]], httpcore.AsyncByteStream, dict]:\n headers = [] if headers is None else headers\n stream = httpcore.PlainByteStream(content=b\"\") if stream is None else stream\n\n # ASGI scope.\n scheme, host, port, full_path = url\n path, _, query = full_path.partition(b\"?\")\n scope = {\n \"type\": \"http\",\n \"asgi\": {\"version\": \"3.0\"},\n \"http_version\": \"1.1\",\n \"method\": method.decode(),\n \"headers\": [(k.lower(), v) for (k, v) in headers],\n \"scheme\": scheme.decode(\"ascii\"),\n \"path\": unquote(path.decode(\"ascii\")),\n \"query_string\": query,\n \"server\": (host.decode(\"ascii\"), port),\n \"client\": self.client,\n \"root_path\": self.root_path,\n }\n\n # Request.\n request_body_chunks = stream.__aiter__()\n request_complete = False\n\n # Response.\n status_code = None\n response_headers = None\n body_parts = []\n response_started = False\n response_complete = create_event()\n\n # ASGI callables.\n\n async def receive() -> dict:\n nonlocal request_complete\n\n if request_complete:\n await response_complete.wait()\n return {\"type\": \"http.disconnect\"}\n\n try:\n body = await request_body_chunks.__anext__()\n except StopAsyncIteration:\n request_complete = True\n return {\"type\": \"http.request\", \"body\": 
b\"\", \"more_body\": False}\n return {\"type\": \"http.request\", \"body\": body, \"more_body\": True}\n\n async def send(message: dict) -> None:\n nonlocal status_code, response_headers, response_started\n\n if message[\"type\"] == \"http.response.start\":\n assert not response_started\n\n status_code = message[\"status\"]\n response_headers = message.get(\"headers\", [])\n response_started = True\n\n elif message[\"type\"] == \"http.response.body\":\n assert not response_complete.is_set()\n body = message.get(\"body\", b\"\")\n more_body = message.get(\"more_body\", False)\n\n if body and method != b\"HEAD\":\n body_parts.append(body)\n\n if not more_body:\n response_complete.set()\n\n try:\n await self.app(scope, receive, send)\n except Exception:\n if self.raise_app_exceptions or not response_complete.is_set():\n raise\n\n assert response_complete.is_set()\n assert status_code is not None\n assert response_headers is not None\n\n stream = httpcore.PlainByteStream(content=b\"\".join(body_parts))\n ext = {}\n\n return (status_code, response_headers, stream, ext)\n", "path": "httpx/_transports/asgi.py"}]} | 2,672 | 129 |
gh_patches_debug_10125 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1060 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MISO <-> Canada interconnector
..needs to be updated as it is still pointing to Montana instead of MISO
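For illustration, a hedged sketch (the dictionary name is hypothetical) of how the Ontario parser's exchange keys could be regrouped so the Michigan and Minnesota interties report against a single MISO pair; the actual parser below uses if/elif branches rather than a mapping.

```python
# Hypothetical regrouping of intertie keys per exchange pair.
EXCHANGE_KEYS = {
    'CA-MB->CA-ON': ['MANITOBA', 'MANITOBA SK'],
    'CA-ON->US-NY': ['NEW-YORK'],
    'CA-ON->US-MISO': ['MICHIGAN', 'MINNESOTA'],
}

print(EXCHANGE_KEYS['CA-ON->US-MISO'])  # ['MICHIGAN', 'MINNESOTA']
```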

</issue>
<code>
[start of parsers/CA_ON.py]
1 #!/usr/bin/env python3
2
3 # The arrow library is used to handle datetimes
4 import arrow
5 # The request library is used to fetch content through HTTP
6 import requests
7
8 from bs4 import BeautifulSoup
9
10 MAP_GENERATION = {
11 'BIOFUEL': 'biomass',
12 'GAS': 'gas',
13 'HYDRO': 'hydro',
14 'NUCLEAR': 'nuclear',
15 'SOLAR': 'solar',
16 'WIND': 'wind'
17 }
18
19 timezone = 'Canada/Eastern'
20
21
22 def fetch_production(country_code='CA-ON', session=None):
23 """Requests the last known production mix (in MW) of a given country
24
25 Arguments:
26 country_code (optional) -- used in case a parser is able to fetch multiple countries
27 session (optional) -- request session passed in order to re-use an existing session
28
29 Return:
30 A dictionary in the form:
31 {
32 'countryCode': 'FR',
33 'datetime': '2017-01-01T00:00:00Z',
34 'production': {
35 'biomass': 0.0,
36 'coal': 0.0,
37 'gas': 0.0,
38 'hydro': 0.0,
39 'nuclear': null,
40 'oil': 0.0,
41 'solar': 0.0,
42 'wind': 0.0,
43 'geothermal': 0.0,
44 'unknown': 0.0
45 },
46 'storage': {
47 'hydro': -10.0,
48 },
49 'source': 'mysource.com'
50 }
51 """
52 r = session or requests.session()
53 url = 'http://www.ieso.ca/-/media/files/ieso/uploaded/chart/generation_fuel_type_multiday.xml?la=en'
54 response = r.get(url)
55 soup = BeautifulSoup(response.text, 'html.parser')
56
57 data = {}
58
59 start_datetime = arrow.get(
60 arrow.get(soup.find_all('startdate')[0].contents[0]).datetime, timezone)
61
62 # Iterate over all datasets (production types)
63 for item in soup.find_all('dataset'):
64 key = item.attrs['series']
65 for rowIndex, row in enumerate(item.find_all('value')):
66 if not len(row.contents):
67 continue
68 if rowIndex not in data:
69 data[rowIndex] = {
70 'datetime': start_datetime.replace(hours=+rowIndex).datetime,
71 'countryCode': country_code,
72 'production': {
73 'coal': 0
74 },
75 'storage': {},
76 'source': 'ieso.ca',
77 }
78 data[rowIndex]['production'][MAP_GENERATION[key]] = \
79 float(row.contents[0])
80
81 return [data[k] for k in sorted(data.keys())]
82
83
84 def fetch_price(country_code='CA-ON', session=None):
85 """Requests the last known power price of a given country
86
87 Arguments:
88 country_code (optional) -- used in case a parser is able to fetch multiple countries
89 session (optional) -- request session passed in order to re-use an existing session
90
91 Return:
92 A dictionary in the form:
93 {
94 'countryCode': 'FR',
95 'currency': EUR,
96 'datetime': '2017-01-01T00:00:00Z',
97 'price': 0.0,
98 'source': 'mysource.com'
99 }
100 """
101
102 r = session or requests.session()
103 url = 'http://www.ieso.ca/-/media/files/ieso/uploaded/chart/price_multiday.xml?la=en'
104 response = r.get(url)
105 soup = BeautifulSoup(response.text, 'html.parser')
106
107 data = {}
108
109 start_datetime = arrow.get(
110 arrow.get(soup.find_all('startdate')[0].contents[0]).datetime, timezone)
111
112 # Iterate over all datasets (production types)
113 for item in soup.find_all('dataset'):
114 key = item.attrs['series']
115 if key != 'HOEP':
116 continue
117 for rowIndex, row in enumerate(item.find_all('value')):
118 if not len(row.contents):
119 continue
120 if rowIndex not in data:
121 data[rowIndex] = {
122 'datetime': start_datetime.replace(hours=+rowIndex).datetime,
123 'countryCode': country_code,
124 'currency': 'CAD',
125 'source': 'ieso.ca',
126 }
127 data[rowIndex]['price'] = \
128 float(row.contents[0])
129
130 return [data[k] for k in sorted(data.keys())]
131
132 return data
133
134
135 def fetch_exchange(country_code1, country_code2, session=None):
136 """Requests the last known power exchange (in MW) between two countries
137
138 Arguments:
139 country_code (optional) -- used in case a parser is able to fetch multiple countries
140 session (optional) -- request session passed in order to re-use an existing session
141
142 Return:
143 A dictionary in the form:
144 {
145 'sortedCountryCodes': 'DK->NO',
146 'datetime': '2017-01-01T00:00:00Z',
147 'netFlow': 0.0,
148 'source': 'mysource.com'
149 }
150 """
151
152 r = session or requests.session()
153 url = 'http://live.gridwatch.ca/WebServices/GridWatchWebApp.asmx/GetHomeViewData_v2'
154 response = r.get(url)
155 obj = response.json()
156 exchanges = obj['intertieLineData']
157
158 sortedCountryCodes = '->'.join(sorted([country_code1, country_code2]))
159 # Everything -> CA_ON corresponds to an import to ON
160 # In the data, "net" represents an export
161 # So everything -> CA_ON must be reversed
162 if sortedCountryCodes == 'CA-MB->CA-ON':
163 keys = ['MANITOBA', 'MANITOBA SK']
164 direction = -1
165 elif sortedCountryCodes == 'CA-ON->US-NY':
166 keys = ['NEW-YORK']
167 direction = 1
168 elif sortedCountryCodes == 'CA-ON->US-MI':
169 keys = ['MICHIGAN']
170 direction = 1
171 elif sortedCountryCodes == 'CA-ON->US-MN':
172 keys = ['MINNESOTA']
173 direction = 1
174 elif sortedCountryCodes == 'CA-ON->CA-QC':
175 keys = filter(lambda k: k[:2] == 'PQ', exchanges.keys())
176 direction = 1
177 else:
178 raise NotImplementedError('This exchange pair is not implemented')
179
180 data = {
181 'datetime': max(map(lambda x: arrow.get(arrow.get(
182 exchanges[x]['dateReported']).datetime, timezone).datetime, keys)),
183 'sortedCountryCodes': sortedCountryCodes,
184 'netFlow': sum(map(lambda x: float(exchanges[x]['net'].replace(',', '')), keys)) * direction,
185 'source': 'gridwatch.ca'
186 }
187
188 return data
189
190
191 if __name__ == '__main__':
192 """Main method, never used by the Electricity Map backend, but handy for testing."""
193
194 print('fetch_production() ->')
195 print(fetch_production())
196 print('fetch_price() ->')
197 print(fetch_price())
198 print('fetch_exchange("CA-ON", "US-NY") ->')
199 print(fetch_exchange("CA-ON", "US-NY"))
200
[end of parsers/CA_ON.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/CA_ON.py b/parsers/CA_ON.py
--- a/parsers/CA_ON.py
+++ b/parsers/CA_ON.py
@@ -165,11 +165,8 @@
elif sortedCountryCodes == 'CA-ON->US-NY':
keys = ['NEW-YORK']
direction = 1
- elif sortedCountryCodes == 'CA-ON->US-MI':
- keys = ['MICHIGAN']
- direction = 1
- elif sortedCountryCodes == 'CA-ON->US-MN':
- keys = ['MINNESOTA']
+ elif sortedCountryCodes == 'CA-ON->US-MISO':
+ keys = ['MICHIGAN', 'MINNESOTA']
direction = 1
elif sortedCountryCodes == 'CA-ON->CA-QC':
keys = filter(lambda k: k[:2] == 'PQ', exchanges.keys())
| {"golden_diff": "diff --git a/parsers/CA_ON.py b/parsers/CA_ON.py\n--- a/parsers/CA_ON.py\n+++ b/parsers/CA_ON.py\n@@ -165,11 +165,8 @@\n elif sortedCountryCodes == 'CA-ON->US-NY':\n keys = ['NEW-YORK']\n direction = 1\n- elif sortedCountryCodes == 'CA-ON->US-MI':\n- keys = ['MICHIGAN']\n- direction = 1\n- elif sortedCountryCodes == 'CA-ON->US-MN':\n- keys = ['MINNESOTA']\n+ elif sortedCountryCodes == 'CA-ON->US-MISO':\n+ keys = ['MICHIGAN', 'MINNESOTA']\n direction = 1\n elif sortedCountryCodes == 'CA-ON->CA-QC':\n keys = filter(lambda k: k[:2] == 'PQ', exchanges.keys())\n", "issue": "MISO <-> Canada interconnector\n..needs to be updated as it still pointing to Montana instead of MISO\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# The arrow library is used to handle datetimes\nimport arrow\n# The request library is used to fetch content through HTTP\nimport requests\n\nfrom bs4 import BeautifulSoup\n\nMAP_GENERATION = {\n 'BIOFUEL': 'biomass',\n 'GAS': 'gas',\n 'HYDRO': 'hydro',\n 'NUCLEAR': 'nuclear',\n 'SOLAR': 'solar',\n 'WIND': 'wind'\n}\n\ntimezone = 'Canada/Eastern'\n\n\ndef fetch_production(country_code='CA-ON', session=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'countryCode': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n r = session or requests.session()\n url = 'http://www.ieso.ca/-/media/files/ieso/uploaded/chart/generation_fuel_type_multiday.xml?la=en'\n response = r.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n data = {}\n\n start_datetime = arrow.get(\n arrow.get(soup.find_all('startdate')[0].contents[0]).datetime, timezone)\n\n # Iterate over all datasets (production types)\n for item in soup.find_all('dataset'):\n key = item.attrs['series']\n for rowIndex, row in enumerate(item.find_all('value')):\n if not len(row.contents):\n continue\n if rowIndex not in data:\n data[rowIndex] = {\n 'datetime': start_datetime.replace(hours=+rowIndex).datetime,\n 'countryCode': country_code,\n 'production': {\n 'coal': 0\n },\n 'storage': {},\n 'source': 'ieso.ca',\n }\n data[rowIndex]['production'][MAP_GENERATION[key]] = \\\n float(row.contents[0])\n\n return [data[k] for k in sorted(data.keys())]\n\n\ndef fetch_price(country_code='CA-ON', session=None):\n \"\"\"Requests the last known power price of a given country\n\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'countryCode': 'FR',\n 'currency': EUR,\n 'datetime': '2017-01-01T00:00:00Z',\n 'price': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n\n r = session or requests.session()\n url = 'http://www.ieso.ca/-/media/files/ieso/uploaded/chart/price_multiday.xml?la=en'\n response = r.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n data = {}\n\n start_datetime = arrow.get(\n arrow.get(soup.find_all('startdate')[0].contents[0]).datetime, timezone)\n\n # Iterate over all 
datasets (production types)\n for item in soup.find_all('dataset'):\n key = item.attrs['series']\n if key != 'HOEP':\n continue\n for rowIndex, row in enumerate(item.find_all('value')):\n if not len(row.contents):\n continue\n if rowIndex not in data:\n data[rowIndex] = {\n 'datetime': start_datetime.replace(hours=+rowIndex).datetime,\n 'countryCode': country_code,\n 'currency': 'CAD',\n 'source': 'ieso.ca',\n }\n data[rowIndex]['price'] = \\\n float(row.contents[0])\n\n return [data[k] for k in sorted(data.keys())]\n\n return data\n\n\ndef fetch_exchange(country_code1, country_code2, session=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n\n Arguments:\n country_code (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'sortedCountryCodes': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n \"\"\"\n\n r = session or requests.session()\n url = 'http://live.gridwatch.ca/WebServices/GridWatchWebApp.asmx/GetHomeViewData_v2'\n response = r.get(url)\n obj = response.json()\n exchanges = obj['intertieLineData']\n\n sortedCountryCodes = '->'.join(sorted([country_code1, country_code2]))\n # Everything -> CA_ON corresponds to an import to ON\n # In the data, \"net\" represents an export\n # So everything -> CA_ON must be reversed\n if sortedCountryCodes == 'CA-MB->CA-ON':\n keys = ['MANITOBA', 'MANITOBA SK']\n direction = -1\n elif sortedCountryCodes == 'CA-ON->US-NY':\n keys = ['NEW-YORK']\n direction = 1\n elif sortedCountryCodes == 'CA-ON->US-MI':\n keys = ['MICHIGAN']\n direction = 1\n elif sortedCountryCodes == 'CA-ON->US-MN':\n keys = ['MINNESOTA']\n direction = 1\n elif sortedCountryCodes == 'CA-ON->CA-QC':\n keys = filter(lambda k: k[:2] == 'PQ', exchanges.keys())\n direction = 1\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n data = {\n 'datetime': max(map(lambda x: arrow.get(arrow.get(\n exchanges[x]['dateReported']).datetime, timezone).datetime, keys)),\n 'sortedCountryCodes': sortedCountryCodes,\n 'netFlow': sum(map(lambda x: float(exchanges[x]['net'].replace(',', '')), keys)) * direction,\n 'source': 'gridwatch.ca'\n }\n\n return data\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n print('fetch_exchange(\"CA-ON\", \"US-NY\") ->')\n print(fetch_exchange(\"CA-ON\", \"US-NY\"))\n", "path": "parsers/CA_ON.py"}]} | 2,732 | 209 |
gh_patches_debug_18228 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3393 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ERROR] Failed to run check: Ensure Application Gateway WAF prevents message lookup in Log4j2.
**Describe the issue**
While performing checkov scan on our terraform dir, we are running into this error:
`2022-07-18 10:09:32,794 [MainThread ] [ERROR] Failed to run check: Ensure Application Gateway WAF prevents message lookup in Log4j2. See CVE-2021-44228 aka log4jshell for configuration: {'custom_rules': [[]], 'location': ['westeurope'], 'managed_rules': [{'exclusion': [[]], 'managed_rule_set': [{'rule_group_override': [[]], 'type': ['OWASP'], 'version': ['3.1'], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], 'name': ['waf-acc-weu-001'], 'policy_settings': [{'enabled': [True], 'file_upload_limit_in_mb': [100], 'max_request_body_size_in_kb': [128], 'mode': ['Prevention'], 'request_body_check': [True], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], 'resource_group_name': ['rg-acceptance-weu-001'], 'tags': [None], 'timeouts': [None], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0], 'references_': ['data.azurerm_resource_group.resourcegroup.name', 'data.azurerm_resource_group.resourcegroup'], '__address__': 'azurerm_web_application_firewall_policy.agw_waf_policy_aks_acc'} at file: /tf/plan.json`
**Examples**
When looking into this page: https://docs.bridgecrew.io/docs/ensure-application-gateway-waf-prevents-message-lookup-in-log4j2, we saw a code example, but the thing is, we have exactly the same code:
```
resource "azurerm_web_application_firewall_policy" "agw_waf_policy_aks_acc" {
name = "waf-acc-weu-001"
resource_group_name = data.azurerm_resource_group.kbc_acc_resourcegroup.name
location = var.location
policy_settings {
enabled = true
mode = "Prevention"
request_body_check = true
# Set to 200 because ...
file_upload_limit_in_mb = 200
# Set to 2000 because ...
max_request_body_size_in_kb = 2000
}
  managed_rules {
    managed_rule_set {
      type    = "OWASP"
      version = "3.2"
    }
  }
}
```
**Version (please complete the following information):**
- bridgecrew/checkov docker image without version specified
**Additional context**
Traceback:
```
Process ForkProcess-6:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/site-packages/checkov/common/parallelizer/parallel_runner.py", line 37, in func_wrapper
result = original_func(item)
File "/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py", line 76, in _parallel_run
return runner.run(
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 72, in run
self.check_tf_definition(report, root_folder, runner_filter)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 90, in check_tf_definition
self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 110, in run_block
results = registry.scan(scanned_file, entity, [], runner_filter)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 124, in scan
result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 138, in run_check
result = check.run(
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 75, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 43, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py", line 35, in scan_resource_conf
if rule_override.get("rule_group_name") == ["REQUEST-944-APPLICATION-ATTACK-JAVA"]:
File "/usr/local/lib/python3.8/site-packages/checkov/common/parsers/node.py", line 189, in __getattr__
raise TemplateAttributeError(f'{name} is invalid')
checkov.common.parsers.node.TemplateAttributeError: get is invalid
```
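For illustration, a hedged sketch (hypothetical helper, not checkov's actual API) of the type guard the traceback above points at: when `rule_group_override` is empty it surfaces as `[[]]` in the plan output, so each override must be confirmed to be a dict before calling `.get` on it.

```python
# Minimal sketch, not the real check implementation.
from typing import Any, List


def java_rule_944240_disabled(rule_overrides: List[Any]) -> bool:
    for rule_override in rule_overrides:
        if not isinstance(rule_override, dict):
            continue  # empty/list-shaped overrides carry no settings
        if rule_override.get("rule_group_name") == ["REQUEST-944-APPLICATION-ATTACK-JAVA"]:
            disabled_rules = rule_override.get("disabled_rules") or []
            if disabled_rules and "944240" in disabled_rules[0]:
                return True
    return False


print(java_rule_944240_disabled([[]]))  # False instead of raising
```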
</issue>
<code>
[start of checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py]
1 from typing import Dict, Any
2
3 from checkov.common.models.enums import CheckCategories, CheckResult
4 from checkov.common.util.type_forcers import force_list
5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
6
7
8 class AppGatewayWAFACLCVE202144228(BaseResourceCheck):
9 def __init__(self) -> None:
10 name = "Ensure Application Gateway WAF prevents message lookup in Log4j2. See CVE-2021-44228 aka log4jshell"
11 id = "CKV_AZURE_135"
12 supported_resources = ("azurerm_web_application_firewall_policy",)
13 categories = (CheckCategories.APPLICATION_SECURITY,)
14 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
15
16 def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:
17 self.evaluated_keys = ["managed_rules"]
18 managed_rules = conf.get("managed_rules")
19 if managed_rules:
20 managed_rule_sets = managed_rules[0].get("managed_rule_set") or []
21 for idx_rule_set, rule_set in enumerate(force_list(managed_rule_sets)):
22 self.evaluated_keys = [
23 f"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/type",
24 f"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/version",
25 ]
26 if rule_set.get("type", ["OWASP"]) == ["OWASP"] and rule_set.get("version") in (["3.1"], ["3.2"]):
27 rule_overrides = rule_set.get("rule_group_override") or []
28 for idx_override, rule_override in enumerate(force_list(rule_overrides)):
29 self.evaluated_keys.extend(
30 [
31 f"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/rule_group_name",
32 f"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/disabled_rules",
33 ]
34 )
35 if rule_override.get("rule_group_name") == ["REQUEST-944-APPLICATION-ATTACK-JAVA"]:
36 disabled_rules = rule_override.get("disabled_rules") or []
37 if isinstance(disabled_rules, list) and "944240" in force_list(disabled_rules[0]):
38 return CheckResult.FAILED
39
40 return CheckResult.PASSED
41
42 return CheckResult.FAILED
43
44
45 check = AppGatewayWAFACLCVE202144228()
46
[end of checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py b/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py
--- a/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py
+++ b/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py
@@ -32,7 +32,7 @@
f"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/disabled_rules",
]
)
- if rule_override.get("rule_group_name") == ["REQUEST-944-APPLICATION-ATTACK-JAVA"]:
+ if isinstance(rule_override, dict) and rule_override.get("rule_group_name") == ["REQUEST-944-APPLICATION-ATTACK-JAVA"]:
disabled_rules = rule_override.get("disabled_rules") or []
if isinstance(disabled_rules, list) and "944240" in force_list(disabled_rules[0]):
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py b/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py\n--- a/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py\n+++ b/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py\n@@ -32,7 +32,7 @@\n f\"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/disabled_rules\",\n ]\n )\n- if rule_override.get(\"rule_group_name\") == [\"REQUEST-944-APPLICATION-ATTACK-JAVA\"]:\n+ if isinstance(rule_override, dict) and rule_override.get(\"rule_group_name\") == [\"REQUEST-944-APPLICATION-ATTACK-JAVA\"]:\n disabled_rules = rule_override.get(\"disabled_rules\") or []\n if isinstance(disabled_rules, list) and \"944240\" in force_list(disabled_rules[0]):\n return CheckResult.FAILED\n", "issue": "[ERROR] Failed to run check: Ensure Application Gateway WAF prevents message lookup in Log4j2.\n**Describe the issue**\r\nWhile performing checkov scan on our terraform dir, we are running into this error:\r\n\r\n`2022-07-18 10:09:32,794 [MainThread ] [ERROR] Failed to run check: Ensure Application Gateway WAF prevents message lookup in Log4j2. See CVE-2021-44228 aka log4jshell for configuration: {'custom_rules': [[]], 'location': ['westeurope'], 'managed_rules': [{'exclusion': [[]], 'managed_rule_set': [{'rule_group_override': [[]], 'type': ['OWASP'], 'version': ['3.1'], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], 'name': ['waf-acc-weu-001'], 'policy_settings': [{'enabled': [True], 'file_upload_limit_in_mb': [100], 'max_request_body_size_in_kb': [128], 'mode': ['Prevention'], 'request_body_check': [True], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0]}], 'resource_group_name': ['rg-acceptance-weu-001'], 'tags': [None], 'timeouts': [None], '__startline__': [1], '__endline__': [1], 'start_line': [0], 'end_line': [0], 'references_': ['data.azurerm_resource_group.resourcegroup.name', 'data.azurerm_resource_group.resourcegroup'], '__address__': 'azurerm_web_application_firewall_policy.agw_waf_policy_aks_acc'} at file: /tf/plan.json`\r\n\r\n**Examples**\r\nWhen looking into this page: https://docs.bridgecrew.io/docs/ensure-application-gateway-waf-prevents-message-lookup-in-log4j2, we saw a code example, but the thing is, we have exactly the same code,:\r\n\r\n```\r\nresource \"azurerm_web_application_firewall_policy\" \"agw_waf_policy_aks_acc\" {\r\n name = \"waf-acc-weu-001\"\r\n resource_group_name = data.azurerm_resource_group.kbc_acc_resourcegroup.name\r\n location = var.location\r\n\r\n policy_settings {\r\n enabled = true\r\n mode = \"Prevention\"\r\n request_body_check = true\r\n # Set to 200 because ...\r\n file_upload_limit_in_mb = 200\r\n # Set to 2000 because ...\r\n max_request_body_size_in_kb = 2000\r\n }\r\n\r\n managed_rules \r\n\r\n managed_rule_set {\r\n type = \"OWASP\"\r\n version = \"3.2\"\r\n }\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - bridgecrew/checkov docker image without version specified\r\n\r\n**Additional context**\r\nTraceback:\r\n\r\n```\r\nProcess ForkProcess-6:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/usr/local/lib/python3.8/multiprocessing/process.py\", line 108, in run\r\n 
self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/parallelizer/parallel_runner.py\", line 37, in func_wrapper\r\n result = original_func(item)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 76, in _parallel_run\r\n return runner.run(\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 72, in run\r\n self.check_tf_definition(report, root_folder, runner_filter)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 90, in check_tf_definition\r\n self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 110, in run_block\r\n results = registry.scan(scanned_file, entity, [], runner_filter)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 124, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 138, in run_check\r\n result = check.run(\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 75, in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py\", line 35, in scan_resource_conf\r\n if rule_override.get(\"rule_group_name\") == [\"REQUEST-944-APPLICATION-ATTACK-JAVA\"]:\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/parsers/node.py\", line 189, in __getattr__\r\n raise TemplateAttributeError(f'{name} is invalid')\r\ncheckov.common.parsers.node.TemplateAttributeError: get is invalid\r\n\r\n```\n", "before_files": [{"content": "from typing import Dict, Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.common.util.type_forcers import force_list\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass AppGatewayWAFACLCVE202144228(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure Application Gateway WAF prevents message lookup in Log4j2. 
See CVE-2021-44228 aka log4jshell\"\n id = \"CKV_AZURE_135\"\n supported_resources = (\"azurerm_web_application_firewall_policy\",)\n categories = (CheckCategories.APPLICATION_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf: Dict[str, Any]) -> CheckResult:\n self.evaluated_keys = [\"managed_rules\"]\n managed_rules = conf.get(\"managed_rules\")\n if managed_rules:\n managed_rule_sets = managed_rules[0].get(\"managed_rule_set\") or []\n for idx_rule_set, rule_set in enumerate(force_list(managed_rule_sets)):\n self.evaluated_keys = [\n f\"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/type\",\n f\"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/version\",\n ]\n if rule_set.get(\"type\", [\"OWASP\"]) == [\"OWASP\"] and rule_set.get(\"version\") in ([\"3.1\"], [\"3.2\"]):\n rule_overrides = rule_set.get(\"rule_group_override\") or []\n for idx_override, rule_override in enumerate(force_list(rule_overrides)):\n self.evaluated_keys.extend(\n [\n f\"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/rule_group_name\",\n f\"managed_rules/[0]/managed_rule_set[{idx_rule_set}]/rule_group_override/[{idx_override}]/disabled_rules\",\n ]\n )\n if rule_override.get(\"rule_group_name\") == [\"REQUEST-944-APPLICATION-ATTACK-JAVA\"]:\n disabled_rules = rule_override.get(\"disabled_rules\") or []\n if isinstance(disabled_rules, list) and \"944240\" in force_list(disabled_rules[0]):\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n\ncheck = AppGatewayWAFACLCVE202144228()\n", "path": "checkov/terraform/checks/resource/azure/AppGatewayWAFACLCVE202144228.py"}]} | 2,533 | 268 |
gh_patches_debug_36677 | rasdani/github-patches | git_diff | sublimelsp__LSP-693 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some servers provide tooltips for ignored scopes
The vscode-json-languageserver provides tooltips for json keys, but the `string` scope is ignored
https://github.com/tomv564/LSP/blob/1836426c85826f20de73e50ab285a948eebbeba4/plugin/hover.py#L21
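For illustration only, a reduced plain-Python sketch (the real code calls `view.classify` and `view.match_selector`) of why a JSON key never triggers a hover request while `string` sits in `NO_HOVER_SCOPES`:

```python
SUBLIME_WORD_MASK = 515
NO_HOVER_SCOPES = 'comment, string'


def is_likely_at_symbol(classify_flags: int, scope: str) -> bool:
    # Rough stand-in for the classify/match_selector gate in hover.py.
    in_ignored_scope = any(
        atom.startswith(sel.strip())
        for atom in scope.split()
        for sel in NO_HOVER_SCOPES.split(',')
    )
    return bool(classify_flags & SUBLIME_WORD_MASK) and not in_ignored_scope


# A JSON key is a word, but it is scoped as a string, so no hover request is sent.
print(is_likely_at_symbol(1, "source.json meta.mapping.key.json string.quoted.double.json"))  # False
```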
</issue>
<code>
[start of plugin/hover.py]
1 import mdpopups
2 import sublime
3 import sublime_plugin
4 import webbrowser
5 from html import escape
6 try:
7 from typing import List, Optional, Any, Dict
8 assert List and Optional and Any and Dict
9 except ImportError:
10 pass
11
12 from .core.configurations import is_supported_syntax
13 from .diagnostics import get_point_diagnostics
14 from .core.registry import session_for_view, LspTextCommand
15 from .core.protocol import Request, DiagnosticSeverity
16 from .core.documents import get_document_position
17 from .core.popups import popup_css, popup_class
18 from .core.settings import client_configs
19
20 SUBLIME_WORD_MASK = 515
21 NO_HOVER_SCOPES = 'comment, string'
22
23
24 class HoverHandler(sublime_plugin.ViewEventListener):
25 def __init__(self, view):
26 self.view = view
27
28 @classmethod
29 def is_applicable(cls, settings):
30 syntax = settings.get('syntax')
31 return syntax and is_supported_syntax(syntax, client_configs.all)
32
33 def on_hover(self, point, hover_zone):
34 if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():
35 return
36 self.view.run_command("lsp_hover", {"point": point})
37
38
39 _test_contents = [] # type: List[str]
40
41
42 class_for_severity = {
43 DiagnosticSeverity.Error: 'errors',
44 DiagnosticSeverity.Warning: 'warnings',
45 DiagnosticSeverity.Information: 'info',
46 DiagnosticSeverity.Hint: 'hints'
47 }
48
49
50 class GotoKind:
51
52 __slots__ = ("lsp_name", "label", "subl_cmd_name")
53
54 def __init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:
55 self.lsp_name = lsp_name
56 self.label = label
57 self.subl_cmd_name = subl_cmd_name
58
59
60 goto_kinds = [
61 GotoKind("definition", "Definition", "definition"),
62 GotoKind("typeDefinition", "Type Definition", "type_definition"),
63 GotoKind("declaration", "Declaration", "declaration"),
64 GotoKind("implementation", "Implementation", "implementation")
65 ]
66
67
68 class LspHoverCommand(LspTextCommand):
69 def __init__(self, view):
70 super().__init__(view)
71
72 def is_likely_at_symbol(self, point):
73 word_at_sel = self.view.classify(point)
74 return word_at_sel & SUBLIME_WORD_MASK and not self.view.match_selector(point, NO_HOVER_SCOPES)
75
76 def run(self, edit, point=None):
77 if point is None:
78 point = self.view.sel()[0].begin()
79 if self.is_likely_at_symbol(point):
80 self.request_symbol_hover(point)
81 point_diagnostics = get_point_diagnostics(self.view, point)
82 if point_diagnostics:
83 self.show_hover(point, self.diagnostics_content(point_diagnostics))
84
85 def request_symbol_hover(self, point) -> None:
86 session = session_for_view(self.view, point)
87 if session:
88 if session.has_capability('hoverProvider'):
89 document_position = get_document_position(self.view, point)
90 if document_position:
91 if session.client:
92 session.client.send_request(
93 Request.hover(document_position),
94 lambda response: self.handle_response(response, point))
95
96 def handle_response(self, response: 'Optional[Any]', point) -> None:
97 all_content = ""
98
99 point_diagnostics = get_point_diagnostics(self.view, point)
100 if point_diagnostics:
101 all_content += self.diagnostics_content(point_diagnostics)
102
103 all_content += self.hover_content(point, response)
104 all_content += self.symbol_actions_content()
105
106 _test_contents.clear()
107 _test_contents.append(all_content) # for testing only
108 self.show_hover(point, all_content)
109
110 def symbol_actions_content(self):
111 actions = []
112 for goto_kind in goto_kinds:
113 if self.has_client_with_capability(goto_kind.lsp_name + "Provider"):
114 actions.append("<a href='{}'>{}</a>".format(goto_kind.lsp_name, goto_kind.label))
115 if self.has_client_with_capability('referencesProvider'):
116 actions.append("<a href='{}'>{}</a>".format('references', 'References'))
117 if self.has_client_with_capability('renameProvider'):
118 actions.append("<a href='{}'>{}</a>".format('rename', 'Rename'))
119 return "<p>" + " | ".join(actions) + "</p>"
120
121 def format_diagnostic(self, diagnostic):
122 if diagnostic.source:
123 return "<pre>[{}] {}</pre>".format(diagnostic.source, escape(diagnostic.message, False))
124 else:
125 return "<pre>{}</pre>".format(escape(diagnostic.message, False))
126
127 def diagnostics_content(self, diagnostics):
128 by_severity = {} # type: Dict[int, List[str]]
129 for diagnostic in diagnostics:
130 by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))
131 formatted = []
132 for severity, items in by_severity.items():
133 formatted.append("<div class='{}'>".format(class_for_severity[severity]))
134 formatted.extend(items)
135 formatted.append("<a href='{}'>{}</a>".format('code-actions',
136 'Code Actions'))
137 formatted.append("</div>")
138
139 return "".join(formatted)
140
141 def hover_content(self, point, response: 'Optional[Any]') -> str:
142 contents = ["No description available."]
143 if isinstance(response, dict):
144 # Flow returns None sometimes
145 # See: https://github.com/flowtype/flow-language-server/issues/51
146 response_content = response.get('contents')
147 if response_content:
148 if isinstance(response_content, list):
149 contents = response_content
150 else:
151 contents = [response_content]
152
153 formatted = []
154 for item in contents:
155 value = ""
156 language = None
157 if isinstance(item, str):
158 value = item
159 else:
160 value = item.get("value")
161 language = item.get("language")
162 if language:
163 formatted.append("```{}\n{}\n```\n".format(language, value))
164 else:
165 formatted.append(value)
166
167 return mdpopups.md2html(self.view, "\n".join(formatted))
168
169 def show_hover(self, point, contents):
170 mdpopups.show_popup(
171 self.view,
172 contents,
173 css=popup_css,
174 md=False,
175 flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,
176 location=point,
177 wrapper_class=popup_class,
178 max_width=800,
179 on_navigate=lambda href: self.on_hover_navigate(href, point))
180
181 def on_hover_navigate(self, href, point):
182 for goto_kind in goto_kinds:
183 if href == goto_kind.lsp_name:
184 self.run_command_from_point(point, "lsp_symbol_" + goto_kind.subl_cmd_name)
185 return
186 if href == 'references':
187 self.run_command_from_point(point, "lsp_symbol_references")
188 elif href == 'rename':
189 self.run_command_from_point(point, "lsp_symbol_rename")
190 elif href == 'code-actions':
191 self.run_command_from_point(point, "lsp_code_actions")
192 else:
193 webbrowser.open_new_tab(href)
194
195 def run_command_from_point(self, point, command_name):
196 sel = self.view.sel()
197 sel.clear()
198 sel.add(sublime.Region(point, point))
199 self.view.run_command(command_name)
200
[end of plugin/hover.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/hover.py b/plugin/hover.py
--- a/plugin/hover.py
+++ b/plugin/hover.py
@@ -18,7 +18,6 @@
from .core.settings import client_configs
SUBLIME_WORD_MASK = 515
-NO_HOVER_SCOPES = 'comment, string'
class HoverHandler(sublime_plugin.ViewEventListener):
@@ -71,7 +70,7 @@
def is_likely_at_symbol(self, point):
word_at_sel = self.view.classify(point)
- return word_at_sel & SUBLIME_WORD_MASK and not self.view.match_selector(point, NO_HOVER_SCOPES)
+ return word_at_sel & SUBLIME_WORD_MASK
def run(self, edit, point=None):
if point is None:
@@ -101,11 +100,14 @@
all_content += self.diagnostics_content(point_diagnostics)
all_content += self.hover_content(point, response)
- all_content += self.symbol_actions_content()
+ if all_content:
+ all_content += self.symbol_actions_content()
_test_contents.clear()
_test_contents.append(all_content) # for testing only
- self.show_hover(point, all_content)
+
+ if all_content:
+ self.show_hover(point, all_content)
def symbol_actions_content(self):
actions = []
@@ -139,10 +141,8 @@
return "".join(formatted)
def hover_content(self, point, response: 'Optional[Any]') -> str:
- contents = ["No description available."]
+ contents = [] # type: List[Any]
if isinstance(response, dict):
- # Flow returns None sometimes
- # See: https://github.com/flowtype/flow-language-server/issues/51
response_content = response.get('contents')
if response_content:
if isinstance(response_content, list):
@@ -164,7 +164,10 @@
else:
formatted.append(value)
- return mdpopups.md2html(self.view, "\n".join(formatted))
+ if formatted:
+ return mdpopups.md2html(self.view, "\n".join(formatted))
+
+ return ""
def show_hover(self, point, contents):
mdpopups.show_popup(
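In short, the patch above makes the popup conditional: hovers that yield neither diagnostics nor server-provided content no longer open an empty popup. The stand-in class below is a self-contained sketch of that guarded flow; it is not part of the plugin, the names are simplified, and the Sublime/mdpopups calls are replaced with `print`:

```python
class HoverSketch:
    """Minimal illustration of the guarded hover flow introduced by the diff above."""

    def hover_content(self, point, response):
        # The patched method returns "" when the server sent nothing usable.
        return "" if not response else str(response)

    def symbol_actions_content(self):
        return "<p>Definition | References | Rename</p>"

    def show_hover(self, point, contents):
        print("popup at", point, ":", contents)

    def handle_response(self, response, point, diagnostics=""):
        all_content = diagnostics + self.hover_content(point, response)
        if all_content:
            all_content += self.symbol_actions_content()
            self.show_hover(point, all_content)  # skipped entirely for empty hovers


HoverSketch().handle_response(response=None, point=10)         # no output: empty hover, no popup
HoverSketch().handle_response(response="docstring", point=10)  # prints the popup contents
```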
| {"golden_diff": "diff --git a/plugin/hover.py b/plugin/hover.py\n--- a/plugin/hover.py\n+++ b/plugin/hover.py\n@@ -18,7 +18,6 @@\n from .core.settings import client_configs\n \n SUBLIME_WORD_MASK = 515\n-NO_HOVER_SCOPES = 'comment, string'\n \n \n class HoverHandler(sublime_plugin.ViewEventListener):\n@@ -71,7 +70,7 @@\n \n def is_likely_at_symbol(self, point):\n word_at_sel = self.view.classify(point)\n- return word_at_sel & SUBLIME_WORD_MASK and not self.view.match_selector(point, NO_HOVER_SCOPES)\n+ return word_at_sel & SUBLIME_WORD_MASK\n \n def run(self, edit, point=None):\n if point is None:\n@@ -101,11 +100,14 @@\n all_content += self.diagnostics_content(point_diagnostics)\n \n all_content += self.hover_content(point, response)\n- all_content += self.symbol_actions_content()\n+ if all_content:\n+ all_content += self.symbol_actions_content()\n \n _test_contents.clear()\n _test_contents.append(all_content) # for testing only\n- self.show_hover(point, all_content)\n+\n+ if all_content:\n+ self.show_hover(point, all_content)\n \n def symbol_actions_content(self):\n actions = []\n@@ -139,10 +141,8 @@\n return \"\".join(formatted)\n \n def hover_content(self, point, response: 'Optional[Any]') -> str:\n- contents = [\"No description available.\"]\n+ contents = [] # type: List[Any]\n if isinstance(response, dict):\n- # Flow returns None sometimes\n- # See: https://github.com/flowtype/flow-language-server/issues/51\n response_content = response.get('contents')\n if response_content:\n if isinstance(response_content, list):\n@@ -164,7 +164,10 @@\n else:\n formatted.append(value)\n \n- return mdpopups.md2html(self.view, \"\\n\".join(formatted))\n+ if formatted:\n+ return mdpopups.md2html(self.view, \"\\n\".join(formatted))\n+\n+ return \"\"\n \n def show_hover(self, point, contents):\n mdpopups.show_popup(\n", "issue": "Some servers provide tooltips for ignored scopes\nThe vscode-json-languageserver provides tooltips for json keys, but the `string` scope is ignored\r\nhttps://github.com/tomv564/LSP/blob/1836426c85826f20de73e50ab285a948eebbeba4/plugin/hover.py#L21\r\n\n", "before_files": [{"content": "import mdpopups\nimport sublime\nimport sublime_plugin\nimport webbrowser\nfrom html import escape\ntry:\n from typing import List, Optional, Any, Dict\n assert List and Optional and Any and Dict\nexcept ImportError:\n pass\n\nfrom .core.configurations import is_supported_syntax\nfrom .diagnostics import get_point_diagnostics\nfrom .core.registry import session_for_view, LspTextCommand\nfrom .core.protocol import Request, DiagnosticSeverity\nfrom .core.documents import get_document_position\nfrom .core.popups import popup_css, popup_class\nfrom .core.settings import client_configs\n\nSUBLIME_WORD_MASK = 515\nNO_HOVER_SCOPES = 'comment, string'\n\n\nclass HoverHandler(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n\n @classmethod\n def is_applicable(cls, settings):\n syntax = settings.get('syntax')\n return syntax and is_supported_syntax(syntax, client_configs.all)\n\n def on_hover(self, point, hover_zone):\n if hover_zone != sublime.HOVER_TEXT or self.view.is_popup_visible():\n return\n self.view.run_command(\"lsp_hover\", {\"point\": point})\n\n\n_test_contents = [] # type: List[str]\n\n\nclass_for_severity = {\n DiagnosticSeverity.Error: 'errors',\n DiagnosticSeverity.Warning: 'warnings',\n DiagnosticSeverity.Information: 'info',\n DiagnosticSeverity.Hint: 'hints'\n}\n\n\nclass GotoKind:\n\n __slots__ = (\"lsp_name\", \"label\", \"subl_cmd_name\")\n\n def 
__init__(self, lsp_name: str, label: str, subl_cmd_name: str) -> None:\n self.lsp_name = lsp_name\n self.label = label\n self.subl_cmd_name = subl_cmd_name\n\n\ngoto_kinds = [\n GotoKind(\"definition\", \"Definition\", \"definition\"),\n GotoKind(\"typeDefinition\", \"Type Definition\", \"type_definition\"),\n GotoKind(\"declaration\", \"Declaration\", \"declaration\"),\n GotoKind(\"implementation\", \"Implementation\", \"implementation\")\n]\n\n\nclass LspHoverCommand(LspTextCommand):\n def __init__(self, view):\n super().__init__(view)\n\n def is_likely_at_symbol(self, point):\n word_at_sel = self.view.classify(point)\n return word_at_sel & SUBLIME_WORD_MASK and not self.view.match_selector(point, NO_HOVER_SCOPES)\n\n def run(self, edit, point=None):\n if point is None:\n point = self.view.sel()[0].begin()\n if self.is_likely_at_symbol(point):\n self.request_symbol_hover(point)\n point_diagnostics = get_point_diagnostics(self.view, point)\n if point_diagnostics:\n self.show_hover(point, self.diagnostics_content(point_diagnostics))\n\n def request_symbol_hover(self, point) -> None:\n session = session_for_view(self.view, point)\n if session:\n if session.has_capability('hoverProvider'):\n document_position = get_document_position(self.view, point)\n if document_position:\n if session.client:\n session.client.send_request(\n Request.hover(document_position),\n lambda response: self.handle_response(response, point))\n\n def handle_response(self, response: 'Optional[Any]', point) -> None:\n all_content = \"\"\n\n point_diagnostics = get_point_diagnostics(self.view, point)\n if point_diagnostics:\n all_content += self.diagnostics_content(point_diagnostics)\n\n all_content += self.hover_content(point, response)\n all_content += self.symbol_actions_content()\n\n _test_contents.clear()\n _test_contents.append(all_content) # for testing only\n self.show_hover(point, all_content)\n\n def symbol_actions_content(self):\n actions = []\n for goto_kind in goto_kinds:\n if self.has_client_with_capability(goto_kind.lsp_name + \"Provider\"):\n actions.append(\"<a href='{}'>{}</a>\".format(goto_kind.lsp_name, goto_kind.label))\n if self.has_client_with_capability('referencesProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('references', 'References'))\n if self.has_client_with_capability('renameProvider'):\n actions.append(\"<a href='{}'>{}</a>\".format('rename', 'Rename'))\n return \"<p>\" + \" | \".join(actions) + \"</p>\"\n\n def format_diagnostic(self, diagnostic):\n if diagnostic.source:\n return \"<pre>[{}] {}</pre>\".format(diagnostic.source, escape(diagnostic.message, False))\n else:\n return \"<pre>{}</pre>\".format(escape(diagnostic.message, False))\n\n def diagnostics_content(self, diagnostics):\n by_severity = {} # type: Dict[int, List[str]]\n for diagnostic in diagnostics:\n by_severity.setdefault(diagnostic.severity, []).append(self.format_diagnostic(diagnostic))\n formatted = []\n for severity, items in by_severity.items():\n formatted.append(\"<div class='{}'>\".format(class_for_severity[severity]))\n formatted.extend(items)\n formatted.append(\"<a href='{}'>{}</a>\".format('code-actions',\n 'Code Actions'))\n formatted.append(\"</div>\")\n\n return \"\".join(formatted)\n\n def hover_content(self, point, response: 'Optional[Any]') -> str:\n contents = [\"No description available.\"]\n if isinstance(response, dict):\n # Flow returns None sometimes\n # See: https://github.com/flowtype/flow-language-server/issues/51\n response_content = response.get('contents')\n if 
response_content:\n if isinstance(response_content, list):\n contents = response_content\n else:\n contents = [response_content]\n\n formatted = []\n for item in contents:\n value = \"\"\n language = None\n if isinstance(item, str):\n value = item\n else:\n value = item.get(\"value\")\n language = item.get(\"language\")\n if language:\n formatted.append(\"```{}\\n{}\\n```\\n\".format(language, value))\n else:\n formatted.append(value)\n\n return mdpopups.md2html(self.view, \"\\n\".join(formatted))\n\n def show_hover(self, point, contents):\n mdpopups.show_popup(\n self.view,\n contents,\n css=popup_css,\n md=False,\n flags=sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n location=point,\n wrapper_class=popup_class,\n max_width=800,\n on_navigate=lambda href: self.on_hover_navigate(href, point))\n\n def on_hover_navigate(self, href, point):\n for goto_kind in goto_kinds:\n if href == goto_kind.lsp_name:\n self.run_command_from_point(point, \"lsp_symbol_\" + goto_kind.subl_cmd_name)\n return\n if href == 'references':\n self.run_command_from_point(point, \"lsp_symbol_references\")\n elif href == 'rename':\n self.run_command_from_point(point, \"lsp_symbol_rename\")\n elif href == 'code-actions':\n self.run_command_from_point(point, \"lsp_code_actions\")\n else:\n webbrowser.open_new_tab(href)\n\n def run_command_from_point(self, point, command_name):\n sel = self.view.sel()\n sel.clear()\n sel.add(sublime.Region(point, point))\n self.view.run_command(command_name)\n", "path": "plugin/hover.py"}]} | 2,689 | 509 |
gh_patches_debug_16198 | rasdani/github-patches | git_diff | numpy__numpy-13688 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DOC: numpy.random.sample and numpy.random.random_sample
I just noticed in the docs that the page for `numpy.random.sample` indicates that the function should be called as `numpy.random.random_sample`. I understand that this may just indicate that the function may be called as either `sample` or `random_sample`, but it does come across as a mistake when first viewing the page. Perhaps make it more explicit that `random_sample` is an alias of `sample`? Or is this the accepted practice for functions that have aliases?
</issue>
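For quick context on the alias relationship the issue describes, the short script below can be run against released NumPy; it is an illustration only and is not part of the sources listed next:

```python
import numpy as np

np.random.seed(123)
a = np.random.random_sample(3)

np.random.seed(123)
b = np.random.sample(3)   # `sample` dispatches to the same generator method as `random_sample`

print(np.allclose(a, b))  # True -- which is why the docs page for `sample` shows the
                          # `random_sample` signature
```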
<code>
[start of numpy/random/__init__.py]
1 """
2 ========================
3 Random Number Generation
4 ========================
5
6 Instantiate a BitGenerator and wrap it in a Generator
7 which will convert the uniform stream to a number of distributions. The "bare"
8 functions are kept for legacy code, they should be called with the newer API
9 via ``np.random.Generator().function`` instead
10
11 ==================== =========================================================
12 Utility functions
13 -------------------- ---------------------------------------------------------
14 random Uniformly distributed floats over ``[0, 1)``
15 integers Uniformly distributed integers, replaces ``randint``
16 bytes Uniformly distributed random bytes.
17 permutation Randomly permute a sequence / generate a random sequence.
18 shuffle Randomly permute a sequence in place.
19 seed Seed the random number generator.
20 choice Random sample from 1-D array.
21 ==================== =========================================================
22
23 ==================== =========================================================
24 Compatibility
25 functions - removed
26 in the new API
27 -------------------- ---------------------------------------------------------
28 rand Uniformly distributed values.
29 randn Normally distributed values.
30 ranf Uniformly distributed floating point numbers.
31 random_integers Uniformly distributed integers in a given range.
32 (deprecated, use ``integers(..., closed=True)`` instead)
33 random_sample Alias for `random_sample`
34 randint Uniformly distributed integers in a given range
35 ==================== =========================================================
36
37 ==================== =========================================================
38 Univariate
39 distributions
40 -------------------- ---------------------------------------------------------
41 beta Beta distribution over ``[0, 1]``.
42 binomial Binomial distribution.
43 chisquare :math:`\\chi^2` distribution.
44 exponential Exponential distribution.
45 f F (Fisher-Snedecor) distribution.
46 gamma Gamma distribution.
47 geometric Geometric distribution.
48 gumbel Gumbel distribution.
49 hypergeometric Hypergeometric distribution.
50 laplace Laplace distribution.
51 logistic Logistic distribution.
52 lognormal Log-normal distribution.
53 logseries Logarithmic series distribution.
54 negative_binomial Negative binomial distribution.
55 noncentral_chisquare Non-central chi-square distribution.
56 noncentral_f Non-central F distribution.
57 normal Normal / Gaussian distribution.
58 pareto Pareto distribution.
59 poisson Poisson distribution.
60 power Power distribution.
61 rayleigh Rayleigh distribution.
62 triangular Triangular distribution.
63 uniform Uniform distribution.
64 vonmises Von Mises circular distribution.
65 wald Wald (inverse Gaussian) distribution.
66 weibull Weibull distribution.
67 zipf Zipf's distribution over ranked data.
68 ==================== =========================================================
69
70 ==================== ==========================================================
71 Multivariate
72 distributions
73 -------------------- ----------------------------------------------------------
74 dirichlet Multivariate generalization of Beta distribution.
75 multinomial Multivariate generalization of the binomial distribution.
76 multivariate_normal Multivariate generalization of the normal distribution.
77 ==================== ==========================================================
78
79 ==================== =========================================================
80 Standard
81 distributions
82 -------------------- ---------------------------------------------------------
83 standard_cauchy Standard Cauchy-Lorentz distribution.
84 standard_exponential Standard exponential distribution.
85 standard_gamma Standard Gamma distribution.
86 standard_normal Standard normal distribution.
87 standard_t Standard Student's t-distribution.
88 ==================== =========================================================
89
90 ==================== =========================================================
91 Internal functions
92 -------------------- ---------------------------------------------------------
93 get_state Get tuple representing internal state of generator.
94 set_state Set state of generator.
95 ==================== =========================================================
96
97 ============================================= ===
98 BitGenerator Streams that work with Generator
99 --------------------------------------------- ---
100 MT19937
101 DSFMT
102 PCG32
103 PCG64
104 Philox
105 ThreeFry
106 Xoshiro256
107 Xoshiro512
108 ============================================= ===
109
110 """
111 from __future__ import division, absolute_import, print_function
112
113 __all__ = [
114 'beta',
115 'binomial',
116 'bytes',
117 'chisquare',
118 'choice',
119 'dirichlet',
120 'exponential',
121 'f',
122 'gamma',
123 'geometric',
124 'get_state',
125 'gumbel',
126 'hypergeometric',
127 'laplace',
128 'logistic',
129 'lognormal',
130 'logseries',
131 'multinomial',
132 'multivariate_normal',
133 'negative_binomial',
134 'noncentral_chisquare',
135 'noncentral_f',
136 'normal',
137 'pareto',
138 'permutation',
139 'poisson',
140 'power',
141 'rand',
142 'randint',
143 'randn',
144 'random_integers',
145 'random_sample',
146 'rayleigh',
147 'seed',
148 'set_state',
149 'shuffle',
150 'standard_cauchy',
151 'standard_exponential',
152 'standard_gamma',
153 'standard_normal',
154 'standard_t',
155 'triangular',
156 'uniform',
157 'vonmises',
158 'wald',
159 'weibull',
160 'zipf',
161 ]
162
163 from . import mtrand
164 from .mtrand import *
165 from .dsfmt import DSFMT
166 from .generator import Generator
167 from .mt19937 import MT19937
168 from .pcg32 import PCG32
169 from .pcg64 import PCG64
170 from .philox import Philox
171 from .threefry import ThreeFry
172 from .xoshiro256 import Xoshiro256
173 from .xoshiro512 import Xoshiro512
174 from .mtrand import RandomState
175
176 __all__ += ['Generator', 'DSFMT', 'MT19937', 'Philox', 'PCG64', 'PCG32',
177 'ThreeFry', 'Xoshiro256', 'Xoshiro512', 'RandomState']
178
179 # Some aliases:
180 ranf = random = sample = random_sample
181 __all__.extend(['ranf', 'random', 'sample'])
182
183
184 def __RandomState_ctor():
185 """Return a RandomState instance.
186
187 This function exists solely to assist (un)pickling.
188
189 Note that the state of the RandomState returned here is irrelevant, as this function's
190 entire purpose is to return a newly allocated RandomState whose state pickle can set.
191 Consequently the RandomState returned by this function is a freshly allocated copy
192 with a seed=0.
193
194 See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
195
196 """
197 return RandomState(seed=0)
198
199
200 from numpy._pytesttester import PytestTester
201 test = PytestTester(__name__)
202 del PytestTester
203
[end of numpy/random/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -141,9 +141,12 @@
'rand',
'randint',
'randn',
+ 'random',
'random_integers',
'random_sample',
+ 'ranf',
'rayleigh',
+ 'sample',
'seed',
'set_state',
'shuffle',
@@ -176,10 +179,6 @@
__all__ += ['Generator', 'DSFMT', 'MT19937', 'Philox', 'PCG64', 'PCG32',
'ThreeFry', 'Xoshiro256', 'Xoshiro512', 'RandomState']
-# Some aliases:
-ranf = random = sample = random_sample
-__all__.extend(['ranf', 'random', 'sample'])
-
def __RandomState_ctor():
"""Return a RandomState instance.
| {"golden_diff": "diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py\n--- a/numpy/random/__init__.py\n+++ b/numpy/random/__init__.py\n@@ -141,9 +141,12 @@\n 'rand',\n 'randint',\n 'randn',\n+ 'random',\n 'random_integers',\n 'random_sample',\n+ 'ranf',\n 'rayleigh',\n+ 'sample',\n 'seed',\n 'set_state',\n 'shuffle',\n@@ -176,10 +179,6 @@\n __all__ += ['Generator', 'DSFMT', 'MT19937', 'Philox', 'PCG64', 'PCG32',\n 'ThreeFry', 'Xoshiro256', 'Xoshiro512', 'RandomState']\n \n-# Some aliases:\n-ranf = random = sample = random_sample\n-__all__.extend(['ranf', 'random', 'sample'])\n-\n \n def __RandomState_ctor():\n \"\"\"Return a RandomState instance.\n", "issue": "DOC: numpy.random.sample and numpy.random.random_sample\nI just noticed in the docs that the page for `numpy.random.sample` indicates that the function should be called as `numpy.random.random_sample`. I understand that this may just indicate that the function may be called as either `sample` or `random_sample`, but it does come across as a mistake when first viewing the page. Perhaps make it more explicit that `random_sample` is an alias of `sample`? Or is this the accepted practice for functions that have aliases?\n", "before_files": [{"content": "\"\"\"\n========================\nRandom Number Generation\n========================\n\nInstantiate a BitGenerator and wrap it in a Generator\nwhich will convert the uniform stream to a number of distributions. The \"bare\"\nfunctions are kept for legacy code, they should be called with the newer API\nvia ``np.random.Generator().function`` instead\n\n==================== =========================================================\nUtility functions\n-------------------- ---------------------------------------------------------\nrandom Uniformly distributed floats over ``[0, 1)``\nintegers Uniformly distributed integers, replaces ``randint``\nbytes Uniformly distributed random bytes.\npermutation Randomly permute a sequence / generate a random sequence.\nshuffle Randomly permute a sequence in place.\nseed Seed the random number generator.\nchoice Random sample from 1-D array.\n==================== =========================================================\n\n==================== =========================================================\nCompatibility\nfunctions - removed\nin the new API\n-------------------- ---------------------------------------------------------\nrand Uniformly distributed values.\nrandn Normally distributed values.\nranf Uniformly distributed floating point numbers.\nrandom_integers Uniformly distributed integers in a given range.\n (deprecated, use ``integers(..., closed=True)`` instead)\nrandom_sample Alias for `random_sample`\nrandint Uniformly distributed integers in a given range\n==================== =========================================================\n\n==================== =========================================================\nUnivariate\ndistributions\n-------------------- ---------------------------------------------------------\nbeta Beta distribution over ``[0, 1]``.\nbinomial Binomial distribution.\nchisquare :math:`\\\\chi^2` distribution.\nexponential Exponential distribution.\nf F (Fisher-Snedecor) distribution.\ngamma Gamma distribution.\ngeometric Geometric distribution.\ngumbel Gumbel distribution.\nhypergeometric Hypergeometric distribution.\nlaplace Laplace distribution.\nlogistic Logistic distribution.\nlognormal Log-normal distribution.\nlogseries Logarithmic series distribution.\nnegative_binomial Negative binomial 
distribution.\nnoncentral_chisquare Non-central chi-square distribution.\nnoncentral_f Non-central F distribution.\nnormal Normal / Gaussian distribution.\npareto Pareto distribution.\npoisson Poisson distribution.\npower Power distribution.\nrayleigh Rayleigh distribution.\ntriangular Triangular distribution.\nuniform Uniform distribution.\nvonmises Von Mises circular distribution.\nwald Wald (inverse Gaussian) distribution.\nweibull Weibull distribution.\nzipf Zipf's distribution over ranked data.\n==================== =========================================================\n\n==================== ==========================================================\nMultivariate\ndistributions\n-------------------- ----------------------------------------------------------\ndirichlet Multivariate generalization of Beta distribution.\nmultinomial Multivariate generalization of the binomial distribution.\nmultivariate_normal Multivariate generalization of the normal distribution.\n==================== ==========================================================\n\n==================== =========================================================\nStandard\ndistributions\n-------------------- ---------------------------------------------------------\nstandard_cauchy Standard Cauchy-Lorentz distribution.\nstandard_exponential Standard exponential distribution.\nstandard_gamma Standard Gamma distribution.\nstandard_normal Standard normal distribution.\nstandard_t Standard Student's t-distribution.\n==================== =========================================================\n\n==================== =========================================================\nInternal functions\n-------------------- ---------------------------------------------------------\nget_state Get tuple representing internal state of generator.\nset_state Set state of generator.\n==================== =========================================================\n\n============================================= ===\nBitGenerator Streams that work with Generator\n--------------------------------------------- ---\nMT19937\nDSFMT\nPCG32\nPCG64\nPhilox\nThreeFry\nXoshiro256\nXoshiro512\n============================================= ===\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\n__all__ = [\n 'beta',\n 'binomial',\n 'bytes',\n 'chisquare',\n 'choice',\n 'dirichlet',\n 'exponential',\n 'f',\n 'gamma',\n 'geometric',\n 'get_state',\n 'gumbel',\n 'hypergeometric',\n 'laplace',\n 'logistic',\n 'lognormal',\n 'logseries',\n 'multinomial',\n 'multivariate_normal',\n 'negative_binomial',\n 'noncentral_chisquare',\n 'noncentral_f',\n 'normal',\n 'pareto',\n 'permutation',\n 'poisson',\n 'power',\n 'rand',\n 'randint',\n 'randn',\n 'random_integers',\n 'random_sample',\n 'rayleigh',\n 'seed',\n 'set_state',\n 'shuffle',\n 'standard_cauchy',\n 'standard_exponential',\n 'standard_gamma',\n 'standard_normal',\n 'standard_t',\n 'triangular',\n 'uniform',\n 'vonmises',\n 'wald',\n 'weibull',\n 'zipf',\n]\n\nfrom . 
import mtrand\nfrom .mtrand import *\nfrom .dsfmt import DSFMT\nfrom .generator import Generator\nfrom .mt19937 import MT19937\nfrom .pcg32 import PCG32\nfrom .pcg64 import PCG64\nfrom .philox import Philox\nfrom .threefry import ThreeFry\nfrom .xoshiro256 import Xoshiro256\nfrom .xoshiro512 import Xoshiro512\nfrom .mtrand import RandomState\n\n__all__ += ['Generator', 'DSFMT', 'MT19937', 'Philox', 'PCG64', 'PCG32',\n 'ThreeFry', 'Xoshiro256', 'Xoshiro512', 'RandomState']\n\n# Some aliases:\nranf = random = sample = random_sample\n__all__.extend(['ranf', 'random', 'sample'])\n\n\ndef __RandomState_ctor():\n \"\"\"Return a RandomState instance.\n\n This function exists solely to assist (un)pickling.\n\n Note that the state of the RandomState returned here is irrelevant, as this function's\n entire purpose is to return a newly allocated RandomState whose state pickle can set.\n Consequently the RandomState returned by this function is a freshly allocated copy\n with a seed=0.\n\n See https://github.com/numpy/numpy/issues/4763 for a detailed discussion\n\n \"\"\"\n return RandomState(seed=0)\n\n\nfrom numpy._pytesttester import PytestTester\ntest = PytestTester(__name__)\ndel PytestTester\n", "path": "numpy/random/__init__.py"}]} | 2,478 | 239 |
gh_patches_debug_66191 | rasdani/github-patches | git_diff | nipy__nipype-3634 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: add STC partial volume correction to PETPVC interface
### Summary
Partial Volume Correction using Single-target correction (STC) has been added to PETPVC since the Nipype PETPVC interface was created, and it would therefore be ideal if this could be added to the interface as well.
### Actual behavior
The `pvc` trait currently rejects `'STC'`, since it is not listed in `pvc_methods`.

### Expected behavior
The interface should include the 'STC' option for the 'pvc' flag.
### How to replicate the behavior
### Script/Workflow details
Please put URL to code or code here (if not too long).
### Platform details:
<!-- Please run the following code from your shell and place the output between the triple ticks, below.
python -c "import nipype; from pprint import pprint; pprint(nipype.get_info())"
-->
```
```
### Execution environment
Choose one
- Container [Tag: ???]
- My python environment inside container [Base Tag: ???]
- My python environment outside container
</issue>
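For context, a minimal usage sketch of the requested option is shown below. It mirrors the RBV example in the `PETPVC` class docstring further down and assumes the `pvc` trait already accepts `"STC"` (an unpatched nipype will reject it), so treat it as an illustration rather than working code today:

```python
from nipype.interfaces.petpvc import PETPVC

pvc = PETPVC()
pvc.inputs.in_file = "pet.nii.gz"
pvc.inputs.mask_file = "tissues.nii.gz"
pvc.inputs.out_file = "pet_pvc_stc.nii.gz"
pvc.inputs.pvc = "STC"      # only valid once "STC" is added to pvc_methods
pvc.inputs.fwhm_x = 2.0
pvc.inputs.fwhm_y = 2.0
pvc.inputs.fwhm_z = 2.0
# outs = pvc.run()          # requires the petpvc binary to be installed
```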
<code>
[start of nipype/interfaces/petpvc.py]
1 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
2 # vi: set ft=python sts=4 ts=4 sw=4 et:
3 """PETPVC is a toolbox for partial volume correction in positron emission tomography."""
4 import os
5
6 from .base import (
7 TraitedSpec,
8 CommandLineInputSpec,
9 CommandLine,
10 File,
11 isdefined,
12 traits,
13 )
14 from ..utils.filemanip import fname_presuffix
15 from ..external.due import BibTeX
16
17 pvc_methods = [
18 "GTM",
19 "IY",
20 "IY+RL",
21 "IY+VC",
22 "LABBE",
23 "LABBE+MTC",
24 "LABBE+MTC+RL",
25 "LABBE+MTC+VC",
26 "LABBE+RBV",
27 "LABBE+RBV+RL",
28 "LABBE+RBV+VC",
29 "MG",
30 "MG+RL",
31 "MG+VC",
32 "MTC",
33 "MTC+RL",
34 "MTC+VC",
35 "RBV",
36 "RBV+RL",
37 "RBV+VC",
38 "RL",
39 "VC",
40 ]
41
42
43 class PETPVCInputSpec(CommandLineInputSpec):
44 in_file = File(desc="PET image file", exists=True, mandatory=True, argstr="-i %s")
45 out_file = File(desc="Output file", genfile=True, hash_files=False, argstr="-o %s")
46 mask_file = File(
47 desc="Mask image file", exists=True, mandatory=True, argstr="-m %s"
48 )
49 pvc = traits.Enum(
50 pvc_methods,
51 mandatory=True,
52 argstr="-p %s",
53 desc="""\
54 Desired PVC method:
55
56 * Geometric transfer matrix -- ``GTM``
57 * Labbe approach -- ``LABBE``
58 * Richardson-Lucy -- ``RL``
59 * Van-Cittert -- ``VC``
60 * Region-based voxel-wise correction -- ``RBV``
61 * RBV with Labbe -- ``LABBE+RBV``
62 * RBV with Van-Cittert -- ``RBV+VC``
63 * RBV with Richardson-Lucy -- ``RBV+RL``
64 * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``
65 * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``
66 * Multi-target correction -- ``MTC``
67 * MTC with Labbe -- ``LABBE+MTC``
68 * MTC with Van-Cittert -- ``MTC+VC``
69 * MTC with Richardson-Lucy -- ``MTC+RL``
70 * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``
71 * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``
72 * Iterative Yang -- ``IY``
73 * Iterative Yang with Van-Cittert -- ``IY+VC``
74 * Iterative Yang with Richardson-Lucy -- ``IY+RL``
75 * Muller Gartner -- ``MG``
76 * Muller Gartner with Van-Cittert -- ``MG+VC``
77 * Muller Gartner with Richardson-Lucy -- ``MG+RL``
78
79 """,
80 )
81 fwhm_x = traits.Float(
82 desc="The full-width at half maximum in mm along x-axis",
83 mandatory=True,
84 argstr="-x %.4f",
85 )
86 fwhm_y = traits.Float(
87 desc="The full-width at half maximum in mm along y-axis",
88 mandatory=True,
89 argstr="-y %.4f",
90 )
91 fwhm_z = traits.Float(
92 desc="The full-width at half maximum in mm along z-axis",
93 mandatory=True,
94 argstr="-z %.4f",
95 )
96 debug = traits.Bool(
97 desc="Prints debug information",
98 usedefault=True,
99 default_value=False,
100 argstr="-d",
101 )
102 n_iter = traits.Int(
103 desc="Number of iterations", default_value=10, usedefault=True, argstr="-n %d"
104 )
105 n_deconv = traits.Int(
106 desc="Number of deconvolution iterations",
107 default_value=10,
108 usedefault=True,
109 argstr="-k %d",
110 )
111 alpha = traits.Float(
112 desc="Alpha value", default_value=1.5, usedefault=True, argstr="-a %.4f"
113 )
114 stop_crit = traits.Float(
115 desc="Stopping criterion", default_value=0.01, usedefault=True, argstr="-s %.4f"
116 )
117
118
119 class PETPVCOutputSpec(TraitedSpec):
120 out_file = File(desc="Output file")
121
122
123 class PETPVC(CommandLine):
124 """Use PETPVC for partial volume correction of PET images.
125
126 PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department
127 of the UCL University Hospital, London, UK.
128
129 Examples
130 --------
131 >>> from ..testing import example_data
132 >>> #TODO get data for PETPVC
133 >>> pvc = PETPVC()
134 >>> pvc.inputs.in_file = 'pet.nii.gz'
135 >>> pvc.inputs.mask_file = 'tissues.nii.gz'
136 >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'
137 >>> pvc.inputs.pvc = 'RBV'
138 >>> pvc.inputs.fwhm_x = 2.0
139 >>> pvc.inputs.fwhm_y = 2.0
140 >>> pvc.inputs.fwhm_z = 2.0
141 >>> outs = pvc.run() #doctest: +SKIP
142
143 References
144 ----------
145 .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,
146 "A review of partial volume correction techniques for emission tomography
147 and their applications in neurology, cardiology and oncology," Phys. Med.
148 Biol., vol. 57, no. 21, p. R119, 2012.
149 .. [2] https://github.com/UCL/PETPVC
150
151 """
152
153 input_spec = PETPVCInputSpec
154 output_spec = PETPVCOutputSpec
155 _cmd = "petpvc"
156
157 _references = [
158 {
159 "entry": BibTeX(
160 "@article{0031-9155-61-22-7975,"
161 "author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and "
162 "Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},"
163 "title={PETPVC: a toolbox for performing partial volume correction "
164 "techniques in positron emission tomography},"
165 "journal={Physics in Medicine and Biology},"
166 "volume={61},"
167 "number={22},"
168 "pages={7975},"
169 "url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},"
170 "doi={https://doi.org/10.1088/0031-9155/61/22/7975},"
171 "year={2016},"
172 "}"
173 ),
174 "description": "PETPVC software implementation publication",
175 "tags": ["implementation"],
176 }
177 ]
178
179 def _list_outputs(self):
180 outputs = self.output_spec().get()
181 outputs["out_file"] = self.inputs.out_file
182 if not isdefined(outputs["out_file"]):
183 method_name = self.inputs.pvc.lower()
184 outputs["out_file"] = self._gen_fname(
185 self.inputs.in_file, suffix=f"_{method_name}_pvc"
186 )
187
188 outputs["out_file"] = os.path.abspath(outputs["out_file"])
189 return outputs
190
191 def _gen_fname(
192 self, basename, cwd=None, suffix=None, change_ext=True, ext=".nii.gz"
193 ):
194 """Generate a filename based on the given parameters.
195
196 The filename will take the form: cwd/basename<suffix><ext>.
197 If change_ext is True, it will use the extensions specified in
198 <instance>inputs.output_type.
199
200 Parameters
201 ----------
202 basename : str
203 Filename to base the new filename on.
204 cwd : str
205 Path to prefix to the new filename. (default is os.getcwd())
206 suffix : str
207 Suffix to add to the `basename`. (defaults is '' )
208 change_ext : bool
209 Flag to change the filename extension to the given `ext`.
210 (Default is False)
211
212 Returns
213 -------
214 fname : str
215 New filename based on given parameters.
216
217 """
218 if basename == "":
219 msg = "Unable to generate filename for command %s. " % self.cmd
220 msg += "basename is not set!"
221 raise ValueError(msg)
222 if cwd is None:
223 cwd = os.getcwd()
224 if change_ext:
225 if suffix:
226 suffix = "".join((suffix, ext))
227 else:
228 suffix = ext
229 if suffix is None:
230 suffix = ""
231 fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)
232 return fname
233
234 def _gen_filename(self, name):
235 if name == "out_file":
236 return self._list_outputs()["out_file"]
237 return None
238
[end of nipype/interfaces/petpvc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py
--- a/nipype/interfaces/petpvc.py
+++ b/nipype/interfaces/petpvc.py
@@ -37,6 +37,7 @@
"RBV+VC",
"RL",
"VC",
+ "STC",
]
@@ -75,6 +76,7 @@
* Muller Gartner -- ``MG``
* Muller Gartner with Van-Cittert -- ``MG+VC``
* Muller Gartner with Richardson-Lucy -- ``MG+RL``
+ * Single-target correction -- ``STC``
""",
)
| {"golden_diff": "diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py\n--- a/nipype/interfaces/petpvc.py\n+++ b/nipype/interfaces/petpvc.py\n@@ -37,6 +37,7 @@\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n+ \"STC\",\n ]\n \n \n@@ -75,6 +76,7 @@\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- ``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n+ * Single-target correction -- ``STC``\n \n \"\"\",\n )\n", "issue": "ENH: add STC partial volume correction to PETPVC interface\n### Summary\r\nPartial Volume Correction using Single-target correction (STC) has been added to PETPVC since the Nipype PETPVC interface was created, and it would therefore be ideal if this could be added to the interface as well.\r\n\r\n### Actual behavior\r\nThe interface should include the 'STC' option for the 'pvc' flag.\r\n\r\n### Expected behavior\r\n\r\n### How to replicate the behavior\r\n\r\n### Script/Workflow details\r\n\r\nPlease put URL to code or code here (if not too long).\r\n\r\n### Platform details:\r\n\r\n<!-- Please run the following code from your shell and place the output between the triple ticks, below.\r\npython -c \"import nipype; from pprint import pprint; pprint(nipype.get_info())\"\r\n-->\r\n\r\n```\r\n\r\n```\r\n\r\n### Execution environment\r\n\r\nChoose one\r\n- Container [Tag: ???]\r\n- My python environment inside container [Base Tag: ???]\r\n- My python environment outside container\r\n\n", "before_files": [{"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"PETPVC is a toolbox for partial volume correction in positron emission tomography.\"\"\"\nimport os\n\nfrom .base import (\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File,\n isdefined,\n traits,\n)\nfrom ..utils.filemanip import fname_presuffix\nfrom ..external.due import BibTeX\n\npvc_methods = [\n \"GTM\",\n \"IY\",\n \"IY+RL\",\n \"IY+VC\",\n \"LABBE\",\n \"LABBE+MTC\",\n \"LABBE+MTC+RL\",\n \"LABBE+MTC+VC\",\n \"LABBE+RBV\",\n \"LABBE+RBV+RL\",\n \"LABBE+RBV+VC\",\n \"MG\",\n \"MG+RL\",\n \"MG+VC\",\n \"MTC\",\n \"MTC+RL\",\n \"MTC+VC\",\n \"RBV\",\n \"RBV+RL\",\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n]\n\n\nclass PETPVCInputSpec(CommandLineInputSpec):\n in_file = File(desc=\"PET image file\", exists=True, mandatory=True, argstr=\"-i %s\")\n out_file = File(desc=\"Output file\", genfile=True, hash_files=False, argstr=\"-o %s\")\n mask_file = File(\n desc=\"Mask image file\", exists=True, mandatory=True, argstr=\"-m %s\"\n )\n pvc = traits.Enum(\n pvc_methods,\n mandatory=True,\n argstr=\"-p %s\",\n desc=\"\"\"\\\nDesired PVC method:\n\n * Geometric transfer matrix -- ``GTM``\n * Labbe approach -- ``LABBE``\n * Richardson-Lucy -- ``RL``\n * Van-Cittert -- ``VC``\n * Region-based voxel-wise correction -- ``RBV``\n * RBV with Labbe -- ``LABBE+RBV``\n * RBV with Van-Cittert -- ``RBV+VC``\n * RBV with Richardson-Lucy -- ``RBV+RL``\n * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``\n * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``\n * Multi-target correction -- ``MTC``\n * MTC with Labbe -- ``LABBE+MTC``\n * MTC with Van-Cittert -- ``MTC+VC``\n * MTC with Richardson-Lucy -- ``MTC+RL``\n * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``\n * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``\n * Iterative Yang -- ``IY``\n * Iterative Yang with Van-Cittert -- ``IY+VC``\n * Iterative Yang with Richardson-Lucy -- ``IY+RL``\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- 
``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n\n\"\"\",\n )\n fwhm_x = traits.Float(\n desc=\"The full-width at half maximum in mm along x-axis\",\n mandatory=True,\n argstr=\"-x %.4f\",\n )\n fwhm_y = traits.Float(\n desc=\"The full-width at half maximum in mm along y-axis\",\n mandatory=True,\n argstr=\"-y %.4f\",\n )\n fwhm_z = traits.Float(\n desc=\"The full-width at half maximum in mm along z-axis\",\n mandatory=True,\n argstr=\"-z %.4f\",\n )\n debug = traits.Bool(\n desc=\"Prints debug information\",\n usedefault=True,\n default_value=False,\n argstr=\"-d\",\n )\n n_iter = traits.Int(\n desc=\"Number of iterations\", default_value=10, usedefault=True, argstr=\"-n %d\"\n )\n n_deconv = traits.Int(\n desc=\"Number of deconvolution iterations\",\n default_value=10,\n usedefault=True,\n argstr=\"-k %d\",\n )\n alpha = traits.Float(\n desc=\"Alpha value\", default_value=1.5, usedefault=True, argstr=\"-a %.4f\"\n )\n stop_crit = traits.Float(\n desc=\"Stopping criterion\", default_value=0.01, usedefault=True, argstr=\"-s %.4f\"\n )\n\n\nclass PETPVCOutputSpec(TraitedSpec):\n out_file = File(desc=\"Output file\")\n\n\nclass PETPVC(CommandLine):\n \"\"\"Use PETPVC for partial volume correction of PET images.\n\n PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department\n of the UCL University Hospital, London, UK.\n\n Examples\n --------\n >>> from ..testing import example_data\n >>> #TODO get data for PETPVC\n >>> pvc = PETPVC()\n >>> pvc.inputs.in_file = 'pet.nii.gz'\n >>> pvc.inputs.mask_file = 'tissues.nii.gz'\n >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'\n >>> pvc.inputs.pvc = 'RBV'\n >>> pvc.inputs.fwhm_x = 2.0\n >>> pvc.inputs.fwhm_y = 2.0\n >>> pvc.inputs.fwhm_z = 2.0\n >>> outs = pvc.run() #doctest: +SKIP\n\n References\n ----------\n .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,\n \"A review of partial volume correction techniques for emission tomography\n and their applications in neurology, cardiology and oncology,\" Phys. Med.\n Biol., vol. 57, no. 21, p. R119, 2012.\n .. 
[2] https://github.com/UCL/PETPVC\n\n \"\"\"\n\n input_spec = PETPVCInputSpec\n output_spec = PETPVCOutputSpec\n _cmd = \"petpvc\"\n\n _references = [\n {\n \"entry\": BibTeX(\n \"@article{0031-9155-61-22-7975,\"\n \"author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and \"\n \"Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},\"\n \"title={PETPVC: a toolbox for performing partial volume correction \"\n \"techniques in positron emission tomography},\"\n \"journal={Physics in Medicine and Biology},\"\n \"volume={61},\"\n \"number={22},\"\n \"pages={7975},\"\n \"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},\"\n \"doi={https://doi.org/10.1088/0031-9155/61/22/7975},\"\n \"year={2016},\"\n \"}\"\n ),\n \"description\": \"PETPVC software implementation publication\",\n \"tags\": [\"implementation\"],\n }\n ]\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(outputs[\"out_file\"]):\n method_name = self.inputs.pvc.lower()\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=f\"_{method_name}_pvc\"\n )\n\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_fname(\n self, basename, cwd=None, suffix=None, change_ext=True, ext=\".nii.gz\"\n ):\n \"\"\"Generate a filename based on the given parameters.\n\n The filename will take the form: cwd/basename<suffix><ext>.\n If change_ext is True, it will use the extensions specified in\n <instance>inputs.output_type.\n\n Parameters\n ----------\n basename : str\n Filename to base the new filename on.\n cwd : str\n Path to prefix to the new filename. (default is os.getcwd())\n suffix : str\n Suffix to add to the `basename`. (defaults is '' )\n change_ext : bool\n Flag to change the filename extension to the given `ext`.\n (Default is False)\n\n Returns\n -------\n fname : str\n New filename based on given parameters.\n\n \"\"\"\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = \"\".join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n", "path": "nipype/interfaces/petpvc.py"}]} | 3,450 | 155 |
gh_patches_debug_43549 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4544 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
</issue>
<code>
[start of colossalai/shardformer/shard/sharder.py]
1 from types import MethodType
2 from typing import Any, Callable, Dict, List, Optional, Set, Union
3
4 import torch.nn as nn
5 from torch import Tensor
6
7 from colossalai.lazy import LazyInitContext
8
9 from .._utils import getattr_, setattr_
10 from ..policies.auto_policy import get_autopolicy
11 from ..policies.base_policy import Policy, SubModuleReplacementDescription
12 from .shard_config import ShardConfig
13 from .utils import set_tensors_to_none
14
15 __all__ = ['ModelSharder', 'shard_model']
16
17
18 class ModelSharder(object):
19 r"""
20 Shard the original huggingface model according to the policy
21
22 Args:
23 policy (:class:`Policy`): The policy to shard the model
24 model (:class:`torch.Module`): The model to shard
25 shard_config: The setting of distributed model
26 """
27
28 def __init__(self, model: nn.Module, policy: Policy, shard_config: ShardConfig = None) -> None:
29 self.model = model
30 self.policy = get_autopolicy(self.model) if policy is None else policy
31 self.shard_config = shard_config
32
33 def shard(self) -> List[Dict[int, Tensor]]:
34 r"""
35 Shard the model according to the policy
36 """
37 self.policy.set_model(self.model)
38 self.policy.set_shard_config(self.shard_config)
39 self._preprocess()
40 # get shared params before release unheld layers, this avoid misjudgement of shared params (None is None)
41 shared_params = self.policy.get_shared_params()
42 held_layers = self._release_unheld_layers()
43 self._replace_module(include=held_layers)
44 self._materialize()
45 self._postprocess()
46 return shared_params
47
48 def _preprocess(self) -> None:
49 self.model = self.policy.preprocess()
50
51 def _postprocess(self) -> None:
52 self.model = self.policy.postprocess()
53
54 def _replace_module(self, include: Optional[Set[nn.Module]] = None) -> None:
55 r"""
56 Replace the module according to the policy, and replace the module one by one
57
58 Args:
59 model (:class:`torch.nn.Module`): The model to shard
60 """
61 module_descriptions = self.policy.module_policy()
62 for layer_cls, module_description in module_descriptions.items():
63 attr_replacement = module_description.attribute_replacement
64 param_replacement = module_description.param_replacement
65 sub_module_replacement = module_description.sub_module_replacement
66 method_replacement = module_description.method_replacement
67 self._recursive_replace_layer(self.model,
68 layer_cls,
69 attr_replacement,
70 param_replacement,
71 method_replacement,
72 sub_module_replacement,
73 include=include)
74
75 def _recursive_replace_layer(
76 self,
77 module: nn.Module,
78 origin_cls: Union[str, nn.Module],
79 attr_replacement: Dict[str, Any],
80 param_replacement: List[Callable],
81 method_replacement: Dict[str, Callable],
82 sub_module_replacement: List[SubModuleReplacementDescription],
83 include: Optional[Set[nn.Module]] = None,
84 ) -> None:
85 r"""
86 Reverse the replace layer operation
87
88 Args:
89 module (torch.nn.Module): The object of layer to shard
90 origin_cls (Union[str, torch.nn.Module]): The origin layer class or a string of layer class name
91 attr_replacement (Dict[str, Any]): The attribute dict to modify
92 param_replacement (List[Callable]): The function list to get parameter shard information in policy
93 method_replacement (Dict[str, Callable]): Key is the method name, value is the method for replacement
94 sub_module_replacement ((List[SubModuleReplacementDescription]): The function list to get sub module shard information in policy
95 """
96 # released layers are not shardable
97 can_replace_param_or_layer = include is None or module in include
98 if (isinstance(origin_cls, str) and origin_cls == module.__class__.__name__) or \
99 (module.__class__ == origin_cls):
100 if attr_replacement is not None:
101 self._replace_attr(module, attr_replacement)
102
103 if param_replacement is not None and can_replace_param_or_layer:
104 self._replace_param(module, param_replacement)
105
106 if method_replacement is not None:
107 self._replace_method(module, method_replacement)
108
109 if sub_module_replacement is not None and can_replace_param_or_layer:
110 self._replace_sub_module(module, sub_module_replacement)
111
112 for name, child in module.named_children():
113 self._recursive_replace_layer(child,
114 origin_cls,
115 attr_replacement,
116 param_replacement,
117 method_replacement,
118 sub_module_replacement,
119 include=include)
120
121 def _replace_attr(
122 self,
123 module: nn.Module,
124 attr_replacement: Dict[str, Any],
125 ) -> None:
126 r"""
127 Replace the attribute of the layer
128
129 Args:
130 module (:class:`torch.nn.Module`): The object of layer to shard
131 attr_replacement (Dict): The attribute dict to modify
132 """
133 for k, v in attr_replacement.items():
134 setattr_(module, k, v, ignore=True)
135
136 def _replace_param(
137 self,
138 module: nn.Module,
139 param_replacement: List[Callable],
140 ) -> None:
141 r"""
142 Replace the parameter of the layer
143
144 Args:
145 module (:class:`torch.nn.Module`): The object of layer to shard
146 param_replacement (List[Callable]): The function list to get parameter shard information in policy
147 """
148 for param_func in param_replacement:
149 param_func(module)
150
151 def _replace_method(self, module: nn.Module, method_replacement: Dict[str, Callable]):
152 for method_name, new_method in method_replacement.items():
153 # bind the new method to the module
154 bound_method = MethodType(new_method, module)
155 setattr(module, method_name, bound_method)
156
157 def _replace_sub_module(
158 self,
159 org_layer: nn.Module,
160 sub_module_replacement: List[SubModuleReplacementDescription],
161 ) -> None:
162 r"""
163 Shard one layer according to the policy, the layer should be the same class as the key in policy's argument_policy return dict
164
165 Args:
166 org_layer (torch.nn.Module): The origin layer object to shard
167 sub_module_replacement (List[SubModuleReplacementDescription]): The sub module replacement description list
168
169 """
170 for description in sub_module_replacement:
171 suffix = description.suffix
172 target_module = description.target_module
173 kwargs = {} if description.kwargs is None else description.kwargs
174
175 assert target_module is not None, 'target_module should not be None'
176
177 # TODO: support different parallel mode
178 native_sub_module = getattr_(org_layer, suffix, ignore=True)
179
180 assert not isinstance(native_sub_module, target_module), \
181 f"The module with suffix {suffix} has been replaced, please check the policy"
182
183 # if it is None and we are allowed to ignore this module
184 # just skip
185 if description.ignore_if_not_exist and native_sub_module is None:
186 continue
187
188 try:
189 replace_layer = target_module.from_native_module(native_sub_module,
190 self.shard_config.tensor_parallel_process_group,
191 **kwargs)
192 except Exception as e:
193 raise RuntimeError(
194 f"Failed to replace {suffix} of type {native_sub_module.__class__.__qualname__}"
195 f" with {target_module.__qualname__} with the exception: {e}. "
196 "Please check your model configuration or sharding policy, you can set up an issue for us to help you as well."
197 )
198
199 setattr_(org_layer, suffix, replace_layer)
200
201 def _get_recursive_held_layers(self, held_layers: Optional[List[nn.Module]]) -> Optional[List[nn.Module]]:
202
203 def collect_sub_modules(module: nn.Module):
204 if module is None:
205 return
206 recursive_held_layers.append(module)
207 for name, child in module.named_children():
208 collect_sub_modules(child)
209
210 recursive_held_layers = []
211 for module in held_layers:
212 collect_sub_modules(module)
213 return recursive_held_layers
214
215 def _release_unheld_layers(self) -> Optional[Set[nn.Module]]:
216 r"""
217 Release the unheld layers in the model
218 """
219 if self.shard_config and self.shard_config.pipeline_stage_manager:
220 held_layers = self.policy.get_held_layers()
221 set_tensors_to_none(self.model, exclude=set(held_layers))
222 return set(self._get_recursive_held_layers(held_layers))
223 return None
224
225 def _materialize(self) -> None:
226 r"""
227 Materialize the model if lazy initialization is used
228 """
229 LazyInitContext.materialize(self.model)
230
[end of colossalai/shardformer/shard/sharder.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/colossalai/shardformer/shard/sharder.py b/colossalai/shardformer/shard/sharder.py
--- a/colossalai/shardformer/shard/sharder.py
+++ b/colossalai/shardformer/shard/sharder.py
@@ -92,22 +92,21 @@
param_replacement (List[Callable]): The function list to get parameter shard information in policy
method_replacement (Dict[str, Callable]): Key is the method name, value is the method for replacement
sub_module_replacement ((List[SubModuleReplacementDescription]): The function list to get sub module shard information in policy
+ include (Set[nn.Module], optional): The set of modules to keep on current device when pipeline parallel is enabled. Defaults to None
"""
- # released layers are not shardable
- can_replace_param_or_layer = include is None or module in include
if (isinstance(origin_cls, str) and origin_cls == module.__class__.__name__) or \
(module.__class__ == origin_cls):
if attr_replacement is not None:
self._replace_attr(module, attr_replacement)
- if param_replacement is not None and can_replace_param_or_layer:
+ if param_replacement is not None and (include is None or module in include):
self._replace_param(module, param_replacement)
if method_replacement is not None:
self._replace_method(module, method_replacement)
- if sub_module_replacement is not None and can_replace_param_or_layer:
- self._replace_sub_module(module, sub_module_replacement)
+ if sub_module_replacement is not None:
+ self._replace_sub_module(module, sub_module_replacement, include)
for name, child in module.named_children():
self._recursive_replace_layer(child,
@@ -154,18 +153,17 @@
bound_method = MethodType(new_method, module)
setattr(module, method_name, bound_method)
- def _replace_sub_module(
- self,
- org_layer: nn.Module,
- sub_module_replacement: List[SubModuleReplacementDescription],
- ) -> None:
+ def _replace_sub_module(self,
+ org_layer: nn.Module,
+ sub_module_replacement: List[SubModuleReplacementDescription],
+ include: Optional[Set[nn.Module]] = None) -> None:
r"""
Shard one layer according to the policy, the layer should be the same class as the key in policy's argument_policy return dict
Args:
org_layer (torch.nn.Module): The origin layer object to shard
sub_module_replacement (List[SubModuleReplacementDescription]): The sub module replacement description list
-
+ include (Set[nn.Module], optional): The set of modules to keep on current device when pipeline parallel is enabled. Defaults to None
"""
for description in sub_module_replacement:
suffix = description.suffix
@@ -174,9 +172,12 @@
assert target_module is not None, 'target_module should not be None'
- # TODO: support different parallel mode
native_sub_module = getattr_(org_layer, suffix, ignore=True)
+ # Skip replacement if submodule is not kept by current device when pipeline parallel is enabled.
+ if (include is not None) and (native_sub_module is not None) and (native_sub_module not in include):
+ continue
+
assert not isinstance(native_sub_module, target_module), \
f"The module with suffix {suffix} has been replaced, please check the policy"
| {"golden_diff": "diff --git a/colossalai/shardformer/shard/sharder.py b/colossalai/shardformer/shard/sharder.py\n--- a/colossalai/shardformer/shard/sharder.py\n+++ b/colossalai/shardformer/shard/sharder.py\n@@ -92,22 +92,21 @@\n param_replacement (List[Callable]): The function list to get parameter shard information in policy\n method_replacement (Dict[str, Callable]): Key is the method name, value is the method for replacement\n sub_module_replacement ((List[SubModuleReplacementDescription]): The function list to get sub module shard information in policy\n+ include (Set[nn.Module], optional): The set of modules to keep on current device when pipeline parallel is enabled. Defaults to None\n \"\"\"\n- # released layers are not shardable\n- can_replace_param_or_layer = include is None or module in include\n if (isinstance(origin_cls, str) and origin_cls == module.__class__.__name__) or \\\n (module.__class__ == origin_cls):\n if attr_replacement is not None:\n self._replace_attr(module, attr_replacement)\n \n- if param_replacement is not None and can_replace_param_or_layer:\n+ if param_replacement is not None and (include is None or module in include):\n self._replace_param(module, param_replacement)\n \n if method_replacement is not None:\n self._replace_method(module, method_replacement)\n \n- if sub_module_replacement is not None and can_replace_param_or_layer:\n- self._replace_sub_module(module, sub_module_replacement)\n+ if sub_module_replacement is not None:\n+ self._replace_sub_module(module, sub_module_replacement, include)\n \n for name, child in module.named_children():\n self._recursive_replace_layer(child,\n@@ -154,18 +153,17 @@\n bound_method = MethodType(new_method, module)\n setattr(module, method_name, bound_method)\n \n- def _replace_sub_module(\n- self,\n- org_layer: nn.Module,\n- sub_module_replacement: List[SubModuleReplacementDescription],\n- ) -> None:\n+ def _replace_sub_module(self,\n+ org_layer: nn.Module,\n+ sub_module_replacement: List[SubModuleReplacementDescription],\n+ include: Optional[Set[nn.Module]] = None) -> None:\n r\"\"\"\n Shard one layer according to the policy, the layer should be the same class as the key in policy's argument_policy return dict\n \n Args:\n org_layer (torch.nn.Module): The origin layer object to shard\n sub_module_replacement (List[SubModuleReplacementDescription]): The sub module replacement description list\n-\n+ include (Set[nn.Module], optional): The set of modules to keep on current device when pipeline parallel is enabled. 
Defaults to None\n \"\"\"\n for description in sub_module_replacement:\n suffix = description.suffix\n@@ -174,9 +172,12 @@\n \n assert target_module is not None, 'target_module should not be None'\n \n- # TODO: support different parallel mode\n native_sub_module = getattr_(org_layer, suffix, ignore=True)\n \n+ # Skip replacement if submodule is not kept by current device when pipeline parallel is enabled.\n+ if (include is not None) and (native_sub_module is not None) and (native_sub_module not in include):\n+ continue\n+\n assert not isinstance(native_sub_module, target_module), \\\n f\"The module with suffix {suffix} has been replaced, please check the policy\"\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from types import MethodType\nfrom typing import Any, Callable, Dict, List, Optional, Set, Union\n\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom colossalai.lazy import LazyInitContext\n\nfrom .._utils import getattr_, setattr_\nfrom ..policies.auto_policy import get_autopolicy\nfrom ..policies.base_policy import Policy, SubModuleReplacementDescription\nfrom .shard_config import ShardConfig\nfrom .utils import set_tensors_to_none\n\n__all__ = ['ModelSharder', 'shard_model']\n\n\nclass ModelSharder(object):\n r\"\"\"\n Shard the original huggingface model according to the policy\n\n Args:\n policy (:class:`Policy`): The policy to shard the model\n model (:class:`torch.Module`): The model to shard\n shard_config: The setting of distributed model\n \"\"\"\n\n def __init__(self, model: nn.Module, policy: Policy, shard_config: ShardConfig = None) -> None:\n self.model = model\n self.policy = get_autopolicy(self.model) if policy is None else policy\n self.shard_config = shard_config\n\n def shard(self) -> List[Dict[int, Tensor]]:\n r\"\"\"\n Shard the model according to the policy\n \"\"\"\n self.policy.set_model(self.model)\n self.policy.set_shard_config(self.shard_config)\n self._preprocess()\n # get shared params before release unheld layers, this avoid misjudgement of shared params (None is None)\n shared_params = self.policy.get_shared_params()\n held_layers = self._release_unheld_layers()\n self._replace_module(include=held_layers)\n self._materialize()\n self._postprocess()\n return shared_params\n\n def _preprocess(self) -> None:\n self.model = self.policy.preprocess()\n\n def _postprocess(self) -> None:\n self.model = self.policy.postprocess()\n\n def _replace_module(self, include: Optional[Set[nn.Module]] = None) -> None:\n r\"\"\"\n Replace the module according to the policy, and replace the module one by one\n\n Args:\n model (:class:`torch.nn.Module`): The model to shard\n \"\"\"\n module_descriptions = self.policy.module_policy()\n for layer_cls, module_description in module_descriptions.items():\n attr_replacement = module_description.attribute_replacement\n param_replacement = module_description.param_replacement\n sub_module_replacement = module_description.sub_module_replacement\n method_replacement = module_description.method_replacement\n self._recursive_replace_layer(self.model,\n layer_cls,\n attr_replacement,\n param_replacement,\n method_replacement,\n sub_module_replacement,\n include=include)\n\n def _recursive_replace_layer(\n self,\n module: nn.Module,\n origin_cls: Union[str, nn.Module],\n attr_replacement: Dict[str, Any],\n param_replacement: List[Callable],\n method_replacement: Dict[str, Callable],\n sub_module_replacement: 
List[SubModuleReplacementDescription],\n include: Optional[Set[nn.Module]] = None,\n ) -> None:\n r\"\"\"\n Reverse the replace layer operation\n\n Args:\n module (torch.nn.Module): The object of layer to shard\n origin_cls (Union[str, torch.nn.Module]): The origin layer class or a string of layer class name\n attr_replacement (Dict[str, Any]): The attribute dict to modify\n param_replacement (List[Callable]): The function list to get parameter shard information in policy\n method_replacement (Dict[str, Callable]): Key is the method name, value is the method for replacement\n sub_module_replacement ((List[SubModuleReplacementDescription]): The function list to get sub module shard information in policy\n \"\"\"\n # released layers are not shardable\n can_replace_param_or_layer = include is None or module in include\n if (isinstance(origin_cls, str) and origin_cls == module.__class__.__name__) or \\\n (module.__class__ == origin_cls):\n if attr_replacement is not None:\n self._replace_attr(module, attr_replacement)\n\n if param_replacement is not None and can_replace_param_or_layer:\n self._replace_param(module, param_replacement)\n\n if method_replacement is not None:\n self._replace_method(module, method_replacement)\n\n if sub_module_replacement is not None and can_replace_param_or_layer:\n self._replace_sub_module(module, sub_module_replacement)\n\n for name, child in module.named_children():\n self._recursive_replace_layer(child,\n origin_cls,\n attr_replacement,\n param_replacement,\n method_replacement,\n sub_module_replacement,\n include=include)\n\n def _replace_attr(\n self,\n module: nn.Module,\n attr_replacement: Dict[str, Any],\n ) -> None:\n r\"\"\"\n Replace the attribute of the layer\n\n Args:\n module (:class:`torch.nn.Module`): The object of layer to shard\n attr_replacement (Dict): The attribute dict to modify\n \"\"\"\n for k, v in attr_replacement.items():\n setattr_(module, k, v, ignore=True)\n\n def _replace_param(\n self,\n module: nn.Module,\n param_replacement: List[Callable],\n ) -> None:\n r\"\"\"\n Replace the parameter of the layer\n\n Args:\n module (:class:`torch.nn.Module`): The object of layer to shard\n param_replacement (List[Callable]): The function list to get parameter shard information in policy\n \"\"\"\n for param_func in param_replacement:\n param_func(module)\n\n def _replace_method(self, module: nn.Module, method_replacement: Dict[str, Callable]):\n for method_name, new_method in method_replacement.items():\n # bind the new method to the module\n bound_method = MethodType(new_method, module)\n setattr(module, method_name, bound_method)\n\n def _replace_sub_module(\n self,\n org_layer: nn.Module,\n sub_module_replacement: List[SubModuleReplacementDescription],\n ) -> None:\n r\"\"\"\n Shard one layer according to the policy, the layer should be the same class as the key in policy's argument_policy return dict\n\n Args:\n org_layer (torch.nn.Module): The origin layer object to shard\n sub_module_replacement (List[SubModuleReplacementDescription]): The sub module replacement description list\n\n \"\"\"\n for description in sub_module_replacement:\n suffix = description.suffix\n target_module = description.target_module\n kwargs = {} if description.kwargs is None else description.kwargs\n\n assert target_module is not None, 'target_module should not be None'\n\n # TODO: support different parallel mode\n native_sub_module = getattr_(org_layer, suffix, ignore=True)\n\n assert not isinstance(native_sub_module, target_module), \\\n f\"The module with 
suffix {suffix} has been replaced, please check the policy\"\n\n # if it is None and we are allowed to ignore this module\n # just skip\n if description.ignore_if_not_exist and native_sub_module is None:\n continue\n\n try:\n replace_layer = target_module.from_native_module(native_sub_module,\n self.shard_config.tensor_parallel_process_group,\n **kwargs)\n except Exception as e:\n raise RuntimeError(\n f\"Failed to replace {suffix} of type {native_sub_module.__class__.__qualname__}\"\n f\" with {target_module.__qualname__} with the exception: {e}. \"\n \"Please check your model configuration or sharding policy, you can set up an issue for us to help you as well.\"\n )\n\n setattr_(org_layer, suffix, replace_layer)\n\n def _get_recursive_held_layers(self, held_layers: Optional[List[nn.Module]]) -> Optional[List[nn.Module]]:\n\n def collect_sub_modules(module: nn.Module):\n if module is None:\n return\n recursive_held_layers.append(module)\n for name, child in module.named_children():\n collect_sub_modules(child)\n\n recursive_held_layers = []\n for module in held_layers:\n collect_sub_modules(module)\n return recursive_held_layers\n\n def _release_unheld_layers(self) -> Optional[Set[nn.Module]]:\n r\"\"\"\n Release the unheld layers in the model\n \"\"\"\n if self.shard_config and self.shard_config.pipeline_stage_manager:\n held_layers = self.policy.get_held_layers()\n set_tensors_to_none(self.model, exclude=set(held_layers))\n return set(self._get_recursive_held_layers(held_layers))\n return None\n\n def _materialize(self) -> None:\n r\"\"\"\n Materialize the model if lazy initialization is used\n \"\"\"\n LazyInitContext.materialize(self.model)\n", "path": "colossalai/shardformer/shard/sharder.py"}]} | 3,037 | 787 |
gh_patches_debug_26807 | rasdani/github-patches | git_diff | facebookresearch__hydra-911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug] Not able to Access Parent Fields in instantiate Interpolation
# 🐛 Bug
## Description
Follow-up on #388: the parent fields seem to be getting lost again. If I have a field that is an interpolation of a field higher up in the hierarchy, I can print out the value fine, but I can't use it within instantiate.
## Checklist
- [x] I checked on the latest version of Hydra
- [x] I created a minimal repro
## To reproduce
** Minimal Code/Config snippet to reproduce **
**Minimal** code snippet which should print three times the same integer value.
The first print is the parent field
The second print accesses the child field, which is an interpolation of the parent field
The third print uses instantiate to create an object which takes the child field as a parameter and prints from that object
Before the third print happens the exception is thrown
```
import time
import hydra
import submitit
class GetsTheInteger:
def __init__(self, same_integer):
        self.intval = same_integer
@hydra.main(config_name="test2.yaml")
def main(cfg) -> None:
print(cfg.data.integer)
print(cfg.data.test.same_integer)
g = hydra.utils.instantiate(cfg.data.test)
print(g.intval)
if __name__ == "__main__":
main()
```
** Stack trace/error message **
```
Traceback (most recent call last):
File "/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/utils.py", line 203, in run_and_report
return func()
File "/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/utils.py", line 355, in <lambda>
overrides=args.overrides,
File "/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/hydra.py", line 110, in run
job_subdir_key=None,
File "/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/core/utils.py", line 123, in run_job
ret.return_value = task_function(task_cfg)
File "test2.py", line 15, in main
g = hydra.utils.instantiate(cfg.data.test)
File "/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/utils.py", line 68, in call
raise HydraException(f"Error calling '{cls}' : {e}") from e
hydra.errors.HydraException: Error calling 'test2.GetsTheInteger' : str interpolation key 'data.integer' not found
full_key: same_integer
reference_type=Any
object_type=dict
```
## Expected Behavior
No crash, can instantiate objects whose parameters depend on interpolations of parent fields
## System information
- **Hydra Version** : git master
- **Python version** : 3.7
- **Virtual environment type and version** : Conda
- **Operating system** : Ubuntu 18.04 (fair cluster)
</issue>
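
Before the source listing, a short illustrative sketch of the failure and two possible workarounds. Everything here is reconstructed rather than taken from the report: the YAML layout is an assumption, and the keyword-override variant assumes that `instantiate` merges `**kwargs` into the config, as its signature below suggests. The root cause is that interpolations such as `${data.integer}` resolve against a node's parent, so a copy of `cfg.data.test` that loses its parent can no longer resolve them.

```python
# Hypothetical test2.yaml implied by the report (layout is an assumption):
#
# data:
#   integer: 7
#   test:
#     _target_: test2.GetsTheInteger
#     same_integer: ${data.integer}

import hydra
from omegaconf import OmegaConf


def instantiate_with_workaround(cfg):
    # Workaround 1: resolve the interpolation while cfg.data.test still has
    # its parent, then instantiate from the already-resolved container.
    resolved = OmegaConf.create(OmegaConf.to_container(cfg.data.test, resolve=True))
    obj = hydra.utils.instantiate(resolved)

    # Workaround 2 (assumes keyword overrides are merged into the config):
    # obj = hydra.utils.instantiate(cfg.data.test, same_integer=cfg.data.integer)
    return obj
```

The accompanying patch takes a different route and re-attaches the original parent to the copied config inside `hydra.utils.call`, so interpolations keep resolving without any caller-side workaround.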
<code>
[start of hydra/utils.py]
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import copy
3 import logging.config
4 import os
5 from pathlib import Path
6 from typing import Any, Callable
7
8 from omegaconf import DictConfig, OmegaConf
9 from omegaconf._utils import is_structured_config
10
11 from hydra._internal.utils import (
12 _call_callable,
13 _get_cls_name,
14 _instantiate_class,
15 _locate,
16 )
17 from hydra.core.hydra_config import HydraConfig
18 from hydra.errors import HydraException, InstantiationException
19 from hydra.types import TargetConf
20
21 log = logging.getLogger(__name__)
22
23
24 def call(config: Any, *args: Any, **kwargs: Any) -> Any:
25 """
26 :param config: An object describing what to call and what params to use. needs to have a _target_ field.
27 :param args: optional positional parameters pass-through
28 :param kwargs: optional named parameters pass-through
29 :return: the return value from the specified class or method
30 """
31
32 if OmegaConf.is_none(config):
33 return None
34
35 if isinstance(config, TargetConf) and config._target_ == "???":
36 # Specific check to give a good warning about failure to annotate _target_ as a string.
37 raise InstantiationException(
38 f"Missing value for {type(config).__name__}._target_. Check that it's properly annotated and overridden."
39 f"\nA common problem is forgetting to annotate _target_ as a string : '_target_: str = ...'"
40 )
41
42 if (
43 isinstance(config, dict)
44 or OmegaConf.is_config(config)
45 or is_structured_config(config)
46 ):
47 config = OmegaConf.structured(config)
48 else:
49 raise HydraException(f"Unsupported config type : {type(config).__name__}")
50
51 cls = "<unknown>"
52 try:
53 assert isinstance(config, DictConfig)
54 # make a copy to ensure we do not change the provided object
55 config = copy.deepcopy(config)
56 OmegaConf.set_readonly(config, False)
57 OmegaConf.set_struct(config, False)
58 cls = _get_cls_name(config)
59 type_or_callable = _locate(cls)
60 if isinstance(type_or_callable, type):
61 return _instantiate_class(type_or_callable, config, *args, **kwargs)
62 else:
63 assert callable(type_or_callable)
64 return _call_callable(type_or_callable, config, *args, **kwargs)
65 except InstantiationException as e:
66 raise e
67 except Exception as e:
68 raise HydraException(f"Error calling '{cls}' : {e}") from e
69
70
71 # Alias for call
72 instantiate = call
73
74
75 def get_class(path: str) -> type:
76 try:
77 cls = _locate(path)
78 if not isinstance(cls, type):
79 raise ValueError(f"Located non-class in {path} : {type(cls).__name__}")
80 return cls
81 except Exception as e:
82 log.error(f"Error initializing class at {path} : {e}")
83 raise e
84
85
86 def get_method(path: str) -> Callable[..., Any]:
87 try:
88 cl = _locate(path)
89 if not callable(cl):
90 raise ValueError(f"Non callable object located : {type(cl).__name__}")
91 return cl
92 except Exception as e:
93 log.error(f"Error getting callable at {path} : {e}")
94 raise e
95
96
97 # Alias for get_method
98 get_static_method = get_method
99
100
101 def get_original_cwd() -> str:
102 """
103 :return: the original working directory the Hydra application was launched from
104 """
105 if not HydraConfig.initialized():
106 raise ValueError(
107 "get_original_cwd() must only be used after HydraConfig is initialized"
108 )
109 ret = HydraConfig.get().runtime.cwd
110 assert ret is not None and isinstance(ret, str)
111 return ret
112
113
114 def to_absolute_path(path: str) -> str:
115 """
116 converts the specified path to be absolute path.
117 if the input path is relative, it's interpreted as relative to the original working directory
118 if it's absolute, it's returned as is
119 :param path: path to convert
120 :return:
121 """
122 p = Path(path)
123 if not HydraConfig.initialized():
124 base = Path(os.getcwd())
125 else:
126 base = Path(get_original_cwd())
127 if p.is_absolute():
128 ret = p
129 else:
130 ret = base / p
131 return str(ret)
132
[end of hydra/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/hydra/utils.py b/hydra/utils.py
--- a/hydra/utils.py
+++ b/hydra/utils.py
@@ -1,5 +1,4 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
-import copy
import logging.config
import os
from pathlib import Path
@@ -39,20 +38,22 @@
f"\nA common problem is forgetting to annotate _target_ as a string : '_target_: str = ...'"
)
- if (
+ if not (
isinstance(config, dict)
or OmegaConf.is_config(config)
or is_structured_config(config)
):
- config = OmegaConf.structured(config)
- else:
raise HydraException(f"Unsupported config type : {type(config).__name__}")
+ # make a copy to ensure we do not change the provided object
+ config_copy = OmegaConf.structured(config)
+ if OmegaConf.is_config(config):
+ config_copy._set_parent(config._get_parent())
+ config = config_copy
+
cls = "<unknown>"
try:
assert isinstance(config, DictConfig)
- # make a copy to ensure we do not change the provided object
- config = copy.deepcopy(config)
OmegaConf.set_readonly(config, False)
OmegaConf.set_struct(config, False)
cls = _get_cls_name(config)
| {"golden_diff": "diff --git a/hydra/utils.py b/hydra/utils.py\n--- a/hydra/utils.py\n+++ b/hydra/utils.py\n@@ -1,5 +1,4 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n-import copy\n import logging.config\n import os\n from pathlib import Path\n@@ -39,20 +38,22 @@\n f\"\\nA common problem is forgetting to annotate _target_ as a string : '_target_: str = ...'\"\n )\n \n- if (\n+ if not (\n isinstance(config, dict)\n or OmegaConf.is_config(config)\n or is_structured_config(config)\n ):\n- config = OmegaConf.structured(config)\n- else:\n raise HydraException(f\"Unsupported config type : {type(config).__name__}\")\n \n+ # make a copy to ensure we do not change the provided object\n+ config_copy = OmegaConf.structured(config)\n+ if OmegaConf.is_config(config):\n+ config_copy._set_parent(config._get_parent())\n+ config = config_copy\n+\n cls = \"<unknown>\"\n try:\n assert isinstance(config, DictConfig)\n- # make a copy to ensure we do not change the provided object\n- config = copy.deepcopy(config)\n OmegaConf.set_readonly(config, False)\n OmegaConf.set_struct(config, False)\n cls = _get_cls_name(config)\n", "issue": "[Bug] Not able to Access Parent Fields in instantiate Interpolation\n# \ud83d\udc1b Bug\r\n## Description\r\n\r\nFollowup on #388 the parent fields seem to be getting lost again. If I have a field that is an interpolation of a field higher up in the hierarchy, I can print out the value fine but I can't use it within instiantiate.\r\n\r\n## Checklist\r\n- [x] I checked on the latest version of Hydra\r\n- [x] I created a minimal repro\r\n\r\n## To reproduce\r\n\r\n** Minimal Code/Config snippet to reproduce **\r\n\r\n**Minimal** code snippet which should print three times the same integer value. \r\n\r\nThe first print is the parent field\r\nThe second print accesses the child field, which is an interpolation of the parent field\r\nThe third print uses instantiate to create an object which takes the child field as a parameter and prints from that object\r\n\r\nBefore the third print happens the exception is thrown\r\n\r\n```\r\nimport time\r\nimport hydra\r\nimport submitit\r\n\r\nclass GetsTheInteger:\r\n def __init__(self, same_integer):\r\n self.intval = intval\r\n \r\n\r\[email protected](config_name=\"test2.yaml\")\r\ndef main(cfg) -> None:\r\n print(cfg.data.integer)\r\n print(cfg.data.test.same_integer)\r\n\r\n g = hydra.utils.instantiate(cfg.data.test)\r\n print(g.intval) \r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n```\r\n\r\n** Stack trace/error message **\r\n```\r\nTraceback (most recent call last):\r\n File \"/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/utils.py\", line 203, in run_and_report\r\n return func()\r\n File \"/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/utils.py\", line 355, in <lambda>\r\n overrides=args.overrides,\r\n File \"/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/_internal/hydra.py\", line 110, in run\r\n job_subdir_key=None,\r\n File \"/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/core/utils.py\", line 123, in run_job\r\n ret.return_value = task_function(task_cfg)\r\n File \"test2.py\", line 15, in main\r\n g = hydra.utils.instantiate(cfg.data.test)\r\n File \"/private/home/mehrlich/.conda/envs/qgac/lib/python3.7/site-packages/hydra/utils.py\", line 68, in call\r\n raise HydraException(f\"Error calling '{cls}' : {e}\") from e\r\nhydra.errors.HydraException: Error calling 
'test2.GetsTheInteger' : str interpolation key 'data.integer' not found\r\n full_key: same_integer\r\n reference_type=Any\r\n object_type=dict\r\n```\r\n\r\n## Expected Behavior\r\n\r\nNo crash, can instantiate objects whose parameters depend on interpolations of parent fields\r\n\r\n## System information\r\n- **Hydra Version** : git master\r\n- **Python version** : 3.7\r\n- **Virtual environment type and version** : Conda\r\n- **Operating system** : Ubuntu 18.04 (fair cluster)\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport copy\nimport logging.config\nimport os\nfrom pathlib import Path\nfrom typing import Any, Callable\n\nfrom omegaconf import DictConfig, OmegaConf\nfrom omegaconf._utils import is_structured_config\n\nfrom hydra._internal.utils import (\n _call_callable,\n _get_cls_name,\n _instantiate_class,\n _locate,\n)\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.errors import HydraException, InstantiationException\nfrom hydra.types import TargetConf\n\nlog = logging.getLogger(__name__)\n\n\ndef call(config: Any, *args: Any, **kwargs: Any) -> Any:\n \"\"\"\n :param config: An object describing what to call and what params to use. needs to have a _target_ field.\n :param args: optional positional parameters pass-through\n :param kwargs: optional named parameters pass-through\n :return: the return value from the specified class or method\n \"\"\"\n\n if OmegaConf.is_none(config):\n return None\n\n if isinstance(config, TargetConf) and config._target_ == \"???\":\n # Specific check to give a good warning about failure to annotate _target_ as a string.\n raise InstantiationException(\n f\"Missing value for {type(config).__name__}._target_. Check that it's properly annotated and overridden.\"\n f\"\\nA common problem is forgetting to annotate _target_ as a string : '_target_: str = ...'\"\n )\n\n if (\n isinstance(config, dict)\n or OmegaConf.is_config(config)\n or is_structured_config(config)\n ):\n config = OmegaConf.structured(config)\n else:\n raise HydraException(f\"Unsupported config type : {type(config).__name__}\")\n\n cls = \"<unknown>\"\n try:\n assert isinstance(config, DictConfig)\n # make a copy to ensure we do not change the provided object\n config = copy.deepcopy(config)\n OmegaConf.set_readonly(config, False)\n OmegaConf.set_struct(config, False)\n cls = _get_cls_name(config)\n type_or_callable = _locate(cls)\n if isinstance(type_or_callable, type):\n return _instantiate_class(type_or_callable, config, *args, **kwargs)\n else:\n assert callable(type_or_callable)\n return _call_callable(type_or_callable, config, *args, **kwargs)\n except InstantiationException as e:\n raise e\n except Exception as e:\n raise HydraException(f\"Error calling '{cls}' : {e}\") from e\n\n\n# Alias for call\ninstantiate = call\n\n\ndef get_class(path: str) -> type:\n try:\n cls = _locate(path)\n if not isinstance(cls, type):\n raise ValueError(f\"Located non-class in {path} : {type(cls).__name__}\")\n return cls\n except Exception as e:\n log.error(f\"Error initializing class at {path} : {e}\")\n raise e\n\n\ndef get_method(path: str) -> Callable[..., Any]:\n try:\n cl = _locate(path)\n if not callable(cl):\n raise ValueError(f\"Non callable object located : {type(cl).__name__}\")\n return cl\n except Exception as e:\n log.error(f\"Error getting callable at {path} : {e}\")\n raise e\n\n\n# Alias for get_method\nget_static_method = get_method\n\n\ndef get_original_cwd() -> str:\n \"\"\"\n :return: the original 
working directory the Hydra application was launched from\n \"\"\"\n if not HydraConfig.initialized():\n raise ValueError(\n \"get_original_cwd() must only be used after HydraConfig is initialized\"\n )\n ret = HydraConfig.get().runtime.cwd\n assert ret is not None and isinstance(ret, str)\n return ret\n\n\ndef to_absolute_path(path: str) -> str:\n \"\"\"\n converts the specified path to be absolute path.\n if the input path is relative, it's interpreted as relative to the original working directory\n if it's absolute, it's returned as is\n :param path: path to convert\n :return:\n \"\"\"\n p = Path(path)\n if not HydraConfig.initialized():\n base = Path(os.getcwd())\n else:\n base = Path(get_original_cwd())\n if p.is_absolute():\n ret = p\n else:\n ret = base / p\n return str(ret)\n", "path": "hydra/utils.py"}]} | 2,472 | 306 |
gh_patches_debug_28060 | rasdani/github-patches | git_diff | dynaconf__dynaconf-131 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
YAML.load without a loader is deprecated for security purposes
We've started seeing the following warning:
```
lib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.
```
See here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
</issue>
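
For background on the warning itself, a generic PyYAML illustration (not dynaconf code, and it assumes PyYAML 5.1 or newer, where the named loaders exist): the historical default loader can construct arbitrary Python objects from untrusted input, so an explicit loader choice is now expected.

```python
import yaml

document = "key: value"

# Deprecated pattern that triggers YAMLLoadWarning (PyYAML 6 makes Loader mandatory):
# data = yaml.load(document)

# Explicit-loader replacements:
data = yaml.load(document, Loader=yaml.SafeLoader)  # safe subset of YAML
data = yaml.load(document, Loader=yaml.FullLoader)  # full YAML, avoids arbitrary object construction
data = yaml.safe_load(document)                     # shorthand for SafeLoader
```

The patch later in this record follows the same idea, selecting a named loader (defaulting to `full_load`) instead of the bare `yaml.load`.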
<code>
[start of dynaconf/loaders/yaml_loader.py]
1 # coding: utf-8
2 import io
3 from pathlib import Path
4 from dynaconf import default_settings
5 from dynaconf.loaders.base import BaseLoader
6 from dynaconf.constants import YAML_EXTENSIONS
7 from dynaconf.utils import object_merge
8 try:
9 import yaml
10 except ImportError as e: # pragma: no cover
11 yaml = None
12
13
14 def load(obj, env=None, silent=True, key=None, filename=None):
15 """
16 Reads and loads in to "obj" a single key or all keys from source file.
17
18 :param obj: the settings instance
19 :param env: settings current env default='development'
20 :param silent: if errors should raise
21 :param key: if defined load a single key, else load all in env
22 :param filename: Optional custom filename to load
23 :return: None
24 """
25 if yaml is None: # pragma: no cover
26 BaseLoader.warn_not_installed(obj, 'yaml')
27 return
28
29 loader = BaseLoader(
30 obj=obj,
31 env=env,
32 identifier='yaml',
33 extensions=YAML_EXTENSIONS,
34 file_reader=yaml.load,
35 string_reader=yaml.load
36 )
37 loader.load(filename=filename, key=key, silent=silent)
38
39
40 def write(settings_path, settings_data, merge=True):
41 """Write data to a settings file.
42
43 :param settings_path: the filepath
44 :param settings_data: a dictionary with data
45 :param merge: boolean if existing file should be merged with new data
46 """
47 settings_path = Path(settings_path)
48 if settings_path.exists() and merge: # pragma: no cover
49 object_merge(
50 yaml.load(
51 io.open(
52 str(settings_path),
53 encoding=default_settings.ENCODING_FOR_DYNACONF
54 )
55 ),
56 settings_data
57 )
58
59 yaml.dump(
60 settings_data,
61 io.open(
62 str(settings_path), 'w',
63 encoding=default_settings.ENCODING_FOR_DYNACONF
64 )
65 )
66
[end of dynaconf/loaders/yaml_loader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py
--- a/dynaconf/loaders/yaml_loader.py
+++ b/dynaconf/loaders/yaml_loader.py
@@ -1,10 +1,13 @@
# coding: utf-8
import io
+import os
from pathlib import Path
+from warnings import warn
from dynaconf import default_settings
from dynaconf.loaders.base import BaseLoader
from dynaconf.constants import YAML_EXTENSIONS
from dynaconf.utils import object_merge
+
try:
import yaml
except ImportError as e: # pragma: no cover
@@ -26,13 +29,25 @@
BaseLoader.warn_not_installed(obj, 'yaml')
return
+ # Resolve the loaders
+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation
+ # Possible values are `safe_load, full_load, unsafe_load, load`
+ yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')
+ yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)
+ if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover
+ warn(
+ "yaml.unsafe_load is deprecated."
+ " Please read https://msg.pyyaml.org/load for full details."
+ " Try to use full_load or safe_load."
+ )
+
loader = BaseLoader(
obj=obj,
env=env,
identifier='yaml',
extensions=YAML_EXTENSIONS,
- file_reader=yaml.load,
- string_reader=yaml.load
+ file_reader=yaml_reader,
+ string_reader=yaml_reader
)
loader.load(filename=filename, key=key, silent=silent)
| {"golden_diff": "diff --git a/dynaconf/loaders/yaml_loader.py b/dynaconf/loaders/yaml_loader.py\n--- a/dynaconf/loaders/yaml_loader.py\n+++ b/dynaconf/loaders/yaml_loader.py\n@@ -1,10 +1,13 @@\n # coding: utf-8\n import io\n+import os\n from pathlib import Path\n+from warnings import warn\n from dynaconf import default_settings\n from dynaconf.loaders.base import BaseLoader\n from dynaconf.constants import YAML_EXTENSIONS\n from dynaconf.utils import object_merge\n+\n try:\n import yaml\n except ImportError as e: # pragma: no cover\n@@ -26,13 +29,25 @@\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n \n+ # Resolve the loaders\n+ # https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n+ # Possible values are `safe_load, full_load, unsafe_load, load`\n+ yaml_loader_name = os.environ.get('YAML_LOADER_FOR_DYNACONF', 'full_load')\n+ yaml_reader = getattr(yaml, yaml_loader_name, yaml.load)\n+ if yaml_reader.__name__ == 'unsafe_load': # pragma: no cover\n+ warn(\n+ \"yaml.unsafe_load is deprecated.\"\n+ \" Please read https://msg.pyyaml.org/load for full details.\"\n+ \" Try to use full_load or safe_load.\"\n+ )\n+\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n- file_reader=yaml.load,\n- string_reader=yaml.load\n+ file_reader=yaml_reader,\n+ string_reader=yaml_reader\n )\n loader.load(filename=filename, key=key, silent=silent)\n", "issue": "YAML.load without a loader is deprecated for security purposes\nWe've started seeing the following warning:\r\n```\r\nlib/python3.6/site-packages/dynaconf/loaders/base.py:95: YAMLLoadWarning: calling yaml.load() without Loader=... is deprecated, as the default Loader is unsafe. Please read https://msg.pyyaml.org/load for full details.\r\n```\r\n\r\nSee here: https://github.com/yaml/pyyaml/wiki/PyYAML-yaml.load(input)-Deprecation\n", "before_files": [{"content": "# coding: utf-8\nimport io\nfrom pathlib import Path\nfrom dynaconf import default_settings\nfrom dynaconf.loaders.base import BaseLoader\nfrom dynaconf.constants import YAML_EXTENSIONS\nfrom dynaconf.utils import object_merge\ntry:\n import yaml\nexcept ImportError as e: # pragma: no cover\n yaml = None\n\n\ndef load(obj, env=None, silent=True, key=None, filename=None):\n \"\"\"\n Reads and loads in to \"obj\" a single key or all keys from source file.\n\n :param obj: the settings instance\n :param env: settings current env default='development'\n :param silent: if errors should raise\n :param key: if defined load a single key, else load all in env\n :param filename: Optional custom filename to load\n :return: None\n \"\"\"\n if yaml is None: # pragma: no cover\n BaseLoader.warn_not_installed(obj, 'yaml')\n return\n\n loader = BaseLoader(\n obj=obj,\n env=env,\n identifier='yaml',\n extensions=YAML_EXTENSIONS,\n file_reader=yaml.load,\n string_reader=yaml.load\n )\n loader.load(filename=filename, key=key, silent=silent)\n\n\ndef write(settings_path, settings_data, merge=True):\n \"\"\"Write data to a settings file.\n\n :param settings_path: the filepath\n :param settings_data: a dictionary with data\n :param merge: boolean if existing file should be merged with new data\n \"\"\"\n settings_path = Path(settings_path)\n if settings_path.exists() and merge: # pragma: no cover\n object_merge(\n yaml.load(\n io.open(\n str(settings_path),\n encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n ),\n settings_data\n )\n\n yaml.dump(\n settings_data,\n io.open(\n str(settings_path), 'w',\n 
encoding=default_settings.ENCODING_FOR_DYNACONF\n )\n )\n", "path": "dynaconf/loaders/yaml_loader.py"}]} | 1,205 | 401 |
gh_patches_debug_41208 | rasdani/github-patches | git_diff | akvo__akvo-rsr-3751 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Audit log disaggregation categories and labels
</issue>
<code>
[start of akvo/rest/views/indicator_dimension_name.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimensionName
9
10 from ..serializers import IndicatorDimensionNameSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionNameViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')
18 serializer_class = IndicatorDimensionNameSerializer
19 project_relation = 'project__'
20
[end of akvo/rest/views/indicator_dimension_name.py]
[start of akvo/rest/views/indicator_dimension_value.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from akvo.rsr.models import IndicatorDimensionValue
9
10 from ..serializers import IndicatorDimensionValueSerializer
11 from ..viewsets import PublicProjectViewSet
12
13
14 class IndicatorDimensionValueViewSet(PublicProjectViewSet):
15 """
16 """
17 queryset = IndicatorDimensionValue.objects.all()
18 serializer_class = IndicatorDimensionValueSerializer
19 project_relation = 'name__project__'
20
[end of akvo/rest/views/indicator_dimension_value.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py
--- a/akvo/rest/views/indicator_dimension_name.py
+++ b/akvo/rest/views/indicator_dimension_name.py
@@ -5,6 +5,8 @@
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
+from django.contrib.contenttypes.models import ContentType
from akvo.rsr.models import IndicatorDimensionName
from ..serializers import IndicatorDimensionNameSerializer
@@ -17,3 +19,31 @@
queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')
serializer_class = IndicatorDimensionNameSerializer
project_relation = 'project__'
+
+ def create(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)
+ self._log_action(ADDITION, response.data, str(request.data))
+ return response
+
+ def update(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)
+ self._log_action(CHANGE, response.data, str(request.data))
+ return response
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ data = {'id': instance.id, 'name': instance.name}
+ response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)
+ self._log_action(DELETION, data)
+ return response
+
+ def _log_action(self, action_flag, instance, message=''):
+ user = self.request.user
+ LogEntry.objects.log_action(
+ user_id=user.pk,
+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,
+ object_id=instance['id'],
+ object_repr=str(instance),
+ action_flag=action_flag,
+ change_message=message
+ )
diff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py
--- a/akvo/rest/views/indicator_dimension_value.py
+++ b/akvo/rest/views/indicator_dimension_value.py
@@ -5,6 +5,8 @@
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION
+from django.contrib.contenttypes.models import ContentType
from akvo.rsr.models import IndicatorDimensionValue
from ..serializers import IndicatorDimensionValueSerializer
@@ -17,3 +19,31 @@
queryset = IndicatorDimensionValue.objects.all()
serializer_class = IndicatorDimensionValueSerializer
project_relation = 'name__project__'
+
+ def create(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)
+ self._log_action(ADDITION, response.data, str(request.data))
+ return response
+
+ def update(self, request, *args, **kwargs):
+ response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)
+ self._log_action(CHANGE, response.data, str(request.data))
+ return response
+
+ def destroy(self, request, *args, **kwargs):
+ instance = self.get_object()
+ data = {'id': instance.id, 'value': instance.value}
+ response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)
+ self._log_action(DELETION, data)
+ return response
+
+ def _log_action(self, action_flag, instance, message=''):
+ user = self.request.user
+ LogEntry.objects.log_action(
+ user_id=user.pk,
+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,
+ object_id=instance['id'],
+ object_repr=str(instance),
+ action_flag=action_flag,
+ change_message=message
+ )
| {"golden_diff": "diff --git a/akvo/rest/views/indicator_dimension_name.py b/akvo/rest/views/indicator_dimension_name.py\n--- a/akvo/rest/views/indicator_dimension_name.py\n+++ b/akvo/rest/views/indicator_dimension_name.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionName\n \n from ..serializers import IndicatorDimensionNameSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionNameViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'name': instance.name}\n+ response = super(IndicatorDimensionNameViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ content_type_id=ContentType.objects.get_for_model(IndicatorDimensionName).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\ndiff --git a/akvo/rest/views/indicator_dimension_value.py b/akvo/rest/views/indicator_dimension_value.py\n--- a/akvo/rest/views/indicator_dimension_value.py\n+++ b/akvo/rest/views/indicator_dimension_value.py\n@@ -5,6 +5,8 @@\n # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n \n \n+from django.contrib.admin.models import LogEntry, ADDITION, CHANGE, DELETION\n+from django.contrib.contenttypes.models import ContentType\n from akvo.rsr.models import IndicatorDimensionValue\n \n from ..serializers import IndicatorDimensionValueSerializer\n@@ -17,3 +19,31 @@\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n+\n+ def create(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).create(request, *args, **kwargs)\n+ self._log_action(ADDITION, response.data, str(request.data))\n+ return response\n+\n+ def update(self, request, *args, **kwargs):\n+ response = super(IndicatorDimensionValueViewSet, self).update(request, *args, **kwargs)\n+ self._log_action(CHANGE, response.data, str(request.data))\n+ return response\n+\n+ def destroy(self, request, *args, **kwargs):\n+ instance = self.get_object()\n+ data = {'id': instance.id, 'value': instance.value}\n+ response = super(IndicatorDimensionValueViewSet, self).destroy(request, *args, **kwargs)\n+ self._log_action(DELETION, data)\n+ return response\n+\n+ def _log_action(self, action_flag, instance, message=''):\n+ user = self.request.user\n+ LogEntry.objects.log_action(\n+ user_id=user.pk,\n+ 
content_type_id=ContentType.objects.get_for_model(IndicatorDimensionValue).pk,\n+ object_id=instance['id'],\n+ object_repr=str(instance),\n+ action_flag=action_flag,\n+ change_message=message\n+ )\n", "issue": "Audit log disaggregation categories and labels\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionName\n\nfrom ..serializers import IndicatorDimensionNameSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionNameViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionName.objects.prefetch_related('dimension_values')\n serializer_class = IndicatorDimensionNameSerializer\n project_relation = 'project__'\n", "path": "akvo/rest/views/indicator_dimension_name.py"}, {"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom akvo.rsr.models import IndicatorDimensionValue\n\nfrom ..serializers import IndicatorDimensionValueSerializer\nfrom ..viewsets import PublicProjectViewSet\n\n\nclass IndicatorDimensionValueViewSet(PublicProjectViewSet):\n \"\"\"\n \"\"\"\n queryset = IndicatorDimensionValue.objects.all()\n serializer_class = IndicatorDimensionValueSerializer\n project_relation = 'name__project__'\n", "path": "akvo/rest/views/indicator_dimension_value.py"}]} | 931 | 925 |
gh_patches_debug_61680 | rasdani/github-patches | git_diff | joke2k__faker-48 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Capital O missing an umlaut
Hello, I noticed that in faker/providers/de_DE/internet.py, in the _to_ascii method, the capital O is missing an umlaut.
It should be: ('Ö', 'Oe')
Currently:
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('O', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss')
</issue>
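
To make the impact concrete, a small self-contained sketch (the helper simply copies the replacement loop; the names and sample strings are made up): mapping plain ASCII `'O'` instead of `'Ö'` both leaves `Ö` untouched and mangles ordinary capital O letters.

```python
def to_ascii(string, replacements):
    for search, replace in replacements:
        string = string.replace(search, replace)
    return string

common = (('ä', 'ae'), ('Ä', 'Ae'), ('ü', 'ue'), ('Ü', 'Ue'), ('ß', 'ss'))
buggy = (('ö', 'oe'), ('O', 'Oe')) + common   # current tuple
fixed = (('ö', 'oe'), ('Ö', 'Oe')) + common   # with the umlaut restored

print(to_ascii("Özil Otto", buggy))  # 'Özil Oetto' (Ö kept, plain O corrupted)
print(to_ascii("Özil Otto", fixed))  # 'Oezil Otto'
```

The one-character patch later in this record restores the intended `('Ö', 'Oe')` pair.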
<code>
[start of faker/providers/de_DE/internet.py]
1 # coding=utf-8
2 from __future__ import unicode_literals
3 from ..internet import Provider as InternetProvider
4
5 import re
6
7
8 class Provider(InternetProvider):
9
10 free_email_domains = (
11 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',
12 'aol.de', 'gmx.de'
13 )
14 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')
15
16 @staticmethod
17 def _to_ascii(string):
18 replacements = (
19 ('ä', 'ae'), ('Ä', 'Ae'),
20 ('ö', 'oe'), ('O', 'Oe'),
21 ('ü', 'ue'), ('Ü', 'Ue'),
22 ('ß', 'ss')
23 )
24 for search, replace in replacements:
25 string = string.replace(search, replace)
26
27 return string
28
29 def user_name(self):
30 pattern = self.random_element(self.user_name_formats)
31 return self._to_ascii(
32 self.bothify(self.generator.parse(pattern)
33 ).lower())
34
35 def domain_word(self):
36 company = self.generator.format('company')
37 company_elements = company.split(' ')
38 company = self._to_ascii(company_elements.pop(0))
39 return re.sub(r'\W', '', company).lower()
40
[end of faker/providers/de_DE/internet.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py
--- a/faker/providers/de_DE/internet.py
+++ b/faker/providers/de_DE/internet.py
@@ -17,7 +17,7 @@
def _to_ascii(string):
replacements = (
('ä', 'ae'), ('Ä', 'Ae'),
- ('ö', 'oe'), ('O', 'Oe'),
+ ('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
('ß', 'ss')
)
| {"golden_diff": "diff --git a/faker/providers/de_DE/internet.py b/faker/providers/de_DE/internet.py\n--- a/faker/providers/de_DE/internet.py\n+++ b/faker/providers/de_DE/internet.py\n@@ -17,7 +17,7 @@\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n- ('\u00f6', 'oe'), ('O', 'Oe'),\n+ ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n", "issue": "Capital O missing an umlaut\nHello, I noticed in faker/Providers/De_de/internet.py in the _to_ascii method, the capital O is missing an umlaut. \n\nIt should be: ('\u00d6', 'Oe') \n\nCurrently:\nreplacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import unicode_literals\nfrom ..internet import Provider as InternetProvider\n\nimport re\n\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'web.de', 'gmail.com', 'hotmail.de', 'yahoo.de', 'googlemail.com',\n 'aol.de', 'gmx.de'\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de')\n\n @staticmethod\n def _to_ascii(string):\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('O', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss')\n )\n for search, replace in replacements:\n string = string.replace(search, replace)\n\n return string\n\n def user_name(self):\n pattern = self.random_element(self.user_name_formats)\n return self._to_ascii(\n self.bothify(self.generator.parse(pattern)\n ).lower())\n\n def domain_word(self):\n company = self.generator.format('company')\n company_elements = company.split(' ')\n company = self._to_ascii(company_elements.pop(0))\n return re.sub(r'\\W', '', company).lower()\n", "path": "faker/providers/de_DE/internet.py"}]} | 1,000 | 134 |
gh_patches_debug_5954 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2609 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Issues with installation process that connects an existing DB
- [x] Tester Marius reports (server credentials in Upwork)
  - [ ] It seems that even if you select an existing database, it still tries to start a Docker container for the database, creating a conflict?
- [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit))
</issue>
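
A plausible reproduction of the second report, sketched with made-up credentials and database name (both are assumptions, as is the use of a plain SQLAlchemy engine rather than the project's wrapper): interpolating a name into `CREATE DATABASE` without quoting fails for anything that is not a simple lower-case identifier.

```python
from sqlalchemy import create_engine, text

# Assumed connection URL for a local Postgres; replace with real credentials.
engine = create_engine("postgresql://postgres:postgres@localhost:5432/postgres")

db_name = "Mathesar-Test"  # hyphen and mixed case: not a plain identifier

with engine.connect() as conn:
    conn.execution_options(isolation_level="AUTOCOMMIT")
    # Unquoted, Postgres folds the name and rejects the hyphen with a syntax error:
    # conn.execute(text(f"CREATE DATABASE {db_name}"))
    # Double-quoting the identifier preserves the name exactly:
    conn.execute(text(f'CREATE DATABASE "{db_name}"'))
```

This matches the patch later in this record, which switches the installer to the double-quoted form.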
<code>
[start of db/install.py]
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.types import install
6
7
8 def install_mathesar(
9 database_name, username, password, hostname, port, skip_confirm
10 ):
11 """Create database and install Mathesar on it."""
12 user_db_engine = engine.create_future_engine(
13 username, password, hostname, database_name, port,
14 connect_args={"connect_timeout": 10}
15 )
16 try:
17 user_db_engine.connect()
18 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
19 install.install_mathesar_on_database(user_db_engine)
20 user_db_engine.dispose()
21 except OperationalError:
22 database_created = _create_database(
23 database_name=database_name,
24 hostname=hostname,
25 username=username,
26 password=password,
27 port=port,
28 skip_confirm=skip_confirm
29 )
30 if database_created:
31 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
32 install.install_mathesar_on_database(user_db_engine)
33 user_db_engine.dispose()
34 else:
35 print(f"Skipping installing on DB with key {database_name}.")
36
37
38 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
39 if skip_confirm is True:
40 create_database = "y"
41 else:
42 create_database = input(
43 f"Create a new Database called {database_name}? (y/n) > "
44 )
45 if create_database.lower() in ["y", "yes"]:
46 # We need to connect to an existing database inorder to create a new Database.
47 # So we use the default Database `postgres` that comes with postgres.
48 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
49 root_database = "postgres"
50 root_db_engine = engine.create_future_engine(
51 username, password, hostname, root_database, port,
52 connect_args={"connect_timeout": 10}
53 )
54 with root_db_engine.connect() as conn:
55 conn.execution_options(isolation_level="AUTOCOMMIT")
56 conn.execute(text(f"CREATE DATABASE {database_name}"))
57 root_db_engine.dispose()
58 print(f"Created DB is {database_name}.")
59 return True
60 else:
61 print(f"Database {database_name} not created!")
62 return False
63
[end of db/install.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -53,7 +53,7 @@
)
with root_db_engine.connect() as conn:
conn.execution_options(isolation_level="AUTOCOMMIT")
- conn.execute(text(f"CREATE DATABASE {database_name}"))
+ conn.execute(text(f'CREATE DATABASE "{database_name}"'))
root_db_engine.dispose()
print(f"Created DB is {database_name}.")
return True
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -53,7 +53,7 @@\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n- conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n+ conn.execute(text(f'CREATE DATABASE \"{database_name}\"'))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n", "issue": "Issues with installation process that connects an existing DB\n- [x] Tester Marius reports (server credentials in Upwork)\r\n - [ ] It seems that even if you select existing database, it still tries to start a docker container for the database, creating a conflict?\r\n- [x] Tester Mohammad reports an error as well ([details here](https://docs.google.com/document/d/15m9eZFocAsU1V9inLKxC6i_KQxMdu28snRrBPOrf5Hk/edit))\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,260 | 111 |
gh_patches_debug_41704 | rasdani/github-patches | git_diff | Flexget__Flexget-2525 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
qBittorrent 4.2.0 can't work
Hi
 I've upgraded to qBittorrent 4.2.0 and flexget can't add new tasks to qBittorrent.
FlexGet version: 3.0.11
Python version: 3.7.5
qBittorrent 4.2.0 can't work
Hi
 I've upgraded to qBittorrent 4.2.0 and flexget can't add new tasks to qBittorrent.
FlexGet version: 3.0.11
Python version: 3.7.5
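
A minimal probe sketch of the likely cause: qBittorrent 4.2 ships Web API v2 and no longer answers the legacy `/login` / `/command/*` endpoints, so the old plugin requests fail. This is an assumption here, though the patch further down in this entry takes the same approach; the host and function names below are illustrative:

```python
import requests

def detect_api_endpoints(base="http://localhost:8080"):
    # qBittorrent >= 4.2 responds on the v2 endpoint; older builds only on the legacy one.
    if requests.get(base + "/api/v2/app/webapiVersion", timeout=10).status_code != 404:
        return "/api/v2/auth/login", "/api/v2/torrents/add"
    if requests.get(base + "/version/api", timeout=10).status_code != 404:
        return "/login", "/command/upload"
    raise RuntimeError("could not detect the qBittorrent Web API version")
```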
</issue>
<code>
[start of flexget/plugins/clients/qbittorrent.py]
1 import logging
2 import os
3
4 from requests import Session
5 from requests.exceptions import RequestException
6
7 from flexget import plugin
8 from flexget.event import event
9 from flexget.utils.template import RenderError
10
11 log = logging.getLogger('qbittorrent')
12
13
14 class OutputQBitTorrent:
15 """
16 Example:
17
18 qbittorrent:
19 username: <USERNAME> (default: (none))
20 password: <PASSWORD> (default: (none))
21 host: <HOSTNAME> (default: localhost)
22 port: <PORT> (default: 8080)
23 use_ssl: <SSL> (default: False)
24 verify_cert: <VERIFY> (default: True)
25 path: <OUTPUT_DIR> (default: (none))
26 label: <LABEL> (default: (none))
27 maxupspeed: <torrent upload speed limit> (default: 0)
28 maxdownspeed: <torrent download speed limit> (default: 0)
29 add_paused: <ADD_PAUSED> (default: False)
30 """
31
32 schema = {
33 'anyOf': [
34 {'type': 'boolean'},
35 {
36 'type': 'object',
37 'properties': {
38 'username': {'type': 'string'},
39 'password': {'type': 'string'},
40 'host': {'type': 'string'},
41 'port': {'type': 'integer'},
42 'use_ssl': {'type': 'boolean'},
43 'verify_cert': {'type': 'boolean'},
44 'path': {'type': 'string'},
45 'label': {'type': 'string'},
46 'maxupspeed': {'type': 'integer'},
47 'maxdownspeed': {'type': 'integer'},
48 'fail_html': {'type': 'boolean'},
49 'add_paused': {'type': 'boolean'},
50 },
51 'additionalProperties': False,
52 },
53 ]
54 }
55
56 def _request(self, method, url, msg_on_fail=None, **kwargs):
57 try:
58 response = self.session.request(method, url, **kwargs)
59 if response == 'Fails.':
60 msg = (
61 'Failure. URL: {}, data: {}'.format(url, kwargs)
62 if not msg_on_fail
63 else msg_on_fail
64 )
65 else:
66 return response
67 except RequestException as e:
68 msg = str(e)
69 raise plugin.PluginError(
70 'Error when trying to send request to qBittorrent: {}'.format(msg)
71 )
72
73 def connect(self, config):
74 """
75 Connect to qBittorrent Web UI. Username and password not necessary
76 if 'Bypass authentication for localhost' is checked and host is
77 'localhost'.
78 """
79 self.session = Session()
80 self.url = '{}://{}:{}'.format(
81 'https' if config['use_ssl'] else 'http', config['host'], config['port']
82 )
83 if config.get('username') and config.get('password'):
84 data = {'username': config['username'], 'password': config['password']}
85 self._request(
86 'post',
87 self.url + '/login',
88 data=data,
89 msg_on_fail='Authentication failed.',
90 verify=config['verify_cert'],
91 )
92 log.debug('Successfully connected to qBittorrent')
93 self.connected = True
94
95 def add_torrent_file(self, file_path, data, verify_cert):
96 if not self.connected:
97 raise plugin.PluginError('Not connected.')
98 multipart_data = {k: (None, v) for k, v in data.items()}
99 with open(file_path, 'rb') as f:
100 multipart_data['torrents'] = f
101 self._request(
102 'post',
103 self.url + '/command/upload',
104 msg_on_fail='Failed to add file to qBittorrent',
105 files=multipart_data,
106 verify=verify_cert,
107 )
108 log.debug('Added torrent file %s to qBittorrent', file_path)
109
110 def add_torrent_url(self, url, data, verify_cert):
111 if not self.connected:
112 raise plugin.PluginError('Not connected.')
113 data['urls'] = url
114 multipart_data = {k: (None, v) for k, v in data.items()}
115 self._request(
116 'post',
117 self.url + '/command/download',
118 msg_on_fail='Failed to add file to qBittorrent',
119 files=multipart_data,
120 verify=verify_cert,
121 )
122 log.debug('Added url %s to qBittorrent', url)
123
124 def prepare_config(self, config):
125 if isinstance(config, bool):
126 config = {'enabled': config}
127 config.setdefault('enabled', True)
128 config.setdefault('host', 'localhost')
129 config.setdefault('port', 8080)
130 config.setdefault('use_ssl', False)
131 config.setdefault('verify_cert', True)
132 config.setdefault('label', '')
133 config.setdefault('maxupspeed', 0)
134 config.setdefault('maxdownspeed', 0)
135 config.setdefault('fail_html', True)
136 return config
137
138 def add_entries(self, task, config):
139 for entry in task.accepted:
140 form_data = {}
141 try:
142 save_path = entry.render(entry.get('path', config.get('path', '')))
143 if save_path:
144 form_data['savepath'] = save_path
145 except RenderError as e:
146 log.error('Error setting path for %s: %s', entry['title'], e)
147
148 label = entry.get('label', config.get('label'))
149 if label:
150 form_data['label'] = label # qBittorrent v3.3.3-
151 form_data['category'] = label # qBittorrent v3.3.4+
152
153 add_paused = entry.get('add_paused', config.get('add_paused'))
154 if add_paused:
155 form_data['paused'] = 'true'
156
157 maxupspeed = entry.get('maxupspeed', config.get('maxupspeed'))
158 if maxupspeed:
159 form_data['upLimit'] = maxupspeed * 1024
160
161 maxdownspeed = entry.get('maxdownspeed', config.get('maxdownspeed'))
162 if maxdownspeed:
163 form_data['dlLimit'] = maxdownspeed * 1024
164
165 is_magnet = entry['url'].startswith('magnet:')
166
167 if task.manager.options.test:
168 log.info('Test mode.')
169 log.info('Would add torrent to qBittorrent with:')
170 if not is_magnet:
171 log.info('File: %s', entry.get('file'))
172 else:
173 log.info('Url: %s', entry.get('url'))
174 log.info('Save path: %s', form_data.get('savepath'))
175 log.info('Label: %s', form_data.get('label'))
176 log.info('Paused: %s', form_data.get('paused', 'false'))
177 if maxupspeed:
178 log.info('Upload Speed Limit: %d', form_data.get('upLimit'))
179 if maxdownspeed:
180 log.info('Download Speed Limit: %d', form_data.get('dlLimit'))
181 continue
182
183 if not is_magnet:
184 if 'file' not in entry:
185 entry.fail('File missing?')
186 continue
187 if not os.path.exists(entry['file']):
188 tmp_path = os.path.join(task.manager.config_base, 'temp')
189 log.debug('entry: %s', entry)
190 log.debug('temp: %s', ', '.join(os.listdir(tmp_path)))
191 entry.fail("Downloaded temp file '%s' doesn't exist!?" % entry['file'])
192 continue
193 self.add_torrent_file(entry['file'], form_data, config['verify_cert'])
194 else:
195 self.add_torrent_url(entry['url'], form_data, config['verify_cert'])
196
197 @plugin.priority(120)
198 def on_task_download(self, task, config):
199 """
200 Call download plugin to generate torrent files to load into
201 qBittorrent.
202 """
203 config = self.prepare_config(config)
204 if not config['enabled']:
205 return
206 if 'download' not in task.config:
207 download = plugin.get('download', self)
208 download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])
209
210 @plugin.priority(135)
211 def on_task_output(self, task, config):
212 """Add torrents to qBittorrent at exit."""
213 if task.accepted:
214 config = self.prepare_config(config)
215 self.connect(config)
216 self.add_entries(task, config)
217
218
219 @event('plugin.register')
220 def register_plugin():
221 plugin.register(OutputQBitTorrent, 'qbittorrent', api_ver=2)
222
[end of flexget/plugins/clients/qbittorrent.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flexget/plugins/clients/qbittorrent.py b/flexget/plugins/clients/qbittorrent.py
--- a/flexget/plugins/clients/qbittorrent.py
+++ b/flexget/plugins/clients/qbittorrent.py
@@ -56,14 +56,41 @@
def _request(self, method, url, msg_on_fail=None, **kwargs):
try:
response = self.session.request(method, url, **kwargs)
- if response == 'Fails.':
+ if response.text == "Ok.":
+ return response
+ else:
msg = (
'Failure. URL: {}, data: {}'.format(url, kwargs)
if not msg_on_fail
else msg_on_fail
)
- else:
- return response
+ except RequestException as e:
+ msg = str(e)
+ raise plugin.PluginError(
+ 'Error when trying to send request to qBittorrent: {}'.format(msg)
+ )
+
+ def check_api_version(self, msg_on_fail):
+ try:
+ url = self.url + "/api/v2/app/webapiVersion"
+ response = self.session.request('get', url)
+ if response.status_code != 404:
+ self.api_url_login = '/api/v2/auth/login'
+ self.api_url_add = '/api/v2/torrents/add'
+ return response
+
+ url = self.url + "/version/api"
+ response = self.session.request('get', url)
+ if response.status_code != 404:
+ self.api_url_login = '/login'
+ self.api_url_add = '/command/upload'
+ return response
+
+ msg = (
+ 'Failure. URL: {}'.format(url)
+ if not msg_on_fail
+ else msg_on_fail
+ )
except RequestException as e:
msg = str(e)
raise plugin.PluginError(
@@ -80,11 +107,12 @@
self.url = '{}://{}:{}'.format(
'https' if config['use_ssl'] else 'http', config['host'], config['port']
)
+ self.check_api_version('Check API version failed.')
if config.get('username') and config.get('password'):
data = {'username': config['username'], 'password': config['password']}
self._request(
'post',
- self.url + '/login',
+ self.url + self.api_url_login,
data=data,
msg_on_fail='Authentication failed.',
verify=config['verify_cert'],
@@ -100,7 +128,7 @@
multipart_data['torrents'] = f
self._request(
'post',
- self.url + '/command/upload',
+ self.url + self.api_url_add,
msg_on_fail='Failed to add file to qBittorrent',
files=multipart_data,
verify=verify_cert,
@@ -114,7 +142,7 @@
multipart_data = {k: (None, v) for k, v in data.items()}
self._request(
'post',
- self.url + '/command/download',
+ self.url + self.api_url_add,
msg_on_fail='Failed to add file to qBittorrent',
files=multipart_data,
verify=verify_cert,
| {"golden_diff": "diff --git a/flexget/plugins/clients/qbittorrent.py b/flexget/plugins/clients/qbittorrent.py\n--- a/flexget/plugins/clients/qbittorrent.py\n+++ b/flexget/plugins/clients/qbittorrent.py\n@@ -56,14 +56,41 @@\n def _request(self, method, url, msg_on_fail=None, **kwargs):\n try:\n response = self.session.request(method, url, **kwargs)\n- if response == 'Fails.':\n+ if response.text == \"Ok.\":\n+ return response \n+ else:\n msg = (\n 'Failure. URL: {}, data: {}'.format(url, kwargs)\n if not msg_on_fail\n else msg_on_fail\n )\n- else:\n- return response\n+ except RequestException as e:\n+ msg = str(e)\n+ raise plugin.PluginError(\n+ 'Error when trying to send request to qBittorrent: {}'.format(msg)\n+ )\n+ \n+ def check_api_version(self, msg_on_fail):\n+ try:\n+ url = self.url + \"/api/v2/app/webapiVersion\"\n+ response = self.session.request('get', url)\n+ if response.status_code != 404:\n+ self.api_url_login = '/api/v2/auth/login'\n+ self.api_url_add = '/api/v2/torrents/add'\n+ return response \n+ \n+ url = self.url + \"/version/api\"\n+ response = self.session.request('get', url)\n+ if response.status_code != 404:\n+ self.api_url_login = '/login'\n+ self.api_url_add = '/command/upload'\n+ return response \n+ \n+ msg = (\n+ 'Failure. URL: {}'.format(url)\n+ if not msg_on_fail\n+ else msg_on_fail\n+ )\n except RequestException as e:\n msg = str(e)\n raise plugin.PluginError(\n@@ -80,11 +107,12 @@\n self.url = '{}://{}:{}'.format(\n 'https' if config['use_ssl'] else 'http', config['host'], config['port']\n )\n+ self.check_api_version('Check API version failed.')\n if config.get('username') and config.get('password'):\n data = {'username': config['username'], 'password': config['password']}\n self._request(\n 'post',\n- self.url + '/login',\n+ self.url + self.api_url_login,\n data=data,\n msg_on_fail='Authentication failed.',\n verify=config['verify_cert'],\n@@ -100,7 +128,7 @@\n multipart_data['torrents'] = f\n self._request(\n 'post',\n- self.url + '/command/upload',\n+ self.url + self.api_url_add,\n msg_on_fail='Failed to add file to qBittorrent',\n files=multipart_data,\n verify=verify_cert,\n@@ -114,7 +142,7 @@\n multipart_data = {k: (None, v) for k, v in data.items()}\n self._request(\n 'post',\n- self.url + '/command/download',\n+ self.url + self.api_url_add,\n msg_on_fail='Failed to add file to qBittorrent',\n files=multipart_data,\n verify=verify_cert,\n", "issue": "qBittorrent 4.2.0 can't work\nHi\r\n I've upgrade to qBittorrent 4.2.0 and flexget can't add new tasks to qBittorrent.\r\n \r\nFlexGet version: 3.0.11\r\nPython version: 3.7.5\nqBittorrent 4.2.0 can't work\nHi\r\n I've upgrade to qBittorrent 4.2.0 and flexget can't add new tasks to qBittorrent.\r\n \r\nFlexGet version: 3.0.11\r\nPython version: 3.7.5\n", "before_files": [{"content": "import logging\nimport os\n\nfrom requests import Session\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils.template import RenderError\n\nlog = logging.getLogger('qbittorrent')\n\n\nclass OutputQBitTorrent:\n \"\"\"\n Example:\n\n qbittorrent:\n username: <USERNAME> (default: (none))\n password: <PASSWORD> (default: (none))\n host: <HOSTNAME> (default: localhost)\n port: <PORT> (default: 8080)\n use_ssl: <SSL> (default: False)\n verify_cert: <VERIFY> (default: True)\n path: <OUTPUT_DIR> (default: (none))\n label: <LABEL> (default: (none))\n maxupspeed: <torrent upload speed limit> (default: 0)\n maxdownspeed: <torrent download speed limit> 
(default: 0)\n add_paused: <ADD_PAUSED> (default: False)\n \"\"\"\n\n schema = {\n 'anyOf': [\n {'type': 'boolean'},\n {\n 'type': 'object',\n 'properties': {\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'host': {'type': 'string'},\n 'port': {'type': 'integer'},\n 'use_ssl': {'type': 'boolean'},\n 'verify_cert': {'type': 'boolean'},\n 'path': {'type': 'string'},\n 'label': {'type': 'string'},\n 'maxupspeed': {'type': 'integer'},\n 'maxdownspeed': {'type': 'integer'},\n 'fail_html': {'type': 'boolean'},\n 'add_paused': {'type': 'boolean'},\n },\n 'additionalProperties': False,\n },\n ]\n }\n\n def _request(self, method, url, msg_on_fail=None, **kwargs):\n try:\n response = self.session.request(method, url, **kwargs)\n if response == 'Fails.':\n msg = (\n 'Failure. URL: {}, data: {}'.format(url, kwargs)\n if not msg_on_fail\n else msg_on_fail\n )\n else:\n return response\n except RequestException as e:\n msg = str(e)\n raise plugin.PluginError(\n 'Error when trying to send request to qBittorrent: {}'.format(msg)\n )\n\n def connect(self, config):\n \"\"\"\n Connect to qBittorrent Web UI. Username and password not necessary\n if 'Bypass authentication for localhost' is checked and host is\n 'localhost'.\n \"\"\"\n self.session = Session()\n self.url = '{}://{}:{}'.format(\n 'https' if config['use_ssl'] else 'http', config['host'], config['port']\n )\n if config.get('username') and config.get('password'):\n data = {'username': config['username'], 'password': config['password']}\n self._request(\n 'post',\n self.url + '/login',\n data=data,\n msg_on_fail='Authentication failed.',\n verify=config['verify_cert'],\n )\n log.debug('Successfully connected to qBittorrent')\n self.connected = True\n\n def add_torrent_file(self, file_path, data, verify_cert):\n if not self.connected:\n raise plugin.PluginError('Not connected.')\n multipart_data = {k: (None, v) for k, v in data.items()}\n with open(file_path, 'rb') as f:\n multipart_data['torrents'] = f\n self._request(\n 'post',\n self.url + '/command/upload',\n msg_on_fail='Failed to add file to qBittorrent',\n files=multipart_data,\n verify=verify_cert,\n )\n log.debug('Added torrent file %s to qBittorrent', file_path)\n\n def add_torrent_url(self, url, data, verify_cert):\n if not self.connected:\n raise plugin.PluginError('Not connected.')\n data['urls'] = url\n multipart_data = {k: (None, v) for k, v in data.items()}\n self._request(\n 'post',\n self.url + '/command/download',\n msg_on_fail='Failed to add file to qBittorrent',\n files=multipart_data,\n verify=verify_cert,\n )\n log.debug('Added url %s to qBittorrent', url)\n\n def prepare_config(self, config):\n if isinstance(config, bool):\n config = {'enabled': config}\n config.setdefault('enabled', True)\n config.setdefault('host', 'localhost')\n config.setdefault('port', 8080)\n config.setdefault('use_ssl', False)\n config.setdefault('verify_cert', True)\n config.setdefault('label', '')\n config.setdefault('maxupspeed', 0)\n config.setdefault('maxdownspeed', 0)\n config.setdefault('fail_html', True)\n return config\n\n def add_entries(self, task, config):\n for entry in task.accepted:\n form_data = {}\n try:\n save_path = entry.render(entry.get('path', config.get('path', '')))\n if save_path:\n form_data['savepath'] = save_path\n except RenderError as e:\n log.error('Error setting path for %s: %s', entry['title'], e)\n\n label = entry.get('label', config.get('label'))\n if label:\n form_data['label'] = label # qBittorrent v3.3.3-\n form_data['category'] = label # 
qBittorrent v3.3.4+\n\n add_paused = entry.get('add_paused', config.get('add_paused'))\n if add_paused:\n form_data['paused'] = 'true'\n\n maxupspeed = entry.get('maxupspeed', config.get('maxupspeed'))\n if maxupspeed:\n form_data['upLimit'] = maxupspeed * 1024\n\n maxdownspeed = entry.get('maxdownspeed', config.get('maxdownspeed'))\n if maxdownspeed:\n form_data['dlLimit'] = maxdownspeed * 1024\n\n is_magnet = entry['url'].startswith('magnet:')\n\n if task.manager.options.test:\n log.info('Test mode.')\n log.info('Would add torrent to qBittorrent with:')\n if not is_magnet:\n log.info('File: %s', entry.get('file'))\n else:\n log.info('Url: %s', entry.get('url'))\n log.info('Save path: %s', form_data.get('savepath'))\n log.info('Label: %s', form_data.get('label'))\n log.info('Paused: %s', form_data.get('paused', 'false'))\n if maxupspeed:\n log.info('Upload Speed Limit: %d', form_data.get('upLimit'))\n if maxdownspeed:\n log.info('Download Speed Limit: %d', form_data.get('dlLimit'))\n continue\n\n if not is_magnet:\n if 'file' not in entry:\n entry.fail('File missing?')\n continue\n if not os.path.exists(entry['file']):\n tmp_path = os.path.join(task.manager.config_base, 'temp')\n log.debug('entry: %s', entry)\n log.debug('temp: %s', ', '.join(os.listdir(tmp_path)))\n entry.fail(\"Downloaded temp file '%s' doesn't exist!?\" % entry['file'])\n continue\n self.add_torrent_file(entry['file'], form_data, config['verify_cert'])\n else:\n self.add_torrent_url(entry['url'], form_data, config['verify_cert'])\n\n @plugin.priority(120)\n def on_task_download(self, task, config):\n \"\"\"\n Call download plugin to generate torrent files to load into\n qBittorrent.\n \"\"\"\n config = self.prepare_config(config)\n if not config['enabled']:\n return\n if 'download' not in task.config:\n download = plugin.get('download', self)\n download.get_temp_files(task, handle_magnets=True, fail_html=config['fail_html'])\n\n @plugin.priority(135)\n def on_task_output(self, task, config):\n \"\"\"Add torrents to qBittorrent at exit.\"\"\"\n if task.accepted:\n config = self.prepare_config(config)\n self.connect(config)\n self.add_entries(task, config)\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(OutputQBitTorrent, 'qbittorrent', api_ver=2)\n", "path": "flexget/plugins/clients/qbittorrent.py"}]} | 3,096 | 737 |
gh_patches_debug_31675 | rasdani/github-patches | git_diff | pyload__pyload-1369 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Uplea plugin out of date
Hi,
any download from uplea.com fails:
pyLoad reports success on downloading, but actually only the HTML page giving access to the download is downloaded...
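
A small illustration of how a site-side markup change can break the page-scraping patterns this plugin relies on (the two regexes mirror the plugin's `WAIT_PATTERN` before and after the fix shown later in this entry; the sample string is an assumption about the current page):

```python
import re

old_pattern = re.compile(r'timeText:([\d.]+),')
new_pattern = re.compile(r'timeText: ?([\d.]+),')

sample = "timeText: 30.0,"  # illustrative fragment of the updated page markup

print(old_pattern.search(sample))            # None: the added space breaks the old pattern
print(new_pattern.search(sample).group(1))   # '30.0'
```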
</issue>
<code>
[start of module/plugins/hoster/UpleaCom.py]
1 # -*- coding: utf-8 -*-
2
3 import re
4
5 from urlparse import urljoin
6
7 from module.plugins.internal.XFSHoster import XFSHoster, create_getInfo
8
9
10 class UpleaCom(XFSHoster):
11 __name__ = "UpleaCom"
12 __type__ = "hoster"
13 __version__ = "0.06"
14
15 __pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
16
17 __description__ = """Uplea.com hoster plugin"""
18 __license__ = "GPLv3"
19 __authors__ = [("Redleon", None)]
20
21
22 NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
23 SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>'
24
25 OFFLINE_PATTERN = r'>You followed an invalid or expired link'
26
27 LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"'
28
29 WAIT_PATTERN = r'timeText:([\d.]+),'
30 STEP_PATTERN = r'<a href="(/step/.+)">'
31
32
33 def setup(self):
34 self.multiDL = False
35 self.chunkLimit = 1
36 self.resumeDownload = True
37
38
39 def handleFree(self, pyfile):
40 m = re.search(self.STEP_PATTERN, self.html)
41 if m is None:
42 self.error(_("STEP_PATTERN not found"))
43
44 self.html = self.load(urljoin("http://uplea.com/", m.group(1)))
45
46 m = re.search(self.WAIT_PATTERN, self.html)
47 if m:
48 self.wait(m.group(1), True)
49 self.retry()
50
51 m = re.search(self.LINK_PATTERN, self.html)
52 if m is None:
53 self.error(_("LINK_PATTERN not found"))
54
55 self.link = m.group(1)
56 self.wait(15)
57
58
59 getInfo = create_getInfo(UpleaCom)
60
[end of module/plugins/hoster/UpleaCom.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py
--- a/module/plugins/hoster/UpleaCom.py
+++ b/module/plugins/hoster/UpleaCom.py
@@ -10,23 +10,26 @@
class UpleaCom(XFSHoster):
__name__ = "UpleaCom"
__type__ = "hoster"
- __version__ = "0.06"
+ __version__ = "0.07"
__pattern__ = r'https?://(?:www\.)?uplea\.com/dl/\w{15}'
__description__ = """Uplea.com hoster plugin"""
__license__ = "GPLv3"
- __authors__ = [("Redleon", None)]
+ __authors__ = [("Redleon", None),
+ ("GammaC0de", None)]
NAME_PATTERN = r'class="agmd size18">(?P<N>.+?)<'
- SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_])</span>'
+ SIZE_PATTERN = r'size14">(?P<S>[\d.,]+) (?P<U>[\w^_]+?)</span>'
+ SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]
OFFLINE_PATTERN = r'>You followed an invalid or expired link'
+ PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'
- LINK_PATTERN = r'"(http?://\w+\.uplea\.com/anonym/.*?)"'
+ LINK_PATTERN = r'"(https?://\w+\.uplea\.com/anonym/.*?)"'
- WAIT_PATTERN = r'timeText:([\d.]+),'
+ WAIT_PATTERN = r'timeText: ?([\d.]+),'
STEP_PATTERN = r'<a href="(/step/.+)">'
@@ -45,9 +48,14 @@
m = re.search(self.WAIT_PATTERN, self.html)
if m:
+ self.logDebug(_("Waiting %s seconds") % m.group(1))
self.wait(m.group(1), True)
self.retry()
+ m = re.search(self.PREMIUM_PATTERN, self.html)
+ if m:
+ self.error(_("This URL requires a premium account"))
+
m = re.search(self.LINK_PATTERN, self.html)
if m is None:
self.error(_("LINK_PATTERN not found"))
| {"golden_diff": "diff --git a/module/plugins/hoster/UpleaCom.py b/module/plugins/hoster/UpleaCom.py\n--- a/module/plugins/hoster/UpleaCom.py\n+++ b/module/plugins/hoster/UpleaCom.py\n@@ -10,23 +10,26 @@\n class UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n- __version__ = \"0.06\"\n+ __version__ = \"0.07\"\n \n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n \n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n- __authors__ = [(\"Redleon\", None)]\n+ __authors__ = [(\"Redleon\", None),\n+ (\"GammaC0de\", None)]\n \n \n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n- SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n+ SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_]+?)</span>'\n+ SIZE_REPLACEMENTS = [('Ko','KB'), ('Mo','MB'), ('Go','GB')]\n \n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n+ PREMIUM_PATTERN = r'You need to have a Premium subscription to download this file'\n \n- LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n+ LINK_PATTERN = r'\"(https?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n \n- WAIT_PATTERN = r'timeText:([\\d.]+),'\n+ WAIT_PATTERN = r'timeText: ?([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n \n \n@@ -45,9 +48,14 @@\n \n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n+ self.logDebug(_(\"Waiting %s seconds\") % m.group(1))\n self.wait(m.group(1), True)\n self.retry()\n \n+ m = re.search(self.PREMIUM_PATTERN, self.html)\n+ if m:\n+ self.error(_(\"This URL requires a premium account\"))\n+\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n", "issue": "Uplea plugin out of date\nHi,\nany download from uplea.com fails:\npyLoad reports success on downloading but actually only the HTML page giving acces to download is downloaded...\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport re\n\nfrom urlparse import urljoin\n\nfrom module.plugins.internal.XFSHoster import XFSHoster, create_getInfo\n\n\nclass UpleaCom(XFSHoster):\n __name__ = \"UpleaCom\"\n __type__ = \"hoster\"\n __version__ = \"0.06\"\n\n __pattern__ = r'https?://(?:www\\.)?uplea\\.com/dl/\\w{15}'\n\n __description__ = \"\"\"Uplea.com hoster plugin\"\"\"\n __license__ = \"GPLv3\"\n __authors__ = [(\"Redleon\", None)]\n\n\n NAME_PATTERN = r'class=\"agmd size18\">(?P<N>.+?)<'\n SIZE_PATTERN = r'size14\">(?P<S>[\\d.,]+) (?P<U>[\\w^_])</span>'\n\n OFFLINE_PATTERN = r'>You followed an invalid or expired link'\n\n LINK_PATTERN = r'\"(http?://\\w+\\.uplea\\.com/anonym/.*?)\"'\n\n WAIT_PATTERN = r'timeText:([\\d.]+),'\n STEP_PATTERN = r'<a href=\"(/step/.+)\">'\n\n\n def setup(self):\n self.multiDL = False\n self.chunkLimit = 1\n self.resumeDownload = True\n\n\n def handleFree(self, pyfile):\n m = re.search(self.STEP_PATTERN, self.html)\n if m is None:\n self.error(_(\"STEP_PATTERN not found\"))\n\n self.html = self.load(urljoin(\"http://uplea.com/\", m.group(1)))\n\n m = re.search(self.WAIT_PATTERN, self.html)\n if m:\n self.wait(m.group(1), True)\n self.retry()\n\n m = re.search(self.LINK_PATTERN, self.html)\n if m is None:\n self.error(_(\"LINK_PATTERN not found\"))\n\n self.link = m.group(1)\n self.wait(15)\n\n\ngetInfo = create_getInfo(UpleaCom)\n", "path": "module/plugins/hoster/UpleaCom.py"}]} | 1,144 | 582 |
gh_patches_debug_30586 | rasdani/github-patches | git_diff | superduper-io__superduper-1947 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[MISC] The cosine method is slow to compute.
The cosine method is slow to compute because it casts and re-normalizes the full vector matrix on every call, which takes significant time.
```python
def cosine(x, y):
'''
Cosine similarity function for vector search
'''
x = x.astype(float)
y = y.astype(float)
x = x / numpy.linalg.norm(x, axis=1)[:, None]
y = y / numpy.linalg.norm(y, axis=1)[:, None]
return dot(x, y)
```
We need to preprocess the stored vectors (normalize them once up front) so that cosine only has to normalize the incoming query vector.
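
For illustration, a minimal sketch of that preprocessing, assuming plain NumPy arrays (variable names are illustrative): normalize the stored matrix once when it is built, so each search only normalizes the query vector before a dot product.

```python
import numpy

def normalize_rows(m):
    # Divide each row by its L2 norm so cosine reduces to a plain dot product.
    return m / numpy.linalg.norm(m, axis=1)[:, None]

# Build time: normalize the stored vectors once (toy data for illustration).
stored = normalize_rows(numpy.random.rand(1000, 64).astype(float))

def cosine_query(q, stored_normalized):
    # Per query: only the incoming vector still needs normalizing.
    q = q / numpy.linalg.norm(q, axis=1)[:, None]
    return numpy.dot(q, stored_normalized.T)

scores = cosine_query(numpy.random.rand(1, 64), stored)
```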
</issue>
<code>
[start of superduperdb/vector_search/in_memory.py]
1 import typing as t
2
3 import numpy
4
5 from superduperdb import logging
6 from superduperdb.vector_search.base import BaseVectorSearcher, VectorItem, measures
7
8
9 class InMemoryVectorSearcher(BaseVectorSearcher):
10 """
11 Simple hash-set for looking up with vector similarity.
12
13 :param identifier: Unique string identifier of index
14 :param h: array/ tensor of vectors
15 :param index: list of IDs
16 :param measure: measure to assess similarity
17 """
18
19 name = 'vanilla'
20
21 def __init__(
22 self,
23 identifier: str,
24 dimensions: int,
25 h: t.Optional[numpy.ndarray] = None,
26 index: t.Optional[t.List[str]] = None,
27 measure: t.Union[str, t.Callable] = 'cosine',
28 ):
29 self.identifier = identifier
30 self.dimensions = dimensions
31 self._cache: t.Sequence[VectorItem] = []
32 self._CACHE_SIZE = 10000
33
34 if h is not None:
35 assert index is not None
36 self._setup(h, index)
37 else:
38 self.h = None
39 self.index = None
40 self.lookup = None
41
42 self.measure = measure
43 if isinstance(measure, str):
44 self.measure = measures[measure]
45
46 self.identifier = identifier
47
48 def __len__(self):
49 return self.h.shape[0]
50
51 def _setup(self, h, index):
52 self.h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h
53 self.index = index
54 self.lookup = dict(zip(index, range(len(index))))
55
56 def find_nearest_from_id(self, _id, n=100):
57 self.post_create()
58 return self.find_nearest_from_array(self.h[self.lookup[_id]], n=n)
59
60 def find_nearest_from_array(self, h, n=100, within_ids=None):
61 self.post_create()
62 h = self.to_numpy(h)[None, :]
63 if within_ids:
64 ix = list(map(self.lookup.__getitem__, within_ids))
65 similarities = self.measure(h, self.h[ix, :]) # mypy: ignore
66 else:
67 similarities = self.measure(h, self.h) # mypy: ignore
68 similarities = similarities[0, :]
69 logging.debug(similarities)
70 scores = -numpy.sort(-similarities)
71 ## different ways of handling
72 if within_ids:
73 top_n_idxs = numpy.argsort(-similarities)[:n]
74 ix = [ix[i] for i in top_n_idxs]
75 else:
76 ix = numpy.argsort(-similarities)[:n]
77 ix = ix.tolist()
78 scores = scores.tolist()
79 _ids = [self.index[i] for i in ix]
80 return _ids, scores
81
82 def add(self, items: t.Sequence[VectorItem]) -> None:
83 if len(self._cache) < self._CACHE_SIZE:
84 for item in items:
85 self._cache.append(item)
86 else:
87 self._add(self._cache)
88 self._cache = []
89
90 def post_create(self):
91 if self._cache:
92 self._add(self._cache)
93 self._cache = []
94
95 def _add(self, items: t.Sequence[VectorItem]) -> None:
96 index = [item.id for item in items]
97 h = numpy.stack([item.vector for item in items])
98
99 if self.h is not None:
100 old_not_in_new = list(set(self.index) - set(index))
101 ix_old = [self.lookup[_id] for _id in old_not_in_new]
102 h = numpy.concatenate((self.h[ix_old], h), axis=0)
103 index = [self.index[i] for i in ix_old] + index
104
105 return self._setup(h, index)
106
107 def delete(self, ids):
108 self.post_create()
109 ix = list(map(self.lookup.__getitem__, ids))
110 h = numpy.delete(self.h, ix, axis=0)
111 index = [_id for _id in self.index if _id not in set(ids)]
112 self._setup(h, index)
113
[end of superduperdb/vector_search/in_memory.py]
[start of superduperdb/vector_search/base.py]
1 from __future__ import annotations
2
3 import enum
4 import typing as t
5 from abc import ABC, abstractmethod
6 from dataclasses import dataclass, field
7
8 import numpy
9 import numpy.typing
10
11 if t.TYPE_CHECKING:
12 from superduperdb.components.vector_index import VectorIndex
13
14
15 class BaseVectorSearcher(ABC):
16 @classmethod
17 def from_component(cls, vi: 'VectorIndex'):
18 return cls(
19 identifier=vi.identifier, dimensions=vi.dimensions, measure=vi.measure
20 )
21
22 @abstractmethod
23 def __init__(
24 self,
25 identifier: str,
26 dimensions: int,
27 h: t.Optional[numpy.ndarray] = None,
28 index: t.Optional[t.List[str]] = None,
29 measure: t.Optional[str] = None,
30 ):
31 pass
32
33 @abstractmethod
34 def __len__(self):
35 pass
36
37 @staticmethod
38 def to_numpy(h):
39 if isinstance(h, numpy.ndarray):
40 return h
41 if hasattr(h, 'numpy'):
42 return h.numpy()
43 if isinstance(h, list):
44 return numpy.array(h)
45 raise ValueError(str(h))
46
47 @staticmethod
48 def to_list(h):
49 if hasattr(h, 'tolist'):
50 return h.tolist()
51 if isinstance(h, list):
52 return h
53 raise ValueError(str(h))
54
55 @abstractmethod
56 def add(self, items: t.Sequence[VectorItem]) -> None:
57 """
58 Add items to the index.
59
60 :param items: t.Sequence of VectorItems
61 """
62
63 @abstractmethod
64 def delete(self, ids: t.Sequence[str]) -> None:
65 """
66 Remove items from the index
67
68 :param ids: t.Sequence of ids of vectors.
69 """
70
71 @abstractmethod
72 def find_nearest_from_id(
73 self,
74 _id,
75 n: int = 100,
76 within_ids: t.Sequence[str] = (),
77 ) -> t.Tuple[t.List[str], t.List[float]]:
78 """
79 Find the nearest vectors to the vector with the given id.
80
81 :param _id: id of the vector
82 :param n: number of nearest vectors to return
83 """
84
85 @abstractmethod
86 def find_nearest_from_array(
87 self,
88 h: numpy.typing.ArrayLike,
89 n: int = 100,
90 within_ids: t.Sequence[str] = (),
91 ) -> t.Tuple[t.List[str], t.List[float]]:
92 """
93 Find the nearest vectors to the given vector.
94
95 :param h: vector
96 :param n: number of nearest vectors to return
97 """
98
99 def post_create(self):
100 """
101 This method is used for searchers which requires
102 to perform a task after all vectors have been added
103 """
104
105
106 class VectorIndexMeasureType(str, enum.Enum):
107 cosine = 'cosine'
108 css = 'css'
109 dot = 'dot'
110 l2 = 'l2'
111
112
113 @dataclass(frozen=True)
114 class VectorSearchConfig:
115 '''
116 Represents search config which helps initiate a vector
117 searcher class.
118 '''
119
120 id: str
121 dimensions: int
122 measure: VectorIndexMeasureType = VectorIndexMeasureType.l2
123 parameters: t.Mapping[str, t.Any] = field(default_factory=dict)
124
125
126 @dataclass(frozen=True)
127 class VectorItem:
128 '''
129 Class for representing a vector in vector search with
130 id and vector.
131
132 '''
133
134 id: str
135 vector: numpy.ndarray
136
137 @classmethod
138 def create(
139 cls,
140 *,
141 id: str,
142 vector: numpy.typing.ArrayLike,
143 ) -> VectorItem:
144 return VectorItem(id=id, vector=BaseVectorSearcher.to_numpy(vector))
145
146 def to_dict(self) -> t.Dict:
147 return {'id': self.id, 'vector': self.vector}
148
149
150 @dataclass(frozen=True)
151 class VectorSearchResult:
152 '''
153 Dataclass for representing vector search results with
154 `id` and `score`.
155 '''
156
157 id: str
158 score: float
159
160
161 def l2(x, y):
162 '''
163 L2 function for vector similarity search
164 '''
165 return numpy.array([-numpy.linalg.norm(x - y, axis=1)])
166
167
168 def dot(x, y):
169 '''
170 Dot function for vector similarity search
171 '''
172 return numpy.dot(x, y.T)
173
174
175 def cosine(x, y):
176 '''
177 Cosine similarity function for vector search
178 '''
179 x = x.astype(float)
180 y = y.astype(float)
181 x = x / numpy.linalg.norm(x, axis=1)[:, None]
182 y = y / numpy.linalg.norm(y, axis=1)[:, None]
183 return dot(x, y)
184
185
186 measures = {'cosine': cosine, 'dot': dot, 'l2': l2}
187
[end of superduperdb/vector_search/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/superduperdb/vector_search/base.py b/superduperdb/vector_search/base.py
--- a/superduperdb/vector_search/base.py
+++ b/superduperdb/vector_search/base.py
@@ -176,10 +176,9 @@
'''
Cosine similarity function for vector search
'''
- x = x.astype(float)
- y = y.astype(float)
x = x / numpy.linalg.norm(x, axis=1)[:, None]
- y = y / numpy.linalg.norm(y, axis=1)[:, None]
+ # y which implies all vectors in vectordatabase
+ # has normalized vectors.
return dot(x, y)
diff --git a/superduperdb/vector_search/in_memory.py b/superduperdb/vector_search/in_memory.py
--- a/superduperdb/vector_search/in_memory.py
+++ b/superduperdb/vector_search/in_memory.py
@@ -31,6 +31,10 @@
self._cache: t.Sequence[VectorItem] = []
self._CACHE_SIZE = 10000
+ self.measure = measure
+ if isinstance(measure, str):
+ self.measure = measures[measure]
+
if h is not None:
assert index is not None
self._setup(h, index)
@@ -39,17 +43,19 @@
self.index = None
self.lookup = None
- self.measure = measure
- if isinstance(measure, str):
- self.measure = measures[measure]
-
self.identifier = identifier
def __len__(self):
return self.h.shape[0]
def _setup(self, h, index):
- self.h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h
+ h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h
+
+ if self.measure == 'cosine':
+ # Normalization is required for cosine, hence preparing
+ # all vectors in advance.
+ h = h / numpy.linalg.norm(h, axis=1)[:, None]
+ self.h = h
self.index = index
self.lookup = dict(zip(index, range(len(index))))
| {"golden_diff": "diff --git a/superduperdb/vector_search/base.py b/superduperdb/vector_search/base.py\n--- a/superduperdb/vector_search/base.py\n+++ b/superduperdb/vector_search/base.py\n@@ -176,10 +176,9 @@\n '''\n Cosine similarity function for vector search\n '''\n- x = x.astype(float)\n- y = y.astype(float)\n x = x / numpy.linalg.norm(x, axis=1)[:, None]\n- y = y / numpy.linalg.norm(y, axis=1)[:, None]\n+ # y which implies all vectors in vectordatabase\n+ # has normalized vectors.\n return dot(x, y)\n \n \ndiff --git a/superduperdb/vector_search/in_memory.py b/superduperdb/vector_search/in_memory.py\n--- a/superduperdb/vector_search/in_memory.py\n+++ b/superduperdb/vector_search/in_memory.py\n@@ -31,6 +31,10 @@\n self._cache: t.Sequence[VectorItem] = []\n self._CACHE_SIZE = 10000\n \n+ self.measure = measure\n+ if isinstance(measure, str):\n+ self.measure = measures[measure]\n+\n if h is not None:\n assert index is not None\n self._setup(h, index)\n@@ -39,17 +43,19 @@\n self.index = None\n self.lookup = None\n \n- self.measure = measure\n- if isinstance(measure, str):\n- self.measure = measures[measure]\n-\n self.identifier = identifier\n \n def __len__(self):\n return self.h.shape[0]\n \n def _setup(self, h, index):\n- self.h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h\n+ h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h\n+\n+ if self.measure == 'cosine':\n+ # Normalization is required for cosine, hence preparing\n+ # all vectors in advance.\n+ h = h / numpy.linalg.norm(h, axis=1)[:, None]\n+ self.h = h\n self.index = index\n self.lookup = dict(zip(index, range(len(index))))\n", "issue": "[MISC] The cosine method is slow to compute.\nThe Cosine method is slow in computation because it involves data transformation for the vector matrix every time, which results in significant time consumption.\r\n\r\n```python\r\ndef cosine(x, y):\r\n '''\r\n Cosine similarity function for vector search\r\n '''\r\n x = x.astype(float)\r\n y = y.astype(float)\r\n x = x / numpy.linalg.norm(x, axis=1)[:, None]\r\n y = y / numpy.linalg.norm(y, axis=1)[:, None]\r\n return dot(x, y)\r\n\r\n```\r\n\r\n\r\nwe need to preprocess all the incoming matrices of cosine.\n", "before_files": [{"content": "import typing as t\n\nimport numpy\n\nfrom superduperdb import logging\nfrom superduperdb.vector_search.base import BaseVectorSearcher, VectorItem, measures\n\n\nclass InMemoryVectorSearcher(BaseVectorSearcher):\n \"\"\"\n Simple hash-set for looking up with vector similarity.\n\n :param identifier: Unique string identifier of index\n :param h: array/ tensor of vectors\n :param index: list of IDs\n :param measure: measure to assess similarity\n \"\"\"\n\n name = 'vanilla'\n\n def __init__(\n self,\n identifier: str,\n dimensions: int,\n h: t.Optional[numpy.ndarray] = None,\n index: t.Optional[t.List[str]] = None,\n measure: t.Union[str, t.Callable] = 'cosine',\n ):\n self.identifier = identifier\n self.dimensions = dimensions\n self._cache: t.Sequence[VectorItem] = []\n self._CACHE_SIZE = 10000\n\n if h is not None:\n assert index is not None\n self._setup(h, index)\n else:\n self.h = None\n self.index = None\n self.lookup = None\n\n self.measure = measure\n if isinstance(measure, str):\n self.measure = measures[measure]\n\n self.identifier = identifier\n\n def __len__(self):\n return self.h.shape[0]\n\n def _setup(self, h, index):\n self.h = numpy.array(h) if not isinstance(h, numpy.ndarray) else h\n self.index = index\n self.lookup = dict(zip(index, range(len(index))))\n\n def 
find_nearest_from_id(self, _id, n=100):\n self.post_create()\n return self.find_nearest_from_array(self.h[self.lookup[_id]], n=n)\n\n def find_nearest_from_array(self, h, n=100, within_ids=None):\n self.post_create()\n h = self.to_numpy(h)[None, :]\n if within_ids:\n ix = list(map(self.lookup.__getitem__, within_ids))\n similarities = self.measure(h, self.h[ix, :]) # mypy: ignore\n else:\n similarities = self.measure(h, self.h) # mypy: ignore\n similarities = similarities[0, :]\n logging.debug(similarities)\n scores = -numpy.sort(-similarities)\n ## different ways of handling\n if within_ids:\n top_n_idxs = numpy.argsort(-similarities)[:n]\n ix = [ix[i] for i in top_n_idxs]\n else:\n ix = numpy.argsort(-similarities)[:n]\n ix = ix.tolist()\n scores = scores.tolist()\n _ids = [self.index[i] for i in ix]\n return _ids, scores\n\n def add(self, items: t.Sequence[VectorItem]) -> None:\n if len(self._cache) < self._CACHE_SIZE:\n for item in items:\n self._cache.append(item)\n else:\n self._add(self._cache)\n self._cache = []\n\n def post_create(self):\n if self._cache:\n self._add(self._cache)\n self._cache = []\n\n def _add(self, items: t.Sequence[VectorItem]) -> None:\n index = [item.id for item in items]\n h = numpy.stack([item.vector for item in items])\n\n if self.h is not None:\n old_not_in_new = list(set(self.index) - set(index))\n ix_old = [self.lookup[_id] for _id in old_not_in_new]\n h = numpy.concatenate((self.h[ix_old], h), axis=0)\n index = [self.index[i] for i in ix_old] + index\n\n return self._setup(h, index)\n\n def delete(self, ids):\n self.post_create()\n ix = list(map(self.lookup.__getitem__, ids))\n h = numpy.delete(self.h, ix, axis=0)\n index = [_id for _id in self.index if _id not in set(ids)]\n self._setup(h, index)\n", "path": "superduperdb/vector_search/in_memory.py"}, {"content": "from __future__ import annotations\n\nimport enum\nimport typing as t\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\n\nimport numpy\nimport numpy.typing\n\nif t.TYPE_CHECKING:\n from superduperdb.components.vector_index import VectorIndex\n\n\nclass BaseVectorSearcher(ABC):\n @classmethod\n def from_component(cls, vi: 'VectorIndex'):\n return cls(\n identifier=vi.identifier, dimensions=vi.dimensions, measure=vi.measure\n )\n\n @abstractmethod\n def __init__(\n self,\n identifier: str,\n dimensions: int,\n h: t.Optional[numpy.ndarray] = None,\n index: t.Optional[t.List[str]] = None,\n measure: t.Optional[str] = None,\n ):\n pass\n\n @abstractmethod\n def __len__(self):\n pass\n\n @staticmethod\n def to_numpy(h):\n if isinstance(h, numpy.ndarray):\n return h\n if hasattr(h, 'numpy'):\n return h.numpy()\n if isinstance(h, list):\n return numpy.array(h)\n raise ValueError(str(h))\n\n @staticmethod\n def to_list(h):\n if hasattr(h, 'tolist'):\n return h.tolist()\n if isinstance(h, list):\n return h\n raise ValueError(str(h))\n\n @abstractmethod\n def add(self, items: t.Sequence[VectorItem]) -> None:\n \"\"\"\n Add items to the index.\n\n :param items: t.Sequence of VectorItems\n \"\"\"\n\n @abstractmethod\n def delete(self, ids: t.Sequence[str]) -> None:\n \"\"\"\n Remove items from the index\n\n :param ids: t.Sequence of ids of vectors.\n \"\"\"\n\n @abstractmethod\n def find_nearest_from_id(\n self,\n _id,\n n: int = 100,\n within_ids: t.Sequence[str] = (),\n ) -> t.Tuple[t.List[str], t.List[float]]:\n \"\"\"\n Find the nearest vectors to the vector with the given id.\n\n :param _id: id of the vector\n :param n: number of nearest vectors to return\n \"\"\"\n\n 
@abstractmethod\n def find_nearest_from_array(\n self,\n h: numpy.typing.ArrayLike,\n n: int = 100,\n within_ids: t.Sequence[str] = (),\n ) -> t.Tuple[t.List[str], t.List[float]]:\n \"\"\"\n Find the nearest vectors to the given vector.\n\n :param h: vector\n :param n: number of nearest vectors to return\n \"\"\"\n\n def post_create(self):\n \"\"\"\n This method is used for searchers which requires\n to perform a task after all vectors have been added\n \"\"\"\n\n\nclass VectorIndexMeasureType(str, enum.Enum):\n cosine = 'cosine'\n css = 'css'\n dot = 'dot'\n l2 = 'l2'\n\n\n@dataclass(frozen=True)\nclass VectorSearchConfig:\n '''\n Represents search config which helps initiate a vector\n searcher class.\n '''\n\n id: str\n dimensions: int\n measure: VectorIndexMeasureType = VectorIndexMeasureType.l2\n parameters: t.Mapping[str, t.Any] = field(default_factory=dict)\n\n\n@dataclass(frozen=True)\nclass VectorItem:\n '''\n Class for representing a vector in vector search with\n id and vector.\n\n '''\n\n id: str\n vector: numpy.ndarray\n\n @classmethod\n def create(\n cls,\n *,\n id: str,\n vector: numpy.typing.ArrayLike,\n ) -> VectorItem:\n return VectorItem(id=id, vector=BaseVectorSearcher.to_numpy(vector))\n\n def to_dict(self) -> t.Dict:\n return {'id': self.id, 'vector': self.vector}\n\n\n@dataclass(frozen=True)\nclass VectorSearchResult:\n '''\n Dataclass for representing vector search results with\n `id` and `score`.\n '''\n\n id: str\n score: float\n\n\ndef l2(x, y):\n '''\n L2 function for vector similarity search\n '''\n return numpy.array([-numpy.linalg.norm(x - y, axis=1)])\n\n\ndef dot(x, y):\n '''\n Dot function for vector similarity search\n '''\n return numpy.dot(x, y.T)\n\n\ndef cosine(x, y):\n '''\n Cosine similarity function for vector search\n '''\n x = x.astype(float)\n y = y.astype(float)\n x = x / numpy.linalg.norm(x, axis=1)[:, None]\n y = y / numpy.linalg.norm(y, axis=1)[:, None]\n return dot(x, y)\n\n\nmeasures = {'cosine': cosine, 'dot': dot, 'l2': l2}\n", "path": "superduperdb/vector_search/base.py"}]} | 3,301 | 492 |
gh_patches_debug_1520 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-316 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Wendy's
e.g. https://locations.wendys.com/jamestown-ny-3438
</issue>
<code>
[start of locations/spiders/wendys.py]
1 import scrapy
2 import re
3 import json
4 from locations.items import GeojsonPointItem
5
6 DAY_MAPPING = {
7 'Monday': 'Mo',
8 'Tuesday': 'Tu',
9 'Wednesday': 'We',
10 'Thursday': 'Th',
11 'Friday': 'Fr',
12 'Saturday': 'Sa',
13 'Sunday': 'Su'
14 }
15
16
17 class WendysSpider(scrapy.Spider):
18
19 name = "wendys"
20 allowed_domains = ["locations.wendys.com"]
21 download_delay = 0
22 download_timeout = 30
23 start_urls = (
24 'https://locations.wendys.com',
25 )
26
27 def handle_error(self, failure):
28 self.log("Request failed: %s" % failure.request)
29 def parse_day(self, day):
30 return DAY_MAPPING[day.strip()]
31 def parse_times(self, times):
32 hours_to = [x.strip() for x in times.split('-')]
33 cleaned_times = []
34
35 for hour in hours_to:
36 if re.search('pm$', hour):
37 hour = re.sub('pm', '', hour).strip()
38 hour_min = hour.split(":")
39 if int(hour_min[0]) < 12:
40 hour_min[0] = str(12 + int(hour_min[0]))
41 cleaned_times.append(":".join(hour_min))
42
43 if re.search('am$', hour):
44 hour = re.sub('am', '', hour).strip()
45 hour_min = hour.split(":")
46 if len(hour_min[0]) <2:
47 hour_min[0] = hour_min[0].zfill(2)
48 else:
49 hour_min[0] = str(int(hour_min[0]))
50
51 cleaned_times.append(":".join(hour_min))
52 return "-".join(cleaned_times)
53
54 def parse_hours(self, lis):
55 hours = []
56 for li in lis:
57 day = li.xpath('./span[@class="day"]/text()').extract()[1]
58 times = li.xpath('./span[2]/text()').extract_first()
59 if times and day:
60 parsed_time = self.parse_times(times)
61 parsed_day = self.parse_day(day)
62 hours.append(parsed_day + ' ' + parsed_time)
63
64 return "; ".join(hours)
65 def parse_stores(self, response):
66 page_content = response.body_as_unicode()
67 json_content = re.findall('li.data.results =[^;]+' , page_content)
68 if len(json_content)>0:
69 json_content = json_content[0].replace('li.data.results =' ,'')
70 json_data = json.loads(json_content)
71 properties = {
72 'addr_full': json_data[0]['address'],
73 'phone':json_data[0]['phone'],
74 'city': json_data[0]['city'],
75 'state':json_data[0]['state'],
76 'postcode': json_data[0]['postal'],
77 'ref': json_data[0]['id'],
78 'website': response.url,
79 'lat': json_data[0]['lat'],
80 'lon': json_data[0]['lon'],
81 }
82 hours = self.parse_hours(response.xpath('//div[@class="hours"]/ol/li'))
83 if hours:
84 properties['opening_hours'] = hours
85
86 yield GeojsonPointItem(**properties)
87
88 def parse_city_stores(self, response):
89 stores = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/ul/li/a/@href').extract()
90 for store in stores:
91 if store:
92 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)
93
94 def parse_state(self, response):
95 city_urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract()
96 for path in city_urls:
97 yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)
98
99 def parse(self, response):
100 urls = response.xpath('//div[@class="col-xs-12 col-lg-10 col-lg-offset-1"]/article/div[@class="col"]/ul/li/a/@href').extract()
101 for path in urls:
102 yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)
103
[end of locations/spiders/wendys.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py
--- a/locations/spiders/wendys.py
+++ b/locations/spiders/wendys.py
@@ -18,7 +18,7 @@
name = "wendys"
allowed_domains = ["locations.wendys.com"]
- download_delay = 0
+ download_delay = 0.5
download_timeout = 30
start_urls = (
'https://locations.wendys.com',
| {"golden_diff": "diff --git a/locations/spiders/wendys.py b/locations/spiders/wendys.py\n--- a/locations/spiders/wendys.py\n+++ b/locations/spiders/wendys.py\n@@ -18,7 +18,7 @@\n \n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n- download_delay = 0\n+ download_delay = 0.5\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n", "issue": "Wendy's\ne.g. https://locations.wendys.com/jamestown-ny-3438\n", "before_files": [{"content": "import scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\n\nDAY_MAPPING = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su'\n}\n\n\nclass WendysSpider(scrapy.Spider):\n\n name = \"wendys\"\n allowed_domains = [\"locations.wendys.com\"]\n download_delay = 0\n download_timeout = 30\n start_urls = (\n 'https://locations.wendys.com',\n )\n\n def handle_error(self, failure):\n self.log(\"Request failed: %s\" % failure.request)\n def parse_day(self, day):\n return DAY_MAPPING[day.strip()]\n def parse_times(self, times):\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('pm$', hour):\n hour = re.sub('pm', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('am$', hour):\n hour = re.sub('am', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('./span[@class=\"day\"]/text()').extract()[1]\n times = li.xpath('./span[2]/text()').extract_first()\n if times and day:\n parsed_time = self.parse_times(times)\n parsed_day = self.parse_day(day)\n hours.append(parsed_day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n def parse_stores(self, response):\n page_content = response.body_as_unicode()\n json_content = re.findall('li.data.results =[^;]+' , page_content)\n if len(json_content)>0:\n json_content = json_content[0].replace('li.data.results =' ,'')\n json_data = json.loads(json_content)\n properties = {\n 'addr_full': json_data[0]['address'],\n 'phone':json_data[0]['phone'],\n 'city': json_data[0]['city'],\n 'state':json_data[0]['state'],\n 'postcode': json_data[0]['postal'],\n 'ref': json_data[0]['id'],\n 'website': response.url,\n 'lat': json_data[0]['lat'],\n 'lon': json_data[0]['lon'],\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours\"]/ol/li'))\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_city_stores(self, response):\n stores = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/ul/li/a/@href').extract()\n for store in stores:\n if store:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores ,errback=self.handle_error)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_city_stores ,errback=self.handle_error)\n\n def parse(self, response):\n urls = response.xpath('//div[@class=\"col-xs-12 col-lg-10 
col-lg-offset-1\"]/article/div[@class=\"col\"]/ul/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state ,errback=self.handle_error)\n", "path": "locations/spiders/wendys.py"}]} | 1,689 | 116 |
gh_patches_debug_8029 | rasdani/github-patches | git_diff | ipython__ipython-6931 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
console config not written
assigning to @minrk who said: "Oh jeez, I don't wanna fix that right now". Marking it two-point-oh.
</issue>
<code>
[start of IPython/core/profileapp.py]
1 # encoding: utf-8
2 """
3 An application for managing IPython profiles.
4
5 To be invoked as the `ipython profile` subcommand.
6
7 Authors:
8
9 * Min RK
10
11 """
12 from __future__ import print_function
13
14 #-----------------------------------------------------------------------------
15 # Copyright (C) 2008 The IPython Development Team
16 #
17 # Distributed under the terms of the BSD License. The full license is in
18 # the file COPYING, distributed as part of this software.
19 #-----------------------------------------------------------------------------
20
21 #-----------------------------------------------------------------------------
22 # Imports
23 #-----------------------------------------------------------------------------
24
25 import os
26
27 from IPython.config.application import Application
28 from IPython.core.application import (
29 BaseIPythonApplication, base_flags
30 )
31 from IPython.core.profiledir import ProfileDir
32 from IPython.utils.importstring import import_item
33 from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
34 from IPython.utils import py3compat
35 from IPython.utils.traitlets import Unicode, Bool, Dict
36
37 #-----------------------------------------------------------------------------
38 # Constants
39 #-----------------------------------------------------------------------------
40
41 create_help = """Create an IPython profile by name
42
43 Create an ipython profile directory by its name or
44 profile directory path. Profile directories contain
45 configuration, log and security related files and are named
46 using the convention 'profile_<name>'. By default they are
47 located in your ipython directory. Once created, you will
48 can edit the configuration files in the profile
49 directory to configure IPython. Most users will create a
50 profile directory by name,
51 `ipython profile create myprofile`, which will put the directory
52 in `<ipython_dir>/profile_myprofile`.
53 """
54 list_help = """List available IPython profiles
55
56 List all available profiles, by profile location, that can
57 be found in the current working directly or in the ipython
58 directory. Profile directories are named using the convention
59 'profile_<profile>'.
60 """
61 profile_help = """Manage IPython profiles
62
63 Profile directories contain
64 configuration, log and security related files and are named
65 using the convention 'profile_<name>'. By default they are
66 located in your ipython directory. You can create profiles
67 with `ipython profile create <name>`, or see the profiles you
68 already have with `ipython profile list`
69
70 To get started configuring IPython, simply do:
71
72 $> ipython profile create
73
74 and IPython will create the default profile in <ipython_dir>/profile_default,
75 where you can edit ipython_config.py to start configuring IPython.
76
77 """
78
79 _list_examples = "ipython profile list # list all profiles"
80
81 _create_examples = """
82 ipython profile create foo # create profile foo w/ default config files
83 ipython profile create foo --reset # restage default config files over current
84 ipython profile create foo --parallel # also stage parallel config files
85 """
86
87 _main_examples = """
88 ipython profile create -h # show the help string for the create subcommand
89 ipython profile list -h # show the help string for the list subcommand
90
91 ipython locate profile foo # print the path to the directory for profile 'foo'
92 """
93
94 #-----------------------------------------------------------------------------
95 # Profile Application Class (for `ipython profile` subcommand)
96 #-----------------------------------------------------------------------------
97
98
99 def list_profiles_in(path):
100 """list profiles in a given root directory"""
101 files = os.listdir(path)
102 profiles = []
103 for f in files:
104 try:
105 full_path = os.path.join(path, f)
106 except UnicodeError:
107 continue
108 if os.path.isdir(full_path) and f.startswith('profile_'):
109 profiles.append(f.split('_',1)[-1])
110 return profiles
111
112
113 def list_bundled_profiles():
114 """list profiles that are bundled with IPython."""
115 path = os.path.join(get_ipython_package_dir(), u'config', u'profile')
116 files = os.listdir(path)
117 profiles = []
118 for profile in files:
119 full_path = os.path.join(path, profile)
120 if os.path.isdir(full_path) and profile != "__pycache__":
121 profiles.append(profile)
122 return profiles
123
124
125 class ProfileLocate(BaseIPythonApplication):
126 description = """print the path to an IPython profile dir"""
127
128 def parse_command_line(self, argv=None):
129 super(ProfileLocate, self).parse_command_line(argv)
130 if self.extra_args:
131 self.profile = self.extra_args[0]
132
133 def start(self):
134 print(self.profile_dir.location)
135
136
137 class ProfileList(Application):
138 name = u'ipython-profile'
139 description = list_help
140 examples = _list_examples
141
142 aliases = Dict({
143 'ipython-dir' : 'ProfileList.ipython_dir',
144 'log-level' : 'Application.log_level',
145 })
146 flags = Dict(dict(
147 debug = ({'Application' : {'log_level' : 0}},
148 "Set Application.log_level to 0, maximizing log output."
149 )
150 ))
151
152 ipython_dir = Unicode(get_ipython_dir(), config=True,
153 help="""
154 The name of the IPython directory. This directory is used for logging
155 configuration (through profiles), history storage, etc. The default
156 is usually $HOME/.ipython. This options can also be specified through
157 the environment variable IPYTHONDIR.
158 """
159 )
160
161
162 def _print_profiles(self, profiles):
163 """print list of profiles, indented."""
164 for profile in profiles:
165 print(' %s' % profile)
166
167 def list_profile_dirs(self):
168 profiles = list_bundled_profiles()
169 if profiles:
170 print()
171 print("Available profiles in IPython:")
172 self._print_profiles(profiles)
173 print()
174 print(" The first request for a bundled profile will copy it")
175 print(" into your IPython directory (%s)," % self.ipython_dir)
176 print(" where you can customize it.")
177
178 profiles = list_profiles_in(self.ipython_dir)
179 if profiles:
180 print()
181 print("Available profiles in %s:" % self.ipython_dir)
182 self._print_profiles(profiles)
183
184 profiles = list_profiles_in(py3compat.getcwd())
185 if profiles:
186 print()
187 print("Available profiles in current directory (%s):" % py3compat.getcwd())
188 self._print_profiles(profiles)
189
190 print()
191 print("To use any of the above profiles, start IPython with:")
192 print(" ipython --profile=<name>")
193 print()
194
195 def start(self):
196 self.list_profile_dirs()
197
198
199 create_flags = {}
200 create_flags.update(base_flags)
201 # don't include '--init' flag, which implies running profile create in other apps
202 create_flags.pop('init')
203 create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
204 "reset config files in this profile to the defaults.")
205 create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
206 "Include the config files for parallel "
207 "computing apps (ipengine, ipcontroller, etc.)")
208
209
210 class ProfileCreate(BaseIPythonApplication):
211 name = u'ipython-profile'
212 description = create_help
213 examples = _create_examples
214 auto_create = Bool(True, config=False)
215 def _log_format_default(self):
216 return "[%(name)s] %(message)s"
217
218 def _copy_config_files_default(self):
219 return True
220
221 parallel = Bool(False, config=True,
222 help="whether to include parallel computing config files")
223 def _parallel_changed(self, name, old, new):
224 parallel_files = [ 'ipcontroller_config.py',
225 'ipengine_config.py',
226 'ipcluster_config.py'
227 ]
228 if new:
229 for cf in parallel_files:
230 self.config_files.append(cf)
231 else:
232 for cf in parallel_files:
233 if cf in self.config_files:
234 self.config_files.remove(cf)
235
236 def parse_command_line(self, argv):
237 super(ProfileCreate, self).parse_command_line(argv)
238 # accept positional arg as profile name
239 if self.extra_args:
240 self.profile = self.extra_args[0]
241
242 flags = Dict(create_flags)
243
244 classes = [ProfileDir]
245
246 def _import_app(self, app_path):
247 """import an app class"""
248 app = None
249 name = app_path.rsplit('.', 1)[-1]
250 try:
251 app = import_item(app_path)
252 except ImportError:
253 self.log.info("Couldn't import %s, config file will be excluded", name)
254 except Exception:
255 self.log.warn('Unexpected error importing %s', name, exc_info=True)
256 return app
257
258 def init_config_files(self):
259 super(ProfileCreate, self).init_config_files()
260 # use local imports, since these classes may import from here
261 from IPython.terminal.ipapp import TerminalIPythonApp
262 apps = [TerminalIPythonApp]
263 for app_path in (
264 'IPython.kernel.zmq.kernelapp.IPKernelApp',
265 'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',
266 'IPython.html.notebookapp.NotebookApp',
267 'IPython.nbconvert.nbconvertapp.NbConvertApp',
268 ):
269 app = self._import_app(app_path)
270 if app is not None:
271 apps.append(app)
272 if self.parallel:
273 from IPython.parallel.apps.ipcontrollerapp import IPControllerApp
274 from IPython.parallel.apps.ipengineapp import IPEngineApp
275 from IPython.parallel.apps.ipclusterapp import IPClusterStart
276 from IPython.parallel.apps.iploggerapp import IPLoggerApp
277 apps.extend([
278 IPControllerApp,
279 IPEngineApp,
280 IPClusterStart,
281 IPLoggerApp,
282 ])
283 for App in apps:
284 app = App()
285 app.config.update(self.config)
286 app.log = self.log
287 app.overwrite = self.overwrite
288 app.copy_config_files=True
289 app.ipython_dir=self.ipython_dir
290 app.profile_dir=self.profile_dir
291 app.init_config_files()
292
293 def stage_default_config_file(self):
294 pass
295
296
297 class ProfileApp(Application):
298 name = u'ipython profile'
299 description = profile_help
300 examples = _main_examples
301
302 subcommands = Dict(dict(
303 create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
304 list = (ProfileList, ProfileList.description.splitlines()[0]),
305 locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
306 ))
307
308 def start(self):
309 if self.subapp is None:
310 print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
311 print()
312 self.print_description()
313 self.print_subcommands()
314 self.exit(1)
315 else:
316 return self.subapp.start()
317
[end of IPython/core/profileapp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/IPython/core/profileapp.py b/IPython/core/profileapp.py
--- a/IPython/core/profileapp.py
+++ b/IPython/core/profileapp.py
@@ -262,6 +262,7 @@
apps = [TerminalIPythonApp]
for app_path in (
'IPython.kernel.zmq.kernelapp.IPKernelApp',
+ 'IPython.terminal.console.app.ZMQTerminalIPythonApp',
'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',
'IPython.html.notebookapp.NotebookApp',
'IPython.nbconvert.nbconvertapp.NbConvertApp',
| {"golden_diff": "diff --git a/IPython/core/profileapp.py b/IPython/core/profileapp.py\n--- a/IPython/core/profileapp.py\n+++ b/IPython/core/profileapp.py\n@@ -262,6 +262,7 @@\n apps = [TerminalIPythonApp]\n for app_path in (\n 'IPython.kernel.zmq.kernelapp.IPKernelApp',\n+ 'IPython.terminal.console.app.ZMQTerminalIPythonApp',\n 'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',\n 'IPython.html.notebookapp.NotebookApp',\n 'IPython.nbconvert.nbconvertapp.NbConvertApp',\n", "issue": "console config not written\nassigning to @minrk who said: \"Oh jeez, I don't wanna fix that right now\". Marking it two-point-oh.\n\n", "before_files": [{"content": "# encoding: utf-8\n\"\"\"\nAn application for managing IPython profiles.\n\nTo be invoked as the `ipython profile` subcommand.\n\nAuthors:\n\n* Min RK\n\n\"\"\"\nfrom __future__ import print_function\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nimport os\n\nfrom IPython.config.application import Application\nfrom IPython.core.application import (\n BaseIPythonApplication, base_flags\n)\nfrom IPython.core.profiledir import ProfileDir\nfrom IPython.utils.importstring import import_item\nfrom IPython.utils.path import get_ipython_dir, get_ipython_package_dir\nfrom IPython.utils import py3compat\nfrom IPython.utils.traitlets import Unicode, Bool, Dict\n\n#-----------------------------------------------------------------------------\n# Constants\n#-----------------------------------------------------------------------------\n\ncreate_help = \"\"\"Create an IPython profile by name\n\nCreate an ipython profile directory by its name or\nprofile directory path. Profile directories contain\nconfiguration, log and security related files and are named\nusing the convention 'profile_<name>'. By default they are\nlocated in your ipython directory. Once created, you will\ncan edit the configuration files in the profile\ndirectory to configure IPython. Most users will create a\nprofile directory by name,\n`ipython profile create myprofile`, which will put the directory\nin `<ipython_dir>/profile_myprofile`.\n\"\"\"\nlist_help = \"\"\"List available IPython profiles\n\nList all available profiles, by profile location, that can\nbe found in the current working directly or in the ipython\ndirectory. Profile directories are named using the convention\n'profile_<profile>'.\n\"\"\"\nprofile_help = \"\"\"Manage IPython profiles\n\nProfile directories contain\nconfiguration, log and security related files and are named\nusing the convention 'profile_<name>'. By default they are\nlocated in your ipython directory. 
You can create profiles\nwith `ipython profile create <name>`, or see the profiles you\nalready have with `ipython profile list`\n\nTo get started configuring IPython, simply do:\n\n$> ipython profile create\n\nand IPython will create the default profile in <ipython_dir>/profile_default,\nwhere you can edit ipython_config.py to start configuring IPython.\n\n\"\"\"\n\n_list_examples = \"ipython profile list # list all profiles\"\n\n_create_examples = \"\"\"\nipython profile create foo # create profile foo w/ default config files\nipython profile create foo --reset # restage default config files over current\nipython profile create foo --parallel # also stage parallel config files\n\"\"\"\n\n_main_examples = \"\"\"\nipython profile create -h # show the help string for the create subcommand\nipython profile list -h # show the help string for the list subcommand\n\nipython locate profile foo # print the path to the directory for profile 'foo'\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Profile Application Class (for `ipython profile` subcommand)\n#-----------------------------------------------------------------------------\n\n\ndef list_profiles_in(path):\n \"\"\"list profiles in a given root directory\"\"\"\n files = os.listdir(path)\n profiles = []\n for f in files:\n try:\n full_path = os.path.join(path, f)\n except UnicodeError:\n continue\n if os.path.isdir(full_path) and f.startswith('profile_'):\n profiles.append(f.split('_',1)[-1])\n return profiles\n\n\ndef list_bundled_profiles():\n \"\"\"list profiles that are bundled with IPython.\"\"\"\n path = os.path.join(get_ipython_package_dir(), u'config', u'profile')\n files = os.listdir(path)\n profiles = []\n for profile in files:\n full_path = os.path.join(path, profile)\n if os.path.isdir(full_path) and profile != \"__pycache__\":\n profiles.append(profile)\n return profiles\n\n\nclass ProfileLocate(BaseIPythonApplication):\n description = \"\"\"print the path to an IPython profile dir\"\"\"\n \n def parse_command_line(self, argv=None):\n super(ProfileLocate, self).parse_command_line(argv)\n if self.extra_args:\n self.profile = self.extra_args[0]\n \n def start(self):\n print(self.profile_dir.location)\n\n\nclass ProfileList(Application):\n name = u'ipython-profile'\n description = list_help\n examples = _list_examples\n\n aliases = Dict({\n 'ipython-dir' : 'ProfileList.ipython_dir',\n 'log-level' : 'Application.log_level',\n })\n flags = Dict(dict(\n debug = ({'Application' : {'log_level' : 0}},\n \"Set Application.log_level to 0, maximizing log output.\"\n )\n ))\n\n ipython_dir = Unicode(get_ipython_dir(), config=True,\n help=\"\"\"\n The name of the IPython directory. This directory is used for logging\n configuration (through profiles), history storage, etc. The default\n is usually $HOME/.ipython. 
This options can also be specified through\n the environment variable IPYTHONDIR.\n \"\"\"\n )\n\n\n def _print_profiles(self, profiles):\n \"\"\"print list of profiles, indented.\"\"\"\n for profile in profiles:\n print(' %s' % profile)\n\n def list_profile_dirs(self):\n profiles = list_bundled_profiles()\n if profiles:\n print()\n print(\"Available profiles in IPython:\")\n self._print_profiles(profiles)\n print()\n print(\" The first request for a bundled profile will copy it\")\n print(\" into your IPython directory (%s),\" % self.ipython_dir)\n print(\" where you can customize it.\")\n \n profiles = list_profiles_in(self.ipython_dir)\n if profiles:\n print()\n print(\"Available profiles in %s:\" % self.ipython_dir)\n self._print_profiles(profiles)\n \n profiles = list_profiles_in(py3compat.getcwd())\n if profiles:\n print()\n print(\"Available profiles in current directory (%s):\" % py3compat.getcwd())\n self._print_profiles(profiles)\n \n print()\n print(\"To use any of the above profiles, start IPython with:\")\n print(\" ipython --profile=<name>\")\n print()\n\n def start(self):\n self.list_profile_dirs()\n\n\ncreate_flags = {}\ncreate_flags.update(base_flags)\n# don't include '--init' flag, which implies running profile create in other apps\ncreate_flags.pop('init')\ncreate_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},\n \"reset config files in this profile to the defaults.\")\ncreate_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},\n \"Include the config files for parallel \"\n \"computing apps (ipengine, ipcontroller, etc.)\")\n\n\nclass ProfileCreate(BaseIPythonApplication):\n name = u'ipython-profile'\n description = create_help\n examples = _create_examples\n auto_create = Bool(True, config=False)\n def _log_format_default(self):\n return \"[%(name)s] %(message)s\"\n\n def _copy_config_files_default(self):\n return True\n\n parallel = Bool(False, config=True,\n help=\"whether to include parallel computing config files\")\n def _parallel_changed(self, name, old, new):\n parallel_files = [ 'ipcontroller_config.py',\n 'ipengine_config.py',\n 'ipcluster_config.py'\n ]\n if new:\n for cf in parallel_files:\n self.config_files.append(cf)\n else:\n for cf in parallel_files:\n if cf in self.config_files:\n self.config_files.remove(cf)\n\n def parse_command_line(self, argv):\n super(ProfileCreate, self).parse_command_line(argv)\n # accept positional arg as profile name\n if self.extra_args:\n self.profile = self.extra_args[0]\n\n flags = Dict(create_flags)\n\n classes = [ProfileDir]\n \n def _import_app(self, app_path):\n \"\"\"import an app class\"\"\"\n app = None\n name = app_path.rsplit('.', 1)[-1]\n try:\n app = import_item(app_path)\n except ImportError:\n self.log.info(\"Couldn't import %s, config file will be excluded\", name)\n except Exception:\n self.log.warn('Unexpected error importing %s', name, exc_info=True)\n return app\n\n def init_config_files(self):\n super(ProfileCreate, self).init_config_files()\n # use local imports, since these classes may import from here\n from IPython.terminal.ipapp import TerminalIPythonApp\n apps = [TerminalIPythonApp]\n for app_path in (\n 'IPython.kernel.zmq.kernelapp.IPKernelApp',\n 'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',\n 'IPython.html.notebookapp.NotebookApp',\n 'IPython.nbconvert.nbconvertapp.NbConvertApp',\n ):\n app = self._import_app(app_path)\n if app is not None:\n apps.append(app)\n if self.parallel:\n from IPython.parallel.apps.ipcontrollerapp import IPControllerApp\n from 
IPython.parallel.apps.ipengineapp import IPEngineApp\n from IPython.parallel.apps.ipclusterapp import IPClusterStart\n from IPython.parallel.apps.iploggerapp import IPLoggerApp\n apps.extend([\n IPControllerApp,\n IPEngineApp,\n IPClusterStart,\n IPLoggerApp,\n ])\n for App in apps:\n app = App()\n app.config.update(self.config)\n app.log = self.log\n app.overwrite = self.overwrite\n app.copy_config_files=True\n app.ipython_dir=self.ipython_dir\n app.profile_dir=self.profile_dir\n app.init_config_files()\n\n def stage_default_config_file(self):\n pass\n\n\nclass ProfileApp(Application):\n name = u'ipython profile'\n description = profile_help\n examples = _main_examples\n\n subcommands = Dict(dict(\n create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),\n list = (ProfileList, ProfileList.description.splitlines()[0]),\n locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),\n ))\n\n def start(self):\n if self.subapp is None:\n print(\"No subcommand specified. Must specify one of: %s\"%(self.subcommands.keys()))\n print()\n self.print_description()\n self.print_subcommands()\n self.exit(1)\n else:\n return self.subapp.start()\n", "path": "IPython/core/profileapp.py"}]} | 3,690 | 136 |
gh_patches_debug_6762 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-4770 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
nv-sd CI test failure
The Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed.
</issue>
<code>
[start of deepspeed/model_implementations/diffusers/unet.py]
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5
6 import torch
7 from ..features.cuda_graph import CUDAGraph
8
9
10 class DSUNet(CUDAGraph, torch.nn.Module):
11
12 def __init__(self, unet, enable_cuda_graph=True):
13 super().__init__(enable_cuda_graph=enable_cuda_graph)
14 self.unet = unet
15 # SD pipeline accesses this attribute
16 self.in_channels = unet.in_channels
17 self.device = self.unet.device
18 self.dtype = self.unet.dtype
19 self.config = self.unet.config
20 self.fwd_count = 0
21 self.unet.requires_grad_(requires_grad=False)
22 self.unet.to(memory_format=torch.channels_last)
23 self.cuda_graph_created = False
24
25 def _graph_replay(self, *inputs, **kwargs):
26 for i in range(len(inputs)):
27 if torch.is_tensor(inputs[i]):
28 self.static_inputs[i].copy_(inputs[i])
29 for k in kwargs:
30 if torch.is_tensor(kwargs[k]):
31 self.static_kwargs[k].copy_(kwargs[k])
32 self._cuda_graphs.replay()
33 return self.static_output
34
35 def forward(self, *inputs, **kwargs):
36 if self.enable_cuda_graph:
37 if self.cuda_graph_created:
38 outputs = self._graph_replay(*inputs, **kwargs)
39 else:
40 self._create_cuda_graph(*inputs, **kwargs)
41 outputs = self._graph_replay(*inputs, **kwargs)
42 return outputs
43 else:
44 return self._forward(*inputs, **kwargs)
45
46 def _create_cuda_graph(self, *inputs, **kwargs):
47 # warmup to create the workspace and cublas handle
48 cuda_stream = torch.cuda.Stream()
49 cuda_stream.wait_stream(torch.cuda.current_stream())
50 with torch.cuda.stream(cuda_stream):
51 for i in range(3):
52 ret = self._forward(*inputs, **kwargs)
53 torch.cuda.current_stream().wait_stream(cuda_stream)
54
55 # create cuda_graph and assign static_inputs and static_outputs
56 self._cuda_graphs = torch.cuda.CUDAGraph()
57 self.static_inputs = inputs
58 self.static_kwargs = kwargs
59
60 with torch.cuda.graph(self._cuda_graphs):
61 self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
62
63 self.cuda_graph_created = True
64
65 def _forward(self,
66 sample,
67 timestamp,
68 encoder_hidden_states,
69 return_dict=True,
70 cross_attention_kwargs=None,
71 timestep_cond=None):
72 if cross_attention_kwargs:
73 return self.unet(sample,
74 timestamp,
75 encoder_hidden_states,
76 return_dict,
77 cross_attention_kwargs=cross_attention_kwargs)
78 else:
79 return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
80
[end of deepspeed/model_implementations/diffusers/unet.py]
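For orientation, a minimal sketch of how the wrapper above can be exercised with the CUDA-graph path disabled; the stand-in module is an assumption for illustration and only mimics the attributes `DSUNet` reads (`in_channels`, `device`, `dtype`, `config`):

```python
import torch
from deepspeed.model_implementations.diffusers.unet import DSUNet

class _ToyUNet(torch.nn.Module):
    # Stand-in with just the attributes and call signature DSUNet expects.
    def __init__(self, channels=4):
        super().__init__()
        self.in_channels = channels
        self.config = {"in_channels": channels}
        self.device = torch.device("cpu")
        self.dtype = torch.float32
        self.proj = torch.nn.Conv2d(channels, channels, kernel_size=1)

    def forward(self, sample, timestep, encoder_hidden_states, return_dict=True):
        return self.proj(sample)

wrapped = DSUNet(_ToyUNet(), enable_cuda_graph=False)  # graph capture itself needs a GPU
out = wrapped(torch.randn(1, 4, 8, 8), torch.tensor([10]), torch.randn(1, 77, 32))
```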
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py
--- a/deepspeed/model_implementations/diffusers/unet.py
+++ b/deepspeed/model_implementations/diffusers/unet.py
@@ -68,7 +68,8 @@
encoder_hidden_states,
return_dict=True,
cross_attention_kwargs=None,
- timestep_cond=None):
+ timestep_cond=None,
+ added_cond_kwargs=None):
if cross_attention_kwargs:
return self.unet(sample,
timestamp,
| {"golden_diff": "diff --git a/deepspeed/model_implementations/diffusers/unet.py b/deepspeed/model_implementations/diffusers/unet.py\n--- a/deepspeed/model_implementations/diffusers/unet.py\n+++ b/deepspeed/model_implementations/diffusers/unet.py\n@@ -68,7 +68,8 @@\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n- timestep_cond=None):\n+ timestep_cond=None,\n+ added_cond_kwargs=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n", "issue": "nv-sd CI test failure\nThe Nightly CI for https://github.com/microsoft/DeepSpeed/actions/runs/7073374056 failed.\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\n\nimport torch\nfrom ..features.cuda_graph import CUDAGraph\n\n\nclass DSUNet(CUDAGraph, torch.nn.Module):\n\n def __init__(self, unet, enable_cuda_graph=True):\n super().__init__(enable_cuda_graph=enable_cuda_graph)\n self.unet = unet\n # SD pipeline accesses this attribute\n self.in_channels = unet.in_channels\n self.device = self.unet.device\n self.dtype = self.unet.dtype\n self.config = self.unet.config\n self.fwd_count = 0\n self.unet.requires_grad_(requires_grad=False)\n self.unet.to(memory_format=torch.channels_last)\n self.cuda_graph_created = False\n\n def _graph_replay(self, *inputs, **kwargs):\n for i in range(len(inputs)):\n if torch.is_tensor(inputs[i]):\n self.static_inputs[i].copy_(inputs[i])\n for k in kwargs:\n if torch.is_tensor(kwargs[k]):\n self.static_kwargs[k].copy_(kwargs[k])\n self._cuda_graphs.replay()\n return self.static_output\n\n def forward(self, *inputs, **kwargs):\n if self.enable_cuda_graph:\n if self.cuda_graph_created:\n outputs = self._graph_replay(*inputs, **kwargs)\n else:\n self._create_cuda_graph(*inputs, **kwargs)\n outputs = self._graph_replay(*inputs, **kwargs)\n return outputs\n else:\n return self._forward(*inputs, **kwargs)\n\n def _create_cuda_graph(self, *inputs, **kwargs):\n # warmup to create the workspace and cublas handle\n cuda_stream = torch.cuda.Stream()\n cuda_stream.wait_stream(torch.cuda.current_stream())\n with torch.cuda.stream(cuda_stream):\n for i in range(3):\n ret = self._forward(*inputs, **kwargs)\n torch.cuda.current_stream().wait_stream(cuda_stream)\n\n # create cuda_graph and assign static_inputs and static_outputs\n self._cuda_graphs = torch.cuda.CUDAGraph()\n self.static_inputs = inputs\n self.static_kwargs = kwargs\n\n with torch.cuda.graph(self._cuda_graphs):\n self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)\n\n self.cuda_graph_created = True\n\n def _forward(self,\n sample,\n timestamp,\n encoder_hidden_states,\n return_dict=True,\n cross_attention_kwargs=None,\n timestep_cond=None):\n if cross_attention_kwargs:\n return self.unet(sample,\n timestamp,\n encoder_hidden_states,\n return_dict,\n cross_attention_kwargs=cross_attention_kwargs)\n else:\n return self.unet(sample, timestamp, encoder_hidden_states, return_dict)\n", "path": "deepspeed/model_implementations/diffusers/unet.py"}]} | 1,335 | 127 |
gh_patches_debug_460 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3013 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Twitter asks for authorization even though I've already authorized Gittip
As of #1369 Twitter is now asking me to authorize Gittip even though I've already done so.

<bountysource-plugin>
---
Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).
</bountysource-plugin>
Twitter asks for authorization even though I've already authorized Gittip
As of #1369 Twitter is now asking me to authorize Gittip even though I've already done so.

<bountysource-plugin>
---
Want to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).
</bountysource-plugin>
</issue>
<code>
[start of gratipay/elsewhere/twitter.py]
1 from __future__ import absolute_import, division, print_function, unicode_literals
2
3 from gratipay.elsewhere import PlatformOAuth1
4 from gratipay.elsewhere._extractors import key, not_available
5
6
7 class Twitter(PlatformOAuth1):
8
9 # Platform attributes
10 name = 'twitter'
11 display_name = 'Twitter'
12 account_url = 'https://twitter.com/{user_name}'
13
14 # Auth attributes
15 auth_url = 'https://api.twitter.com'
16
17 # API attributes
18 api_format = 'json'
19 api_url = 'https://api.twitter.com/1.1'
20 api_user_info_path = '/users/show.json?screen_name={user_name}'
21 api_user_self_info_path = '/account/verify_credentials.json'
22 ratelimit_headers_prefix = 'x-rate-limit-'
23
24 # User info extractors
25 x_user_id = key('id')
26 x_user_name = key('screen_name')
27 x_display_name = key('name')
28 x_email = not_available
29 x_avatar_url = key('profile_image_url_https',
30 clean=lambda v: v.replace('_normal.', '.'))
31
[end of gratipay/elsewhere/twitter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py
--- a/gratipay/elsewhere/twitter.py
+++ b/gratipay/elsewhere/twitter.py
@@ -13,6 +13,7 @@
# Auth attributes
auth_url = 'https://api.twitter.com'
+ authorize_path = '/oauth/authenticate'
# API attributes
api_format = 'json'
| {"golden_diff": "diff --git a/gratipay/elsewhere/twitter.py b/gratipay/elsewhere/twitter.py\n--- a/gratipay/elsewhere/twitter.py\n+++ b/gratipay/elsewhere/twitter.py\n@@ -13,6 +13,7 @@\n \n # Auth attributes\n auth_url = 'https://api.twitter.com'\n+ authorize_path = '/oauth/authenticate'\n \n # API attributes\n api_format = 'json'\n", "issue": "Twitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\nTwitter asks for authorization even though I've already authorized Gittip\nAs of #1369 Twitter is now asking me to authorize Giitip even though I've already done so.\n\n\n\n<bountysource-plugin>\n\n---\n\nWant to back this issue? **[Place a bounty on it!](https://www.bountysource.com/issues/1428788-twitter-asks-for-authorization-even-though-i-ve-already-authorized-gittip?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F85909&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom gratipay.elsewhere import PlatformOAuth1\nfrom gratipay.elsewhere._extractors import key, not_available\n\n\nclass Twitter(PlatformOAuth1):\n\n # Platform attributes\n name = 'twitter'\n display_name = 'Twitter'\n account_url = 'https://twitter.com/{user_name}'\n\n # Auth attributes\n auth_url = 'https://api.twitter.com'\n\n # API attributes\n api_format = 'json'\n api_url = 'https://api.twitter.com/1.1'\n api_user_info_path = '/users/show.json?screen_name={user_name}'\n api_user_self_info_path = '/account/verify_credentials.json'\n ratelimit_headers_prefix = 'x-rate-limit-'\n\n # User info extractors\n x_user_id = key('id')\n x_user_name = key('screen_name')\n x_display_name = key('name')\n x_email = not_available\n x_avatar_url = key('profile_image_url_https',\n clean=lambda v: v.replace('_normal.', '.'))\n", "path": "gratipay/elsewhere/twitter.py"}]} | 1,346 | 96 |
gh_patches_debug_12961 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Calibration error does not support negative logits
Hi there,
It appears that `CalibrationError` currently does not support some of the input data types mentioned in [this table](https://torchmetrics.readthedocs.io/en/stable/pages/classification.html#input-types) from the docs. In particular, it seems to break when fed with
- `preds=logits` where `logits` is a `(N, C)` float32 tensor with potentially negative values.
- `preds=predictions` where `predictions` is a `(N,)` int tensor with the predicted labels.
It still works with softmax-ed logits (`preds=logits.softmax(-1)`) or, generally, with any `(N,)`-dimensional float32 tensor (i.e. the binary input in the input data table mentioned above).
To reproduce:
```python
N, C = 10, 3
targets = torch.randint(C, (N,))
# (N, C) non-negative: works
preds = torch.rand((N, C)) # non-negative
CalibrationError()(preds=preds, target=targets)
# (N, C) potentially negative: fails
preds = torch.randn((N, C)) # potentially negative
CalibrationError()(preds=preds, target=targets)
# (N,) int type: fails
CalibrationError()(preds=targets, target=targets)
# (N,) float type non-negative: works
preds = torch.rand((N,)) # binary non-negative
CalibrationError()(preds=preds, target=targets)
# (N,) float type potentially negative: fails
preds = torch.randn((N,)) # binary potentially negative
CalibrationError()(preds=preds, target=targets)
```
Torchmetrics: v0.8
Pytorch: v1.11
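
A minimal sketch of the normalize-first workaround implied above, assuming the metric is only meant to see probabilities (the `to_probs` helper is just for illustration, not part of torchmetrics):

```python
import torch
from torchmetrics import CalibrationError

def to_probs(preds: torch.Tensor) -> torch.Tensor:
    # Illustrative helper: map raw model outputs to probabilities that the
    # current metric accepts (logits -> softmax, real-valued scores -> sigmoid).
    if not preds.is_floating_point():
        raise ValueError("pass scores/probabilities, not hard integer predictions")
    return preds.softmax(dim=-1) if preds.ndim == 2 else preds.sigmoid()

N, C = 10, 3
targets = torch.randint(C, (N,))
logits = torch.randn(N, C)  # potentially negative
CalibrationError()(preds=to_probs(logits), target=targets)  # works after normalizing
```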
</issue>
<code>
[start of torchmetrics/functional/classification/calibration_error.py]
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Tuple
15
16 import torch
17 from torch import Tensor
18
19 from torchmetrics.utilities.checks import _input_format_classification
20 from torchmetrics.utilities.enums import DataType
21 from torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_8
22
23
24 def _binning_with_loop(
25 confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor
26 ) -> Tuple[Tensor, Tensor, Tensor]:
27 """Compute calibration bins using for loops. Use for pytorch < 1.6.
28
29 Args:
30 confidences: The confidence (i.e. predicted prob) of the top1 prediction.
31 accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
32 bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
33
34 Returns:
35 tuple with binned accuracy, binned confidence and binned probabilities
36 """
37 conf_bin = torch.zeros_like(bin_boundaries)
38 acc_bin = torch.zeros_like(bin_boundaries)
39 prop_bin = torch.zeros_like(bin_boundaries)
40 for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):
41 # Calculated confidence and accuracy in each bin
42 in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
43 prop_in_bin = in_bin.float().mean()
44 if prop_in_bin.item() > 0:
45 acc_bin[i] = accuracies[in_bin].float().mean()
46 conf_bin[i] = confidences[in_bin].mean()
47 prop_bin[i] = prop_in_bin
48 return acc_bin, conf_bin, prop_bin
49
50
51 def _binning_bucketize(
52 confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor
53 ) -> Tuple[Tensor, Tensor, Tensor]:
54 """Compute calibration bins using ``torch.bucketize``. Use for pytorch >= 1.6.
55
56 Args:
57 confidences: The confidence (i.e. predicted prob) of the top1 prediction.
58 accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
59 bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
60
61 Returns:
62 tuple with binned accuracy, binned confidence and binned probabilities
63 """
64 acc_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
65 conf_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
66 count_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)
67
68 indices = torch.bucketize(confidences, bin_boundaries) - 1
69
70 count_bin.scatter_add_(dim=0, index=indices, src=torch.ones_like(confidences))
71
72 conf_bin.scatter_add_(dim=0, index=indices, src=confidences)
73 conf_bin = torch.nan_to_num(conf_bin / count_bin)
74
75 acc_bin.scatter_add_(dim=0, index=indices, src=accuracies)
76 acc_bin = torch.nan_to_num(acc_bin / count_bin)
77
78 prop_bin = count_bin / count_bin.sum()
79 return acc_bin, conf_bin, prop_bin
80
81
82 def _ce_compute(
83 confidences: Tensor,
84 accuracies: Tensor,
85 bin_boundaries: Tensor,
86 norm: str = "l1",
87 debias: bool = False,
88 ) -> Tensor:
89 """Computes the calibration error given the provided bin boundaries and norm.
90
91 Args:
92 confidences: The confidence (i.e. predicted prob) of the top1 prediction.
93 accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.
94 bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.
95 norm: Norm function to use when computing calibration error. Defaults to "l1".
96 debias: Apply debiasing to L2 norm computation as in
97 `Verified Uncertainty Calibration`_. Defaults to False.
98
99 Raises:
100 ValueError: If an unsupported norm function is provided.
101
102 Returns:
103 Tensor: Calibration error scalar.
104 """
105 if norm not in {"l1", "l2", "max"}:
106 raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
107
108 if _TORCH_GREATER_EQUAL_1_8:
109 acc_bin, conf_bin, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)
110 else:
111 acc_bin, conf_bin, prop_bin = _binning_with_loop(confidences, accuracies, bin_boundaries)
112
113 if norm == "l1":
114 ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)
115 elif norm == "max":
116 ce = torch.max(torch.abs(acc_bin - conf_bin))
117 elif norm == "l2":
118 ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)
119 # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.
120 if debias:
121 # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from
122 # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/
123 debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)
124 ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin
125 ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)
126 return ce
127
128
129 def _ce_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
130 """Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their
131 correctness.
132
133 Args:
134 preds: Input ``softmaxed`` predictions.
135 target: Labels.
136
137 Raises:
138 ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.
139
140 Returns:
141 tuple with confidences and accuracies
142 """
143 _, _, mode = _input_format_classification(preds, target)
144
145 if mode == DataType.BINARY:
146 confidences, accuracies = preds, target
147 elif mode == DataType.MULTICLASS:
148 confidences, predictions = preds.max(dim=1)
149 accuracies = predictions.eq(target)
150 elif mode == DataType.MULTIDIM_MULTICLASS:
151 # reshape tensors
152 # for preds, move the class dimension to the final axis and flatten the rest
153 confidences, predictions = torch.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)
154 # for targets, just flatten the target
155 accuracies = predictions.eq(target.flatten())
156 else:
157 raise ValueError(
158 f"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}."
159 )
160 # must be cast to float for ddp allgather to work
161 return confidences.float(), accuracies.float()
162
163
164 def calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = "l1") -> Tensor:
165 r"""`Computes the Top-label Calibration Error`_
166
167 Three different norms are implemented, each corresponding to variations on the calibration error metric.
168
169 L1 norm (Expected Calibration Error)
170
171 .. math::
172 \text{ECE} = \sum_i^N b_i \|(p_i - c_i)\|
173
174 Infinity norm (Maximum Calibration Error)
175
176 .. math::
177 \text{MCE} = \max_{i} (p_i - c_i)
178
179 L2 norm (Root Mean Square Calibration Error)
180
181 .. math::
182 \text{RMSCE} = \sqrt{\sum_i^N b_i(p_i - c_i)^2}
183
184 Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,
185 :math:`c_i` is the average confidence of predictions in bin :math:`i`, and
186 :math:`b_i` is the fraction of data points in bin :math:`i`.
187
188 .. note:
189 L2-norm debiasing is not yet supported.
190
191 Args:
192 preds: Model output probabilities.
193 target: Ground-truth target class labels.
194 n_bins: Number of bins to use when computing t.
195 norm: Norm used to compare empirical and expected probability bins.
196 Defaults to "l1", or Expected Calibration Error.
197 """
198 if norm not in ("l1", "l2", "max"):
199 raise ValueError(f"Norm {norm} is not supported. Please select from l1, l2, or max. ")
200
201 if not isinstance(n_bins, int) or n_bins <= 0:
202 raise ValueError(f"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}")
203
204 confidences, accuracies = _ce_update(preds, target)
205
206 bin_boundaries = torch.linspace(0, 1, n_bins + 1, dtype=torch.float, device=preds.device)
207
208 return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)
209
[end of torchmetrics/functional/classification/calibration_error.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchmetrics/functional/classification/calibration_error.py b/torchmetrics/functional/classification/calibration_error.py
--- a/torchmetrics/functional/classification/calibration_error.py
+++ b/torchmetrics/functional/classification/calibration_error.py
@@ -143,8 +143,12 @@
_, _, mode = _input_format_classification(preds, target)
if mode == DataType.BINARY:
+ if not ((0 <= preds) * (preds <= 1)).all():
+ preds = preds.sigmoid()
confidences, accuracies = preds, target
elif mode == DataType.MULTICLASS:
+ if not ((0 <= preds) * (preds <= 1)).all():
+ preds = preds.softmax(dim=1)
confidences, predictions = preds.max(dim=1)
accuracies = predictions.eq(target)
elif mode == DataType.MULTIDIM_MULTICLASS:
| {"golden_diff": "diff --git a/torchmetrics/functional/classification/calibration_error.py b/torchmetrics/functional/classification/calibration_error.py\n--- a/torchmetrics/functional/classification/calibration_error.py\n+++ b/torchmetrics/functional/classification/calibration_error.py\n@@ -143,8 +143,12 @@\n _, _, mode = _input_format_classification(preds, target)\n \n if mode == DataType.BINARY:\n+ if not ((0 <= preds) * (preds <= 1)).all():\n+ preds = preds.sigmoid()\n confidences, accuracies = preds, target\n elif mode == DataType.MULTICLASS:\n+ if not ((0 <= preds) * (preds <= 1)).all():\n+ preds = preds.softmax(dim=1)\n confidences, predictions = preds.max(dim=1)\n accuracies = predictions.eq(target)\n elif mode == DataType.MULTIDIM_MULTICLASS:\n", "issue": "Calibration error does not support negative logits\nHi there,\r\n\r\nIt appears that `CalibrationError` currently does not support some of the input data types mentioned in [this table](https://torchmetrics.readthedocs.io/en/stable/pages/classification.html#input-types) from the docs. In particular, it seems to break when fed with \r\n\r\n- `preds=logits` where `logits` is a `(N, C)` float32 tensor with potentially negative values.\r\n- `preds=predictions` where `predictions` is a `(N,)` int tensor with the predicted labels.\r\n\r\nIt still works with softmax-ed logits (`preds=logits.softmax(-1)`) or, generally, with and `(N,)`-dimensional float32 tensors (i.e. the binary input in the input data table mentioned above).\r\n\r\nTo reproduce:\r\n\r\n```python\r\nN, C = 10, 3\r\ntargets = torch.randint(C, (N,))\r\n\r\n# (N, C) non-negative: works\r\npreds = torch.rand((N, C)) # non-negative\r\nCalibrationError()(preds=preds, target=targets)\r\n\r\n# (N, C) potentially negative: fails\r\npreds = torch.randn((N, C)) # potetially negative\r\nCalibrationError()(preds=preds, target=targets)\r\n\r\n# (N,) int type: fails\r\nCalibrationError()(preds=targets, target=targets)\r\n\r\n# (N,) float type non-negative: works\r\npreds = torch.rand((N,)) # binary non-negative\r\nCalibrationError()(preds=preds, target=targets)\r\n\r\n# (N,) float type potentially negative: fails\r\npreds = torch.randn((N,)) # binary potentially negative\r\nCalibrationError()(preds=preds, target=targets)\r\n```\r\n\r\nTorchmetrics: v0.8\r\nPytorch: v1.11\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _input_format_classification\nfrom torchmetrics.utilities.enums import DataType\nfrom torchmetrics.utilities.imports import _TORCH_GREATER_EQUAL_1_8\n\n\ndef _binning_with_loop(\n confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute calibration bins using for loops. Use for pytorch < 1.6.\n\n Args:\n confidences: The confidence (i.e. 
predicted prob) of the top1 prediction.\n accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.\n bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.\n\n Returns:\n tuple with binned accuracy, binned confidence and binned probabilities\n \"\"\"\n conf_bin = torch.zeros_like(bin_boundaries)\n acc_bin = torch.zeros_like(bin_boundaries)\n prop_bin = torch.zeros_like(bin_boundaries)\n for i, (bin_lower, bin_upper) in enumerate(zip(bin_boundaries[:-1], bin_boundaries[1:])):\n # Calculated confidence and accuracy in each bin\n in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())\n prop_in_bin = in_bin.float().mean()\n if prop_in_bin.item() > 0:\n acc_bin[i] = accuracies[in_bin].float().mean()\n conf_bin[i] = confidences[in_bin].mean()\n prop_bin[i] = prop_in_bin\n return acc_bin, conf_bin, prop_bin\n\n\ndef _binning_bucketize(\n confidences: Tensor, accuracies: Tensor, bin_boundaries: Tensor\n) -> Tuple[Tensor, Tensor, Tensor]:\n \"\"\"Compute calibration bins using ``torch.bucketize``. Use for pytorch >= 1.6.\n\n Args:\n confidences: The confidence (i.e. predicted prob) of the top1 prediction.\n accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.\n bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.\n\n Returns:\n tuple with binned accuracy, binned confidence and binned probabilities\n \"\"\"\n acc_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)\n conf_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)\n count_bin = torch.zeros(len(bin_boundaries) - 1, device=confidences.device, dtype=confidences.dtype)\n\n indices = torch.bucketize(confidences, bin_boundaries) - 1\n\n count_bin.scatter_add_(dim=0, index=indices, src=torch.ones_like(confidences))\n\n conf_bin.scatter_add_(dim=0, index=indices, src=confidences)\n conf_bin = torch.nan_to_num(conf_bin / count_bin)\n\n acc_bin.scatter_add_(dim=0, index=indices, src=accuracies)\n acc_bin = torch.nan_to_num(acc_bin / count_bin)\n\n prop_bin = count_bin / count_bin.sum()\n return acc_bin, conf_bin, prop_bin\n\n\ndef _ce_compute(\n confidences: Tensor,\n accuracies: Tensor,\n bin_boundaries: Tensor,\n norm: str = \"l1\",\n debias: bool = False,\n) -> Tensor:\n \"\"\"Computes the calibration error given the provided bin boundaries and norm.\n\n Args:\n confidences: The confidence (i.e. predicted prob) of the top1 prediction.\n accuracies: 1.0 if the top-1 prediction was correct, 0.0 otherwise.\n bin_boundaries: Bin boundaries separating the ``linspace`` from 0 to 1.\n norm: Norm function to use when computing calibration error. Defaults to \"l1\".\n debias: Apply debiasing to L2 norm computation as in\n `Verified Uncertainty Calibration`_. Defaults to False.\n\n Raises:\n ValueError: If an unsupported norm function is provided.\n\n Returns:\n Tensor: Calibration error scalar.\n \"\"\"\n if norm not in {\"l1\", \"l2\", \"max\"}:\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. 
\")\n\n if _TORCH_GREATER_EQUAL_1_8:\n acc_bin, conf_bin, prop_bin = _binning_bucketize(confidences, accuracies, bin_boundaries)\n else:\n acc_bin, conf_bin, prop_bin = _binning_with_loop(confidences, accuracies, bin_boundaries)\n\n if norm == \"l1\":\n ce = torch.sum(torch.abs(acc_bin - conf_bin) * prop_bin)\n elif norm == \"max\":\n ce = torch.max(torch.abs(acc_bin - conf_bin))\n elif norm == \"l2\":\n ce = torch.sum(torch.pow(acc_bin - conf_bin, 2) * prop_bin)\n # NOTE: debiasing is disabled in the wrapper functions. This implementation differs from that in sklearn.\n if debias:\n # the order here (acc_bin - 1 ) vs (1 - acc_bin) is flipped from\n # the equation in Verified Uncertainty Prediction (Kumar et al 2019)/\n debias_bins = (acc_bin * (acc_bin - 1) * prop_bin) / (prop_bin * accuracies.size()[0] - 1)\n ce += torch.sum(torch.nan_to_num(debias_bins)) # replace nans with zeros if nothing appeared in a bin\n ce = torch.sqrt(ce) if ce > 0 else torch.tensor(0)\n return ce\n\n\ndef _ce_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Given a predictions and targets tensor, computes the confidences of the top-1 prediction and records their\n correctness.\n\n Args:\n preds: Input ``softmaxed`` predictions.\n target: Labels.\n\n Raises:\n ValueError: If the dataset shape is not binary, multiclass, or multidimensional-multiclass.\n\n Returns:\n tuple with confidences and accuracies\n \"\"\"\n _, _, mode = _input_format_classification(preds, target)\n\n if mode == DataType.BINARY:\n confidences, accuracies = preds, target\n elif mode == DataType.MULTICLASS:\n confidences, predictions = preds.max(dim=1)\n accuracies = predictions.eq(target)\n elif mode == DataType.MULTIDIM_MULTICLASS:\n # reshape tensors\n # for preds, move the class dimension to the final axis and flatten the rest\n confidences, predictions = torch.transpose(preds, 1, -1).flatten(0, -2).max(dim=1)\n # for targets, just flatten the target\n accuracies = predictions.eq(target.flatten())\n else:\n raise ValueError(\n f\"Calibration error is not well-defined for data with size {preds.size()} and targets {target.size()}.\"\n )\n # must be cast to float for ddp allgather to work\n return confidences.float(), accuracies.float()\n\n\ndef calibration_error(preds: Tensor, target: Tensor, n_bins: int = 15, norm: str = \"l1\") -> Tensor:\n r\"\"\"`Computes the Top-label Calibration Error`_\n\n Three different norms are implemented, each corresponding to variations on the calibration error metric.\n\n L1 norm (Expected Calibration Error)\n\n .. math::\n \\text{ECE} = \\sum_i^N b_i \\|(p_i - c_i)\\|\n\n Infinity norm (Maximum Calibration Error)\n\n .. math::\n \\text{MCE} = \\max_{i} (p_i - c_i)\n\n L2 norm (Root Mean Square Calibration Error)\n\n .. math::\n \\text{RMSCE} = \\sqrt{\\sum_i^N b_i(p_i - c_i)^2}\n\n Where :math:`p_i` is the top-1 prediction accuracy in bin :math:`i`,\n :math:`c_i` is the average confidence of predictions in bin :math:`i`, and\n :math:`b_i` is the fraction of data points in bin :math:`i`.\n\n .. note:\n L2-norm debiasing is not yet supported.\n\n Args:\n preds: Model output probabilities.\n target: Ground-truth target class labels.\n n_bins: Number of bins to use when computing t.\n norm: Norm used to compare empirical and expected probability bins.\n Defaults to \"l1\", or Expected Calibration Error.\n \"\"\"\n if norm not in (\"l1\", \"l2\", \"max\"):\n raise ValueError(f\"Norm {norm} is not supported. Please select from l1, l2, or max. 
\")\n\n if not isinstance(n_bins, int) or n_bins <= 0:\n raise ValueError(f\"Expected argument `n_bins` to be a int larger than 0 but got {n_bins}\")\n\n confidences, accuracies = _ce_update(preds, target)\n\n bin_boundaries = torch.linspace(0, 1, n_bins + 1, dtype=torch.float, device=preds.device)\n\n return _ce_compute(confidences, accuracies, bin_boundaries, norm=norm)\n", "path": "torchmetrics/functional/classification/calibration_error.py"}]} | 3,640 | 203 |
gh_patches_debug_2788 | rasdani/github-patches | git_diff | lutris__lutris-4038 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bottom panel switches to a different game when it stops
If another game is running and you switch to a different one, and the first game is then closed outside of Lutris (e.g. by quitting it manually or closing the game window), the bottom panel switches to that stopped game all by itself, without any user interaction:

It should be noted that so far, only I can reproduce this, for some bizarre reason.
</issue>
<code>
[start of lutris/gui/widgets/game_bar.py]
1 from datetime import datetime
2 from gettext import gettext as _
3
4 from gi.repository import GObject, Gtk, Pango
5
6 from lutris import runners, services
7 from lutris.database.games import get_game_by_field, get_game_for_service
8 from lutris.game import Game
9 from lutris.gui.widgets.utils import get_link_button
10 from lutris.util.strings import gtk_safe
11
12
13 class GameBar(Gtk.Box):
14 def __init__(self, db_game, game_actions, application):
15 """Create the game bar with a database row"""
16 super().__init__(orientation=Gtk.Orientation.VERTICAL, visible=True,
17 margin_top=12,
18 margin_left=12,
19 margin_bottom=12,
20 margin_right=12,
21 spacing=6)
22 GObject.add_emission_hook(Game, "game-start", self.on_game_state_changed)
23 GObject.add_emission_hook(Game, "game-started", self.on_game_state_changed)
24 GObject.add_emission_hook(Game, "game-stopped", self.on_game_state_changed)
25 GObject.add_emission_hook(Game, "game-updated", self.on_game_state_changed)
26 GObject.add_emission_hook(Game, "game-removed", self.on_game_state_changed)
27 GObject.add_emission_hook(Game, "game-installed", self.on_game_state_changed)
28
29 self.set_margin_bottom(12)
30 self.game_actions = game_actions
31 self.db_game = db_game
32 self.service = None
33 if db_game.get("service"):
34 try:
35 self.service = services.SERVICES[db_game["service"]]()
36 except KeyError:
37 pass
38
39 game_id = None
40 if "service_id" in db_game:
41 self.appid = db_game["service_id"]
42 game_id = db_game["id"]
43 elif self.service:
44 self.appid = db_game["appid"]
45 if self.service.id == "lutris":
46 game = get_game_by_field(self.appid, field="slug")
47 else:
48 game = get_game_for_service(self.service.id, self.appid)
49 if game:
50 game_id = game["id"]
51 if game_id:
52 self.game = application.get_game_by_id(game_id) or Game(game_id)
53 else:
54 self.game = Game()
55 self.game.name = db_game["name"]
56 self.game.slug = db_game["slug"]
57 self.game.appid = self.appid
58 self.game.service = self.service.id if self.service else None
59 game_actions.set_game(self.game)
60 self.update_view()
61
62 def clear_view(self):
63 """Clears all widgets from the container"""
64 for child in self.get_children():
65 child.destroy()
66
67 def update_view(self):
68 """Populate the view with widgets"""
69 game_label = self.get_game_name_label()
70 game_label.set_halign(Gtk.Align.START)
71 self.pack_start(game_label, False, False, 0)
72
73 hbox = Gtk.Box(Gtk.Orientation.HORIZONTAL, spacing=6)
74 self.pack_start(hbox, False, False, 0)
75
76 self.play_button = self.get_play_button()
77 hbox.pack_start(self.play_button, False, False, 0)
78
79 if self.game.is_installed:
80 hbox.pack_start(self.get_runner_button(), False, False, 0)
81 hbox.pack_start(self.get_platform_label(), False, False, 0)
82 if self.game.lastplayed:
83 hbox.pack_start(self.get_last_played_label(), False, False, 0)
84 if self.game.playtime:
85 hbox.pack_start(self.get_playtime_label(), False, False, 0)
86 hbox.show_all()
87
88 def get_popover(self, buttons, parent):
89 """Return the popover widget containing a list of link buttons"""
90 if not buttons:
91 return None
92 popover = Gtk.Popover()
93 vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, visible=True)
94
95 for action in buttons:
96 vbox.pack_end(buttons[action], False, False, 1)
97 popover.add(vbox)
98 popover.set_position(Gtk.PositionType.TOP)
99 popover.set_constrain_to(Gtk.PopoverConstraint.NONE)
100 popover.set_relative_to(parent)
101 return popover
102
103 def get_game_name_label(self):
104 """Return the label with the game's title"""
105 title_label = Gtk.Label(visible=True)
106 title_label.set_ellipsize(Pango.EllipsizeMode.END)
107 title_label.set_markup("<span font_desc='16'><b>%s</b></span>" % gtk_safe(self.game.name))
108 return title_label
109
110 def get_runner_button(self):
111 icon_name = self.game.runner.name + "-symbolic"
112 runner_icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)
113 runner_icon.show()
114 box = Gtk.HBox(visible=True)
115 runner_button = Gtk.Button(visible=True)
116 popover = self.get_popover(self.get_runner_buttons(), runner_button)
117 if popover:
118 runner_button.set_image(runner_icon)
119 popover_button = Gtk.MenuButton(visible=True)
120 popover_button.set_size_request(32, 32)
121 popover_button.props.direction = Gtk.ArrowType.UP
122 popover_button.set_popover(popover)
123 runner_button.connect("clicked", lambda _x: popover_button.emit("clicked"))
124 box.add(runner_button)
125 box.add(popover_button)
126 style_context = box.get_style_context()
127 style_context.add_class("linked")
128 else:
129 runner_icon.set_margin_left(49)
130 runner_icon.set_margin_right(6)
131 box.add(runner_icon)
132 return box
133
134 def get_platform_label(self):
135 platform_label = Gtk.Label(visible=True)
136 platform_label.set_size_request(120, -1)
137 platform_label.set_alignment(0, 0.5)
138 platform = gtk_safe(self.game.platform)
139 platform_label.set_tooltip_markup(platform)
140 platform_label.set_markup(_("Platform:\n<b>%s</b>") % platform)
141 platform_label.set_property("ellipsize", Pango.EllipsizeMode.END)
142 return platform_label
143
144 def get_playtime_label(self):
145 """Return the label containing the playtime info"""
146 playtime_label = Gtk.Label(visible=True)
147 playtime_label.set_size_request(120, -1)
148 playtime_label.set_alignment(0, 0.5)
149 playtime_label.set_markup(_("Time played:\n<b>%s</b>") % self.game.formatted_playtime)
150 return playtime_label
151
152 def get_last_played_label(self):
153 """Return the label containing the last played info"""
154 last_played_label = Gtk.Label(visible=True)
155 last_played_label.set_size_request(120, -1)
156 last_played_label.set_alignment(0, 0.5)
157 lastplayed = datetime.fromtimestamp(self.game.lastplayed)
158 last_played_label.set_markup(_("Last played:\n<b>%s</b>") % lastplayed.strftime("%x"))
159 return last_played_label
160
161 def get_popover_button(self):
162 """Return the popover button+menu for the Play button"""
163 popover_button = Gtk.MenuButton(visible=True)
164 popover_button.set_size_request(32, 32)
165 popover_button.props.direction = Gtk.ArrowType.UP
166
167 return popover_button
168
169 def get_popover_box(self):
170 """Return a container for a button + a popover button attached to it"""
171 box = Gtk.HBox(visible=True)
172 style_context = box.get_style_context()
173 style_context.add_class("linked")
174 return box
175
176 def get_locate_installed_game_button(self):
177 """Return a button to locate an existing install"""
178 button = get_link_button("Locate installed game")
179 button.show()
180 button.connect("clicked", self.game_actions.on_locate_installed_game, self.game)
181 return {"locate": button}
182
183 def get_play_button(self):
184 """Return the widget for install/play/stop and game config"""
185 button = Gtk.Button(visible=True)
186 button.set_size_request(120, 32)
187 box = self.get_popover_box()
188 popover_button = self.get_popover_button()
189 if self.game.is_installed:
190 if self.game.state == self.game.STATE_STOPPED:
191 button.set_label(_("Play"))
192 button.connect("clicked", self.game_actions.on_game_launch)
193 elif self.game.state == self.game.STATE_LAUNCHING:
194 button.set_label(_("Launching"))
195 button.set_sensitive(False)
196 else:
197 button.set_label(_("Stop"))
198 button.connect("clicked", self.game_actions.on_game_stop)
199 else:
200 button.set_label(_("Install"))
201 button.connect("clicked", self.game_actions.on_install_clicked)
202 if self.service:
203 if self.service.local:
204 # Local services don't show an install dialog, they can be launched directly
205 button.set_label(_("Play"))
206 if self.service.drm_free:
207 button.set_size_request(84, 32)
208 box.add(button)
209 popover = self.get_popover(self.get_locate_installed_game_button(), popover_button)
210 popover_button.set_popover(popover)
211 box.add(popover_button)
212 return box
213 return button
214 button.set_size_request(84, 32)
215 box.add(button)
216 popover = self.get_popover(self.get_game_buttons(), popover_button)
217 popover_button.set_popover(popover)
218 box.add(popover_button)
219 return box
220
221 def get_game_buttons(self):
222 """Return a dictionary of buttons to use in the panel"""
223 displayed = self.game_actions.get_displayed_entries()
224 buttons = {}
225 for action in self.game_actions.get_game_actions():
226 action_id, label, callback = action
227 if action_id in ("play", "stop", "install"):
228 continue
229 button = get_link_button(label)
230 if displayed.get(action_id):
231 button.show()
232 else:
233 button.hide()
234 buttons[action_id] = button
235 button.connect("clicked", self.on_link_button_clicked, callback)
236 return buttons
237
238 def get_runner_buttons(self):
239 buttons = {}
240 if self.game.runner_name and self.game.is_installed:
241 runner = runners.import_runner(self.game.runner_name)(self.game.config)
242 for entry in runner.context_menu_entries:
243 name, label, callback = entry
244 button = get_link_button(label)
245 button.show()
246 button.connect("clicked", self.on_link_button_clicked, callback)
247 buttons[name] = button
248 return buttons
249
250 def on_link_button_clicked(self, button, callback):
251 """Callback for link buttons. Closes the popover then runs the actual action"""
252 popover = button.get_parent().get_parent()
253 popover.popdown()
254 callback(button)
255
256 def on_install_clicked(self, button):
257 """Handler for installing service games"""
258 self.service.install(self.db_game)
259
260 def on_game_state_changed(self, game):
261 """Handler called when the game has changed state"""
262 if (
263 game.id == self.game.id
264 or game.appid == self.appid
265 ):
266 self.game = game
267 else:
268 return True
269 self.clear_view()
270 self.update_view()
271 return True
272
[end of lutris/gui/widgets/game_bar.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lutris/gui/widgets/game_bar.py b/lutris/gui/widgets/game_bar.py
--- a/lutris/gui/widgets/game_bar.py
+++ b/lutris/gui/widgets/game_bar.py
@@ -261,7 +261,7 @@
"""Handler called when the game has changed state"""
if (
game.id == self.game.id
- or game.appid == self.appid
+ or (self.appid and game.appid == self.appid)
):
self.game = game
else:
| {"golden_diff": "diff --git a/lutris/gui/widgets/game_bar.py b/lutris/gui/widgets/game_bar.py\n--- a/lutris/gui/widgets/game_bar.py\n+++ b/lutris/gui/widgets/game_bar.py\n@@ -261,7 +261,7 @@\n \"\"\"Handler called when the game has changed state\"\"\"\n if (\n game.id == self.game.id\n- or game.appid == self.appid\n+ or (self.appid and game.appid == self.appid)\n ):\n self.game = game\n else:\n", "issue": "Bottom panel switches to a different game when it stops\nIf another game is running and you switch to a different one, and the first game is closed by itself (like quitting it manually, closing the game window), not through Lutris, the bottom panel will switch to that stopped game all by itself, without user's interaction:\r\n\r\n\r\nIt should be noted that so far, only I can reproduce this, for some bizarre reason.\r\n\n", "before_files": [{"content": "from datetime import datetime\nfrom gettext import gettext as _\n\nfrom gi.repository import GObject, Gtk, Pango\n\nfrom lutris import runners, services\nfrom lutris.database.games import get_game_by_field, get_game_for_service\nfrom lutris.game import Game\nfrom lutris.gui.widgets.utils import get_link_button\nfrom lutris.util.strings import gtk_safe\n\n\nclass GameBar(Gtk.Box):\n def __init__(self, db_game, game_actions, application):\n \"\"\"Create the game bar with a database row\"\"\"\n super().__init__(orientation=Gtk.Orientation.VERTICAL, visible=True,\n margin_top=12,\n margin_left=12,\n margin_bottom=12,\n margin_right=12,\n spacing=6)\n GObject.add_emission_hook(Game, \"game-start\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-started\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-stopped\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-updated\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-removed\", self.on_game_state_changed)\n GObject.add_emission_hook(Game, \"game-installed\", self.on_game_state_changed)\n\n self.set_margin_bottom(12)\n self.game_actions = game_actions\n self.db_game = db_game\n self.service = None\n if db_game.get(\"service\"):\n try:\n self.service = services.SERVICES[db_game[\"service\"]]()\n except KeyError:\n pass\n\n game_id = None\n if \"service_id\" in db_game:\n self.appid = db_game[\"service_id\"]\n game_id = db_game[\"id\"]\n elif self.service:\n self.appid = db_game[\"appid\"]\n if self.service.id == \"lutris\":\n game = get_game_by_field(self.appid, field=\"slug\")\n else:\n game = get_game_for_service(self.service.id, self.appid)\n if game:\n game_id = game[\"id\"]\n if game_id:\n self.game = application.get_game_by_id(game_id) or Game(game_id)\n else:\n self.game = Game()\n self.game.name = db_game[\"name\"]\n self.game.slug = db_game[\"slug\"]\n self.game.appid = self.appid\n self.game.service = self.service.id if self.service else None\n game_actions.set_game(self.game)\n self.update_view()\n\n def clear_view(self):\n \"\"\"Clears all widgets from the container\"\"\"\n for child in self.get_children():\n child.destroy()\n\n def update_view(self):\n \"\"\"Populate the view with widgets\"\"\"\n game_label = self.get_game_name_label()\n game_label.set_halign(Gtk.Align.START)\n self.pack_start(game_label, False, False, 0)\n\n hbox = Gtk.Box(Gtk.Orientation.HORIZONTAL, spacing=6)\n self.pack_start(hbox, False, False, 0)\n\n self.play_button = self.get_play_button()\n hbox.pack_start(self.play_button, False, False, 0)\n\n if self.game.is_installed:\n hbox.pack_start(self.get_runner_button(), False, 
False, 0)\n hbox.pack_start(self.get_platform_label(), False, False, 0)\n if self.game.lastplayed:\n hbox.pack_start(self.get_last_played_label(), False, False, 0)\n if self.game.playtime:\n hbox.pack_start(self.get_playtime_label(), False, False, 0)\n hbox.show_all()\n\n def get_popover(self, buttons, parent):\n \"\"\"Return the popover widget containing a list of link buttons\"\"\"\n if not buttons:\n return None\n popover = Gtk.Popover()\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, visible=True)\n\n for action in buttons:\n vbox.pack_end(buttons[action], False, False, 1)\n popover.add(vbox)\n popover.set_position(Gtk.PositionType.TOP)\n popover.set_constrain_to(Gtk.PopoverConstraint.NONE)\n popover.set_relative_to(parent)\n return popover\n\n def get_game_name_label(self):\n \"\"\"Return the label with the game's title\"\"\"\n title_label = Gtk.Label(visible=True)\n title_label.set_ellipsize(Pango.EllipsizeMode.END)\n title_label.set_markup(\"<span font_desc='16'><b>%s</b></span>\" % gtk_safe(self.game.name))\n return title_label\n\n def get_runner_button(self):\n icon_name = self.game.runner.name + \"-symbolic\"\n runner_icon = Gtk.Image.new_from_icon_name(icon_name, Gtk.IconSize.MENU)\n runner_icon.show()\n box = Gtk.HBox(visible=True)\n runner_button = Gtk.Button(visible=True)\n popover = self.get_popover(self.get_runner_buttons(), runner_button)\n if popover:\n runner_button.set_image(runner_icon)\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n popover_button.set_popover(popover)\n runner_button.connect(\"clicked\", lambda _x: popover_button.emit(\"clicked\"))\n box.add(runner_button)\n box.add(popover_button)\n style_context = box.get_style_context()\n style_context.add_class(\"linked\")\n else:\n runner_icon.set_margin_left(49)\n runner_icon.set_margin_right(6)\n box.add(runner_icon)\n return box\n\n def get_platform_label(self):\n platform_label = Gtk.Label(visible=True)\n platform_label.set_size_request(120, -1)\n platform_label.set_alignment(0, 0.5)\n platform = gtk_safe(self.game.platform)\n platform_label.set_tooltip_markup(platform)\n platform_label.set_markup(_(\"Platform:\\n<b>%s</b>\") % platform)\n platform_label.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return platform_label\n\n def get_playtime_label(self):\n \"\"\"Return the label containing the playtime info\"\"\"\n playtime_label = Gtk.Label(visible=True)\n playtime_label.set_size_request(120, -1)\n playtime_label.set_alignment(0, 0.5)\n playtime_label.set_markup(_(\"Time played:\\n<b>%s</b>\") % self.game.formatted_playtime)\n return playtime_label\n\n def get_last_played_label(self):\n \"\"\"Return the label containing the last played info\"\"\"\n last_played_label = Gtk.Label(visible=True)\n last_played_label.set_size_request(120, -1)\n last_played_label.set_alignment(0, 0.5)\n lastplayed = datetime.fromtimestamp(self.game.lastplayed)\n last_played_label.set_markup(_(\"Last played:\\n<b>%s</b>\") % lastplayed.strftime(\"%x\"))\n return last_played_label\n\n def get_popover_button(self):\n \"\"\"Return the popover button+menu for the Play button\"\"\"\n popover_button = Gtk.MenuButton(visible=True)\n popover_button.set_size_request(32, 32)\n popover_button.props.direction = Gtk.ArrowType.UP\n\n return popover_button\n\n def get_popover_box(self):\n \"\"\"Return a container for a button + a popover button attached to it\"\"\"\n box = Gtk.HBox(visible=True)\n style_context = 
box.get_style_context()\n style_context.add_class(\"linked\")\n return box\n\n def get_locate_installed_game_button(self):\n \"\"\"Return a button to locate an existing install\"\"\"\n button = get_link_button(\"Locate installed game\")\n button.show()\n button.connect(\"clicked\", self.game_actions.on_locate_installed_game, self.game)\n return {\"locate\": button}\n\n def get_play_button(self):\n \"\"\"Return the widget for install/play/stop and game config\"\"\"\n button = Gtk.Button(visible=True)\n button.set_size_request(120, 32)\n box = self.get_popover_box()\n popover_button = self.get_popover_button()\n if self.game.is_installed:\n if self.game.state == self.game.STATE_STOPPED:\n button.set_label(_(\"Play\"))\n button.connect(\"clicked\", self.game_actions.on_game_launch)\n elif self.game.state == self.game.STATE_LAUNCHING:\n button.set_label(_(\"Launching\"))\n button.set_sensitive(False)\n else:\n button.set_label(_(\"Stop\"))\n button.connect(\"clicked\", self.game_actions.on_game_stop)\n else:\n button.set_label(_(\"Install\"))\n button.connect(\"clicked\", self.game_actions.on_install_clicked)\n if self.service:\n if self.service.local:\n # Local services don't show an install dialog, they can be launched directly\n button.set_label(_(\"Play\"))\n if self.service.drm_free:\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_locate_installed_game_button(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n return button\n button.set_size_request(84, 32)\n box.add(button)\n popover = self.get_popover(self.get_game_buttons(), popover_button)\n popover_button.set_popover(popover)\n box.add(popover_button)\n return box\n\n def get_game_buttons(self):\n \"\"\"Return a dictionary of buttons to use in the panel\"\"\"\n displayed = self.game_actions.get_displayed_entries()\n buttons = {}\n for action in self.game_actions.get_game_actions():\n action_id, label, callback = action\n if action_id in (\"play\", \"stop\", \"install\"):\n continue\n button = get_link_button(label)\n if displayed.get(action_id):\n button.show()\n else:\n button.hide()\n buttons[action_id] = button\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n return buttons\n\n def get_runner_buttons(self):\n buttons = {}\n if self.game.runner_name and self.game.is_installed:\n runner = runners.import_runner(self.game.runner_name)(self.game.config)\n for entry in runner.context_menu_entries:\n name, label, callback = entry\n button = get_link_button(label)\n button.show()\n button.connect(\"clicked\", self.on_link_button_clicked, callback)\n buttons[name] = button\n return buttons\n\n def on_link_button_clicked(self, button, callback):\n \"\"\"Callback for link buttons. Closes the popover then runs the actual action\"\"\"\n popover = button.get_parent().get_parent()\n popover.popdown()\n callback(button)\n\n def on_install_clicked(self, button):\n \"\"\"Handler for installing service games\"\"\"\n self.service.install(self.db_game)\n\n def on_game_state_changed(self, game):\n \"\"\"Handler called when the game has changed state\"\"\"\n if (\n game.id == self.game.id\n or game.appid == self.appid\n ):\n self.game = game\n else:\n return True\n self.clear_view()\n self.update_view()\n return True\n", "path": "lutris/gui/widgets/game_bar.py"}]} | 3,761 | 118 |
gh_patches_debug_16571 | rasdani/github-patches | git_diff | geopandas__geopandas-854 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Deprecation Warning with fiona 1.8b1
using a `debian:buster` docker image
installed Fiona with
> pip install git+https://github.com/Toblerity/[email protected]
I got this __warning__ today:
```python
/usr/local/lib/python2.7/dist-packages/geopandas/io/file.py:108: FionaDeprecationWarning: Use fiona.Env() instead.
with fiona.drivers():
No handlers could be found for logger "rasterio._gdal"
```
</issue>
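The replacement that the deprecation warning points to is fiona's newer environment context manager. A minimal sketch of the new-style call, assuming fiona ≥ 1.8 is installed (the shapefile path is made up for illustration):

```python
import fiona

# fiona >= 1.8 deprecates the fiona.drivers() context manager in favour of fiona.Env()
with fiona.Env():
    with fiona.open("example.shp") as collection:  # hypothetical input file
        print(collection.crs)
```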
<code>
[start of geopandas/io/file.py]
1 import os
2
3 import fiona
4 import numpy as np
5 import six
6
7 from geopandas import GeoDataFrame, GeoSeries
8
9 # Adapted from pandas.io.common
10 if six.PY3:
11 from urllib.request import urlopen as _urlopen
12 from urllib.parse import urlparse as parse_url
13 from urllib.parse import uses_relative, uses_netloc, uses_params
14 else:
15 from urllib2 import urlopen as _urlopen
16 from urlparse import urlparse as parse_url
17 from urlparse import uses_relative, uses_netloc, uses_params
18
19 _VALID_URLS = set(uses_relative + uses_netloc + uses_params)
20 _VALID_URLS.discard('')
21
22
23 def _is_url(url):
24 """Check to see if *url* has a valid protocol."""
25 try:
26 return parse_url(url).scheme in _VALID_URLS
27 except:
28 return False
29
30
31 def read_file(filename, bbox=None, **kwargs):
32 """
33 Returns a GeoDataFrame from a file or URL.
34
35 Parameters
36 ----------
37 filename: str
38 Either the absolute or relative path to the file or URL to
39 be opened.
40 bbox : tuple | GeoDataFrame or GeoSeries, default None
41 Filter features by given bounding box, GeoSeries, or GeoDataFrame.
42 CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.
43 **kwargs:
44 Keyword args to be passed to the `open` or `BytesCollection` method
45 in the fiona library when opening the file. For more information on
46 possible keywords, type:
47 ``import fiona; help(fiona.open)``
48
49 Examples
50 --------
51 >>> df = geopandas.read_file("nybb.shp")
52
53 Returns
54 -------
55 geodataframe : GeoDataFrame
56 """
57 if _is_url(filename):
58 req = _urlopen(filename)
59 path_or_bytes = req.read()
60 reader = fiona.BytesCollection
61 else:
62 path_or_bytes = filename
63 reader = fiona.open
64
65 with reader(path_or_bytes, **kwargs) as features:
66 crs = features.crs
67 if bbox is not None:
68 if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):
69 bbox = tuple(bbox.to_crs(crs).total_bounds)
70 assert len(bbox) == 4
71 f_filt = features.filter(bbox=bbox)
72 else:
73 f_filt = features
74
75 columns = list(features.meta["schema"]["properties"]) + ["geometry"]
76 gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)
77
78 return gdf
79
80
81 def to_file(df, filename, driver="ESRI Shapefile", schema=None,
82 **kwargs):
83 """
84 Write this GeoDataFrame to an OGR data source
85
86 A dictionary of supported OGR providers is available via:
87 >>> import fiona
88 >>> fiona.supported_drivers
89
90 Parameters
91 ----------
92 df : GeoDataFrame to be written
93 filename : string
94 File path or file handle to write to.
95 driver : string, default 'ESRI Shapefile'
96 The OGR format driver used to write the vector file.
97 schema : dict, default None
98 If specified, the schema dictionary is passed to Fiona to
99 better control how the file is written. If None, GeoPandas
100 will determine the schema based on each column's dtype
101
102 The *kwargs* are passed to fiona.open and can be used to write
103 to multi-layer data, store data within archives (zip files), etc.
104 """
105 if schema is None:
106 schema = infer_schema(df)
107 filename = os.path.abspath(os.path.expanduser(filename))
108 with fiona.drivers():
109 with fiona.open(filename, 'w', driver=driver, crs=df.crs,
110 schema=schema, **kwargs) as colxn:
111 colxn.writerecords(df.iterfeatures())
112
113
114 def infer_schema(df):
115 try:
116 from collections import OrderedDict
117 except ImportError:
118 from ordereddict import OrderedDict
119
120 def convert_type(column, in_type):
121 if in_type == object:
122 return 'str'
123 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__
124 if out_type == 'long':
125 out_type = 'int'
126 if out_type == 'bool':
127 raise ValueError('column "{}" is boolean type, '.format(column) +
128 'which is unsupported in file writing. '
129 'Consider casting the column to int type.')
130 return out_type
131
132 properties = OrderedDict([
133 (col, convert_type(col, _type)) for col, _type in
134 zip(df.columns, df.dtypes) if col != df._geometry_column_name
135 ])
136
137 if df.empty:
138 raise ValueError("Cannot write empty DataFrame to file.")
139
140 geom_type = _common_geom_type(df)
141
142 if not geom_type:
143 raise ValueError("Geometry column cannot contain mutiple "
144 "geometry types when writing to file.")
145
146 schema = {'geometry': geom_type, 'properties': properties}
147
148 return schema
149
150
151 def _common_geom_type(df):
152 # Need to check geom_types before we write to file...
153 # Some (most?) providers expect a single geometry type:
154 # Point, LineString, or Polygon
155 geom_types = df.geometry.geom_type.unique()
156
157 from os.path import commonprefix
158 # use reversed geom types and commonprefix to find the common suffix,
159 # then reverse the result to get back to a geom type
160 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]
161 if not geom_type:
162 return None
163
164 if df.geometry.has_z.any():
165 geom_type = "3D " + geom_type
166
167 return geom_type
168
[end of geopandas/io/file.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/geopandas/io/file.py b/geopandas/io/file.py
--- a/geopandas/io/file.py
+++ b/geopandas/io/file.py
@@ -4,6 +4,11 @@
import numpy as np
import six
+try:
+ from fiona import Env as fiona_env
+except ImportError:
+ from fiona import drivers as fiona_env
+
from geopandas import GeoDataFrame, GeoSeries
# Adapted from pandas.io.common
@@ -105,7 +110,7 @@
if schema is None:
schema = infer_schema(df)
filename = os.path.abspath(os.path.expanduser(filename))
- with fiona.drivers():
+ with fiona_env():
with fiona.open(filename, 'w', driver=driver, crs=df.crs,
schema=schema, **kwargs) as colxn:
colxn.writerecords(df.iterfeatures())
| {"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -4,6 +4,11 @@\n import numpy as np\n import six\n \n+try:\n+ from fiona import Env as fiona_env\n+except ImportError:\n+ from fiona import drivers as fiona_env\n+\n from geopandas import GeoDataFrame, GeoSeries\n \n # Adapted from pandas.io.common\n@@ -105,7 +110,7 @@\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n- with fiona.drivers():\n+ with fiona_env():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n", "issue": "Deprecation Warning with fiona 1.8b1\nusing a `debian:buster` docker image\r\n\r\ninstalled Fiona with \r\n> pip install git+https://github.com/Toblerity/[email protected]\r\n\r\nI got this __warning__ today: \r\n```python\r\n/usr/local/lib/python2.7/dist-packages/geopandas/io/file.py:108: FionaDeprecationWarning: Use fiona.Env() instead.\r\n with fiona.drivers():\r\nNo handlers could be found for logger \"rasterio._gdal\"\r\n```\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame, GeoSeries\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, bbox=None, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n bbox : tuple | GeoDataFrame or GeoSeries, default None\n Filter features by given bounding box, GeoSeries, or GeoDataFrame.\n CRS mis-matches are resolved if given a GeoSeries or GeoDataFrame.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. 
For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n\n with reader(path_or_bytes, **kwargs) as features:\n crs = features.crs\n if bbox is not None:\n if isinstance(bbox, GeoDataFrame) or isinstance(bbox, GeoSeries):\n bbox = tuple(bbox.to_crs(crs).total_bounds)\n assert len(bbox) == 4\n f_filt = features.filter(bbox=bbox)\n else:\n f_filt = features\n\n columns = list(features.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = GeoDataFrame.from_features(f_filt, crs=crs, columns=columns)\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n if out_type == 'bool':\n raise ValueError('column \"{}\" is boolean type, '.format(column) +\n 'which is unsupported in file writing. '\n 'Consider casting the column to int type.')\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n if df.empty:\n raise ValueError(\"Cannot write empty DataFrame to file.\")\n\n geom_type = _common_geom_type(df)\n \n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) 
providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix\n # use reversed geom types and commonprefix to find the common suffix,\n # then reverse the result to get back to a geom type\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1]\n if not geom_type:\n return None\n\n if df.geometry.has_z.any():\n geom_type = \"3D \" + geom_type\n\n return geom_type\n", "path": "geopandas/io/file.py"}]} | 2,295 | 203 |
gh_patches_debug_24607 | rasdani/github-patches | git_diff | streamlink__streamlink-3185 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tv360.com.tr no playable stream
## Bug Report
- [x] This is a bug report and I have read the contribution guidelines.
### Description
Streamlink can't find a playable stream.
### Expected / Actual behavior
The stream is supposed to be found.
### Reproduction steps / Explicit stream URLs to test
``` 1. streamlink https://www.tv360.com.tr/canli-yayin ```
### Log output
```
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.8.2
[cli][debug] Streamlink: 1.5.0
[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)
[cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin
error: No playable streams found on this URL: tv360.com.tr/canli-yayin
```
### Additional comments, screenshots, etc.
[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)
</issue>
<code>
[start of src/streamlink/plugins/tv360.py]
1 from __future__ import print_function
2
3 import re
4
5 from streamlink.plugin import Plugin
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8
9
10 class TV360(Plugin):
11 url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin")
12 hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL)
13
14 hls_schema = validate.Schema(
15 validate.transform(hls_re.search),
16 validate.any(None, validate.all(validate.get(1)))
17 )
18
19 @classmethod
20 def can_handle_url(cls, url):
21 return cls.url_re.match(url) is not None
22
23 def _get_streams(self):
24 res = self.session.http.get(self.url)
25 hls_url = self.hls_re.search(res.text)
26
27 if hls_url:
28 return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))
29
30
31 __plugin__ = TV360
32
[end of src/streamlink/plugins/tv360.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py
--- a/src/streamlink/plugins/tv360.py
+++ b/src/streamlink/plugins/tv360.py
@@ -1,5 +1,3 @@
-from __future__ import print_function
-
import re
from streamlink.plugin import Plugin
@@ -9,11 +7,11 @@
class TV360(Plugin):
url_re = re.compile(r"https?://(?:www.)?tv360.com.tr/canli-yayin")
- hls_re = re.compile(r'''hls.loadSource\(["'](http.*m3u8)["']\)''', re.DOTALL)
+ hls_re = re.compile(r'''src="(http.*m3u8)"''')
hls_schema = validate.Schema(
validate.transform(hls_re.search),
- validate.any(None, validate.all(validate.get(1)))
+ validate.any(None, validate.all(validate.get(1), validate.url()))
)
@classmethod
@@ -21,11 +19,10 @@
return cls.url_re.match(url) is not None
def _get_streams(self):
- res = self.session.http.get(self.url)
- hls_url = self.hls_re.search(res.text)
+ hls_url = self.session.http.get(self.url, schema=self.hls_schema)
if hls_url:
- return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))
+ return HLSStream.parse_variant_playlist(self.session, hls_url)
__plugin__ = TV360
| {"golden_diff": "diff --git a/src/streamlink/plugins/tv360.py b/src/streamlink/plugins/tv360.py\n--- a/src/streamlink/plugins/tv360.py\n+++ b/src/streamlink/plugins/tv360.py\n@@ -1,5 +1,3 @@\n-from __future__ import print_function\n-\n import re\n \n from streamlink.plugin import Plugin\n@@ -9,11 +7,11 @@\n \n class TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n- hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n+ hls_re = re.compile(r'''src=\"(http.*m3u8)\"''')\n \n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n- validate.any(None, validate.all(validate.get(1)))\n+ validate.any(None, validate.all(validate.get(1), validate.url()))\n )\n \n @classmethod\n@@ -21,11 +19,10 @@\n return cls.url_re.match(url) is not None\n \n def _get_streams(self):\n- res = self.session.http.get(self.url)\n- hls_url = self.hls_re.search(res.text)\n+ hls_url = self.session.http.get(self.url, schema=self.hls_schema)\n \n if hls_url:\n- return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n+ return HLSStream.parse_variant_playlist(self.session, hls_url)\n \n \n __plugin__ = TV360\n", "issue": "tv360.com.tr no playable stream\n## Bug Report\r\n- [x] This is a bug report and I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\ncan't find playable stream.\r\n\r\n### Expected / Actual behavior\r\n\r\nstream supposed to be found\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n``` 1. streamlink https://www.tv360.com.tr/canli-yayin ```\r\n\r\n### Log output\r\n\r\n```\r\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.8.2\r\n[cli][debug] Streamlink: 1.5.0\r\n[cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[cli][info] Found matching plugin tv360 for URL tv360.com.tr/canli-yayin\r\nerror: No playable streams found on this URL: tv360.com.tr/canli-yayin\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\n\r\n\r\n[Love Streamlink? Please consider supporting our collective. Thanks!](https://opencollective.com/streamlink/donate)\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass TV360(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?tv360.com.tr/canli-yayin\")\n hls_re = re.compile(r'''hls.loadSource\\([\"'](http.*m3u8)[\"']\\)''', re.DOTALL)\n\n hls_schema = validate.Schema(\n validate.transform(hls_re.search),\n validate.any(None, validate.all(validate.get(1)))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n hls_url = self.hls_re.search(res.text)\n\n if hls_url:\n return HLSStream.parse_variant_playlist(self.session, hls_url.group(1))\n\n\n__plugin__ = TV360\n", "path": "src/streamlink/plugins/tv360.py"}]} | 1,086 | 363 |
gh_patches_debug_7942 | rasdani/github-patches | git_diff | borgbackup__borg-3134 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
1.1.0: using a logging config causes exception
I've been using Borg 1.0.x with a [simple logging config](https://github.com/borgbackup/borg/files/1369192/logging.conf.txt) to get a logging behaviour suitable for cronjobs, that is: everything goes to the logfile, errors and warnings also go to stderr (and are sent via mail, by cron). Using the same logging config with Borg 1.1.0 causes an exception:
```
2017-10-09 06:05:09 [ERROR] Local Exception
2017-10-09 06:05:09 [ERROR] Traceback (most recent call last):
File "borg/archiver.py", line 4024, in main
File "borg/archiver.py", line 3952, in run
File "borg/archiver.py", line 130, in wrapper
File "borg/remote.py", line 562, in __init__
File "borg/remote.py", line 699, in call
File "borg/remote.py", line 841, in call_many
File "borg/remote.py", line 989, in handle_remote_line
AttributeError: 'Logger' object has no attribute 'json'
```
When not using a logging config, `setup_logging` will set up two loggers, the second one explicitly named `borg` and having a custom `json` attribute (see logging.py, lines 97-99). While I could add a `borg` logger to my logging config, there seems to be no way to add the required custom `json` attribute within the fileConfig format.
</issue>
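To illustrate the limitation described in the issue: the fileConfig format can declare a `borg` logger, but it cannot attach arbitrary attributes to it, so the extra `json` flag would have to be set in code after loading the config. A rough sketch of that kind of workaround, assuming the user's config lives at `logging.conf` (this is not something borg itself does):

```python
import logging
import logging.config

logging.config.fileConfig("logging.conf")  # hypothetical path to the user's logging config

# fileConfig cannot attach custom attributes, so the flag that borg's built-in fallback
# sets (borg_logger.json = ...) has to be added manually afterwards:
borg_logger = logging.getLogger("borg")
borg_logger.json = False  # plain-text output, mirroring the non-JSON fallback path
```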
<code>
[start of src/borg/logger.py]
1 """logging facilities
2
3 The way to use this is as follows:
4
5 * each module declares its own logger, using:
6
7 from .logger import create_logger
8 logger = create_logger()
9
10 * then each module uses logger.info/warning/debug/etc according to the
11 level it believes is appropriate:
12
13 logger.debug('debugging info for developers or power users')
14 logger.info('normal, informational output')
15 logger.warning('warn about a non-fatal error or sth else')
16 logger.error('a fatal error')
17
18 ... and so on. see the `logging documentation
19 <https://docs.python.org/3/howto/logging.html#when-to-use-logging>`_
20 for more information
21
22 * console interaction happens on stderr, that includes interactive
23 reporting functions like `help`, `info` and `list`
24
25 * ...except ``input()`` is special, because we can't control the
26 stream it is using, unfortunately. we assume that it won't clutter
27 stdout, because interaction would be broken then anyways
28
29 * what is output on INFO level is additionally controlled by commandline
30 flags
31 """
32
33 import inspect
34 import json
35 import logging
36 import logging.config
37 import logging.handlers # needed for handlers defined there being configurable in logging.conf file
38 import os
39 import warnings
40
41 configured = False
42
43 # use something like this to ignore warnings:
44 # warnings.filterwarnings('ignore', r'... regex for warning message to ignore ...')
45
46
47 def _log_warning(message, category, filename, lineno, file=None, line=None):
48 # for warnings, we just want to use the logging system, not stderr or other files
49 msg = "{0}:{1}: {2}: {3}".format(filename, lineno, category.__name__, message)
50 logger = create_logger(__name__)
51 # Note: the warning will look like coming from here,
52 # but msg contains info about where it really comes from
53 logger.warning(msg)
54
55
56 def setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False):
57 """setup logging module according to the arguments provided
58
59 if conf_fname is given (or the config file name can be determined via
60 the env_var, if given): load this logging configuration.
61
62 otherwise, set up a stream handler logger on stderr (by default, if no
63 stream is provided).
64
65 if is_serve == True, we configure a special log format as expected by
66 the borg client log message interceptor.
67 """
68 global configured
69 err_msg = None
70 if env_var:
71 conf_fname = os.environ.get(env_var, conf_fname)
72 if conf_fname:
73 try:
74 conf_fname = os.path.abspath(conf_fname)
75 # we open the conf file here to be able to give a reasonable
76 # error message in case of failure (if we give the filename to
77 # fileConfig(), it silently ignores unreadable files and gives
78 # unhelpful error msgs like "No section: 'formatters'"):
79 with open(conf_fname) as f:
80 logging.config.fileConfig(f)
81 configured = True
82 logger = logging.getLogger(__name__)
83 logger.debug('using logging configuration read from "{0}"'.format(conf_fname))
84 warnings.showwarning = _log_warning
85 return None
86 except Exception as err: # XXX be more precise
87 err_msg = str(err)
88 # if we did not / not successfully load a logging configuration, fallback to this:
89 logger = logging.getLogger('')
90 handler = logging.StreamHandler(stream)
91 if is_serve and not json:
92 fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s'
93 else:
94 fmt = '%(message)s'
95 formatter = JsonFormatter(fmt) if json else logging.Formatter(fmt)
96 handler.setFormatter(formatter)
97 borg_logger = logging.getLogger('borg')
98 borg_logger.formatter = formatter
99 borg_logger.json = json
100 if configured and logger.handlers:
101 # The RepositoryServer can call setup_logging a second time to adjust the output
102 # mode from text-ish is_serve to json is_serve.
103 # Thus, remove the previously installed handler, if any.
104 logger.handlers[0].close()
105 logger.handlers.clear()
106 logger.addHandler(handler)
107 logger.setLevel(level.upper())
108 configured = True
109 logger = logging.getLogger(__name__)
110 if err_msg:
111 logger.warning('setup_logging for "{0}" failed with "{1}".'.format(conf_fname, err_msg))
112 logger.debug('using builtin fallback logging configuration')
113 warnings.showwarning = _log_warning
114 return handler
115
116
117 def find_parent_module():
118 """find the name of a the first module calling this module
119
120 if we cannot find it, we return the current module's name
121 (__name__) instead.
122 """
123 try:
124 frame = inspect.currentframe().f_back
125 module = inspect.getmodule(frame)
126 while module is None or module.__name__ == __name__:
127 frame = frame.f_back
128 module = inspect.getmodule(frame)
129 return module.__name__
130 except AttributeError:
131 # somehow we failed to find our module
132 # return the logger module name by default
133 return __name__
134
135
136 def create_logger(name=None):
137 """lazily create a Logger object with the proper path, which is returned by
138 find_parent_module() by default, or is provided via the commandline
139
140 this is really a shortcut for:
141
142 logger = logging.getLogger(__name__)
143
144 we use it to avoid errors and provide a more standard API.
145
146 We must create the logger lazily, because this is usually called from
147 module level (and thus executed at import time - BEFORE setup_logging()
148 was called). By doing it lazily we can do the setup first, we just have to
149 be careful not to call any logger methods before the setup_logging() call.
150 If you try, you'll get an exception.
151 """
152 class LazyLogger:
153 def __init__(self, name=None):
154 self.__name = name or find_parent_module()
155 self.__real_logger = None
156
157 @property
158 def __logger(self):
159 if self.__real_logger is None:
160 if not configured:
161 raise Exception("tried to call a logger before setup_logging() was called")
162 self.__real_logger = logging.getLogger(self.__name)
163 if self.__name.startswith('borg.debug.') and self.__real_logger.level == logging.NOTSET:
164 self.__real_logger.setLevel('WARNING')
165 return self.__real_logger
166
167 def getChild(self, suffix):
168 return LazyLogger(self.__name + '.' + suffix)
169
170 def setLevel(self, *args, **kw):
171 return self.__logger.setLevel(*args, **kw)
172
173 def log(self, *args, **kw):
174 if 'msgid' in kw:
175 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
176 return self.__logger.log(*args, **kw)
177
178 def exception(self, *args, **kw):
179 if 'msgid' in kw:
180 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
181 return self.__logger.exception(*args, **kw)
182
183 def debug(self, *args, **kw):
184 if 'msgid' in kw:
185 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
186 return self.__logger.debug(*args, **kw)
187
188 def info(self, *args, **kw):
189 if 'msgid' in kw:
190 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
191 return self.__logger.info(*args, **kw)
192
193 def warning(self, *args, **kw):
194 if 'msgid' in kw:
195 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
196 return self.__logger.warning(*args, **kw)
197
198 def error(self, *args, **kw):
199 if 'msgid' in kw:
200 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
201 return self.__logger.error(*args, **kw)
202
203 def critical(self, *args, **kw):
204 if 'msgid' in kw:
205 kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')
206 return self.__logger.critical(*args, **kw)
207
208 return LazyLogger(name)
209
210
211 class JsonFormatter(logging.Formatter):
212 RECORD_ATTRIBUTES = (
213 'levelname',
214 'name',
215 'message',
216 # msgid is an attribute we made up in Borg to expose a non-changing handle for log messages
217 'msgid',
218 )
219
220 # Other attributes that are not very useful but do exist:
221 # processName, process, relativeCreated, stack_info, thread, threadName
222 # msg == message
223 # *args* are the unformatted arguments passed to the logger function, not useful now,
224 # become useful if sanitized properly (must be JSON serializable) in the code +
225 # fixed message IDs are assigned.
226 # exc_info, exc_text are generally uninteresting because the message will have that
227
228 def format(self, record):
229 super().format(record)
230 data = {
231 'type': 'log_message',
232 'time': record.created,
233 'message': '',
234 'levelname': 'CRITICAL',
235 }
236 for attr in self.RECORD_ATTRIBUTES:
237 value = getattr(record, attr, None)
238 if value:
239 data[attr] = value
240 return json.dumps(data)
241
[end of src/borg/logger.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/borg/logger.py b/src/borg/logger.py
--- a/src/borg/logger.py
+++ b/src/borg/logger.py
@@ -80,6 +80,8 @@
logging.config.fileConfig(f)
configured = True
logger = logging.getLogger(__name__)
+ borg_logger = logging.getLogger('borg')
+ borg_logger.json = json
logger.debug('using logging configuration read from "{0}"'.format(conf_fname))
warnings.showwarning = _log_warning
return None
| {"golden_diff": "diff --git a/src/borg/logger.py b/src/borg/logger.py\n--- a/src/borg/logger.py\n+++ b/src/borg/logger.py\n@@ -80,6 +80,8 @@\n logging.config.fileConfig(f)\n configured = True\n logger = logging.getLogger(__name__)\n+ borg_logger = logging.getLogger('borg')\n+ borg_logger.json = json\n logger.debug('using logging configuration read from \"{0}\"'.format(conf_fname))\n warnings.showwarning = _log_warning\n return None\n", "issue": "1.1.0: using a logging config causes exception\nI've been using Borg 1.0.x with a [simple logging config](https://github.com/borgbackup/borg/files/1369192/logging.conf.txt) to get a logging behaviour suitable for cronjobs, that is: everything goes to the logfile, errors and warnings also go to stderr (and are sent via mail, by cron). Using the same logging config with Borg 1.1.0 causes an exception:\r\n\r\n```\r\n2017-10-09 06:05:09 [ERROR] Local Exception\r\n2017-10-09 06:05:09 [ERROR] Traceback (most recent call last):\r\n File \"borg/archiver.py\", line 4024, in main\r\n File \"borg/archiver.py\", line 3952, in run\r\n File \"borg/archiver.py\", line 130, in wrapper\r\n File \"borg/remote.py\", line 562, in __init__\r\n File \"borg/remote.py\", line 699, in call\r\n File \"borg/remote.py\", line 841, in call_many\r\n File \"borg/remote.py\", line 989, in handle_remote_line\r\nAttributeError: 'Logger' object has no attribute 'json'\r\n```\r\n\r\nWhen not using a logging config, `setup_logging` will set up two loggers, the second one explicitly named `borg` and having a custom `json` attribute (see logging.py, lines 97-99). While I could add a `borg` logger to my logging config there seems to be no way to add the required custom `json` attribute within the fileConfig format.\n", "before_files": [{"content": "\"\"\"logging facilities\n\nThe way to use this is as follows:\n\n* each module declares its own logger, using:\n\n from .logger import create_logger\n logger = create_logger()\n\n* then each module uses logger.info/warning/debug/etc according to the\n level it believes is appropriate:\n\n logger.debug('debugging info for developers or power users')\n logger.info('normal, informational output')\n logger.warning('warn about a non-fatal error or sth else')\n logger.error('a fatal error')\n\n ... and so on. see the `logging documentation\n <https://docs.python.org/3/howto/logging.html#when-to-use-logging>`_\n for more information\n\n* console interaction happens on stderr, that includes interactive\n reporting functions like `help`, `info` and `list`\n\n* ...except ``input()`` is special, because we can't control the\n stream it is using, unfortunately. we assume that it won't clutter\n stdout, because interaction would be broken then anyways\n\n* what is output on INFO level is additionally controlled by commandline\n flags\n\"\"\"\n\nimport inspect\nimport json\nimport logging\nimport logging.config\nimport logging.handlers # needed for handlers defined there being configurable in logging.conf file\nimport os\nimport warnings\n\nconfigured = False\n\n# use something like this to ignore warnings:\n# warnings.filterwarnings('ignore', r'... 
regex for warning message to ignore ...')\n\n\ndef _log_warning(message, category, filename, lineno, file=None, line=None):\n # for warnings, we just want to use the logging system, not stderr or other files\n msg = \"{0}:{1}: {2}: {3}\".format(filename, lineno, category.__name__, message)\n logger = create_logger(__name__)\n # Note: the warning will look like coming from here,\n # but msg contains info about where it really comes from\n logger.warning(msg)\n\n\ndef setup_logging(stream=None, conf_fname=None, env_var='BORG_LOGGING_CONF', level='info', is_serve=False, json=False):\n \"\"\"setup logging module according to the arguments provided\n\n if conf_fname is given (or the config file name can be determined via\n the env_var, if given): load this logging configuration.\n\n otherwise, set up a stream handler logger on stderr (by default, if no\n stream is provided).\n\n if is_serve == True, we configure a special log format as expected by\n the borg client log message interceptor.\n \"\"\"\n global configured\n err_msg = None\n if env_var:\n conf_fname = os.environ.get(env_var, conf_fname)\n if conf_fname:\n try:\n conf_fname = os.path.abspath(conf_fname)\n # we open the conf file here to be able to give a reasonable\n # error message in case of failure (if we give the filename to\n # fileConfig(), it silently ignores unreadable files and gives\n # unhelpful error msgs like \"No section: 'formatters'\"):\n with open(conf_fname) as f:\n logging.config.fileConfig(f)\n configured = True\n logger = logging.getLogger(__name__)\n logger.debug('using logging configuration read from \"{0}\"'.format(conf_fname))\n warnings.showwarning = _log_warning\n return None\n except Exception as err: # XXX be more precise\n err_msg = str(err)\n # if we did not / not successfully load a logging configuration, fallback to this:\n logger = logging.getLogger('')\n handler = logging.StreamHandler(stream)\n if is_serve and not json:\n fmt = '$LOG %(levelname)s %(name)s Remote: %(message)s'\n else:\n fmt = '%(message)s'\n formatter = JsonFormatter(fmt) if json else logging.Formatter(fmt)\n handler.setFormatter(formatter)\n borg_logger = logging.getLogger('borg')\n borg_logger.formatter = formatter\n borg_logger.json = json\n if configured and logger.handlers:\n # The RepositoryServer can call setup_logging a second time to adjust the output\n # mode from text-ish is_serve to json is_serve.\n # Thus, remove the previously installed handler, if any.\n logger.handlers[0].close()\n logger.handlers.clear()\n logger.addHandler(handler)\n logger.setLevel(level.upper())\n configured = True\n logger = logging.getLogger(__name__)\n if err_msg:\n logger.warning('setup_logging for \"{0}\" failed with \"{1}\".'.format(conf_fname, err_msg))\n logger.debug('using builtin fallback logging configuration')\n warnings.showwarning = _log_warning\n return handler\n\n\ndef find_parent_module():\n \"\"\"find the name of a the first module calling this module\n\n if we cannot find it, we return the current module's name\n (__name__) instead.\n \"\"\"\n try:\n frame = inspect.currentframe().f_back\n module = inspect.getmodule(frame)\n while module is None or module.__name__ == __name__:\n frame = frame.f_back\n module = inspect.getmodule(frame)\n return module.__name__\n except AttributeError:\n # somehow we failed to find our module\n # return the logger module name by default\n return __name__\n\n\ndef create_logger(name=None):\n \"\"\"lazily create a Logger object with the proper path, which is returned by\n find_parent_module() by 
default, or is provided via the commandline\n\n this is really a shortcut for:\n\n logger = logging.getLogger(__name__)\n\n we use it to avoid errors and provide a more standard API.\n\n We must create the logger lazily, because this is usually called from\n module level (and thus executed at import time - BEFORE setup_logging()\n was called). By doing it lazily we can do the setup first, we just have to\n be careful not to call any logger methods before the setup_logging() call.\n If you try, you'll get an exception.\n \"\"\"\n class LazyLogger:\n def __init__(self, name=None):\n self.__name = name or find_parent_module()\n self.__real_logger = None\n\n @property\n def __logger(self):\n if self.__real_logger is None:\n if not configured:\n raise Exception(\"tried to call a logger before setup_logging() was called\")\n self.__real_logger = logging.getLogger(self.__name)\n if self.__name.startswith('borg.debug.') and self.__real_logger.level == logging.NOTSET:\n self.__real_logger.setLevel('WARNING')\n return self.__real_logger\n\n def getChild(self, suffix):\n return LazyLogger(self.__name + '.' + suffix)\n\n def setLevel(self, *args, **kw):\n return self.__logger.setLevel(*args, **kw)\n\n def log(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.log(*args, **kw)\n\n def exception(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.exception(*args, **kw)\n\n def debug(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.debug(*args, **kw)\n\n def info(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.info(*args, **kw)\n\n def warning(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.warning(*args, **kw)\n\n def error(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.error(*args, **kw)\n\n def critical(self, *args, **kw):\n if 'msgid' in kw:\n kw.setdefault('extra', {})['msgid'] = kw.pop('msgid')\n return self.__logger.critical(*args, **kw)\n\n return LazyLogger(name)\n\n\nclass JsonFormatter(logging.Formatter):\n RECORD_ATTRIBUTES = (\n 'levelname',\n 'name',\n 'message',\n # msgid is an attribute we made up in Borg to expose a non-changing handle for log messages\n 'msgid',\n )\n\n # Other attributes that are not very useful but do exist:\n # processName, process, relativeCreated, stack_info, thread, threadName\n # msg == message\n # *args* are the unformatted arguments passed to the logger function, not useful now,\n # become useful if sanitized properly (must be JSON serializable) in the code +\n # fixed message IDs are assigned.\n # exc_info, exc_text are generally uninteresting because the message will have that\n\n def format(self, record):\n super().format(record)\n data = {\n 'type': 'log_message',\n 'time': record.created,\n 'message': '',\n 'levelname': 'CRITICAL',\n }\n for attr in self.RECORD_ATTRIBUTES:\n value = getattr(record, attr, None)\n if value:\n data[attr] = value\n return json.dumps(data)\n", "path": "src/borg/logger.py"}]} | 3,561 | 114 |
gh_patches_debug_1102 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1254 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Running `pre-commit` 1.20.0 on Guix gives server certificate verification failed. CAfile: none CRLfile: none
Running `pre-commit` 1.20.0 on Guix gives
```
An unexpected error has occurred: CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', 'fetch', 'origin', '--tags')
Return code: 128
Expected return code: 0
Output: (none)
Errors:
fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none
Traceback (most recent call last):
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 168, in clone_strategy
self._shallow_clone(ref, _git_cmd)
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 150, in _shallow_clone
git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py", line 165, in _git_cmd
cmd_output_b('git', *args, cwd=directory, env=env)
File "/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/util.py", line 147, in cmd_output_b
returncode, cmd, retcode, output=(stdout_b, stderr_b),
pre_commit.util.CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', '-c', 'protocol.version=2', 'fetch', 'origin', 'v2.4.0', '--depth=1')
Return code: 128
Expected return code: 0
Output: (none)
Errors:
fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none
```
It looks like pre-commit sanitises GIT_SSL_CAINFO environment variable. Guix uses this variable to specify the path to the certificates. I fixed this bug by _whitelisting_ GIT_SSL_CAINFO in `no_git_env` in `pre_commit/git.py`.
</issue>
<code>
[start of pre_commit/git.py]
1 from __future__ import unicode_literals
2
3 import logging
4 import os.path
5 import sys
6
7 from pre_commit.util import cmd_output
8 from pre_commit.util import cmd_output_b
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def zsplit(s):
15 s = s.strip('\0')
16 if s:
17 return s.split('\0')
18 else:
19 return []
20
21
22 def no_git_env(_env=None):
23 # Too many bugs dealing with environment variables and GIT:
24 # https://github.com/pre-commit/pre-commit/issues/300
25 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
26 # pre-commit hooks
27 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
28 # while running pre-commit hooks in submodules.
29 # GIT_DIR: Causes git clone to clone wrong thing
30 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
31 _env = _env if _env is not None else os.environ
32 return {
33 k: v for k, v in _env.items()
34 if not k.startswith('GIT_') or
35 k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND'}
36 }
37
38
39 def get_root():
40 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
41
42
43 def get_git_dir(git_root='.'):
44 opts = ('--git-common-dir', '--git-dir')
45 _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)
46 for line, opt in zip(out.splitlines(), opts):
47 if line != opt: # pragma: no branch (git < 2.5)
48 return os.path.normpath(os.path.join(git_root, line))
49 else:
50 raise AssertionError('unreachable: no git dir')
51
52
53 def get_remote_url(git_root):
54 _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)
55 return out.strip()
56
57
58 def is_in_merge_conflict():
59 git_dir = get_git_dir('.')
60 return (
61 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
62 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
63 )
64
65
66 def parse_merge_msg_for_conflicts(merge_msg):
67 # Conflicted files start with tabs
68 return [
69 line.lstrip(b'#').strip().decode('UTF-8')
70 for line in merge_msg.splitlines()
71 # '#\t' for git 2.4.1
72 if line.startswith((b'\t', b'#\t'))
73 ]
74
75
76 def get_conflicted_files():
77 logger.info('Checking merge-conflict files only.')
78 # Need to get the conflicted files from the MERGE_MSG because they could
79 # have resolved the conflict by choosing one side or the other
80 with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:
81 merge_msg = f.read()
82 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
83
84 # This will get the rest of the changes made after the merge.
85 # If they resolved the merge conflict by choosing a mesh of both sides
86 # this will also include the conflicted files
87 tree_hash = cmd_output('git', 'write-tree')[1].strip()
88 merge_diff_filenames = zsplit(
89 cmd_output(
90 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
91 '-m', tree_hash, 'HEAD', 'MERGE_HEAD',
92 )[1],
93 )
94 return set(merge_conflict_filenames) | set(merge_diff_filenames)
95
96
97 def get_staged_files(cwd=None):
98 return zsplit(
99 cmd_output(
100 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',
101 # Everything except for D
102 '--diff-filter=ACMRTUXB',
103 cwd=cwd,
104 )[1],
105 )
106
107
108 def intent_to_add_files():
109 _, stdout, _ = cmd_output('git', 'status', '--porcelain', '-z')
110 parts = list(reversed(zsplit(stdout)))
111 intent_to_add = []
112 while parts:
113 line = parts.pop()
114 status, filename = line[:3], line[3:]
115 if status[0] in {'C', 'R'}: # renames / moves have an additional arg
116 parts.pop()
117 if status[1] == 'A':
118 intent_to_add.append(filename)
119 return intent_to_add
120
121
122 def get_all_files():
123 return zsplit(cmd_output('git', 'ls-files', '-z')[1])
124
125
126 def get_changed_files(new, old):
127 return zsplit(
128 cmd_output(
129 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
130 '{}...{}'.format(old, new),
131 )[1],
132 )
133
134
135 def head_rev(remote):
136 _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')
137 return out.split()[0]
138
139
140 def has_diff(*args, **kwargs):
141 repo = kwargs.pop('repo', '.')
142 assert not kwargs, kwargs
143 cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args
144 return cmd_output_b(*cmd, cwd=repo, retcode=None)[0]
145
146
147 def has_core_hookpaths_set():
148 _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)
149 return bool(out.strip())
150
151
152 def init_repo(path, remote):
153 if os.path.isdir(remote):
154 remote = os.path.abspath(remote)
155
156 env = no_git_env()
157 cmd_output_b('git', 'init', path, env=env)
158 cmd_output_b('git', 'remote', 'add', 'origin', remote, cwd=path, env=env)
159
160
161 def commit(repo='.'):
162 env = no_git_env()
163 name, email = 'pre-commit', '[email protected]'
164 env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
165 env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
166 cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
167 cmd_output_b(*cmd, cwd=repo, env=env)
168
169
170 def git_path(name, repo='.'):
171 _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)
172 return os.path.join(repo, out.strip())
173
174
175 def check_for_cygwin_mismatch():
176 """See https://github.com/pre-commit/pre-commit/issues/354"""
177 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
178 is_cygwin_python = sys.platform == 'cygwin'
179 toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
180 is_cygwin_git = toplevel.startswith('/')
181
182 if is_cygwin_python ^ is_cygwin_git:
183 exe_type = {True: '(cygwin)', False: '(windows)'}
184 logger.warn(
185 'pre-commit has detected a mix of cygwin python / git\n'
186 'This combination is not supported, it is likely you will '
187 'receive an error later in the program.\n'
188 'Make sure to use cygwin git+python while using cygwin\n'
189 'These can be installed through the cygwin installer.\n'
190 ' - python {}\n'
191 ' - git {}\n'.format(
192 exe_type[is_cygwin_python], exe_type[is_cygwin_git],
193 ),
194 )
195
[end of pre_commit/git.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -32,7 +32,7 @@
return {
k: v for k, v in _env.items()
if not k.startswith('GIT_') or
- k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND'}
+ k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO'}
}
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -32,7 +32,7 @@\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n- k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND'}\n+ k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND', 'GIT_SSL_CAINFO'}\n }\n", "issue": "Running `pre-commit` 1.20.0 on Guix gives server certificate verification failed. CAfile: none CRLfile: none\nRunning `pre-commit` 1.20.0 on Guix gives\r\n\r\n```\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', 'fetch', 'origin', '--tags')\r\nReturn code: 128\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none\r\n\r\nTraceback (most recent call last):\r\n File \"/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py\", line 168, in clone_strategy\r\n self._shallow_clone(ref, _git_cmd)\r\n File \"/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py\", line 150, in _shallow_clone\r\n git_cmd('-c', git_config, 'fetch', 'origin', ref, '--depth=1')\r\n File \"/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/store.py\", line 165, in _git_cmd\r\n cmd_output_b('git', *args, cwd=directory, env=env)\r\n File \"/gnu/store/35ppc0zpffbzc6zsw9xnks1hxmr7wh19-python-pre-commit-1.20.0/lib/python3.7/site-packages/pre_commit/util.py\", line 147, in cmd_output_b\r\n returncode, cmd, retcode, output=(stdout_b, stderr_b),\r\npre_commit.util.CalledProcessError: Command: ('/home/igankevich/.guix-profile/bin/git', '-c', 'protocol.version=2', 'fetch', 'origin', 'v2.4.0', '--depth=1')\r\nReturn code: 128\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n fatal: unable to access 'https://github.com/pre-commit/pre-commit-hooks/': server certificate verification failed. CAfile: none CRLfile: none\r\n```\r\n\r\nIt looks like pre-commit sanitises GIT_SSL_CAINFO environment variable. Guix uses this variable to specify the path to the certificates. I fixed this bug by _whitelisting_ GIT_SSL_CAINFO in `no_git_env` in `pre_commit/git.py`.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s):\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env(_env=None):\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' 
during commit\n _env = _env if _env is not None else os.environ\n return {\n k: v for k, v in _env.items()\n if not k.startswith('GIT_') or\n k in {'GIT_EXEC_PATH', 'GIT_SSH', 'GIT_SSH_COMMAND'}\n }\n\n\ndef get_root():\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n\n\ndef get_git_dir(git_root='.'):\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root):\n _, out, _ = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)\n return out.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1],\n )\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files(cwd=None):\n return zsplit(\n cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n cwd=cwd,\n )[1],\n )\n\n\ndef intent_to_add_files():\n _, stdout, _ = cmd_output('git', 'status', '--porcelain', '-z')\n parts = list(reversed(zsplit(stdout)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(new, old):\n return zsplit(\n cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '{}...{}'.format(old, new),\n )[1],\n )\n\n\ndef head_rev(remote):\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args, **kwargs):\n repo = kwargs.pop('repo', '.')\n assert not kwargs, kwargs\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args\n return cmd_output_b(*cmd, cwd=repo, retcode=None)[0]\n\n\ndef has_core_hookpaths_set():\n _, out, _ = cmd_output_b('git', 'config', 'core.hooksPath', retcode=None)\n return bool(out.strip())\n\n\ndef init_repo(path, remote):\n if os.path.isdir(remote):\n remote = os.path.abspath(remote)\n\n env = no_git_env()\n cmd_output_b('git', 
'init', path, env=env)\n cmd_output_b('git', 'remote', 'add', 'origin', remote, cwd=path, env=env)\n\n\ndef commit(repo='.'):\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output_b(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name, repo='.'):\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python], exe_type[is_cygwin_git],\n ),\n )\n", "path": "pre_commit/git.py"}]} | 3,337 | 115 |
gh_patches_debug_8396 | rasdani/github-patches | git_diff | cupy__cupy-1209 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
default type of `full`is int in numpy but float in cupy
```python
In [53]: np.full((2,2), -1)
Out[53]:
array([[-1, -1],
[-1, -1]])
In [54]: cp.full((2,2), -1)
Out[54]:
array([[-1., -1.],
[-1., -1.]])
In [55]: cp.full((2,2), -1, dtype=int)
Out[55]:
array([[-1, -1],
[-1, -1]])
```
</issue>
<code>
[start of cupy/creation/basic.py]
1 import cupy
2
3
4 def empty(shape, dtype=float, order='C'):
5 """Returns an array without initializing the elements.
6
7 Args:
8 shape (tuple of ints): Dimensionalities of the array.
9 dtype: Data type specifier.
10 order ({'C', 'F'}): Row-major (C-style) or column-major
11 (Fortran-style) order.
12
13 Returns:
14 cupy.ndarray: A new array with elements not initialized.
15
16 .. seealso:: :func:`numpy.empty`
17
18 """
19 return cupy.ndarray(shape, dtype=dtype, order=order)
20
21
22 def empty_like(a, dtype=None):
23 """Returns a new array with same shape and dtype of a given array.
24
25 This function currently does not support ``order`` and ``subok`` options.
26
27 Args:
28 a (cupy.ndarray): Base array.
29 dtype: Data type specifier. The data type of ``a`` is used by default.
30
31 Returns:
32 cupy.ndarray: A new array with same shape and dtype of ``a`` with
33 elements not initialized.
34
35 .. seealso:: :func:`numpy.empty_like`
36
37 """
38 # TODO(beam2d): Support ordering option
39 if dtype is None:
40 dtype = a.dtype
41 return cupy.ndarray(a.shape, dtype=dtype)
42
43
44 def eye(N, M=None, k=0, dtype=float):
45 """Returns a 2-D array with ones on the diagonals and zeros elsewhere.
46
47 Args:
48 N (int): Number of rows.
49 M (int): Number of columns. M == N by default.
50 k (int): Index of the diagonal. Zero indicates the main diagonal,
51 a positive index an upper diagonal, and a negative index a lower
52 diagonal.
53 dtype: Data type specifier.
54
55 Returns:
56 cupy.ndarray: A 2-D array with given diagonals filled with ones and
57 zeros elsewhere.
58
59 .. seealso:: :func:`numpy.eye`
60
61 """
62 if M is None:
63 M = N
64 ret = zeros((N, M), dtype)
65 ret.diagonal(k)[:] = 1
66 return ret
67
68
69 def identity(n, dtype=float):
70 """Returns a 2-D identity array.
71
72 It is equivalent to ``eye(n, n, dtype)``.
73
74 Args:
75 n (int): Number of rows and columns.
76 dtype: Data type specifier.
77
78 Returns:
79 cupy.ndarray: A 2-D identity array.
80
81 .. seealso:: :func:`numpy.identity`
82
83 """
84 return eye(n, dtype=dtype)
85
86
87 def ones(shape, dtype=float):
88 """Returns a new array of given shape and dtype, filled with ones.
89
90 This function currently does not support ``order`` option.
91
92 Args:
93 shape (tuple of ints): Dimensionalities of the array.
94 dtype: Data type specifier.
95
96 Returns:
97 cupy.ndarray: An array filled with ones.
98
99 .. seealso:: :func:`numpy.ones`
100
101 """
102 # TODO(beam2d): Support ordering option
103 a = cupy.ndarray(shape, dtype=dtype)
104 a.fill(1)
105 return a
106
107
108 def ones_like(a, dtype=None):
109 """Returns an array of ones with same shape and dtype as a given array.
110
111 This function currently does not support ``order`` and ``subok`` options.
112
113 Args:
114 a (cupy.ndarray): Base array.
115 dtype: Data type specifier. The dtype of ``a`` is used by default.
116
117 Returns:
118 cupy.ndarray: An array filled with ones.
119
120 .. seealso:: :func:`numpy.ones_like`
121
122 """
123 # TODO(beam2d): Support ordering option
124 if dtype is None:
125 dtype = a.dtype
126 a = cupy.ndarray(a.shape, dtype=dtype)
127 a.fill(1)
128 return a
129
130
131 def zeros(shape, dtype=float, order='C'):
132 """Returns a new array of given shape and dtype, filled with zeros.
133
134 Args:
135 shape (tuple of ints): Dimensionalities of the array.
136 dtype: Data type specifier.
137 order ({'C', 'F'}): Row-major (C-style) or column-major
138 (Fortran-style) order.
139
140 Returns:
141 cupy.ndarray: An array filled with ones.
142
143 .. seealso:: :func:`numpy.zeros`
144
145 """
146 a = cupy.ndarray(shape, dtype, order=order)
147 a.data.memset_async(0, a.nbytes)
148 return a
149
150
151 def zeros_like(a, dtype=None):
152 """Returns an array of zeros with same shape and dtype as a given array.
153
154 This function currently does not support ``order`` and ``subok`` options.
155
156 Args:
157 a (cupy.ndarray): Base array.
158 dtype: Data type specifier. The dtype of ``a`` is used by default.
159
160 Returns:
161 cupy.ndarray: An array filled with ones.
162
163 .. seealso:: :func:`numpy.zeros_like`
164
165 """
166 # TODO(beam2d): Support ordering option
167 if dtype is None:
168 dtype = a.dtype
169 a = cupy.ndarray(a.shape, dtype)
170 a.data.memset_async(0, a.nbytes)
171 return a
172
173
174 def full(shape, fill_value, dtype=None):
175 """Returns a new array of given shape and dtype, filled with a given value.
176
177 This function currently does not support ``order`` option.
178
179 Args:
180 shape (tuple of ints): Dimensionalities of the array.
181 fill_value: A scalar value to fill a new array.
182 dtype: Data type specifier.
183
184 Returns:
185 cupy.ndarray: An array filled with ``fill_value``.
186
187 .. seealso:: :func:`numpy.full`
188
189 """
190 # TODO(beam2d): Support ordering option
191 a = cupy.ndarray(shape, dtype=dtype)
192 a.fill(fill_value)
193 return a
194
195
196 def full_like(a, fill_value, dtype=None):
197 """Returns a full array with same shape and dtype as a given array.
198
199 This function currently does not support ``order`` and ``subok`` options.
200
201 Args:
202 a (cupy.ndarray): Base array.
203 fill_value: A scalar value to fill a new array.
204 dtype: Data type specifier. The dtype of ``a`` is used by default.
205
206 Returns:
207 cupy.ndarray: An array filled with ``fill_value``.
208
209 .. seealso:: :func:`numpy.full_like`
210
211 """
212 # TODO(beam2d): Support ordering option
213 if dtype is None:
214 dtype = a.dtype
215 a = cupy.ndarray(a.shape, dtype=dtype)
216 a.fill(fill_value)
217 return a
218
[end of cupy/creation/basic.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cupy/creation/basic.py b/cupy/creation/basic.py
--- a/cupy/creation/basic.py
+++ b/cupy/creation/basic.py
@@ -1,4 +1,5 @@
import cupy
+import numpy
def empty(shape, dtype=float, order='C'):
@@ -188,6 +189,11 @@
"""
# TODO(beam2d): Support ordering option
+ if dtype is None:
+ if isinstance(fill_value, cupy.ndarray):
+ dtype = fill_value.dtype
+ else:
+ dtype = numpy.array(fill_value).dtype
a = cupy.ndarray(shape, dtype=dtype)
a.fill(fill_value)
return a
| {"golden_diff": "diff --git a/cupy/creation/basic.py b/cupy/creation/basic.py\n--- a/cupy/creation/basic.py\n+++ b/cupy/creation/basic.py\n@@ -1,4 +1,5 @@\n import cupy\n+import numpy\n \n \n def empty(shape, dtype=float, order='C'):\n@@ -188,6 +189,11 @@\n \n \"\"\"\n # TODO(beam2d): Support ordering option\n+ if dtype is None:\n+ if isinstance(fill_value, cupy.ndarray):\n+ dtype = fill_value.dtype\n+ else:\n+ dtype = numpy.array(fill_value).dtype\n a = cupy.ndarray(shape, dtype=dtype)\n a.fill(fill_value)\n return a\n", "issue": "default type of `full`is int in numpy but float in cupy\n```python\r\nIn [53]: np.full((2,2), -1)\r\nOut[53]:\r\narray([[-1, -1],\r\n [-1, -1]])\r\n\r\nIn [54]: cp.full((2,2), -1)\r\nOut[54]:\r\narray([[-1., -1.],\r\n [-1., -1.]])\r\n\r\nIn [55]: cp.full((2,2), -1, dtype=int)\r\nOut[55]:\r\narray([[-1, -1],\r\n [-1, -1]])\r\n```\n", "before_files": [{"content": "import cupy\n\n\ndef empty(shape, dtype=float, order='C'):\n \"\"\"Returns an array without initializing the elements.\n\n Args:\n shape (tuple of ints): Dimensionalities of the array.\n dtype: Data type specifier.\n order ({'C', 'F'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n\n Returns:\n cupy.ndarray: A new array with elements not initialized.\n\n .. seealso:: :func:`numpy.empty`\n\n \"\"\"\n return cupy.ndarray(shape, dtype=dtype, order=order)\n\n\ndef empty_like(a, dtype=None):\n \"\"\"Returns a new array with same shape and dtype of a given array.\n\n This function currently does not support ``order`` and ``subok`` options.\n\n Args:\n a (cupy.ndarray): Base array.\n dtype: Data type specifier. The data type of ``a`` is used by default.\n\n Returns:\n cupy.ndarray: A new array with same shape and dtype of ``a`` with\n elements not initialized.\n\n .. seealso:: :func:`numpy.empty_like`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n if dtype is None:\n dtype = a.dtype\n return cupy.ndarray(a.shape, dtype=dtype)\n\n\ndef eye(N, M=None, k=0, dtype=float):\n \"\"\"Returns a 2-D array with ones on the diagonals and zeros elsewhere.\n\n Args:\n N (int): Number of rows.\n M (int): Number of columns. M == N by default.\n k (int): Index of the diagonal. Zero indicates the main diagonal,\n a positive index an upper diagonal, and a negative index a lower\n diagonal.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: A 2-D array with given diagonals filled with ones and\n zeros elsewhere.\n\n .. seealso:: :func:`numpy.eye`\n\n \"\"\"\n if M is None:\n M = N\n ret = zeros((N, M), dtype)\n ret.diagonal(k)[:] = 1\n return ret\n\n\ndef identity(n, dtype=float):\n \"\"\"Returns a 2-D identity array.\n\n It is equivalent to ``eye(n, n, dtype)``.\n\n Args:\n n (int): Number of rows and columns.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: A 2-D identity array.\n\n .. seealso:: :func:`numpy.identity`\n\n \"\"\"\n return eye(n, dtype=dtype)\n\n\ndef ones(shape, dtype=float):\n \"\"\"Returns a new array of given shape and dtype, filled with ones.\n\n This function currently does not support ``order`` option.\n\n Args:\n shape (tuple of ints): Dimensionalities of the array.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: An array filled with ones.\n\n .. 
seealso:: :func:`numpy.ones`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n a = cupy.ndarray(shape, dtype=dtype)\n a.fill(1)\n return a\n\n\ndef ones_like(a, dtype=None):\n \"\"\"Returns an array of ones with same shape and dtype as a given array.\n\n This function currently does not support ``order`` and ``subok`` options.\n\n Args:\n a (cupy.ndarray): Base array.\n dtype: Data type specifier. The dtype of ``a`` is used by default.\n\n Returns:\n cupy.ndarray: An array filled with ones.\n\n .. seealso:: :func:`numpy.ones_like`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n if dtype is None:\n dtype = a.dtype\n a = cupy.ndarray(a.shape, dtype=dtype)\n a.fill(1)\n return a\n\n\ndef zeros(shape, dtype=float, order='C'):\n \"\"\"Returns a new array of given shape and dtype, filled with zeros.\n\n Args:\n shape (tuple of ints): Dimensionalities of the array.\n dtype: Data type specifier.\n order ({'C', 'F'}): Row-major (C-style) or column-major\n (Fortran-style) order.\n\n Returns:\n cupy.ndarray: An array filled with ones.\n\n .. seealso:: :func:`numpy.zeros`\n\n \"\"\"\n a = cupy.ndarray(shape, dtype, order=order)\n a.data.memset_async(0, a.nbytes)\n return a\n\n\ndef zeros_like(a, dtype=None):\n \"\"\"Returns an array of zeros with same shape and dtype as a given array.\n\n This function currently does not support ``order`` and ``subok`` options.\n\n Args:\n a (cupy.ndarray): Base array.\n dtype: Data type specifier. The dtype of ``a`` is used by default.\n\n Returns:\n cupy.ndarray: An array filled with ones.\n\n .. seealso:: :func:`numpy.zeros_like`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n if dtype is None:\n dtype = a.dtype\n a = cupy.ndarray(a.shape, dtype)\n a.data.memset_async(0, a.nbytes)\n return a\n\n\ndef full(shape, fill_value, dtype=None):\n \"\"\"Returns a new array of given shape and dtype, filled with a given value.\n\n This function currently does not support ``order`` option.\n\n Args:\n shape (tuple of ints): Dimensionalities of the array.\n fill_value: A scalar value to fill a new array.\n dtype: Data type specifier.\n\n Returns:\n cupy.ndarray: An array filled with ``fill_value``.\n\n .. seealso:: :func:`numpy.full`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n a = cupy.ndarray(shape, dtype=dtype)\n a.fill(fill_value)\n return a\n\n\ndef full_like(a, fill_value, dtype=None):\n \"\"\"Returns a full array with same shape and dtype as a given array.\n\n This function currently does not support ``order`` and ``subok`` options.\n\n Args:\n a (cupy.ndarray): Base array.\n fill_value: A scalar value to fill a new array.\n dtype: Data type specifier. The dtype of ``a`` is used by default.\n\n Returns:\n cupy.ndarray: An array filled with ``fill_value``.\n\n .. seealso:: :func:`numpy.full_like`\n\n \"\"\"\n # TODO(beam2d): Support ordering option\n if dtype is None:\n dtype = a.dtype\n a = cupy.ndarray(a.shape, dtype=dtype)\n a.fill(fill_value)\n return a\n", "path": "cupy/creation/basic.py"}]} | 2,685 | 159 |
gh_patches_debug_8778 | rasdani/github-patches | git_diff | pytorch__ignite-1330 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docstring of Canberra metric warning
Following this comment
> @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924
thanks !
> Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric
> _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_
Namespace are shared so reference should be unique
</issue>
<code>
[start of ignite/contrib/metrics/regression/canberra_metric.py]
1 from typing import Callable, Union
2
3 import torch
4
5 from ignite.contrib.metrics.regression._base import _BaseRegression
6 from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
7
8
9 class CanberraMetric(_BaseRegression):
10 r"""
11 Calculates the Canberra Metric.
12
13 :math:`\text{CM} = \sum_{j=1}^n\frac{|A_j - P_j|}{|A_j| + |P_j|}`
14
15 where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.
16
17 More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_
18
19 - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
20 - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
21
22 .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006
23 .. _scikit-learn distance metrics:
24 https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
25
26 """
27
28 def __init__(
29 self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
30 ):
31 self._sum_of_errors = None
32 super(CanberraMetric, self).__init__(output_transform, device)
33
34 @reinit__is_reduced
35 def reset(self):
36 self._sum_of_errors = torch.tensor(0.0, device=self._device)
37
38 def _update(self, output):
39 y_pred, y = output
40 errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))
41 self._sum_of_errors += torch.sum(errors).to(self._device)
42
43 @sync_all_reduce("_sum_of_errors")
44 def compute(self):
45 return self._sum_of_errors.item()
46
[end of ignite/contrib/metrics/regression/canberra_metric.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py
--- a/ignite/contrib/metrics/regression/canberra_metric.py
+++ b/ignite/contrib/metrics/regression/canberra_metric.py
@@ -19,7 +19,6 @@
- ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
- `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.
- .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006
.. _scikit-learn distance metrics:
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html
| {"golden_diff": "diff --git a/ignite/contrib/metrics/regression/canberra_metric.py b/ignite/contrib/metrics/regression/canberra_metric.py\n--- a/ignite/contrib/metrics/regression/canberra_metric.py\n+++ b/ignite/contrib/metrics/regression/canberra_metric.py\n@@ -19,7 +19,6 @@\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n \n- .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n", "issue": "Docstring of Canberra metric warning\nFollowing this comment \r\n\r\n\r\n> @sdesrozis could you please investigate why there is a warning here : https://travis-ci.org/github/pytorch/ignite/jobs/730492404#L2924 \r\nthanks !\r\n> Doc link is apparently badly rendered : https://pytorch.org/ignite/master/contrib/metrics.html#ignite.contrib.metrics.regression.CanberraMetric\r\n> _Originally posted by @vfdev-5 in https://github.com/pytorch/ignite/pull/1314#issuecomment-699506241_\r\n\r\nNamespace are shared so reference should be unique\r\n\n", "before_files": [{"content": "from typing import Callable, Union\n\nimport torch\n\nfrom ignite.contrib.metrics.regression._base import _BaseRegression\nfrom ignite.metrics.metric import reinit__is_reduced, sync_all_reduce\n\n\nclass CanberraMetric(_BaseRegression):\n r\"\"\"\n Calculates the Canberra Metric.\n\n :math:`\\text{CM} = \\sum_{j=1}^n\\frac{|A_j - P_j|}{|A_j| + |P_j|}`\n\n where, :math:`A_j` is the ground truth and :math:`P_j` is the predicted value.\n\n More details can be found in `Botchkarev 2018`_ or `scikit-learn distance metrics`_\n\n - ``update`` must receive output of the form ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.\n - `y` and `y_pred` must be of same shape `(N, )` or `(N, 1)`.\n\n .. _Botchkarev 2018: https://arxiv.org/abs/1809.03006\n .. _scikit-learn distance metrics:\n https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.DistanceMetric.html\n\n \"\"\"\n\n def __init__(\n self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device(\"cpu\")\n ):\n self._sum_of_errors = None\n super(CanberraMetric, self).__init__(output_transform, device)\n\n @reinit__is_reduced\n def reset(self):\n self._sum_of_errors = torch.tensor(0.0, device=self._device)\n\n def _update(self, output):\n y_pred, y = output\n errors = torch.abs(y - y_pred) / (torch.abs(y_pred) + torch.abs(y))\n self._sum_of_errors += torch.sum(errors).to(self._device)\n\n @sync_all_reduce(\"_sum_of_errors\")\n def compute(self):\n return self._sum_of_errors.item()\n", "path": "ignite/contrib/metrics/regression/canberra_metric.py"}]} | 1,232 | 201 |
gh_patches_debug_24020 | rasdani/github-patches | git_diff | pyca__cryptography-1028 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MultiBackend doesn't actually provide RSABackend.
We've been kind of haphazardly adding things to the interface as we add features and it's resulted in an incompletely implementation of MultiBackend.
</issue>
<code>
[start of cryptography/hazmat/backends/multibackend.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
10 # implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 from __future__ import absolute_import, division, print_function
15
16 from cryptography import utils
17 from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
18 from cryptography.hazmat.backends.interfaces import (
19 CMACBackend, CipherBackend, DSABackend, HMACBackend, HashBackend,
20 PBKDF2HMACBackend, RSABackend
21 )
22
23
24 @utils.register_interface(CMACBackend)
25 @utils.register_interface(CipherBackend)
26 @utils.register_interface(HashBackend)
27 @utils.register_interface(HMACBackend)
28 @utils.register_interface(PBKDF2HMACBackend)
29 @utils.register_interface(RSABackend)
30 @utils.register_interface(DSABackend)
31 class MultiBackend(object):
32 name = "multibackend"
33
34 def __init__(self, backends):
35 self._backends = backends
36
37 def _filtered_backends(self, interface):
38 for b in self._backends:
39 if isinstance(b, interface):
40 yield b
41
42 def cipher_supported(self, algorithm, mode):
43 return any(
44 b.cipher_supported(algorithm, mode)
45 for b in self._filtered_backends(CipherBackend)
46 )
47
48 def create_symmetric_encryption_ctx(self, algorithm, mode):
49 for b in self._filtered_backends(CipherBackend):
50 try:
51 return b.create_symmetric_encryption_ctx(algorithm, mode)
52 except UnsupportedAlgorithm:
53 pass
54 raise UnsupportedAlgorithm(
55 "cipher {0} in {1} mode is not supported by this backend".format(
56 algorithm.name, mode.name if mode else mode),
57 _Reasons.UNSUPPORTED_CIPHER
58 )
59
60 def create_symmetric_decryption_ctx(self, algorithm, mode):
61 for b in self._filtered_backends(CipherBackend):
62 try:
63 return b.create_symmetric_decryption_ctx(algorithm, mode)
64 except UnsupportedAlgorithm:
65 pass
66 raise UnsupportedAlgorithm(
67 "cipher {0} in {1} mode is not supported by this backend".format(
68 algorithm.name, mode.name if mode else mode),
69 _Reasons.UNSUPPORTED_CIPHER
70 )
71
72 def hash_supported(self, algorithm):
73 return any(
74 b.hash_supported(algorithm)
75 for b in self._filtered_backends(HashBackend)
76 )
77
78 def create_hash_ctx(self, algorithm):
79 for b in self._filtered_backends(HashBackend):
80 try:
81 return b.create_hash_ctx(algorithm)
82 except UnsupportedAlgorithm:
83 pass
84 raise UnsupportedAlgorithm(
85 "{0} is not a supported hash on this backend".format(
86 algorithm.name),
87 _Reasons.UNSUPPORTED_HASH
88 )
89
90 def hmac_supported(self, algorithm):
91 return any(
92 b.hmac_supported(algorithm)
93 for b in self._filtered_backends(HMACBackend)
94 )
95
96 def create_hmac_ctx(self, key, algorithm):
97 for b in self._filtered_backends(HMACBackend):
98 try:
99 return b.create_hmac_ctx(key, algorithm)
100 except UnsupportedAlgorithm:
101 pass
102 raise UnsupportedAlgorithm(
103 "{0} is not a supported hash on this backend".format(
104 algorithm.name),
105 _Reasons.UNSUPPORTED_HASH
106 )
107
108 def pbkdf2_hmac_supported(self, algorithm):
109 return any(
110 b.pbkdf2_hmac_supported(algorithm)
111 for b in self._filtered_backends(PBKDF2HMACBackend)
112 )
113
114 def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,
115 key_material):
116 for b in self._filtered_backends(PBKDF2HMACBackend):
117 try:
118 return b.derive_pbkdf2_hmac(
119 algorithm, length, salt, iterations, key_material
120 )
121 except UnsupportedAlgorithm:
122 pass
123 raise UnsupportedAlgorithm(
124 "{0} is not a supported hash on this backend".format(
125 algorithm.name),
126 _Reasons.UNSUPPORTED_HASH
127 )
128
129 def generate_rsa_private_key(self, public_exponent, key_size):
130 for b in self._filtered_backends(RSABackend):
131 return b.generate_rsa_private_key(public_exponent, key_size)
132 raise UnsupportedAlgorithm("RSA is not supported by the backend",
133 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
134
135 def create_rsa_signature_ctx(self, private_key, padding, algorithm):
136 for b in self._filtered_backends(RSABackend):
137 return b.create_rsa_signature_ctx(private_key, padding, algorithm)
138 raise UnsupportedAlgorithm("RSA is not supported by the backend",
139 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
140
141 def create_rsa_verification_ctx(self, public_key, signature, padding,
142 algorithm):
143 for b in self._filtered_backends(RSABackend):
144 return b.create_rsa_verification_ctx(public_key, signature,
145 padding, algorithm)
146 raise UnsupportedAlgorithm("RSA is not supported by the backend",
147 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
148
149 def generate_dsa_parameters(self, key_size):
150 for b in self._filtered_backends(DSABackend):
151 return b.generate_dsa_parameters(key_size)
152 raise UnsupportedAlgorithm("DSA is not supported by the backend",
153 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
154
155 def generate_dsa_private_key(self, parameters):
156 for b in self._filtered_backends(DSABackend):
157 return b.generate_dsa_private_key(parameters)
158 raise UnsupportedAlgorithm("DSA is not supported by the backend",
159 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
160
161 def create_dsa_verification_ctx(self, public_key, signature, algorithm):
162 for b in self._filtered_backends(DSABackend):
163 return b.create_dsa_verification_ctx(public_key, signature,
164 algorithm)
165 raise UnsupportedAlgorithm("DSA is not supported by the backend",
166 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
167
168 def create_dsa_signature_ctx(self, private_key, algorithm):
169 for b in self._filtered_backends(DSABackend):
170 return b.create_dsa_signature_ctx(private_key, algorithm)
171 raise UnsupportedAlgorithm("DSA is not supported by the backend",
172 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
173
174 def dsa_hash_supported(self, algorithm):
175 for b in self._filtered_backends(DSABackend):
176 return b.dsa_hash_supported(algorithm)
177 raise UnsupportedAlgorithm("DSA is not supported by the backend",
178 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
179
180 def dsa_parameters_supported(self, p, q, g):
181 for b in self._filtered_backends(DSABackend):
182 return b.dsa_parameters_supported(p, q, g)
183 raise UnsupportedAlgorithm("DSA is not supported by the backend",
184 _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
185
186 def cmac_algorithm_supported(self, algorithm):
187 return any(
188 b.cmac_algorithm_supported(algorithm)
189 for b in self._filtered_backends(CMACBackend)
190 )
191
192 def create_cmac_ctx(self, algorithm):
193 for b in self._filtered_backends(CMACBackend):
194 try:
195 return b.create_cmac_ctx(algorithm)
196 except UnsupportedAlgorithm:
197 pass
198 raise UnsupportedAlgorithm("This backend does not support CMAC",
199 _Reasons.UNSUPPORTED_CIPHER)
200
[end of cryptography/hazmat/backends/multibackend.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cryptography/hazmat/backends/multibackend.py b/cryptography/hazmat/backends/multibackend.py
--- a/cryptography/hazmat/backends/multibackend.py
+++ b/cryptography/hazmat/backends/multibackend.py
@@ -146,6 +146,24 @@
raise UnsupportedAlgorithm("RSA is not supported by the backend",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
+ def mgf1_hash_supported(self, algorithm):
+ for b in self._filtered_backends(RSABackend):
+ return b.mgf1_hash_supported(algorithm)
+ raise UnsupportedAlgorithm("RSA is not supported by the backend",
+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
+
+ def decrypt_rsa(self, private_key, ciphertext, padding):
+ for b in self._filtered_backends(RSABackend):
+ return b.decrypt_rsa(private_key, ciphertext, padding)
+ raise UnsupportedAlgorithm("RSA is not supported by the backend",
+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
+
+ def encrypt_rsa(self, public_key, plaintext, padding):
+ for b in self._filtered_backends(RSABackend):
+ return b.encrypt_rsa(public_key, plaintext, padding)
+ raise UnsupportedAlgorithm("RSA is not supported by the backend",
+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
+
def generate_dsa_parameters(self, key_size):
for b in self._filtered_backends(DSABackend):
return b.generate_dsa_parameters(key_size)
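
For illustration, a minimal sketch of how the RSA delegation added in this patch could be exercised. The `_DummyRSA` class and its canned return values are hypothetical stand-ins registered against the `RSABackend` interface; only `MultiBackend`, `RSABackend` and `utils.register_interface` come from the code shown above.

```python
# Hypothetical smoke test for the RSABackend delegation; the dummy backend and
# its fixed return values are invented purely for illustration.
from cryptography import utils
from cryptography.hazmat.backends.interfaces import RSABackend
from cryptography.hazmat.backends.multibackend import MultiBackend


@utils.register_interface(RSABackend)
class _DummyRSA(object):
    def encrypt_rsa(self, public_key, plaintext, padding):
        return b"ciphertext"

    def decrypt_rsa(self, private_key, ciphertext, padding):
        return b"plaintext"

    def mgf1_hash_supported(self, algorithm):
        return True


backend = MultiBackend([_DummyRSA()])
assert backend.encrypt_rsa(None, b"msg", None) == b"ciphertext"
assert backend.decrypt_rsa(None, b"ct", None) == b"plaintext"
assert backend.mgf1_hash_supported(None) is True
```

With no RSA-capable backend in the list, each of these calls would instead raise `UnsupportedAlgorithm`, mirroring the existing DSA methods.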
| {"golden_diff": "diff --git a/cryptography/hazmat/backends/multibackend.py b/cryptography/hazmat/backends/multibackend.py\n--- a/cryptography/hazmat/backends/multibackend.py\n+++ b/cryptography/hazmat/backends/multibackend.py\n@@ -146,6 +146,24 @@\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n \n+ def mgf1_hash_supported(self, algorithm):\n+ for b in self._filtered_backends(RSABackend):\n+ return b.mgf1_hash_supported(algorithm)\n+ raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n+\n+ def decrypt_rsa(self, private_key, ciphertext, padding):\n+ for b in self._filtered_backends(RSABackend):\n+ return b.decrypt_rsa(private_key, ciphertext, padding)\n+ raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n+\n+ def encrypt_rsa(self, public_key, plaintext, padding):\n+ for b in self._filtered_backends(RSABackend):\n+ return b.encrypt_rsa(public_key, plaintext, padding)\n+ raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n+ _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n+\n def generate_dsa_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_parameters(key_size)\n", "issue": "MultiBackend doesn't actually provide RSABackend.\nWe've been kind of haphazardly adding things to the interface as we add features and it's resulted in an incompletely implementation of MultiBackend.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm, _Reasons\nfrom cryptography.hazmat.backends.interfaces import (\n CMACBackend, CipherBackend, DSABackend, HMACBackend, HashBackend,\n PBKDF2HMACBackend, RSABackend\n)\n\n\[email protected]_interface(CMACBackend)\[email protected]_interface(CipherBackend)\[email protected]_interface(HashBackend)\[email protected]_interface(HMACBackend)\[email protected]_interface(PBKDF2HMACBackend)\[email protected]_interface(RSABackend)\[email protected]_interface(DSABackend)\nclass MultiBackend(object):\n name = \"multibackend\"\n\n def __init__(self, backends):\n self._backends = backends\n\n def _filtered_backends(self, interface):\n for b in self._backends:\n if isinstance(b, interface):\n yield b\n\n def cipher_supported(self, algorithm, mode):\n return any(\n b.cipher_supported(algorithm, mode)\n for b in self._filtered_backends(CipherBackend)\n )\n\n def create_symmetric_encryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_encryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n 
)\n\n def create_symmetric_decryption_ctx(self, algorithm, mode):\n for b in self._filtered_backends(CipherBackend):\n try:\n return b.create_symmetric_decryption_ctx(algorithm, mode)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"cipher {0} in {1} mode is not supported by this backend\".format(\n algorithm.name, mode.name if mode else mode),\n _Reasons.UNSUPPORTED_CIPHER\n )\n\n def hash_supported(self, algorithm):\n return any(\n b.hash_supported(algorithm)\n for b in self._filtered_backends(HashBackend)\n )\n\n def create_hash_ctx(self, algorithm):\n for b in self._filtered_backends(HashBackend):\n try:\n return b.create_hash_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def hmac_supported(self, algorithm):\n return any(\n b.hmac_supported(algorithm)\n for b in self._filtered_backends(HMACBackend)\n )\n\n def create_hmac_ctx(self, key, algorithm):\n for b in self._filtered_backends(HMACBackend):\n try:\n return b.create_hmac_ctx(key, algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def pbkdf2_hmac_supported(self, algorithm):\n return any(\n b.pbkdf2_hmac_supported(algorithm)\n for b in self._filtered_backends(PBKDF2HMACBackend)\n )\n\n def derive_pbkdf2_hmac(self, algorithm, length, salt, iterations,\n key_material):\n for b in self._filtered_backends(PBKDF2HMACBackend):\n try:\n return b.derive_pbkdf2_hmac(\n algorithm, length, salt, iterations, key_material\n )\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\n \"{0} is not a supported hash on this backend\".format(\n algorithm.name),\n _Reasons.UNSUPPORTED_HASH\n )\n\n def generate_rsa_private_key(self, public_exponent, key_size):\n for b in self._filtered_backends(RSABackend):\n return b.generate_rsa_private_key(public_exponent, key_size)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_signature_ctx(self, private_key, padding, algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_signature_ctx(private_key, padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_rsa_verification_ctx(self, public_key, signature, padding,\n algorithm):\n for b in self._filtered_backends(RSABackend):\n return b.create_rsa_verification_ctx(public_key, signature,\n padding, algorithm)\n raise UnsupportedAlgorithm(\"RSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_parameters(self, key_size):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_parameters(key_size)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def generate_dsa_private_key(self, parameters):\n for b in self._filtered_backends(DSABackend):\n return b.generate_dsa_private_key(parameters)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_verification_ctx(self, public_key, signature, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_verification_ctx(public_key, signature,\n algorithm)\n raise 
UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def create_dsa_signature_ctx(self, private_key, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.create_dsa_signature_ctx(private_key, algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_hash_supported(self, algorithm):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_hash_supported(algorithm)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def dsa_parameters_supported(self, p, q, g):\n for b in self._filtered_backends(DSABackend):\n return b.dsa_parameters_supported(p, q, g)\n raise UnsupportedAlgorithm(\"DSA is not supported by the backend\",\n _Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)\n\n def cmac_algorithm_supported(self, algorithm):\n return any(\n b.cmac_algorithm_supported(algorithm)\n for b in self._filtered_backends(CMACBackend)\n )\n\n def create_cmac_ctx(self, algorithm):\n for b in self._filtered_backends(CMACBackend):\n try:\n return b.create_cmac_ctx(algorithm)\n except UnsupportedAlgorithm:\n pass\n raise UnsupportedAlgorithm(\"This backend does not support CMAC\",\n _Reasons.UNSUPPORTED_CIPHER)\n", "path": "cryptography/hazmat/backends/multibackend.py"}]} | 2,759 | 351 |
gh_patches_debug_34615 | rasdani/github-patches | git_diff | kserve__kserve-292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support credentials for Azure Blobs
/kind feature
**Describe the solution you'd like**
1. Credentials builder should support exposing azure blob credentials to model initializer
2. Downloader should read and use credentials
**Anything else you would like to add:**
Here is how az creds are setup for kubeflow: https://github.com/kubeflow/kubeflow/pull/2676
</issue>
<code>
[start of python/kfserving/kfserving/storage.py]
1 # Copyright 2019 kubeflow.org.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import glob
16 import logging
17 import tempfile
18 import os
19 import re
20 from azure.storage.blob import BlockBlobService
21 from google.auth import exceptions
22 from google.cloud import storage
23 from minio import Minio
24
25 _GCS_PREFIX = "gs://"
26 _S3_PREFIX = "s3://"
27 _BLOB_RE = "https://(.+?).blob.core.windows.net/(.+)"
28 _LOCAL_PREFIX = "file://"
29
30
31 class Storage(object): # pylint: disable=too-few-public-methods
32 @staticmethod
33 def download(uri: str, out_dir: str = None) -> str:
34 logging.info("Copying contents of %s to local", uri)
35
36 is_local = False
37 if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):
38 is_local = True
39
40 if out_dir is None:
41 if is_local:
42 # noop if out_dir is not set and the path is local
43 return Storage._download_local(uri)
44 out_dir = tempfile.mkdtemp()
45
46 if uri.startswith(_GCS_PREFIX):
47 Storage._download_gcs(uri, out_dir)
48 elif uri.startswith(_S3_PREFIX):
49 Storage._download_s3(uri, out_dir)
50 elif re.search(_BLOB_RE, uri):
51 Storage._download_blob(uri, out_dir)
52 elif is_local:
53 return Storage._download_local(uri, out_dir)
54 else:
55 raise Exception("Cannot recognize storage type for " + uri +
56 "\n'%s', '%s', and '%s' are the current available storage type." %
57 (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))
58
59 logging.info("Successfully copied %s to %s", uri, out_dir)
60 return out_dir
61
62 @staticmethod
63 def _download_s3(uri, temp_dir: str):
64 client = Storage._create_minio_client()
65 bucket_args = uri.replace(_S3_PREFIX, "", 1).split("/", 1)
66 bucket_name = bucket_args[0]
67 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
68 objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)
69 for obj in objects:
70 # Replace any prefix from the object key with temp_dir
71 subdir_object_key = obj.object_name.replace(bucket_path, "", 1).strip("/")
72 client.fget_object(bucket_name, obj.object_name,
73 os.path.join(temp_dir, subdir_object_key))
74
75 @staticmethod
76 def _download_gcs(uri, temp_dir: str):
77 try:
78 storage_client = storage.Client()
79 except exceptions.DefaultCredentialsError:
80 storage_client = storage.Client.create_anonymous_client()
81 bucket_args = uri.replace(_GCS_PREFIX, "", 1).split("/", 1)
82 bucket_name = bucket_args[0]
83 bucket_path = bucket_args[1] if len(bucket_args) > 1 else ""
84 bucket = storage_client.bucket(bucket_name)
85 prefix = bucket_path
86 if not prefix.endswith("/"):
87 prefix = prefix + "/"
88 blobs = bucket.list_blobs(prefix=prefix)
89 for blob in blobs:
90 # Replace any prefix from the object key with temp_dir
91 subdir_object_key = blob.name.replace(bucket_path, "", 1).strip("/")
92
93 # Create necessary subdirectory to store the object locally
94 if "/" in subdir_object_key:
95 local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit("/", 1)[0])
96 if not os.path.isdir(local_object_dir):
97 os.makedirs(local_object_dir, exist_ok=True)
98 if subdir_object_key.strip() != "":
99 dest_path = os.path.join(temp_dir, subdir_object_key)
100 logging.info("Downloading: %s", dest_path)
101 blob.download_to_filename(dest_path)
102
103 @staticmethod
104 def _download_blob(uri, out_dir: str):
105 match = re.search(_BLOB_RE, uri)
106 account_name = match.group(1)
107 storage_url = match.group(2)
108 container_name, prefix = storage_url.split("/", 1)
109
110 logging.info("Connecting to BLOB account: %s, contianer: %s", account_name, container_name)
111 block_blob_service = BlockBlobService(account_name=account_name)
112 blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
113
114 for blob in blobs:
115 if "/" in blob.name:
116 head, _ = os.path.split(blob.name)
117 dir_path = os.path.join(out_dir, head)
118 if not os.path.isdir(dir_path):
119 os.makedirs(dir_path)
120
121 dest_path = os.path.join(out_dir, blob.name)
122 logging.info("Downloading: %s", dest_path)
123 block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)
124
125 @staticmethod
126 def _download_local(uri, out_dir=None):
127 local_path = uri.replace(_LOCAL_PREFIX, "", 1)
128 if not os.path.exists(local_path):
129 raise Exception("Local path %s does not exist." % (uri))
130
131 if out_dir is None:
132 return local_path
133 elif not os.path.isdir(out_dir):
134 os.makedirs(out_dir)
135
136 if os.path.isdir(local_path):
137 local_path = os.path.join(local_path, "*")
138
139 for src in glob.glob(local_path):
140 _, tail = os.path.split(src)
141 dest_path = os.path.join(out_dir, tail)
142 logging.info("Linking: %s to %s", src, dest_path)
143 os.symlink(src, dest_path)
144 return out_dir
145
146 @staticmethod
147 def _create_minio_client():
148 # Remove possible http scheme for Minio
149 url = re.compile(r"https?://")
150 minioClient = Minio(url.sub("", os.getenv("S3_ENDPOINT", "")),
151 access_key=os.getenv("AWS_ACCESS_KEY_ID", ""),
152 secret_key=os.getenv("AWS_SECRET_ACCESS_KEY", ""),
153 secure=True)
154 return minioClient
155
[end of python/kfserving/kfserving/storage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py
--- a/python/kfserving/kfserving/storage.py
+++ b/python/kfserving/kfserving/storage.py
@@ -101,14 +101,15 @@
blob.download_to_filename(dest_path)
@staticmethod
- def _download_blob(uri, out_dir: str):
+ def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals
match = re.search(_BLOB_RE, uri)
account_name = match.group(1)
storage_url = match.group(2)
container_name, prefix = storage_url.split("/", 1)
logging.info("Connecting to BLOB account: %s, contianer: %s", account_name, container_name)
- block_blob_service = BlockBlobService(account_name=account_name)
+ token = Storage._get_azure_storage_token()
+ block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)
blobs = block_blob_service.list_blobs(container_name, prefix=prefix)
for blob in blobs:
@@ -122,6 +123,33 @@
logging.info("Downloading: %s", dest_path)
block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)
+ @staticmethod
+ def _get_azure_storage_token():
+ tenant_id = os.getenv("AZ_TENANT_ID", "")
+ client_id = os.getenv("AZ_CLIENT_ID", "")
+ client_secret = os.getenv("AZ_CLIENT_SECRET", "")
+ subscription_id = os.getenv("AZ_SUBSCRIPTION_ID", "")
+
+ if tenant_id == "" or client_id == "" or client_secret == "" or subscription_id == "":
+ return None
+
+ # note the SP must have "Storage Blob Data Owner" perms for this to work
+ import adal
+ from azure.storage.common import TokenCredential
+
+ authority_url = "https://login.microsoftonline.com/" + tenant_id
+
+ context = adal.AuthenticationContext(authority_url)
+
+ token = context.acquire_token_with_client_credentials(
+ "https://storage.azure.com/",
+ client_id,
+ client_secret)
+
+ token_credential = TokenCredential(token["accessToken"])
+
+ return token_credential
+
@staticmethod
def _download_local(uri, out_dir=None):
local_path = uri.replace(_LOCAL_PREFIX, "", 1)
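
As a usage illustration of the patched Azure path, a hedged sketch follows: the storage account, container and credential values are placeholders, and only the `AZ_*` variable names and `Storage.download` come from the patch and the module above.

```python
# Hypothetical driver for the service-principal flow added in this patch; all
# concrete values are placeholders, not a working configuration.
import os

from kfserving.storage import Storage

# Credentials read by _get_azure_storage_token(); per the patch comment, the
# service principal needs the "Storage Blob Data Owner" role.
os.environ["AZ_TENANT_ID"] = "<tenant-id>"
os.environ["AZ_CLIENT_ID"] = "<client-id>"
os.environ["AZ_CLIENT_SECRET"] = "<client-secret>"
os.environ["AZ_SUBSCRIPTION_ID"] = "<subscription-id>"

# URL shape matched by _BLOB_RE: https://<account>.blob.core.windows.net/<container>/<prefix>
model_dir = Storage.download(
    "https://myaccount.blob.core.windows.net/models/sklearn/iris")
print(model_dir)  # temporary directory containing the downloaded blobs
```

If the `AZ_*` variables are unset, `_get_azure_storage_token()` returns `None` and the download falls back to the previous anonymous `BlockBlobService` behaviour.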
| {"golden_diff": "diff --git a/python/kfserving/kfserving/storage.py b/python/kfserving/kfserving/storage.py\n--- a/python/kfserving/kfserving/storage.py\n+++ b/python/kfserving/kfserving/storage.py\n@@ -101,14 +101,15 @@\n blob.download_to_filename(dest_path)\n \n @staticmethod\n- def _download_blob(uri, out_dir: str):\n+ def _download_blob(uri, out_dir: str): # pylint: disable=too-many-locals\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n \n logging.info(\"Connecting to BLOB account: %s, contianer: %s\", account_name, container_name)\n- block_blob_service = BlockBlobService(account_name=account_name)\n+ token = Storage._get_azure_storage_token()\n+ block_blob_service = BlockBlobService(account_name=account_name, token_credential=token)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n \n for blob in blobs:\n@@ -122,6 +123,33 @@\n logging.info(\"Downloading: %s\", dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n \n+ @staticmethod\n+ def _get_azure_storage_token():\n+ tenant_id = os.getenv(\"AZ_TENANT_ID\", \"\")\n+ client_id = os.getenv(\"AZ_CLIENT_ID\", \"\")\n+ client_secret = os.getenv(\"AZ_CLIENT_SECRET\", \"\")\n+ subscription_id = os.getenv(\"AZ_SUBSCRIPTION_ID\", \"\")\n+\n+ if tenant_id == \"\" or client_id == \"\" or client_secret == \"\" or subscription_id == \"\":\n+ return None\n+\n+ # note the SP must have \"Storage Blob Data Owner\" perms for this to work\n+ import adal\n+ from azure.storage.common import TokenCredential\n+\n+ authority_url = \"https://login.microsoftonline.com/\" + tenant_id\n+\n+ context = adal.AuthenticationContext(authority_url)\n+\n+ token = context.acquire_token_with_client_credentials(\n+ \"https://storage.azure.com/\",\n+ client_id,\n+ client_secret)\n+\n+ token_credential = TokenCredential(token[\"accessToken\"])\n+\n+ return token_credential\n+\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n", "issue": "Support credentials for Azure Blobs\n/kind feature\r\n\r\n**Describe the solution you'd like**\r\n1. Credentials builder should support exposing azure blob credentials to model initializer\r\n2. 
Downloader should read and use credentials\r\n\r\n\r\n**Anything else you would like to add:**\r\nHere is how az creds are setup for kubeflow: https://github.com/kubeflow/kubeflow/pull/2676\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport logging\nimport tempfile\nimport os\nimport re\nfrom azure.storage.blob import BlockBlobService\nfrom google.auth import exceptions\nfrom google.cloud import storage\nfrom minio import Minio\n\n_GCS_PREFIX = \"gs://\"\n_S3_PREFIX = \"s3://\"\n_BLOB_RE = \"https://(.+?).blob.core.windows.net/(.+)\"\n_LOCAL_PREFIX = \"file://\"\n\n\nclass Storage(object): # pylint: disable=too-few-public-methods\n @staticmethod\n def download(uri: str, out_dir: str = None) -> str:\n logging.info(\"Copying contents of %s to local\", uri)\n\n is_local = False\n if uri.startswith(_LOCAL_PREFIX) or os.path.exists(uri):\n is_local = True\n\n if out_dir is None:\n if is_local:\n # noop if out_dir is not set and the path is local\n return Storage._download_local(uri)\n out_dir = tempfile.mkdtemp()\n\n if uri.startswith(_GCS_PREFIX):\n Storage._download_gcs(uri, out_dir)\n elif uri.startswith(_S3_PREFIX):\n Storage._download_s3(uri, out_dir)\n elif re.search(_BLOB_RE, uri):\n Storage._download_blob(uri, out_dir)\n elif is_local:\n return Storage._download_local(uri, out_dir)\n else:\n raise Exception(\"Cannot recognize storage type for \" + uri +\n \"\\n'%s', '%s', and '%s' are the current available storage type.\" %\n (_GCS_PREFIX, _S3_PREFIX, _LOCAL_PREFIX))\n\n logging.info(\"Successfully copied %s to %s\", uri, out_dir)\n return out_dir\n\n @staticmethod\n def _download_s3(uri, temp_dir: str):\n client = Storage._create_minio_client()\n bucket_args = uri.replace(_S3_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n objects = client.list_objects(bucket_name, prefix=bucket_path, recursive=True)\n for obj in objects:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = obj.object_name.replace(bucket_path, \"\", 1).strip(\"/\")\n client.fget_object(bucket_name, obj.object_name,\n os.path.join(temp_dir, subdir_object_key))\n\n @staticmethod\n def _download_gcs(uri, temp_dir: str):\n try:\n storage_client = storage.Client()\n except exceptions.DefaultCredentialsError:\n storage_client = storage.Client.create_anonymous_client()\n bucket_args = uri.replace(_GCS_PREFIX, \"\", 1).split(\"/\", 1)\n bucket_name = bucket_args[0]\n bucket_path = bucket_args[1] if len(bucket_args) > 1 else \"\"\n bucket = storage_client.bucket(bucket_name)\n prefix = bucket_path\n if not prefix.endswith(\"/\"):\n prefix = prefix + \"/\"\n blobs = bucket.list_blobs(prefix=prefix)\n for blob in blobs:\n # Replace any prefix from the object key with temp_dir\n subdir_object_key = blob.name.replace(bucket_path, \"\", 1).strip(\"/\")\n\n # Create necessary subdirectory to store the object locally\n if \"/\" in 
subdir_object_key:\n local_object_dir = os.path.join(temp_dir, subdir_object_key.rsplit(\"/\", 1)[0])\n if not os.path.isdir(local_object_dir):\n os.makedirs(local_object_dir, exist_ok=True)\n if subdir_object_key.strip() != \"\":\n dest_path = os.path.join(temp_dir, subdir_object_key)\n logging.info(\"Downloading: %s\", dest_path)\n blob.download_to_filename(dest_path)\n\n @staticmethod\n def _download_blob(uri, out_dir: str):\n match = re.search(_BLOB_RE, uri)\n account_name = match.group(1)\n storage_url = match.group(2)\n container_name, prefix = storage_url.split(\"/\", 1)\n\n logging.info(\"Connecting to BLOB account: %s, contianer: %s\", account_name, container_name)\n block_blob_service = BlockBlobService(account_name=account_name)\n blobs = block_blob_service.list_blobs(container_name, prefix=prefix)\n\n for blob in blobs:\n if \"/\" in blob.name:\n head, _ = os.path.split(blob.name)\n dir_path = os.path.join(out_dir, head)\n if not os.path.isdir(dir_path):\n os.makedirs(dir_path)\n\n dest_path = os.path.join(out_dir, blob.name)\n logging.info(\"Downloading: %s\", dest_path)\n block_blob_service.get_blob_to_path(container_name, blob.name, dest_path)\n\n @staticmethod\n def _download_local(uri, out_dir=None):\n local_path = uri.replace(_LOCAL_PREFIX, \"\", 1)\n if not os.path.exists(local_path):\n raise Exception(\"Local path %s does not exist.\" % (uri))\n\n if out_dir is None:\n return local_path\n elif not os.path.isdir(out_dir):\n os.makedirs(out_dir)\n\n if os.path.isdir(local_path):\n local_path = os.path.join(local_path, \"*\")\n\n for src in glob.glob(local_path):\n _, tail = os.path.split(src)\n dest_path = os.path.join(out_dir, tail)\n logging.info(\"Linking: %s to %s\", src, dest_path)\n os.symlink(src, dest_path)\n return out_dir\n\n @staticmethod\n def _create_minio_client():\n # Remove possible http scheme for Minio\n url = re.compile(r\"https?://\")\n minioClient = Minio(url.sub(\"\", os.getenv(\"S3_ENDPOINT\", \"\")),\n access_key=os.getenv(\"AWS_ACCESS_KEY_ID\", \"\"),\n secret_key=os.getenv(\"AWS_SECRET_ACCESS_KEY\", \"\"),\n secure=True)\n return minioClient\n", "path": "python/kfserving/kfserving/storage.py"}]} | 2,382 | 547 |
gh_patches_debug_35818 | rasdani/github-patches | git_diff | beetbox__beets-1779 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
mbsubmit: cleanup and completion
Glad to see a new release has been made!
I'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place.
I have modified a bit the behaviour, making the decision of appending the `"Print tracks"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal or lower than `Recommendation.medium`, which hopefully covers the most obvious choices (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. A config option has been added that allows the user to modify this settings (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc).
Other than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful.
A couple of notes:
- currently, the plugin makes no effort of nicely formatting items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc) in those cases?
- there might be some problems on some combination on options: for example, if the user sets the threshold to `strong`, but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note on the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin?
As usual, any comments and input are more than welcome!
</issue>
<code>
[start of beetsplug/mbsubmit.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2016, Adrian Sampson and Diego Moreda.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Aid in submitting information to MusicBrainz.
17
18 This plugin allows the user to print track information in a format that is
19 parseable by the MusicBrainz track parser. Programmatic submitting is not
20 implemented by MusicBrainz yet.
21 """
22
23 from __future__ import (division, absolute_import, print_function,
24 unicode_literals)
25
26
27 from beets.autotag import Recommendation
28 from beets.importer import action
29 from beets.plugins import BeetsPlugin
30 from beets.ui.commands import PromptChoice
31 from beetsplug.info import print_data
32
33
34 class MBSubmitPlugin(BeetsPlugin):
35 def __init__(self):
36 super(MBSubmitPlugin, self).__init__()
37
38 self.register_listener('before_choose_candidate',
39 self.before_choose_candidate_event)
40
41 def before_choose_candidate_event(self, session, task):
42 if not task.candidates or task.rec == Recommendation.none:
43 return [PromptChoice('p', 'Print tracks', self.print_tracks),
44 PromptChoice('k', 'print tracks and sKip',
45 self.print_tracks_and_skip)]
46
47 # Callbacks for choices.
48 def print_tracks(self, session, task):
49 for i in task.items:
50 print_data(None, i, '$track. $artist - $title ($length)')
51
52 def print_tracks_and_skip(self, session, task):
53 for i in task.items:
54 print_data(None, i, '$track. $artist - $title ($length)')
55 return action.SKIP
56
[end of beetsplug/mbsubmit.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py
--- a/beetsplug/mbsubmit.py
+++ b/beetsplug/mbsubmit.py
@@ -16,8 +16,10 @@
"""Aid in submitting information to MusicBrainz.
This plugin allows the user to print track information in a format that is
-parseable by the MusicBrainz track parser. Programmatic submitting is not
+parseable by the MusicBrainz track parser [1]. Programmatic submitting is not
implemented by MusicBrainz yet.
+
+[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings
"""
from __future__ import (division, absolute_import, print_function,
@@ -25,7 +27,6 @@
from beets.autotag import Recommendation
-from beets.importer import action
from beets.plugins import BeetsPlugin
from beets.ui.commands import PromptChoice
from beetsplug.info import print_data
@@ -35,21 +36,26 @@
def __init__(self):
super(MBSubmitPlugin, self).__init__()
+ self.config.add({
+ 'format': '$track. $title - $artist ($length)',
+ 'threshold': 'medium',
+ })
+
+ # Validate and store threshold.
+ self.threshold = self.config['threshold'].as_choice({
+ 'none': Recommendation.none,
+ 'low': Recommendation.low,
+ 'medium': Recommendation.medium,
+ 'strong': Recommendation.strong
+ })
+
self.register_listener('before_choose_candidate',
self.before_choose_candidate_event)
def before_choose_candidate_event(self, session, task):
- if not task.candidates or task.rec == Recommendation.none:
- return [PromptChoice('p', 'Print tracks', self.print_tracks),
- PromptChoice('k', 'print tracks and sKip',
- self.print_tracks_and_skip)]
+ if task.rec <= self.threshold:
+ return [PromptChoice('p', 'Print tracks', self.print_tracks)]
- # Callbacks for choices.
def print_tracks(self, session, task):
for i in task.items:
- print_data(None, i, '$track. $artist - $title ($length)')
-
- def print_tracks_and_skip(self, session, task):
- for i in task.items:
- print_data(None, i, '$track. $artist - $title ($length)')
- return action.SKIP
+ print_data(None, i, self.config['format'].get())
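
To make the new gating rule concrete, a small self-contained illustration follows; it only assumes the ordering of `Recommendation` values (none < low < medium < strong), which the patch itself relies on via `task.rec <= self.threshold`.

```python
# Hypothetical illustration of the 'threshold' option added in this patch.
from beets.autotag import Recommendation

threshold = Recommendation.medium  # the default 'threshold: medium' setting

for rec in (Recommendation.none, Recommendation.low,
            Recommendation.medium, Recommendation.strong):
    if rec <= threshold:
        print(rec, "-> the 'Print tracks' choice is offered")
    else:
        print(rec, "-> the choice is hidden (match is strong enough)")
```

Setting `threshold: strong` in the plugin configuration would offer the choice for every match, while `threshold: none` restricts it to imports with no recommendation at all.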
| {"golden_diff": "diff --git a/beetsplug/mbsubmit.py b/beetsplug/mbsubmit.py\n--- a/beetsplug/mbsubmit.py\n+++ b/beetsplug/mbsubmit.py\n@@ -16,8 +16,10 @@\n \"\"\"Aid in submitting information to MusicBrainz.\n \n This plugin allows the user to print track information in a format that is\n-parseable by the MusicBrainz track parser. Programmatic submitting is not\n+parseable by the MusicBrainz track parser [1]. Programmatic submitting is not\n implemented by MusicBrainz yet.\n+\n+[1] http://wiki.musicbrainz.org/History:How_To_Parse_Track_Listings\n \"\"\"\n \n from __future__ import (division, absolute_import, print_function,\n@@ -25,7 +27,6 @@\n \n \n from beets.autotag import Recommendation\n-from beets.importer import action\n from beets.plugins import BeetsPlugin\n from beets.ui.commands import PromptChoice\n from beetsplug.info import print_data\n@@ -35,21 +36,26 @@\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n \n+ self.config.add({\n+ 'format': '$track. $title - $artist ($length)',\n+ 'threshold': 'medium',\n+ })\n+\n+ # Validate and store threshold.\n+ self.threshold = self.config['threshold'].as_choice({\n+ 'none': Recommendation.none,\n+ 'low': Recommendation.low,\n+ 'medium': Recommendation.medium,\n+ 'strong': Recommendation.strong\n+ })\n+\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n \n def before_choose_candidate_event(self, session, task):\n- if not task.candidates or task.rec == Recommendation.none:\n- return [PromptChoice('p', 'Print tracks', self.print_tracks),\n- PromptChoice('k', 'print tracks and sKip',\n- self.print_tracks_and_skip)]\n+ if task.rec <= self.threshold:\n+ return [PromptChoice('p', 'Print tracks', self.print_tracks)]\n \n- # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n-\n- def print_tracks_and_skip(self, session, task):\n- for i in task.items:\n- print_data(None, i, '$track. $artist - $title ($length)')\n- return action.SKIP\n+ print_data(None, i, self.config['format'].get())\n", "issue": "mbsubmit: cleanup and completion\nGlad to see a new release has been made!\n\nI'm getting back to work on beets after a few days away from the computer, hopefully bringing issue #1689 to a close eventually. As hinted on the previous discussion, this pull request is intended to take care of the `mbsubmit` plugin cleanup, now that the underlying pieces are in place.\n\nI have modified a bit the behaviour, making the decision of appending the `\"Print tracks\"` choice depend solely on `task.rec`. The default behaviour is to only append the choice to matches where the recommendation is equal or lower than `Recommendation.medium`, which hopefully covers the most obvious choices (albums with no matches, albums with weak-ish matches) and the original request by @awesomer, and also avoids polluting the prompt in the cases where the match is strong. 
A config option has been added that allows the user to modify this settings (extra-picky users might find it useful to always be able to print tracks for fixing spelling mistakes, other users might only want it on albums with no matches, etc).\n\nOther than that, a configuration option for setting the format string has been added as well - I can't think of a case where this might come in handy currently, but maybe more creative users might find it useful.\n\nA couple of notes:\n- currently, the plugin makes no effort of nicely formatting items that might be lacking some of the required fields. Would it be useful to add some extra checks and fall back to printing the filename (or something more advanced with the help of `fromfilename`, etc) in those cases?\n- there might be some problems on some combination on options: for example, if the user sets the threshold to `strong`, but launches the importer in non-timid mode, the prompt will not actually be displayed. Would a note on the (upcoming) documentation suffice, as handling this case probably requires some changes that seem to be a bit out of the scope of the plugin?\n\nAs usual, any comments and input are more than welcome!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2016, Adrian Sampson and Diego Moreda.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Aid in submitting information to MusicBrainz.\n\nThis plugin allows the user to print track information in a format that is\nparseable by the MusicBrainz track parser. Programmatic submitting is not\nimplemented by MusicBrainz yet.\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n\nfrom beets.autotag import Recommendation\nfrom beets.importer import action\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui.commands import PromptChoice\nfrom beetsplug.info import print_data\n\n\nclass MBSubmitPlugin(BeetsPlugin):\n def __init__(self):\n super(MBSubmitPlugin, self).__init__()\n\n self.register_listener('before_choose_candidate',\n self.before_choose_candidate_event)\n\n def before_choose_candidate_event(self, session, task):\n if not task.candidates or task.rec == Recommendation.none:\n return [PromptChoice('p', 'Print tracks', self.print_tracks),\n PromptChoice('k', 'print tracks and sKip',\n self.print_tracks_and_skip)]\n\n # Callbacks for choices.\n def print_tracks(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. $artist - $title ($length)')\n\n def print_tracks_and_skip(self, session, task):\n for i in task.items:\n print_data(None, i, '$track. $artist - $title ($length)')\n return action.SKIP\n", "path": "beetsplug/mbsubmit.py"}]} | 1,531 | 559 |
gh_patches_debug_27451 | rasdani/github-patches | git_diff | RedHatInsights__insights-core-3041 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Systemd_analyze parser is raising lots of exceptions in production
The SystemdAnalyzeBlame parser is throwing a large number of ValueError('too many values to unpack (expected 2)',) exceptions in production.
</issue>
<code>
[start of insights/parsers/systemd_analyze.py]
1 """
2 SystemdAnalyzeBlame - command ``systemd-analyze blame``
3 =======================================================
4
5 This module parses the output of command ``systemd-analyze blame``.
6 """
7 from insights.specs import Specs
8 from insights import CommandParser, parser
9 from insights.parsers import SkipException
10
11
12 @parser(Specs.systemd_analyze_blame)
13 class SystemdAnalyzeBlame(CommandParser, dict):
14 """Parse the output of ``systemd-analyze blame`` as ``dict``. The time to
15 initialize is converted into seconds.
16
17 Typical output::
18
19 33.080s cloud-init-local.service
20 32.423s unbound-anchor.service
21 2.773s kdump.service
22 1.699s dnf-makecache.service
23 1.304s cloud-init.service
24 1.073s initrd-switch-root.service
25 939ms cloud-config.service
26 872ms tuned.service
27 770ms cloud-final.service
28
29 Examples:
30
31 >>> 'cloud-init-local.service' in output
32 True
33 >>> output.get('cloud-init.service', 0)
34 1.304
35
36 Returns:
37 (dict): With unit-name & time as key-value pair.
38 Ex::
39
40 {'cloud-config.service': 0.939,
41 'cloud-final.service': 0.77,
42 'cloud-init-local.service': 33.08,
43 'cloud-init.service': 1.304,
44 'dnf-makecache.service': 1.699,
45 'initrd-switch-root.service': 1.073,
46 'kdump.service': 2.773,
47 'tuned.service': 0.872,
48 'unbound-anchor.service': 32.423}
49
50 Raises:
51 SkipException: If content is not provided.
52 """
53 def parse_content(self, content):
54 if not content:
55 raise SkipException
56
57 for c in content:
58 time, service = c.split()
59 if time.endswith('ms'):
60 _time = round(float(time.strip('ms')) / 1000, 5)
61 else:
62 _time = round(float(time.strip('ms')), 5)
63
64 self[service] = _time
65
[end of insights/parsers/systemd_analyze.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py
--- a/insights/parsers/systemd_analyze.py
+++ b/insights/parsers/systemd_analyze.py
@@ -55,10 +55,34 @@
raise SkipException
for c in content:
- time, service = c.split()
- if time.endswith('ms'):
- _time = round(float(time.strip('ms')) / 1000, 5)
- else:
- _time = round(float(time.strip('ms')), 5)
+ cols = c.split()
+ # Check to make sure that the first character of the first
+ # entry is a number. This will hopefully exclude any errors
+ # that are outputted in the file.
+ if cols[0][0].isdigit():
+ # The service should be the last column, so just
+ # remove the last column from the list before looping.
+ service = cols.pop()
+ time = 0
+ for x in cols:
+ # Convert each column to seconds, and add them up.
+ if x.endswith('y'):
+ # Pulled the 31557600 from systemd src.
+ time += int(x.strip('y')) * 31557600
+ elif x.endswith('month'):
+ # Pulled the 2629800 from systemd src.
+ time += int(x.strip('month')) * 2629800
+ elif x.endswith('w'):
+ time += int(x.strip('w')) * 7 * 24 * 60 ** 2
+ elif x.endswith('d'):
+ time += int(x.strip('d')) * 24 * 60 ** 2
+ elif x.endswith('h'):
+ time += int(x.strip('h')) * 60 ** 2
+ elif x.endswith('min'):
+ time += int(x.strip('min')) * 60
+ elif x.endswith('ms'):
+ time += float(x.strip('ms')) / 1000
+ elif x.endswith('s'):
+ time += float(x.strip('s'))
- self[service] = _time
+ self[service] = time
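
To make the unit handling concrete, here is a standalone sketch of the conversion performed above; the sample `systemd-analyze blame` lines are invented for illustration, but the unit factors mirror the patch (which takes them from the systemd source).

```python
# Standalone re-implementation of the time parsing, for illustration only.
def blame_line_to_seconds(line):
    cols = line.split()
    if not cols or not cols[0][0].isdigit():
        return None  # skip error/noise lines, as the parser now does
    service = cols.pop()
    factors = {"y": 31557600, "month": 2629800, "w": 7 * 24 * 3600,
               "d": 24 * 3600, "h": 3600, "min": 60, "ms": 0.001, "s": 1}
    seconds = 0.0
    for col in cols:
        # Check longer suffixes first so 'month' is not mistaken for 'h', etc.
        for suffix in ("month", "min", "ms", "y", "w", "d", "h", "s"):
            if col.endswith(suffix):
                seconds += float(col[: -len(suffix)]) * factors[suffix]
                break
    return service, seconds


for sample in ("33.080s cloud-init-local.service",
               "1min 30.580s NetworkManager-wait-online.service",
               "939ms cloud-config.service"):
    print(blame_line_to_seconds(sample))
```

A line such as `1min 30.580s NetworkManager-wait-online.service` is the shape that broke the old `time, service = c.split()` unpacking and produced the ValueError seen in production.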
| {"golden_diff": "diff --git a/insights/parsers/systemd_analyze.py b/insights/parsers/systemd_analyze.py\n--- a/insights/parsers/systemd_analyze.py\n+++ b/insights/parsers/systemd_analyze.py\n@@ -55,10 +55,34 @@\n raise SkipException\n \n for c in content:\n- time, service = c.split()\n- if time.endswith('ms'):\n- _time = round(float(time.strip('ms')) / 1000, 5)\n- else:\n- _time = round(float(time.strip('ms')), 5)\n+ cols = c.split()\n+ # Check to make sure that the first character of the first\n+ # entry is a number. This will hopefully exclude any errors\n+ # that are outputted in the file.\n+ if cols[0][0].isdigit():\n+ # The service should be the last column, so just\n+ # remove the last column from the list before looping.\n+ service = cols.pop()\n+ time = 0\n+ for x in cols:\n+ # Convert each column to seconds, and add them up.\n+ if x.endswith('y'):\n+ # Pulled the 31557600 from systemd src.\n+ time += int(x.strip('y')) * 31557600\n+ elif x.endswith('month'):\n+ # Pulled the 2629800 from systemd src.\n+ time += int(x.strip('month')) * 2629800\n+ elif x.endswith('w'):\n+ time += int(x.strip('w')) * 7 * 24 * 60 ** 2\n+ elif x.endswith('d'):\n+ time += int(x.strip('d')) * 24 * 60 ** 2\n+ elif x.endswith('h'):\n+ time += int(x.strip('h')) * 60 ** 2\n+ elif x.endswith('min'):\n+ time += int(x.strip('min')) * 60\n+ elif x.endswith('ms'):\n+ time += float(x.strip('ms')) / 1000\n+ elif x.endswith('s'):\n+ time += float(x.strip('s'))\n \n- self[service] = _time\n+ self[service] = time\n", "issue": "Systemd_analyze parser is raising lots of exceptions in production\nThe SystemdAnalyzeBlame parser is throwing a large number of the exception ValueError('too many values to unpack (expected 2)',) in production.\n", "before_files": [{"content": "\"\"\"\nSystemdAnalyzeBlame - command ``systemd-analyze blame``\n=======================================================\n\nThis module parses the output of command ``systemd-analyze blame``.\n\"\"\"\nfrom insights.specs import Specs\nfrom insights import CommandParser, parser\nfrom insights.parsers import SkipException\n\n\n@parser(Specs.systemd_analyze_blame)\nclass SystemdAnalyzeBlame(CommandParser, dict):\n \"\"\"Parse the output of ``systemd-analyze blame`` as ``dict``. The time to\n initialize is converted into seconds.\n\n Typical output::\n\n 33.080s cloud-init-local.service\n 32.423s unbound-anchor.service\n 2.773s kdump.service\n 1.699s dnf-makecache.service\n 1.304s cloud-init.service\n 1.073s initrd-switch-root.service\n 939ms cloud-config.service\n 872ms tuned.service\n 770ms cloud-final.service\n\n Examples:\n\n >>> 'cloud-init-local.service' in output\n True\n >>> output.get('cloud-init.service', 0)\n 1.304\n\n Returns:\n (dict): With unit-name & time as key-value pair.\n Ex::\n\n {'cloud-config.service': 0.939,\n 'cloud-final.service': 0.77,\n 'cloud-init-local.service': 33.08,\n 'cloud-init.service': 1.304,\n 'dnf-makecache.service': 1.699,\n 'initrd-switch-root.service': 1.073,\n 'kdump.service': 2.773,\n 'tuned.service': 0.872,\n 'unbound-anchor.service': 32.423}\n\n Raises:\n SkipException: If content is not provided.\n \"\"\"\n def parse_content(self, content):\n if not content:\n raise SkipException\n\n for c in content:\n time, service = c.split()\n if time.endswith('ms'):\n _time = round(float(time.strip('ms')) / 1000, 5)\n else:\n _time = round(float(time.strip('ms')), 5)\n\n self[service] = _time\n", "path": "insights/parsers/systemd_analyze.py"}]} | 1,222 | 525 |
gh_patches_debug_30261 | rasdani/github-patches | git_diff | mozilla__pontoon-2490 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rename "Deadline"
As part of https://github.com/mozilla/pontoon/pull/1565, we wrote that "Deadline sounds permanent, threatening, and ugly."
Let's replace the word with something else.
Maybe "Due date"?
</issue>
<code>
[start of pontoon/projects/management/commands/send_deadline_notifications.py]
1 import datetime
2
3 from django.contrib.auth.models import User
4 from django.core.management.base import BaseCommand
5 from notifications.signals import notify
6
7 from pontoon.base.models import Project
8
9
10 class Command(BaseCommand):
11 help = "Notify contributors about the approaching project deadline"
12
13 def handle(self, *args, **options):
14 """
15 This command sends deadline reminders to contributors of projects that
16 are due in 7 days. If 2 days before the deadline project still isn't
17 complete for the contributor's locale, notifications are sent again.
18
19 The command is designed to run daily.
20 """
21 for project in Project.objects.available():
22 if project.deadline:
23 days_left = (project.deadline - datetime.date.today()).days
24 if days_left not in (2, 7):
25 continue
26 else:
27 continue
28
29 self.stdout.write(f"Sending deadline notifications for project {project}.")
30
31 is_project_public = project.visibility == Project.Visibility.PUBLIC
32 verb = f"due in {days_left} days"
33 locales = []
34
35 for project_locale in project.project_locale.all():
36 if project_locale.approved_strings < project_locale.total_strings:
37 locales.append(project_locale.locale)
38
39 contributors = (
40 User.objects.filter(
41 translation__entity__resource__project=project,
42 translation__locale__in=locales,
43 profile__project_deadline_notifications=True,
44 ).distinct(),
45 )
46
47 for contributor in contributors:
48 if is_project_public or contributor.is_superuser:
49 notify.send(project, recipient=contributor, verb=verb)
50
51 self.stdout.write(f"Deadline notifications for project {project} sent.")
52
[end of pontoon/projects/management/commands/send_deadline_notifications.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py
--- a/pontoon/projects/management/commands/send_deadline_notifications.py
+++ b/pontoon/projects/management/commands/send_deadline_notifications.py
@@ -8,12 +8,12 @@
class Command(BaseCommand):
- help = "Notify contributors about the approaching project deadline"
+ help = "Notify contributors about the approaching project target date"
def handle(self, *args, **options):
"""
- This command sends deadline reminders to contributors of projects that
- are due in 7 days. If 2 days before the deadline project still isn't
+ This command sends target date reminders to contributors of projects that
+ are due in 7 days. If 2 days before the target date project still isn't
complete for the contributor's locale, notifications are sent again.
The command is designed to run daily.
@@ -26,7 +26,9 @@
else:
continue
- self.stdout.write(f"Sending deadline notifications for project {project}.")
+ self.stdout.write(
+ f"Sending target date notifications for project {project}."
+ )
is_project_public = project.visibility == Project.Visibility.PUBLIC
verb = f"due in {days_left} days"
@@ -48,4 +50,4 @@
if is_project_public or contributor.is_superuser:
notify.send(project, recipient=contributor, verb=verb)
- self.stdout.write(f"Deadline notifications for project {project} sent.")
+ self.stdout.write(f"Target date notifications for project {project} sent.")
| {"golden_diff": "diff --git a/pontoon/projects/management/commands/send_deadline_notifications.py b/pontoon/projects/management/commands/send_deadline_notifications.py\n--- a/pontoon/projects/management/commands/send_deadline_notifications.py\n+++ b/pontoon/projects/management/commands/send_deadline_notifications.py\n@@ -8,12 +8,12 @@\n \n \n class Command(BaseCommand):\n- help = \"Notify contributors about the approaching project deadline\"\n+ help = \"Notify contributors about the approaching project target date\"\n \n def handle(self, *args, **options):\n \"\"\"\n- This command sends deadline reminders to contributors of projects that\n- are due in 7 days. If 2 days before the deadline project still isn't\n+ This command sends target date reminders to contributors of projects that\n+ are due in 7 days. If 2 days before the target date project still isn't\n complete for the contributor's locale, notifications are sent again.\n \n The command is designed to run daily.\n@@ -26,7 +26,9 @@\n else:\n continue\n \n- self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n+ self.stdout.write(\n+ f\"Sending target date notifications for project {project}.\"\n+ )\n \n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n@@ -48,4 +50,4 @@\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n \n- self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n+ self.stdout.write(f\"Target date notifications for project {project} sent.\")\n", "issue": "Rename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\nRename \"Deadline\"\nAs part of https://github.com/mozilla/pontoon/pull/1565, we wrote that \"Deadline sounds permanent, threatening, and ugly.\"\r\n\r\nLet's replace the word with something else.\r\n\r\nMaybe \"Due date\"?\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\nfrom notifications.signals import notify\n\nfrom pontoon.base.models import Project\n\n\nclass Command(BaseCommand):\n help = \"Notify contributors about the approaching project deadline\"\n\n def handle(self, *args, **options):\n \"\"\"\n This command sends deadline reminders to contributors of projects that\n are due in 7 days. 
If 2 days before the deadline project still isn't\n complete for the contributor's locale, notifications are sent again.\n\n The command is designed to run daily.\n \"\"\"\n for project in Project.objects.available():\n if project.deadline:\n days_left = (project.deadline - datetime.date.today()).days\n if days_left not in (2, 7):\n continue\n else:\n continue\n\n self.stdout.write(f\"Sending deadline notifications for project {project}.\")\n\n is_project_public = project.visibility == Project.Visibility.PUBLIC\n verb = f\"due in {days_left} days\"\n locales = []\n\n for project_locale in project.project_locale.all():\n if project_locale.approved_strings < project_locale.total_strings:\n locales.append(project_locale.locale)\n\n contributors = (\n User.objects.filter(\n translation__entity__resource__project=project,\n translation__locale__in=locales,\n profile__project_deadline_notifications=True,\n ).distinct(),\n )\n\n for contributor in contributors:\n if is_project_public or contributor.is_superuser:\n notify.send(project, recipient=contributor, verb=verb)\n\n self.stdout.write(f\"Deadline notifications for project {project} sent.\")\n", "path": "pontoon/projects/management/commands/send_deadline_notifications.py"}]} | 1,096 | 370 |
gh_patches_debug_13194 | rasdani/github-patches | git_diff | mlflow__mlflow-10095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Set `openai.api_key` if `OPENAI_API_KEY` exists
### Summary
The line that needs a fix:
https://github.com/mlflow/mlflow/blob/a4db4ee826765f0365944f83124af8840c72f4d8/mlflow/openai/utils.py#L161
Just because `"OPENAI_API_KEY" in os.environ` is True doesn't mean `openai.api_key` is set. The following code currently doesn't work.
```python
import os
assert "OPENAI_API_KEY" not in os.environ
import openai
# Set OPENAI_API_KEY after openai is imported
os.environ["OPENAI_API_KEY"] = "..."
basic_qa_model = mlflow.openai.log_model(
model="gpt-3.5-turbo",
task=openai.ChatCompletion,
artifact_path="model",
messages=[
{"role": "user", "content": "{question}"},
],
)
loaded_model = mlflow.pyfunc.load_model(basic_qa_model.model_uri)
loaded_model.predict(["What is MLflow"]) # this line throws because `openai.api_key` is None
```
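
For reference, a minimal user-side workaround (only a sketch of the underlying problem, not the proposed MLflow fix) is to mirror the environment variable onto the module attribute before loading the model:

```python3
import os
import openai

# The OpenAI client reads the module-level attribute, which is only populated
# from OPENAI_API_KEY at import time, so copy it over manually if needed.
if openai.api_key is None and "OPENAI_API_KEY" in os.environ:
    openai.api_key = os.environ["OPENAI_API_KEY"]
```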
### Notes
- Make sure to open a PR from a **non-master** branch.
- Sign off the commit using the `-s` flag when making a commit:
```sh
git commit -s -m "..."
# ^^ make sure to use this
```
- Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR.
</issue>
<code>
[start of mlflow/openai/utils.py]
1 import json
2 import os
3 import time
4 from contextlib import contextmanager
5 from unittest import mock
6
7 import requests
8
9 import mlflow
10
11 TEST_CONTENT = "test"
12
13 TEST_SOURCE_DOCUMENTS = [
14 {
15 "page_content": "We see the unity among leaders ...",
16 "metadata": {"source": "tests/langchain/state_of_the_union.txt"},
17 },
18 ]
19 TEST_INTERMEDIATE_STEPS = (
20 [
21 {
22 "tool": "Search",
23 "tool_input": "High temperature in SF yesterday",
24 "log": " I need to find the temperature first...",
25 "result": "San Francisco...",
26 },
27 ],
28 )
29
30
31 class _MockResponse:
32 def __init__(self, status_code, json_data):
33 self.status_code = status_code
34 self.content = json.dumps(json_data).encode()
35 self.headers = {"Content-Type": "application/json"}
36 self.text = mlflow.__version__
37
38
39 def _chat_completion_json_sample(content):
40 # https://platform.openai.com/docs/api-reference/chat/create
41 return {
42 "id": "chatcmpl-123",
43 "object": "chat.completion",
44 "created": 1677652288,
45 "choices": [
46 {
47 "index": 0,
48 "message": {"role": "assistant", "content": content},
49 "finish_reason": "stop",
50 "text": content,
51 }
52 ],
53 "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
54 }
55
56
57 def _completion_json_sample(content):
58 return {
59 "id": "cmpl-123",
60 "object": "text_completion",
61 "created": 1589478378,
62 "model": "text-davinci-003",
63 "choices": [{"text": content, "index": 0, "finish_reason": "length"}],
64 "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
65 }
66
67
68 def _models_retrieve_json_sample():
69 # https://platform.openai.com/docs/api-reference/models/retrieve
70 return {
71 "id": "gpt-3.5-turbo",
72 "object": "model",
73 "owned_by": "openai",
74 "permission": [],
75 }
76
77
78 def _mock_chat_completion_response(content=TEST_CONTENT):
79 return _MockResponse(200, _chat_completion_json_sample(content))
80
81
82 def _mock_completion_response(content=TEST_CONTENT):
83 return _MockResponse(200, _completion_json_sample(content))
84
85
86 def _mock_embeddings_response(num_texts):
87 return _MockResponse(
88 200,
89 {
90 "object": "list",
91 "data": [
92 {
93 "object": "embedding",
94 "embedding": [
95 0.0,
96 ],
97 "index": i,
98 }
99 for i in range(num_texts)
100 ],
101 "model": "text-embedding-ada-002",
102 "usage": {"prompt_tokens": 8, "total_tokens": 8},
103 },
104 )
105
106
107 def _mock_models_retrieve_response():
108 return _MockResponse(200, _models_retrieve_json_sample())
109
110
111 @contextmanager
112 def _mock_request(**kwargs):
113 with mock.patch("requests.Session.request", **kwargs) as m:
114 yield m
115
116
117 def _mock_openai_request():
118 original = requests.Session.request
119
120 def request(*args, **kwargs):
121 url = args[2] if len(args) > 2 else kwargs.get("url")
122
123 if url.endswith("/chat/completions"):
124 messages = json.loads(kwargs.get("data")).get("messages")
125 return _mock_chat_completion_response(content=json.dumps(messages))
126 elif url.endswith("/completions"):
127 prompt = json.loads(kwargs.get("data")).get("prompt")
128 return _mock_completion_response(content=json.dumps(prompt))
129 elif url.endswith("/embeddings"):
130 inp = json.loads(kwargs.get("data")).get("input")
131 return _mock_embeddings_response(len(inp) if isinstance(inp, list) else 1)
132 else:
133 return original(*args, **kwargs)
134
135 return _mock_request(new=request)
136
137
138 def _validate_model_params(task, model, params):
139 if not params:
140 return
141
142 if any(key in model for key in params):
143 raise mlflow.MlflowException.invalid_parameter_value(
144 f"Providing any of {list(model.keys())} as parameters in the signature is not "
145 "allowed because they were indicated as part of the OpenAI model. Either remove "
146 "the argument when logging the model or remove the parameter from the signature.",
147 )
148 if "batch_size" in params and task == "chat.completions":
149 raise mlflow.MlflowException.invalid_parameter_value(
150 "Parameter `batch_size` is not supported for task `chat.completions`"
151 )
152
153
154 class _OAITokenHolder:
155 def __init__(self, api_type):
156 import openai
157
158 self._api_token = None
159 self._credential = None
160 self._is_azure_ad = api_type in ("azure_ad", "azuread")
161 self._key_configured = bool(openai.api_key) or "OPENAI_API_KEY" in os.environ
162
163 if self._is_azure_ad and not self._key_configured:
164 try:
165 from azure.identity import DefaultAzureCredential
166 except ImportError:
167 raise mlflow.MlflowException(
168 "Using API type `azure_ad` or `azuread` requires the package"
169 " `azure-identity` to be installed."
170 )
171 self._credential = DefaultAzureCredential()
172
173 def validate(self, logger=None):
174 """
175 Validates the token or API key configured for accessing the OpenAI resource.
176 """
177 import openai
178
179 if self._key_configured:
180 return
181
182 if self._is_azure_ad:
183 if not self._api_token or self._api_token.expires_on < time.time() + 60:
184 from azure.core.exceptions import ClientAuthenticationError
185
186 if logger:
187 logger.debug(
188 "Token for Azure AD is either expired or unset. Attempting to "
189 "acquire a new token."
190 )
191 try:
192 self._api_token = self._credential.get_token(
193 "https://cognitiveservices.azure.com/.default"
194 )
195 except ClientAuthenticationError as err:
196 raise mlflow.MlflowException(
197 "Unable to acquire a valid Azure AD token for the resource due to "
198 f"the following error: {err.message}"
199 ) from err
200 openai.api_key = self._api_token.token
201 if logger:
202 logger.debug("Token refreshed successfully")
203 else:
204 raise mlflow.MlflowException(
205 "OpenAI API key must be set in the ``OPENAI_API_KEY`` environment variable."
206 )
207
[end of mlflow/openai/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mlflow/openai/utils.py b/mlflow/openai/utils.py
--- a/mlflow/openai/utils.py
+++ b/mlflow/openai/utils.py
@@ -158,7 +158,13 @@
self._api_token = None
self._credential = None
self._is_azure_ad = api_type in ("azure_ad", "azuread")
- self._key_configured = bool(openai.api_key) or "OPENAI_API_KEY" in os.environ
+ self._key_configured = bool(openai.api_key)
+
+ # set the api key if it's not set. this is to deal with cases where the
+ # user sets the environment variable after importing the `openai` module
+ if not bool(openai.api_key) and "OPENAI_API_KEY" in os.environ:
+ openai.api_key = os.environ["OPENAI_API_KEY"]
+ self._key_configured = True
if self._is_azure_ad and not self._key_configured:
try:
| {"golden_diff": "diff --git a/mlflow/openai/utils.py b/mlflow/openai/utils.py\n--- a/mlflow/openai/utils.py\n+++ b/mlflow/openai/utils.py\n@@ -158,7 +158,13 @@\n self._api_token = None\n self._credential = None\n self._is_azure_ad = api_type in (\"azure_ad\", \"azuread\")\n- self._key_configured = bool(openai.api_key) or \"OPENAI_API_KEY\" in os.environ\n+ self._key_configured = bool(openai.api_key)\n+\n+ # set the api key if it's not set. this is to deal with cases where the\n+ # user sets the environment variable after importing the `openai` module\n+ if not bool(openai.api_key) and \"OPENAI_API_KEY\" in os.environ:\n+ openai.api_key = os.environ[\"OPENAI_API_KEY\"]\n+ self._key_configured = True\n \n if self._is_azure_ad and not self._key_configured:\n try:\n", "issue": "Set `openai.api_key` if `OPENAI_API_KEY` exists\n### Summary\r\n\r\nThe line that needs a fix:\r\n\r\nhttps://github.com/mlflow/mlflow/blob/a4db4ee826765f0365944f83124af8840c72f4d8/mlflow/openai/utils.py#L161\r\n\r\nJust because `\"OPENAI_API_KEY\" in os.environ` is True doesn't mean `openai.api_key` is set. The following code currently doens't work.\r\n\r\n```python\r\nimport os\r\n\r\nassert \"OPENAI_API_KEY\" not in os.environ\r\n\r\nimport openai\r\n\r\n# Set OPENAI_API_KEY after openai is imported\r\nos.environ[\"OPENAI_API_KEY\"] = \"...\"\r\n\r\nbasic_qa_model = mlflow.openai.log_model(\r\n model=\"gpt-3.5-turbo\",\r\n task=openai.ChatCompletion,\r\n artifact_path=\"model\",\r\n messages=[\r\n {\"role\": \"user\", \"content\": \"{question}\"},\r\n ],\r\n)\r\nloaded_model = mlflow.pyfunc.load_model(basic_qa_model.model_uri)\r\nloaded_model.predict([\"What is MLflow\"]) # this line throws because `openai.api_key` is None\r\n```\r\n\r\n### Notes\r\n\r\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the `-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `#{issue_number}` (e.g. 
`#123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "import json\nimport os\nimport time\nfrom contextlib import contextmanager\nfrom unittest import mock\n\nimport requests\n\nimport mlflow\n\nTEST_CONTENT = \"test\"\n\nTEST_SOURCE_DOCUMENTS = [\n {\n \"page_content\": \"We see the unity among leaders ...\",\n \"metadata\": {\"source\": \"tests/langchain/state_of_the_union.txt\"},\n },\n]\nTEST_INTERMEDIATE_STEPS = (\n [\n {\n \"tool\": \"Search\",\n \"tool_input\": \"High temperature in SF yesterday\",\n \"log\": \" I need to find the temperature first...\",\n \"result\": \"San Francisco...\",\n },\n ],\n)\n\n\nclass _MockResponse:\n def __init__(self, status_code, json_data):\n self.status_code = status_code\n self.content = json.dumps(json_data).encode()\n self.headers = {\"Content-Type\": \"application/json\"}\n self.text = mlflow.__version__\n\n\ndef _chat_completion_json_sample(content):\n # https://platform.openai.com/docs/api-reference/chat/create\n return {\n \"id\": \"chatcmpl-123\",\n \"object\": \"chat.completion\",\n \"created\": 1677652288,\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\"role\": \"assistant\", \"content\": content},\n \"finish_reason\": \"stop\",\n \"text\": content,\n }\n ],\n \"usage\": {\"prompt_tokens\": 9, \"completion_tokens\": 12, \"total_tokens\": 21},\n }\n\n\ndef _completion_json_sample(content):\n return {\n \"id\": \"cmpl-123\",\n \"object\": \"text_completion\",\n \"created\": 1589478378,\n \"model\": \"text-davinci-003\",\n \"choices\": [{\"text\": content, \"index\": 0, \"finish_reason\": \"length\"}],\n \"usage\": {\"prompt_tokens\": 5, \"completion_tokens\": 7, \"total_tokens\": 12},\n }\n\n\ndef _models_retrieve_json_sample():\n # https://platform.openai.com/docs/api-reference/models/retrieve\n return {\n \"id\": \"gpt-3.5-turbo\",\n \"object\": \"model\",\n \"owned_by\": \"openai\",\n \"permission\": [],\n }\n\n\ndef _mock_chat_completion_response(content=TEST_CONTENT):\n return _MockResponse(200, _chat_completion_json_sample(content))\n\n\ndef _mock_completion_response(content=TEST_CONTENT):\n return _MockResponse(200, _completion_json_sample(content))\n\n\ndef _mock_embeddings_response(num_texts):\n return _MockResponse(\n 200,\n {\n \"object\": \"list\",\n \"data\": [\n {\n \"object\": \"embedding\",\n \"embedding\": [\n 0.0,\n ],\n \"index\": i,\n }\n for i in range(num_texts)\n ],\n \"model\": \"text-embedding-ada-002\",\n \"usage\": {\"prompt_tokens\": 8, \"total_tokens\": 8},\n },\n )\n\n\ndef _mock_models_retrieve_response():\n return _MockResponse(200, _models_retrieve_json_sample())\n\n\n@contextmanager\ndef _mock_request(**kwargs):\n with mock.patch(\"requests.Session.request\", **kwargs) as m:\n yield m\n\n\ndef _mock_openai_request():\n original = requests.Session.request\n\n def request(*args, **kwargs):\n url = args[2] if len(args) > 2 else kwargs.get(\"url\")\n\n if url.endswith(\"/chat/completions\"):\n messages = json.loads(kwargs.get(\"data\")).get(\"messages\")\n return _mock_chat_completion_response(content=json.dumps(messages))\n elif url.endswith(\"/completions\"):\n prompt = json.loads(kwargs.get(\"data\")).get(\"prompt\")\n return _mock_completion_response(content=json.dumps(prompt))\n elif url.endswith(\"/embeddings\"):\n inp = json.loads(kwargs.get(\"data\")).get(\"input\")\n return _mock_embeddings_response(len(inp) if isinstance(inp, list) else 1)\n else:\n return original(*args, **kwargs)\n\n return _mock_request(new=request)\n\n\ndef _validate_model_params(task, model, 
params):\n if not params:\n return\n\n if any(key in model for key in params):\n raise mlflow.MlflowException.invalid_parameter_value(\n f\"Providing any of {list(model.keys())} as parameters in the signature is not \"\n \"allowed because they were indicated as part of the OpenAI model. Either remove \"\n \"the argument when logging the model or remove the parameter from the signature.\",\n )\n if \"batch_size\" in params and task == \"chat.completions\":\n raise mlflow.MlflowException.invalid_parameter_value(\n \"Parameter `batch_size` is not supported for task `chat.completions`\"\n )\n\n\nclass _OAITokenHolder:\n def __init__(self, api_type):\n import openai\n\n self._api_token = None\n self._credential = None\n self._is_azure_ad = api_type in (\"azure_ad\", \"azuread\")\n self._key_configured = bool(openai.api_key) or \"OPENAI_API_KEY\" in os.environ\n\n if self._is_azure_ad and not self._key_configured:\n try:\n from azure.identity import DefaultAzureCredential\n except ImportError:\n raise mlflow.MlflowException(\n \"Using API type `azure_ad` or `azuread` requires the package\"\n \" `azure-identity` to be installed.\"\n )\n self._credential = DefaultAzureCredential()\n\n def validate(self, logger=None):\n \"\"\"\n Validates the token or API key configured for accessing the OpenAI resource.\n \"\"\"\n import openai\n\n if self._key_configured:\n return\n\n if self._is_azure_ad:\n if not self._api_token or self._api_token.expires_on < time.time() + 60:\n from azure.core.exceptions import ClientAuthenticationError\n\n if logger:\n logger.debug(\n \"Token for Azure AD is either expired or unset. Attempting to \"\n \"acquire a new token.\"\n )\n try:\n self._api_token = self._credential.get_token(\n \"https://cognitiveservices.azure.com/.default\"\n )\n except ClientAuthenticationError as err:\n raise mlflow.MlflowException(\n \"Unable to acquire a valid Azure AD token for the resource due to \"\n f\"the following error: {err.message}\"\n ) from err\n openai.api_key = self._api_token.token\n if logger:\n logger.debug(\"Token refreshed successfully\")\n else:\n raise mlflow.MlflowException(\n \"OpenAI API key must be set in the ``OPENAI_API_KEY`` environment variable.\"\n )\n", "path": "mlflow/openai/utils.py"}]} | 2,906 | 229 |
gh_patches_debug_23832 | rasdani/github-patches | git_diff | openai__gym-2630 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug Report] Box bounded below and above doesn't work when dtype is int
**Describe the bug**
A `Box` space created with dtype `int` and `low` (`high`) values of `-np.inf` (`np.inf`) will still report `bounded_below` (`bounded_above`) as True.
**Code example**
```python3
from gym.spaces import Box
import numpy as np
unbounded_below = Box(
np.array([0, 13, -3, -np.inf]),
np.array([0, 20, 0, 0]),
dtype=int
)
unbounded_above = Box(
np.array([0, 12, 20, 0]),
np.array([np.inf, 20, 24, np.inf]),
dtype=int
)
print(unbounded_above.bounded_above)
print(unbounded_below.bounded_below)
>>> [ True True True True]
>>> [ True True True True]
```
**My analysis**
At the [broadcast](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L64) step, [get_inf](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L226) turns `np.inf` into a finite integer equivalent. Determining [boundedness](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L86) then compares directly against `np.inf` rather than against the integer equivalents returned by `get_inf`, so those dimensions are always reported as bounded.
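
To make the mechanism concrete (a small illustrative sketch only; the sentinel below mirrors what `get_inf` returns for the `-` sign):

```python3
import numpy as np

# get_inf(np.int64, "-") yields a finite integer sentinel, so the
# boundedness check against -np.inf can never evaluate to False.
low_sentinel = np.iinfo(np.int64).min + 2
print(-np.inf < low_sentinel)  # True, hence bounded_below reports True
```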
**System Info**
Describe the characteristic of your environment:
* Gym installed with pip
* MacOS 10.15.7
* Python version: 3.7.9
* numpy 1.21.5
**Additional context**
I know it may seem strange to use `np.inf` with an integer-Box, but I use integer-Boxes all the time, and I need to check against `np.inf`.
### Checklist
- [X] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)
</issue>
<code>
[start of gym/spaces/box.py]
1 from __future__ import annotations
2
3 from typing import Tuple, SupportsFloat, Union, Type, Optional, Sequence
4
5 import numpy as np
6
7 from .space import Space
8 from gym import logger
9
10
11 def _short_repr(arr: np.ndarray) -> str:
12 """Create a shortened string representation of a numpy array.
13
14 If arr is a multiple of the all-ones vector, return a string representation of the multiplier.
15 Otherwise, return a string representation of the entire array.
16 """
17 if arr.size != 0 and np.min(arr) == np.max(arr):
18 return str(np.min(arr))
19 return str(arr)
20
21
22 class Box(Space[np.ndarray]):
23 """
24 A (possibly unbounded) box in R^n. Specifically, a Box represents the
25 Cartesian product of n closed intervals. Each interval has the form of one
26 of [a, b], (-oo, b], [a, oo), or (-oo, oo).
27
28 There are two common use cases:
29
30 * Identical bound for each dimension::
31 >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)
32 Box(3, 4)
33
34 * Independent bound for each dimension::
35 >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)
36 Box(2,)
37
38 """
39
40 def __init__(
41 self,
42 low: Union[SupportsFloat, np.ndarray],
43 high: Union[SupportsFloat, np.ndarray],
44 shape: Optional[Sequence[int]] = None,
45 dtype: Type = np.float32,
46 seed: Optional[int] = None,
47 ):
48 assert dtype is not None, "dtype must be explicitly provided. "
49 self.dtype = np.dtype(dtype)
50
51 # determine shape if it isn't provided directly
52 if shape is not None:
53 shape = tuple(shape)
54 elif not np.isscalar(low):
55 shape = low.shape # type: ignore
56 elif not np.isscalar(high):
57 shape = high.shape # type: ignore
58 else:
59 raise ValueError(
60 "shape must be provided or inferred from the shapes of low or high"
61 )
62 assert isinstance(shape, tuple)
63
64 low = _broadcast(low, dtype, shape, inf_sign="-") # type: ignore
65 high = _broadcast(high, dtype, shape, inf_sign="+")
66
67 assert isinstance(low, np.ndarray)
68 assert low.shape == shape, "low.shape doesn't match provided shape"
69 assert isinstance(high, np.ndarray)
70 assert high.shape == shape, "high.shape doesn't match provided shape"
71
72 self._shape: Tuple[int, ...] = shape
73
74 low_precision = get_precision(low.dtype)
75 high_precision = get_precision(high.dtype)
76 dtype_precision = get_precision(self.dtype)
77 if min(low_precision, high_precision) > dtype_precision: # type: ignore
78 logger.warn(f"Box bound precision lowered by casting to {self.dtype}")
79 self.low = low.astype(self.dtype)
80 self.high = high.astype(self.dtype)
81
82 self.low_repr = _short_repr(self.low)
83 self.high_repr = _short_repr(self.high)
84
85 # Boolean arrays which indicate the interval type for each coordinate
86 self.bounded_below = -np.inf < self.low
87 self.bounded_above = np.inf > self.high
88
89 super().__init__(self.shape, self.dtype, seed)
90
91 @property
92 def shape(self) -> Tuple[int, ...]:
93 """Has stricter type than gym.Space - never None."""
94 return self._shape
95
96 def is_bounded(self, manner: str = "both") -> bool:
97 below = bool(np.all(self.bounded_below))
98 above = bool(np.all(self.bounded_above))
99 if manner == "both":
100 return below and above
101 elif manner == "below":
102 return below
103 elif manner == "above":
104 return above
105 else:
106 raise ValueError("manner is not in {'below', 'above', 'both'}")
107
108 def sample(self) -> np.ndarray:
109 """
110 Generates a single random sample inside of the Box.
111
112 In creating a sample of the box, each coordinate is sampled according to
113 the form of the interval:
114
115 * [a, b] : uniform distribution
116 * [a, oo) : shifted exponential distribution
117 * (-oo, b] : shifted negative exponential distribution
118 * (-oo, oo) : normal distribution
119 """
120 high = self.high if self.dtype.kind == "f" else self.high.astype("int64") + 1
121 sample = np.empty(self.shape)
122
123 # Masking arrays which classify the coordinates according to interval
124 # type
125 unbounded = ~self.bounded_below & ~self.bounded_above
126 upp_bounded = ~self.bounded_below & self.bounded_above
127 low_bounded = self.bounded_below & ~self.bounded_above
128 bounded = self.bounded_below & self.bounded_above
129
130 # Vectorized sampling by interval type
131 sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)
132
133 sample[low_bounded] = (
134 self.np_random.exponential(size=low_bounded[low_bounded].shape)
135 + self.low[low_bounded]
136 )
137
138 sample[upp_bounded] = (
139 -self.np_random.exponential(size=upp_bounded[upp_bounded].shape)
140 + self.high[upp_bounded]
141 )
142
143 sample[bounded] = self.np_random.uniform(
144 low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape
145 )
146 if self.dtype.kind == "i":
147 sample = np.floor(sample)
148
149 return sample.astype(self.dtype)
150
151 def contains(self, x) -> bool:
152 if not isinstance(x, np.ndarray):
153 logger.warn("Casting input x to numpy array.")
154 x = np.asarray(x, dtype=self.dtype)
155
156 return bool(
157 np.can_cast(x.dtype, self.dtype)
158 and x.shape == self.shape
159 and np.all(x >= self.low)
160 and np.all(x <= self.high)
161 )
162
163 def to_jsonable(self, sample_n):
164 return np.array(sample_n).tolist()
165
166 def from_jsonable(self, sample_n: Sequence[SupportsFloat]) -> list[np.ndarray]:
167 return [np.asarray(sample) for sample in sample_n]
168
169 def __repr__(self) -> str:
170 return f"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})"
171
172 def __eq__(self, other) -> bool:
173 return (
174 isinstance(other, Box)
175 and (self.shape == other.shape)
176 and np.allclose(self.low, other.low)
177 and np.allclose(self.high, other.high)
178 )
179
180
181 def get_inf(dtype, sign: str) -> SupportsFloat:
182 """Returns an infinite that doesn't break things.
183 `dtype` must be an `np.dtype`
184 `bound` must be either `min` or `max`
185 """
186 if np.dtype(dtype).kind == "f":
187 if sign == "+":
188 return np.inf
189 elif sign == "-":
190 return -np.inf
191 else:
192 raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
193 elif np.dtype(dtype).kind == "i":
194 if sign == "+":
195 return np.iinfo(dtype).max - 2
196 elif sign == "-":
197 return np.iinfo(dtype).min + 2
198 else:
199 raise TypeError(f"Unknown sign {sign}, use either '+' or '-'")
200 else:
201 raise ValueError(f"Unknown dtype {dtype} for infinite bounds")
202
203
204 def get_precision(dtype) -> SupportsFloat:
205 if np.issubdtype(dtype, np.floating):
206 return np.finfo(dtype).precision
207 else:
208 return np.inf
209
210
211 def _broadcast(
212 value: Union[SupportsFloat, np.ndarray],
213 dtype,
214 shape: tuple[int, ...],
215 inf_sign: str,
216 ) -> np.ndarray:
217 """handle infinite bounds and broadcast at the same time if needed"""
218 if np.isscalar(value):
219 value = get_inf(dtype, inf_sign) if np.isinf(value) else value # type: ignore
220 value = np.full(shape, value, dtype=dtype)
221 else:
222 assert isinstance(value, np.ndarray)
223 if np.any(np.isinf(value)):
224 # create new array with dtype, but maintain old one to preserve np.inf
225 temp = value.astype(dtype)
226 temp[np.isinf(value)] = get_inf(dtype, inf_sign)
227 value = temp
228 return value
229
[end of gym/spaces/box.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gym/spaces/box.py b/gym/spaces/box.py
--- a/gym/spaces/box.py
+++ b/gym/spaces/box.py
@@ -61,8 +61,14 @@
)
assert isinstance(shape, tuple)
+ # Capture the boundedness information before replacing np.inf with get_inf
+ _low = np.full(shape, low, dtype=float) if np.isscalar(low) else low
+ self.bounded_below = -np.inf < _low
+ _high = np.full(shape, high, dtype=float) if np.isscalar(high) else high
+ self.bounded_above = np.inf > _high
+
low = _broadcast(low, dtype, shape, inf_sign="-") # type: ignore
- high = _broadcast(high, dtype, shape, inf_sign="+")
+ high = _broadcast(high, dtype, shape, inf_sign="+") # type: ignore
assert isinstance(low, np.ndarray)
assert low.shape == shape, "low.shape doesn't match provided shape"
@@ -82,10 +88,6 @@
self.low_repr = _short_repr(self.low)
self.high_repr = _short_repr(self.high)
- # Boolean arrays which indicate the interval type for each coordinate
- self.bounded_below = -np.inf < self.low
- self.bounded_above = np.inf > self.high
-
super().__init__(self.shape, self.dtype, seed)
@property
| {"golden_diff": "diff --git a/gym/spaces/box.py b/gym/spaces/box.py\n--- a/gym/spaces/box.py\n+++ b/gym/spaces/box.py\n@@ -61,8 +61,14 @@\n )\n assert isinstance(shape, tuple)\n \n+ # Capture the boundedness information before replacing np.inf with get_inf\n+ _low = np.full(shape, low, dtype=float) if np.isscalar(low) else low\n+ self.bounded_below = -np.inf < _low\n+ _high = np.full(shape, high, dtype=float) if np.isscalar(high) else high\n+ self.bounded_above = np.inf > _high\n+\n low = _broadcast(low, dtype, shape, inf_sign=\"-\") # type: ignore\n- high = _broadcast(high, dtype, shape, inf_sign=\"+\")\n+ high = _broadcast(high, dtype, shape, inf_sign=\"+\") # type: ignore\n \n assert isinstance(low, np.ndarray)\n assert low.shape == shape, \"low.shape doesn't match provided shape\"\n@@ -82,10 +88,6 @@\n self.low_repr = _short_repr(self.low)\n self.high_repr = _short_repr(self.high)\n \n- # Boolean arrays which indicate the interval type for each coordinate\n- self.bounded_below = -np.inf < self.low\n- self.bounded_above = np.inf > self.high\n-\n super().__init__(self.shape, self.dtype, seed)\n \n @property\n", "issue": "[Bug Report] Box bounded below and above doesn't work when dtype is int\n**Describe the bug**\r\nA `Box` space that is created with dtype `int` and `low` (`high`) values being `-np.inf` (`np.inf`) will return `bounded_{above,below}` as True.\r\n\r\n**Code example**\r\n```python3\r\nfrom gym.spaces import Box\r\nimport numpy as np\r\n\r\nunbounded_below = Box(\r\n np.array([0, 13, -3, -np.inf]),\r\n np.array([0, 20, 0, 0]),\r\n dtype=int\r\n)\r\n\r\nunbounded_above = Box(\r\n np.array([0, 12, 20, 0]),\r\n np.array([np.inf, 20, 24, np.inf]),\r\n dtype=int\r\n)\r\n\r\nprint(unbounded_above.bounded_above)\r\nprint(unbounded_below.bounded_below)\r\n\r\n>>> [ True True True True]\r\n>>> [ True True True True]\r\n```\r\n\r\n**My analysis**\r\nAt the [broadcast](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L64), [get_inf](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L226) turns `np.inf` into their integer equivalents. Determining [boundedness](https://github.com/openai/gym/blob/master/gym/spaces/box.py#L86) is comparing directly with `np.inf`, not with the integer equivalents given from `get_inf`.\r\n\r\n**System Info**\r\nDescribe the characteristic of your environment:\r\n * Gym installed with pip\r\n * MacOS 10.15.7\r\n * Python version: 3.7.9\r\n * numpy 1.21.5\r\n\r\n**Additional context**\r\nI know it may seem strange to use `np.inf` with an integer-Box, but I use integer-Boxes all the time, and I need to check against `np.inf`.\r\n\r\n### Checklist\r\n\r\n- [X] I have checked that there is no similar [issue](https://github.com/openai/gym/issues) in the repo (**required**)\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Tuple, SupportsFloat, Union, Type, Optional, Sequence\n\nimport numpy as np\n\nfrom .space import Space\nfrom gym import logger\n\n\ndef _short_repr(arr: np.ndarray) -> str:\n \"\"\"Create a shortened string representation of a numpy array.\n\n If arr is a multiple of the all-ones vector, return a string representation of the multiplier.\n Otherwise, return a string representation of the entire array.\n \"\"\"\n if arr.size != 0 and np.min(arr) == np.max(arr):\n return str(np.min(arr))\n return str(arr)\n\n\nclass Box(Space[np.ndarray]):\n \"\"\"\n A (possibly unbounded) box in R^n. Specifically, a Box represents the\n Cartesian product of n closed intervals. 
Each interval has the form of one\n of [a, b], (-oo, b], [a, oo), or (-oo, oo).\n\n There are two common use cases:\n\n * Identical bound for each dimension::\n >>> Box(low=-1.0, high=2.0, shape=(3, 4), dtype=np.float32)\n Box(3, 4)\n\n * Independent bound for each dimension::\n >>> Box(low=np.array([-1.0, -2.0]), high=np.array([2.0, 4.0]), dtype=np.float32)\n Box(2,)\n\n \"\"\"\n\n def __init__(\n self,\n low: Union[SupportsFloat, np.ndarray],\n high: Union[SupportsFloat, np.ndarray],\n shape: Optional[Sequence[int]] = None,\n dtype: Type = np.float32,\n seed: Optional[int] = None,\n ):\n assert dtype is not None, \"dtype must be explicitly provided. \"\n self.dtype = np.dtype(dtype)\n\n # determine shape if it isn't provided directly\n if shape is not None:\n shape = tuple(shape)\n elif not np.isscalar(low):\n shape = low.shape # type: ignore\n elif not np.isscalar(high):\n shape = high.shape # type: ignore\n else:\n raise ValueError(\n \"shape must be provided or inferred from the shapes of low or high\"\n )\n assert isinstance(shape, tuple)\n\n low = _broadcast(low, dtype, shape, inf_sign=\"-\") # type: ignore\n high = _broadcast(high, dtype, shape, inf_sign=\"+\")\n\n assert isinstance(low, np.ndarray)\n assert low.shape == shape, \"low.shape doesn't match provided shape\"\n assert isinstance(high, np.ndarray)\n assert high.shape == shape, \"high.shape doesn't match provided shape\"\n\n self._shape: Tuple[int, ...] = shape\n\n low_precision = get_precision(low.dtype)\n high_precision = get_precision(high.dtype)\n dtype_precision = get_precision(self.dtype)\n if min(low_precision, high_precision) > dtype_precision: # type: ignore\n logger.warn(f\"Box bound precision lowered by casting to {self.dtype}\")\n self.low = low.astype(self.dtype)\n self.high = high.astype(self.dtype)\n\n self.low_repr = _short_repr(self.low)\n self.high_repr = _short_repr(self.high)\n\n # Boolean arrays which indicate the interval type for each coordinate\n self.bounded_below = -np.inf < self.low\n self.bounded_above = np.inf > self.high\n\n super().__init__(self.shape, self.dtype, seed)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"Has stricter type than gym.Space - never None.\"\"\"\n return self._shape\n\n def is_bounded(self, manner: str = \"both\") -> bool:\n below = bool(np.all(self.bounded_below))\n above = bool(np.all(self.bounded_above))\n if manner == \"both\":\n return below and above\n elif manner == \"below\":\n return below\n elif manner == \"above\":\n return above\n else:\n raise ValueError(\"manner is not in {'below', 'above', 'both'}\")\n\n def sample(self) -> np.ndarray:\n \"\"\"\n Generates a single random sample inside of the Box.\n\n In creating a sample of the box, each coordinate is sampled according to\n the form of the interval:\n\n * [a, b] : uniform distribution\n * [a, oo) : shifted exponential distribution\n * (-oo, b] : shifted negative exponential distribution\n * (-oo, oo) : normal distribution\n \"\"\"\n high = self.high if self.dtype.kind == \"f\" else self.high.astype(\"int64\") + 1\n sample = np.empty(self.shape)\n\n # Masking arrays which classify the coordinates according to interval\n # type\n unbounded = ~self.bounded_below & ~self.bounded_above\n upp_bounded = ~self.bounded_below & self.bounded_above\n low_bounded = self.bounded_below & ~self.bounded_above\n bounded = self.bounded_below & self.bounded_above\n\n # Vectorized sampling by interval type\n sample[unbounded] = self.np_random.normal(size=unbounded[unbounded].shape)\n\n sample[low_bounded] = (\n 
self.np_random.exponential(size=low_bounded[low_bounded].shape)\n + self.low[low_bounded]\n )\n\n sample[upp_bounded] = (\n -self.np_random.exponential(size=upp_bounded[upp_bounded].shape)\n + self.high[upp_bounded]\n )\n\n sample[bounded] = self.np_random.uniform(\n low=self.low[bounded], high=high[bounded], size=bounded[bounded].shape\n )\n if self.dtype.kind == \"i\":\n sample = np.floor(sample)\n\n return sample.astype(self.dtype)\n\n def contains(self, x) -> bool:\n if not isinstance(x, np.ndarray):\n logger.warn(\"Casting input x to numpy array.\")\n x = np.asarray(x, dtype=self.dtype)\n\n return bool(\n np.can_cast(x.dtype, self.dtype)\n and x.shape == self.shape\n and np.all(x >= self.low)\n and np.all(x <= self.high)\n )\n\n def to_jsonable(self, sample_n):\n return np.array(sample_n).tolist()\n\n def from_jsonable(self, sample_n: Sequence[SupportsFloat]) -> list[np.ndarray]:\n return [np.asarray(sample) for sample in sample_n]\n\n def __repr__(self) -> str:\n return f\"Box({self.low_repr}, {self.high_repr}, {self.shape}, {self.dtype})\"\n\n def __eq__(self, other) -> bool:\n return (\n isinstance(other, Box)\n and (self.shape == other.shape)\n and np.allclose(self.low, other.low)\n and np.allclose(self.high, other.high)\n )\n\n\ndef get_inf(dtype, sign: str) -> SupportsFloat:\n \"\"\"Returns an infinite that doesn't break things.\n `dtype` must be an `np.dtype`\n `bound` must be either `min` or `max`\n \"\"\"\n if np.dtype(dtype).kind == \"f\":\n if sign == \"+\":\n return np.inf\n elif sign == \"-\":\n return -np.inf\n else:\n raise TypeError(f\"Unknown sign {sign}, use either '+' or '-'\")\n elif np.dtype(dtype).kind == \"i\":\n if sign == \"+\":\n return np.iinfo(dtype).max - 2\n elif sign == \"-\":\n return np.iinfo(dtype).min + 2\n else:\n raise TypeError(f\"Unknown sign {sign}, use either '+' or '-'\")\n else:\n raise ValueError(f\"Unknown dtype {dtype} for infinite bounds\")\n\n\ndef get_precision(dtype) -> SupportsFloat:\n if np.issubdtype(dtype, np.floating):\n return np.finfo(dtype).precision\n else:\n return np.inf\n\n\ndef _broadcast(\n value: Union[SupportsFloat, np.ndarray],\n dtype,\n shape: tuple[int, ...],\n inf_sign: str,\n) -> np.ndarray:\n \"\"\"handle infinite bounds and broadcast at the same time if needed\"\"\"\n if np.isscalar(value):\n value = get_inf(dtype, inf_sign) if np.isinf(value) else value # type: ignore\n value = np.full(shape, value, dtype=dtype)\n else:\n assert isinstance(value, np.ndarray)\n if np.any(np.isinf(value)):\n # create new array with dtype, but maintain old one to preserve np.inf\n temp = value.astype(dtype)\n temp[np.isinf(value)] = get_inf(dtype, inf_sign)\n value = temp\n return value\n", "path": "gym/spaces/box.py"}]} | 3,468 | 334 |
gh_patches_debug_33463 | rasdani/github-patches | git_diff | coala__coala-1543 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
git commit bear doesn't work if coala isn't run in the same directory
It should also work if I provide a coafile with `-c`. @Makman2, can you fix that soonish? This is essentially why it breaks everywhere in my gitmate experiments.
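
A rough sketch of one possible direction (assuming the section knows where the coafile lives, e.g. via `get_config_directory` from `coalib.settings.ConfigurationGathering`): run the git command from the configuration directory and restore the working directory afterwards.

```python
import os

from coalib.misc.Shell import run_shell_command
from coalib.settings.ConfigurationGathering import get_config_directory


def run_git_in_config_dir(section, command="git log -1 --pretty=%B"):
    # Sketch: execute the git command from the directory of the coafile
    # instead of whatever directory coala was invoked from.
    config_dir = get_config_directory(section)
    old_dir = os.getcwd()
    if config_dir:
        os.chdir(config_dir)
    try:
        return run_shell_command(command)
    finally:
        os.chdir(old_dir)
```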
</issue>
<code>
[start of coalib/bears/Bear.py]
1 import traceback
2 from pyprint.Printer import Printer
3
4 from coalib.misc.Decorators import enforce_signature
5 from coalib.output.printers.LogPrinter import LogPrinter
6 from coalib.settings.FunctionMetadata import FunctionMetadata
7 from coalib.settings.Section import Section
8
9
10 class Bear(Printer, LogPrinter):
11 """
12 A bear contains the actual subroutine that is responsible for checking
13 source code for certain specifications. However it can actually do
14 whatever it wants with the files it gets. If you are missing some Result
15 type, feel free to contact us and/or help us extending the coalib.
16
17 This is the base class for every bear. If you want to write an bear, you
18 will probably want to look at the GlobalBear and LocalBear classes that
19 inherit from this class. In any case you'll want to overwrite at least the
20 run method. You can send debug/warning/error messages through the
21 debug(), warn(), err() functions. These will send the
22 appropriate messages so that they are outputted. Be aware that if you use
23 err(), you are expected to also terminate the bear run-through
24 immediately.
25
26 If you need some setup or teardown for your bear, feel free to overwrite
27 the set_up() and tear_down() functions. They will be invoked
28 before/after every run invocation.
29
30 Settings are available at all times through self.section.
31 """
32
33 @enforce_signature
34 def __init__(self,
35 section: Section,
36 message_queue,
37 timeout=0):
38 """
39 Constructs a new bear.
40
41 :param section: The section object where bear settings are
42 contained.
43 :param message_queue: The queue object for messages. Can be `None`.
44 :param timeout: The time the bear is allowed to run. To set no
45 time limit, use 0.
46 :raises TypeError: Raised when `message_queue` is no queue.
47 :raises RuntimeError: Raised when bear requirements are not fulfilled.
48 """
49 Printer.__init__(self)
50 LogPrinter.__init__(self, self)
51
52 if message_queue is not None and not hasattr(message_queue, "put"):
53 raise TypeError("message_queue has to be a Queue or None.")
54
55 self.section = section
56 self.message_queue = message_queue
57 self.timeout = timeout
58
59 cp = type(self).check_prerequisites()
60 if cp is not True:
61 error_string = ("The bear " + type(self).__name__ +
62 " does not fulfill all requirements.")
63 if cp is not False:
64 error_string += " " + cp
65
66 self.warn(error_string)
67 raise RuntimeError(error_string)
68
69 def _print(self, output, **kwargs):
70 self.debug(output)
71
72 def log_message(self, log_message, timestamp=None, **kwargs):
73 if self.message_queue is not None:
74 self.message_queue.put(log_message)
75
76 def run(self, *args, dependency_results=None, **kwargs):
77 raise NotImplementedError
78
79 def run_bear_from_section(self, args, kwargs):
80 try:
81 kwargs.update(
82 self.get_metadata().create_params_from_section(self.section))
83 except ValueError as err:
84 self.warn("The bear {} cannot be executed.".format(
85 type(self).__name__), str(err))
86 return
87
88 return self.run(*args, **kwargs)
89
90 def execute(self, *args, **kwargs):
91 name = type(self).__name__
92 try:
93 self.debug("Running bear {}...".format(name))
94 # If it's already a list it won't change it
95 return list(self.run_bear_from_section(args, kwargs) or [])
96 except:
97 self.warn(
98 "Bear {} failed to run. Take a look at debug messages for "
99 "further information.".format(name))
100 self.debug(
101 "The bear {bear} raised an exception. If you are the writer "
102 "of this bear, please make sure to catch all exceptions. If "
103 "not and this error annoys you, you might want to get in "
104 "contact with the writer of this bear.\n\nTraceback "
105 "information is provided below:\n\n{traceback}"
106 "\n".format(bear=name, traceback=traceback.format_exc()))
107
108 @staticmethod
109 def kind():
110 """
111 :return: The kind of the bear
112 """
113 raise NotImplementedError
114
115 @classmethod
116 def get_metadata(cls):
117 """
118 :return: Metadata for the run function. However parameters like `self`
119 or parameters implicitly used by coala (e.g. filename for
120 local bears) are already removed.
121 """
122 return FunctionMetadata.from_function(
123 cls.run,
124 omit={"self", "dependency_results"})
125
126 @classmethod
127 def missing_dependencies(cls, lst):
128 """
129 Checks if the given list contains all dependencies.
130
131 :param lst: A list of all already resolved bear classes (not
132 instances).
133 :return: A list of missing dependencies.
134 """
135 dep_classes = cls.get_dependencies()
136
137 for item in lst:
138 if item in dep_classes:
139 dep_classes.remove(item)
140
141 return dep_classes
142
143 @staticmethod
144 def get_dependencies():
145 """
146 Retrieves bear classes that are to be executed before this bear gets
147 executed. The results of these bears will then be passed to the
148 run method as a dict via the dependency_results argument. The dict
149 will have the name of the Bear as key and the list of its results as
150 results.
151
152 :return: A list of bear classes.
153 """
154 return []
155
156 @classmethod
157 def get_non_optional_settings(cls):
158 """
159 This method has to determine which settings are needed by this bear.
160 The user will be prompted for needed settings that are not available
161 in the settings file so don't include settings where a default value
162 would do.
163
164 :return: A dictionary of needed settings as keys and a tuple of help
165 text and annotation as values
166 """
167 return cls.get_metadata().non_optional_params
168
169 @classmethod
170 def check_prerequisites(cls):
171 """
172 Checks whether needed runtime prerequisites of the bear are satisfied.
173
174 This function gets executed at construction and returns True by
175 default.
176
177 Section value requirements shall be checked inside the `run` method.
178
179 :return: True if prerequisites are satisfied, else False or a string
180 that serves a more detailed description of what's missing.
181 """
182 return True
183
[end of coalib/bears/Bear.py]
[start of bears/vcs/git/GitCommitBear.py]
1 import shutil
2
3 from coalib.bears.GlobalBear import GlobalBear
4 from coalib.misc.Shell import run_shell_command
5 from coalib.results.Result import Result
6
7
8 class GitCommitBear(GlobalBear):
9 _git_command = "git log -1 --pretty=%B"
10
11 @classmethod
12 def check_prerequisites(cls):
13 if shutil.which("git") is None:
14 return "git is not installed."
15 else:
16 return True
17
18 def run(self,
19 shortlog_length: int=50,
20 body_line_length: int=73,
21 force_body: bool=False,
22 allow_empty_commit_message: bool=False):
23 """
24 Checks the current git commit message at HEAD.
25
26 This bear ensures that the shortlog and body do not exceed a given
27 line-length and that a newline lies between them.
28
29 :param shortlog_length: The maximum length of the shortlog.
30 The shortlog is the first line of
31 the commit message. The newline
32 character at end does not count to
33 the length.
34 :param body_line_length: The maximum line-length of the body.
35 The newline character at each line
36 end does not count to the length.
37 :param force_body: Whether a body shall exist or not.
38 :param allow_empty_commit_message: Whether empty commit messages are
39 allowed or not.
40 """
41 stdout, stderr = run_shell_command(self._git_command)
42
43 if stderr:
44 self.err("git:", repr(stderr))
45 return
46
47 # git automatically removes trailing whitespaces. Also we need to
48 # remove the last \n printed to align the prompt onto the next line.
49 stdout = stdout.splitlines()[:-1]
50
51 if len(stdout) == 0:
52 if not allow_empty_commit_message:
53 yield Result(self, "HEAD commit has no message.")
54 return
55
56 yield from self.check_shortlog(shortlog_length, stdout[0])
57 yield from self.check_body(body_line_length, force_body, stdout[1:])
58
59 def check_shortlog(self, shortlog_length, shortlog):
60 """
61 Checks the given shortlog.
62
63 :param shortlog_length: The maximum length of the shortlog. The newline
64 character at end does not count to the length.
65 :param shortlog: The shortlog message string.
66 """
67 if len(shortlog) > shortlog_length:
68 yield Result(self, "Shortlog of HEAD commit is too long.")
69
70 def check_body(self, body_line_length, force_body, body):
71 """
72 Checks the given commit body.
73
74 :param body_line_length: The maximum line-length of the body. The
75 newline character at each line end does not
76 count to the length.
77 :param force_body: Whether a body shall exist or not.
78 :param body: The commit body splitted by lines.
79 """
80 if len(body) == 0:
81 if force_body:
82 yield Result(self, "No commit message body at HEAD.")
83 return
84
85 if body[0] != "":
86 yield Result(self, "No newline between shortlog and body at HEAD.")
87 return
88
89 if any(len(line) > body_line_length for line in body[1:]):
90 yield Result(self, "Body of HEAD commit contains too long lines.")
91
[end of bears/vcs/git/GitCommitBear.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py
--- a/bears/vcs/git/GitCommitBear.py
+++ b/bears/vcs/git/GitCommitBear.py
@@ -1,4 +1,5 @@
import shutil
+import os
from coalib.bears.GlobalBear import GlobalBear
from coalib.misc.Shell import run_shell_command
@@ -38,6 +39,10 @@
:param allow_empty_commit_message: Whether empty commit messages are
allowed or not.
"""
+ config_dir = self.get_config_dir()
+ old_dir = os.getcwd()
+ if config_dir:
+ os.chdir(config_dir)
stdout, stderr = run_shell_command(self._git_command)
if stderr:
@@ -56,6 +61,8 @@
yield from self.check_shortlog(shortlog_length, stdout[0])
yield from self.check_body(body_line_length, force_body, stdout[1:])
+ os.chdir(old_dir)
+
def check_shortlog(self, shortlog_length, shortlog):
"""
Checks the given shortlog.
diff --git a/coalib/bears/Bear.py b/coalib/bears/Bear.py
--- a/coalib/bears/Bear.py
+++ b/coalib/bears/Bear.py
@@ -5,6 +5,7 @@
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.settings.FunctionMetadata import FunctionMetadata
from coalib.settings.Section import Section
+from coalib.settings.ConfigurationGathering import get_config_directory
class Bear(Printer, LogPrinter):
@@ -180,3 +181,11 @@
that serves a more detailed description of what's missing.
"""
return True
+
+ def get_config_dir(self):
+ """
+ Gives the directory where the configuration file is
+
+ :return: Directory of the config file
+ """
+ return get_config_directory(self.section)
| {"golden_diff": "diff --git a/bears/vcs/git/GitCommitBear.py b/bears/vcs/git/GitCommitBear.py\n--- a/bears/vcs/git/GitCommitBear.py\n+++ b/bears/vcs/git/GitCommitBear.py\n@@ -1,4 +1,5 @@\n import shutil\n+import os\n \n from coalib.bears.GlobalBear import GlobalBear\n from coalib.misc.Shell import run_shell_command\n@@ -38,6 +39,10 @@\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n \"\"\"\n+ config_dir = self.get_config_dir()\n+ old_dir = os.getcwd()\n+ if config_dir:\n+ os.chdir(config_dir)\n stdout, stderr = run_shell_command(self._git_command)\n \n if stderr:\n@@ -56,6 +61,8 @@\n yield from self.check_shortlog(shortlog_length, stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n \n+ os.chdir(old_dir)\n+\n def check_shortlog(self, shortlog_length, shortlog):\n \"\"\"\n Checks the given shortlog.\ndiff --git a/coalib/bears/Bear.py b/coalib/bears/Bear.py\n--- a/coalib/bears/Bear.py\n+++ b/coalib/bears/Bear.py\n@@ -5,6 +5,7 @@\n from coalib.output.printers.LogPrinter import LogPrinter\n from coalib.settings.FunctionMetadata import FunctionMetadata\n from coalib.settings.Section import Section\n+from coalib.settings.ConfigurationGathering import get_config_directory\n \n \n class Bear(Printer, LogPrinter):\n@@ -180,3 +181,11 @@\n that serves a more detailed description of what's missing.\n \"\"\"\n return True\n+\n+ def get_config_dir(self):\n+ \"\"\"\n+ Gives the directory where the configuration file is\n+\n+ :return: Directory of the config file\n+ \"\"\"\n+ return get_config_directory(self.section)\n", "issue": "git commit bear doesn't work if coala isn't run in the same directory\nit should also work if I provide a coafile with `-c`, @Makman2 can you fix that soonish? This is kind of the reason why this breaks everywhere for my gitmate experiments.\n\n", "before_files": [{"content": "import traceback\nfrom pyprint.Printer import Printer\n\nfrom coalib.misc.Decorators import enforce_signature\nfrom coalib.output.printers.LogPrinter import LogPrinter\nfrom coalib.settings.FunctionMetadata import FunctionMetadata\nfrom coalib.settings.Section import Section\n\n\nclass Bear(Printer, LogPrinter):\n \"\"\"\n A bear contains the actual subroutine that is responsible for checking\n source code for certain specifications. However it can actually do\n whatever it wants with the files it gets. If you are missing some Result\n type, feel free to contact us and/or help us extending the coalib.\n\n This is the base class for every bear. If you want to write an bear, you\n will probably want to look at the GlobalBear and LocalBear classes that\n inherit from this class. In any case you'll want to overwrite at least the\n run method. You can send debug/warning/error messages through the\n debug(), warn(), err() functions. These will send the\n appropriate messages so that they are outputted. Be aware that if you use\n err(), you are expected to also terminate the bear run-through\n immediately.\n\n If you need some setup or teardown for your bear, feel free to overwrite\n the set_up() and tear_down() functions. They will be invoked\n before/after every run invocation.\n\n Settings are available at all times through self.section.\n \"\"\"\n\n @enforce_signature\n def __init__(self,\n section: Section,\n message_queue,\n timeout=0):\n \"\"\"\n Constructs a new bear.\n\n :param section: The section object where bear settings are\n contained.\n :param message_queue: The queue object for messages. 
Can be `None`.\n :param timeout: The time the bear is allowed to run. To set no\n time limit, use 0.\n :raises TypeError: Raised when `message_queue` is no queue.\n :raises RuntimeError: Raised when bear requirements are not fulfilled.\n \"\"\"\n Printer.__init__(self)\n LogPrinter.__init__(self, self)\n\n if message_queue is not None and not hasattr(message_queue, \"put\"):\n raise TypeError(\"message_queue has to be a Queue or None.\")\n\n self.section = section\n self.message_queue = message_queue\n self.timeout = timeout\n\n cp = type(self).check_prerequisites()\n if cp is not True:\n error_string = (\"The bear \" + type(self).__name__ +\n \" does not fulfill all requirements.\")\n if cp is not False:\n error_string += \" \" + cp\n\n self.warn(error_string)\n raise RuntimeError(error_string)\n\n def _print(self, output, **kwargs):\n self.debug(output)\n\n def log_message(self, log_message, timestamp=None, **kwargs):\n if self.message_queue is not None:\n self.message_queue.put(log_message)\n\n def run(self, *args, dependency_results=None, **kwargs):\n raise NotImplementedError\n\n def run_bear_from_section(self, args, kwargs):\n try:\n kwargs.update(\n self.get_metadata().create_params_from_section(self.section))\n except ValueError as err:\n self.warn(\"The bear {} cannot be executed.\".format(\n type(self).__name__), str(err))\n return\n\n return self.run(*args, **kwargs)\n\n def execute(self, *args, **kwargs):\n name = type(self).__name__\n try:\n self.debug(\"Running bear {}...\".format(name))\n # If it's already a list it won't change it\n return list(self.run_bear_from_section(args, kwargs) or [])\n except:\n self.warn(\n \"Bear {} failed to run. Take a look at debug messages for \"\n \"further information.\".format(name))\n self.debug(\n \"The bear {bear} raised an exception. If you are the writer \"\n \"of this bear, please make sure to catch all exceptions. If \"\n \"not and this error annoys you, you might want to get in \"\n \"contact with the writer of this bear.\\n\\nTraceback \"\n \"information is provided below:\\n\\n{traceback}\"\n \"\\n\".format(bear=name, traceback=traceback.format_exc()))\n\n @staticmethod\n def kind():\n \"\"\"\n :return: The kind of the bear\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_metadata(cls):\n \"\"\"\n :return: Metadata for the run function. However parameters like `self`\n or parameters implicitly used by coala (e.g. filename for\n local bears) are already removed.\n \"\"\"\n return FunctionMetadata.from_function(\n cls.run,\n omit={\"self\", \"dependency_results\"})\n\n @classmethod\n def missing_dependencies(cls, lst):\n \"\"\"\n Checks if the given list contains all dependencies.\n\n :param lst: A list of all already resolved bear classes (not\n instances).\n :return: A list of missing dependencies.\n \"\"\"\n dep_classes = cls.get_dependencies()\n\n for item in lst:\n if item in dep_classes:\n dep_classes.remove(item)\n\n return dep_classes\n\n @staticmethod\n def get_dependencies():\n \"\"\"\n Retrieves bear classes that are to be executed before this bear gets\n executed. The results of these bears will then be passed to the\n run method as a dict via the dependency_results argument. 
The dict\n will have the name of the Bear as key and the list of its results as\n results.\n\n :return: A list of bear classes.\n \"\"\"\n return []\n\n @classmethod\n def get_non_optional_settings(cls):\n \"\"\"\n This method has to determine which settings are needed by this bear.\n The user will be prompted for needed settings that are not available\n in the settings file so don't include settings where a default value\n would do.\n\n :return: A dictionary of needed settings as keys and a tuple of help\n text and annotation as values\n \"\"\"\n return cls.get_metadata().non_optional_params\n\n @classmethod\n def check_prerequisites(cls):\n \"\"\"\n Checks whether needed runtime prerequisites of the bear are satisfied.\n\n This function gets executed at construction and returns True by\n default.\n\n Section value requirements shall be checked inside the `run` method.\n\n :return: True if prerequisites are satisfied, else False or a string\n that serves a more detailed description of what's missing.\n \"\"\"\n return True\n", "path": "coalib/bears/Bear.py"}, {"content": "import shutil\n\nfrom coalib.bears.GlobalBear import GlobalBear\nfrom coalib.misc.Shell import run_shell_command\nfrom coalib.results.Result import Result\n\n\nclass GitCommitBear(GlobalBear):\n _git_command = \"git log -1 --pretty=%B\"\n\n @classmethod\n def check_prerequisites(cls):\n if shutil.which(\"git\") is None:\n return \"git is not installed.\"\n else:\n return True\n\n def run(self,\n shortlog_length: int=50,\n body_line_length: int=73,\n force_body: bool=False,\n allow_empty_commit_message: bool=False):\n \"\"\"\n Checks the current git commit message at HEAD.\n\n This bear ensures that the shortlog and body do not exceed a given\n line-length and that a newline lies between them.\n\n :param shortlog_length: The maximum length of the shortlog.\n The shortlog is the first line of\n the commit message. The newline\n character at end does not count to\n the length.\n :param body_line_length: The maximum line-length of the body.\n The newline character at each line\n end does not count to the length.\n :param force_body: Whether a body shall exist or not.\n :param allow_empty_commit_message: Whether empty commit messages are\n allowed or not.\n \"\"\"\n stdout, stderr = run_shell_command(self._git_command)\n\n if stderr:\n self.err(\"git:\", repr(stderr))\n return\n\n # git automatically removes trailing whitespaces. Also we need to\n # remove the last \\n printed to align the prompt onto the next line.\n stdout = stdout.splitlines()[:-1]\n\n if len(stdout) == 0:\n if not allow_empty_commit_message:\n yield Result(self, \"HEAD commit has no message.\")\n return\n\n yield from self.check_shortlog(shortlog_length, stdout[0])\n yield from self.check_body(body_line_length, force_body, stdout[1:])\n\n def check_shortlog(self, shortlog_length, shortlog):\n \"\"\"\n Checks the given shortlog.\n\n :param shortlog_length: The maximum length of the shortlog. The newline\n character at end does not count to the length.\n :param shortlog: The shortlog message string.\n \"\"\"\n if len(shortlog) > shortlog_length:\n yield Result(self, \"Shortlog of HEAD commit is too long.\")\n\n def check_body(self, body_line_length, force_body, body):\n \"\"\"\n Checks the given commit body.\n\n :param body_line_length: The maximum line-length of the body. 
The\n newline character at each line end does not\n count to the length.\n :param force_body: Whether a body shall exist or not.\n :param body: The commit body splitted by lines.\n \"\"\"\n if len(body) == 0:\n if force_body:\n yield Result(self, \"No commit message body at HEAD.\")\n return\n\n if body[0] != \"\":\n yield Result(self, \"No newline between shortlog and body at HEAD.\")\n return\n\n if any(len(line) > body_line_length for line in body[1:]):\n yield Result(self, \"Body of HEAD commit contains too long lines.\")\n", "path": "bears/vcs/git/GitCommitBear.py"}]} | 3,329 | 436 |
gh_patches_debug_29201 | rasdani/github-patches | git_diff | PrefectHQ__prefect-3923 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prefect serializes flows based on flow name ignoring project
Hi,
I searched the slack channel and did not find anything that pertained directly to this. We use Prefect for two departments that have virtually the same flows, but different sources and different processing rules. What we have found is that flows with the same name overwrite each other regardless of the project. I am not sure if that is intended or if there is a way to work around this issue.
This is a minimal example that I confirmed in a fresh install of Prefect 0.14.0 running the server backend in docker with a local agent all on the same machine.
```
jaykae@DT-JKNICKERBOCKER: ~: ls -1 prefect_test prefect_test/proj*
prefect_test:
proj1
proj2
prefect_test/proj1:
WriteHello.py
prefect_test/proj2:
WriteHello.py
```
Just a folder with two subfolders, projects 1 and 2; each folder holds a file with nearly the same content. Note the file paths are different and also the project names are different.
**proj1/WriteHello.py**
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj1/WriteHello.py
import prefect
from prefect import task, Flow
@task
def write_hello():
with open("proj1/hello.txt","w") as hello_file:
hello_file.write("Hello Project 1!!!")
with Flow("WriteHello") as flow:
write_it = write_hello()
flow.register(project_name="Project1")
```
**proj2/WriteHello.py**
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj2/WriteHello.py
import prefect
from prefect import task, Flow
@task
def write_hello():
with open("proj2/hello.txt","w") as hello_file:
hello_file.write("Hello Project 2!!!")
with Flow("WriteHello") as flow:
write_it = write_hello()
flow.register(project_name="Project2")
```
I register proj1/HelloWorld.py and then proj2/HelloWorld.py. I would expect that registering two flows with different file paths, as well as different project names, would result in writing a text file in the directory for each flow when it is called. However, when I run proj1/HelloWorld.py the file is written in the proj2 directory and says Hello Project 2!!!.
```
jaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*
proj1:
WriteHello.py
proj2:
WriteHello.py
jaykae@DT-JKNICKERBOCKER: ~/prefect_test prefect run flow --name WriteHello --project Project1
Flow Run: http://localhost:8080/default/flow-run/e05b8f99-945d-4954-ab0d-7e5e89df0965
jaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*
proj1:
WriteHello.py
proj2:
WriteHello.py hello.txt
```
I tried changing the .py file name and that did not help at all, so I assume it has to do with the flow name. Is there a way to have two flows with the same name, but different content, in separate projects and have them peacefully coexist?
</issue>
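The collision described in this issue follows directly from how `Local.add_flow` (in the file below) builds its storage path from the flow name alone. A minimal sketch of that path construction, assuming the `python-slugify` package that provides the `slugify` helper the storage class imports:

```
import os

from slugify import slugify  # provided by python-slugify, the same helper Local storage imports

directory = os.path.join(os.path.expanduser("~"), ".prefect", "flows")  # default Local storage directory

# The project name never enters the path, so both registrations of "WriteHello"
# serialize to the same file and the second one overwrites the first.
for project in ("Project1", "Project2"):
    flow_location = os.path.join(directory, "{}.prefect".format(slugify("WriteHello")))
    print(project, "->", flow_location)  # both iterations print .../flows/writehello.prefect
```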
<code>
[start of src/prefect/storage/local.py]
1 import os
2 import socket
3 from typing import TYPE_CHECKING, Any, Dict, List
4
5 from slugify import slugify
6
7 import prefect
8 from prefect.engine.results import LocalResult
9 from prefect.storage import Storage
10 from prefect.utilities.storage import extract_flow_from_file, extract_flow_from_module
11
12 if TYPE_CHECKING:
13 from prefect.core.flow import Flow
14
15
16 class Local(Storage):
17 """
18 Local storage class. This class represents the Storage
19 interface for Flows stored as bytes in the local filesystem.
20
21 Note that if you register a Flow with Prefect Cloud using this storage,
22 your flow will automatically be labeled with your machine's hostname. This
23 ensures that only agents that are known to be running on the same
24 filesystem can run your flow.
25
26 Args:
27 - directory (str, optional): the directory the flows will be stored in;
28 defaults to `~/.prefect/flows`. If it doesn't already exist, it will be
29 created for you.
30 - validate (bool, optional): a boolean specifying whether to validate the
31 provided directory path; if `True`, the directory will be converted to an
32 absolute path and created. Defaults to `True`
33 - path (str, optional): a direct path to the location of the flow file if
34 `stored_as_script=True`, otherwise this path will be used when storing the serialized,
35 pickled flow. If `stored_as_script=True`, the direct path may be a file path
36 (such as 'path/to/myflow.py') or a direct python path (such as 'myrepo.mymodule.myflow')
37 - stored_as_script (bool, optional): boolean for specifying if the flow has been stored
38 as a `.py` file. Defaults to `False`
39 - **kwargs (Any, optional): any additional `Storage` initialization options
40 """
41
42 def __init__(
43 self,
44 directory: str = None,
45 validate: bool = True,
46 path: str = None,
47 stored_as_script: bool = False,
48 **kwargs: Any,
49 ) -> None:
50 directory = directory or os.path.join(prefect.config.home_dir, "flows")
51 self.flows = dict() # type: Dict[str, str]
52 self._flows = dict() # type: Dict[str, "prefect.core.flow.Flow"]
53
54 self.path = path
55
56 if validate:
57 abs_directory = os.path.abspath(os.path.expanduser(directory))
58 os.makedirs(abs_directory, exist_ok=True)
59 else:
60 abs_directory = directory
61
62 self.directory = abs_directory
63 result = LocalResult(self.directory, validate_dir=validate)
64 super().__init__(result=result, stored_as_script=stored_as_script, **kwargs)
65
66 @property
67 def default_labels(self) -> List[str]:
68 if self.add_default_labels:
69 return [socket.gethostname()]
70 else:
71 return []
72
73 def get_flow(self, flow_location: str = None) -> "Flow":
74 """
75 Given a flow_location within this Storage object, returns the underlying Flow (if possible).
76
77 Args:
78 - flow_location (str, optional): the location of a flow within this Storage; in this case,
79 a file path or python path where a Flow has been serialized to. Will use `path`
80 if not provided.
81
82 Returns:
83 - Flow: the requested flow
84
85 Raises:
86 - ValueError: if the flow is not contained in this storage
87 """
88 if flow_location:
89 if flow_location not in self.flows.values():
90 raise ValueError("Flow is not contained in this Storage")
91 elif self.path:
92 flow_location = self.path
93 else:
94 raise ValueError("No flow location provided")
95
96 # check if the path given is a file path
97 try:
98 if os.path.isfile(flow_location):
99 if self.stored_as_script:
100 return extract_flow_from_file(file_path=flow_location)
101 else:
102 return prefect.core.flow.Flow.load(flow_location)
103 # otherwise the path is given in the module format
104 else:
105 return extract_flow_from_module(module_str=flow_location)
106 except Exception:
107 self.logger.exception(f"Failed to load Flow from {flow_location}")
108 raise
109
110 def add_flow(self, flow: "Flow") -> str:
111 """
112 Method for storing a new flow as bytes in the local filesytem.
113
114 Args:
115 - flow (Flow): a Prefect Flow to add
116
117 Returns:
118 - str: the location of the newly added flow in this Storage object
119
120 Raises:
121 - ValueError: if a flow with the same name is already contained in this storage
122 """
123 if flow.name in self:
124 raise ValueError(
125 'Name conflict: Flow with the name "{}" is already present in this storage.'.format(
126 flow.name
127 )
128 )
129
130 if self.stored_as_script:
131 if not self.path:
132 raise ValueError(
133 "A `path` must be provided to show where flow `.py` file is stored."
134 )
135 flow_location = self.path
136 else:
137 if self.path:
138 flow_location = self.path
139 else:
140 flow_location = os.path.join(
141 self.directory, "{}.prefect".format(slugify(flow.name))
142 )
143 flow_location = flow.save(flow_location)
144
145 self.flows[flow.name] = flow_location
146 self._flows[flow.name] = flow
147 return flow_location
148
149 def __contains__(self, obj: Any) -> bool:
150 """
151 Method for determining whether an object is contained within this storage.
152 """
153 if not isinstance(obj, str):
154 return False
155 return obj in self.flows
156
157 def build(self) -> "Storage":
158 """
159 Build the Storage object.
160
161 Returns:
162 - Storage: a Storage object that contains information about how and where
163 each flow is stored
164 """
165 self.run_basic_healthchecks()
166 return self
167
[end of src/prefect/storage/local.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/storage/local.py b/src/prefect/storage/local.py
--- a/src/prefect/storage/local.py
+++ b/src/prefect/storage/local.py
@@ -2,6 +2,8 @@
import socket
from typing import TYPE_CHECKING, Any, Dict, List
+import cloudpickle
+import pendulum
from slugify import slugify
import prefect
@@ -99,7 +101,8 @@
if self.stored_as_script:
return extract_flow_from_file(file_path=flow_location)
else:
- return prefect.core.flow.Flow.load(flow_location)
+ with open(flow_location, "rb") as f:
+ return cloudpickle.load(f)
# otherwise the path is given in the module format
else:
return extract_flow_from_module(module_str=flow_location)
@@ -138,9 +141,13 @@
flow_location = self.path
else:
flow_location = os.path.join(
- self.directory, "{}.prefect".format(slugify(flow.name))
+ self.directory,
+ slugify(flow.name),
+ slugify(pendulum.now("utc").isoformat()),
)
- flow_location = flow.save(flow_location)
+ os.makedirs(os.path.dirname(flow_location), exist_ok=True)
+ with open(flow_location, "wb") as f:
+ cloudpickle.dump(flow, f)
self.flows[flow.name] = flow_location
self._flows[flow.name] = flow
| {"golden_diff": "diff --git a/src/prefect/storage/local.py b/src/prefect/storage/local.py\n--- a/src/prefect/storage/local.py\n+++ b/src/prefect/storage/local.py\n@@ -2,6 +2,8 @@\n import socket\n from typing import TYPE_CHECKING, Any, Dict, List\n \n+import cloudpickle\n+import pendulum\n from slugify import slugify\n \n import prefect\n@@ -99,7 +101,8 @@\n if self.stored_as_script:\n return extract_flow_from_file(file_path=flow_location)\n else:\n- return prefect.core.flow.Flow.load(flow_location)\n+ with open(flow_location, \"rb\") as f:\n+ return cloudpickle.load(f)\n # otherwise the path is given in the module format\n else:\n return extract_flow_from_module(module_str=flow_location)\n@@ -138,9 +141,13 @@\n flow_location = self.path\n else:\n flow_location = os.path.join(\n- self.directory, \"{}.prefect\".format(slugify(flow.name))\n+ self.directory,\n+ slugify(flow.name),\n+ slugify(pendulum.now(\"utc\").isoformat()),\n )\n- flow_location = flow.save(flow_location)\n+ os.makedirs(os.path.dirname(flow_location), exist_ok=True)\n+ with open(flow_location, \"wb\") as f:\n+ cloudpickle.dump(flow, f)\n \n self.flows[flow.name] = flow_location\n self._flows[flow.name] = flow\n", "issue": "Prefect serializes flows based on flow name ignoring project\nHi,\r\n\r\nI searched the slack channel and did not find anything that pertained directly to this. We use Prefect for two departments that have virtually the same flows, but different sources and different processing rules. What we have found is that flows with the same name overwrite each other regardless of the project. I am not sure if that is intended or if there is a way to work around this issue.\r\n\r\nThis is a minimal example that I confirmed in a fresh install of Prefect 0.14.0 running the server backend in docker with a local agent all on the same machine.\r\n\r\n```\r\njaykae@DT-JKNICKERBOCKER: ~: ls -1 prefect_test prefect_test/proj*\r\nprefect_test:\r\nproj1\r\nproj2\r\n\r\nprefect_test/proj1:\r\nWriteHello.py\r\n\r\nprefect_test/proj2:\r\nWriteHello.py\r\n```\r\nJust a folder with two subfolders, projects 1 and 2, each folder holds a file with nearly the same content. Note the file paths are different and also the project names are different.\r\n\r\n**proj1/WriteHello.py**\r\n```\r\njaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj1/WriteHello.py\r\nimport prefect\r\nfrom prefect import task, Flow\r\n\r\n@task\r\ndef write_hello():\r\n with open(\"proj1/hello.txt\",\"w\") as hello_file:\r\n hello_file.write(\"Hello Project 1!!!\")\r\n\r\nwith Flow(\"WriteHello\") as flow:\r\n write_it = write_hello()\r\n\r\nflow.register(project_name=\"Project1\")\r\n```\r\n\r\n**proj2/WriteHello.py**\r\n```\r\njaykae@DT-JKNICKERBOCKER: ~/prefect_test cat proj2/WriteHello.py\r\nimport prefect\r\nfrom prefect import task, Flow\r\n\r\n@task\r\ndef write_hello():\r\n with open(\"proj2/hello.txt\",\"w\") as hello_file:\r\n hello_file.write(\"Hello Project 2!!!\")\r\n\r\nwith Flow(\"WriteHello\") as flow:\r\n write_it = write_hello()\r\n\r\nflow.register(project_name=\"Project2\")\r\n```\r\n\r\nI register proj1/HelloWorld.py and then proj2/HelloWorld.py, I would expect that registering two flows with different file paths, as well as different project names would lead result in writing a text file in the directory for each flow when it is called. 
However, when I run proj1/HelloWorld.py the file is written in the proj2 directory and says Hello Project 2!!!.\r\n\r\n```\r\njaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*\r\nproj1:\r\nWriteHello.py\r\n\r\nproj2:\r\nWriteHello.py\r\njaykae@DT-JKNICKERBOCKER: ~/prefect_test prefect run flow --name WriteHello --project Project1\r\nFlow Run: http://localhost:8080/default/flow-run/e05b8f99-945d-4954-ab0d-7e5e89df0965\r\njaykae@DT-JKNICKERBOCKER: ~/prefect_test ls proj*\r\nproj1:\r\nWriteHello.py\r\n\r\nproj2:\r\nWriteHello.py hello.txt\r\n```\r\n\r\nI tried changing the .py file name and that did not help at all, so I assume it has to do with the flow name. Is there a way to have two flows with the same name, but different content, in separate projects and peacefully coexist?\n", "before_files": [{"content": "import os\nimport socket\nfrom typing import TYPE_CHECKING, Any, Dict, List\n\nfrom slugify import slugify\n\nimport prefect\nfrom prefect.engine.results import LocalResult\nfrom prefect.storage import Storage\nfrom prefect.utilities.storage import extract_flow_from_file, extract_flow_from_module\n\nif TYPE_CHECKING:\n from prefect.core.flow import Flow\n\n\nclass Local(Storage):\n \"\"\"\n Local storage class. This class represents the Storage\n interface for Flows stored as bytes in the local filesystem.\n\n Note that if you register a Flow with Prefect Cloud using this storage,\n your flow will automatically be labeled with your machine's hostname. This\n ensures that only agents that are known to be running on the same\n filesystem can run your flow.\n\n Args:\n - directory (str, optional): the directory the flows will be stored in;\n defaults to `~/.prefect/flows`. If it doesn't already exist, it will be\n created for you.\n - validate (bool, optional): a boolean specifying whether to validate the\n provided directory path; if `True`, the directory will be converted to an\n absolute path and created. Defaults to `True`\n - path (str, optional): a direct path to the location of the flow file if\n `stored_as_script=True`, otherwise this path will be used when storing the serialized,\n pickled flow. If `stored_as_script=True`, the direct path may be a file path\n (such as 'path/to/myflow.py') or a direct python path (such as 'myrepo.mymodule.myflow')\n - stored_as_script (bool, optional): boolean for specifying if the flow has been stored\n as a `.py` file. 
Defaults to `False`\n - **kwargs (Any, optional): any additional `Storage` initialization options\n \"\"\"\n\n def __init__(\n self,\n directory: str = None,\n validate: bool = True,\n path: str = None,\n stored_as_script: bool = False,\n **kwargs: Any,\n ) -> None:\n directory = directory or os.path.join(prefect.config.home_dir, \"flows\")\n self.flows = dict() # type: Dict[str, str]\n self._flows = dict() # type: Dict[str, \"prefect.core.flow.Flow\"]\n\n self.path = path\n\n if validate:\n abs_directory = os.path.abspath(os.path.expanduser(directory))\n os.makedirs(abs_directory, exist_ok=True)\n else:\n abs_directory = directory\n\n self.directory = abs_directory\n result = LocalResult(self.directory, validate_dir=validate)\n super().__init__(result=result, stored_as_script=stored_as_script, **kwargs)\n\n @property\n def default_labels(self) -> List[str]:\n if self.add_default_labels:\n return [socket.gethostname()]\n else:\n return []\n\n def get_flow(self, flow_location: str = None) -> \"Flow\":\n \"\"\"\n Given a flow_location within this Storage object, returns the underlying Flow (if possible).\n\n Args:\n - flow_location (str, optional): the location of a flow within this Storage; in this case,\n a file path or python path where a Flow has been serialized to. Will use `path`\n if not provided.\n\n Returns:\n - Flow: the requested flow\n\n Raises:\n - ValueError: if the flow is not contained in this storage\n \"\"\"\n if flow_location:\n if flow_location not in self.flows.values():\n raise ValueError(\"Flow is not contained in this Storage\")\n elif self.path:\n flow_location = self.path\n else:\n raise ValueError(\"No flow location provided\")\n\n # check if the path given is a file path\n try:\n if os.path.isfile(flow_location):\n if self.stored_as_script:\n return extract_flow_from_file(file_path=flow_location)\n else:\n return prefect.core.flow.Flow.load(flow_location)\n # otherwise the path is given in the module format\n else:\n return extract_flow_from_module(module_str=flow_location)\n except Exception:\n self.logger.exception(f\"Failed to load Flow from {flow_location}\")\n raise\n\n def add_flow(self, flow: \"Flow\") -> str:\n \"\"\"\n Method for storing a new flow as bytes in the local filesytem.\n\n Args:\n - flow (Flow): a Prefect Flow to add\n\n Returns:\n - str: the location of the newly added flow in this Storage object\n\n Raises:\n - ValueError: if a flow with the same name is already contained in this storage\n \"\"\"\n if flow.name in self:\n raise ValueError(\n 'Name conflict: Flow with the name \"{}\" is already present in this storage.'.format(\n flow.name\n )\n )\n\n if self.stored_as_script:\n if not self.path:\n raise ValueError(\n \"A `path` must be provided to show where flow `.py` file is stored.\"\n )\n flow_location = self.path\n else:\n if self.path:\n flow_location = self.path\n else:\n flow_location = os.path.join(\n self.directory, \"{}.prefect\".format(slugify(flow.name))\n )\n flow_location = flow.save(flow_location)\n\n self.flows[flow.name] = flow_location\n self._flows[flow.name] = flow\n return flow_location\n\n def __contains__(self, obj: Any) -> bool:\n \"\"\"\n Method for determining whether an object is contained within this storage.\n \"\"\"\n if not isinstance(obj, str):\n return False\n return obj in self.flows\n\n def build(self) -> \"Storage\":\n \"\"\"\n Build the Storage object.\n\n Returns:\n - Storage: a Storage object that contains information about how and where\n each flow is stored\n \"\"\"\n self.run_basic_healthchecks()\n 
return self\n", "path": "src/prefect/storage/local.py"}]} | 2,928 | 328 |
gh_patches_debug_34791 | rasdani/github-patches | git_diff | fonttools__fonttools-2137 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`name` table decoding error
While using the following code:
```
for record in font['name'].names:
if record.nameID == 1:
print(record)
```
It gave an error for some of the Chinese fonts I have:
```
Traceback (most recent call last):
File "C:\Python39\lib\site-packages\fontTools\encodings\codecs.py", line 54, in decode
assert errors == 'strict'
AssertionError
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "d:\Desktop\coding\fontname\main.py", line 75, in <module>
print(record)
File "C:\Python39\lib\site-packages\fontTools\ttLib\tables\_n_a_m_e.py", line 414, in __str__
return self.toStr(errors='backslashreplace')
File "C:\Python39\lib\site-packages\fontTools\ttLib\tables\_n_a_m_e.py", line 460, in toUnicode
string = tounicode(string, encoding=encoding, errors=errors)
File "C:\Python39\lib\site-packages\fontTools\misc\py23.py", line 82, in tounicode
return s.decode(encoding, errors)
AssertionError: decoding with 'x_mac_simp_chinese_ttx' codec failed (AssertionError: )
```
It seems to be stuck on this section (based on `ttx`):
```
<namerecord nameID="1" platformID="3" platEncID="1" langID="0x804">
汉仪彩云体简
</namerecord>
```
</issue>
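The traceback points at `ExtendCodec.decode` (in the file below), which asserts `errors == 'strict'` while `NameRecord.toStr` asks for `errors='backslashreplace'`. A small reproduction sketch, assuming fontTools is installed and reusing the family name quoted above:

```
import codecs

import fontTools.encodings.codecs  # noqa: F401 -- importing this module registers the *_ttx codecs

raw = "汉仪彩云体简".encode("gb2312")  # the family name from the report, as GB2312 bytes

# The default 'strict' handler decodes fine:
print(codecs.decode(raw, "x_mac_simp_chinese_ttx"))

# Any other error handler trips the assert inside ExtendCodec.decode(),
# which is exactly what toStr(errors='backslashreplace') runs into:
codecs.decode(raw, "x_mac_simp_chinese_ttx", errors="backslashreplace")
```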
<code>
[start of Lib/fontTools/encodings/codecs.py]
1 """Extend the Python codecs module with a few encodings that are used in OpenType (name table)
2 but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
3
4 from fontTools.misc.py23 import *
5 import codecs
6 import encodings
7
8 class ExtendCodec(codecs.Codec):
9
10 def __init__(self, name, base_encoding, mapping):
11 self.name = name
12 self.base_encoding = base_encoding
13 self.mapping = mapping
14 self.reverse = {v:k for k,v in mapping.items()}
15 self.max_len = max(len(v) for v in mapping.values())
16 self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)
17 codecs.register_error(name, self.error)
18
19 def encode(self, input, errors='strict'):
20 assert errors == 'strict'
21 #return codecs.encode(input, self.base_encoding, self.name), len(input)
22
23 # The above line could totally be all we needed, relying on the error
24 # handling to replace the unencodable Unicode characters with our extended
25 # byte sequences.
26 #
27 # However, there seems to be a design bug in Python (probably intentional):
28 # the error handler for encoding is supposed to return a **Unicode** character,
29 # that then needs to be encodable itself... Ugh.
30 #
31 # So we implement what codecs.encode() should have been doing: which is expect
32 # error handler to return bytes() to be added to the output.
33 #
34 # This seems to have been fixed in Python 3.3. We should try using that and
35 # use fallback only if that failed.
36 # https://docs.python.org/3.3/library/codecs.html#codecs.register_error
37
38 length = len(input)
39 out = b''
40 while input:
41 try:
42 part = codecs.encode(input, self.base_encoding)
43 out += part
44 input = '' # All converted
45 except UnicodeEncodeError as e:
46 # Convert the correct part
47 out += codecs.encode(input[:e.start], self.base_encoding)
48 replacement, pos = self.error(e)
49 out += replacement
50 input = input[pos:]
51 return out, length
52
53 def decode(self, input, errors='strict'):
54 assert errors == 'strict'
55 return codecs.decode(input, self.base_encoding, self.name), len(input)
56
57 def error(self, e):
58 if isinstance(e, UnicodeDecodeError):
59 for end in range(e.start + 1, e.end + 1):
60 s = e.object[e.start:end]
61 if s in self.mapping:
62 return self.mapping[s], end
63 elif isinstance(e, UnicodeEncodeError):
64 for end in range(e.start + 1, e.start + self.max_len + 1):
65 s = e.object[e.start:end]
66 if s in self.reverse:
67 return self.reverse[s], end
68 e.encoding = self.name
69 raise e
70
71
72 _extended_encodings = {
73 "x_mac_japanese_ttx": ("shift_jis", {
74 b"\xFC": unichr(0x007C),
75 b"\x7E": unichr(0x007E),
76 b"\x80": unichr(0x005C),
77 b"\xA0": unichr(0x00A0),
78 b"\xFD": unichr(0x00A9),
79 b"\xFE": unichr(0x2122),
80 b"\xFF": unichr(0x2026),
81 }),
82 "x_mac_trad_chinese_ttx": ("big5", {
83 b"\x80": unichr(0x005C),
84 b"\xA0": unichr(0x00A0),
85 b"\xFD": unichr(0x00A9),
86 b"\xFE": unichr(0x2122),
87 b"\xFF": unichr(0x2026),
88 }),
89 "x_mac_korean_ttx": ("euc_kr", {
90 b"\x80": unichr(0x00A0),
91 b"\x81": unichr(0x20A9),
92 b"\x82": unichr(0x2014),
93 b"\x83": unichr(0x00A9),
94 b"\xFE": unichr(0x2122),
95 b"\xFF": unichr(0x2026),
96 }),
97 "x_mac_simp_chinese_ttx": ("gb2312", {
98 b"\x80": unichr(0x00FC),
99 b"\xA0": unichr(0x00A0),
100 b"\xFD": unichr(0x00A9),
101 b"\xFE": unichr(0x2122),
102 b"\xFF": unichr(0x2026),
103 }),
104 }
105
106 _cache = {}
107
108 def search_function(name):
109 name = encodings.normalize_encoding(name) # Rather undocumented...
110 if name in _extended_encodings:
111 if name not in _cache:
112 base_encoding, mapping = _extended_encodings[name]
113 assert(name[-4:] == "_ttx")
114 # Python 2 didn't have any of the encodings that we are implementing
115 # in this file. Python 3 added aliases for the East Asian ones, mapping
116 # them "temporarily" to the same base encoding as us, with a comment
117 # suggesting that full implementation will appear some time later.
118 # As such, try the Python version of the x_mac_... first, if that is found,
119 # use *that* as our base encoding. This would make our encoding upgrade
120 # to the full encoding when and if Python finally implements that.
121 # http://bugs.python.org/issue24041
122 base_encodings = [name[:-4], base_encoding]
123 for base_encoding in base_encodings:
124 try:
125 codecs.lookup(base_encoding)
126 except LookupError:
127 continue
128 _cache[name] = ExtendCodec(name, base_encoding, mapping)
129 break
130 return _cache[name].info
131
132 return None
133
134 codecs.register(search_function)
135
[end of Lib/fontTools/encodings/codecs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/Lib/fontTools/encodings/codecs.py b/Lib/fontTools/encodings/codecs.py
--- a/Lib/fontTools/encodings/codecs.py
+++ b/Lib/fontTools/encodings/codecs.py
@@ -16,43 +16,29 @@
self.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)
codecs.register_error(name, self.error)
- def encode(self, input, errors='strict'):
- assert errors == 'strict'
- #return codecs.encode(input, self.base_encoding, self.name), len(input)
-
- # The above line could totally be all we needed, relying on the error
- # handling to replace the unencodable Unicode characters with our extended
- # byte sequences.
- #
- # However, there seems to be a design bug in Python (probably intentional):
- # the error handler for encoding is supposed to return a **Unicode** character,
- # that then needs to be encodable itself... Ugh.
- #
- # So we implement what codecs.encode() should have been doing: which is expect
- # error handler to return bytes() to be added to the output.
- #
- # This seems to have been fixed in Python 3.3. We should try using that and
- # use fallback only if that failed.
- # https://docs.python.org/3.3/library/codecs.html#codecs.register_error
-
+ def _map(self, mapper, output_type, exc_type, input, errors):
+ base_error_handler = codecs.lookup_error(errors)
length = len(input)
- out = b''
+ out = output_type()
while input:
+ # first try to use self.error as the error handler
try:
- part = codecs.encode(input, self.base_encoding)
+ part = mapper(input, self.base_encoding, errors=self.name)
out += part
- input = '' # All converted
- except UnicodeEncodeError as e:
- # Convert the correct part
- out += codecs.encode(input[:e.start], self.base_encoding)
- replacement, pos = self.error(e)
+ break # All converted
+ except exc_type as e:
+ # else convert the correct part, handle error as requested and continue
+ out += mapper(input[:e.start], self.base_encoding, self.name)
+ replacement, pos = base_error_handler(e)
out += replacement
input = input[pos:]
return out, length
+ def encode(self, input, errors='strict'):
+ return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)
+
def decode(self, input, errors='strict'):
- assert errors == 'strict'
- return codecs.decode(input, self.base_encoding, self.name), len(input)
+ return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)
def error(self, e):
if isinstance(e, UnicodeDecodeError):
| {"golden_diff": "diff --git a/Lib/fontTools/encodings/codecs.py b/Lib/fontTools/encodings/codecs.py\n--- a/Lib/fontTools/encodings/codecs.py\n+++ b/Lib/fontTools/encodings/codecs.py\n@@ -16,43 +16,29 @@\n \t\tself.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)\n \t\tcodecs.register_error(name, self.error)\n \n-\tdef encode(self, input, errors='strict'):\n-\t\tassert errors == 'strict'\n-\t\t#return codecs.encode(input, self.base_encoding, self.name), len(input)\n-\n-\t\t# The above line could totally be all we needed, relying on the error\n-\t\t# handling to replace the unencodable Unicode characters with our extended\n-\t\t# byte sequences.\n-\t\t#\n-\t\t# However, there seems to be a design bug in Python (probably intentional):\n-\t\t# the error handler for encoding is supposed to return a **Unicode** character,\n-\t\t# that then needs to be encodable itself... Ugh.\n-\t\t#\n-\t\t# So we implement what codecs.encode() should have been doing: which is expect\n-\t\t# error handler to return bytes() to be added to the output.\n-\t\t#\n-\t\t# This seems to have been fixed in Python 3.3. We should try using that and\n-\t\t# use fallback only if that failed.\n-\t\t# https://docs.python.org/3.3/library/codecs.html#codecs.register_error\n-\n+\tdef _map(self, mapper, output_type, exc_type, input, errors):\n+\t\tbase_error_handler = codecs.lookup_error(errors)\n \t\tlength = len(input)\n-\t\tout = b''\n+\t\tout = output_type()\n \t\twhile input:\n+\t\t\t# first try to use self.error as the error handler\n \t\t\ttry:\n-\t\t\t\tpart = codecs.encode(input, self.base_encoding)\n+\t\t\t\tpart = mapper(input, self.base_encoding, errors=self.name)\n \t\t\t\tout += part\n-\t\t\t\tinput = '' # All converted\n-\t\t\texcept UnicodeEncodeError as e:\n-\t\t\t\t# Convert the correct part\n-\t\t\t\tout += codecs.encode(input[:e.start], self.base_encoding)\n-\t\t\t\treplacement, pos = self.error(e)\n+\t\t\t\tbreak # All converted\n+\t\t\texcept exc_type as e:\n+\t\t\t\t# else convert the correct part, handle error as requested and continue\n+\t\t\t\tout += mapper(input[:e.start], self.base_encoding, self.name)\n+\t\t\t\treplacement, pos = base_error_handler(e)\n \t\t\t\tout += replacement\n \t\t\t\tinput = input[pos:]\n \t\treturn out, length\n \n+\tdef encode(self, input, errors='strict'):\n+\t\treturn self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)\n+\n \tdef decode(self, input, errors='strict'):\n-\t\tassert errors == 'strict'\n-\t\treturn codecs.decode(input, self.base_encoding, self.name), len(input)\n+\t\treturn self._map(codecs.decode, str, UnicodeDecodeError, input, errors)\n \n \tdef error(self, e):\n \t\tif isinstance(e, UnicodeDecodeError):\n", "issue": "`name` table decoding error\nWhile using the following code:\r\n```\r\nfor record in font['name'].names:\r\n if record.nameID == 1:\r\n print(record)\r\n```\r\nIt gave an error for some of the Chinese fonts I have:\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Python39\\lib\\site-packages\\fontTools\\encodings\\codecs.py\", line 54, in decode\r\n assert errors == 'strict'\r\nAssertionError\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"d:\\Desktop\\coding\\fontname\\main.py\", line 75, in <module>\r\n print(record)\r\n File \"C:\\Python39\\lib\\site-packages\\fontTools\\ttLib\\tables\\_n_a_m_e.py\", line 414, in __str__\r\n return self.toStr(errors='backslashreplace')\r\n File 
\"C:\\Python39\\lib\\site-packages\\fontTools\\ttLib\\tables\\_n_a_m_e.py\", line 460, in toUnicode\r\n string = tounicode(string, encoding=encoding, errors=errors)\r\n File \"C:\\Python39\\lib\\site-packages\\fontTools\\misc\\py23.py\", line 82, in tounicode\r\n return s.decode(encoding, errors)\r\nAssertionError: decoding with 'x_mac_simp_chinese_ttx' codec failed (AssertionError: )\r\n```\r\nIt seems to be stuck on this section (based on `ttx`):\r\n```\r\n <namerecord nameID=\"1\" platformID=\"3\" platEncID=\"1\" langID=\"0x804\">\r\n \u6c49\u4eea\u5f69\u4e91\u4f53\u7b80\r\n </namerecord>\r\n```\n", "before_files": [{"content": "\"\"\"Extend the Python codecs module with a few encodings that are used in OpenType (name table)\nbut missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details.\"\"\"\n\nfrom fontTools.misc.py23 import *\nimport codecs\nimport encodings\n\nclass ExtendCodec(codecs.Codec):\n\n\tdef __init__(self, name, base_encoding, mapping):\n\t\tself.name = name\n\t\tself.base_encoding = base_encoding\n\t\tself.mapping = mapping\n\t\tself.reverse = {v:k for k,v in mapping.items()}\n\t\tself.max_len = max(len(v) for v in mapping.values())\n\t\tself.info = codecs.CodecInfo(name=self.name, encode=self.encode, decode=self.decode)\n\t\tcodecs.register_error(name, self.error)\n\n\tdef encode(self, input, errors='strict'):\n\t\tassert errors == 'strict'\n\t\t#return codecs.encode(input, self.base_encoding, self.name), len(input)\n\n\t\t# The above line could totally be all we needed, relying on the error\n\t\t# handling to replace the unencodable Unicode characters with our extended\n\t\t# byte sequences.\n\t\t#\n\t\t# However, there seems to be a design bug in Python (probably intentional):\n\t\t# the error handler for encoding is supposed to return a **Unicode** character,\n\t\t# that then needs to be encodable itself... Ugh.\n\t\t#\n\t\t# So we implement what codecs.encode() should have been doing: which is expect\n\t\t# error handler to return bytes() to be added to the output.\n\t\t#\n\t\t# This seems to have been fixed in Python 3.3. 
We should try using that and\n\t\t# use fallback only if that failed.\n\t\t# https://docs.python.org/3.3/library/codecs.html#codecs.register_error\n\n\t\tlength = len(input)\n\t\tout = b''\n\t\twhile input:\n\t\t\ttry:\n\t\t\t\tpart = codecs.encode(input, self.base_encoding)\n\t\t\t\tout += part\n\t\t\t\tinput = '' # All converted\n\t\t\texcept UnicodeEncodeError as e:\n\t\t\t\t# Convert the correct part\n\t\t\t\tout += codecs.encode(input[:e.start], self.base_encoding)\n\t\t\t\treplacement, pos = self.error(e)\n\t\t\t\tout += replacement\n\t\t\t\tinput = input[pos:]\n\t\treturn out, length\n\n\tdef decode(self, input, errors='strict'):\n\t\tassert errors == 'strict'\n\t\treturn codecs.decode(input, self.base_encoding, self.name), len(input)\n\n\tdef error(self, e):\n\t\tif isinstance(e, UnicodeDecodeError):\n\t\t\tfor end in range(e.start + 1, e.end + 1):\n\t\t\t\ts = e.object[e.start:end]\n\t\t\t\tif s in self.mapping:\n\t\t\t\t\treturn self.mapping[s], end\n\t\telif isinstance(e, UnicodeEncodeError):\n\t\t\tfor end in range(e.start + 1, e.start + self.max_len + 1):\n\t\t\t\ts = e.object[e.start:end]\n\t\t\t\tif s in self.reverse:\n\t\t\t\t\treturn self.reverse[s], end\n\t\te.encoding = self.name\n\t\traise e\n\n\n_extended_encodings = {\n\t\"x_mac_japanese_ttx\": (\"shift_jis\", {\n\t\t\t\t\tb\"\\xFC\": unichr(0x007C),\n\t\t\t\t\tb\"\\x7E\": unichr(0x007E),\n\t\t\t\t\tb\"\\x80\": unichr(0x005C),\n\t\t\t\t\tb\"\\xA0\": unichr(0x00A0),\n\t\t\t\t\tb\"\\xFD\": unichr(0x00A9),\n\t\t\t\t\tb\"\\xFE\": unichr(0x2122),\n\t\t\t\t\tb\"\\xFF\": unichr(0x2026),\n\t\t\t\t}),\n\t\"x_mac_trad_chinese_ttx\": (\"big5\", {\n\t\t\t\t\tb\"\\x80\": unichr(0x005C),\n\t\t\t\t\tb\"\\xA0\": unichr(0x00A0),\n\t\t\t\t\tb\"\\xFD\": unichr(0x00A9),\n\t\t\t\t\tb\"\\xFE\": unichr(0x2122),\n\t\t\t\t\tb\"\\xFF\": unichr(0x2026),\n\t\t\t\t}),\n\t\"x_mac_korean_ttx\": (\"euc_kr\", {\n\t\t\t\t\tb\"\\x80\": unichr(0x00A0),\n\t\t\t\t\tb\"\\x81\": unichr(0x20A9),\n\t\t\t\t\tb\"\\x82\": unichr(0x2014),\n\t\t\t\t\tb\"\\x83\": unichr(0x00A9),\n\t\t\t\t\tb\"\\xFE\": unichr(0x2122),\n\t\t\t\t\tb\"\\xFF\": unichr(0x2026),\n\t\t\t\t}),\n\t\"x_mac_simp_chinese_ttx\": (\"gb2312\", {\n\t\t\t\t\tb\"\\x80\": unichr(0x00FC),\n\t\t\t\t\tb\"\\xA0\": unichr(0x00A0),\n\t\t\t\t\tb\"\\xFD\": unichr(0x00A9),\n\t\t\t\t\tb\"\\xFE\": unichr(0x2122),\n\t\t\t\t\tb\"\\xFF\": unichr(0x2026),\n\t\t\t\t}),\n}\n\n_cache = {}\n\ndef search_function(name):\n\tname = encodings.normalize_encoding(name) # Rather undocumented...\n\tif name in _extended_encodings:\n\t\tif name not in _cache:\n\t\t\tbase_encoding, mapping = _extended_encodings[name]\n\t\t\tassert(name[-4:] == \"_ttx\")\n\t\t\t# Python 2 didn't have any of the encodings that we are implementing\n\t\t\t# in this file. Python 3 added aliases for the East Asian ones, mapping\n\t\t\t# them \"temporarily\" to the same base encoding as us, with a comment\n\t\t\t# suggesting that full implementation will appear some time later.\n\t\t\t# As such, try the Python version of the x_mac_... first, if that is found,\n\t\t\t# use *that* as our base encoding. 
This would make our encoding upgrade\n\t\t\t# to the full encoding when and if Python finally implements that.\n\t\t\t# http://bugs.python.org/issue24041\n\t\t\tbase_encodings = [name[:-4], base_encoding]\n\t\t\tfor base_encoding in base_encodings:\n\t\t\t\ttry:\n\t\t\t\t\tcodecs.lookup(base_encoding)\n\t\t\t\texcept LookupError:\n\t\t\t\t\tcontinue\n\t\t\t\t_cache[name] = ExtendCodec(name, base_encoding, mapping)\n\t\t\t\tbreak\n\t\treturn _cache[name].info\n\n\treturn None\n\ncodecs.register(search_function)\n", "path": "Lib/fontTools/encodings/codecs.py"}]} | 2,640 | 682 |
gh_patches_debug_51239 | rasdani/github-patches | git_diff | ManimCommunity__manim-3541 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
reST in ``MoveAlongPath`` not correct
[https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html](https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html)

The ``..rubric:: Example`` should not be shown in the docs.
</issue>
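The fix amounts to dropping the hand-written ``Example`` heading so only the ``.. manim::`` directive remains. A sketch of how the docstring could read afterwards (excerpt only, methods and imports omitted), matching the diff further down:

```
class MoveAlongPath(Animation):
    """Make one mobject move along the path of another mobject.

    .. manim:: MoveAlongPathExample

        class MoveAlongPathExample(Scene):
            def construct(self):
                d1 = Dot().set_color(ORANGE)
                l1 = Line(LEFT, RIGHT)
                l2 = VMobject()
                self.add(d1, l1, l2)
                l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))
                self.play(MoveAlongPath(d1, l1), rate_func=linear)
    """
```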
<code>
[start of manim/animation/movement.py]
1 """Animations related to movement."""
2
3 from __future__ import annotations
4
5 __all__ = [
6 "Homotopy",
7 "SmoothedVectorizedHomotopy",
8 "ComplexHomotopy",
9 "PhaseFlow",
10 "MoveAlongPath",
11 ]
12
13 from typing import TYPE_CHECKING, Any, Callable
14
15 import numpy as np
16
17 from ..animation.animation import Animation
18 from ..utils.rate_functions import linear
19
20 if TYPE_CHECKING:
21 from ..mobject.mobject import Mobject, VMobject
22
23
24 class Homotopy(Animation):
25 """A Homotopy.
26
27 This is an animation transforming the points of a mobject according
28 to the specified transformation function. With the parameter :math:`t`
29 moving from 0 to 1 throughout the animation and :math:`(x, y, z)`
30 describing the coordinates of the point of a mobject,
31 the function passed to the ``homotopy`` keyword argument should
32 transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,
33 the coordinates the original point is transformed to at time :math:`t`.
34
35 Parameters
36 ----------
37 homotopy
38 A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.
39 mobject
40 The mobject transformed under the given homotopy.
41 run_time
42 The run time of the animation.
43 apply_function_kwargs
44 Keyword arguments propagated to :meth:`.Mobject.apply_function`.
45 kwargs
46 Further keyword arguments passed to the parent class.
47 """
48
49 def __init__(
50 self,
51 homotopy: Callable[[float, float, float, float], tuple[float, float, float]],
52 mobject: Mobject,
53 run_time: float = 3,
54 apply_function_kwargs: dict[str, Any] | None = None,
55 **kwargs,
56 ) -> None:
57 self.homotopy = homotopy
58 self.apply_function_kwargs = (
59 apply_function_kwargs if apply_function_kwargs is not None else {}
60 )
61 super().__init__(mobject, run_time=run_time, **kwargs)
62
63 def function_at_time_t(self, t: float) -> tuple[float, float, float]:
64 return lambda p: self.homotopy(*p, t)
65
66 def interpolate_submobject(
67 self,
68 submobject: Mobject,
69 starting_submobject: Mobject,
70 alpha: float,
71 ) -> None:
72 submobject.points = starting_submobject.points
73 submobject.apply_function(
74 self.function_at_time_t(alpha), **self.apply_function_kwargs
75 )
76
77
78 class SmoothedVectorizedHomotopy(Homotopy):
79 def interpolate_submobject(
80 self,
81 submobject: Mobject,
82 starting_submobject: Mobject,
83 alpha: float,
84 ) -> None:
85 super().interpolate_submobject(submobject, starting_submobject, alpha)
86 submobject.make_smooth()
87
88
89 class ComplexHomotopy(Homotopy):
90 def __init__(
91 self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs
92 ) -> None:
93 """
94 Complex Homotopy a function Cx[0, 1] to C
95 """
96
97 def homotopy(
98 x: float,
99 y: float,
100 z: float,
101 t: float,
102 ) -> tuple[float, float, float]:
103 c = complex_homotopy(complex(x, y), t)
104 return (c.real, c.imag, z)
105
106 super().__init__(homotopy, mobject, **kwargs)
107
108
109 class PhaseFlow(Animation):
110 def __init__(
111 self,
112 function: Callable[[np.ndarray], np.ndarray],
113 mobject: Mobject,
114 virtual_time: float = 1,
115 suspend_mobject_updating: bool = False,
116 rate_func: Callable[[float], float] = linear,
117 **kwargs,
118 ) -> None:
119 self.virtual_time = virtual_time
120 self.function = function
121 super().__init__(
122 mobject,
123 suspend_mobject_updating=suspend_mobject_updating,
124 rate_func=rate_func,
125 **kwargs,
126 )
127
128 def interpolate_mobject(self, alpha: float) -> None:
129 if hasattr(self, "last_alpha"):
130 dt = self.virtual_time * (
131 self.rate_func(alpha) - self.rate_func(self.last_alpha)
132 )
133 self.mobject.apply_function(lambda p: p + dt * self.function(p))
134 self.last_alpha = alpha
135
136
137 class MoveAlongPath(Animation):
138 """Make one mobject move along the path of another mobject.
139 Example
140 --------
141 .. manim:: MoveAlongPathExample
142
143 class MoveAlongPathExample(Scene):
144 def construct(self):
145 d1 = Dot().set_color(ORANGE)
146 l1 = Line(LEFT, RIGHT)
147 l2 = VMobject()
148 self.add(d1, l1, l2)
149 l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))
150 self.play(MoveAlongPath(d1, l1), rate_func=linear)
151 """
152
153 def __init__(
154 self,
155 mobject: Mobject,
156 path: VMobject,
157 suspend_mobject_updating: bool | None = False,
158 **kwargs,
159 ) -> None:
160 self.path = path
161 super().__init__(
162 mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs
163 )
164
165 def interpolate_mobject(self, alpha: float) -> None:
166 point = self.path.point_from_proportion(self.rate_func(alpha))
167 self.mobject.move_to(point)
168
[end of manim/animation/movement.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/manim/animation/movement.py b/manim/animation/movement.py
--- a/manim/animation/movement.py
+++ b/manim/animation/movement.py
@@ -136,8 +136,7 @@
class MoveAlongPath(Animation):
"""Make one mobject move along the path of another mobject.
- Example
- --------
+
.. manim:: MoveAlongPathExample
class MoveAlongPathExample(Scene):
| {"golden_diff": "diff --git a/manim/animation/movement.py b/manim/animation/movement.py\n--- a/manim/animation/movement.py\n+++ b/manim/animation/movement.py\n@@ -136,8 +136,7 @@\n \n class MoveAlongPath(Animation):\n \"\"\"Make one mobject move along the path of another mobject.\n- Example\n- --------\n+\n .. manim:: MoveAlongPathExample\n \n class MoveAlongPathExample(Scene):\n", "issue": "reST in ``MoveAlongPath`` not correct\n[https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html](https://docs.manim.community/en/stable/reference/manim.animation.movement.MoveAlongPath.html)\r\n\r\n\r\n\r\nThe ``..rubric:: Example`` should not be shown in the docs.\n", "before_files": [{"content": "\"\"\"Animations related to movement.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"Homotopy\",\n \"SmoothedVectorizedHomotopy\",\n \"ComplexHomotopy\",\n \"PhaseFlow\",\n \"MoveAlongPath\",\n]\n\nfrom typing import TYPE_CHECKING, Any, Callable\n\nimport numpy as np\n\nfrom ..animation.animation import Animation\nfrom ..utils.rate_functions import linear\n\nif TYPE_CHECKING:\n from ..mobject.mobject import Mobject, VMobject\n\n\nclass Homotopy(Animation):\n \"\"\"A Homotopy.\n\n This is an animation transforming the points of a mobject according\n to the specified transformation function. With the parameter :math:`t`\n moving from 0 to 1 throughout the animation and :math:`(x, y, z)`\n describing the coordinates of the point of a mobject,\n the function passed to the ``homotopy`` keyword argument should\n transform the tuple :math:`(x, y, z, t)` to :math:`(x', y', z')`,\n the coordinates the original point is transformed to at time :math:`t`.\n\n Parameters\n ----------\n homotopy\n A function mapping :math:`(x, y, z, t)` to :math:`(x', y', z')`.\n mobject\n The mobject transformed under the given homotopy.\n run_time\n The run time of the animation.\n apply_function_kwargs\n Keyword arguments propagated to :meth:`.Mobject.apply_function`.\n kwargs\n Further keyword arguments passed to the parent class.\n \"\"\"\n\n def __init__(\n self,\n homotopy: Callable[[float, float, float, float], tuple[float, float, float]],\n mobject: Mobject,\n run_time: float = 3,\n apply_function_kwargs: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n self.homotopy = homotopy\n self.apply_function_kwargs = (\n apply_function_kwargs if apply_function_kwargs is not None else {}\n )\n super().__init__(mobject, run_time=run_time, **kwargs)\n\n def function_at_time_t(self, t: float) -> tuple[float, float, float]:\n return lambda p: self.homotopy(*p, t)\n\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n submobject.points = starting_submobject.points\n submobject.apply_function(\n self.function_at_time_t(alpha), **self.apply_function_kwargs\n )\n\n\nclass SmoothedVectorizedHomotopy(Homotopy):\n def interpolate_submobject(\n self,\n submobject: Mobject,\n starting_submobject: Mobject,\n alpha: float,\n ) -> None:\n super().interpolate_submobject(submobject, starting_submobject, alpha)\n submobject.make_smooth()\n\n\nclass ComplexHomotopy(Homotopy):\n def __init__(\n self, complex_homotopy: Callable[[complex], float], mobject: Mobject, **kwargs\n ) -> None:\n \"\"\"\n Complex Homotopy a function Cx[0, 1] to C\n \"\"\"\n\n def homotopy(\n x: float,\n y: float,\n z: float,\n t: float,\n ) -> tuple[float, float, float]:\n c = complex_homotopy(complex(x, y), t)\n return (c.real, c.imag, z)\n\n 
super().__init__(homotopy, mobject, **kwargs)\n\n\nclass PhaseFlow(Animation):\n def __init__(\n self,\n function: Callable[[np.ndarray], np.ndarray],\n mobject: Mobject,\n virtual_time: float = 1,\n suspend_mobject_updating: bool = False,\n rate_func: Callable[[float], float] = linear,\n **kwargs,\n ) -> None:\n self.virtual_time = virtual_time\n self.function = function\n super().__init__(\n mobject,\n suspend_mobject_updating=suspend_mobject_updating,\n rate_func=rate_func,\n **kwargs,\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n if hasattr(self, \"last_alpha\"):\n dt = self.virtual_time * (\n self.rate_func(alpha) - self.rate_func(self.last_alpha)\n )\n self.mobject.apply_function(lambda p: p + dt * self.function(p))\n self.last_alpha = alpha\n\n\nclass MoveAlongPath(Animation):\n \"\"\"Make one mobject move along the path of another mobject.\n Example\n --------\n .. manim:: MoveAlongPathExample\n\n class MoveAlongPathExample(Scene):\n def construct(self):\n d1 = Dot().set_color(ORANGE)\n l1 = Line(LEFT, RIGHT)\n l2 = VMobject()\n self.add(d1, l1, l2)\n l2.add_updater(lambda x: x.become(Line(LEFT, d1.get_center()).set_color(ORANGE)))\n self.play(MoveAlongPath(d1, l1), rate_func=linear)\n \"\"\"\n\n def __init__(\n self,\n mobject: Mobject,\n path: VMobject,\n suspend_mobject_updating: bool | None = False,\n **kwargs,\n ) -> None:\n self.path = path\n super().__init__(\n mobject, suspend_mobject_updating=suspend_mobject_updating, **kwargs\n )\n\n def interpolate_mobject(self, alpha: float) -> None:\n point = self.path.point_from_proportion(self.rate_func(alpha))\n self.mobject.move_to(point)\n", "path": "manim/animation/movement.py"}]} | 2,314 | 103 |
gh_patches_debug_19412 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2905 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No such file or directory: '/github/home/.cache/pre-commit/repo4mrvfeou/rbenv-system/.install_state_v1staging'
### search you tried in the issue tracker
Found this one #1658
### describe your issue
Running pre-commit GitHub action in a [custom container](https://github.com/platform-engineering-org/helper/blob/main/Dockerfile).
Workflow is [broken](https://github.com/platform-engineering-org/bootstrap/actions/runs/4342905858/jobs/7584289627):
```
[INFO] Installing environment for https://github.com/markdownlint/markdownlint.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: FileNotFoundError: [Errno 2] No such file or directory: '/github/home/.cache/pre-commit/repo4mrvfeou/rbenv-system/.install_state_v1staging'
```
### pre-commit --version
pre-commit 2.20.0
### .pre-commit-config.yaml
```yaml
---
ci:
skip: [terraform_fmt, terraform_validate, terragrunt_validate]
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
args:
- --markdown-linebreak-ext=md
- id: check-docstring-first
- id: requirements-txt-fixer
- id: check-merge-conflict
- id: no-commit-to-branch
args:
- "--branch"
- "main"
- id: check-symlinks
- id: detect-private-key
- id: detect-aws-credentials
args:
- --allow-missing-credentials
- id: check-json
- repo: https://github.com/markdownlint/markdownlint
rev: v0.12.0
hooks:
- id: markdownlint
additional_dependencies: [rake]
- repo: https://github.com/maxbrunet/pre-commit-renovate
rev: 34.157.1
hooks:
- id: renovate-config-validator
- repo: https://github.com/antonbabenko/pre-commit-terraform
rev: v1.77.1
hooks:
- id: terraform_providers_lock
args:
- --tf-init-args=-upgrade
- id: terraform_fmt
- id: terraform_validate
- id: terragrunt_validate
```
### ~/.cache/pre-commit/pre-commit.log (if present)
_No response_
</issue>
<code>
[start of pre_commit/languages/ruby.py]
1 from __future__ import annotations
2
3 import contextlib
4 import functools
5 import importlib.resources
6 import os.path
7 import shutil
8 import tarfile
9 from typing import Generator
10 from typing import IO
11 from typing import Sequence
12
13 import pre_commit.constants as C
14 from pre_commit import lang_base
15 from pre_commit.envcontext import envcontext
16 from pre_commit.envcontext import PatchesT
17 from pre_commit.envcontext import UNSET
18 from pre_commit.envcontext import Var
19 from pre_commit.prefix import Prefix
20 from pre_commit.util import CalledProcessError
21
22 ENVIRONMENT_DIR = 'rbenv'
23 health_check = lang_base.basic_health_check
24 run_hook = lang_base.basic_run_hook
25
26
27 def _resource_bytesio(filename: str) -> IO[bytes]:
28 return importlib.resources.open_binary('pre_commit.resources', filename)
29
30
31 @functools.lru_cache(maxsize=1)
32 def get_default_version() -> str:
33 if all(lang_base.exe_exists(exe) for exe in ('ruby', 'gem')):
34 return 'system'
35 else:
36 return C.DEFAULT
37
38
39 def get_env_patch(
40 venv: str,
41 language_version: str,
42 ) -> PatchesT:
43 patches: PatchesT = (
44 ('GEM_HOME', os.path.join(venv, 'gems')),
45 ('GEM_PATH', UNSET),
46 ('BUNDLE_IGNORE_CONFIG', '1'),
47 )
48 if language_version == 'system':
49 patches += (
50 (
51 'PATH', (
52 os.path.join(venv, 'gems', 'bin'), os.pathsep,
53 Var('PATH'),
54 ),
55 ),
56 )
57 else: # pragma: win32 no cover
58 patches += (
59 ('RBENV_ROOT', venv),
60 (
61 'PATH', (
62 os.path.join(venv, 'gems', 'bin'), os.pathsep,
63 os.path.join(venv, 'shims'), os.pathsep,
64 os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),
65 ),
66 ),
67 )
68 if language_version not in {'system', 'default'}: # pragma: win32 no cover
69 patches += (('RBENV_VERSION', language_version),)
70
71 return patches
72
73
74 @contextlib.contextmanager
75 def in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:
76 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)
77 with envcontext(get_env_patch(envdir, version)):
78 yield
79
80
81 def _extract_resource(filename: str, dest: str) -> None:
82 with _resource_bytesio(filename) as bio:
83 with tarfile.open(fileobj=bio) as tf:
84 tf.extractall(dest)
85
86
87 def _install_rbenv(
88 prefix: Prefix,
89 version: str,
90 ) -> None: # pragma: win32 no cover
91 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)
92
93 _extract_resource('rbenv.tar.gz', prefix.path('.'))
94 shutil.move(prefix.path('rbenv'), envdir)
95
96 # Only install ruby-build if the version is specified
97 if version != C.DEFAULT:
98 plugins_dir = os.path.join(envdir, 'plugins')
99 _extract_resource('ruby-download.tar.gz', plugins_dir)
100 _extract_resource('ruby-build.tar.gz', plugins_dir)
101
102
103 def _install_ruby(
104 prefix: Prefix,
105 version: str,
106 ) -> None: # pragma: win32 no cover
107 try:
108 lang_base.setup_cmd(prefix, ('rbenv', 'download', version))
109 except CalledProcessError: # pragma: no cover (usually find with download)
110 # Failed to download from mirror for some reason, build it instead
111 lang_base.setup_cmd(prefix, ('rbenv', 'install', version))
112
113
114 def install_environment(
115 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
116 ) -> None:
117 if version != 'system': # pragma: win32 no cover
118 _install_rbenv(prefix, version)
119 with in_env(prefix, version):
120 # Need to call this before installing so rbenv's directories
121 # are set up
122 lang_base.setup_cmd(prefix, ('rbenv', 'init', '-'))
123 if version != C.DEFAULT:
124 _install_ruby(prefix, version)
125 # Need to call this after installing to set up the shims
126 lang_base.setup_cmd(prefix, ('rbenv', 'rehash'))
127
128 with in_env(prefix, version):
129 lang_base.setup_cmd(
130 prefix, ('gem', 'build', *prefix.star('.gemspec')),
131 )
132 lang_base.setup_cmd(
133 prefix,
134 (
135 'gem', 'install',
136 '--no-document', '--no-format-executable',
137 '--no-user-install',
138 *prefix.star('.gem'), *additional_dependencies,
139 ),
140 )
141
[end of pre_commit/languages/ruby.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py
--- a/pre_commit/languages/ruby.py
+++ b/pre_commit/languages/ruby.py
@@ -114,6 +114,8 @@
def install_environment(
prefix: Prefix, version: str, additional_dependencies: Sequence[str],
) -> None:
+ envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)
+
if version != 'system': # pragma: win32 no cover
_install_rbenv(prefix, version)
with in_env(prefix, version):
@@ -135,6 +137,8 @@
'gem', 'install',
'--no-document', '--no-format-executable',
'--no-user-install',
+ '--install-dir', os.path.join(envdir, 'gems'),
+ '--bindir', os.path.join(envdir, 'gems', 'bin'),
*prefix.star('.gem'), *additional_dependencies,
),
)
| {"golden_diff": "diff --git a/pre_commit/languages/ruby.py b/pre_commit/languages/ruby.py\n--- a/pre_commit/languages/ruby.py\n+++ b/pre_commit/languages/ruby.py\n@@ -114,6 +114,8 @@\n def install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n ) -> None:\n+ envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n+\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n@@ -135,6 +137,8 @@\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n '--no-user-install',\n+ '--install-dir', os.path.join(envdir, 'gems'),\n+ '--bindir', os.path.join(envdir, 'gems', 'bin'),\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n", "issue": "No such file or directory: '/github/home/.cache/pre-commit/repo4mrvfeou/rbenv-system/.install_state_v1staging'\n### search you tried in the issue tracker\n\nFound this one #1658\n\n### describe your issue\n\nRunning pre-commit GitHub action in a [custom container](https://github.com/platform-engineering-org/helper/blob/main/Dockerfile).\r\n\r\nWorkflow is [broken](https://github.com/platform-engineering-org/bootstrap/actions/runs/4342905858/jobs/7584289627):\r\n```\r\n[INFO] Installing environment for https://github.com/markdownlint/markdownlint.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\nAn unexpected error has occurred: FileNotFoundError: [Errno 2] No such file or directory: '/github/home/.cache/pre-commit/repo4mrvfeou/rbenv-system/.install_state_v1staging'\r\n```\r\n\r\n\n\n### pre-commit --version\n\npre-commit 2.20.0\n\n### .pre-commit-config.yaml\n\n```yaml\n---\r\nci:\r\n skip: [terraform_fmt, terraform_validate, terragrunt_validate]\r\n\r\nrepos:\r\n - repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: v4.4.0\r\n hooks:\r\n - id: end-of-file-fixer\r\n - id: trailing-whitespace\r\n args:\r\n - --markdown-linebreak-ext=md\r\n - id: check-docstring-first\r\n - id: requirements-txt-fixer\r\n - id: check-merge-conflict\r\n - id: no-commit-to-branch\r\n args:\r\n - \"--branch\"\r\n - \"main\"\r\n - id: check-symlinks\r\n - id: detect-private-key\r\n - id: detect-aws-credentials\r\n args:\r\n - --allow-missing-credentials\r\n - id: check-json\r\n - repo: https://github.com/markdownlint/markdownlint\r\n rev: v0.12.0\r\n hooks:\r\n - id: markdownlint\r\n additional_dependencies: [rake]\r\n - repo: https://github.com/maxbrunet/pre-commit-renovate\r\n rev: 34.157.1\r\n hooks:\r\n - id: renovate-config-validator\r\n - repo: https://github.com/antonbabenko/pre-commit-terraform\r\n rev: v1.77.1\r\n hooks:\r\n - id: terraform_providers_lock\r\n args:\r\n - --tf-init-args=-upgrade\r\n - id: terraform_fmt\r\n - id: terraform_validate\r\n - id: terragrunt_validate\n```\n\n\n### ~/.cache/pre-commit/pre-commit.log (if present)\n\n_No response_\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport functools\nimport importlib.resources\nimport os.path\nimport shutil\nimport tarfile\nfrom typing import Generator\nfrom typing import IO\nfrom typing import Sequence\n\nimport pre_commit.constants as C\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\n\nENVIRONMENT_DIR = 'rbenv'\nhealth_check = 
lang_base.basic_health_check\nrun_hook = lang_base.basic_run_hook\n\n\ndef _resource_bytesio(filename: str) -> IO[bytes]:\n return importlib.resources.open_binary('pre_commit.resources', filename)\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str:\n if all(lang_base.exe_exists(exe) for exe in ('ruby', 'gem')):\n return 'system'\n else:\n return C.DEFAULT\n\n\ndef get_env_patch(\n venv: str,\n language_version: str,\n) -> PatchesT:\n patches: PatchesT = (\n ('GEM_HOME', os.path.join(venv, 'gems')),\n ('GEM_PATH', UNSET),\n ('BUNDLE_IGNORE_CONFIG', '1'),\n )\n if language_version == 'system':\n patches += (\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n Var('PATH'),\n ),\n ),\n )\n else: # pragma: win32 no cover\n patches += (\n ('RBENV_ROOT', venv),\n (\n 'PATH', (\n os.path.join(venv, 'gems', 'bin'), os.pathsep,\n os.path.join(venv, 'shims'), os.pathsep,\n os.path.join(venv, 'bin'), os.pathsep, Var('PATH'),\n ),\n ),\n )\n if language_version not in {'system', 'default'}: # pragma: win32 no cover\n patches += (('RBENV_VERSION', language_version),)\n\n return patches\n\n\[email protected]\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir, version)):\n yield\n\n\ndef _extract_resource(filename: str, dest: str) -> None:\n with _resource_bytesio(filename) as bio:\n with tarfile.open(fileobj=bio) as tf:\n tf.extractall(dest)\n\n\ndef _install_rbenv(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n _extract_resource('rbenv.tar.gz', prefix.path('.'))\n shutil.move(prefix.path('rbenv'), envdir)\n\n # Only install ruby-build if the version is specified\n if version != C.DEFAULT:\n plugins_dir = os.path.join(envdir, 'plugins')\n _extract_resource('ruby-download.tar.gz', plugins_dir)\n _extract_resource('ruby-build.tar.gz', plugins_dir)\n\n\ndef _install_ruby(\n prefix: Prefix,\n version: str,\n) -> None: # pragma: win32 no cover\n try:\n lang_base.setup_cmd(prefix, ('rbenv', 'download', version))\n except CalledProcessError: # pragma: no cover (usually find with download)\n # Failed to download from mirror for some reason, build it instead\n lang_base.setup_cmd(prefix, ('rbenv', 'install', version))\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None:\n if version != 'system': # pragma: win32 no cover\n _install_rbenv(prefix, version)\n with in_env(prefix, version):\n # Need to call this before installing so rbenv's directories\n # are set up\n lang_base.setup_cmd(prefix, ('rbenv', 'init', '-'))\n if version != C.DEFAULT:\n _install_ruby(prefix, version)\n # Need to call this after installing to set up the shims\n lang_base.setup_cmd(prefix, ('rbenv', 'rehash'))\n\n with in_env(prefix, version):\n lang_base.setup_cmd(\n prefix, ('gem', 'build', *prefix.star('.gemspec')),\n )\n lang_base.setup_cmd(\n prefix,\n (\n 'gem', 'install',\n '--no-document', '--no-format-executable',\n '--no-user-install',\n *prefix.star('.gem'), *additional_dependencies,\n ),\n )\n", "path": "pre_commit/languages/ruby.py"}]} | 2,493 | 215 |
gh_patches_debug_563 | rasdani/github-patches | git_diff | pex-tool__pex-910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Release 2.1.5
On the docket:
+ [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907
+ [x] Silence pip warnings about Python 2.7. #908
</issue>
<code>
[start of pex/version.py]
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = '2.1.4'
5
[end of pex/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = '2.1.4'
+__version__ = '2.1.5'
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.4'\n+__version__ = '2.1.5'\n", "issue": "Release 2.1.5\nOn the docket:\r\n+ [x] Kill `Pip.spawn_install_wheel` `overwrite` arg. #907\r\n+ [x] Silence pip warnings about Python 2.7. #908\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.4'\n", "path": "pex/version.py"}]} | 635 | 94 |
gh_patches_debug_36943 | rasdani/github-patches | git_diff | benoitc__gunicorn-697 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Response silently fails if app_iter returns unicode
If you pass an iterator as your response, and the iterator `yield`s unicode, Gunicorn will silently abort the request.
I'm attaching a pull request to fix this. It may also be worth considering coercing iterator output to strings, to avoid the issue altogether, but I don't know enough to say whether that's appropriate.
Below is a minimal test case to demonstrate the problem. Here are the steps I witnessed, stepping through the response process with pdb. All line numbers are referenced according to [version 18 of gunicorn/workers/sync.py](https://github.com/benoitc/gunicorn/blob/18.0/gunicorn/workers/sync.py).
- In my view: `yield u"test\n"`
- In sync.py, line 137: `resp.write(item)`
- Exception raised: `AssertionError: u'test\n' is not a byte.`
- `Except` clause on line 146 is reached
- `if` condition on line 147 is triggered
- `raise StopIteration()` on line 155 is reached
- `finally` clause on line 159 is reached, bypassing the error handling clause on line 157.
Here's the sample view (written for Pyramid):
```
@view_config(route_name='test')
def test(request):
def test_():
while True:
yield u"test\n"
time.sleep(.5)
return Response(content_type="text/event-stream", app_iter=test_())
```
Response silently fails if app_iter returns unicode
If you pass an iterator as your response, and the iterator `yield`s unicode, Gunicorn will silently abort the request.
I'm attaching a pull request to fix this. It may also be worth considering coercing iterator output to strings, to avoid the issue altogether, but I don't know enough to say whether that's appropriate.
Below is a minimal test case to demonstrate the problem. Here are the steps I witnessed, stepping through the response process with pdb. All line numbers are referenced according to [version 18 of gunicorn/workers/sync.py](https://github.com/benoitc/gunicorn/blob/18.0/gunicorn/workers/sync.py).
- In my view: `yield u"test\n"`
- In sync.py, line 137: `resp.write(item)`
- Exception raised: `AssertionError: u'test\n' is not a byte.`
- `Except` clause on line 146 is reached
- `if` condition on line 147 is triggered
- `raise StopIteration()` on line 155 is reached
- `finally` clause on line 159 is reached, bypassing the error handling clause on line 157.
Here's the sample view (written for Pyramid):
```
@view_config(route_name='test')
def test(request):
def test_():
while True:
yield u"test\n"
time.sleep(.5)
return Response(content_type="text/event-stream", app_iter=test_())
```
</issue>
<code>
[start of gunicorn/workers/async.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5
6 from datetime import datetime
7 import errno
8 import socket
9 import ssl
10
11 import gunicorn.http as http
12 import gunicorn.http.wsgi as wsgi
13 import gunicorn.util as util
14 import gunicorn.workers.base as base
15 from gunicorn import six
16
17 ALREADY_HANDLED = object()
18
19
20 class AsyncWorker(base.Worker):
21
22 def __init__(self, *args, **kwargs):
23 super(AsyncWorker, self).__init__(*args, **kwargs)
24 self.worker_connections = self.cfg.worker_connections
25
26 def timeout_ctx(self):
27 raise NotImplementedError()
28
29 def handle(self, listener, client, addr):
30 req = None
31 try:
32 parser = http.RequestParser(self.cfg, client)
33 try:
34 if not self.cfg.keepalive:
35 req = six.next(parser)
36 self.handle_request(listener, req, client, addr)
37 else:
38 # keepalive loop
39 while True:
40 req = None
41 with self.timeout_ctx():
42 req = six.next(parser)
43 if not req:
44 break
45 self.handle_request(listener, req, client, addr)
46 except http.errors.NoMoreData as e:
47 self.log.debug("Ignored premature client disconnection. %s", e)
48 except StopIteration as e:
49 self.log.debug("Closing connection. %s", e)
50 except ssl.SSLError:
51 raise # pass to next try-except level
52 except socket.error:
53 raise # pass to next try-except level
54 except Exception as e:
55 self.handle_error(req, client, addr, e)
56 except ssl.SSLError as e:
57 if e.args[0] == ssl.SSL_ERROR_EOF:
58 self.log.debug("ssl connection closed")
59 client.close()
60 else:
61 self.log.debug("Error processing SSL request.")
62 self.handle_error(req, client, addr, e)
63 except socket.error as e:
64 if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
65 self.log.exception("Socket error processing request.")
66 else:
67 if e.args[0] == errno.ECONNRESET:
68 self.log.debug("Ignoring connection reset")
69 else:
70 self.log.debug("Ignoring EPIPE")
71 except Exception as e:
72 self.handle_error(req, client, addr, e)
73 finally:
74 util.close(client)
75
76 def handle_request(self, listener, req, sock, addr):
77 request_start = datetime.now()
78 environ = {}
79 resp = None
80 try:
81 self.cfg.pre_request(self, req)
82 resp, environ = wsgi.create(req, sock, addr,
83 listener.getsockname(), self.cfg)
84 environ["wsgi.multithread"] = True
85 self.nr += 1
86 if self.alive and self.nr >= self.max_requests:
87 self.log.info("Autorestarting worker after current request.")
88 resp.force_close()
89 self.alive = False
90
91 if not self.cfg.keepalive:
92 resp.force_close()
93
94 respiter = self.wsgi(environ, resp.start_response)
95 if respiter == ALREADY_HANDLED:
96 return False
97 try:
98 if isinstance(respiter, environ['wsgi.file_wrapper']):
99 resp.write_file(respiter)
100 else:
101 for item in respiter:
102 resp.write(item)
103 resp.close()
104 request_time = datetime.now() - request_start
105 self.log.access(resp, req, environ, request_time)
106 finally:
107 if hasattr(respiter, "close"):
108 respiter.close()
109 if resp.should_close():
110 raise StopIteration()
111 except Exception:
112 if resp and resp.headers_sent:
113 # If the requests have already been sent, we should close the
114 # connection to indicate the error.
115 try:
116 sock.shutdown(socket.SHUT_RDWR)
117 sock.close()
118 except socket.error:
119 pass
120 raise StopIteration()
121 raise
122 finally:
123 try:
124 self.cfg.post_request(self, req, environ, resp)
125 except Exception:
126 self.log.exception("Exception in post_request hook")
127 return True
128
[end of gunicorn/workers/async.py]
[start of gunicorn/workers/sync.py]
1 # -*- coding: utf-8 -
2 #
3 # This file is part of gunicorn released under the MIT license.
4 # See the NOTICE for more information.
5 #
6
7 from datetime import datetime
8 import errno
9 import os
10 import select
11 import socket
12 import ssl
13
14 import gunicorn.http as http
15 import gunicorn.http.wsgi as wsgi
16 import gunicorn.util as util
17 import gunicorn.workers.base as base
18 from gunicorn import six
19
20
21 class SyncWorker(base.Worker):
22
23 def run(self):
24 # self.socket appears to lose its blocking status after
25 # we fork in the arbiter. Reset it here.
26 for s in self.sockets:
27 s.setblocking(0)
28
29 ready = self.sockets
30 while self.alive:
31 self.notify()
32
33 # Accept a connection. If we get an error telling us
34 # that no connection is waiting we fall down to the
35 # select which is where we'll wait for a bit for new
36 # workers to come give us some love.
37
38 for sock in ready:
39 try:
40 client, addr = sock.accept()
41 client.setblocking(1)
42 util.close_on_exec(client)
43 self.handle(sock, client, addr)
44
45 # Keep processing clients until no one is waiting. This
46 # prevents the need to select() for every client that we
47 # process.
48 continue
49
50 except socket.error as e:
51 if e.args[0] not in (errno.EAGAIN, errno.ECONNABORTED,
52 errno.EWOULDBLOCK):
53 raise
54
55 # If our parent changed then we shut down.
56 if self.ppid != os.getppid():
57 self.log.info("Parent changed, shutting down: %s", self)
58 return
59
60 try:
61 self.notify()
62
63 # if no timeout is given the worker will never wait and will
64 # use the CPU for nothing. This minimal timeout prevent it.
65 timeout = self.timeout or 0.5
66
67 ret = select.select(self.sockets, [], self.PIPE, timeout)
68 if ret[0]:
69 ready = ret[0]
70 continue
71 except select.error as e:
72 if e.args[0] == errno.EINTR:
73 ready = self.sockets
74 continue
75 if e.args[0] == errno.EBADF:
76 if self.nr < 0:
77 ready = self.sockets
78 continue
79 else:
80 return
81 raise
82
83 def handle(self, listener, client, addr):
84 req = None
85 try:
86 if self.cfg.is_ssl:
87 client = ssl.wrap_socket(client, server_side=True,
88 **self.cfg.ssl_options)
89
90 parser = http.RequestParser(self.cfg, client)
91 req = six.next(parser)
92 self.handle_request(listener, req, client, addr)
93 except http.errors.NoMoreData as e:
94 self.log.debug("Ignored premature client disconnection. %s", e)
95 except StopIteration as e:
96 self.log.debug("Closing connection. %s", e)
97 except ssl.SSLError as e:
98 if e.args[0] == ssl.SSL_ERROR_EOF:
99 self.log.debug("ssl connection closed")
100 client.close()
101 else:
102 self.log.debug("Error processing SSL request.")
103 self.handle_error(req, client, addr, e)
104 except socket.error as e:
105 if e.args[0] != errno.EPIPE:
106 self.log.exception("Error processing request.")
107 else:
108 self.log.debug("Ignoring EPIPE")
109 except Exception as e:
110 self.handle_error(req, client, addr, e)
111 finally:
112 util.close(client)
113
114 def handle_request(self, listener, req, client, addr):
115 environ = {}
116 resp = None
117 try:
118 self.cfg.pre_request(self, req)
119 request_start = datetime.now()
120 resp, environ = wsgi.create(req, client, addr,
121 listener.getsockname(), self.cfg)
122 # Force the connection closed until someone shows
123 # a buffering proxy that supports Keep-Alive to
124 # the backend.
125 resp.force_close()
126 self.nr += 1
127 if self.nr >= self.max_requests:
128 self.log.info("Autorestarting worker after current request.")
129 self.alive = False
130 respiter = self.wsgi(environ, resp.start_response)
131 try:
132 if isinstance(respiter, environ['wsgi.file_wrapper']):
133 resp.write_file(respiter)
134 else:
135 for item in respiter:
136 resp.write(item)
137 resp.close()
138 request_time = datetime.now() - request_start
139 self.log.access(resp, req, environ, request_time)
140 finally:
141 if hasattr(respiter, "close"):
142 respiter.close()
143 except socket.error:
144 raise
145 except Exception as e:
146 if resp and resp.headers_sent:
147 # If the requests have already been sent, we should close the
148 # connection to indicate the error.
149 try:
150 client.shutdown(socket.SHUT_RDWR)
151 client.close()
152 except socket.error:
153 pass
154 raise StopIteration()
155 # Only send back traceback in HTTP in debug mode.
156 self.handle_error(req, client, addr, e)
157 return
158 finally:
159 try:
160 self.cfg.post_request(self, req, environ, resp)
161 except Exception:
162 self.log.exception("Exception in post_request hook")
163
[end of gunicorn/workers/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py
--- a/gunicorn/workers/async.py
+++ b/gunicorn/workers/async.py
@@ -112,6 +112,7 @@
if resp and resp.headers_sent:
# If the requests have already been sent, we should close the
# connection to indicate the error.
+ self.log.exception("Error handling request")
try:
sock.shutdown(socket.SHUT_RDWR)
sock.close()
diff --git a/gunicorn/workers/sync.py b/gunicorn/workers/sync.py
--- a/gunicorn/workers/sync.py
+++ b/gunicorn/workers/sync.py
@@ -102,10 +102,13 @@
self.log.debug("Error processing SSL request.")
self.handle_error(req, client, addr, e)
except socket.error as e:
- if e.args[0] != errno.EPIPE:
- self.log.exception("Error processing request.")
+ if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):
+ self.log.exception("Socket error processing request.")
else:
- self.log.debug("Ignoring EPIPE")
+ if e.args[0] == errno.ECONNRESET:
+ self.log.debug("Ignoring connection reset")
+ else:
+ self.log.debug("Ignoring EPIPE")
except Exception as e:
self.handle_error(req, client, addr, e)
finally:
@@ -142,19 +145,18 @@
respiter.close()
except socket.error:
raise
- except Exception as e:
+ except Exception:
if resp and resp.headers_sent:
# If the requests have already been sent, we should close the
# connection to indicate the error.
+ self.log.exception("Error handling request")
try:
client.shutdown(socket.SHUT_RDWR)
client.close()
except socket.error:
pass
raise StopIteration()
- # Only send back traceback in HTTP in debug mode.
- self.handle_error(req, client, addr, e)
- return
+ raise
finally:
try:
self.cfg.post_request(self, req, environ, resp)
| {"golden_diff": "diff --git a/gunicorn/workers/async.py b/gunicorn/workers/async.py\n--- a/gunicorn/workers/async.py\n+++ b/gunicorn/workers/async.py\n@@ -112,6 +112,7 @@\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n+ self.log.exception(\"Error handling request\")\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\ndiff --git a/gunicorn/workers/sync.py b/gunicorn/workers/sync.py\n--- a/gunicorn/workers/sync.py\n+++ b/gunicorn/workers/sync.py\n@@ -102,10 +102,13 @@\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n- if e.args[0] != errno.EPIPE:\n- self.log.exception(\"Error processing request.\")\n+ if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n+ self.log.exception(\"Socket error processing request.\")\n else:\n- self.log.debug(\"Ignoring EPIPE\")\n+ if e.args[0] == errno.ECONNRESET:\n+ self.log.debug(\"Ignoring connection reset\")\n+ else:\n+ self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n@@ -142,19 +145,18 @@\n respiter.close()\n except socket.error:\n raise\n- except Exception as e:\n+ except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n+ self.log.exception(\"Error handling request\")\n try:\n client.shutdown(socket.SHUT_RDWR)\n client.close()\n except socket.error:\n pass\n raise StopIteration()\n- # Only send back traceback in HTTP in debug mode.\n- self.handle_error(req, client, addr, e)\n- return\n+ raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n", "issue": "Response silently fails if app_iter returns unicode\nIf you pass an iterator as your response, and the iterator `yield`s unicode, Gunicorn will silently abort the request.\r\n\r\nI'm attaching a pull request to fix this. It may also be worth considering coercing iterator output to strings, to avoid the issue altogether, but I don't know enough to say whether that's appropriate.\r\n\r\nBelow is a minimal test case to demonstrate the problem. Here are the steps I witnessed, stepping through the response process with pdb. All line numbers are referenced according to [version 18 of gunicorn/workers/sync.py](https://github.com/benoitc/gunicorn/blob/18.0/gunicorn/workers/sync.py).\r\n- In my view: `yield u\"test\\n\"`\r\n- In sync.py, line 137: `resp.write(item)`\r\n- Exception raised: `AssertionError: u'test\\n' is not a byte.`\r\n- `Except` clause on line 146 is reached\r\n- `if` condition on line 147 is triggered\r\n- `raise StopIteration()` on line 155 is reached\r\n- `finally` clause on line 159 is reached, bypassing the error handling clause on line 157.\r\n\r\nHere's the sample view (written for Pyramid):\r\n\r\n```\r\n@view_config(route_name='test') \r\ndef test(request): \r\n def test_(): \r\n while True: \r\n yield u\"test\\n\" \r\n time.sleep(.5) \r\n return Response(content_type=\"text/event-stream\", app_iter=test_())\r\n```\r\n\nResponse silently fails if app_iter returns unicode\nIf you pass an iterator as your response, and the iterator `yield`s unicode, Gunicorn will silently abort the request.\r\n\r\nI'm attaching a pull request to fix this. 
It may also be worth considering coercing iterator output to strings, to avoid the issue altogether, but I don't know enough to say whether that's appropriate.\r\n\r\nBelow is a minimal test case to demonstrate the problem. Here are the steps I witnessed, stepping through the response process with pdb. All line numbers are referenced according to [version 18 of gunicorn/workers/sync.py](https://github.com/benoitc/gunicorn/blob/18.0/gunicorn/workers/sync.py).\r\n- In my view: `yield u\"test\\n\"`\r\n- In sync.py, line 137: `resp.write(item)`\r\n- Exception raised: `AssertionError: u'test\\n' is not a byte.`\r\n- `Except` clause on line 146 is reached\r\n- `if` condition on line 147 is triggered\r\n- `raise StopIteration()` on line 155 is reached\r\n- `finally` clause on line 159 is reached, bypassing the error handling clause on line 157.\r\n\r\nHere's the sample view (written for Pyramid):\r\n\r\n```\r\n@view_config(route_name='test') \r\ndef test(request): \r\n def test_(): \r\n while True: \r\n yield u\"test\\n\" \r\n time.sleep(.5) \r\n return Response(content_type=\"text/event-stream\", app_iter=test_())\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport errno\nimport socket\nimport ssl\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\nALREADY_HANDLED = object()\n\n\nclass AsyncWorker(base.Worker):\n\n def __init__(self, *args, **kwargs):\n super(AsyncWorker, self).__init__(*args, **kwargs)\n self.worker_connections = self.cfg.worker_connections\n\n def timeout_ctx(self):\n raise NotImplementedError()\n\n def handle(self, listener, client, addr):\n req = None\n try:\n parser = http.RequestParser(self.cfg, client)\n try:\n if not self.cfg.keepalive:\n req = six.next(parser)\n self.handle_request(listener, req, client, addr)\n else:\n # keepalive loop\n while True:\n req = None\n with self.timeout_ctx():\n req = six.next(parser)\n if not req:\n break\n self.handle_request(listener, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. 
%s\", e)\n except ssl.SSLError:\n raise # pass to next try-except level\n except socket.error:\n raise # pass to next try-except level\n except Exception as e:\n self.handle_error(req, client, addr, e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] not in (errno.EPIPE, errno.ECONNRESET):\n self.log.exception(\"Socket error processing request.\")\n else:\n if e.args[0] == errno.ECONNRESET:\n self.log.debug(\"Ignoring connection reset\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener, req, sock, addr):\n request_start = datetime.now()\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n resp, environ = wsgi.create(req, sock, addr,\n listener.getsockname(), self.cfg)\n environ[\"wsgi.multithread\"] = True\n self.nr += 1\n if self.alive and self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n resp.force_close()\n self.alive = False\n\n if not self.cfg.keepalive:\n resp.force_close()\n\n respiter = self.wsgi(environ, resp.start_response)\n if respiter == ALREADY_HANDLED:\n return False\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n if hasattr(respiter, \"close\"):\n respiter.close()\n if resp.should_close():\n raise StopIteration()\n except Exception:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n try:\n sock.shutdown(socket.SHUT_RDWR)\n sock.close()\n except socket.error:\n pass\n raise StopIteration()\n raise\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n return True\n", "path": "gunicorn/workers/async.py"}, {"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n#\n\nfrom datetime import datetime\nimport errno\nimport os\nimport select\nimport socket\nimport ssl\n\nimport gunicorn.http as http\nimport gunicorn.http.wsgi as wsgi\nimport gunicorn.util as util\nimport gunicorn.workers.base as base\nfrom gunicorn import six\n\n\nclass SyncWorker(base.Worker):\n\n def run(self):\n # self.socket appears to lose its blocking status after\n # we fork in the arbiter. Reset it here.\n for s in self.sockets:\n s.setblocking(0)\n\n ready = self.sockets\n while self.alive:\n self.notify()\n\n # Accept a connection. If we get an error telling us\n # that no connection is waiting we fall down to the\n # select which is where we'll wait for a bit for new\n # workers to come give us some love.\n\n for sock in ready:\n try:\n client, addr = sock.accept()\n client.setblocking(1)\n util.close_on_exec(client)\n self.handle(sock, client, addr)\n\n # Keep processing clients until no one is waiting. 
This\n # prevents the need to select() for every client that we\n # process.\n continue\n\n except socket.error as e:\n if e.args[0] not in (errno.EAGAIN, errno.ECONNABORTED,\n errno.EWOULDBLOCK):\n raise\n\n # If our parent changed then we shut down.\n if self.ppid != os.getppid():\n self.log.info(\"Parent changed, shutting down: %s\", self)\n return\n\n try:\n self.notify()\n\n # if no timeout is given the worker will never wait and will\n # use the CPU for nothing. This minimal timeout prevent it.\n timeout = self.timeout or 0.5\n\n ret = select.select(self.sockets, [], self.PIPE, timeout)\n if ret[0]:\n ready = ret[0]\n continue\n except select.error as e:\n if e.args[0] == errno.EINTR:\n ready = self.sockets\n continue\n if e.args[0] == errno.EBADF:\n if self.nr < 0:\n ready = self.sockets\n continue\n else:\n return\n raise\n\n def handle(self, listener, client, addr):\n req = None\n try:\n if self.cfg.is_ssl:\n client = ssl.wrap_socket(client, server_side=True,\n **self.cfg.ssl_options)\n\n parser = http.RequestParser(self.cfg, client)\n req = six.next(parser)\n self.handle_request(listener, req, client, addr)\n except http.errors.NoMoreData as e:\n self.log.debug(\"Ignored premature client disconnection. %s\", e)\n except StopIteration as e:\n self.log.debug(\"Closing connection. %s\", e)\n except ssl.SSLError as e:\n if e.args[0] == ssl.SSL_ERROR_EOF:\n self.log.debug(\"ssl connection closed\")\n client.close()\n else:\n self.log.debug(\"Error processing SSL request.\")\n self.handle_error(req, client, addr, e)\n except socket.error as e:\n if e.args[0] != errno.EPIPE:\n self.log.exception(\"Error processing request.\")\n else:\n self.log.debug(\"Ignoring EPIPE\")\n except Exception as e:\n self.handle_error(req, client, addr, e)\n finally:\n util.close(client)\n\n def handle_request(self, listener, req, client, addr):\n environ = {}\n resp = None\n try:\n self.cfg.pre_request(self, req)\n request_start = datetime.now()\n resp, environ = wsgi.create(req, client, addr,\n listener.getsockname(), self.cfg)\n # Force the connection closed until someone shows\n # a buffering proxy that supports Keep-Alive to\n # the backend.\n resp.force_close()\n self.nr += 1\n if self.nr >= self.max_requests:\n self.log.info(\"Autorestarting worker after current request.\")\n self.alive = False\n respiter = self.wsgi(environ, resp.start_response)\n try:\n if isinstance(respiter, environ['wsgi.file_wrapper']):\n resp.write_file(respiter)\n else:\n for item in respiter:\n resp.write(item)\n resp.close()\n request_time = datetime.now() - request_start\n self.log.access(resp, req, environ, request_time)\n finally:\n if hasattr(respiter, \"close\"):\n respiter.close()\n except socket.error:\n raise\n except Exception as e:\n if resp and resp.headers_sent:\n # If the requests have already been sent, we should close the\n # connection to indicate the error.\n try:\n client.shutdown(socket.SHUT_RDWR)\n client.close()\n except socket.error:\n pass\n raise StopIteration()\n # Only send back traceback in HTTP in debug mode.\n self.handle_error(req, client, addr, e)\n return\n finally:\n try:\n self.cfg.post_request(self, req, environ, resp)\n except Exception:\n self.log.exception(\"Exception in post_request hook\")\n", "path": "gunicorn/workers/sync.py"}]} | 3,945 | 484 |
gh_patches_debug_31701 | rasdani/github-patches | git_diff | searx__searx-1594 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Duden search engine not working anymore
They changed the site layout.
</issue>
<code>
[start of searx/engines/duden.py]
1 """
2 Duden
3 @website https://www.duden.de
4 @provide-api no
5 @using-api no
6 @results HTML (using search portal)
7 @stable no (HTML can change)
8 @parse url, title, content
9 """
10
11 from lxml import html, etree
12 import re
13 from searx.engines.xpath import extract_text
14 from searx.url_utils import quote
15 from searx import logger
16
17 categories = ['general']
18 paging = True
19 language_support = False
20
21 # search-url
22 base_url = 'https://www.duden.de/'
23 search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'
24
25
26 def request(query, params):
27 '''pre-request callback
28 params<dict>:
29 method : POST/GET
30 headers : {}
31 data : {} # if method == POST
32 url : ''
33 category: 'search category'
34 pageno : 1 # number of the requested page
35 '''
36
37 offset = (params['pageno'] - 1)
38 params['url'] = search_url.format(offset=offset, query=quote(query))
39 return params
40
41
42 def response(resp):
43 '''post-response callback
44 resp: requests response object
45 '''
46 results = []
47
48 dom = html.fromstring(resp.text)
49
50 try:
51 number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
52 '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
53 )
54
55 results.append({'number_of_results': int(number_of_results_string)})
56
57 except:
58 logger.debug("Couldn't read number of results.")
59 pass
60
61 for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
62 try:
63 logger.debug("running for %s" % str(result))
64 link = result.xpath('.//h2/a')[0]
65 url = link.attrib.get('href')
66 title = result.xpath('string(.//h2/a)')
67 content = extract_text(result.xpath('.//p'))
68 # append result
69 results.append({'url': url,
70 'title': title,
71 'content': content})
72 except:
73 logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
74 continue
75
76 return results
77
[end of searx/engines/duden.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/searx/engines/duden.py b/searx/engines/duden.py
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -11,7 +11,7 @@
from lxml import html, etree
import re
from searx.engines.xpath import extract_text
-from searx.url_utils import quote
+from searx.url_utils import quote, urljoin
from searx import logger
categories = ['general']
@@ -20,7 +20,7 @@
# search-url
base_url = 'https://www.duden.de/'
-search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'
+search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'
def request(query, params):
@@ -35,7 +35,11 @@
'''
offset = (params['pageno'] - 1)
- params['url'] = search_url.format(offset=offset, query=quote(query))
+ if offset == 0:
+ search_url_fmt = base_url + 'suchen/dudenonline/{query}'
+ params['url'] = search_url_fmt.format(query=quote(query))
+ else:
+ params['url'] = search_url.format(offset=offset, query=quote(query))
return params
@@ -58,12 +62,11 @@
logger.debug("Couldn't read number of results.")
pass
- for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
+ for result in dom.xpath('//section[not(contains(@class, "essay"))]'):
try:
- logger.debug("running for %s" % str(result))
- link = result.xpath('.//h2/a')[0]
- url = link.attrib.get('href')
- title = result.xpath('string(.//h2/a)')
+ url = result.xpath('.//h2/a')[0].get('href')
+ url = urljoin(base_url, url)
+ title = result.xpath('string(.//h2/a)').strip()
content = extract_text(result.xpath('.//p'))
# append result
results.append({'url': url,
| {"golden_diff": "diff --git a/searx/engines/duden.py b/searx/engines/duden.py\n--- a/searx/engines/duden.py\n+++ b/searx/engines/duden.py\n@@ -11,7 +11,7 @@\n from lxml import html, etree\n import re\n from searx.engines.xpath import extract_text\n-from searx.url_utils import quote\n+from searx.url_utils import quote, urljoin\n from searx import logger\n \n categories = ['general']\n@@ -20,7 +20,7 @@\n \n # search-url\n base_url = 'https://www.duden.de/'\n-search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n+search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}'\n \n \n def request(query, params):\n@@ -35,7 +35,11 @@\n '''\n \n offset = (params['pageno'] - 1)\n- params['url'] = search_url.format(offset=offset, query=quote(query))\n+ if offset == 0:\n+ search_url_fmt = base_url + 'suchen/dudenonline/{query}'\n+ params['url'] = search_url_fmt.format(query=quote(query))\n+ else:\n+ params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n \n \n@@ -58,12 +62,11 @@\n logger.debug(\"Couldn't read number of results.\")\n pass\n \n- for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n+ for result in dom.xpath('//section[not(contains(@class, \"essay\"))]'):\n try:\n- logger.debug(\"running for %s\" % str(result))\n- link = result.xpath('.//h2/a')[0]\n- url = link.attrib.get('href')\n- title = result.xpath('string(.//h2/a)')\n+ url = result.xpath('.//h2/a')[0].get('href')\n+ url = urljoin(base_url, url)\n+ title = result.xpath('string(.//h2/a)').strip()\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n", "issue": "Duden search engine not working anymore\nThey changed the site layout.\n", "before_files": [{"content": "\"\"\"\n Duden\n @website https://www.duden.de\n @provide-api no\n @using-api no\n @results HTML (using search portal)\n @stable no (HTML can change)\n @parse url, title, content\n\"\"\"\n\nfrom lxml import html, etree\nimport re\nfrom searx.engines.xpath import extract_text\nfrom searx.url_utils import quote\nfrom searx import logger\n\ncategories = ['general']\npaging = True\nlanguage_support = False\n\n# search-url\nbase_url = 'https://www.duden.de/'\nsearch_url = base_url + 'suchen/dudenonline/{query}?page={offset}'\n\n\ndef request(query, params):\n '''pre-request callback\n params<dict>:\n method : POST/GET\n headers : {}\n data : {} # if method == POST\n url : ''\n category: 'search category'\n pageno : 1 # number of the requested page\n '''\n\n offset = (params['pageno'] - 1)\n params['url'] = search_url.format(offset=offset, query=quote(query))\n return params\n\n\ndef response(resp):\n '''post-response callback\n resp: requests response object\n '''\n results = []\n\n dom = html.fromstring(resp.text)\n\n try:\n number_of_results_string = re.sub('[^0-9]', '', dom.xpath(\n '//a[@class=\"active\" and contains(@href,\"/suchen/dudenonline\")]/span/text()')[0]\n )\n\n results.append({'number_of_results': int(number_of_results_string)})\n\n except:\n logger.debug(\"Couldn't read number of results.\")\n pass\n\n for result in dom.xpath('//section[@class=\"wide\" and not(contains(@style,\"overflow:hidden\"))]'):\n try:\n logger.debug(\"running for %s\" % str(result))\n link = result.xpath('.//h2/a')[0]\n url = link.attrib.get('href')\n title = result.xpath('string(.//h2/a)')\n content = extract_text(result.xpath('.//p'))\n # append result\n results.append({'url': url,\n 'title': title,\n 'content': content})\n 
except:\n logger.debug('result parse error in:\\n%s', etree.tostring(result, pretty_print=True))\n continue\n\n return results\n", "path": "searx/engines/duden.py"}]} | 1,222 | 511 |
gh_patches_debug_6513 | rasdani/github-patches | git_diff | pypi__warehouse-1623 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
json urls object may contain prereleases
I think the urls object in the JSON output should reference downloads for the latest stable release; even after #1519, the urls object may contain references to prerelease versions. For example, https://pypi.org/pypi/wheel/json has
```json
{
"urls": [
{
"filename": "wheel-0.30.0a0-py2.py3-none-any.whl",
"url": "https://files.pythonhosted.org/packages/83/53/e120833aa2350db333df89a40dea3b310dd9dabf6f29eaa18934a597dc79/wheel-0.30.0a0-py2.py3-none-any.whl"
}, ...
```
I think it should point to 0.29.0 instead, like the project page does.
</issue>
<code>
[start of warehouse/legacy/api/json.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.view import view_config
15 from sqlalchemy.orm.exc import NoResultFound
16
17 from warehouse.cache.http import cache_control
18 from warehouse.cache.origin import origin_cache
19 from warehouse.packaging.interfaces import IDownloadStatService
20 from warehouse.packaging.models import File, Release
21
22
23 @view_config(
24 route_name="legacy.api.json.project",
25 renderer="json",
26 decorator=[
27 cache_control(15 * 60), # 15 minutes
28 origin_cache(
29 1 * 24 * 60 * 60, # 1 day
30 stale_while_revalidate=5 * 60, # 5 minutes
31 stale_if_error=1 * 24 * 60 * 60, # 1 day
32 ),
33 ],
34 )
35 def json_project(project, request):
36 if project.name != request.matchdict.get("name", project.name):
37 return HTTPMovedPermanently(
38 request.current_route_path(name=project.name),
39 )
40
41 try:
42 release = (
43 request.db.query(Release)
44 .filter(Release.project == project)
45 .order_by(Release._pypi_ordering.desc())
46 .limit(1)
47 .one()
48 )
49 except NoResultFound:
50 return HTTPNotFound()
51
52 return json_release(release, request)
53
54
55 @view_config(
56 route_name="legacy.api.json.release",
57 renderer="json",
58 decorator=[
59 cache_control(15 * 60), # 15 minutes
60 origin_cache(
61 1 * 24 * 60 * 60, # 1 day
62 stale_while_revalidate=5 * 60, # 5 minutes
63 stale_if_error=1 * 24 * 60 * 60, # 1 day
64 ),
65 ],
66 )
67 def json_release(release, request):
68 project = release.project
69
70 if project.name != request.matchdict.get("name", project.name):
71 return HTTPMovedPermanently(
72 request.current_route_path(name=project.name),
73 )
74
75 # We want to allow CORS here to enable anyone to fetch data from this API
76 request.response.headers["Access-Control-Allow-Origin"] = "*"
77 request.response.headers["Access-Control-Allow-Headers"] = ", ".join([
78 "Content-Type",
79 "If-Match",
80 "If-Modified-Since",
81 "If-None-Match",
82 "If-Unmodified-Since",
83 ])
84 request.response.headers["Access-Control-Allow-Methods"] = "GET"
85 request.response.headers["Access-Control-Max-Age"] = "86400"
86 request.response.headers["Access-Control-Expose-Headers"] = ", ".join([
87 "X-PyPI-Last-Serial",
88 ])
89
90 # Get the latest serial number for this project.
91 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
92
93 # Get all of the releases and files for this project.
94 release_files = (
95 request.db.query(Release, File)
96 .outerjoin(File)
97 .filter(Release.project == project)
98 .order_by(Release._pypi_ordering.desc(), File.filename)
99 .all()
100 )
101
102 # Map our releases + files into a dictionary that maps each release to a
103 # list of all its files.
104 releases = {}
105 for r, file_ in release_files:
106 files = releases.setdefault(r, [])
107 if file_ is not None:
108 files.append(file_)
109
110 # Serialize our database objects to match the way that PyPI legacy
111 # presented this data.
112 releases = {
113 r.version: [
114 {
115 "filename": f.filename,
116 "packagetype": f.packagetype,
117 "python_version": f.python_version,
118 "has_sig": f.has_signature,
119 "comment_text": f.comment_text,
120 "md5_digest": f.md5_digest,
121 "digests": {
122 "md5": f.md5_digest,
123 "sha256": f.sha256_digest,
124 },
125 "size": f.size,
126 "downloads": f.downloads,
127 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
128 "url": request.route_url("packaging.file", path=f.path),
129 }
130 for f in fs
131 ]
132 for r, fs in releases.items()
133 }
134
135 # Get our stats service
136 stats_svc = request.find_service(IDownloadStatService)
137
138 return {
139 "info": {
140 "name": project.name,
141 "version": release.version,
142 "summary": release.summary,
143 "description": release.description,
144 "keywords": release.keywords,
145 "license": release.license,
146 "classifiers": list(release.classifiers),
147 "author": release.author,
148 "author_email": release.author_email,
149 "maintainer": release.maintainer,
150 "maintainer_email": release.maintainer_email,
151 "requires_python": release.requires_python,
152 "platform": release.platform,
153 "downloads": {
154 "last_day": stats_svc.get_daily_stats(project.name),
155 "last_week": stats_svc.get_weekly_stats(project.name),
156 "last_month": stats_svc.get_monthly_stats(project.name),
157 },
158 "project_url": request.route_url(
159 "packaging.project",
160 name=project.name,
161 ),
162 "release_url": request.route_url(
163 "packaging.release",
164 name=project.name,
165 version=release.version,
166 ),
167 "docs_url": project.documentation_url,
168 "bugtrack_url": project.bugtrack_url,
169 "home_page": release.home_page,
170 "download_url": release.download_url,
171 },
172 "urls": releases[release.version],
173 "releases": releases,
174 }
175
[end of warehouse/legacy/api/json.py]
</code>
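For context on this record: the `json_project` view in the listing above selects the release used for the JSON response purely by `Release._pypi_ordering.desc()`, so a prerelease such as `wheel-0.30.0a0` can win over the stable `0.29.0`. The golden diff further down in this record fixes that by sorting non-prerelease versions first (`Release.is_prerelease.nullslast()`). The snippet below is only a minimal, self-contained sketch of that ordering rule in plain Python; the dictionaries and version strings are hypothetical stand-ins, not Warehouse code.

```python
# Stand-alone illustration of "stable releases before prereleases"; the real fix
# expresses the same rule as an SQL ORDER BY:
#   Release.is_prerelease.nullslast(), Release._pypi_ordering.desc()
releases = [
    {"version": "0.30.0a0", "is_prerelease": True, "pypi_ordering": 3},
    {"version": "0.29.0", "is_prerelease": False, "pypi_ordering": 2},
    {"version": "0.28.0", "is_prerelease": False, "pypi_ordering": 1},
]

def stable_first(release):
    # False sorts before True, so stable releases come first; within each
    # group, the higher (newer) pypi_ordering value comes first.
    return (release["is_prerelease"] is True, -release["pypi_ordering"])

chosen = sorted(releases, key=stable_first)[0]
print(chosen["version"])  # -> 0.29.0, not the 0.30.0a0 prerelease
```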
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -42,7 +42,9 @@
release = (
request.db.query(Release)
.filter(Release.project == project)
- .order_by(Release._pypi_ordering.desc())
+ .order_by(
+ Release.is_prerelease.nullslast(),
+ Release._pypi_ordering.desc())
.limit(1)
.one()
)
| {"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -42,7 +42,9 @@\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n- .order_by(Release._pypi_ordering.desc())\n+ .order_by(\n+ Release.is_prerelease.nullslast(),\n+ Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n", "issue": "json urls object may contain prereleases\nI think the urls object in the JSON output should reference downloads for the latest stable release; even after #1519, the urls object may contain references to prerelease versions. For example, https://pypi.org/pypi/wheel/json has\r\n\r\n```json\r\n{\r\n \"urls\": [\r\n {\r\n \"filename\": \"wheel-0.30.0a0-py2.py3-none-any.whl\",\r\n \"url\": \"https://files.pythonhosted.org/packages/83/53/e120833aa2350db333df89a40dea3b310dd9dabf6f29eaa18934a597dc79/wheel-0.30.0a0-py2.py3-none-any.whl\"\r\n }, ...\r\n```\r\n\r\nI think it should point to 0.29.0 instead, like the project page does.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.interfaces import IDownloadStatService\nfrom warehouse.packaging.models import File, Release\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound()\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n )\n\n # We want to allow CORS here to enable anyone to fetch data from this API\n request.response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n request.response.headers[\"Access-Control-Allow-Headers\"] = \", \".join([\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n 
\"If-Unmodified-Since\",\n ])\n request.response.headers[\"Access-Control-Allow-Methods\"] = \"GET\"\n request.response.headers[\"Access-Control-Max-Age\"] = \"86400\"\n request.response.headers[\"Access-Control-Expose-Headers\"] = \", \".join([\n \"X-PyPI-Last-Serial\",\n ])\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\n \"md5\": f.md5_digest,\n \"sha256\": f.sha256_digest,\n },\n \"size\": f.size,\n \"downloads\": f.downloads,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n # Get our stats service\n stats_svc = request.find_service(IDownloadStatService)\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\n \"last_day\": stats_svc.get_daily_stats(project.name),\n \"last_week\": stats_svc.get_weekly_stats(project.name),\n \"last_month\": stats_svc.get_monthly_stats(project.name),\n },\n \"project_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"release_url\": request.route_url(\n \"packaging.release\",\n name=project.name,\n version=release.version,\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n }\n", "path": "warehouse/legacy/api/json.py"}]} | 2,541 | 125 |
gh_patches_debug_27899 | rasdani/github-patches | git_diff | ytdl-org__youtube-dl-22921 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
720p ABC iView
<!--
######################################################################
WARNING!
IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE
######################################################################
-->
## Checklist
<!--
Carefully read and work through this checklist in order to prevent the most common mistakes and misuse of youtube-dl:
- First off, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.10.29. If it's not, see https://yt-dl.org/update on how to update. Issues with an outdated version will be REJECTED.
- Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.
- Finally, put x into all relevant boxes (like this [x])
-->
- [x] I'm reporting a site feature request
- [x] I've verified that I'm running youtube-dl version **2019.10.29**
- [x] I've searched the bugtracker for similar site feature requests including closed ones
## Description
<!--
Provide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.
-->
WRITE DESCRIPTION HERE
ABC iView has recently started streaming a lot of their content in 720p. Youtube-dl doesn't pick up the 720p videos and only downloads 540p. If you could get 720p ABC iView downloads working in youtube-dl, that would be awesome! Thanks!
</issue>
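For context on this record: the extractor shown below only tries the `'sd'` and `'sd-low'` HLS stream keys, which is why 540p is the best quality it finds, and the golden diff at the end of this record simply adds a `'720'` key to that loop. The snippet below is a small runnable sketch of that idea; the stream dictionary and URLs are hypothetical, and only the key names `'720'`, `'sd'`, `'sd-low'` are taken from the real extractor and its fix.

```python
# Hypothetical stream metadata shaped like the extractor's stream['streams']['hls'].
stream = {
    "streams": {
        "hls": {
            "720": "https://example.invalid/master-720.m3u8",
            "sd": "https://example.invalid/master-540.m3u8",
            "sd-low": "https://example.invalid/master-360.m3u8",
        }
    }
}

chosen_url = None
for quality in ("720", "sd", "sd-low"):  # try the 720p variant before the SD ones
    chosen_url = stream["streams"]["hls"].get(quality)
    if chosen_url:
        break

print(chosen_url)  # picks the 720p playlist when it is present
```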
<code>
[start of youtube_dl/extractor/abc.py]
1 from __future__ import unicode_literals
2
3 import hashlib
4 import hmac
5 import re
6 import time
7
8 from .common import InfoExtractor
9 from ..compat import compat_str
10 from ..utils import (
11 ExtractorError,
12 js_to_json,
13 int_or_none,
14 parse_iso8601,
15 try_get,
16 unescapeHTML,
17 update_url_query,
18 )
19
20
21 class ABCIE(InfoExtractor):
22 IE_NAME = 'abc.net.au'
23 _VALID_URL = r'https?://(?:www\.)?abc\.net\.au/news/(?:[^/]+/){1,2}(?P<id>\d+)'
24
25 _TESTS = [{
26 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',
27 'md5': 'cb3dd03b18455a661071ee1e28344d9f',
28 'info_dict': {
29 'id': '5868334',
30 'ext': 'mp4',
31 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',
32 'description': 'md5:809ad29c67a05f54eb41f2a105693a67',
33 },
34 'skip': 'this video has expired',
35 }, {
36 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',
37 'md5': 'db2a5369238b51f9811ad815b69dc086',
38 'info_dict': {
39 'id': 'NvqvPeNZsHU',
40 'ext': 'mp4',
41 'upload_date': '20150816',
42 'uploader': 'ABC News (Australia)',
43 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote "an inclusive Australia, not a divided one.". Read more here: http://ab.co/1Mwc6ef',
44 'uploader_id': 'NewsOnABC',
45 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',
46 },
47 'add_ie': ['Youtube'],
48 'skip': 'Not accessible from Travis CI server',
49 }, {
50 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',
51 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',
52 'info_dict': {
53 'id': '6880080',
54 'ext': 'mp3',
55 'title': 'NAB lifts interest rates, following Westpac and CBA',
56 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',
57 },
58 }, {
59 'url': 'http://www.abc.net.au/news/2015-10-19/6866214',
60 'only_matching': True,
61 }]
62
63 def _real_extract(self, url):
64 video_id = self._match_id(url)
65 webpage = self._download_webpage(url, video_id)
66
67 mobj = re.search(
68 r'inline(?P<type>Video|Audio|YouTube)Data\.push\((?P<json_data>[^)]+)\);',
69 webpage)
70 if mobj is None:
71 expired = self._html_search_regex(r'(?s)class="expired-(?:video|audio)".+?<span>(.+?)</span>', webpage, 'expired', None)
72 if expired:
73 raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)
74 raise ExtractorError('Unable to extract video urls')
75
76 urls_info = self._parse_json(
77 mobj.group('json_data'), video_id, transform_source=js_to_json)
78
79 if not isinstance(urls_info, list):
80 urls_info = [urls_info]
81
82 if mobj.group('type') == 'YouTube':
83 return self.playlist_result([
84 self.url_result(url_info['url']) for url_info in urls_info])
85
86 formats = [{
87 'url': url_info['url'],
88 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',
89 'width': int_or_none(url_info.get('width')),
90 'height': int_or_none(url_info.get('height')),
91 'tbr': int_or_none(url_info.get('bitrate')),
92 'filesize': int_or_none(url_info.get('filesize')),
93 } for url_info in urls_info]
94
95 self._sort_formats(formats)
96
97 return {
98 'id': video_id,
99 'title': self._og_search_title(webpage),
100 'formats': formats,
101 'description': self._og_search_description(webpage),
102 'thumbnail': self._og_search_thumbnail(webpage),
103 }
104
105
106 class ABCIViewIE(InfoExtractor):
107 IE_NAME = 'abc.net.au:iview'
108 _VALID_URL = r'https?://iview\.abc\.net\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'
109 _GEO_COUNTRIES = ['AU']
110
111 # ABC iview programs are normally available for 14 days only.
112 _TESTS = [{
113 'url': 'https://iview.abc.net.au/show/ben-and-hollys-little-kingdom/series/0/video/ZX9371A050S00',
114 'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
115 'info_dict': {
116 'id': 'ZX9371A050S00',
117 'ext': 'mp4',
118 'title': "Gaston's Birthday",
119 'series': "Ben And Holly's Little Kingdom",
120 'description': 'md5:f9de914d02f226968f598ac76f105bcf',
121 'upload_date': '20180604',
122 'uploader_id': 'abc4kids',
123 'timestamp': 1528140219,
124 },
125 'params': {
126 'skip_download': True,
127 },
128 }]
129
130 def _real_extract(self, url):
131 video_id = self._match_id(url)
132 video_params = self._download_json(
133 'https://iview.abc.net.au/api/programs/' + video_id, video_id)
134 title = unescapeHTML(video_params.get('title') or video_params['seriesTitle'])
135 stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))
136
137 house_number = video_params.get('episodeHouseNumber') or video_id
138 path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(
139 int(time.time()), house_number)
140 sig = hmac.new(
141 b'android.content.res.Resources',
142 path.encode('utf-8'), hashlib.sha256).hexdigest()
143 token = self._download_webpage(
144 'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)
145
146 def tokenize_url(url, token):
147 return update_url_query(url, {
148 'hdnea': token,
149 })
150
151 for sd in ('sd', 'sd-low'):
152 sd_url = try_get(
153 stream, lambda x: x['streams']['hls'][sd], compat_str)
154 if not sd_url:
155 continue
156 formats = self._extract_m3u8_formats(
157 tokenize_url(sd_url, token), video_id, 'mp4',
158 entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
159 if formats:
160 break
161 self._sort_formats(formats)
162
163 subtitles = {}
164 src_vtt = stream.get('captions', {}).get('src-vtt')
165 if src_vtt:
166 subtitles['en'] = [{
167 'url': src_vtt,
168 'ext': 'vtt',
169 }]
170
171 is_live = video_params.get('livestream') == '1'
172 if is_live:
173 title = self._live_title(title)
174
175 return {
176 'id': video_id,
177 'title': title,
178 'description': video_params.get('description'),
179 'thumbnail': video_params.get('thumbnail'),
180 'duration': int_or_none(video_params.get('eventDuration')),
181 'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),
182 'series': unescapeHTML(video_params.get('seriesTitle')),
183 'series_id': video_params.get('seriesHouseNumber') or video_id[:7],
184 'season_number': int_or_none(self._search_regex(
185 r'\bSeries\s+(\d+)\b', title, 'season number', default=None)),
186 'episode_number': int_or_none(self._search_regex(
187 r'\bEp\s+(\d+)\b', title, 'episode number', default=None)),
188 'episode_id': house_number,
189 'uploader_id': video_params.get('channel'),
190 'formats': formats,
191 'subtitles': subtitles,
192 'is_live': is_live,
193 }
194
[end of youtube_dl/extractor/abc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py
--- a/youtube_dl/extractor/abc.py
+++ b/youtube_dl/extractor/abc.py
@@ -110,17 +110,17 @@
# ABC iview programs are normally available for 14 days only.
_TESTS = [{
- 'url': 'https://iview.abc.net.au/show/ben-and-hollys-little-kingdom/series/0/video/ZX9371A050S00',
- 'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',
+ 'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',
+ 'md5': '67715ce3c78426b11ba167d875ac6abf',
'info_dict': {
- 'id': 'ZX9371A050S00',
+ 'id': 'LE1927H001S00',
'ext': 'mp4',
- 'title': "Gaston's Birthday",
- 'series': "Ben And Holly's Little Kingdom",
- 'description': 'md5:f9de914d02f226968f598ac76f105bcf',
- 'upload_date': '20180604',
- 'uploader_id': 'abc4kids',
- 'timestamp': 1528140219,
+ 'title': "Series 11 Ep 1",
+ 'series': "Gruen",
+ 'description': 'md5:52cc744ad35045baf6aded2ce7287f67',
+ 'upload_date': '20190925',
+ 'uploader_id': 'abc1',
+ 'timestamp': 1569445289,
},
'params': {
'skip_download': True,
@@ -148,7 +148,7 @@
'hdnea': token,
})
- for sd in ('sd', 'sd-low'):
+ for sd in ('720', 'sd', 'sd-low'):
sd_url = try_get(
stream, lambda x: x['streams']['hls'][sd], compat_str)
if not sd_url:
| {"golden_diff": "diff --git a/youtube_dl/extractor/abc.py b/youtube_dl/extractor/abc.py\n--- a/youtube_dl/extractor/abc.py\n+++ b/youtube_dl/extractor/abc.py\n@@ -110,17 +110,17 @@\n \n # ABC iview programs are normally available for 14 days only.\n _TESTS = [{\n- 'url': 'https://iview.abc.net.au/show/ben-and-hollys-little-kingdom/series/0/video/ZX9371A050S00',\n- 'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',\n+ 'url': 'https://iview.abc.net.au/show/gruen/series/11/video/LE1927H001S00',\n+ 'md5': '67715ce3c78426b11ba167d875ac6abf',\n 'info_dict': {\n- 'id': 'ZX9371A050S00',\n+ 'id': 'LE1927H001S00',\n 'ext': 'mp4',\n- 'title': \"Gaston's Birthday\",\n- 'series': \"Ben And Holly's Little Kingdom\",\n- 'description': 'md5:f9de914d02f226968f598ac76f105bcf',\n- 'upload_date': '20180604',\n- 'uploader_id': 'abc4kids',\n- 'timestamp': 1528140219,\n+ 'title': \"Series 11 Ep 1\",\n+ 'series': \"Gruen\",\n+ 'description': 'md5:52cc744ad35045baf6aded2ce7287f67',\n+ 'upload_date': '20190925',\n+ 'uploader_id': 'abc1',\n+ 'timestamp': 1569445289,\n },\n 'params': {\n 'skip_download': True,\n@@ -148,7 +148,7 @@\n 'hdnea': token,\n })\n \n- for sd in ('sd', 'sd-low'):\n+ for sd in ('720', 'sd', 'sd-low'):\n sd_url = try_get(\n stream, lambda x: x['streams']['hls'][sd], compat_str)\n if not sd_url:\n", "issue": "720p ABC iView\n<!--\r\n\r\n######################################################################\r\n WARNING!\r\n IGNORING THE FOLLOWING TEMPLATE WILL RESULT IN ISSUE CLOSED AS INCOMPLETE\r\n######################################################################\r\n\r\n-->\r\n\r\n\r\n## Checklist\r\n\r\n<!--\r\nCarefully read and work through this check list in order to prevent the most common mistakes and misuse of youtube-dl:\r\n- First of, make sure you are using the latest version of youtube-dl. Run `youtube-dl --version` and ensure your version is 2019.10.29. If it's not, see https://yt-dl.org/update on how to update. Issues with outdated version will be REJECTED.\r\n- Search the bugtracker for similar site feature requests: http://yt-dl.org/search-issues. DO NOT post duplicates.\r\n- Finally, put x into all relevant boxes (like this [x])\r\n-->\r\n\r\n- [x] I'm reporting a site feature request\r\n- [x] I've verified that I'm running youtube-dl version **2019.10.29**\r\n- [x] I've searched the bugtracker for similar site feature requests including closed ones\r\n\r\n\r\n## Description\r\n\r\n<!--\r\nProvide an explanation of your site feature request in an arbitrary form. Please make sure the description is worded well enough to be understood, see https://github.com/ytdl-org/youtube-dl#is-the-description-of-the-issue-itself-sufficient. Provide any additional information, suggested solution and as much context and examples as possible.\r\n-->\r\n\r\nWRITE DESCRIPTION HERE\r\n\r\nABC iView has recently started streaming a lot of their stuff in 720p. Youtube-dl doesn't pick up the 720p videos and only downloads 540p. If you could get 720p ABC iView downloading on youtube-dl that would be awesome! 
Thanks!\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport hashlib\nimport hmac\nimport re\nimport time\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_str\nfrom ..utils import (\n ExtractorError,\n js_to_json,\n int_or_none,\n parse_iso8601,\n try_get,\n unescapeHTML,\n update_url_query,\n)\n\n\nclass ABCIE(InfoExtractor):\n IE_NAME = 'abc.net.au'\n _VALID_URL = r'https?://(?:www\\.)?abc\\.net\\.au/news/(?:[^/]+/){1,2}(?P<id>\\d+)'\n\n _TESTS = [{\n 'url': 'http://www.abc.net.au/news/2014-11-05/australia-to-staff-ebola-treatment-centre-in-sierra-leone/5868334',\n 'md5': 'cb3dd03b18455a661071ee1e28344d9f',\n 'info_dict': {\n 'id': '5868334',\n 'ext': 'mp4',\n 'title': 'Australia to help staff Ebola treatment centre in Sierra Leone',\n 'description': 'md5:809ad29c67a05f54eb41f2a105693a67',\n },\n 'skip': 'this video has expired',\n }, {\n 'url': 'http://www.abc.net.au/news/2015-08-17/warren-entsch-introduces-same-sex-marriage-bill/6702326',\n 'md5': 'db2a5369238b51f9811ad815b69dc086',\n 'info_dict': {\n 'id': 'NvqvPeNZsHU',\n 'ext': 'mp4',\n 'upload_date': '20150816',\n 'uploader': 'ABC News (Australia)',\n 'description': 'Government backbencher Warren Entsch introduces a cross-party sponsored bill to legalise same-sex marriage, saying the bill is designed to promote \"an inclusive Australia, not a divided one.\". Read more here: http://ab.co/1Mwc6ef',\n 'uploader_id': 'NewsOnABC',\n 'title': 'Marriage Equality: Warren Entsch introduces same sex marriage bill',\n },\n 'add_ie': ['Youtube'],\n 'skip': 'Not accessible from Travis CI server',\n }, {\n 'url': 'http://www.abc.net.au/news/2015-10-23/nab-lifts-interest-rates-following-westpac-and-cba/6880080',\n 'md5': 'b96eee7c9edf4fc5a358a0252881cc1f',\n 'info_dict': {\n 'id': '6880080',\n 'ext': 'mp3',\n 'title': 'NAB lifts interest rates, following Westpac and CBA',\n 'description': 'md5:f13d8edc81e462fce4a0437c7dc04728',\n },\n }, {\n 'url': 'http://www.abc.net.au/news/2015-10-19/6866214',\n 'only_matching': True,\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n mobj = re.search(\n r'inline(?P<type>Video|Audio|YouTube)Data\\.push\\((?P<json_data>[^)]+)\\);',\n webpage)\n if mobj is None:\n expired = self._html_search_regex(r'(?s)class=\"expired-(?:video|audio)\".+?<span>(.+?)</span>', webpage, 'expired', None)\n if expired:\n raise ExtractorError('%s said: %s' % (self.IE_NAME, expired), expected=True)\n raise ExtractorError('Unable to extract video urls')\n\n urls_info = self._parse_json(\n mobj.group('json_data'), video_id, transform_source=js_to_json)\n\n if not isinstance(urls_info, list):\n urls_info = [urls_info]\n\n if mobj.group('type') == 'YouTube':\n return self.playlist_result([\n self.url_result(url_info['url']) for url_info in urls_info])\n\n formats = [{\n 'url': url_info['url'],\n 'vcodec': url_info.get('codec') if mobj.group('type') == 'Video' else 'none',\n 'width': int_or_none(url_info.get('width')),\n 'height': int_or_none(url_info.get('height')),\n 'tbr': int_or_none(url_info.get('bitrate')),\n 'filesize': int_or_none(url_info.get('filesize')),\n } for url_info in urls_info]\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': self._og_search_title(webpage),\n 'formats': formats,\n 'description': self._og_search_description(webpage),\n 'thumbnail': self._og_search_thumbnail(webpage),\n }\n\n\nclass ABCIViewIE(InfoExtractor):\n IE_NAME = 'abc.net.au:iview'\n _VALID_URL = 
r'https?://iview\\.abc\\.net\\.au/(?:[^/]+/)*video/(?P<id>[^/?#]+)'\n _GEO_COUNTRIES = ['AU']\n\n # ABC iview programs are normally available for 14 days only.\n _TESTS = [{\n 'url': 'https://iview.abc.net.au/show/ben-and-hollys-little-kingdom/series/0/video/ZX9371A050S00',\n 'md5': 'cde42d728b3b7c2b32b1b94b4a548afc',\n 'info_dict': {\n 'id': 'ZX9371A050S00',\n 'ext': 'mp4',\n 'title': \"Gaston's Birthday\",\n 'series': \"Ben And Holly's Little Kingdom\",\n 'description': 'md5:f9de914d02f226968f598ac76f105bcf',\n 'upload_date': '20180604',\n 'uploader_id': 'abc4kids',\n 'timestamp': 1528140219,\n },\n 'params': {\n 'skip_download': True,\n },\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n video_params = self._download_json(\n 'https://iview.abc.net.au/api/programs/' + video_id, video_id)\n title = unescapeHTML(video_params.get('title') or video_params['seriesTitle'])\n stream = next(s for s in video_params['playlist'] if s.get('type') in ('program', 'livestream'))\n\n house_number = video_params.get('episodeHouseNumber') or video_id\n path = '/auth/hls/sign?ts={0}&hn={1}&d=android-tablet'.format(\n int(time.time()), house_number)\n sig = hmac.new(\n b'android.content.res.Resources',\n path.encode('utf-8'), hashlib.sha256).hexdigest()\n token = self._download_webpage(\n 'http://iview.abc.net.au{0}&sig={1}'.format(path, sig), video_id)\n\n def tokenize_url(url, token):\n return update_url_query(url, {\n 'hdnea': token,\n })\n\n for sd in ('sd', 'sd-low'):\n sd_url = try_get(\n stream, lambda x: x['streams']['hls'][sd], compat_str)\n if not sd_url:\n continue\n formats = self._extract_m3u8_formats(\n tokenize_url(sd_url, token), video_id, 'mp4',\n entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)\n if formats:\n break\n self._sort_formats(formats)\n\n subtitles = {}\n src_vtt = stream.get('captions', {}).get('src-vtt')\n if src_vtt:\n subtitles['en'] = [{\n 'url': src_vtt,\n 'ext': 'vtt',\n }]\n\n is_live = video_params.get('livestream') == '1'\n if is_live:\n title = self._live_title(title)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': video_params.get('description'),\n 'thumbnail': video_params.get('thumbnail'),\n 'duration': int_or_none(video_params.get('eventDuration')),\n 'timestamp': parse_iso8601(video_params.get('pubDate'), ' '),\n 'series': unescapeHTML(video_params.get('seriesTitle')),\n 'series_id': video_params.get('seriesHouseNumber') or video_id[:7],\n 'season_number': int_or_none(self._search_regex(\n r'\\bSeries\\s+(\\d+)\\b', title, 'season number', default=None)),\n 'episode_number': int_or_none(self._search_regex(\n r'\\bEp\\s+(\\d+)\\b', title, 'episode number', default=None)),\n 'episode_id': house_number,\n 'uploader_id': video_params.get('channel'),\n 'formats': formats,\n 'subtitles': subtitles,\n 'is_live': is_live,\n }\n", "path": "youtube_dl/extractor/abc.py"}]} | 3,557 | 600 |
gh_patches_debug_43872 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-133 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Harden maintainers linting
As addressed in https://github.com/conda-forge/pyutilib-feedstock/pull/1:
```
Running command: ['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py', './feedstocks_repo/feedstocks']
Traceback (most recent call last):
File "/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py", line 85, in <module>
contributors = data.get('extra', {}).get('recipe-maintainers', [])
AttributeError: 'list' object has no attribute 'get'
CalledProcessError: Command '['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py',
```
</issue>
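The traceback above occurs because a recipe's `extra:` section was parsed from YAML as a list, so `.get()` is called on a list rather than a dict. The golden diff at the end of this record hardens `lint_recipe.py` with a `get_section()` helper that type-checks each section. The snippet below is a minimal runnable sketch of that defensive pattern; the `get_section()` body is taken from the golden diff, while the sample `meta` dict is a hypothetical malformed recipe.

```python
# Reproduce the failure mode and the defensive check added by the fix.
meta = {"extra": ["recipe-maintainers: someone"]}  # malformed: a list, not a dict

def get_section(parent, name, lints):
    section = parent.get(name, {})
    if not isinstance(section, dict):
        lints.append('The "{}" section was expected to be a dictionary, but '
                     'got a {}.'.format(name, type(section).__name__))
        section = {}
    return section

lints = []
extra = get_section(meta, "extra", lints)
maintainers = extra.get("recipe-maintainers", [])  # no AttributeError now
print(lints, maintainers)
```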
<code>
[start of conda_smithy/lint_recipe.py]
1 import os
2 import re
3
4 import jinja2
5 import ruamel.yaml
6
7
8 EXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',
9 'test', 'app', 'about', 'extra']
10
11 REQUIREMENTS_ORDER = ['build', 'run']
12
13
14 class NullUndefined(jinja2.Undefined):
15 def __unicode__(self):
16 return unicode(self._undefined_name)
17
18
19 def lintify(meta, recipe_dir=None):
20 lints = []
21 major_sections = list(meta.keys())
22
23 # If the recipe_dir exists (no guarantee within this function) , we can
24 # find the meta.yaml within it.
25 meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
26
27 # 1: Top level meta.yaml keys should have a specific order.
28 section_order_sorted = sorted(major_sections,
29 key=EXPECTED_SECTION_ORDER.index)
30 if major_sections != section_order_sorted:
31 lints.append('The top level meta keys are in an unexpected order. '
32 'Expecting {}.'.format(section_order_sorted))
33
34 # 2: The about section should have a home, license and summary.
35 for about_item in ['home', 'license', 'summary']:
36 about_section = meta.get('about', {}) or {}
37 # if the section doesn't exist, or is just empty, lint it.
38 if not about_section.get(about_item, ''):
39 lints.append('The {} item is expected in the about section.'
40 ''.format(about_item))
41
42 # 3: The recipe should have some maintainers.
43 extra_section = meta.get('extra', {}) or {}
44 if not extra_section.get('recipe-maintainers', []):
45 lints.append('The recipe could do with some maintainers listed in '
46 'the "extra/recipe-maintainers" section.')
47
48 # 4: The recipe should have some tests.
49 if 'test' not in major_sections:
50 test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',
51 'run_test.pl']
52 a_test_file_exists = (recipe_dir is not None and
53 any(os.path.exists(os.path.join(recipe_dir,
54 test_file))
55 for test_file in test_files))
56 if not a_test_file_exists:
57 lints.append('The recipe must have some tests.')
58
59 # 5: License cannot be 'unknown.'
60 license = meta.get('about', {}).get('license', '').lower()
61 if 'unknown' == license.strip():
62 lints.append('The recipe license cannot be unknown.')
63
64 # 6: Selectors should be in a tidy form.
65 if recipe_dir is not None and os.path.exists(meta_fname):
66 bad_selectors = []
67 # Good selectors look like ".*\s\s#\s[...]"
68 good_selectors_pat = re.compile(r'(.+?)\s{2,}#\s\[(.+)\](?(2).*)$')
69 with open(meta_fname, 'r') as fh:
70 for selector_line in selector_lines(fh):
71 if not good_selectors_pat.match(selector_line):
72 bad_selectors.append(selector_line)
73 if bad_selectors:
74 lints.append('Selectors are suggested to take a '
75 '" # [<selector>]" form.')
76
77 # 7: The build section should have a build number.
78 build_section = meta.get('build', {}) or {}
79 build_number = build_section.get('number', None)
80 if build_number is None:
81 lints.append('The recipe must have a `build/number` section.')
82
83 # 8: The build section should be before the run section in requirements.
84 requirements_section = meta.get('requirements', {}) or {}
85 requirements_order_sorted = sorted(requirements_section,
86 key=REQUIREMENTS_ORDER.index)
87 if requirements_section.keys() != requirements_order_sorted:
88 lints.append('The `requirements/build` section should be defined '
89 'before the `requirements/run` section.')
90
91 # 9: Files downloaded should have a hash.
92 source_section = meta.get('source', {}) or {}
93 if ('url' in source_section and
94 not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
95 lints.append('When defining a source/url please add a sha256, sha1 '
96 'or md5 checksum (sha256 preferably).')
97
98 return lints
99
100
101 def selector_lines(lines):
102 # Using the same pattern defined in conda-build (metadata.py),
103 # we identify selectors.
104 sel_pat = re.compile(r'(.+?)\s*(#.*)?\[(.+)\](?(2).*)$')
105
106 for line in lines:
107 line = line.rstrip()
108 if line.lstrip().startswith('#'):
109 # Don't bother with comment only lines
110 continue
111 m = sel_pat.match(line)
112 if m:
113 m.group(3)
114 yield line
115
116
117 def main(recipe_dir):
118 recipe_dir = os.path.abspath(recipe_dir)
119 recipe_meta = os.path.join(recipe_dir, 'meta.yaml')
120 if not os.path.exists(recipe_dir):
121 raise IOError('Feedstock has no recipe/meta.yaml.')
122
123 env = jinja2.Environment(undefined=NullUndefined)
124
125 with open(recipe_meta, 'r') as fh:
126 content = env.from_string(''.join(fh)).render()
127 meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
128 results = lintify(meta, recipe_dir)
129 return results
130
[end of conda_smithy/lint_recipe.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py
--- a/conda_smithy/lint_recipe.py
+++ b/conda_smithy/lint_recipe.py
@@ -16,6 +16,15 @@
return unicode(self._undefined_name)
+def get_section(parent, name, lints):
+ section = parent.get(name, {})
+ if not isinstance(section, dict):
+ lints.append('The "{}" section was expected to be a dictionary, but '
+ 'got a {}.'.format(name, type(section).__name__))
+ section = {}
+ return section
+
+
def lintify(meta, recipe_dir=None):
lints = []
major_sections = list(meta.keys())
@@ -24,6 +33,12 @@
# find the meta.yaml within it.
meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')
+ source_section = get_section(meta, 'source', lints)
+ build_section = get_section(meta, 'build', lints)
+ requirements_section = get_section(meta, 'requirements', lints)
+ about_section = get_section(meta, 'about', lints)
+ extra_section = get_section(meta, 'extra', lints)
+
# 1: Top level meta.yaml keys should have a specific order.
section_order_sorted = sorted(major_sections,
key=EXPECTED_SECTION_ORDER.index)
@@ -33,14 +48,12 @@
# 2: The about section should have a home, license and summary.
for about_item in ['home', 'license', 'summary']:
- about_section = meta.get('about', {}) or {}
# if the section doesn't exist, or is just empty, lint it.
if not about_section.get(about_item, ''):
lints.append('The {} item is expected in the about section.'
''.format(about_item))
# 3: The recipe should have some maintainers.
- extra_section = meta.get('extra', {}) or {}
if not extra_section.get('recipe-maintainers', []):
lints.append('The recipe could do with some maintainers listed in '
'the "extra/recipe-maintainers" section.')
@@ -57,7 +70,7 @@
lints.append('The recipe must have some tests.')
# 5: License cannot be 'unknown.'
- license = meta.get('about', {}).get('license', '').lower()
+ license = about_section.get('license', '').lower()
if 'unknown' == license.strip():
lints.append('The recipe license cannot be unknown.')
@@ -75,13 +88,10 @@
'" # [<selector>]" form.')
# 7: The build section should have a build number.
- build_section = meta.get('build', {}) or {}
- build_number = build_section.get('number', None)
- if build_number is None:
+ if build_section.get('number', None) is None:
lints.append('The recipe must have a `build/number` section.')
# 8: The build section should be before the run section in requirements.
- requirements_section = meta.get('requirements', {}) or {}
requirements_order_sorted = sorted(requirements_section,
key=REQUIREMENTS_ORDER.index)
if requirements_section.keys() != requirements_order_sorted:
@@ -89,7 +99,6 @@
'before the `requirements/run` section.')
# 9: Files downloaded should have a hash.
- source_section = meta.get('source', {}) or {}
if ('url' in source_section and
not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):
lints.append('When defining a source/url please add a sha256, sha1 '
| {"golden_diff": "diff --git a/conda_smithy/lint_recipe.py b/conda_smithy/lint_recipe.py\n--- a/conda_smithy/lint_recipe.py\n+++ b/conda_smithy/lint_recipe.py\n@@ -16,6 +16,15 @@\n return unicode(self._undefined_name)\n \n \n+def get_section(parent, name, lints):\n+ section = parent.get(name, {})\n+ if not isinstance(section, dict):\n+ lints.append('The \"{}\" section was expected to be a dictionary, but '\n+ 'got a {}.'.format(name, type(section).__name__))\n+ section = {}\n+ return section\n+\n+\n def lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n@@ -24,6 +33,12 @@\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n \n+ source_section = get_section(meta, 'source', lints)\n+ build_section = get_section(meta, 'build', lints)\n+ requirements_section = get_section(meta, 'requirements', lints)\n+ about_section = get_section(meta, 'about', lints)\n+ extra_section = get_section(meta, 'extra', lints)\n+\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n@@ -33,14 +48,12 @@\n \n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n- about_section = meta.get('about', {}) or {}\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n \n # 3: The recipe should have some maintainers.\n- extra_section = meta.get('extra', {}) or {}\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n@@ -57,7 +70,7 @@\n lints.append('The recipe must have some tests.')\n \n # 5: License cannot be 'unknown.'\n- license = meta.get('about', {}).get('license', '').lower()\n+ license = about_section.get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n \n@@ -75,13 +88,10 @@\n '\" # [<selector>]\" form.')\n \n # 7: The build section should have a build number.\n- build_section = meta.get('build', {}) or {}\n- build_number = build_section.get('number', None)\n- if build_number is None:\n+ if build_section.get('number', None) is None:\n lints.append('The recipe must have a `build/number` section.')\n \n # 8: The build section should be before the run section in requirements.\n- requirements_section = meta.get('requirements', {}) or {}\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n@@ -89,7 +99,6 @@\n 'before the `requirements/run` section.')\n \n # 9: Files downloaded should have a hash.\n- source_section = meta.get('source', {}) or {}\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n", "issue": "Harden maintainers linting\nAs addressed in https://github.com/conda-forge/pyutilib-feedstock/pull/1:\n\n```\nRunning command: ['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py', './feedstocks_repo/feedstocks']\nTraceback (most recent call last):\n File \"/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py\", line 85, in <module>\n contributors = data.get('extra', {}).get('recipe-maintainers', 
[])\nAttributeError: 'list' object has no attribute 'get'\nCalledProcessError: Command '['python', '/home/travis/build/conda-forge/conda-forge.github.io/scripts/update_teams.py',\n```\n\n", "before_files": [{"content": "import os\nimport re\n\nimport jinja2\nimport ruamel.yaml\n\n\nEXPECTED_SECTION_ORDER = ['package', 'source', 'build', 'requirements',\n 'test', 'app', 'about', 'extra']\n\nREQUIREMENTS_ORDER = ['build', 'run']\n\n\nclass NullUndefined(jinja2.Undefined):\n def __unicode__(self):\n return unicode(self._undefined_name)\n\n\ndef lintify(meta, recipe_dir=None):\n lints = []\n major_sections = list(meta.keys())\n\n # If the recipe_dir exists (no guarantee within this function) , we can\n # find the meta.yaml within it.\n meta_fname = os.path.join(recipe_dir or '', 'meta.yaml')\n\n # 1: Top level meta.yaml keys should have a specific order.\n section_order_sorted = sorted(major_sections,\n key=EXPECTED_SECTION_ORDER.index)\n if major_sections != section_order_sorted:\n lints.append('The top level meta keys are in an unexpected order. '\n 'Expecting {}.'.format(section_order_sorted))\n\n # 2: The about section should have a home, license and summary.\n for about_item in ['home', 'license', 'summary']:\n about_section = meta.get('about', {}) or {}\n # if the section doesn't exist, or is just empty, lint it.\n if not about_section.get(about_item, ''):\n lints.append('The {} item is expected in the about section.'\n ''.format(about_item))\n\n # 3: The recipe should have some maintainers.\n extra_section = meta.get('extra', {}) or {}\n if not extra_section.get('recipe-maintainers', []):\n lints.append('The recipe could do with some maintainers listed in '\n 'the \"extra/recipe-maintainers\" section.')\n\n # 4: The recipe should have some tests.\n if 'test' not in major_sections:\n test_files = ['run_test.py', 'run_test.sh', 'run_test.bat',\n 'run_test.pl']\n a_test_file_exists = (recipe_dir is not None and\n any(os.path.exists(os.path.join(recipe_dir,\n test_file))\n for test_file in test_files))\n if not a_test_file_exists:\n lints.append('The recipe must have some tests.')\n\n # 5: License cannot be 'unknown.'\n license = meta.get('about', {}).get('license', '').lower()\n if 'unknown' == license.strip():\n lints.append('The recipe license cannot be unknown.')\n\n # 6: Selectors should be in a tidy form.\n if recipe_dir is not None and os.path.exists(meta_fname):\n bad_selectors = []\n # Good selectors look like \".*\\s\\s#\\s[...]\"\n good_selectors_pat = re.compile(r'(.+?)\\s{2,}#\\s\\[(.+)\\](?(2).*)$')\n with open(meta_fname, 'r') as fh:\n for selector_line in selector_lines(fh):\n if not good_selectors_pat.match(selector_line):\n bad_selectors.append(selector_line)\n if bad_selectors:\n lints.append('Selectors are suggested to take a '\n '\" # [<selector>]\" form.')\n\n # 7: The build section should have a build number.\n build_section = meta.get('build', {}) or {}\n build_number = build_section.get('number', None)\n if build_number is None:\n lints.append('The recipe must have a `build/number` section.')\n\n # 8: The build section should be before the run section in requirements.\n requirements_section = meta.get('requirements', {}) or {}\n requirements_order_sorted = sorted(requirements_section,\n key=REQUIREMENTS_ORDER.index)\n if requirements_section.keys() != requirements_order_sorted:\n lints.append('The `requirements/build` section should be defined '\n 'before the `requirements/run` section.')\n\n # 9: Files downloaded should have a hash.\n source_section = 
meta.get('source', {}) or {}\n if ('url' in source_section and\n not ({'sha1', 'sha256', 'md5'} & set(source_section.keys()))):\n lints.append('When defining a source/url please add a sha256, sha1 '\n 'or md5 checksum (sha256 preferably).')\n\n return lints\n\n\ndef selector_lines(lines):\n # Using the same pattern defined in conda-build (metadata.py),\n # we identify selectors.\n sel_pat = re.compile(r'(.+?)\\s*(#.*)?\\[(.+)\\](?(2).*)$')\n\n for line in lines:\n line = line.rstrip()\n if line.lstrip().startswith('#'):\n # Don't bother with comment only lines\n continue\n m = sel_pat.match(line)\n if m:\n m.group(3)\n yield line\n\n\ndef main(recipe_dir):\n recipe_dir = os.path.abspath(recipe_dir)\n recipe_meta = os.path.join(recipe_dir, 'meta.yaml')\n if not os.path.exists(recipe_dir):\n raise IOError('Feedstock has no recipe/meta.yaml.')\n\n env = jinja2.Environment(undefined=NullUndefined)\n\n with open(recipe_meta, 'r') as fh:\n content = env.from_string(''.join(fh)).render()\n meta = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)\n results = lintify(meta, recipe_dir)\n return results\n", "path": "conda_smithy/lint_recipe.py"}]} | 2,163 | 851 |
gh_patches_debug_10413 | rasdani/github-patches | git_diff | ray-project__ray-4181 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[autoscaler] bash 4.4 does not support set -i
<!--
General questions should be asked on the mailing list [email protected].
Before submitting an issue, please fill out the following form.
-->
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:
Ubuntu 17.10
- **Ray installed from (source or binary)**:
Source
- **Ray version**:
commit b8811cbe3418ab0d3ea10deaa54947d5bb26cecf
- **Python version**:
3.6
- **Exact command to reproduce**:
ray create_or_update example.yaml
<!--
You can obtain the Ray version with
python -c "import ray; print(ray.__version__)"
-->
### Describe the problem
<!-- Describe the problem clearly here. -->
As of bash 4.4, `set -i` is no longer accepted to create an interactive shell. Consider `-t` instead.
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
</issue>
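For context on this record: in `updater.py` below, `ssh_cmd()` prefixes every remote command with `set -i || true`, which the reporter says bash 4.4 no longer accepts. The golden diff at the end of this record drops that prefix and instead passes bash's own `-i` flag on the `bash --login -c` invocation. The snippet below is a minimal runnable sketch of how the remote command string looks after that change; the `uptime` command is just a sample, and this is not the full `ssh_cmd()` method.

```python
# Build the remote command the way the patched ssh_cmd() does: request an
# interactive login shell via "bash --login -c -i" instead of running
# "set -i" inside the command string (rejected by bash 4.4 per the issue above).
try:  # py3
    from shlex import quote
except ImportError:  # py2
    from pipes import quote

cmd = "uptime"  # sample remote command
force_interactive = (
    "true && source ~/.bashrc && "
    "export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ")
remote_cmd = "bash --login -c -i {}".format(quote(force_interactive + cmd))
print(remote_cmd)
```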
<code>
[start of python/ray/autoscaler/updater.py]
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 try: # py3
6 from shlex import quote
7 except ImportError: # py2
8 from pipes import quote
9 import logging
10 import os
11 import subprocess
12 import sys
13 import time
14
15 from threading import Thread
16
17 from ray.autoscaler.tags import TAG_RAY_NODE_STATUS, TAG_RAY_RUNTIME_CONFIG
18 from ray.autoscaler.log_timer import LogTimer
19
20 logger = logging.getLogger(__name__)
21
22 # How long to wait for a node to start, in seconds
23 NODE_START_WAIT_S = 300
24 SSH_CHECK_INTERVAL = 5
25 SSH_CONTROL_PATH = "/tmp/ray_ssh_sockets"
26
27
28 def get_default_ssh_options(private_key, connect_timeout):
29 OPTS = [
30 ("ConnectTimeout", "{}s".format(connect_timeout)),
31 ("StrictHostKeyChecking", "no"),
32 ("ControlMaster", "auto"),
33 ("ControlPath", "{}/%C".format(SSH_CONTROL_PATH)),
34 ("ControlPersist", "yes"),
35 ]
36
37 return ["-i", private_key] + [
38 x for y in (["-o", "{}={}".format(k, v)] for k, v in OPTS) for x in y
39 ]
40
41
42 class NodeUpdater(object):
43 """A process for syncing files and running init commands on a node."""
44
45 def __init__(self,
46 node_id,
47 provider_config,
48 provider,
49 auth_config,
50 cluster_name,
51 file_mounts,
52 initialization_commands,
53 setup_commands,
54 runtime_hash,
55 process_runner=subprocess,
56 use_internal_ip=False):
57 self.daemon = True
58 self.process_runner = process_runner
59 self.node_id = node_id
60 self.use_internal_ip = (use_internal_ip or provider_config.get(
61 "use_internal_ips", False))
62 self.provider = provider
63 self.ssh_private_key = auth_config["ssh_private_key"]
64 self.ssh_user = auth_config["ssh_user"]
65 self.ssh_ip = None
66 self.file_mounts = {
67 remote: os.path.expanduser(local)
68 for remote, local in file_mounts.items()
69 }
70 self.initialization_commands = initialization_commands
71 self.setup_commands = setup_commands
72 self.runtime_hash = runtime_hash
73
74 def get_caller(self, check_error):
75 if check_error:
76 return self.process_runner.call
77 else:
78 return self.process_runner.check_call
79
80 def get_node_ip(self):
81 if self.use_internal_ip:
82 return self.provider.internal_ip(self.node_id)
83 else:
84 return self.provider.external_ip(self.node_id)
85
86 def wait_for_ip(self, deadline):
87 while time.time() < deadline and \
88 not self.provider.is_terminated(self.node_id):
89 logger.info("NodeUpdater: "
90 "Waiting for IP of {}...".format(self.node_id))
91 ip = self.get_node_ip()
92 if ip is not None:
93 return ip
94 time.sleep(10)
95
96 return None
97
98 def set_ssh_ip_if_required(self):
99 if self.ssh_ip is not None:
100 return
101
102 # We assume that this never changes.
103 # I think that's reasonable.
104 deadline = time.time() + NODE_START_WAIT_S
105 with LogTimer("NodeUpdater: {}: Got IP".format(self.node_id)):
106 ip = self.wait_for_ip(deadline)
107 assert ip is not None, "Unable to find IP of node"
108
109 self.ssh_ip = ip
110
111 # This should run before any SSH commands and therefore ensure that
112 # the ControlPath directory exists, allowing SSH to maintain
113 # persistent sessions later on.
114 with open("/dev/null", "w") as redirect:
115 self.get_caller(False)(
116 ["mkdir", "-p", SSH_CONTROL_PATH],
117 stdout=redirect,
118 stderr=redirect)
119
120 self.get_caller(False)(
121 ["chmod", "0700", SSH_CONTROL_PATH],
122 stdout=redirect,
123 stderr=redirect)
124
125 def run(self):
126 logger.info("NodeUpdater: "
127 "{}: Updating to {}".format(self.node_id,
128 self.runtime_hash))
129 try:
130 m = "{}: Applied config {}".format(self.node_id, self.runtime_hash)
131 with LogTimer("NodeUpdater: {}".format(m)):
132 self.do_update()
133 except Exception as e:
134 error_str = str(e)
135 if hasattr(e, "cmd"):
136 error_str = "(Exit Status {}) {}".format(
137 e.returncode, " ".join(e.cmd))
138 logger.error("NodeUpdater: "
139 "{}: Error updating {}".format(
140 self.node_id, error_str))
141 self.provider.set_node_tags(self.node_id,
142 {TAG_RAY_NODE_STATUS: "update-failed"})
143 raise e
144
145 self.provider.set_node_tags(
146 self.node_id, {
147 TAG_RAY_NODE_STATUS: "up-to-date",
148 TAG_RAY_RUNTIME_CONFIG: self.runtime_hash
149 })
150
151 self.exitcode = 0
152
153 def wait_for_ssh(self, deadline):
154 logger.info("NodeUpdater: "
155 "{}: Waiting for SSH...".format(self.node_id))
156
157 while time.time() < deadline and \
158 not self.provider.is_terminated(self.node_id):
159 try:
160 logger.debug("NodeUpdater: "
161 "{}: Waiting for SSH...".format(self.node_id))
162 self.ssh_cmd(
163 "uptime",
164 connect_timeout=5,
165 redirect=open("/dev/null", "w"))
166 return True
167
168 except Exception as e:
169 retry_str = str(e)
170 if hasattr(e, "cmd"):
171 retry_str = "(Exit Status {}): {}".format(
172 e.returncode, " ".join(e.cmd))
173 logger.debug("NodeUpdater: "
174 "{}: SSH not up, retrying: {}".format(
175 self.node_id, retry_str))
176 time.sleep(SSH_CHECK_INTERVAL)
177
178 return False
179
180 def do_update(self):
181 self.provider.set_node_tags(self.node_id,
182 {TAG_RAY_NODE_STATUS: "waiting-for-ssh"})
183
184 deadline = time.time() + NODE_START_WAIT_S
185 self.set_ssh_ip_if_required()
186
187 # Wait for SSH access
188 with LogTimer("NodeUpdater: " "{}: Got SSH".format(self.node_id)):
189 ssh_ok = self.wait_for_ssh(deadline)
190 assert ssh_ok, "Unable to SSH to node"
191
192 # Rsync file mounts
193 self.provider.set_node_tags(self.node_id,
194 {TAG_RAY_NODE_STATUS: "syncing-files"})
195 for remote_path, local_path in self.file_mounts.items():
196 logger.info("NodeUpdater: "
197 "{}: Syncing {} to {}...".format(
198 self.node_id, local_path, remote_path))
199 assert os.path.exists(local_path), local_path
200 if os.path.isdir(local_path):
201 if not local_path.endswith("/"):
202 local_path += "/"
203 if not remote_path.endswith("/"):
204 remote_path += "/"
205
206 m = "{}: Synced {} to {}".format(self.node_id, local_path,
207 remote_path)
208 with LogTimer("NodeUpdater {}".format(m)):
209 self.ssh_cmd(
210 "mkdir -p {}".format(os.path.dirname(remote_path)),
211 redirect=open("/dev/null", "w"),
212 )
213 self.rsync_up(
214 local_path, remote_path, redirect=open("/dev/null", "w"))
215
216 # Run init commands
217 self.provider.set_node_tags(self.node_id,
218 {TAG_RAY_NODE_STATUS: "setting-up"})
219
220 m = "{}: Initialization commands completed".format(self.node_id)
221 with LogTimer("NodeUpdater: {}".format(m)):
222 for cmd in self.initialization_commands:
223 self.ssh_cmd(cmd, redirect=open("/dev/null", "w"))
224
225 m = "{}: Setup commands completed".format(self.node_id)
226 with LogTimer("NodeUpdater: {}".format(m)):
227 for cmd in self.setup_commands:
228 self.ssh_cmd(cmd, redirect=open("/dev/null", "w"))
229
230 def rsync_up(self, source, target, redirect=None, check_error=True):
231 self.set_ssh_ip_if_required()
232 self.get_caller(check_error)(
233 [
234 "rsync", "-e",
235 " ".join(["ssh"] +
236 get_default_ssh_options(self.ssh_private_key, 120)),
237 "--delete", "-avz", source, "{}@{}:{}".format(
238 self.ssh_user, self.ssh_ip, target)
239 ],
240 stdout=redirect or sys.stdout,
241 stderr=redirect or sys.stderr)
242
243 def rsync_down(self, source, target, redirect=None, check_error=True):
244 self.set_ssh_ip_if_required()
245 self.get_caller(check_error)(
246 [
247 "rsync", "-e",
248 " ".join(["ssh"] +
249 get_default_ssh_options(self.ssh_private_key, 120)),
250 "-avz", "{}@{}:{}".format(self.ssh_user, self.ssh_ip,
251 source), target
252 ],
253 stdout=redirect or sys.stdout,
254 stderr=redirect or sys.stderr)
255
256 def ssh_cmd(self,
257 cmd,
258 connect_timeout=120,
259 redirect=None,
260 allocate_tty=False,
261 emulate_interactive=True,
262 expect_error=False,
263 port_forward=None):
264
265 self.set_ssh_ip_if_required()
266
267 logger.info("NodeUpdater: Running {} on {}...".format(
268 cmd, self.ssh_ip))
269 ssh = ["ssh"]
270 if allocate_tty:
271 ssh.append("-tt")
272 if emulate_interactive:
273 force_interactive = (
274 "set -i || true && source ~/.bashrc && "
275 "export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ")
276 cmd = "bash --login -c {}".format(quote(force_interactive + cmd))
277
278 if port_forward is None:
279 ssh_opt = []
280 else:
281 ssh_opt = [
282 "-L", "{}:localhost:{}".format(port_forward, port_forward)
283 ]
284
285 self.get_caller(expect_error)(
286 ssh + ssh_opt + get_default_ssh_options(self.ssh_private_key,
287 connect_timeout) +
288 ["{}@{}".format(self.ssh_user, self.ssh_ip), cmd],
289 stdout=redirect or sys.stdout,
290 stderr=redirect or sys.stderr)
291
292
293 class NodeUpdaterThread(NodeUpdater, Thread):
294 def __init__(self, *args, **kwargs):
295 Thread.__init__(self)
296 NodeUpdater.__init__(self, *args, **kwargs)
297 self.exitcode = -1
298
[end of python/ray/autoscaler/updater.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/autoscaler/updater.py b/python/ray/autoscaler/updater.py
--- a/python/ray/autoscaler/updater.py
+++ b/python/ray/autoscaler/updater.py
@@ -271,9 +271,10 @@
ssh.append("-tt")
if emulate_interactive:
force_interactive = (
- "set -i || true && source ~/.bashrc && "
+ "true && source ~/.bashrc && "
"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && ")
- cmd = "bash --login -c {}".format(quote(force_interactive + cmd))
+ cmd = "bash --login -c -i {}".format(
+ quote(force_interactive + cmd))
if port_forward is None:
ssh_opt = []
| {"golden_diff": "diff --git a/python/ray/autoscaler/updater.py b/python/ray/autoscaler/updater.py\n--- a/python/ray/autoscaler/updater.py\n+++ b/python/ray/autoscaler/updater.py\n@@ -271,9 +271,10 @@\n ssh.append(\"-tt\")\n if emulate_interactive:\n force_interactive = (\n- \"set -i || true && source ~/.bashrc && \"\n+ \"true && source ~/.bashrc && \"\n \"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && \")\n- cmd = \"bash --login -c {}\".format(quote(force_interactive + cmd))\n+ cmd = \"bash --login -c -i {}\".format(\n+ quote(force_interactive + cmd))\n \n if port_forward is None:\n ssh_opt = []\n", "issue": "[autoscaler] bash 4.4 does not support set -i\n<!--\r\nGeneral questions should be asked on the mailing list [email protected].\r\n\r\nBefore submitting an issue, please fill out the following form.\r\n-->\r\n\r\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\nUbuntu 17.10\r\n\r\n- **Ray installed from (source or binary)**:\r\nSource\r\n- **Ray version**:\r\ncommit b8811cbe3418ab0d3ea10deaa54947d5bb26cecf\r\n\r\n- **Python version**:\r\n3.6\r\n- **Exact command to reproduce**:\r\nray create_or_update example.yaml\r\n\r\n<!--\r\nYou can obtain the Ray version with\r\n\r\npython -c \"import ray; print(ray.__version__)\"\r\n-->\r\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\n\r\nAs of bash 4.4, set -i is no longer accepted to create an interactive shell. Consider -t in stead.\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. 
-->\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\ntry: # py3\n from shlex import quote\nexcept ImportError: # py2\n from pipes import quote\nimport logging\nimport os\nimport subprocess\nimport sys\nimport time\n\nfrom threading import Thread\n\nfrom ray.autoscaler.tags import TAG_RAY_NODE_STATUS, TAG_RAY_RUNTIME_CONFIG\nfrom ray.autoscaler.log_timer import LogTimer\n\nlogger = logging.getLogger(__name__)\n\n# How long to wait for a node to start, in seconds\nNODE_START_WAIT_S = 300\nSSH_CHECK_INTERVAL = 5\nSSH_CONTROL_PATH = \"/tmp/ray_ssh_sockets\"\n\n\ndef get_default_ssh_options(private_key, connect_timeout):\n OPTS = [\n (\"ConnectTimeout\", \"{}s\".format(connect_timeout)),\n (\"StrictHostKeyChecking\", \"no\"),\n (\"ControlMaster\", \"auto\"),\n (\"ControlPath\", \"{}/%C\".format(SSH_CONTROL_PATH)),\n (\"ControlPersist\", \"yes\"),\n ]\n\n return [\"-i\", private_key] + [\n x for y in ([\"-o\", \"{}={}\".format(k, v)] for k, v in OPTS) for x in y\n ]\n\n\nclass NodeUpdater(object):\n \"\"\"A process for syncing files and running init commands on a node.\"\"\"\n\n def __init__(self,\n node_id,\n provider_config,\n provider,\n auth_config,\n cluster_name,\n file_mounts,\n initialization_commands,\n setup_commands,\n runtime_hash,\n process_runner=subprocess,\n use_internal_ip=False):\n self.daemon = True\n self.process_runner = process_runner\n self.node_id = node_id\n self.use_internal_ip = (use_internal_ip or provider_config.get(\n \"use_internal_ips\", False))\n self.provider = provider\n self.ssh_private_key = auth_config[\"ssh_private_key\"]\n self.ssh_user = auth_config[\"ssh_user\"]\n self.ssh_ip = None\n self.file_mounts = {\n remote: os.path.expanduser(local)\n for remote, local in file_mounts.items()\n }\n self.initialization_commands = initialization_commands\n self.setup_commands = setup_commands\n self.runtime_hash = runtime_hash\n\n def get_caller(self, check_error):\n if check_error:\n return self.process_runner.call\n else:\n return self.process_runner.check_call\n\n def get_node_ip(self):\n if self.use_internal_ip:\n return self.provider.internal_ip(self.node_id)\n else:\n return self.provider.external_ip(self.node_id)\n\n def wait_for_ip(self, deadline):\n while time.time() < deadline and \\\n not self.provider.is_terminated(self.node_id):\n logger.info(\"NodeUpdater: \"\n \"Waiting for IP of {}...\".format(self.node_id))\n ip = self.get_node_ip()\n if ip is not None:\n return ip\n time.sleep(10)\n\n return None\n\n def set_ssh_ip_if_required(self):\n if self.ssh_ip is not None:\n return\n\n # We assume that this never changes.\n # I think that's reasonable.\n deadline = time.time() + NODE_START_WAIT_S\n with LogTimer(\"NodeUpdater: {}: Got IP\".format(self.node_id)):\n ip = self.wait_for_ip(deadline)\n assert ip is not None, \"Unable to find IP of node\"\n\n self.ssh_ip = ip\n\n # This should run before any SSH commands and therefore ensure that\n # the ControlPath directory exists, allowing SSH to maintain\n # persistent sessions later on.\n with open(\"/dev/null\", \"w\") as redirect:\n self.get_caller(False)(\n [\"mkdir\", \"-p\", SSH_CONTROL_PATH],\n stdout=redirect,\n stderr=redirect)\n\n self.get_caller(False)(\n [\"chmod\", \"0700\", SSH_CONTROL_PATH],\n stdout=redirect,\n stderr=redirect)\n\n def run(self):\n logger.info(\"NodeUpdater: \"\n \"{}: Updating to {}\".format(self.node_id,\n self.runtime_hash))\n try:\n m = \"{}: Applied config 
{}\".format(self.node_id, self.runtime_hash)\n with LogTimer(\"NodeUpdater: {}\".format(m)):\n self.do_update()\n except Exception as e:\n error_str = str(e)\n if hasattr(e, \"cmd\"):\n error_str = \"(Exit Status {}) {}\".format(\n e.returncode, \" \".join(e.cmd))\n logger.error(\"NodeUpdater: \"\n \"{}: Error updating {}\".format(\n self.node_id, error_str))\n self.provider.set_node_tags(self.node_id,\n {TAG_RAY_NODE_STATUS: \"update-failed\"})\n raise e\n\n self.provider.set_node_tags(\n self.node_id, {\n TAG_RAY_NODE_STATUS: \"up-to-date\",\n TAG_RAY_RUNTIME_CONFIG: self.runtime_hash\n })\n\n self.exitcode = 0\n\n def wait_for_ssh(self, deadline):\n logger.info(\"NodeUpdater: \"\n \"{}: Waiting for SSH...\".format(self.node_id))\n\n while time.time() < deadline and \\\n not self.provider.is_terminated(self.node_id):\n try:\n logger.debug(\"NodeUpdater: \"\n \"{}: Waiting for SSH...\".format(self.node_id))\n self.ssh_cmd(\n \"uptime\",\n connect_timeout=5,\n redirect=open(\"/dev/null\", \"w\"))\n return True\n\n except Exception as e:\n retry_str = str(e)\n if hasattr(e, \"cmd\"):\n retry_str = \"(Exit Status {}): {}\".format(\n e.returncode, \" \".join(e.cmd))\n logger.debug(\"NodeUpdater: \"\n \"{}: SSH not up, retrying: {}\".format(\n self.node_id, retry_str))\n time.sleep(SSH_CHECK_INTERVAL)\n\n return False\n\n def do_update(self):\n self.provider.set_node_tags(self.node_id,\n {TAG_RAY_NODE_STATUS: \"waiting-for-ssh\"})\n\n deadline = time.time() + NODE_START_WAIT_S\n self.set_ssh_ip_if_required()\n\n # Wait for SSH access\n with LogTimer(\"NodeUpdater: \" \"{}: Got SSH\".format(self.node_id)):\n ssh_ok = self.wait_for_ssh(deadline)\n assert ssh_ok, \"Unable to SSH to node\"\n\n # Rsync file mounts\n self.provider.set_node_tags(self.node_id,\n {TAG_RAY_NODE_STATUS: \"syncing-files\"})\n for remote_path, local_path in self.file_mounts.items():\n logger.info(\"NodeUpdater: \"\n \"{}: Syncing {} to {}...\".format(\n self.node_id, local_path, remote_path))\n assert os.path.exists(local_path), local_path\n if os.path.isdir(local_path):\n if not local_path.endswith(\"/\"):\n local_path += \"/\"\n if not remote_path.endswith(\"/\"):\n remote_path += \"/\"\n\n m = \"{}: Synced {} to {}\".format(self.node_id, local_path,\n remote_path)\n with LogTimer(\"NodeUpdater {}\".format(m)):\n self.ssh_cmd(\n \"mkdir -p {}\".format(os.path.dirname(remote_path)),\n redirect=open(\"/dev/null\", \"w\"),\n )\n self.rsync_up(\n local_path, remote_path, redirect=open(\"/dev/null\", \"w\"))\n\n # Run init commands\n self.provider.set_node_tags(self.node_id,\n {TAG_RAY_NODE_STATUS: \"setting-up\"})\n\n m = \"{}: Initialization commands completed\".format(self.node_id)\n with LogTimer(\"NodeUpdater: {}\".format(m)):\n for cmd in self.initialization_commands:\n self.ssh_cmd(cmd, redirect=open(\"/dev/null\", \"w\"))\n\n m = \"{}: Setup commands completed\".format(self.node_id)\n with LogTimer(\"NodeUpdater: {}\".format(m)):\n for cmd in self.setup_commands:\n self.ssh_cmd(cmd, redirect=open(\"/dev/null\", \"w\"))\n\n def rsync_up(self, source, target, redirect=None, check_error=True):\n self.set_ssh_ip_if_required()\n self.get_caller(check_error)(\n [\n \"rsync\", \"-e\",\n \" \".join([\"ssh\"] +\n get_default_ssh_options(self.ssh_private_key, 120)),\n \"--delete\", \"-avz\", source, \"{}@{}:{}\".format(\n self.ssh_user, self.ssh_ip, target)\n ],\n stdout=redirect or sys.stdout,\n stderr=redirect or sys.stderr)\n\n def rsync_down(self, source, target, redirect=None, check_error=True):\n 
self.set_ssh_ip_if_required()\n self.get_caller(check_error)(\n [\n \"rsync\", \"-e\",\n \" \".join([\"ssh\"] +\n get_default_ssh_options(self.ssh_private_key, 120)),\n \"-avz\", \"{}@{}:{}\".format(self.ssh_user, self.ssh_ip,\n source), target\n ],\n stdout=redirect or sys.stdout,\n stderr=redirect or sys.stderr)\n\n def ssh_cmd(self,\n cmd,\n connect_timeout=120,\n redirect=None,\n allocate_tty=False,\n emulate_interactive=True,\n expect_error=False,\n port_forward=None):\n\n self.set_ssh_ip_if_required()\n\n logger.info(\"NodeUpdater: Running {} on {}...\".format(\n cmd, self.ssh_ip))\n ssh = [\"ssh\"]\n if allocate_tty:\n ssh.append(\"-tt\")\n if emulate_interactive:\n force_interactive = (\n \"set -i || true && source ~/.bashrc && \"\n \"export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && \")\n cmd = \"bash --login -c {}\".format(quote(force_interactive + cmd))\n\n if port_forward is None:\n ssh_opt = []\n else:\n ssh_opt = [\n \"-L\", \"{}:localhost:{}\".format(port_forward, port_forward)\n ]\n\n self.get_caller(expect_error)(\n ssh + ssh_opt + get_default_ssh_options(self.ssh_private_key,\n connect_timeout) +\n [\"{}@{}\".format(self.ssh_user, self.ssh_ip), cmd],\n stdout=redirect or sys.stdout,\n stderr=redirect or sys.stderr)\n\n\nclass NodeUpdaterThread(NodeUpdater, Thread):\n def __init__(self, *args, **kwargs):\n Thread.__init__(self)\n NodeUpdater.__init__(self, *args, **kwargs)\n self.exitcode = -1\n", "path": "python/ray/autoscaler/updater.py"}]} | 3,864 | 180 |
gh_patches_debug_19671 | rasdani/github-patches | git_diff | kartoza__prj.app-508 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Course name
Currently the course name is quite long, e.g. XProject_QGIS Introduction_2017-07-05-2017-07-19.
Would it be better if we could have a shorter course name, e.g. QGIS Introduction 101?
What do you think @timlinux ?
</issue>
<code>
[start of django_project/certification/models/certifying_organisation.py]
1 # coding=utf-8
2 """Certifying organisation model definitions for certification apps.
3
4 """
5
6 import os
7 from django.conf.global_settings import MEDIA_ROOT
8 from django.core.urlresolvers import reverse
9 from django.core.exceptions import ValidationError
10 from django.core.validators import validate_email
11 from django.db import models
12 from django.utils.text import slugify
13 from django.utils.translation import ugettext_lazy as _
14 from core.settings.contrib import STOP_WORDS
15 from unidecode import unidecode
16 from django.contrib.auth.models import User
17 from django_countries.fields import CountryField
18 import logging
19
20 logger = logging.getLogger(__name__)
21
22
23 class SlugifyingMixin(object):
24
25 class Meta:
26 abstract = True
27
28 def save(self, *args, **kwargs):
29 if not self.pk:
30 words = self.name.split()
31 filtered_words = [word for word in words if
32 word.lower() not in STOP_WORDS]
33 # unidecode() represents special characters (unicode data) in ASCII
34 new_list = unidecode(' '.join(filtered_words))
35 self.slug = slugify(new_list)[:50]
36 super(SlugifyingMixin, self).save(*args, **kwargs)
37
38
39 class ApprovedCertifyingOrganisationManager(models.Manager):
40 """Custom training centre manager.
41
42 Shows only approved certifying organisation.
43 """
44
45 def get_queryset(self):
46 """Query set generator. """
47
48 return super(
49 ApprovedCertifyingOrganisationManager, self).get_queryset().filter(
50 approved=True)
51
52
53 class UnapprovedCertifyingOrganisationManager(models.Manager):
54 """Custom training centre manager.
55
56 Shows only unapproved certifying organisation.
57 """
58
59 def get_queryset(self):
60 """Query set generator. """
61
62 return super(
63 UnapprovedCertifyingOrganisationManager, self).get_queryset(
64 ).filter(approved=False)
65
66
67 def validate_email_address(value):
68 try:
69 validate_email(value)
70 return True
71 except ValidationError(
72 _('%(value)s is not a valid email address'),
73 params={'value': value},):
74 return False
75
76
77 class CertifyingOrganisation(SlugifyingMixin, models.Model):
78 """Certifying organisation model."""
79
80 name = models.CharField(
81 help_text=_('name of organisation or institution'),
82 max_length=200,
83 null=False,
84 blank=False
85 )
86
87 organisation_email = models.CharField(
88 help_text=_('Email address organisation or institution.'),
89 max_length=200,
90 null=False,
91 blank=False,
92 validators=[validate_email_address],
93 )
94
95 address = models.TextField(
96 help_text=_('Address of Organisation or Institution.'),
97 max_length=1000,
98 null=False,
99 blank=False
100 )
101
102 logo = models.ImageField(
103 help_text=_('Logo for this organisation. '
104 'Most browsers support dragging the image directly on to '
105 'the "Choose File" button above.'),
106 upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),
107 blank=True
108 )
109
110 country = CountryField(
111 help_text=_('Select the country for this Institution'),
112 null=True,
113 blank=True)
114
115 organisation_phone = models.CharField(
116 help_text=_('Phone number: (country code)(number) e.g. +6221551553'),
117 max_length=200,
118 null=False,
119 blank=False
120 )
121
122 approved = models.BooleanField(
123 help_text=_('Approval from project admin'),
124 default=False
125 )
126
127 enabled = models.BooleanField(
128 help_text=_('Project enabled'),
129 default=True
130 )
131
132 slug = models.SlugField()
133 organisation_owners = models.ManyToManyField(User)
134 project = models.ForeignKey('base.Project')
135 objects = models.Manager()
136 approved_objects = ApprovedCertifyingOrganisationManager()
137 unapproved_objects = UnapprovedCertifyingOrganisationManager()
138
139 # noinspection PyClassicStyleClass.
140 class Meta:
141 """Meta class for Course attendee."""
142
143 app_label = 'certification'
144 ordering = ['name']
145 unique_together = ['name', 'project']
146
147 def save(self, *args, **kwargs):
148 super(CertifyingOrganisation, self).save(*args, **kwargs)
149
150 def __unicode__(self):
151 return '%s - %s' % (self.project.name, self.name)
152
153 def get_absolute_url(self):
154 """Return URL to certifying organisation detail page.
155
156 :return: URL
157 :rtype: str
158 """
159 return reverse('certifying-organisation-detail', kwargs={
160 'slug': self.slug,
161 'project_slug': self.project.slug
162 })
163
[end of django_project/certification/models/certifying_organisation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py
--- a/django_project/certification/models/certifying_organisation.py
+++ b/django_project/certification/models/certifying_organisation.py
@@ -74,7 +74,7 @@
return False
-class CertifyingOrganisation(SlugifyingMixin, models.Model):
+class CertifyingOrganisation(models.Model):
"""Certifying organisation model."""
name = models.CharField(
@@ -145,6 +145,15 @@
unique_together = ['name', 'project']
def save(self, *args, **kwargs):
+ if not self.pk:
+ words = self.name.split()
+ filtered_words = [word for word in words if
+ word.lower() not in STOP_WORDS]
+ # unidecode() represents special characters (unicode data) in ASCII
+ new_list = \
+ self.project.slug + ' ' + \
+ unidecode(' '.join(filtered_words))
+ self.slug = slugify(new_list)[:50]
super(CertifyingOrganisation, self).save(*args, **kwargs)
def __unicode__(self):
| {"golden_diff": "diff --git a/django_project/certification/models/certifying_organisation.py b/django_project/certification/models/certifying_organisation.py\n--- a/django_project/certification/models/certifying_organisation.py\n+++ b/django_project/certification/models/certifying_organisation.py\n@@ -74,7 +74,7 @@\n return False\n \n \n-class CertifyingOrganisation(SlugifyingMixin, models.Model):\n+class CertifyingOrganisation(models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n \n name = models.CharField(\n@@ -145,6 +145,15 @@\n unique_together = ['name', 'project']\n \n def save(self, *args, **kwargs):\n+ if not self.pk:\n+ words = self.name.split()\n+ filtered_words = [word for word in words if\n+ word.lower() not in STOP_WORDS]\n+ # unidecode() represents special characters (unicode data) in ASCII\n+ new_list = \\\n+ self.project.slug + ' ' + \\\n+ unidecode(' '.join(filtered_words))\n+ self.slug = slugify(new_list)[:50]\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n \n def __unicode__(self):\n", "issue": "Course name\nCurrently the course name is quite long, i.e. XProject_QGIS Introduction_2017-07-05-2017-07-19 \n\nWould it be better if we can have a shorter course name, i.e. QGIS Introduction 101?\nWhat do you think @timlinux ?\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Certifying organisation model definitions for certification apps.\n\n\"\"\"\n\nimport os\nfrom django.conf.global_settings import MEDIA_ROOT\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email\nfrom django.db import models\nfrom django.utils.text import slugify\nfrom django.utils.translation import ugettext_lazy as _\nfrom core.settings.contrib import STOP_WORDS\nfrom unidecode import unidecode\nfrom django.contrib.auth.models import User\nfrom django_countries.fields import CountryField\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass SlugifyingMixin(object):\n\n class Meta:\n abstract = True\n\n def save(self, *args, **kwargs):\n if not self.pk:\n words = self.name.split()\n filtered_words = [word for word in words if\n word.lower() not in STOP_WORDS]\n # unidecode() represents special characters (unicode data) in ASCII\n new_list = unidecode(' '.join(filtered_words))\n self.slug = slugify(new_list)[:50]\n super(SlugifyingMixin, self).save(*args, **kwargs)\n\n\nclass ApprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only approved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. \"\"\"\n\n return super(\n ApprovedCertifyingOrganisationManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedCertifyingOrganisationManager(models.Manager):\n \"\"\"Custom training centre manager.\n\n Shows only unapproved certifying organisation.\n \"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator. 
\"\"\"\n\n return super(\n UnapprovedCertifyingOrganisationManager, self).get_queryset(\n ).filter(approved=False)\n\n\ndef validate_email_address(value):\n try:\n validate_email(value)\n return True\n except ValidationError(\n _('%(value)s is not a valid email address'),\n params={'value': value},):\n return False\n\n\nclass CertifyingOrganisation(SlugifyingMixin, models.Model):\n \"\"\"Certifying organisation model.\"\"\"\n\n name = models.CharField(\n help_text=_('name of organisation or institution'),\n max_length=200,\n null=False,\n blank=False\n )\n\n organisation_email = models.CharField(\n help_text=_('Email address organisation or institution.'),\n max_length=200,\n null=False,\n blank=False,\n validators=[validate_email_address],\n )\n\n address = models.TextField(\n help_text=_('Address of Organisation or Institution.'),\n max_length=1000,\n null=False,\n blank=False\n )\n\n logo = models.ImageField(\n help_text=_('Logo for this organisation. '\n 'Most browsers support dragging the image directly on to '\n 'the \"Choose File\" button above.'),\n upload_to=os.path.join(MEDIA_ROOT, 'images/organisations'),\n blank=True\n )\n\n country = CountryField(\n help_text=_('Select the country for this Institution'),\n null=True,\n blank=True)\n\n organisation_phone = models.CharField(\n help_text=_('Phone number: (country code)(number) e.g. +6221551553'),\n max_length=200,\n null=False,\n blank=False\n )\n\n approved = models.BooleanField(\n help_text=_('Approval from project admin'),\n default=False\n )\n\n enabled = models.BooleanField(\n help_text=_('Project enabled'),\n default=True\n )\n\n slug = models.SlugField()\n organisation_owners = models.ManyToManyField(User)\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedCertifyingOrganisationManager()\n unapproved_objects = UnapprovedCertifyingOrganisationManager()\n\n # noinspection PyClassicStyleClass.\n class Meta:\n \"\"\"Meta class for Course attendee.\"\"\"\n\n app_label = 'certification'\n ordering = ['name']\n unique_together = ['name', 'project']\n\n def save(self, *args, **kwargs):\n super(CertifyingOrganisation, self).save(*args, **kwargs)\n\n def __unicode__(self):\n return '%s - %s' % (self.project.name, self.name)\n\n def get_absolute_url(self):\n \"\"\"Return URL to certifying organisation detail page.\n\n :return: URL\n :rtype: str\n \"\"\"\n return reverse('certifying-organisation-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n", "path": "django_project/certification/models/certifying_organisation.py"}]} | 1,980 | 275 |
gh_patches_debug_33017 | rasdani/github-patches | git_diff | translate__pootle-4147 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Some update_tmserver flags and feedback are confusing
I was trying the `update_tmserver` command and I found some confusing and counter-intuitive points:
1. Missing usage
``` bash
$ ./manage.py update_tmserver
CommandError: Specified Translation Memory is not defined in POOTLE_TM_SERVER.
```
At this point users are lost: they didn't specify any TM, yet the command complains about the specified TM not being defined in `POOTLE_TM_SERVER`. At the same time, they have absolutely no clue what `POOTLE_TM_SERVER` is. Since this returns a `CommandError`, users might think they're doing something wrong when typing the command in; however, the complaint is actually about settings.
2. `--project` is ambiguous and confusing
```
$ ./manage.py update_tmserver --tm=default foo.po
CommandError: You must specify a project with --project.
```
The naming is confusing, as one might think this has to be a project existing in Pootle, which is not the case. If it's a label or display name, let's rather use something along those lines, as `--project` is ambiguous in this context.
3. Incomplete usage message
```
$ ./manage.py update_tmserver --help
Usage: ./manage.py update_tmserver [options]
...
```
Among other things, nowhere in the CLI is defined that one can pass file paths as arguments to the command.
</issue>
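For orientation, the three points above all trace back to how this optparse-era Django management command declares its usage string and options. The snippet below is only a hedged sketch of that machinery — the `args = "[files]"` hint and the `--display-name` flag mirror the direction of the accepted patch further down, while the rest is an illustrative assumption rather than Pootle's actual code:

```python
# Hedged sketch of an optparse-based Django management command; names other
# than those quoted above are illustrative assumptions, not Pootle's code.
from optparse import make_option

from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = "Load Translation Memory with translations"
    # Advertised in "Usage: ./manage.py update_tmserver [options] [files]",
    # which addresses point 3 about the positional file arguments.
    args = "[files]"

    option_list = BaseCommand.option_list + (
        make_option('--display-name',
                    action='store',
                    dest='project',
                    default='',
                    help='Name used when displaying TM matches for these '
                         'translations.'),
    )

    def handle(self, *args, **options):
        # *args receives the positional file paths mentioned in point 3.
        self.stdout.write("files: %s" % (args,))
```

With `args` set, `--help` at least hints that file paths can be passed, and the renamed flag avoids the ambiguity described in point 2.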
<code>
[start of pootle/apps/pootle_app/management/commands/update_tmserver.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 from hashlib import md5
11 from optparse import make_option
12 import os
13 import sys
14
15 # This must be run before importing Django.
16 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'
17
18 from elasticsearch import helpers, Elasticsearch
19 from translate.storage import factory
20
21 from django.conf import settings
22 from django.core.management.base import BaseCommand, CommandError
23
24 from pootle_store.models import Unit
25
26
27 BULK_CHUNK_SIZE = 5000
28
29
30 class DBParser(object):
31
32 def __init__(self, *args, **kwargs):
33 self.stdout = kwargs.pop('stdout')
34 self.INDEX_NAME = kwargs.pop('index', None)
35
36 def get_units(self, *filenames):
37 """Gets the units to import and its total count."""
38 units_qs = Unit.simple_objects \
39 .exclude(target_f__isnull=True) \
40 .exclude(target_f__exact='') \
41 .filter(revision__gt=self.last_indexed_revision) \
42 .select_related(
43 'submitted_by',
44 'store',
45 'store__translation_project__project',
46 'store__translation_project__language'
47 ).values(
48 'id',
49 'revision',
50 'source_f',
51 'target_f',
52 'submitted_by__username',
53 'submitted_by__full_name',
54 'submitted_by__email',
55 'store__translation_project__project__fullname',
56 'store__pootle_path',
57 'store__translation_project__language__code'
58 ).order_by()
59
60 return units_qs.iterator(), units_qs.count()
61
62 def get_unit_data(self, unit):
63 """Return dict with data to import for a single unit."""
64 fullname = (unit['submitted_by__full_name'] or
65 unit['submitted_by__username'])
66
67 email_md5 = None
68 if unit['submitted_by__email']:
69 email_md5 = md5(unit['submitted_by__email']).hexdigest()
70
71 return {
72 '_index': self.INDEX_NAME,
73 '_type': unit['store__translation_project__language__code'],
74 '_id': unit['id'],
75 'revision': int(unit['revision']),
76 'project': unit['store__translation_project__project__fullname'],
77 'path': unit['store__pootle_path'],
78 'username': unit['submitted_by__username'],
79 'fullname': fullname,
80 'email_md5': email_md5,
81 'source': unit['source_f'],
82 'target': unit['target_f'],
83 }
84
85
86 class FileParser(object):
87
88 def __init__(self, *args, **kwargs):
89 self.stdout = kwargs.pop('stdout')
90 self.INDEX_NAME = kwargs.pop('index', None)
91 self.target_language = kwargs.pop('language', None)
92 self.project = kwargs.pop('project', None)
93
94 def get_units(self, *filenames):
95 """Gets the units to import and its total count."""
96 units = []
97 all_filenames = set()
98
99 for filename in filenames:
100 if not os.path.exists(filename):
101 self.stdout.write("File %s doesn't exist. Skipping it." %
102 filename)
103 continue
104
105 if os.path.isdir(filename):
106 for dirpath, dirs, fnames in os.walk(filename):
107 if (os.path.basename(dirpath) in
108 ["CVS", ".svn", "_darcs", ".git", ".hg", ".bzr"]):
109
110 continue
111
112 for f in fnames:
113 all_filenames.add(os.path.join(dirpath, f))
114 else:
115 all_filenames.add(filename)
116
117 for filename in all_filenames:
118 store = factory.getobject(filename)
119 if not store.gettargetlanguage() and not self.target_language:
120 raise CommandError("Unable to determine target language for "
121 "'%s'. Try again specifying a fallback "
122 "target language with --target-language" %
123 filename)
124
125 self.filename = filename
126 units.extend([unit for unit in store.units if unit.istranslated()])
127
128 return units, len(units)
129
130 def get_unit_data(self, unit):
131 """Return dict with data to import for a single unit."""
132 target_language = unit.gettargetlanguage()
133 if target_language is None:
134 target_language = self.target_language
135
136 return {
137 '_index': self.INDEX_NAME,
138 '_type': target_language,
139 '_id': unit.getid(),
140 'revision': 0,
141 'project': self.project,
142 'path': self.filename,
143 'username': None,
144 'fullname': None,
145 'email_md5': None,
146 'source': unit.source,
147 'target': unit.target,
148 }
149
150
151 class Command(BaseCommand):
152 help = "Load Translation Memory with translations"
153 option_list = BaseCommand.option_list + (
154 make_option('--refresh',
155 action='store_true',
156 dest='refresh',
157 default=False,
158 help='Process all items, not just the new ones, so '
159 'existing translations are refreshed'),
160 make_option('--rebuild',
161 action='store_true',
162 dest='rebuild',
163 default=False,
164 help='Drop the entire TM on start and update everything '
165 'from scratch'),
166 make_option('--dry-run',
167 action='store_true',
168 dest='dry_run',
169 default=False,
170 help='Report the number of translations to index and quit'),
171 # External TM specific options.
172 make_option('--tm',
173 action='store',
174 dest='tm',
175 default='local',
176 help="TM to use. TM must exist on settings. TM will be "
177 "created on the server if it doesn't exist"),
178 make_option('--target-language',
179 action='store',
180 dest='target_language',
181 default='',
182 help="Target language to fallback to use in case it can't "
183 "be guessed for any of the input files."),
184 make_option('--project',
185 action='store',
186 dest='project',
187 default='',
188 help='Project to use when displaying TM matches for this '
189 'translations.'),
190 )
191
192 def _parse_translations(self, *args, **options):
193 units, total = self.parser.get_units(*args)
194
195 if total == 0:
196 self.stdout.write("No translations to index")
197 sys.exit()
198
199 self.stdout.write("%s translations to index" % total)
200
201 if options['dry_run']:
202 sys.exit()
203
204 self.stdout.write("")
205
206 for i, unit in enumerate(units, start=1):
207 if (i % 1000 == 0) or (i == total):
208 percent = "%.1f" % (i * 100.0 / total)
209 self.stdout.write("%s (%s%%)" % (i, percent), ending='\r')
210 self.stdout.flush()
211
212 yield self.parser.get_unit_data(unit)
213
214 if i != total:
215 self.stdout.write("Expected %d, loaded %d." % (total, i))
216
217 def _initialize(self, *args, **options):
218 if not getattr(settings, 'POOTLE_TM_SERVER', False):
219 raise CommandError('POOTLE_TM_SERVER setting is missing.')
220
221 try:
222 self.tm_settings = settings.POOTLE_TM_SERVER[options.get('tm')]
223 except KeyError:
224 raise CommandError('Specified Translation Memory is not defined '
225 'in POOTLE_TM_SERVER.')
226
227 self.INDEX_NAME = self.tm_settings['INDEX_NAME']
228 self.is_local_tm = options.get('tm') == 'local'
229
230 self.es = Elasticsearch([{
231 'host': self.tm_settings['HOST'],
232 'port': self.tm_settings['PORT'],
233 }],
234 retry_on_timeout=True
235 )
236
237 # If files to import have been provided.
238 if len(args):
239 if self.is_local_tm:
240 raise CommandError('You cannot add translations from files to '
241 'a local TM.')
242
243 self.target_language = options.pop('target_language')
244 self.project = options.pop('project')
245
246 if not self.project:
247 raise CommandError('You must specify a project with '
248 '--project.')
249 self.parser = FileParser(stdout=self.stdout, index=self.INDEX_NAME,
250 language=self.target_language,
251 project=self.project)
252 elif not self.is_local_tm:
253 raise CommandError('You cannot add translations from database to '
254 'an external TM.')
255 else:
256 self.parser = DBParser(stdout=self.stdout, index=self.INDEX_NAME)
257
258 def _set_latest_indexed_revision(self, **options):
259 self.last_indexed_revision = -1
260
261 if (not options['rebuild'] and
262 not options['refresh'] and
263 self.es.indices.exists(self.INDEX_NAME)):
264
265 result = self.es.search(
266 index=self.INDEX_NAME,
267 body={
268 'query': {
269 'match_all': {}
270 },
271 'facets': {
272 'stat1': {
273 'statistical': {
274 'field': 'revision'
275 }
276 }
277 }
278 }
279 )
280 self.last_indexed_revision = result['facets']['stat1']['max']
281
282 self.parser.last_indexed_revision = self.last_indexed_revision
283
284 self.stdout.write("Last indexed revision = %s" %
285 self.last_indexed_revision)
286
287 def handle(self, *args, **options):
288 self._initialize(*args, **options)
289
290 if (options['rebuild'] and
291 not options['dry_run'] and
292 self.es.indices.exists(self.INDEX_NAME)):
293
294 self.es.indices.delete(index=self.INDEX_NAME)
295
296 if (not options['dry_run'] and
297 not self.es.indices.exists(self.INDEX_NAME)):
298
299 self.es.indices.create(index=self.INDEX_NAME)
300
301 if self.is_local_tm:
302 self._set_latest_indexed_revision(**options)
303
304 success, _ = helpers.bulk(self.es,
305 self._parse_translations(*args, **options))
306
[end of pootle/apps/pootle_app/management/commands/update_tmserver.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_app/management/commands/update_tmserver.py b/pootle/apps/pootle_app/management/commands/update_tmserver.py
--- a/pootle/apps/pootle_app/management/commands/update_tmserver.py
+++ b/pootle/apps/pootle_app/management/commands/update_tmserver.py
@@ -150,6 +150,7 @@
class Command(BaseCommand):
help = "Load Translation Memory with translations"
+ args = "[files]"
option_list = BaseCommand.option_list + (
make_option('--refresh',
action='store_true',
@@ -181,11 +182,11 @@
default='',
help="Target language to fallback to use in case it can't "
"be guessed for any of the input files."),
- make_option('--project',
+ make_option('--display-name',
action='store',
dest='project',
default='',
- help='Project to use when displaying TM matches for this '
+ help='Name used when displaying TM matches for these '
'translations.'),
)
@@ -221,8 +222,10 @@
try:
self.tm_settings = settings.POOTLE_TM_SERVER[options.get('tm')]
except KeyError:
- raise CommandError('Specified Translation Memory is not defined '
- 'in POOTLE_TM_SERVER.')
+ raise CommandError("Translation Memory '%s' is not defined in the "
+ "POOTLE_TM_SERVER setting. Please ensure it "
+ "exists and double-check you typed it "
+ "correctly." % options.get('tm'))
self.INDEX_NAME = self.tm_settings['INDEX_NAME']
self.is_local_tm = options.get('tm') == 'local'
| {"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/update_tmserver.py b/pootle/apps/pootle_app/management/commands/update_tmserver.py\n--- a/pootle/apps/pootle_app/management/commands/update_tmserver.py\n+++ b/pootle/apps/pootle_app/management/commands/update_tmserver.py\n@@ -150,6 +150,7 @@\n \n class Command(BaseCommand):\n help = \"Load Translation Memory with translations\"\n+ args = \"[files]\"\n option_list = BaseCommand.option_list + (\n make_option('--refresh',\n action='store_true',\n@@ -181,11 +182,11 @@\n default='',\n help=\"Target language to fallback to use in case it can't \"\n \"be guessed for any of the input files.\"),\n- make_option('--project',\n+ make_option('--display-name',\n action='store',\n dest='project',\n default='',\n- help='Project to use when displaying TM matches for this '\n+ help='Name used when displaying TM matches for these '\n 'translations.'),\n )\n \n@@ -221,8 +222,10 @@\n try:\n self.tm_settings = settings.POOTLE_TM_SERVER[options.get('tm')]\n except KeyError:\n- raise CommandError('Specified Translation Memory is not defined '\n- 'in POOTLE_TM_SERVER.')\n+ raise CommandError(\"Translation Memory '%s' is not defined in the \"\n+ \"POOTLE_TM_SERVER setting. Please ensure it \"\n+ \"exists and double-check you typed it \"\n+ \"correctly.\" % options.get('tm'))\n \n self.INDEX_NAME = self.tm_settings['INDEX_NAME']\n self.is_local_tm = options.get('tm') == 'local'\n", "issue": "Some update_tmserver flags and feedback are confusing\nI was trying the `update_tmserver` command and I found some confusing and counter-intuitive points:\n1. Missing usage\n \n ``` bash\n $ ./manage.py update_tmserver\n CommandError: Specified Translation Memory is not defined in POOTLE_TM_SERVER.\n ```\n \n At this point the users are lost, as they didn't specify any TM yet it complains about the specified TM not being defined in `POOTLE_TM_SERVER`. At the same time, they have absolutely no clue what `POOTLE_TM_SERVER` is. Since this returns a `CommandError`, users might think they're doing something wrong when typing the command in, however the complain is about settings.\n2. `--project` is ambiguous and confusing\n \n ```\n $ ./manage.py update_tmserver --tm=default foo.po\n CommandError: You must specify a project with --project.\n ```\n \n The naming is confusing, as one can think this has to be a project existing in Pootle, which is not the case. If it's a label or display name, let's rather use something along those lines, as `--project` is ambiguous in this context.\n3. Incomplete usage message\n \n ```\n $ ./manage.py update_tmserver --help\n Usage: ./manage.py update_tmserver [options]\n ...\n ```\n \n Among other things, nowhere in the CLI is defined that one can pass file paths as arguments to the command.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\nfrom optparse import make_option\nimport os\nimport sys\n\n# This must be run before importing Django.\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom elasticsearch import helpers, Elasticsearch\nfrom translate.storage import factory\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_store.models import Unit\n\n\nBULK_CHUNK_SIZE = 5000\n\n\nclass DBParser(object):\n\n def __init__(self, *args, **kwargs):\n self.stdout = kwargs.pop('stdout')\n self.INDEX_NAME = kwargs.pop('index', None)\n\n def get_units(self, *filenames):\n \"\"\"Gets the units to import and its total count.\"\"\"\n units_qs = Unit.simple_objects \\\n .exclude(target_f__isnull=True) \\\n .exclude(target_f__exact='') \\\n .filter(revision__gt=self.last_indexed_revision) \\\n .select_related(\n 'submitted_by',\n 'store',\n 'store__translation_project__project',\n 'store__translation_project__language'\n ).values(\n 'id',\n 'revision',\n 'source_f',\n 'target_f',\n 'submitted_by__username',\n 'submitted_by__full_name',\n 'submitted_by__email',\n 'store__translation_project__project__fullname',\n 'store__pootle_path',\n 'store__translation_project__language__code'\n ).order_by()\n\n return units_qs.iterator(), units_qs.count()\n\n def get_unit_data(self, unit):\n \"\"\"Return dict with data to import for a single unit.\"\"\"\n fullname = (unit['submitted_by__full_name'] or\n unit['submitted_by__username'])\n\n email_md5 = None\n if unit['submitted_by__email']:\n email_md5 = md5(unit['submitted_by__email']).hexdigest()\n\n return {\n '_index': self.INDEX_NAME,\n '_type': unit['store__translation_project__language__code'],\n '_id': unit['id'],\n 'revision': int(unit['revision']),\n 'project': unit['store__translation_project__project__fullname'],\n 'path': unit['store__pootle_path'],\n 'username': unit['submitted_by__username'],\n 'fullname': fullname,\n 'email_md5': email_md5,\n 'source': unit['source_f'],\n 'target': unit['target_f'],\n }\n\n\nclass FileParser(object):\n\n def __init__(self, *args, **kwargs):\n self.stdout = kwargs.pop('stdout')\n self.INDEX_NAME = kwargs.pop('index', None)\n self.target_language = kwargs.pop('language', None)\n self.project = kwargs.pop('project', None)\n\n def get_units(self, *filenames):\n \"\"\"Gets the units to import and its total count.\"\"\"\n units = []\n all_filenames = set()\n\n for filename in filenames:\n if not os.path.exists(filename):\n self.stdout.write(\"File %s doesn't exist. Skipping it.\" %\n filename)\n continue\n\n if os.path.isdir(filename):\n for dirpath, dirs, fnames in os.walk(filename):\n if (os.path.basename(dirpath) in\n [\"CVS\", \".svn\", \"_darcs\", \".git\", \".hg\", \".bzr\"]):\n\n continue\n\n for f in fnames:\n all_filenames.add(os.path.join(dirpath, f))\n else:\n all_filenames.add(filename)\n\n for filename in all_filenames:\n store = factory.getobject(filename)\n if not store.gettargetlanguage() and not self.target_language:\n raise CommandError(\"Unable to determine target language for \"\n \"'%s'. 
Try again specifying a fallback \"\n \"target language with --target-language\" %\n filename)\n\n self.filename = filename\n units.extend([unit for unit in store.units if unit.istranslated()])\n\n return units, len(units)\n\n def get_unit_data(self, unit):\n \"\"\"Return dict with data to import for a single unit.\"\"\"\n target_language = unit.gettargetlanguage()\n if target_language is None:\n target_language = self.target_language\n\n return {\n '_index': self.INDEX_NAME,\n '_type': target_language,\n '_id': unit.getid(),\n 'revision': 0,\n 'project': self.project,\n 'path': self.filename,\n 'username': None,\n 'fullname': None,\n 'email_md5': None,\n 'source': unit.source,\n 'target': unit.target,\n }\n\n\nclass Command(BaseCommand):\n help = \"Load Translation Memory with translations\"\n option_list = BaseCommand.option_list + (\n make_option('--refresh',\n action='store_true',\n dest='refresh',\n default=False,\n help='Process all items, not just the new ones, so '\n 'existing translations are refreshed'),\n make_option('--rebuild',\n action='store_true',\n dest='rebuild',\n default=False,\n help='Drop the entire TM on start and update everything '\n 'from scratch'),\n make_option('--dry-run',\n action='store_true',\n dest='dry_run',\n default=False,\n help='Report the number of translations to index and quit'),\n # External TM specific options.\n make_option('--tm',\n action='store',\n dest='tm',\n default='local',\n help=\"TM to use. TM must exist on settings. TM will be \"\n \"created on the server if it doesn't exist\"),\n make_option('--target-language',\n action='store',\n dest='target_language',\n default='',\n help=\"Target language to fallback to use in case it can't \"\n \"be guessed for any of the input files.\"),\n make_option('--project',\n action='store',\n dest='project',\n default='',\n help='Project to use when displaying TM matches for this '\n 'translations.'),\n )\n\n def _parse_translations(self, *args, **options):\n units, total = self.parser.get_units(*args)\n\n if total == 0:\n self.stdout.write(\"No translations to index\")\n sys.exit()\n\n self.stdout.write(\"%s translations to index\" % total)\n\n if options['dry_run']:\n sys.exit()\n\n self.stdout.write(\"\")\n\n for i, unit in enumerate(units, start=1):\n if (i % 1000 == 0) or (i == total):\n percent = \"%.1f\" % (i * 100.0 / total)\n self.stdout.write(\"%s (%s%%)\" % (i, percent), ending='\\r')\n self.stdout.flush()\n\n yield self.parser.get_unit_data(unit)\n\n if i != total:\n self.stdout.write(\"Expected %d, loaded %d.\" % (total, i))\n\n def _initialize(self, *args, **options):\n if not getattr(settings, 'POOTLE_TM_SERVER', False):\n raise CommandError('POOTLE_TM_SERVER setting is missing.')\n\n try:\n self.tm_settings = settings.POOTLE_TM_SERVER[options.get('tm')]\n except KeyError:\n raise CommandError('Specified Translation Memory is not defined '\n 'in POOTLE_TM_SERVER.')\n\n self.INDEX_NAME = self.tm_settings['INDEX_NAME']\n self.is_local_tm = options.get('tm') == 'local'\n\n self.es = Elasticsearch([{\n 'host': self.tm_settings['HOST'],\n 'port': self.tm_settings['PORT'],\n }],\n retry_on_timeout=True\n )\n\n # If files to import have been provided.\n if len(args):\n if self.is_local_tm:\n raise CommandError('You cannot add translations from files to '\n 'a local TM.')\n\n self.target_language = options.pop('target_language')\n self.project = options.pop('project')\n\n if not self.project:\n raise CommandError('You must specify a project with '\n '--project.')\n self.parser = 
FileParser(stdout=self.stdout, index=self.INDEX_NAME,\n language=self.target_language,\n project=self.project)\n elif not self.is_local_tm:\n raise CommandError('You cannot add translations from database to '\n 'an external TM.')\n else:\n self.parser = DBParser(stdout=self.stdout, index=self.INDEX_NAME)\n\n def _set_latest_indexed_revision(self, **options):\n self.last_indexed_revision = -1\n\n if (not options['rebuild'] and\n not options['refresh'] and\n self.es.indices.exists(self.INDEX_NAME)):\n\n result = self.es.search(\n index=self.INDEX_NAME,\n body={\n 'query': {\n 'match_all': {}\n },\n 'facets': {\n 'stat1': {\n 'statistical': {\n 'field': 'revision'\n }\n }\n }\n }\n )\n self.last_indexed_revision = result['facets']['stat1']['max']\n\n self.parser.last_indexed_revision = self.last_indexed_revision\n\n self.stdout.write(\"Last indexed revision = %s\" %\n self.last_indexed_revision)\n\n def handle(self, *args, **options):\n self._initialize(*args, **options)\n\n if (options['rebuild'] and\n not options['dry_run'] and\n self.es.indices.exists(self.INDEX_NAME)):\n\n self.es.indices.delete(index=self.INDEX_NAME)\n\n if (not options['dry_run'] and\n not self.es.indices.exists(self.INDEX_NAME)):\n\n self.es.indices.create(index=self.INDEX_NAME)\n\n if self.is_local_tm:\n self._set_latest_indexed_revision(**options)\n\n success, _ = helpers.bulk(self.es,\n self._parse_translations(*args, **options))\n", "path": "pootle/apps/pootle_app/management/commands/update_tmserver.py"}]} | 3,856 | 388 |
gh_patches_debug_14062 | rasdani/github-patches | git_diff | OCA__manufacture-130 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[8.0][mrp_production_real_cost] Error when producing a product
Hi,
there's a new error from mrp_production_real_cost after doing a git pull of the latest commit
```
ERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT "mrp_production"."id" FROM "mrp_production"
WHERE "mrp_production".id IN %s ORDER BY "mrp_production"."priority" DESC,"mrp_production"."date_planned" ASC
File "/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py", line 34, in action_production_end
self.mapped('move_created_ids2').filtered(
File "/usr/lib/python2.7/dist-packages/psycopg2/extensions.py", line 129, in getquoted
pobjs = [adapt(o) for o in self._seq]
ValueError: "can't adapt type 'mrp.production'" while evaluating
u'action_production_end()'
```
regards
</issue>
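For background, the `ValueError: "can't adapt type 'mrp.production'"` in the traceback appears to be psycopg2 refusing to serialise a recordset object that ended up inside the `IN %s` parameter of the query shown above. The sketch below paraphrases the per-record pattern used by the accepted fix later in this report; it is illustrative only and will not run outside an Odoo 8 addon:

```python
# Illustrative only: iterate record by record so no recordset object is ever
# handed to the SQL layer as a query parameter (mirrors the accepted patch).
from openerp import api, models


class MrpProduction(models.Model):
    _inherit = 'mrp.production'

    @api.multi
    def action_production_end(self):
        res = super(MrpProduction, self).action_production_end()
        for production in self:
            # Work on a single mrp.production record at a time.
            production.mapped('move_created_ids2').filtered(
                lambda move: move.state == 'done'
            ).product_price_update_production_done()
        return res
```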
<code>
[start of mrp_production_real_cost/models/mrp_production.py]
1 # -*- coding: utf-8 -*-
2 # © 2014-2015 Avanzosc
3 # © 2014-2015 Pedro M. Baeza
4 # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
5
6 from openerp import api, fields, models
7
8
9 class MrpProduction(models.Model):
10 _inherit = 'mrp.production'
11
12 @api.multi
13 @api.depends('analytic_line_ids', 'analytic_line_ids.amount',
14 'product_qty')
15 def _compute_real_cost(self):
16 for production in self:
17 cost_lines = production.analytic_line_ids.filtered(
18 lambda l: l.amount < 0)
19 production.real_cost = -sum(cost_lines.mapped('amount'))
20 production.unit_real_cost = (
21 production.real_cost / production.product_qty)
22
23 analytic_line_ids = fields.One2many(
24 comodel_name="account.analytic.line", inverse_name="mrp_production_id",
25 string="Cost Lines")
26 real_cost = fields.Float(
27 "Total Real Cost", compute="_compute_real_cost", store=True)
28 unit_real_cost = fields.Float(
29 "Unit Real Cost", compute="_compute_real_cost", store=True)
30
31 @api.multi
32 def action_production_end(self):
33 res = super(MrpProduction, self).action_production_end()
34 self.mapped('move_created_ids2').filtered(
35 lambda l: l.state == 'done').product_price_update_production_done()
36 return res
37
38 @api.model
39 def _prepare_real_cost_analytic_line(
40 self, journal, name, production, product, general_account=None,
41 workorder=None, qty=1, amount=0):
42 """
43 Prepare the vals for creating an analytic entry for real cost
44 :param journal: Journal of the entry
45 :param name: Name of the entry
46 :param production: Origin product
47 :param product: Product for the entry
48 :param general_account: General account for the entry
49 :param workorder: Origin workorder
50 :param qty: Quantity for the entry. This quantity will multiply both
51 standard and average costs for the entry costs.
52 :param amount: Cost for calculating real cost.
53 :return: Dictionary with the analytic entry vals.
54 """
55 analytic_line_obj = self.env['account.analytic.line']
56 property_obj = self.env['ir.property']
57 general_account = (
58 general_account or product.property_account_expense or
59 product.categ_id.property_account_expense_categ or
60 property_obj.get('property_account_expense_categ',
61 'product.category'))
62 return {
63 'name': name,
64 'mrp_production_id': production.id,
65 'workorder': workorder and workorder.id or False,
66 'account_id': self.analytic_account_id.id,
67 'journal_id': journal.id,
68 'user_id': self.env.uid,
69 'date': analytic_line_obj._get_default_date(),
70 'product_id': product and product.id or False,
71 'unit_amount': qty,
72 'amount': amount,
73 'product_uom_id': product.uom_id.id,
74 'general_account_id': general_account.id,
75 }
76
77 @api.multi
78 def _costs_generate(self):
79 """
80 As we are generating the account_analytic_lines for MO in the
81 current module, we override this method in order to avoid
82 duplicates created in the parent class. Any other module
83 inheriting this method should take this into account!
84 """
85 return
86
[end of mrp_production_real_cost/models/mrp_production.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py
--- a/mrp_production_real_cost/models/mrp_production.py
+++ b/mrp_production_real_cost/models/mrp_production.py
@@ -31,8 +31,15 @@
@api.multi
def action_production_end(self):
res = super(MrpProduction, self).action_production_end()
- self.mapped('move_created_ids2').filtered(
- lambda l: l.state == 'done').product_price_update_production_done()
+ for production in self:
+ # This is needed because commit
+ # https://github.com/odoo/odoo/commit/
+ # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9
+ # introduces a weird behavior on the next call, provoking an error.
+ production.sudo().refresh()
+ production.mapped('move_created_ids2').filtered(
+ lambda l: l.state == 'done'
+ ).product_price_update_production_done()
return res
@api.model
| {"golden_diff": "diff --git a/mrp_production_real_cost/models/mrp_production.py b/mrp_production_real_cost/models/mrp_production.py\n--- a/mrp_production_real_cost/models/mrp_production.py\n+++ b/mrp_production_real_cost/models/mrp_production.py\n@@ -31,8 +31,15 @@\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n- self.mapped('move_created_ids2').filtered(\n- lambda l: l.state == 'done').product_price_update_production_done()\n+ for production in self:\n+ # This is needed because commit\n+ # https://github.com/odoo/odoo/commit/\n+ # 6f29bfc181d23d70d29776d96b4318e9ee2c93a9\n+ # introduces a weird behavior on the next call, provoking an error.\n+ production.sudo().refresh()\n+ production.mapped('move_created_ids2').filtered(\n+ lambda l: l.state == 'done'\n+ ).product_price_update_production_done()\n return res\n \n @api.model\n", "issue": "is:issue is:open [8.0][mrp_production_real_cost] Error when produce product\nHi,\n\nthere's new error from mrp_production_real_cost, after I do git pull from the last commit \n\n```\n\nERROR demo1 openerp.sql_db: Programming error: can't adapt type 'mrp.production', in query SELECT \"mrp_production\".\"id\" FROM \"mrp_production\"\n WHERE \"mrp_production\".id IN %s ORDER BY \"mrp_production\".\"priority\" DESC,\"mrp_production\".\"date_planned\" ASC \n\n File \"/opt/odoo/server/addons/mrp_production_real_cost/models/mrp_production.py\", line 34, in action_production_end\n self.mapped('move_created_ids2').filtered(\n\n\n File \"/usr/lib/python2.7/dist-packages/psycopg2/extensions.py\", line 129, in getquoted\n pobjs = [adapt(o) for o in self._seq]\nValueError: \"can't adapt type 'mrp.production'\" while evaluating\nu'action_production_end()'\n```\n\nregards\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2014-2015 Avanzosc\n# \u00a9 2014-2015 Pedro M. Baeza\n# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html\n\nfrom openerp import api, fields, models\n\n\nclass MrpProduction(models.Model):\n _inherit = 'mrp.production'\n\n @api.multi\n @api.depends('analytic_line_ids', 'analytic_line_ids.amount',\n 'product_qty')\n def _compute_real_cost(self):\n for production in self:\n cost_lines = production.analytic_line_ids.filtered(\n lambda l: l.amount < 0)\n production.real_cost = -sum(cost_lines.mapped('amount'))\n production.unit_real_cost = (\n production.real_cost / production.product_qty)\n\n analytic_line_ids = fields.One2many(\n comodel_name=\"account.analytic.line\", inverse_name=\"mrp_production_id\",\n string=\"Cost Lines\")\n real_cost = fields.Float(\n \"Total Real Cost\", compute=\"_compute_real_cost\", store=True)\n unit_real_cost = fields.Float(\n \"Unit Real Cost\", compute=\"_compute_real_cost\", store=True)\n\n @api.multi\n def action_production_end(self):\n res = super(MrpProduction, self).action_production_end()\n self.mapped('move_created_ids2').filtered(\n lambda l: l.state == 'done').product_price_update_production_done()\n return res\n\n @api.model\n def _prepare_real_cost_analytic_line(\n self, journal, name, production, product, general_account=None,\n workorder=None, qty=1, amount=0):\n \"\"\"\n Prepare the vals for creating an analytic entry for real cost\n :param journal: Journal of the entry\n :param name: Name of the entry\n :param production: Origin product\n :param product: Product for the entry\n :param general_account: General account for the entry\n :param workorder: Origin workorder\n :param qty: Quantity for the entry. 
This quantity will multiply both\n standard and average costs for the entry costs.\n :param amount: Cost for calculating real cost.\n :return: Dictionary with the analytic entry vals.\n \"\"\"\n analytic_line_obj = self.env['account.analytic.line']\n property_obj = self.env['ir.property']\n general_account = (\n general_account or product.property_account_expense or\n product.categ_id.property_account_expense_categ or\n property_obj.get('property_account_expense_categ',\n 'product.category'))\n return {\n 'name': name,\n 'mrp_production_id': production.id,\n 'workorder': workorder and workorder.id or False,\n 'account_id': self.analytic_account_id.id,\n 'journal_id': journal.id,\n 'user_id': self.env.uid,\n 'date': analytic_line_obj._get_default_date(),\n 'product_id': product and product.id or False,\n 'unit_amount': qty,\n 'amount': amount,\n 'product_uom_id': product.uom_id.id,\n 'general_account_id': general_account.id,\n }\n\n @api.multi\n def _costs_generate(self):\n \"\"\"\n As we are generating the account_analytic_lines for MO in the\n current module, we override this method in order to avoid\n duplicates created in the parent class. Any other module\n inheriting this method should take this into account!\n \"\"\"\n return\n", "path": "mrp_production_real_cost/models/mrp_production.py"}]} | 1,707 | 263 |
gh_patches_debug_22165 | rasdani/github-patches | git_diff | pydantic__pydantic-6431 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError in class attribute access for validate_call
### Initial Checks
- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent
### Description
https://github.com/pydantic/pydantic/pull/6406 introduced an AttributeError when accessing a method on a class (not instance):
```
draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- pyd2.py
Traceback (most recent call last):
File "/Users/jaraco/draft/pyd2.py", line 14, in <module>
assert Thing.c == Thing.c
^^^^^^^
File "/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-o94_k6_k/pydantic/_internal/_validate_call.py", line 101, in __get__
setattr(obj, self._name, result)
AttributeError: 'NoneType' object has no attribute 'c'
```
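When a descriptor is looked up on the class itself rather than on an instance, Python invokes `__get__` with `obj=None`, which is why the `setattr(obj, ...)` call above targets `None`. A minimal sketch of the protocol in plain Python (independent of pydantic):

```Python
class Descriptor:
    def __set_name__(self, owner, name):
        self._name = name

    def __get__(self, obj, objtype=None):
        # obj is the instance for `thing.c`, but None for `Thing.c`
        print(obj, objtype)
        return self


class Thing:
    c = Descriptor()


Thing.c    # prints: None <class '__main__.Thing'>
Thing().c  # prints: <__main__.Thing object at 0x...> <class '__main__.Thing'>
```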
### Example Code
```Python
from pydantic import validate_call
class Thing:
    def a(self):
        pass

    c = validate_call(a)


thing = Thing()
assert thing.a == thing.a
assert thing.c == thing.c, f'{thing.c} != {thing.c}'
assert Thing.c == Thing.c
```
### Python, Pydantic & OS Version
```Text
draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- -c 'import pydantic.version; print(pydantic.version.version_info())'
pydantic version: 2.0
pydantic-core version: 2.0.2 release build profile
install path: /private/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-hek15lsq/pydantic
python version: 3.11.4 (main, Jun 15 2023, 07:55:38) [Clang 14.0.3 (clang-1403.0.22.14.1)]
platform: macOS-13.4.1-arm64-arm-64bit
optional deps. installed: ['typing-extensions']
```
Selected Assignee: @Kludex
</issue>
<code>
[start of pydantic/_internal/_validate_call.py]
1 from __future__ import annotations as _annotations
2
3 import inspect
4 from dataclasses import dataclass
5 from functools import partial
6 from typing import Any, Callable
7
8 import pydantic_core
9
10 from ..config import ConfigDict
11 from . import _discriminated_union, _generate_schema, _typing_extra
12 from ._config import ConfigWrapper
13 from ._core_utils import flatten_schema_defs, inline_schema_defs
14
15
16 @dataclass
17 class CallMarker:
18 function: Callable[..., Any]
19 validate_return: bool
20
21
22 class ValidateCallWrapper:
23 """This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.
24
25 It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so
26 these functions can be applied to instance methods, class methods, static methods, as well as normal functions.
27 """
28
29 __slots__ = (
30 'raw_function',
31 '_config',
32 '_validate_return',
33 '__pydantic_core_schema__',
34 '__pydantic_validator__',
35 '__signature__',
36 '__name__',
37 '__qualname__',
38 '__annotations__',
39 '__dict__', # required for __module__
40 )
41
42 def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):
43 self.raw_function = function
44 self._config = config
45 self._validate_return = validate_return
46 self.__signature__ = inspect.signature(function)
47 if isinstance(function, partial):
48 func = function.func
49 self.__name__ = f'partial({func.__name__})'
50 self.__qualname__ = f'partial({func.__qualname__})'
51 self.__annotations__ = func.__annotations__
52 self.__module__ = func.__module__
53 self.__doc__ = func.__doc__
54 else:
55 self.__name__ = function.__name__
56 self.__qualname__ = function.__qualname__
57 self.__annotations__ = function.__annotations__
58 self.__module__ = function.__module__
59 self.__doc__ = function.__doc__
60
61 namespace = _typing_extra.add_module_globals(function, None)
62 config_wrapper = ConfigWrapper(config)
63 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
64 self.__pydantic_core_schema__ = schema = gen_schema.collect_definitions(gen_schema.generate_schema(function))
65 core_config = config_wrapper.core_config(self)
66 schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))
67 simplified_schema = inline_schema_defs(schema)
68 self.__pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)
69
70 if self._validate_return:
71 return_type = (
72 self.__signature__.return_annotation
73 if self.__signature__.return_annotation is not self.__signature__.empty
74 else Any
75 )
76 gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)
77 self.__return_pydantic_core_schema__ = schema = gen_schema.collect_definitions(
78 gen_schema.generate_schema(return_type)
79 )
80 core_config = config_wrapper.core_config(self)
81 schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))
82 simplified_schema = inline_schema_defs(schema)
83 self.__return_pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)
84 else:
85 self.__return_pydantic_core_schema__ = None
86 self.__return_pydantic_validator__ = None
87
88 self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods
89
90 def __call__(self, *args: Any, **kwargs: Any) -> Any:
91 res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))
92 if self.__return_pydantic_validator__:
93 return self.__return_pydantic_validator__.validate_python(res)
94 return res
95
96 def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:
97 """Bind the raw function and return another ValidateCallWrapper wrapping that."""
98 bound_function = self.raw_function.__get__(obj, objtype)
99 result = self.__class__(bound_function, self._config, self._validate_return)
100 if self._name is not None:
101 setattr(obj, self._name, result)
102 return result
103
104 def __set_name__(self, owner: Any, name: str) -> None:
105 self._name = name
106
107 def __repr__(self) -> str:
108 return f'ValidateCallWrapper({self.raw_function})'
109
[end of pydantic/_internal/_validate_call.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py
--- a/pydantic/_internal/_validate_call.py
+++ b/pydantic/_internal/_validate_call.py
@@ -95,10 +95,21 @@
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:
"""Bind the raw function and return another ValidateCallWrapper wrapping that."""
+ if obj is None:
+ try:
+ # Handle the case where a method is accessed as a class attribute
+ return objtype.__getattribute__(objtype, self._name) # type: ignore
+ except AttributeError:
+ # This will happen the first time the attribute is accessed
+ pass
+
bound_function = self.raw_function.__get__(obj, objtype)
result = self.__class__(bound_function, self._config, self._validate_return)
if self._name is not None:
- setattr(obj, self._name, result)
+ if obj is not None:
+ setattr(obj, self._name, result)
+ else:
+ setattr(objtype, self._name, result)
return result
def __set_name__(self, owner: Any, name: str) -> None:
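As a quick sanity check, the reproduction from the issue should pass once the `obj is None` branch is in place (hypothetical check, not part of the patch itself):

```Python
from pydantic import validate_call


class Thing:
    def a(self):
        pass

    c = validate_call(a)


thing = Thing()
assert thing.a == thing.a
assert thing.c == thing.c
assert Thing.c == Thing.c  # previously raised AttributeError inside __get__
```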
| {"golden_diff": "diff --git a/pydantic/_internal/_validate_call.py b/pydantic/_internal/_validate_call.py\n--- a/pydantic/_internal/_validate_call.py\n+++ b/pydantic/_internal/_validate_call.py\n@@ -95,10 +95,21 @@\n \n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n+ if obj is None:\n+ try:\n+ # Handle the case where a method is accessed as a class attribute\n+ return objtype.__getattribute__(objtype, self._name) # type: ignore\n+ except AttributeError:\n+ # This will happen the first time the attribute is accessed\n+ pass\n+\n bound_function = self.raw_function.__get__(obj, objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n- setattr(obj, self._name, result)\n+ if obj is not None:\n+ setattr(obj, self._name, result)\n+ else:\n+ setattr(objtype, self._name, result)\n return result\n \n def __set_name__(self, owner: Any, name: str) -> None:\n", "issue": "AttributeError in class attribute access for validate_call\n### Initial Checks\n\n- [X] I confirm that I'm using Pydantic V2 installed directly from the `main` branch, or equivalent\n\n### Description\n\nhttps://github.com/pydantic/pydantic/pull/6406 introduced an AttributeError when accessing a method on a class (not instance):\r\n\r\n```\r\n draft @ pip-run 'git+https://github.com/pydantic/pydantic' -- pyd2.py\r\nTraceback (most recent call last):\r\n File \"/Users/jaraco/draft/pyd2.py\", line 14, in <module>\r\n assert Thing.c == Thing.c\r\n ^^^^^^^\r\n File \"/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-o94_k6_k/pydantic/_internal/_validate_call.py\", line 101, in __get__\r\n setattr(obj, self._name, result)\r\nAttributeError: 'NoneType' object has no attribute 'c'\r\n```\n\n### Example Code\n\n```Python\nfrom pydantic import validate_call\r\n\r\n\r\nclass Thing:\r\n def a(self):\r\n pass\r\n\r\n c = validate_call(a)\r\n\r\n\r\nthing = Thing()\r\nassert thing.a == thing.a\r\nassert thing.c == thing.c, f'{thing.c} != {thing.c}'\r\nassert Thing.c == Thing.c\n```\n\n\n### Python, Pydantic & OS Version\n\n```Text\ndraft @ pip-run 'git+https://github.com/pydantic/pydantic' -- -c 'import pydantic.version; print(pydantic.version.version_info())'\r\n pydantic version: 2.0\r\n pydantic-core version: 2.0.2 release build profile\r\n install path: /private/var/folders/sx/n5gkrgfx6zd91ymxr2sr9wvw00n8zm/T/pip-run-hek15lsq/pydantic\r\n python version: 3.11.4 (main, Jun 15 2023, 07:55:38) [Clang 14.0.3 (clang-1403.0.22.14.1)]\r\n platform: macOS-13.4.1-arm64-arm-64bit\r\n optional deps. installed: ['typing-extensions']\n```\n\n\nSelected Assignee: @Kludex\n", "before_files": [{"content": "from __future__ import annotations as _annotations\n\nimport inspect\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Callable\n\nimport pydantic_core\n\nfrom ..config import ConfigDict\nfrom . 
import _discriminated_union, _generate_schema, _typing_extra\nfrom ._config import ConfigWrapper\nfrom ._core_utils import flatten_schema_defs, inline_schema_defs\n\n\n@dataclass\nclass CallMarker:\n function: Callable[..., Any]\n validate_return: bool\n\n\nclass ValidateCallWrapper:\n \"\"\"This is a wrapper around a function that validates the arguments passed to it, and optionally the return value.\n\n It's partially inspired by `wraps` which in turn uses `partial`, but extended to be a descriptor so\n these functions can be applied to instance methods, class methods, static methods, as well as normal functions.\n \"\"\"\n\n __slots__ = (\n 'raw_function',\n '_config',\n '_validate_return',\n '__pydantic_core_schema__',\n '__pydantic_validator__',\n '__signature__',\n '__name__',\n '__qualname__',\n '__annotations__',\n '__dict__', # required for __module__\n )\n\n def __init__(self, function: Callable[..., Any], config: ConfigDict | None, validate_return: bool):\n self.raw_function = function\n self._config = config\n self._validate_return = validate_return\n self.__signature__ = inspect.signature(function)\n if isinstance(function, partial):\n func = function.func\n self.__name__ = f'partial({func.__name__})'\n self.__qualname__ = f'partial({func.__qualname__})'\n self.__annotations__ = func.__annotations__\n self.__module__ = func.__module__\n self.__doc__ = func.__doc__\n else:\n self.__name__ = function.__name__\n self.__qualname__ = function.__qualname__\n self.__annotations__ = function.__annotations__\n self.__module__ = function.__module__\n self.__doc__ = function.__doc__\n\n namespace = _typing_extra.add_module_globals(function, None)\n config_wrapper = ConfigWrapper(config)\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__pydantic_core_schema__ = schema = gen_schema.collect_definitions(gen_schema.generate_schema(function))\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n\n if self._validate_return:\n return_type = (\n self.__signature__.return_annotation\n if self.__signature__.return_annotation is not self.__signature__.empty\n else Any\n )\n gen_schema = _generate_schema.GenerateSchema(config_wrapper, namespace)\n self.__return_pydantic_core_schema__ = schema = gen_schema.collect_definitions(\n gen_schema.generate_schema(return_type)\n )\n core_config = config_wrapper.core_config(self)\n schema = _discriminated_union.apply_discriminators(flatten_schema_defs(schema))\n simplified_schema = inline_schema_defs(schema)\n self.__return_pydantic_validator__ = pydantic_core.SchemaValidator(simplified_schema, core_config)\n else:\n self.__return_pydantic_core_schema__ = None\n self.__return_pydantic_validator__ = None\n\n self._name: str | None = None # set by __get__, used to set the instance attribute when decorating methods\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n res = self.__pydantic_validator__.validate_python(pydantic_core.ArgsKwargs(args, kwargs))\n if self.__return_pydantic_validator__:\n return self.__return_pydantic_validator__.validate_python(res)\n return res\n\n def __get__(self, obj: Any, objtype: type[Any] | None = None) -> ValidateCallWrapper:\n \"\"\"Bind the raw function and return another ValidateCallWrapper wrapping that.\"\"\"\n bound_function = self.raw_function.__get__(obj, 
objtype)\n result = self.__class__(bound_function, self._config, self._validate_return)\n if self._name is not None:\n setattr(obj, self._name, result)\n return result\n\n def __set_name__(self, owner: Any, name: str) -> None:\n self._name = name\n\n def __repr__(self) -> str:\n return f'ValidateCallWrapper({self.raw_function})'\n", "path": "pydantic/_internal/_validate_call.py"}]} | 2,289 | 290 |
gh_patches_debug_13972 | rasdani/github-patches | git_diff | jazzband__pip-tools-723 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip-sync broken on Windows
Calling pip-sync on Windows returns:
```
Could not open requirements file: [Errno 13] Permission denied: 'c:\\users\\<username>\\appdata\\local\\temp\\tmpe9jswo'
```
This was introduced in 3.3.1 by the change to using `tempfile.NamedTemporaryFile` in [sync.py](https://github.com/jazzband/pip-tools/blob/3.3.1/piptools/sync.py#L167) to create the requirements file before passing it to `pip install` via `subprocess.check_call`. The failure is Windows-specific: a file created by `tempfile.NamedTemporaryFile` is opened with the `O_TEMPORARY` flag, and Windows will not let another process open it again while it is still held open, so the spawned `pip` process cannot read it. For context see https://stackoverflow.com/a/15235559.
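A sketch of the usual workaround — create the file with `delete=False`, close it before handing the path to the subprocess, and clean it up afterwards (illustrative only; the requirement line is a placeholder):

```python
import os
import tempfile
from subprocess import check_call

req_lines = ['six==1.11.0']  # placeholder requirement for illustration

tmp = tempfile.NamedTemporaryFile(mode='wt', delete=False)
tmp.write('\n'.join(req_lines))
tmp.close()  # close first: Windows will not let pip reopen the file otherwise
try:
    check_call(['pip', 'install', '-r', tmp.name])
finally:
    os.unlink(tmp.name)
```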
##### Environment Versions
1. OS Type: Windows 10
1. Python version: Python 2.7.12
1. pip version: pip 9.0.3
1. pip-tools version: pip-compile, version 3.3.1
##### Steps to replicate
1. `pip-sync -r <requirements_file>`
##### Expected result
Command should complete successfully.
##### Actual result
```
Could not open requirements file: [Errno 13] Permission denied: 'c:\\users\\<username>\\appdata\\local\\temp\\tmpe9jswo'
```
</issue>
<code>
[start of piptools/sync.py]
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call
6
7 from piptools._compat import stdlib_pkgs, DEV_PKGS
8 from . import click
9 from .exceptions import IncompatibleRequirements, UnsupportedConstraint
10 from .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq
11
12 PACKAGES_TO_IGNORE = [
13 '-markerlib',
14 'pip',
15 'pip-tools',
16 'pip-review',
17 'pkg-resources',
18 ] + list(stdlib_pkgs) + list(DEV_PKGS)
19
20
21 def dependency_tree(installed_keys, root_key):
22 """
23 Calculate the dependency tree for the package `root_key` and return
24 a collection of all its dependencies. Uses a DFS traversal algorithm.
25
26 `installed_keys` should be a {key: requirement} mapping, e.g.
27 {'django': from_line('django==1.8')}
28 `root_key` should be the key to return the dependency tree for.
29 """
30 dependencies = set()
31 queue = collections.deque()
32
33 if root_key in installed_keys:
34 dep = installed_keys[root_key]
35 queue.append(dep)
36
37 while queue:
38 v = queue.popleft()
39 key = key_from_req(v)
40 if key in dependencies:
41 continue
42
43 dependencies.add(key)
44
45 for dep_specifier in v.requires():
46 dep_name = key_from_req(dep_specifier)
47 if dep_name in installed_keys:
48 dep = installed_keys[dep_name]
49
50 if dep_specifier.specifier.contains(dep.version):
51 queue.append(dep)
52
53 return dependencies
54
55
56 def get_dists_to_ignore(installed):
57 """
58 Returns a collection of package names to ignore when performing pip-sync,
59 based on the currently installed environment. For example, when pip-tools
60 is installed in the local environment, it should be ignored, including all
61 of its dependencies (e.g. click). When pip-tools is not installed
62 locally, click should also be installed/uninstalled depending on the given
63 requirements.
64 """
65 installed_keys = {key_from_req(r): r for r in installed}
66 return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))
67
68
69 def merge(requirements, ignore_conflicts):
70 by_key = {}
71
72 for ireq in requirements:
73 if ireq.link is not None and not ireq.editable:
74 msg = ('pip-compile does not support URLs as packages, unless they are editable. '
75 'Perhaps add -e option?')
76 raise UnsupportedConstraint(msg, ireq)
77
78 key = ireq.link or key_from_req(ireq.req)
79
80 if not ignore_conflicts:
81 existing_ireq = by_key.get(key)
82 if existing_ireq:
83 # NOTE: We check equality here since we can assume that the
84 # requirements are all pinned
85 if ireq.specifier != existing_ireq.specifier:
86 raise IncompatibleRequirements(ireq, existing_ireq)
87
88 # TODO: Always pick the largest specifier in case of a conflict
89 by_key[key] = ireq
90
91 return by_key.values()
92
93
94 def diff(compiled_requirements, installed_dists):
95 """
96 Calculate which packages should be installed or uninstalled, given a set
97 of compiled requirements and a list of currently installed modules.
98 """
99 requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}
100
101 satisfied = set() # holds keys
102 to_install = set() # holds InstallRequirement objects
103 to_uninstall = set() # holds keys
104
105 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
106 for dist in installed_dists:
107 key = key_from_req(dist)
108 if key not in requirements_lut or not requirements_lut[key].match_markers():
109 to_uninstall.add(key)
110 elif requirements_lut[key].specifier.contains(dist.version):
111 satisfied.add(key)
112
113 for key, requirement in requirements_lut.items():
114 if key not in satisfied and requirement.match_markers():
115 to_install.add(requirement)
116
117 # Make sure to not uninstall any packages that should be ignored
118 to_uninstall -= set(pkgs_to_ignore)
119
120 return (to_install, to_uninstall)
121
122
123 def sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):
124 """
125 Install and uninstalls the given sets of modules.
126 """
127 if not to_uninstall and not to_install:
128 click.echo("Everything up-to-date")
129
130 if pip_flags is None:
131 pip_flags = []
132
133 if not verbose:
134 pip_flags += ['-q']
135
136 if os.environ.get('VIRTUAL_ENV'):
137 # find pip via PATH
138 pip = 'pip'
139 else:
140 # find pip in same directory as pip-sync entry-point script
141 pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')
142
143 if to_uninstall:
144 if dry_run:
145 click.echo("Would uninstall:")
146 for pkg in to_uninstall:
147 click.echo(" {}".format(pkg))
148 else:
149 check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))
150
151 if to_install:
152 if install_flags is None:
153 install_flags = []
154 if dry_run:
155 click.echo("Would install:")
156 for ireq in to_install:
157 click.echo(" {}".format(format_requirement(ireq)))
158 else:
159 # prepare requirement lines
160 req_lines = []
161 for ireq in sorted(to_install, key=key_from_ireq):
162 ireq_hashes = get_hashes_from_ireq(ireq)
163 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
164
165 # save requirement lines to a temporary file
166 with tempfile.NamedTemporaryFile(mode='wt') as tmp_req_file:
167 tmp_req_file.write('\n'.join(req_lines))
168 tmp_req_file.flush()
169
170 check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
171 return 0
172
[end of piptools/sync.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -163,9 +163,13 @@
req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
# save requirement lines to a temporary file
- with tempfile.NamedTemporaryFile(mode='wt') as tmp_req_file:
- tmp_req_file.write('\n'.join(req_lines))
- tmp_req_file.flush()
+ tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)
+ tmp_req_file.write('\n'.join(req_lines))
+ tmp_req_file.close()
+ try:
check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)
+ finally:
+ os.unlink(tmp_req_file.name)
+
return 0
| {"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -163,9 +163,13 @@\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n \n # save requirement lines to a temporary file\n- with tempfile.NamedTemporaryFile(mode='wt') as tmp_req_file:\n- tmp_req_file.write('\\n'.join(req_lines))\n- tmp_req_file.flush()\n+ tmp_req_file = tempfile.NamedTemporaryFile(mode='wt', delete=False)\n+ tmp_req_file.write('\\n'.join(req_lines))\n+ tmp_req_file.close()\n \n+ try:\n check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n+ finally:\n+ os.unlink(tmp_req_file.name)\n+\n return 0\n", "issue": "pip-sync broken on Windows\nCalling pip-sync on Windows returns:\r\n```\r\nCould not open requirements file: [Errno 13] Permission denied: 'c:\\\\users\\\\<username>\\\\appdata\\\\local\\\\temp\\\\tmpe9jswo'\r\n```\r\n\r\nThis was introduced in 3.3.1 by the change to using `tempfile.NamedTemporaryFile` in [sync.py](https://github.com/jazzband/pip-tools/blob/3.3.1/piptools/sync.py#L167) to create the requirements file before passing it to `pip install` via `subprocess.check_call`. This is caused by Windows requiring reopened temporary files to be opened with the `O_TEMPORARY` flag. For context see https://stackoverflow.com/a/15235559. \r\n\r\n##### Environment Versions\r\n\r\n1. OS Type: Windows 10\r\n1. Python version: Python 2.7.12\r\n1. pip version: pip 9.0.3\r\n1. pip-tools version: pip-compile, version 3.3.1\r\n\r\n##### Steps to replicate\r\n\r\n1. `pip-sync -r <requirements_file>`\r\n\r\n##### Expected result\r\n\r\nCommand should complete successfully.\r\n\r\n##### Actual result\r\n\r\n```\r\nCould not open requirements file: [Errno 13] Permission denied: 'c:\\\\users\\\\<username>\\\\appdata\\\\local\\\\temp\\\\tmpe9jswo'\r\n```\r\n\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call\n\nfrom piptools._compat import stdlib_pkgs, DEV_PKGS\nfrom . import click\nfrom .exceptions import IncompatibleRequirements, UnsupportedConstraint\nfrom .utils import flat_map, format_requirement, key_from_ireq, key_from_req, get_hashes_from_ireq\n\nPACKAGES_TO_IGNORE = [\n '-markerlib',\n 'pip',\n 'pip-tools',\n 'pip-review',\n 'pkg-resources',\n] + list(stdlib_pkgs) + list(DEV_PKGS)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. 
For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE))\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n if ireq.link is not None and not ireq.editable:\n msg = ('pip-compile does not support URLs as packages, unless they are editable. '\n 'Perhaps add -e option?')\n raise UnsupportedConstraint(msg, ireq)\n\n key = ireq.link or key_from_req(ireq.req)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n\n return by_key.values()\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {r.link or key_from_req(r.req): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(to_install, to_uninstall, verbose=False, dry_run=False, pip_flags=None, install_flags=None):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n click.echo(\"Everything up-to-date\")\n\n if pip_flags is None:\n pip_flags = []\n\n if not verbose:\n pip_flags += ['-q']\n\n if os.environ.get('VIRTUAL_ENV'):\n # find pip via PATH\n pip = 'pip'\n else:\n # find pip in same directory as pip-sync entry-point script\n pip = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'pip')\n\n if to_uninstall:\n if dry_run:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n else:\n check_call([pip, 'uninstall', '-y'] + pip_flags + sorted(to_uninstall))\n\n if to_install:\n if install_flags is None:\n install_flags = []\n if dry_run:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n else:\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n with tempfile.NamedTemporaryFile(mode='wt') as tmp_req_file:\n tmp_req_file.write('\\n'.join(req_lines))\n 
tmp_req_file.flush()\n\n check_call([pip, 'install', '-r', tmp_req_file.name] + pip_flags + install_flags)\n return 0\n", "path": "piptools/sync.py"}]} | 2,574 | 200 |
gh_patches_debug_31129 | rasdani/github-patches | git_diff | e-valuation__EvaP-1614 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Log M2M Clear
With #1579, participants and contributors of an evaluation can be replaced. In the code, the participants/contributors are cleared first and then the new ones are added.
The log only lists the newly added participants/contributors and does not show an entry for the deleted ones.
The deleted participants/contributors should also be logged.
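For context, Django's `m2m_changed` signal reports a `clear()` with `pk_set=None`, so the members being removed are only available if they are read before the relation is emptied. A minimal sketch of capturing them (illustrative only, not EvaP's actual logging API):

```python
from django.db.models.signals import m2m_changed
from django.dispatch import receiver


@receiver(m2m_changed)
def capture_cleared_members(sender, instance, action, reverse, pk_set, **kwargs):
    if reverse or action != "pre_clear":
        return
    # pk_set is None for clear(); look up which m2m field this through model belongs to
    field_name = next(
        (f.name for f in type(instance)._meta.many_to_many
         if getattr(type(instance), f.name).through == sender),
        None,
    )
    if field_name is None:
        return
    # read the members that are about to be removed, before the clear happens
    removed_pks = list(getattr(instance, field_name).values_list("pk", flat=True))
    # removed_pks can then be recorded alongside the added ones
```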
</issue>
<code>
[start of evap/evaluation/models_logging.py]
1 from collections import defaultdict, namedtuple
2 from datetime import date, datetime, time
3 from enum import Enum
4 import itertools
5 import threading
6 from json import JSONEncoder
7
8 from django.conf import settings
9 from django.contrib.contenttypes.fields import GenericForeignKey
10 from django.contrib.contenttypes.models import ContentType
11 from django.db import models
12 from django.db.models.signals import m2m_changed
13 from django.dispatch import receiver
14 from django.forms.models import model_to_dict
15 from django.template.defaultfilters import yesno
16 from django.utils.formats import localize
17 from django.utils.translation import gettext_lazy as _
18
19 from evap.evaluation.tools import capitalize_first
20
21
22 class FieldActionType(str, Enum):
23 M2M_ADD = "add"
24 M2M_REMOVE = "remove"
25 M2M_CLEAR = "clear"
26 INSTANCE_CREATE = "create"
27 VALUE_CHANGE = "change"
28 INSTANCE_DELETE = "delete"
29
30
31 FieldAction = namedtuple("FieldAction", "label type items")
32
33
34 class InstanceActionType(str, Enum):
35 CREATE = "create"
36 CHANGE = "change"
37 DELETE = "delete"
38
39
40 class LogJSONEncoder(JSONEncoder):
41 """
42 As JSON can't store datetime objects, we localize them to strings.
43 """
44
45 def default(self, o):
46 # o is the object to serialize -- we can't rename the argument in JSONEncoder
47 if isinstance(o, (date, time, datetime)):
48 return localize(o)
49 return super().default(o)
50
51
52 def _choice_to_display(field, choice): # does not support nested choices
53 for key, label in field.choices:
54 if key == choice:
55 return label
56 return choice
57
58
59 def _field_actions_for_field(field, actions):
60 label = capitalize_first(getattr(field, "verbose_name", field.name))
61
62 for field_action_type, items in actions.items():
63 if field.many_to_many or field.many_to_one or field.one_to_one:
64 # convert item values from primary keys to string-representation for relation-based fields
65 related_objects = field.related_model.objects.filter(pk__in=items)
66 missing = len(items) - related_objects.count()
67 items = [str(obj) for obj in related_objects] + [_("<deleted object>")] * missing
68 elif hasattr(field, "choices") and field.choices:
69 # convert values from choice-based fields to their display equivalent
70 items = [_choice_to_display(field, item) for item in items]
71 elif isinstance(field, models.BooleanField):
72 # convert boolean to yes/no
73 items = list(map(yesno, items))
74 yield FieldAction(label, field_action_type, items)
75
76
77 class LogEntry(models.Model):
78 content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="logs_about_me")
79 content_object_id = models.PositiveIntegerField(db_index=True)
80 content_object = GenericForeignKey("content_type", "content_object_id")
81 attached_to_object_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name="logs_for_me")
82 attached_to_object_id = models.PositiveIntegerField(db_index=True)
83 attached_to_object = GenericForeignKey("attached_to_object_type", "attached_to_object_id")
84 datetime = models.DateTimeField(auto_now_add=True)
85 user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.PROTECT)
86 action_type = models.CharField(max_length=255, choices=[(value, value) for value in InstanceActionType])
87 request_id = models.CharField(max_length=36, null=True, blank=True)
88 data = models.JSONField(default=dict, encoder=LogJSONEncoder)
89
90 class Meta:
91 ordering = ["-datetime", "-id"]
92
93 @property
94 def field_context_data(self):
95 model = self.content_type.model_class()
96 return {
97 field_name: [
98 getattr(model, "transform_log_action", LoggedModel.transform_log_action)(field_action)
99 for field_action in _field_actions_for_field(model._meta.get_field(field_name), actions)
100 ]
101 for field_name, actions in self.data.items()
102 }
103
104 @property
105 def message(self):
106 if self.action_type == InstanceActionType.CHANGE:
107 if self.content_object:
108 message = _("The {cls} {obj} was changed.")
109 else: # content_object might be deleted
110 message = _("A {cls} was changed.")
111 elif self.action_type == InstanceActionType.CREATE:
112 if self.content_object:
113 message = _("The {cls} {obj} was created.")
114 else:
115 message = _("A {cls} was created.")
116 elif self.action_type == InstanceActionType.DELETE:
117 message = _("A {cls} was deleted.")
118
119 return message.format(
120 cls=capitalize_first(self.content_type.model_class()._meta.verbose_name),
121 obj=f'"{str(self.content_object)}"' if self.content_object else "",
122 )
123
124
125 class LoggedModel(models.Model):
126 thread = threading.local()
127
128 class Meta:
129 abstract = True
130
131 def __init__(self, *args, **kwargs):
132 super().__init__(*args, **kwargs)
133 self._logentry = None
134
135 def _as_dict(self):
136 """
137 Return a dict mapping field names to values saved in this instance.
138 Only include field names that are not to be ignored for logging and
139 that don't name m2m fields.
140 """
141 fields = [
142 field.name
143 for field in type(self)._meta.get_fields()
144 if field.name not in self.unlogged_fields and not field.many_to_many
145 ]
146 return model_to_dict(self, fields)
147
148 def _get_change_data(self, action_type: InstanceActionType):
149 """
150 Return a dict mapping field names to changes that happened in this model instance,
151 depending on the action that is being done to the instance.
152 """
153 self_dict = self._as_dict()
154 if action_type == InstanceActionType.CREATE:
155 changes = {
156 field_name: {FieldActionType.INSTANCE_CREATE: [created_value]}
157 for field_name, created_value in self_dict.items()
158 if created_value is not None
159 }
160 elif action_type == InstanceActionType.CHANGE:
161 old_dict = type(self).objects.get(pk=self.pk)._as_dict()
162 changes = {
163 field_name: {FieldActionType.VALUE_CHANGE: [old_value, self_dict[field_name]]}
164 for field_name, old_value in old_dict.items()
165 if old_value != self_dict[field_name]
166 }
167 elif action_type == InstanceActionType.DELETE:
168 old_dict = type(self).objects.get(pk=self.pk)._as_dict()
169 changes = {
170 field_name: {FieldActionType.INSTANCE_DELETE: [deleted_value]}
171 for field_name, deleted_value in old_dict.items()
172 if deleted_value is not None
173 }
174 # as the instance is being deleted, we also need to pull out all m2m values
175 m2m_field_names = [
176 field.name for field in type(self)._meta.many_to_many if field.name not in self.unlogged_fields
177 ]
178 for field_name, related_objects in model_to_dict(self, m2m_field_names).items():
179 changes[field_name] = {FieldActionType.INSTANCE_DELETE: [obj.pk for obj in related_objects]}
180 else:
181 raise ValueError("Unknown action type: '{}'".format(action_type))
182
183 return changes
184
185 def log_m2m_change(self, changes):
186 self._update_log(changes, InstanceActionType.CHANGE)
187
188 def log_instance_create(self):
189 changes = self._get_change_data(InstanceActionType.CREATE)
190 self._update_log(changes, InstanceActionType.CREATE)
191
192 def log_instance_change(self):
193 changes = self._get_change_data(InstanceActionType.CHANGE)
194 self._update_log(changes, InstanceActionType.CHANGE)
195
196 def log_instance_delete(self):
197 changes = self._get_change_data(InstanceActionType.DELETE)
198 self._update_log(changes, InstanceActionType.DELETE)
199
200 def _update_log(self, changes, action_type: InstanceActionType):
201 if not changes:
202 return
203
204 if not self._logentry:
205 try:
206 user = self.thread.request.user
207 request_id = self.thread.request_id
208 except AttributeError:
209 user = None
210 request_id = None
211 attach_to_model, attached_to_object_id = self.object_to_attach_logentries_to
212 attached_to_object_type = ContentType.objects.get_for_model(attach_to_model)
213 self._logentry = LogEntry(
214 content_object=self,
215 attached_to_object_type=attached_to_object_type,
216 attached_to_object_id=attached_to_object_id,
217 user=user,
218 request_id=request_id,
219 action_type=action_type,
220 data=changes,
221 )
222 else:
223 self._logentry.data.update(changes)
224
225 self._logentry.save()
226
227 def save(self, *args, **kw):
228 # Are we creating a new instance?
229 # https://docs.djangoproject.com/en/3.0/ref/models/instances/#customizing-model-loading
230 if self._state.adding:
231 # we need to attach a logentry to an existing object, so we save this newly created instance first
232 super().save(*args, **kw)
233 self.log_instance_create()
234 else:
235 # when saving an existing instance, we get changes by comparing to the version from the database
236 # therefore we save the instance after building the logentry
237 self.log_instance_change()
238 super().save(*args, **kw)
239
240 def delete(self, *args, **kw):
241 self.log_instance_delete()
242 self.related_logentries().delete()
243 super().delete(*args, **kw)
244
245 def related_logentries(self):
246 """
247 Return a queryset with all logentries that should be shown with this model.
248 """
249 return LogEntry.objects.filter(
250 attached_to_object_type=ContentType.objects.get_for_model(type(self)),
251 attached_to_object_id=self.pk,
252 )
253
254 def grouped_logentries(self):
255 """
256 Returns a list of lists of logentries for display. The order is not changed.
257 Logentries are grouped if they have a matching request_id.
258 """
259 yield from (
260 list(group)
261 for key, group in itertools.groupby(
262 self.related_logentries().select_related("user"),
263 lambda entry: entry.request_id or entry.pk,
264 )
265 )
266
267 @property
268 def object_to_attach_logentries_to(self):
269 """
270 Return a model class and primary key for the object for which this logentry should be shown.
271 By default, show it to the object described by the logentry itself.
272
273 Returning the model instance directly might rely on fetching that object from the database,
274 which can break bulk loading in some cases, so we don't do that.
275 """
276 return type(self), self.pk
277
278 @property
279 def unlogged_fields(self):
280 """Specify a list of field names so that these fields don't get logged."""
281 return ["id", "order"]
282
283 @staticmethod
284 def transform_log_action(field_action):
285 return field_action
286
287
288 @receiver(m2m_changed)
289 def _m2m_changed(sender, instance, action, reverse, model, pk_set, **kwargs): # pylint: disable=unused-argument
290 if reverse:
291 return
292 if not isinstance(instance, LoggedModel):
293 return
294
295 field_name = next(
296 (
297 field.name
298 for field in type(instance)._meta.many_to_many
299 if getattr(type(instance), field.name).through == sender
300 ),
301 None,
302 )
303
304 if field_name in instance.unlogged_fields:
305 return
306
307 m2m_changes = defaultdict(lambda: defaultdict(list))
308 if action == "pre_remove":
309 m2m_changes[field_name][FieldActionType.M2M_REMOVE] += list(pk_set)
310 elif action == "pre_add":
311 m2m_changes[field_name][FieldActionType.M2M_ADD] += list(pk_set)
312 elif action == "pre_clear":
313 m2m_changes[field_name][FieldActionType.M2M_CLEAR] = []
314
315 if m2m_changes:
316 instance.log_m2m_change(m2m_changes)
317
[end of evap/evaluation/models_logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/evap/evaluation/models_logging.py b/evap/evaluation/models_logging.py
--- a/evap/evaluation/models_logging.py
+++ b/evap/evaluation/models_logging.py
@@ -131,6 +131,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._logentry = None
+ self._m2m_changes = defaultdict(lambda: defaultdict(list))
def _as_dict(self):
"""
@@ -182,8 +183,12 @@
return changes
- def log_m2m_change(self, changes):
- self._update_log(changes, InstanceActionType.CHANGE)
+ def log_m2m_change(self, field_name, action_type: FieldActionType, change_list):
+ # This might be called multiple times with cumulating changes
+ # But this is fine, since the old changes will be included in the latest log update
+ # See https://github.com/e-valuation/EvaP/issues/1594
+ self._m2m_changes[field_name][action_type] += change_list
+ self._update_log(self._m2m_changes, InstanceActionType.CHANGE)
def log_instance_create(self):
changes = self._get_change_data(InstanceActionType.CREATE)
@@ -304,13 +309,9 @@
if field_name in instance.unlogged_fields:
return
- m2m_changes = defaultdict(lambda: defaultdict(list))
if action == "pre_remove":
- m2m_changes[field_name][FieldActionType.M2M_REMOVE] += list(pk_set)
+ instance.log_m2m_change(field_name, FieldActionType.M2M_REMOVE, list(pk_set))
elif action == "pre_add":
- m2m_changes[field_name][FieldActionType.M2M_ADD] += list(pk_set)
+ instance.log_m2m_change(field_name, FieldActionType.M2M_ADD, list(pk_set))
elif action == "pre_clear":
- m2m_changes[field_name][FieldActionType.M2M_CLEAR] = []
-
- if m2m_changes:
- instance.log_m2m_change(m2m_changes)
+ instance.log_m2m_change(field_name, FieldActionType.M2M_CLEAR, [])
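With the `_m2m_changes` accumulator in place, a replace operation on one in-memory instance (clear followed by add) ends up in a single `LogEntry` — roughly (hypothetical pks):

```python
evaluation.participants.clear()       # recorded as M2M_CLEAR
evaluation.participants.add(1, 2, 3)  # recorded as M2M_ADD [1, 2, 3] in the same LogEntry
```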
| {"golden_diff": "diff --git a/evap/evaluation/models_logging.py b/evap/evaluation/models_logging.py\n--- a/evap/evaluation/models_logging.py\n+++ b/evap/evaluation/models_logging.py\n@@ -131,6 +131,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._logentry = None\n+ self._m2m_changes = defaultdict(lambda: defaultdict(list))\n \n def _as_dict(self):\n \"\"\"\n@@ -182,8 +183,12 @@\n \n return changes\n \n- def log_m2m_change(self, changes):\n- self._update_log(changes, InstanceActionType.CHANGE)\n+ def log_m2m_change(self, field_name, action_type: FieldActionType, change_list):\n+ # This might be called multiple times with cumulating changes\n+ # But this is fine, since the old changes will be included in the latest log update\n+ # See https://github.com/e-valuation/EvaP/issues/1594\n+ self._m2m_changes[field_name][action_type] += change_list\n+ self._update_log(self._m2m_changes, InstanceActionType.CHANGE)\n \n def log_instance_create(self):\n changes = self._get_change_data(InstanceActionType.CREATE)\n@@ -304,13 +309,9 @@\n if field_name in instance.unlogged_fields:\n return\n \n- m2m_changes = defaultdict(lambda: defaultdict(list))\n if action == \"pre_remove\":\n- m2m_changes[field_name][FieldActionType.M2M_REMOVE] += list(pk_set)\n+ instance.log_m2m_change(field_name, FieldActionType.M2M_REMOVE, list(pk_set))\n elif action == \"pre_add\":\n- m2m_changes[field_name][FieldActionType.M2M_ADD] += list(pk_set)\n+ instance.log_m2m_change(field_name, FieldActionType.M2M_ADD, list(pk_set))\n elif action == \"pre_clear\":\n- m2m_changes[field_name][FieldActionType.M2M_CLEAR] = []\n-\n- if m2m_changes:\n- instance.log_m2m_change(m2m_changes)\n+ instance.log_m2m_change(field_name, FieldActionType.M2M_CLEAR, [])\n", "issue": "Log M2M Clear\nWith #1579 participants and contributors of an evaluation can be replaced. 
In the code, the participants/contributors are cleared first and then the new ones are added.\r\nThe log only lists the newly added participants/contributors and does not show an entry for the deleted ones.\r\nThe deleted participants/contributors should also be logged.\n", "before_files": [{"content": "from collections import defaultdict, namedtuple\nfrom datetime import date, datetime, time\nfrom enum import Enum\nimport itertools\nimport threading\nfrom json import JSONEncoder\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.db.models.signals import m2m_changed\nfrom django.dispatch import receiver\nfrom django.forms.models import model_to_dict\nfrom django.template.defaultfilters import yesno\nfrom django.utils.formats import localize\nfrom django.utils.translation import gettext_lazy as _\n\nfrom evap.evaluation.tools import capitalize_first\n\n\nclass FieldActionType(str, Enum):\n M2M_ADD = \"add\"\n M2M_REMOVE = \"remove\"\n M2M_CLEAR = \"clear\"\n INSTANCE_CREATE = \"create\"\n VALUE_CHANGE = \"change\"\n INSTANCE_DELETE = \"delete\"\n\n\nFieldAction = namedtuple(\"FieldAction\", \"label type items\")\n\n\nclass InstanceActionType(str, Enum):\n CREATE = \"create\"\n CHANGE = \"change\"\n DELETE = \"delete\"\n\n\nclass LogJSONEncoder(JSONEncoder):\n \"\"\"\n As JSON can't store datetime objects, we localize them to strings.\n \"\"\"\n\n def default(self, o):\n # o is the object to serialize -- we can't rename the argument in JSONEncoder\n if isinstance(o, (date, time, datetime)):\n return localize(o)\n return super().default(o)\n\n\ndef _choice_to_display(field, choice): # does not support nested choices\n for key, label in field.choices:\n if key == choice:\n return label\n return choice\n\n\ndef _field_actions_for_field(field, actions):\n label = capitalize_first(getattr(field, \"verbose_name\", field.name))\n\n for field_action_type, items in actions.items():\n if field.many_to_many or field.many_to_one or field.one_to_one:\n # convert item values from primary keys to string-representation for relation-based fields\n related_objects = field.related_model.objects.filter(pk__in=items)\n missing = len(items) - related_objects.count()\n items = [str(obj) for obj in related_objects] + [_(\"<deleted object>\")] * missing\n elif hasattr(field, \"choices\") and field.choices:\n # convert values from choice-based fields to their display equivalent\n items = [_choice_to_display(field, item) for item in items]\n elif isinstance(field, models.BooleanField):\n # convert boolean to yes/no\n items = list(map(yesno, items))\n yield FieldAction(label, field_action_type, items)\n\n\nclass LogEntry(models.Model):\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name=\"logs_about_me\")\n content_object_id = models.PositiveIntegerField(db_index=True)\n content_object = GenericForeignKey(\"content_type\", \"content_object_id\")\n attached_to_object_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, related_name=\"logs_for_me\")\n attached_to_object_id = models.PositiveIntegerField(db_index=True)\n attached_to_object = GenericForeignKey(\"attached_to_object_type\", \"attached_to_object_id\")\n datetime = models.DateTimeField(auto_now_add=True)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.PROTECT)\n action_type = models.CharField(max_length=255, 
choices=[(value, value) for value in InstanceActionType])\n request_id = models.CharField(max_length=36, null=True, blank=True)\n data = models.JSONField(default=dict, encoder=LogJSONEncoder)\n\n class Meta:\n ordering = [\"-datetime\", \"-id\"]\n\n @property\n def field_context_data(self):\n model = self.content_type.model_class()\n return {\n field_name: [\n getattr(model, \"transform_log_action\", LoggedModel.transform_log_action)(field_action)\n for field_action in _field_actions_for_field(model._meta.get_field(field_name), actions)\n ]\n for field_name, actions in self.data.items()\n }\n\n @property\n def message(self):\n if self.action_type == InstanceActionType.CHANGE:\n if self.content_object:\n message = _(\"The {cls} {obj} was changed.\")\n else: # content_object might be deleted\n message = _(\"A {cls} was changed.\")\n elif self.action_type == InstanceActionType.CREATE:\n if self.content_object:\n message = _(\"The {cls} {obj} was created.\")\n else:\n message = _(\"A {cls} was created.\")\n elif self.action_type == InstanceActionType.DELETE:\n message = _(\"A {cls} was deleted.\")\n\n return message.format(\n cls=capitalize_first(self.content_type.model_class()._meta.verbose_name),\n obj=f'\"{str(self.content_object)}\"' if self.content_object else \"\",\n )\n\n\nclass LoggedModel(models.Model):\n thread = threading.local()\n\n class Meta:\n abstract = True\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._logentry = None\n\n def _as_dict(self):\n \"\"\"\n Return a dict mapping field names to values saved in this instance.\n Only include field names that are not to be ignored for logging and\n that don't name m2m fields.\n \"\"\"\n fields = [\n field.name\n for field in type(self)._meta.get_fields()\n if field.name not in self.unlogged_fields and not field.many_to_many\n ]\n return model_to_dict(self, fields)\n\n def _get_change_data(self, action_type: InstanceActionType):\n \"\"\"\n Return a dict mapping field names to changes that happened in this model instance,\n depending on the action that is being done to the instance.\n \"\"\"\n self_dict = self._as_dict()\n if action_type == InstanceActionType.CREATE:\n changes = {\n field_name: {FieldActionType.INSTANCE_CREATE: [created_value]}\n for field_name, created_value in self_dict.items()\n if created_value is not None\n }\n elif action_type == InstanceActionType.CHANGE:\n old_dict = type(self).objects.get(pk=self.pk)._as_dict()\n changes = {\n field_name: {FieldActionType.VALUE_CHANGE: [old_value, self_dict[field_name]]}\n for field_name, old_value in old_dict.items()\n if old_value != self_dict[field_name]\n }\n elif action_type == InstanceActionType.DELETE:\n old_dict = type(self).objects.get(pk=self.pk)._as_dict()\n changes = {\n field_name: {FieldActionType.INSTANCE_DELETE: [deleted_value]}\n for field_name, deleted_value in old_dict.items()\n if deleted_value is not None\n }\n # as the instance is being deleted, we also need to pull out all m2m values\n m2m_field_names = [\n field.name for field in type(self)._meta.many_to_many if field.name not in self.unlogged_fields\n ]\n for field_name, related_objects in model_to_dict(self, m2m_field_names).items():\n changes[field_name] = {FieldActionType.INSTANCE_DELETE: [obj.pk for obj in related_objects]}\n else:\n raise ValueError(\"Unknown action type: '{}'\".format(action_type))\n\n return changes\n\n def log_m2m_change(self, changes):\n self._update_log(changes, InstanceActionType.CHANGE)\n\n def log_instance_create(self):\n changes = 
self._get_change_data(InstanceActionType.CREATE)\n self._update_log(changes, InstanceActionType.CREATE)\n\n def log_instance_change(self):\n changes = self._get_change_data(InstanceActionType.CHANGE)\n self._update_log(changes, InstanceActionType.CHANGE)\n\n def log_instance_delete(self):\n changes = self._get_change_data(InstanceActionType.DELETE)\n self._update_log(changes, InstanceActionType.DELETE)\n\n def _update_log(self, changes, action_type: InstanceActionType):\n if not changes:\n return\n\n if not self._logentry:\n try:\n user = self.thread.request.user\n request_id = self.thread.request_id\n except AttributeError:\n user = None\n request_id = None\n attach_to_model, attached_to_object_id = self.object_to_attach_logentries_to\n attached_to_object_type = ContentType.objects.get_for_model(attach_to_model)\n self._logentry = LogEntry(\n content_object=self,\n attached_to_object_type=attached_to_object_type,\n attached_to_object_id=attached_to_object_id,\n user=user,\n request_id=request_id,\n action_type=action_type,\n data=changes,\n )\n else:\n self._logentry.data.update(changes)\n\n self._logentry.save()\n\n def save(self, *args, **kw):\n # Are we creating a new instance?\n # https://docs.djangoproject.com/en/3.0/ref/models/instances/#customizing-model-loading\n if self._state.adding:\n # we need to attach a logentry to an existing object, so we save this newly created instance first\n super().save(*args, **kw)\n self.log_instance_create()\n else:\n # when saving an existing instance, we get changes by comparing to the version from the database\n # therefore we save the instance after building the logentry\n self.log_instance_change()\n super().save(*args, **kw)\n\n def delete(self, *args, **kw):\n self.log_instance_delete()\n self.related_logentries().delete()\n super().delete(*args, **kw)\n\n def related_logentries(self):\n \"\"\"\n Return a queryset with all logentries that should be shown with this model.\n \"\"\"\n return LogEntry.objects.filter(\n attached_to_object_type=ContentType.objects.get_for_model(type(self)),\n attached_to_object_id=self.pk,\n )\n\n def grouped_logentries(self):\n \"\"\"\n Returns a list of lists of logentries for display. 
The order is not changed.\n Logentries are grouped if they have a matching request_id.\n \"\"\"\n yield from (\n list(group)\n for key, group in itertools.groupby(\n self.related_logentries().select_related(\"user\"),\n lambda entry: entry.request_id or entry.pk,\n )\n )\n\n @property\n def object_to_attach_logentries_to(self):\n \"\"\"\n Return a model class and primary key for the object for which this logentry should be shown.\n By default, show it to the object described by the logentry itself.\n\n Returning the model instance directly might rely on fetching that object from the database,\n which can break bulk loading in some cases, so we don't do that.\n \"\"\"\n return type(self), self.pk\n\n @property\n def unlogged_fields(self):\n \"\"\"Specify a list of field names so that these fields don't get logged.\"\"\"\n return [\"id\", \"order\"]\n\n @staticmethod\n def transform_log_action(field_action):\n return field_action\n\n\n@receiver(m2m_changed)\ndef _m2m_changed(sender, instance, action, reverse, model, pk_set, **kwargs): # pylint: disable=unused-argument\n if reverse:\n return\n if not isinstance(instance, LoggedModel):\n return\n\n field_name = next(\n (\n field.name\n for field in type(instance)._meta.many_to_many\n if getattr(type(instance), field.name).through == sender\n ),\n None,\n )\n\n if field_name in instance.unlogged_fields:\n return\n\n m2m_changes = defaultdict(lambda: defaultdict(list))\n if action == \"pre_remove\":\n m2m_changes[field_name][FieldActionType.M2M_REMOVE] += list(pk_set)\n elif action == \"pre_add\":\n m2m_changes[field_name][FieldActionType.M2M_ADD] += list(pk_set)\n elif action == \"pre_clear\":\n m2m_changes[field_name][FieldActionType.M2M_CLEAR] = []\n\n if m2m_changes:\n instance.log_m2m_change(m2m_changes)\n", "path": "evap/evaluation/models_logging.py"}]} | 4,034 | 514 |
gh_patches_debug_27704 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-4761 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
junos_package.py: package_version undefined
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
junos_package
##### ANSIBLE VERSION
devel
##### CONFIGURATION
##### OS / ENVIRONMENT
##### SUMMARY
https://github.com/ansible/ansible-modules-core/blame/devel/network/junos/junos_package.py#L141
`wants_ver = module.params['version'] or package_version(module)`
I can't find anywhere in the `ansible/ansible` code base where `package_version` is defined
</issue>
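A note on what the undefined helper was presumably intended to do: falling back from an explicit `version` option would require parsing a version string out of the package filename (e.g. `junos-vsrx-12.1X46-D10.2-domestic.tgz`). The sketch below is purely illustrative — the helper name, the filename convention and the regular expression are assumptions, and the accepted patch further down removes the call instead of adding such a helper.

```python
import os
import re


def package_version(filename):
    """Best-effort guess at the Junos version embedded in a package filename.

    Hypothetical helper for illustration only; it is not part of the module.
    """
    base = os.path.basename(filename)
    # e.g. 'junos-vsrx-12.1X46-D10.2-domestic.tgz' -> '12.1X46-D10.2'
    match = re.search(r'(\d+\.\d+[A-Z]\d+(?:-D[\d.]+)?)', base)
    return match.group(1) if match else None


if __name__ == '__main__':
    print(package_version('junos-vsrx-12.1X46-D10.2-domestic.tgz'))
```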
<code>
[start of network/junos/junos_package.py]
1 #!/usr/bin/python
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17 #
18
19 DOCUMENTATION = """
20 ---
21 module: junos_package
22 version_added: "2.1"
23 author: "Peter Sprygada (@privateip)"
24 short_description: Installs packages on remote devices running Junos
25 description:
26 - This module can install new and updated packages on remote
27 devices running Junos. The module will compare the specified
28 package with the one running on the remote device and install
29 the specified version if there is a mismatch
30 extends_documentation_fragment: junos
31 options:
32 src:
33 description:
34 - The I(src) argument specifies the path to the source package to be
35 installed on the remote device in the advent of a version mismatch.
36 The I(src) argument can be either a localized path or a full
37 path to the package file to install.
38 required: true
39 default: null
40 aliases: ['package']
41 version:
42 description:
43 - The I(version) argument can be used to explicitly specify the
44 version of the package that should be installed on the remote
45 device. If the I(version) argument is not specified, then
46       the version is extracted from the I(src) filename.
47 required: false
48 default: null
49 reboot:
50 description:
51 - In order for a package to take effect, the remote device must be
52 restarted. When enabled, this argument will instruct the module
53 to reboot the device once the updated package has been installed.
54 If disabled or the remote package does not need to be changed,
55       the device will not be restarted.
56 required: true
57 default: true
58 choices: ['true', 'false']
59 no_copy:
60 description:
61 - The I(no_copy) argument is responsible for instructing the remote
62 device on where to install the package from. When enabled, the
63 package is transferred to the remote device prior to installing.
64 required: false
65 default: false
66 choices: ['true', 'false']
67 force:
68 description:
69 - The I(force) argument instructs the module to bypass the package
70     version check and install the package identified in I(src) on
71 the remote device.
72 required: true
73 default: false
74 choices: ['true', 'false']
75 requirements:
76 - junos-eznc
77 notes:
78 - This module requires the netconf system service be enabled on
79 the remote device being managed
80 """
81
82 EXAMPLES = """
83 # the required set of connection arguments have been purposely left off
84 # the examples for brevity
85
86 - name: install local package on remote device
87 junos_package:
88 src: junos-vsrx-12.1X46-D10.2-domestic.tgz
89
90 - name: install local package on remote device without rebooting
91 junos_package:
92 src: junos-vsrx-12.1X46-D10.2-domestic.tgz
93 reboot: no
94 """
95
96 try:
97 from jnpr.junos.utils.sw import SW
98 HAS_SW = True
99 except ImportError:
100 HAS_SW = False
101
102 def install_package(module):
103 junos = SW(module.connection.device)
104 package = module.params['src']
105 no_copy = module.params['no_copy']
106
107 progress_log = lambda x, y: module.log(y)
108
109 module.log('installing package')
110 result = junos.install(package, progress=progress_log, no_copy=no_copy)
111
112 if not result:
113 module.fail_json(msg='Unable to install package on device')
114
115 if module.params['reboot']:
116 module.log('rebooting system')
117 junos.reboot()
118
119
120 def main():
121 spec = dict(
122 src=dict(type='path', required=True, aliases=['package']),
123 version=dict(),
124 reboot=dict(type='bool', default=True),
125 no_copy=dict(default=False, type='bool'),
126 force=dict(type='bool', default=False),
127 transport=dict(default='netconf', choices=['netconf'])
128 )
129
130 module = get_module(argument_spec=spec,
131 supports_check_mode=True)
132
133 if not HAS_SW:
134 module.fail_json(msg='Missing jnpr.junos.utils.sw module')
135
136 result = dict(changed=False)
137
138 do_upgrade = module.params['force'] or False
139 if not module.params['force']:
140 has_ver = module.get_facts().get('version')
141 wants_ver = module.params['version'] or package_version(module)
142 do_upgrade = has_ver != wants_ver
143
144 if do_upgrade:
145 if not module.check_mode:
146 install_package(module)
147 result['changed'] = True
148
149 module.exit_json(**result)
150
151 from ansible.module_utils.basic import *
152 from ansible.module_utils.junos import *
153
154 if __name__ == '__main__':
155 main()
156
[end of network/junos/junos_package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py
--- a/network/junos/junos_package.py
+++ b/network/junos/junos_package.py
@@ -92,6 +92,7 @@
src: junos-vsrx-12.1X46-D10.2-domestic.tgz
reboot: no
"""
+from ansible.module_utils.junos import NetworkModule
try:
from jnpr.junos.utils.sw import SW
@@ -127,8 +128,8 @@
transport=dict(default='netconf', choices=['netconf'])
)
- module = get_module(argument_spec=spec,
- supports_check_mode=True)
+ module = NetworkModule(argument_spec=spec,
+ supports_check_mode=True)
if not HAS_SW:
module.fail_json(msg='Missing jnpr.junos.utils.sw module')
@@ -137,8 +138,8 @@
do_upgrade = module.params['force'] or False
if not module.params['force']:
- has_ver = module.get_facts().get('version')
- wants_ver = module.params['version'] or package_version(module)
+ has_ver = module.connection.get_facts().get('version')
+ wants_ver = module.params['version']
do_upgrade = has_ver != wants_ver
if do_upgrade:
@@ -148,8 +149,6 @@
module.exit_json(**result)
-from ansible.module_utils.basic import *
-from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
| {"golden_diff": "diff --git a/network/junos/junos_package.py b/network/junos/junos_package.py\n--- a/network/junos/junos_package.py\n+++ b/network/junos/junos_package.py\n@@ -92,6 +92,7 @@\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n reboot: no\n \"\"\"\n+from ansible.module_utils.junos import NetworkModule\n \n try:\n from jnpr.junos.utils.sw import SW\n@@ -127,8 +128,8 @@\n transport=dict(default='netconf', choices=['netconf'])\n )\n \n- module = get_module(argument_spec=spec,\n- supports_check_mode=True)\n+ module = NetworkModule(argument_spec=spec,\n+ supports_check_mode=True)\n \n if not HAS_SW:\n module.fail_json(msg='Missing jnpr.junos.utils.sw module')\n@@ -137,8 +138,8 @@\n \n do_upgrade = module.params['force'] or False\n if not module.params['force']:\n- has_ver = module.get_facts().get('version')\n- wants_ver = module.params['version'] or package_version(module)\n+ has_ver = module.connection.get_facts().get('version')\n+ wants_ver = module.params['version']\n do_upgrade = has_ver != wants_ver\n \n if do_upgrade:\n@@ -148,8 +149,6 @@\n \n module.exit_json(**result)\n \n-from ansible.module_utils.basic import *\n-from ansible.module_utils.junos import *\n \n if __name__ == '__main__':\n main()\n", "issue": "junos_package.py: package_version undefined\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\njunos_package\n##### ANSIBLE VERSION\n\ndevel\n##### CONFIGURATION\n##### OS / ENVIRONMENT\n##### SUMMARY\n\nhttps://github.com/ansible/ansible-modules-core/blame/devel/network/junos/junos_package.py#L141\n\n`wants_ver = module.params['version'] or package_version(module)`\n\nI can't find anywhere in the `ansible/ansible` code base where `package_version` is defined\n\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = \"\"\"\n---\nmodule: junos_package\nversion_added: \"2.1\"\nauthor: \"Peter Sprygada (@privateip)\"\nshort_description: Installs packages on remote devices running Junos\ndescription:\n - This module can install new and updated packages on remote\n devices running Junos. The module will compare the specified\n package with the one running on the remote device and install\n the specified version if there is a mismatch\nextends_documentation_fragment: junos\noptions:\n src:\n description:\n - The I(src) argument specifies the path to the source package to be\n installed on the remote device in the advent of a version mismatch.\n The I(src) argument can be either a localized path or a full\n path to the package file to install.\n required: true\n default: null\n aliases: ['package']\n version:\n description:\n - The I(version) argument can be used to explicitly specify the\n version of the package that should be installed on the remote\n device. 
If the I(version) argument is not specified, then\n the version is extracts from the I(src) filename.\n required: false\n default: null\n reboot:\n description:\n - In order for a package to take effect, the remote device must be\n restarted. When enabled, this argument will instruct the module\n to reboot the device once the updated package has been installed.\n If disabled or the remote package does not need to be changed,\n the device will not be started.\n required: true\n default: true\n choices: ['true', 'false']\n no_copy:\n description:\n - The I(no_copy) argument is responsible for instructing the remote\n device on where to install the package from. When enabled, the\n package is transferred to the remote device prior to installing.\n required: false\n default: false\n choices: ['true', 'false']\n force:\n description:\n - The I(force) argument instructs the module to bypass the package\n version check and install the packaged identified in I(src) on\n the remote device.\n required: true\n default: false\n choices: ['true', 'false']\nrequirements:\n - junos-eznc\nnotes:\n - This module requires the netconf system service be enabled on\n the remote device being managed\n\"\"\"\n\nEXAMPLES = \"\"\"\n# the required set of connection arguments have been purposely left off\n# the examples for brevity\n\n- name: install local package on remote device\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n\n- name: install local package on remote device without rebooting\n junos_package:\n src: junos-vsrx-12.1X46-D10.2-domestic.tgz\n reboot: no\n\"\"\"\n\ntry:\n from jnpr.junos.utils.sw import SW\n HAS_SW = True\nexcept ImportError:\n HAS_SW = False\n\ndef install_package(module):\n junos = SW(module.connection.device)\n package = module.params['src']\n no_copy = module.params['no_copy']\n\n progress_log = lambda x, y: module.log(y)\n\n module.log('installing package')\n result = junos.install(package, progress=progress_log, no_copy=no_copy)\n\n if not result:\n module.fail_json(msg='Unable to install package on device')\n\n if module.params['reboot']:\n module.log('rebooting system')\n junos.reboot()\n\n\ndef main():\n spec = dict(\n src=dict(type='path', required=True, aliases=['package']),\n version=dict(),\n reboot=dict(type='bool', default=True),\n no_copy=dict(default=False, type='bool'),\n force=dict(type='bool', default=False),\n transport=dict(default='netconf', choices=['netconf'])\n )\n\n module = get_module(argument_spec=spec,\n supports_check_mode=True)\n\n if not HAS_SW:\n module.fail_json(msg='Missing jnpr.junos.utils.sw module')\n\n result = dict(changed=False)\n\n do_upgrade = module.params['force'] or False\n if not module.params['force']:\n has_ver = module.get_facts().get('version')\n wants_ver = module.params['version'] or package_version(module)\n do_upgrade = has_ver != wants_ver\n\n if do_upgrade:\n if not module.check_mode:\n install_package(module)\n result['changed'] = True\n\n module.exit_json(**result)\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.junos import *\n\nif __name__ == '__main__':\n main()\n", "path": "network/junos/junos_package.py"}]} | 2,187 | 351 |
gh_patches_debug_4786 | rasdani/github-patches | git_diff | jazzband__pip-tools-314 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip-compile loses `via` with pip 8
```
$ echo jinja2 > req
$ pip-compile --version
pip-compile, version 1.5
$ pip --version
pip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7)
pip-compile req
#
# This file is autogenerated by pip-compile
# Make changes in req, then run this to update:
#
# pip-compile req
#
jinja2==2.8
markupsafe==0.23 # via jinja2
$ pip install -U pip
<snip>
$ pip --version
pip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7)
pip-compile req
#
# This file is autogenerated by pip-compile
# Make changes in req, then run this to update:
#
# pip-compile req
#
jinja2==2.8
MarkupSafe==0.23
```
note the missing `via jinja2` for pip 8
</issue>
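The behaviour difference lines up with a name-casing change: pip 8 reports the requirement with its canonical capitalization (`MarkupSafe`), while the reverse-dependency map that drives the `# via` annotation is keyed by lowercase names, so the lookup silently misses. The snippet below only illustrates that mismatch with made-up data; it is not pip-tools code.

```python
# Reverse-dependency map keyed by lowercase project names (made-up data).
reverse_dependencies = {'markupsafe': ['jinja2']}

# pip 7.x surfaced the pinned name as 'markupsafe'; pip 8.x preserves
# the canonical 'MarkupSafe', so a case-sensitive lookup finds nothing.
for reported_name in ('markupsafe', 'MarkupSafe'):
    required_by = reverse_dependencies.get(reported_name, [])
    line = '{}==0.23'.format(reported_name)
    if required_by:
        line += '  # via ' + ', '.join(sorted(required_by))
    print(line)
# Output:
# markupsafe==0.23  # via jinja2
# MarkupSafe==0.23
```

Lower-casing the key at lookup time, as the patch below does, makes both spellings resolve to the same entry.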
<code>
[start of piptools/writer.py]
1 import os
2 from os.path import basename
3
4 from ._compat import ExitStack
5 from .click import unstyle
6 from .io import AtomicSaver
7 from .logging import log
8 from .utils import comment, format_requirement
9
10
11 class OutputWriter(object):
12 def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,
13 default_index_url, index_urls):
14 self.src_file = src_file
15 self.dst_file = dst_file
16 self.dry_run = dry_run
17 self.emit_header = emit_header
18 self.emit_index = emit_index
19 self.annotate = annotate
20 self.default_index_url = default_index_url
21 self.index_urls = index_urls
22
23 def _sort_key(self, ireq):
24 return (not ireq.editable, str(ireq.req).lower())
25
26 def write_header(self):
27 if self.emit_header:
28 yield comment('#')
29 yield comment('# This file is autogenerated by pip-compile')
30 yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))
31 yield comment('#')
32 args = ''
33 if not self.emit_index:
34 args += '--no-index '
35 if not self.annotate:
36 args += '--no-annotate '
37 yield comment('# pip-compile {args}{filename}'.format(
38 args=args,
39 filename=basename(self.src_file)))
40 yield comment('#')
41
42 def write_index_options(self):
43 if self.emit_index:
44 emitted = False
45 for index, index_url in enumerate(self.index_urls):
46 if index_url.rstrip('/') == self.default_index_url:
47 continue
48 flag = '--index-url' if index == 0 else '--extra-index-url'
49 yield '{} {}'.format(flag, index_url)
50 emitted = True
51 if emitted:
52 yield '' # extra line of whitespace
53
54 def _iter_lines(self, results, reverse_dependencies, primary_packages):
55 for line in self.write_header():
56 yield line
57 for line in self.write_index_options():
58 yield line
59
60 UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}
61 unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}
62 packages = {r for r in results if r.name not in UNSAFE_PACKAGES}
63
64 packages = sorted(packages, key=self._sort_key)
65 unsafe_packages = sorted(unsafe_packages, key=self._sort_key)
66
67 for ireq in packages:
68 line = self._format_requirement(ireq, reverse_dependencies, primary_packages)
69 yield line
70
71 if unsafe_packages:
72 yield ''
73 yield comment('# The following packages are commented out because they are')
74 yield comment('# considered to be unsafe in a requirements file:')
75
76 for ireq in unsafe_packages:
77 line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False)
78 yield comment('# ' + line)
79
80 def write(self, results, reverse_dependencies, primary_packages):
81 with ExitStack() as stack:
82 f = None
83 if not self.dry_run:
84 f = stack.enter_context(AtomicSaver(self.dst_file))
85
86 for line in self._iter_lines(results, reverse_dependencies, primary_packages):
87 log.info(line)
88 if f:
89 f.write(unstyle(line).encode('utf-8'))
90 f.write(os.linesep.encode('utf-8'))
91
92 def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):
93 line = format_requirement(ireq, include_specifier=include_specifier)
94 if not self.annotate or ireq.name in primary_packages:
95 return line
96
97 # Annotate what packages this package is required by
98 required_by = reverse_dependencies.get(ireq.name, [])
99 if required_by:
100 line = line.ljust(24)
101 annotation = ', '.join(sorted(required_by))
102 line += comment(' # via ' + annotation)
103 return line
104
[end of piptools/writer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/piptools/writer.py b/piptools/writer.py
--- a/piptools/writer.py
+++ b/piptools/writer.py
@@ -95,7 +95,7 @@
return line
# Annotate what packages this package is required by
- required_by = reverse_dependencies.get(ireq.name, [])
+ required_by = reverse_dependencies.get(ireq.name.lower(), [])
if required_by:
line = line.ljust(24)
annotation = ', '.join(sorted(required_by))
| {"golden_diff": "diff --git a/piptools/writer.py b/piptools/writer.py\n--- a/piptools/writer.py\n+++ b/piptools/writer.py\n@@ -95,7 +95,7 @@\n return line\n \n # Annotate what packages this package is required by\n- required_by = reverse_dependencies.get(ireq.name, [])\n+ required_by = reverse_dependencies.get(ireq.name.lower(), [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n", "issue": "pip-compile looses `via` with pip 8\n```\n$ echo jinja2 > req\n$ pip-compile --version\npip-compile, version 1.5\n$ pip --version\npip 7.1.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nmarkupsafe==0.23 # via jinja2\n\n$ pip install -U pip\n<snip>\n\n$ pip --version\npip 8.0.2 from <snip>lib/python2.7/site-packages (python 2.7)\n\npip-compile req\n#\n# This file is autogenerated by pip-compile\n# Make changes in req, then run this to update:\n#\n# pip-compile req\n#\n\njinja2==2.8\nMarkupSafe==0.23\n```\n\nnote the missing `via jinja2` for pip 8\n\n", "before_files": [{"content": "import os\nfrom os.path import basename\n\nfrom ._compat import ExitStack\nfrom .click import unstyle\nfrom .io import AtomicSaver\nfrom .logging import log\nfrom .utils import comment, format_requirement\n\n\nclass OutputWriter(object):\n def __init__(self, src_file, dst_file, dry_run, emit_header, emit_index, annotate,\n default_index_url, index_urls):\n self.src_file = src_file\n self.dst_file = dst_file\n self.dry_run = dry_run\n self.emit_header = emit_header\n self.emit_index = emit_index\n self.annotate = annotate\n self.default_index_url = default_index_url\n self.index_urls = index_urls\n\n def _sort_key(self, ireq):\n return (not ireq.editable, str(ireq.req).lower())\n\n def write_header(self):\n if self.emit_header:\n yield comment('#')\n yield comment('# This file is autogenerated by pip-compile')\n yield comment('# Make changes in {}, then run this to update:'.format(basename(self.src_file)))\n yield comment('#')\n args = ''\n if not self.emit_index:\n args += '--no-index '\n if not self.annotate:\n args += '--no-annotate '\n yield comment('# pip-compile {args}{filename}'.format(\n args=args,\n filename=basename(self.src_file)))\n yield comment('#')\n\n def write_index_options(self):\n if self.emit_index:\n emitted = False\n for index, index_url in enumerate(self.index_urls):\n if index_url.rstrip('/') == self.default_index_url:\n continue\n flag = '--index-url' if index == 0 else '--extra-index-url'\n yield '{} {}'.format(flag, index_url)\n emitted = True\n if emitted:\n yield '' # extra line of whitespace\n\n def _iter_lines(self, results, reverse_dependencies, primary_packages):\n for line in self.write_header():\n yield line\n for line in self.write_index_options():\n yield line\n\n UNSAFE_PACKAGES = {'setuptools', 'distribute', 'pip'}\n unsafe_packages = {r for r in results if r.name in UNSAFE_PACKAGES}\n packages = {r for r in results if r.name not in UNSAFE_PACKAGES}\n\n packages = sorted(packages, key=self._sort_key)\n unsafe_packages = sorted(unsafe_packages, key=self._sort_key)\n\n for ireq in packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages)\n yield line\n\n if unsafe_packages:\n yield ''\n yield comment('# The following packages are commented out because they are')\n yield comment('# considered to be unsafe in a requirements file:')\n\n for ireq in 
unsafe_packages:\n line = self._format_requirement(ireq, reverse_dependencies, primary_packages, include_specifier=False)\n yield comment('# ' + line)\n\n def write(self, results, reverse_dependencies, primary_packages):\n with ExitStack() as stack:\n f = None\n if not self.dry_run:\n f = stack.enter_context(AtomicSaver(self.dst_file))\n\n for line in self._iter_lines(results, reverse_dependencies, primary_packages):\n log.info(line)\n if f:\n f.write(unstyle(line).encode('utf-8'))\n f.write(os.linesep.encode('utf-8'))\n\n def _format_requirement(self, ireq, reverse_dependencies, primary_packages, include_specifier=True):\n line = format_requirement(ireq, include_specifier=include_specifier)\n if not self.annotate or ireq.name in primary_packages:\n return line\n\n # Annotate what packages this package is required by\n required_by = reverse_dependencies.get(ireq.name, [])\n if required_by:\n line = line.ljust(24)\n annotation = ', '.join(sorted(required_by))\n line += comment(' # via ' + annotation)\n return line\n", "path": "piptools/writer.py"}]} | 1,836 | 119 |
gh_patches_debug_23719 | rasdani/github-patches | git_diff | vaexio__vaex-405 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failing to open arrow stream with categoricals
Vaex fails to open arrow streams that contain categorical columns. It would be great if this worked, as categoricals have given great performance in many of my applications.
```python
import pandas as pd
import pyarrow as pa
import numpy as np
import vaex
df = pd.DataFrame(
{
'col1': pd.Categorical.from_codes(np.full(1, 1), categories=['ABC', 'DEF'])
}
)
table = pa.Table.from_pandas(df)
with pa.OSFile('test2.arrow', 'wb') as sink:
with pa.RecordBatchStreamWriter(sink, table.schema) as writer:
writer.write_table(table)
with pa.OSFile('test2.arrow', 'rb') as source:
df = pa.ipc.open_stream(source).read_pandas()
df = vaex.open('test2.arrow')
```
Output:
```
ERROR:MainThread:vaex:error opening 'test2.arrow'
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
in
17 df = pa.ipc.open_stream(source).read_pandas()
18
---> 19 df = vaex.open('test2.arrow')
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex\__init__.py in open(path, convert, shuffle, copy_index, *args, **kwargs)
189 ds = from_csv(path, copy_index=copy_index, **kwargs)
190 else:
--> 191 ds = vaex.file.open(path, *args, **kwargs)
192 if convert and ds:
193 ds.export_hdf5(filename_hdf5, shuffle=shuffle)
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex\file\__init__.py in open(path, *args, **kwargs)
28 for opener in opener_classes:
29 if opener.can_open(path, *args, **kwargs):
---> 30 return opener.open(path, *args, **kwargs)
31 if hdf5:
32 openers.extend(hdf5.dataset.dataset_type_map.items())
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\opener.py in open(path, *args, **kwargs)
9 def open(path, *args, **kwargs):
10 from .dataset import DatasetArrow
---> 11 return DatasetArrow(path, *args, **kwargs)
12
13 class ParquetOpener:
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in __init__(self, filename, table, write)
18 self._write = write
19 if table is None:
---> 20 self._load()
21 else:
22 self._load_table(table)
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in _load(self)
26 reader = pa.ipc.open_stream(source)
27 table = pa.Table.from_batches([b for b in reader])
---> 28 self._load_table(table)
29
30 def _load_table(self, table):
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\dataset.py in _load_table(self, table)
35 # TODO: keep the arrow columns, and support and test chunks
36 arrow_array = col.data.chunks[0]
---> 37 column = column_from_arrow_array(arrow_array)
38
39 self.columns[name] = column
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\convert.py in column_from_arrow_array(arrow_array)
28 buffers = arrow_array.buffers()
29 if len(buffers) == 2:
---> 30 return numpy_array_from_arrow_array(arrow_array)
31 elif len(buffers) == 3 and isinstance(arrow_array.type, type(pyarrow.string())):
32 bitmap_buffer, offsets, string_bytes = arrow_array.buffers()
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\vaex_arrow\convert.py in numpy_array_from_arrow_array(arrow_array)
62 array = np.frombuffer(buffers[-1], dtype, len(arrow_array))# TODO: deal with offset ? [arrow_array.offset:arrow_array.offset + len(arrow_array)]
63 else:
---> 64 dtype = arrow_array.type.to_pandas_dtype()
65 if np.bool_ == dtype:
66 # TODO: this will also be a copy, we probably want to support bitmasks as well
Z:\Systemdateien\Miniconda3\envs\finance\lib\site-packages\pyarrow\types.pxi in pyarrow.lib.DataType.to_pandas_dtype()
NotImplementedError: dictionary
```
</issue>
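The `NotImplementedError: dictionary` is raised when a dictionary-encoded (categorical) Arrow array is pushed through `to_pandas_dtype()`, which has no single NumPy equivalent for that type. Such an array is really two plain arrays — integer codes plus a table of labels — and each piece converts on its own, which is the direction the patch below takes. A small standalone illustration (assuming only `pyarrow` is installed):

```python
import pyarrow as pa

# Dictionary-encode a plain string array to get the categorical layout.
arr = pa.array(['DEF', 'ABC', 'DEF']).dictionary_encode()

print(arr.type)        # e.g. dictionary<values=string, indices=int32, ordered=0>
print(arr.indices)     # integer codes, e.g. [0, 1, 0]
print(arr.dictionary)  # label table, e.g. ["DEF", "ABC"]
```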
<code>
[start of packages/vaex-arrow/vaex_arrow/dataset.py]
1 __author__ = 'maartenbreddels'
2 import logging
3
4 import pyarrow as pa
5 import pyarrow.parquet as pq
6
7 import vaex.dataset
8 import vaex.file.other
9 from .convert import column_from_arrow_array
10 logger = logging.getLogger("vaex_arrow")
11
12
13 class DatasetArrow(vaex.dataset.DatasetLocal):
14 """Implements storage using arrow"""
15
16 def __init__(self, filename=None, table=None, write=False):
17 super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])
18 self._write = write
19 if table is None:
20 self._load()
21 else:
22 self._load_table(table)
23
24 def _load(self):
25 source = pa.memory_map(self.path)
26 reader = pa.ipc.open_stream(source)
27 table = pa.Table.from_batches([b for b in reader])
28 self._load_table(table)
29
30 def _load_table(self, table):
31 self._length_unfiltered = self._length_original = table.num_rows
32 self._index_end = self._length_original = table.num_rows
33 for col in table.columns:
34 name = col.name
35 # TODO: keep the arrow columns, and support and test chunks
36 arrow_array = col.data.chunks[0]
37 column = column_from_arrow_array(arrow_array)
38
39 self.columns[name] = column
40 self.column_names.append(name)
41 self._save_assign_expression(name, vaex.expression.Expression(self, name))
42
43
44 @classmethod
45 def can_open(cls, path, *args, **kwargs):
46 return path.rpartition('.')[2] == 'arrow'
47
48 @classmethod
49 def get_options(cls, path):
50 return []
51
52 @classmethod
53 def option_to_args(cls, option):
54 return []
55
56 class DatasetParquet(DatasetArrow):
57 def _load(self):
58 # might not be optimal, but it works, we can always see if we can
59 # do mmapping later on
60 table = pq.read_table(self.path)
61 self._load_table(table)
62
63 vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
64 vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
65
66
[end of packages/vaex-arrow/vaex_arrow/dataset.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/vaex-arrow/vaex_arrow/dataset.py b/packages/vaex-arrow/vaex_arrow/dataset.py
--- a/packages/vaex-arrow/vaex_arrow/dataset.py
+++ b/packages/vaex-arrow/vaex_arrow/dataset.py
@@ -28,14 +28,18 @@
self._load_table(table)
def _load_table(self, table):
- self._length_unfiltered = self._length_original = table.num_rows
- self._index_end = self._length_original = table.num_rows
+ self._length_unfiltered = self._length_original = table.num_rows
+ self._index_end = self._length_original = table.num_rows
for col in table.columns:
name = col.name
# TODO: keep the arrow columns, and support and test chunks
arrow_array = col.data.chunks[0]
- column = column_from_arrow_array(arrow_array)
-
+ if isinstance(arrow_array.type, pa.DictionaryType):
+ column = column_from_arrow_array(arrow_array.indices)
+ labels = column_from_arrow_array(arrow_array.dictionary).tolist()
+ self._categories[name] = dict(labels=labels, N=len(labels))
+ else:
+ column = column_from_arrow_array(arrow_array)
self.columns[name] = column
self.column_names.append(name)
self._save_assign_expression(name, vaex.expression.Expression(self, name))
| {"golden_diff": "diff --git a/packages/vaex-arrow/vaex_arrow/dataset.py b/packages/vaex-arrow/vaex_arrow/dataset.py\n--- a/packages/vaex-arrow/vaex_arrow/dataset.py\n+++ b/packages/vaex-arrow/vaex_arrow/dataset.py\n@@ -28,14 +28,18 @@\n self._load_table(table)\n \n def _load_table(self, table):\n- self._length_unfiltered = self._length_original = table.num_rows\n- self._index_end = self._length_original = table.num_rows\n+ self._length_unfiltered = self._length_original = table.num_rows\n+ self._index_end = self._length_original = table.num_rows\n for col in table.columns:\n name = col.name\n # TODO: keep the arrow columns, and support and test chunks\n arrow_array = col.data.chunks[0]\n- column = column_from_arrow_array(arrow_array)\n-\n+ if isinstance(arrow_array.type, pa.DictionaryType):\n+ column = column_from_arrow_array(arrow_array.indices)\n+ labels = column_from_arrow_array(arrow_array.dictionary).tolist()\n+ self._categories[name] = dict(labels=labels, N=len(labels))\n+ else:\n+ column = column_from_arrow_array(arrow_array)\n self.columns[name] = column\n self.column_names.append(name)\n self._save_assign_expression(name, vaex.expression.Expression(self, name))\n", "issue": "Failing to open arrow stream with categoricals\nVaex fails to open arrow streams that contain categorical columns. It would be great if this was working as categoricals have given a great performance in many of my applications.\r\n\r\n```import pandas as pd\r\nimport pyarrow as pa\r\nimport numpy as np\r\nimport vaex\r\ndf = pd.DataFrame(\r\n {\r\n 'col1': pd.Categorical.from_codes(np.full(1, 1), categories=['ABC', 'DEF'])\r\n }\r\n)\r\ntable = pa.Table.from_pandas(df)\r\n\r\nwith pa.OSFile('test2.arrow', 'wb') as sink:\r\n with pa.RecordBatchStreamWriter(sink, table.schema) as writer:\r\n writer.write_table(table)\r\n\r\nwith pa.OSFile('test2.arrow', 'rb') as source:\r\n df = pa.ipc.open_stream(source).read_pandas()\r\n\r\ndf = vaex.open('test2.arrow')\r\n```\r\n\r\nOutput:\r\n```\r\nERROR:MainThread:vaex:error opening 'test2.arrow'\r\n---------------------------------------------------------------------------\r\nNotImplementedError Traceback (most recent call last)\r\n in \r\n 17 df = pa.ipc.open_stream(source).read_pandas()\r\n 18 \r\n---> 19 df = vaex.open('test2.arrow')\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex\\__init__.py in open(path, convert, shuffle, copy_index, *args, **kwargs)\r\n 189 ds = from_csv(path, copy_index=copy_index, **kwargs)\r\n 190 else:\r\n--> 191 ds = vaex.file.open(path, *args, **kwargs)\r\n 192 if convert and ds:\r\n 193 ds.export_hdf5(filename_hdf5, shuffle=shuffle)\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex\\file\\__init__.py in open(path, *args, **kwargs)\r\n 28 for opener in opener_classes:\r\n 29 if opener.can_open(path, *args, **kwargs):\r\n---> 30 return opener.open(path, *args, **kwargs)\r\n 31 if hdf5:\r\n 32 openers.extend(hdf5.dataset.dataset_type_map.items())\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\opener.py in open(path, *args, **kwargs)\r\n 9 def open(path, *args, **kwargs):\r\n 10 from .dataset import DatasetArrow\r\n---> 11 return DatasetArrow(path, *args, **kwargs)\r\n 12 \r\n 13 class ParquetOpener:\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in __init__(self, filename, table, write)\r\n 18 self._write = write\r\n 19 if table is None:\r\n---> 20 self._load()\r\n 21 else:\r\n 22 
self._load_table(table)\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in _load(self)\r\n 26 reader = pa.ipc.open_stream(source)\r\n 27 table = pa.Table.from_batches([b for b in reader])\r\n---> 28 self._load_table(table)\r\n 29 \r\n 30 def _load_table(self, table):\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\dataset.py in _load_table(self, table)\r\n 35 # TODO: keep the arrow columns, and support and test chunks\r\n 36 arrow_array = col.data.chunks[0]\r\n---> 37 column = column_from_arrow_array(arrow_array)\r\n 38 \r\n 39 self.columns[name] = column\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\convert.py in column_from_arrow_array(arrow_array)\r\n 28 buffers = arrow_array.buffers()\r\n 29 if len(buffers) == 2:\r\n---> 30 return numpy_array_from_arrow_array(arrow_array)\r\n 31 elif len(buffers) == 3 and isinstance(arrow_array.type, type(pyarrow.string())):\r\n 32 bitmap_buffer, offsets, string_bytes = arrow_array.buffers()\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\vaex_arrow\\convert.py in numpy_array_from_arrow_array(arrow_array)\r\n 62 array = np.frombuffer(buffers[-1], dtype, len(arrow_array))# TODO: deal with offset ? [arrow_array.offset:arrow_array.offset + len(arrow_array)]\r\n 63 else:\r\n---> 64 dtype = arrow_array.type.to_pandas_dtype()\r\n 65 if np.bool_ == dtype:\r\n 66 # TODO: this will also be a copy, we probably want to support bitmasks as well\r\n\r\nZ:\\Systemdateien\\Miniconda3\\envs\\finance\\lib\\site-packages\\pyarrow\\types.pxi in pyarrow.lib.DataType.to_pandas_dtype()\r\n\r\nNotImplementedError: dictionary\r\n```\n", "before_files": [{"content": "__author__ = 'maartenbreddels'\nimport logging\n\nimport pyarrow as pa\nimport pyarrow.parquet as pq\n\nimport vaex.dataset\nimport vaex.file.other\nfrom .convert import column_from_arrow_array\nlogger = logging.getLogger(\"vaex_arrow\")\n\n\nclass DatasetArrow(vaex.dataset.DatasetLocal):\n \"\"\"Implements storage using arrow\"\"\"\n\n def __init__(self, filename=None, table=None, write=False):\n super(DatasetArrow, self).__init__(name=filename, path=filename, column_names=[])\n self._write = write\n if table is None:\n self._load()\n else:\n self._load_table(table)\n\n def _load(self):\n source = pa.memory_map(self.path)\n reader = pa.ipc.open_stream(source)\n table = pa.Table.from_batches([b for b in reader])\n self._load_table(table)\n \n def _load_table(self, table):\n self._length_unfiltered = self._length_original = table.num_rows\n self._index_end = self._length_original = table.num_rows\n for col in table.columns:\n name = col.name\n # TODO: keep the arrow columns, and support and test chunks\n arrow_array = col.data.chunks[0]\n column = column_from_arrow_array(arrow_array)\n\n self.columns[name] = column\n self.column_names.append(name)\n self._save_assign_expression(name, vaex.expression.Expression(self, name))\n\n\n @classmethod\n def can_open(cls, path, *args, **kwargs):\n return path.rpartition('.')[2] == 'arrow'\n\n @classmethod\n def get_options(cls, path):\n return []\n\n @classmethod\n def option_to_args(cls, option):\n return []\n\nclass DatasetParquet(DatasetArrow):\n def _load(self):\n # might not be optimal, but it works, we can always see if we can\n # do mmapping later on\n table = pq.read_table(self.path)\n self._load_table(table)\n\nvaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\nvaex.file.other.dataset_type_map[\"parquet\"] = 
DatasetParquet\n\n", "path": "packages/vaex-arrow/vaex_arrow/dataset.py"}]} | 2,326 | 316 |
gh_patches_debug_19901 | rasdani/github-patches | git_diff | python-discord__bot-216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
!watch alias is not working.
The `!watch` alias broke when we changed the watch command to take a note instead of a channel - this is due to converters in the alias. I'll fix it.
</issue>
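The converter annotations are the culprit: the alias still declares a positional `TextChannel` parameter, so any free-form note passed to `!watch` fails conversion before the command body runs. In discord.py's command framework a keyword-only parameter (everything after a bare `*`) receives the remainder of the message as one string, which is what the patch below switches to. A rough sketch of that signature, mirroring the cog — the class and `self.invoke` here are stand-ins for the real ones:

```python
from discord import User
from discord.ext.commands import command


class AliasSketch:
    """Trimmed, hypothetical stand-in for the Alias cog."""

    @command(name="watch", hidden=True)
    async def bigbrother_watch_alias(self, ctx, user: User, *, reason: str = None):
        # The bare `*` makes `reason` keyword-only, so the framework passes the
        # rest of the message through unchanged instead of running a
        # TextChannel converter on it.
        await self.invoke(ctx, "bigbrother watch", user, reason=reason)
```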
<code>
[start of bot/cogs/alias.py]
1 import inspect
2 import logging
3
4 from discord import Colour, Embed, TextChannel, User
5 from discord.ext.commands import (
6 Command, Context, clean_content, command, group
7 )
8
9 from bot.converters import TagNameConverter
10 from bot.pagination import LinePaginator
11
12 log = logging.getLogger(__name__)
13
14
15 class Alias:
16 """
17 Aliases for more used commands
18 """
19
20 def __init__(self, bot):
21 self.bot = bot
22
23 async def invoke(self, ctx, cmd_name, *args, **kwargs):
24 """
25 Invokes a command with args and kwargs.
26         Fails early through `command.can_run`, and logs warnings.
27
28 :param ctx: Context instance for command call
29 :param cmd_name: Name of command/subcommand to be invoked
30 :param args: args to be passed to the command
31 :param kwargs: kwargs to be passed to the command
32 :return: None
33 """
34
35 log.debug(f"{cmd_name} was invoked through an alias")
36 cmd = self.bot.get_command(cmd_name)
37 if not cmd:
38 return log.warning(f'Did not find command "{cmd_name}" to invoke.')
39 elif not await cmd.can_run(ctx):
40 return log.warning(
41 f'{str(ctx.author)} tried to run the command "{cmd_name}"'
42 )
43
44 await ctx.invoke(cmd, *args, **kwargs)
45
46 @command(name='aliases')
47 async def aliases_command(self, ctx):
48 """Show configured aliases on the bot."""
49
50 embed = Embed(
51 title='Configured aliases',
52 colour=Colour.blue()
53 )
54 await LinePaginator.paginate(
55 (
56 f"• `{ctx.prefix}{value.name}` "
57 f"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`"
58 for name, value in inspect.getmembers(self)
59 if isinstance(value, Command) and name.endswith('_alias')
60 ),
61 ctx, embed, empty=False, max_lines=20
62 )
63
64 @command(name="resources", aliases=("resource",), hidden=True)
65 async def site_resources_alias(self, ctx):
66 """
67 Alias for invoking <prefix>site resources.
68 """
69
70 await self.invoke(ctx, "site resources")
71
72 @command(name="watch", hidden=True)
73 async def bigbrother_watch_alias(
74 self, ctx, user: User, channel: TextChannel = None
75 ):
76 """
77 Alias for invoking <prefix>bigbrother watch user [text_channel].
78 """
79
80 await self.invoke(ctx, "bigbrother watch", user, channel)
81
82 @command(name="unwatch", hidden=True)
83 async def bigbrother_unwatch_alias(self, ctx, user: User):
84 """
85 Alias for invoking <prefix>bigbrother unwatch user.
86
87 user: discord.User - A user instance to unwatch
88 """
89
90 await self.invoke(ctx, "bigbrother unwatch", user)
91
92 @command(name="home", hidden=True)
93 async def site_home_alias(self, ctx):
94 """
95 Alias for invoking <prefix>site home.
96 """
97
98 await self.invoke(ctx, "site home")
99
100 @command(name="faq", hidden=True)
101 async def site_faq_alias(self, ctx):
102 """
103 Alias for invoking <prefix>site faq.
104 """
105
106 await self.invoke(ctx, "site faq")
107
108 @command(name="rules", hidden=True)
109 async def site_rules_alias(self, ctx):
110 """
111 Alias for invoking <prefix>site rules.
112 """
113
114 await self.invoke(ctx, "site rules")
115
116 @command(name="reload", hidden=True)
117 async def cogs_reload_alias(self, ctx, *, cog_name: str):
118 """
119 Alias for invoking <prefix>cogs reload cog_name.
120
121 cog_name: str - name of the cog to be reloaded.
122 """
123
124 await self.invoke(ctx, "cogs reload", cog_name)
125
126 @command(name="defon", hidden=True)
127 async def defcon_enable_alias(self, ctx):
128 """
129 Alias for invoking <prefix>defcon enable.
130 """
131
132 await self.invoke(ctx, "defcon enable")
133
134 @command(name="defoff", hidden=True)
135 async def defcon_disable_alias(self, ctx):
136 """
137 Alias for invoking <prefix>defcon disable.
138 """
139
140 await self.invoke(ctx, "defcon disable")
141
142 @group(name="get",
143 aliases=("show", "g"),
144 hidden=True,
145 invoke_without_command=True)
146 async def get_group_alias(self, ctx):
147 """
148 Group for reverse aliases for commands like `tags get`,
149 allowing for `get tags` or `get docs`.
150 """
151
152 pass
153
154 @get_group_alias.command(name="tags", aliases=("tag", "t"), hidden=True)
155 async def tags_get_alias(
156 self, ctx: Context, *, tag_name: TagNameConverter = None
157 ):
158 """
159 Alias for invoking <prefix>tags get [tag_name].
160
161 tag_name: str - tag to be viewed.
162 """
163
164 await self.invoke(ctx, "tags get", tag_name)
165
166 @get_group_alias.command(name="docs", aliases=("doc", "d"), hidden=True)
167 async def docs_get_alias(
168 self, ctx: Context, symbol: clean_content = None
169 ):
170 """
171 Alias for invoking <prefix>docs get [symbol].
172
173 symbol: str - name of doc to be viewed.
174 """
175
176 await self.invoke(ctx, "docs get", symbol)
177
178
179 def setup(bot):
180 bot.add_cog(Alias(bot))
181 log.info("Cog loaded: Alias")
182
[end of bot/cogs/alias.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py
--- a/bot/cogs/alias.py
+++ b/bot/cogs/alias.py
@@ -1,7 +1,7 @@
import inspect
import logging
-from discord import Colour, Embed, TextChannel, User
+from discord import Colour, Embed, User
from discord.ext.commands import (
Command, Context, clean_content, command, group
)
@@ -71,13 +71,13 @@
@command(name="watch", hidden=True)
async def bigbrother_watch_alias(
- self, ctx, user: User, channel: TextChannel = None
+ self, ctx, user: User, *, reason: str = None
):
"""
Alias for invoking <prefix>bigbrother watch user [text_channel].
"""
- await self.invoke(ctx, "bigbrother watch", user, channel)
+ await self.invoke(ctx, "bigbrother watch", user, reason=reason)
@command(name="unwatch", hidden=True)
async def bigbrother_unwatch_alias(self, ctx, user: User):
| {"golden_diff": "diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py\n--- a/bot/cogs/alias.py\n+++ b/bot/cogs/alias.py\n@@ -1,7 +1,7 @@\n import inspect\n import logging\n \n-from discord import Colour, Embed, TextChannel, User\n+from discord import Colour, Embed, User\n from discord.ext.commands import (\n Command, Context, clean_content, command, group\n )\n@@ -71,13 +71,13 @@\n \n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(\n- self, ctx, user: User, channel: TextChannel = None\n+ self, ctx, user: User, *, reason: str = None\n ):\n \"\"\"\n Alias for invoking <prefix>bigbrother watch user [text_channel].\n \"\"\"\n \n- await self.invoke(ctx, \"bigbrother watch\", user, channel)\n+ await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n \n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx, user: User):\n", "issue": "!watch alias is not working.\nThe `!watch` alias broke when we changed the watch command to take a note instead of a channel - this is due to converters in the alias. I'll fix it.\n", "before_files": [{"content": "import inspect\nimport logging\n\nfrom discord import Colour, Embed, TextChannel, User\nfrom discord.ext.commands import (\n Command, Context, clean_content, command, group\n)\n\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias:\n \"\"\"\n Aliases for more used commands\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n async def invoke(self, ctx, cmd_name, *args, **kwargs):\n \"\"\"\n Invokes a command with args and kwargs.\n Fail early through `command.can_run`, and logs warnings.\n\n :param ctx: Context instance for command call\n :param cmd_name: Name of command/subcommand to be invoked\n :param args: args to be passed to the command\n :param kwargs: kwargs to be passed to the command\n :return: None\n \"\"\"\n\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx):\n \"\"\"Show configured aliases on the bot.\"\"\"\n\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site resources.\n \"\"\"\n\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(\n self, ctx, user: User, channel: TextChannel = None\n ):\n \"\"\"\n Alias for invoking <prefix>bigbrother watch user [text_channel].\n \"\"\"\n\n await self.invoke(ctx, \"bigbrother watch\", user, channel)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx, user: User):\n \"\"\"\n Alias for invoking <prefix>bigbrother unwatch user.\n\n user: discord.User - A user instance to unwatch\n \"\"\"\n\n await 
self.invoke(ctx, \"bigbrother unwatch\", user)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site home.\n \"\"\"\n\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site faq.\n \"\"\"\n\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n async def site_rules_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>site rules.\n \"\"\"\n\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx, *, cog_name: str):\n \"\"\"\n Alias for invoking <prefix>cogs reload cog_name.\n\n cog_name: str - name of the cog to be reloaded.\n \"\"\"\n\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon enable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx):\n \"\"\"\n Alias for invoking <prefix>defcon disable.\n \"\"\"\n\n await self.invoke(ctx, \"defcon disable\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx):\n \"\"\"\n Group for reverse aliases for commands like `tags get`,\n allowing for `get tags` or `get docs`.\n \"\"\"\n\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ):\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"tags get\", tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ):\n \"\"\"\n Alias for invoking <prefix>docs get [symbol].\n\n symbol: str - name of doc to be viewed.\n \"\"\"\n\n await self.invoke(ctx, \"docs get\", symbol)\n\n\ndef setup(bot):\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}]} | 2,228 | 253 |
gh_patches_debug_14194 | rasdani/github-patches | git_diff | elastic__ecs-1459 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Multi-level self-nesting support
### Problem
In ECS a self-nesting is when one field set is nested inside itself using a different name:
Two examples:
* `process.parent.*` self-nests the `process` field set renamed as `parent`
* `user.target.*` self-nests the `user.*` field set renamed as `target` (same applies to `user.changes` and `user.effective`)
In a _very_ specific case, you may want to self-nest the same field set twice:
1. Reuse `process.*` as `process.target.*`
2. Reuse `process` again but underneath `process.target.*` to create `process.target.parent.*`
The current ECS tooling allows specifying this in the `reusable.expected` section, but it will not produce the desired result:
```yaml
- name: process
reusable:
expected:
- at: process
as: target
# collect the parent of the target process at process.target.parent
- at: process.target
as: parent
```
### Solution
Add logic to the [self-nesting phase](https://github.com/elastic/ecs/blob/master/scripts/schema/finalizer.py#L78-L98) of `.finalize` to handle the case where the `at` attribute is not the top-level field set itself but an existing self-nesting.
</issue>
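In practice the fix needs two things: resolve the `at:` value as a dotted path into fields that may themselves have been produced by an earlier self-nesting, and process the shallower destination (`process`) before the deeper one (`process.target`). A toy, self-contained sketch of that idea — the dict layout, the helper and the entries are simplified stand-ins, not the real `finalizer.py` structures:

```python
import copy


def group_at_path(path, fields):
    """Walk a dotted path like 'process.target' down a simplified fields dict."""
    parts = path.split('.')
    node = fields[parts[0]]
    for part in parts[1:]:
        node = node['fields'][part]
    return node


fields = {'process': {'fields': {'pid': {}}}}
reuse_entries = [
    {'at': 'process.target', 'as': 'parent'},
    {'at': 'process', 'as': 'target'},
]

# Keep a pristine copy of the original fields, then attach shallow
# destinations first so that 'process.target' exists by the time
# 'process.target.parent' has to be resolved.
pristine = copy.deepcopy(fields['process']['fields'])
for entry in sorted(reuse_entries, key=lambda e: e['at'].count('.')):
    destination = group_at_path(entry['at'], fields)
    destination.setdefault('fields', {})[entry['as']] = {
        'fields': copy.deepcopy(pristine),
    }

print(sorted(fields['process']['fields']))                      # ['pid', 'target']
print(sorted(fields['process']['fields']['target']['fields']))  # ['parent', 'pid']
```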
<code>
[start of scripts/schema/finalizer.py]
1 import copy
2 import re
3
4 from schema import visitor
5
6 # This script takes the fleshed out deeply nested fields dictionary as emitted by
7 # cleaner.py, and performs field reuse in two phases.
8 #
9 # Phase 1 performs field reuse across field sets. E.g. `group` fields should also be under `user`.
10 # This type of reuse is then carried around if the receiving field set is also reused.
11 # In other words, user.group.* will be in other places where user is nested:
12 # source.user.* will contain source.user.group.*
13
14 # Phase 2 performs field reuse where field sets are reused within themselves, with a different name.
15 # Examples are nesting `process` within itself, as `process.parent.*`,
16 # or nesting `user` within itself at `user.target.*`.
17 # This second kind of nesting is not carried around everywhere else the receiving field set is reused.
18 # So `user.target.*` is *not* carried over to `source.user.target*` when we reuse `user` under `source`.
19
20
21 def finalize(fields):
22 """Intended entrypoint of the finalizer."""
23 perform_reuse(fields)
24 calculate_final_values(fields)
25
26
27 def order_reuses(fields):
28 foreign_reuses = {}
29 self_nestings = {}
30 for schema_name, schema in fields.items():
31 if not 'reusable' in schema['schema_details']:
32 continue
33 reuse_order = schema['schema_details']['reusable']['order']
34 for reuse_entry in schema['schema_details']['reusable']['expected']:
35 destination_schema_name = reuse_entry['full'].split('.')[0]
36 if destination_schema_name == schema_name:
37 # Accumulate self-nestings for phase 2.
38 self_nestings.setdefault(destination_schema_name, [])
39 self_nestings[destination_schema_name].extend([reuse_entry])
40 else:
41 # Group foreign reuses by 'order' attribute.
42 foreign_reuses.setdefault(reuse_order, {})
43 foreign_reuses[reuse_order].setdefault(schema_name, [])
44 foreign_reuses[reuse_order][schema_name].extend([reuse_entry])
45 return foreign_reuses, self_nestings
46
47
48 def perform_reuse(fields):
49 """Performs field reuse in two phases"""
50 foreign_reuses, self_nestings = order_reuses(fields)
51
52 # Phase 1: foreign reuse
53 # These are done respecting the reusable.order attribute.
54 # This lets us force the order for chained reuses (e.g. group => user, then user => many places)
55 for order in sorted(foreign_reuses.keys()):
56 for schema_name, reuse_entries in foreign_reuses[order].items():
57 schema = fields[schema_name]
58 for reuse_entry in reuse_entries:
59 # print(order, "{} => {}".format(schema_name, reuse_entry['full']))
60 nest_as = reuse_entry['as']
61 destination_schema_name = reuse_entry['full'].split('.')[0]
62 destination_schema = fields[destination_schema_name]
63 ensure_valid_reuse(schema, destination_schema)
64
65 new_field_details = copy.deepcopy(schema['field_details'])
66 new_field_details['name'] = nest_as
67 new_field_details['original_fieldset'] = schema_name
68 new_field_details['intermediate'] = True
69 reused_fields = copy.deepcopy(schema['fields'])
70 set_original_fieldset(reused_fields, schema_name)
71 destination_fields = field_group_at_path(reuse_entry['at'], fields)
72 destination_fields[nest_as] = {
73 'field_details': new_field_details,
74 'fields': reused_fields,
75 }
76 append_reused_here(schema, reuse_entry, destination_schema)
77
78 # Phase 2: self-nesting
79 for schema_name, reuse_entries in self_nestings.items():
80 schema = fields[schema_name]
81 ensure_valid_reuse(schema)
82 # Since we're about self-nest more fields within these, make a pristine copy first
83 reused_fields = copy.deepcopy(schema['fields'])
84 set_original_fieldset(reused_fields, schema_name)
85 for reuse_entry in reuse_entries:
86 # print("x {} => {}".format(schema_name, reuse_entry['full']))
87 nest_as = reuse_entry['as']
88 new_field_details = copy.deepcopy(schema['field_details'])
89 new_field_details['name'] = nest_as
90 new_field_details['original_fieldset'] = schema_name
91 new_field_details['intermediate'] = True
92 destination_fields = schema['fields']
93 destination_fields[nest_as] = {
94 'field_details': new_field_details,
95 # Make a new copy of the pristine copy
96 'fields': copy.deepcopy(reused_fields),
97 }
98 append_reused_here(schema, reuse_entry, fields[schema_name])
99
100
101 def ensure_valid_reuse(reused_schema, destination_schema=None):
102 """
103 Raise if either the reused schema or destination schema have root=true.
104
105 Second param is optional, if testing for a self-nesting (where source=destination).
106 """
107 if reused_schema['schema_details']['root']:
108 msg = "Schema {} has attribute root=true and therefore cannot be reused.".format(
109 reused_schema['field_details']['name'])
110 raise ValueError(msg)
111 elif destination_schema and destination_schema['schema_details']['root']:
112 msg = "Schema {} has attribute root=true and therefore cannot have other field sets reused inside it.".format(
113 destination_schema['field_details']['name'])
114 raise ValueError(msg)
115
116
117 def append_reused_here(reused_schema, reuse_entry, destination_schema):
118 """Captures two ways of denoting what field sets are reused under a given field set"""
119 # Legacy, too limited
120 destination_schema['schema_details'].setdefault('nestings', [])
121 destination_schema['schema_details']['nestings'] = sorted(
122 destination_schema['schema_details']['nestings'] + [reuse_entry['full']]
123 )
124 # New roomier way: we could eventually include contextual description here
125 destination_schema['schema_details'].setdefault('reused_here', [])
126 reused_here_entry = {
127 'schema_name': reused_schema['field_details']['name'],
128 'full': reuse_entry['full'],
129 # Check for a short override, if not present, fall back to the top-level fieldset's short
130 'short': reuse_entry['short_override'] if 'short_override' in reuse_entry else reused_schema['field_details']['short']
131 }
132 # Check for beta attribute
133 if 'beta' in reuse_entry:
134 reused_here_entry['beta'] = reuse_entry['beta']
135 destination_schema['schema_details']['reused_here'].extend([reused_here_entry])
136
137
138 def set_original_fieldset(fields, original_fieldset):
139 """Recursively set the 'original_fieldset' attribute for all fields in a group of fields"""
140 def func(details):
141 # Don't override if already set (e.g. 'group' for user.group.* fields)
142 details['field_details'].setdefault('original_fieldset', original_fieldset)
143 visitor.visit_fields(fields, field_func=func)
144
145
146 def field_group_at_path(dotted_path, fields):
147 """Returns the ['fields'] hash at the dotted_path."""
148 path = dotted_path.split('.')
149 nesting = fields
150 for next_field in path:
151 field = nesting.get(next_field, None)
152 if not field:
153 raise ValueError("Field {} not found, failed to find {}".format(dotted_path, next_field))
154 nesting = field.get('fields', None)
155 if not nesting:
156 field_type = field['field_details']['type']
157 if field_type in ['object', 'group', 'nested']:
158 nesting = field['fields'] = {}
159 else:
160 raise ValueError("Field {} (type {}) already exists and cannot have nested fields".format(
161 dotted_path, field_type))
162 return nesting
163
164
165 def calculate_final_values(fields):
166 """
167 This function navigates all fields recursively.
168
169 It populates a few more values for the fields, especially path-based values
170 like flat_name.
171 """
172 visitor.visit_fields_with_path(fields, field_finalizer)
173
174
175 def field_finalizer(details, path):
176 """This is the function called by the visitor to perform the work of calculate_final_values"""
177 name_array = path + [details['field_details']['node_name']]
178 flat_name = '.'.join(name_array)
179 details['field_details']['flat_name'] = flat_name
180 details['field_details']['dashed_name'] = re.sub('[_\.]', '-', flat_name).replace('@', '')
181 if 'multi_fields' in details['field_details']:
182 for mf in details['field_details']['multi_fields']:
183 mf['flat_name'] = flat_name + '.' + mf['name']
184
[end of scripts/schema/finalizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/schema/finalizer.py b/scripts/schema/finalizer.py
--- a/scripts/schema/finalizer.py
+++ b/scripts/schema/finalizer.py
@@ -89,7 +89,11 @@
new_field_details['name'] = nest_as
new_field_details['original_fieldset'] = schema_name
new_field_details['intermediate'] = True
- destination_fields = schema['fields']
+ # to handle multi-level self-nesting
+ if reuse_entry['at'] != schema_name:
+ destination_fields = field_group_at_path(reuse_entry['at'], fields)
+ else:
+ destination_fields = schema['fields']
destination_fields[nest_as] = {
'field_details': new_field_details,
# Make a new copy of the pristine copy
| {"golden_diff": "diff --git a/scripts/schema/finalizer.py b/scripts/schema/finalizer.py\n--- a/scripts/schema/finalizer.py\n+++ b/scripts/schema/finalizer.py\n@@ -89,7 +89,11 @@\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n- destination_fields = schema['fields']\n+ # to handle multi-level self-nesting\n+ if reuse_entry['at'] != schema_name:\n+ destination_fields = field_group_at_path(reuse_entry['at'], fields)\n+ else:\n+ destination_fields = schema['fields']\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n # Make a new copy of the pristine copy\n", "issue": "Multi-level self-nesting support\n### Problem\r\n\r\nIn ECS a self-nesting is when one field set is nested inside itself using a different name:\r\n\r\nTwo examples:\r\n\r\n* `process.parent.*` self-nests the `process` field set renamed as `parent`\r\n* `user.target.*` self-nests the `user.*` field set renamed as `target` (same applies to `user.changes` and `user.effective`)\r\n\r\nIn a _very_ specific case, you may want to self nest the same field set twice:\r\n\r\n1. Reuse `process.*` as `process.target.*`\r\n2. Reuse `process` again but underneath `process.target.*` to create `process.target.parent.*`\r\n\r\nThe current ECS tooling allows specifying this in the `reusable.expected` section, but it will not produce the desired result:\r\n\r\n```json\r\n- name: process\r\n reusable:\r\n expected:\r\n - at: process\r\n as: target\r\n # collect the parent of the target process at process.target.parent\r\n - at: process.target\r\n as: parent\r\n```\r\n\r\n### Solution\r\n\r\nImplement logic into the [self-nesting phase](https://github.com/elastic/ecs/blob/master/scripts/schema/finalizer.py#L78-L98) of `.finalize` to account for when the `at` attribute is not the top-level field set but an existing self-nesting.\n", "before_files": [{"content": "import copy\nimport re\n\nfrom schema import visitor\n\n# This script takes the fleshed out deeply nested fields dictionary as emitted by\n# cleaner.py, and performs field reuse in two phases.\n#\n# Phase 1 performs field reuse across field sets. E.g. 
`group` fields should also be under `user`.\n# This type of reuse is then carried around if the receiving field set is also reused.\n# In other words, user.group.* will be in other places where user is nested:\n# source.user.* will contain source.user.group.*\n\n# Phase 2 performs field reuse where field sets are reused within themselves, with a different name.\n# Examples are nesting `process` within itself, as `process.parent.*`,\n# or nesting `user` within itself at `user.target.*`.\n# This second kind of nesting is not carried around everywhere else the receiving field set is reused.\n# So `user.target.*` is *not* carried over to `source.user.target*` when we reuse `user` under `source`.\n\n\ndef finalize(fields):\n \"\"\"Intended entrypoint of the finalizer.\"\"\"\n perform_reuse(fields)\n calculate_final_values(fields)\n\n\ndef order_reuses(fields):\n foreign_reuses = {}\n self_nestings = {}\n for schema_name, schema in fields.items():\n if not 'reusable' in schema['schema_details']:\n continue\n reuse_order = schema['schema_details']['reusable']['order']\n for reuse_entry in schema['schema_details']['reusable']['expected']:\n destination_schema_name = reuse_entry['full'].split('.')[0]\n if destination_schema_name == schema_name:\n # Accumulate self-nestings for phase 2.\n self_nestings.setdefault(destination_schema_name, [])\n self_nestings[destination_schema_name].extend([reuse_entry])\n else:\n # Group foreign reuses by 'order' attribute.\n foreign_reuses.setdefault(reuse_order, {})\n foreign_reuses[reuse_order].setdefault(schema_name, [])\n foreign_reuses[reuse_order][schema_name].extend([reuse_entry])\n return foreign_reuses, self_nestings\n\n\ndef perform_reuse(fields):\n \"\"\"Performs field reuse in two phases\"\"\"\n foreign_reuses, self_nestings = order_reuses(fields)\n\n # Phase 1: foreign reuse\n # These are done respecting the reusable.order attribute.\n # This lets us force the order for chained reuses (e.g. 
group => user, then user => many places)\n for order in sorted(foreign_reuses.keys()):\n for schema_name, reuse_entries in foreign_reuses[order].items():\n schema = fields[schema_name]\n for reuse_entry in reuse_entries:\n # print(order, \"{} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n destination_schema_name = reuse_entry['full'].split('.')[0]\n destination_schema = fields[destination_schema_name]\n ensure_valid_reuse(schema, destination_schema)\n\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n destination_fields = field_group_at_path(reuse_entry['at'], fields)\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n 'fields': reused_fields,\n }\n append_reused_here(schema, reuse_entry, destination_schema)\n\n # Phase 2: self-nesting\n for schema_name, reuse_entries in self_nestings.items():\n schema = fields[schema_name]\n ensure_valid_reuse(schema)\n # Since we're about self-nest more fields within these, make a pristine copy first\n reused_fields = copy.deepcopy(schema['fields'])\n set_original_fieldset(reused_fields, schema_name)\n for reuse_entry in reuse_entries:\n # print(\"x {} => {}\".format(schema_name, reuse_entry['full']))\n nest_as = reuse_entry['as']\n new_field_details = copy.deepcopy(schema['field_details'])\n new_field_details['name'] = nest_as\n new_field_details['original_fieldset'] = schema_name\n new_field_details['intermediate'] = True\n destination_fields = schema['fields']\n destination_fields[nest_as] = {\n 'field_details': new_field_details,\n # Make a new copy of the pristine copy\n 'fields': copy.deepcopy(reused_fields),\n }\n append_reused_here(schema, reuse_entry, fields[schema_name])\n\n\ndef ensure_valid_reuse(reused_schema, destination_schema=None):\n \"\"\"\n Raise if either the reused schema or destination schema have root=true.\n\n Second param is optional, if testing for a self-nesting (where source=destination).\n \"\"\"\n if reused_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot be reused.\".format(\n reused_schema['field_details']['name'])\n raise ValueError(msg)\n elif destination_schema and destination_schema['schema_details']['root']:\n msg = \"Schema {} has attribute root=true and therefore cannot have other field sets reused inside it.\".format(\n destination_schema['field_details']['name'])\n raise ValueError(msg)\n\n\ndef append_reused_here(reused_schema, reuse_entry, destination_schema):\n \"\"\"Captures two ways of denoting what field sets are reused under a given field set\"\"\"\n # Legacy, too limited\n destination_schema['schema_details'].setdefault('nestings', [])\n destination_schema['schema_details']['nestings'] = sorted(\n destination_schema['schema_details']['nestings'] + [reuse_entry['full']]\n )\n # New roomier way: we could eventually include contextual description here\n destination_schema['schema_details'].setdefault('reused_here', [])\n reused_here_entry = {\n 'schema_name': reused_schema['field_details']['name'],\n 'full': reuse_entry['full'],\n # Check for a short override, if not present, fall back to the top-level fieldset's short\n 'short': reuse_entry['short_override'] if 'short_override' in reuse_entry else reused_schema['field_details']['short']\n }\n # Check for 
beta attribute\n if 'beta' in reuse_entry:\n reused_here_entry['beta'] = reuse_entry['beta']\n destination_schema['schema_details']['reused_here'].extend([reused_here_entry])\n\n\ndef set_original_fieldset(fields, original_fieldset):\n \"\"\"Recursively set the 'original_fieldset' attribute for all fields in a group of fields\"\"\"\n def func(details):\n # Don't override if already set (e.g. 'group' for user.group.* fields)\n details['field_details'].setdefault('original_fieldset', original_fieldset)\n visitor.visit_fields(fields, field_func=func)\n\n\ndef field_group_at_path(dotted_path, fields):\n \"\"\"Returns the ['fields'] hash at the dotted_path.\"\"\"\n path = dotted_path.split('.')\n nesting = fields\n for next_field in path:\n field = nesting.get(next_field, None)\n if not field:\n raise ValueError(\"Field {} not found, failed to find {}\".format(dotted_path, next_field))\n nesting = field.get('fields', None)\n if not nesting:\n field_type = field['field_details']['type']\n if field_type in ['object', 'group', 'nested']:\n nesting = field['fields'] = {}\n else:\n raise ValueError(\"Field {} (type {}) already exists and cannot have nested fields\".format(\n dotted_path, field_type))\n return nesting\n\n\ndef calculate_final_values(fields):\n \"\"\"\n This function navigates all fields recursively.\n\n It populates a few more values for the fields, especially path-based values\n like flat_name.\n \"\"\"\n visitor.visit_fields_with_path(fields, field_finalizer)\n\n\ndef field_finalizer(details, path):\n \"\"\"This is the function called by the visitor to perform the work of calculate_final_values\"\"\"\n name_array = path + [details['field_details']['node_name']]\n flat_name = '.'.join(name_array)\n details['field_details']['flat_name'] = flat_name\n details['field_details']['dashed_name'] = re.sub('[_\\.]', '-', flat_name).replace('@', '')\n if 'multi_fields' in details['field_details']:\n for mf in details['field_details']['multi_fields']:\n mf['flat_name'] = flat_name + '.' + mf['name']\n", "path": "scripts/schema/finalizer.py"}]} | 3,086 | 175 |
gh_patches_debug_18879 | rasdani/github-patches | git_diff | netbox-community__netbox-8292 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Circuits list view to display formatted commit rate
### NetBox version
3.1.2
### Feature type
Change to existing functionality
### Proposed functionality
The current circuit list view (/circuits/circuits/) has a column called "Commit Rate (kbps)" and shows the raw rate in kbps, e.g. 1000000.
However, when looking at the circuit details, the commit rate is translated into something more human-readable, e.g. 1 Gbps.
I propose either changing the existing Commit Rate (kbps) column to also translate the commit rate, or adding an extra column simply called Commit Rate that shows the human-readable version.
### Use case
It would be easier for non-technical users to quickly see the commit rate of a circuit in the table view, and it would bring the list closer to parity with the circuit details view.
### Database changes
_No response_
### External dependencies
_No response_
</issue>
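One way to approach this is a custom django-tables2 column that renders the stored kbps value through NetBox's `humanize_speed` template filter while keeping the raw number for exports. A rough sketch (class name and placement are illustrative):

```python
import django_tables2 as tables


class CommitRateColumn(tables.TemplateColumn):
    """Render the commit rate in human-readable form (e.g. 1 Gbps)."""
    template_code = """
    {% load helpers %}
    {{ record.commit_rate|humanize_speed }}
    """

    def __init__(self, *args, **kwargs):
        super().__init__(template_code=self.template_code, *args, **kwargs)

    def value(self, value):
        # Keep the raw kbps value for CSV/export output.
        return str(value) if value else None
```

The circuit table would then declare `commit_rate = CommitRateColumn()` instead of relying on the default column.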
<code>
[start of netbox/circuits/tables.py]
1 import django_tables2 as tables
2 from django_tables2.utils import Accessor
3
4 from tenancy.tables import TenantColumn
5 from utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn
6 from .models import *
7
8
9 __all__ = (
10 'CircuitTable',
11 'CircuitTypeTable',
12 'ProviderTable',
13 'ProviderNetworkTable',
14 )
15
16
17 CIRCUITTERMINATION_LINK = """
18 {% if value.site %}
19 <a href="{{ value.site.get_absolute_url }}">{{ value.site }}</a>
20 {% elif value.provider_network %}
21 <a href="{{ value.provider_network.get_absolute_url }}">{{ value.provider_network }}</a>
22 {% endif %}
23 """
24
25
26 #
27 # Providers
28 #
29
30 class ProviderTable(BaseTable):
31 pk = ToggleColumn()
32 name = tables.Column(
33 linkify=True
34 )
35 circuit_count = tables.Column(
36 accessor=Accessor('count_circuits'),
37 verbose_name='Circuits'
38 )
39 comments = MarkdownColumn()
40 tags = TagColumn(
41 url_name='circuits:provider_list'
42 )
43
44 class Meta(BaseTable.Meta):
45 model = Provider
46 fields = (
47 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',
48 'comments', 'tags',
49 )
50 default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')
51
52
53 #
54 # Provider networks
55 #
56
57 class ProviderNetworkTable(BaseTable):
58 pk = ToggleColumn()
59 name = tables.Column(
60 linkify=True
61 )
62 provider = tables.Column(
63 linkify=True
64 )
65 comments = MarkdownColumn()
66 tags = TagColumn(
67 url_name='circuits:providernetwork_list'
68 )
69
70 class Meta(BaseTable.Meta):
71 model = ProviderNetwork
72 fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags')
73 default_columns = ('pk', 'name', 'provider', 'description')
74
75
76 #
77 # Circuit types
78 #
79
80 class CircuitTypeTable(BaseTable):
81 pk = ToggleColumn()
82 name = tables.Column(
83 linkify=True
84 )
85 tags = TagColumn(
86 url_name='circuits:circuittype_list'
87 )
88 circuit_count = tables.Column(
89 verbose_name='Circuits'
90 )
91 actions = ButtonsColumn(CircuitType)
92
93 class Meta(BaseTable.Meta):
94 model = CircuitType
95 fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')
96 default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')
97
98
99 #
100 # Circuits
101 #
102
103 class CircuitTable(BaseTable):
104 pk = ToggleColumn()
105 cid = tables.Column(
106 linkify=True,
107 verbose_name='Circuit ID'
108 )
109 provider = tables.Column(
110 linkify=True
111 )
112 status = ChoiceFieldColumn()
113 tenant = TenantColumn()
114 termination_a = tables.TemplateColumn(
115 template_code=CIRCUITTERMINATION_LINK,
116 verbose_name='Side A'
117 )
118 termination_z = tables.TemplateColumn(
119 template_code=CIRCUITTERMINATION_LINK,
120 verbose_name='Side Z'
121 )
122 comments = MarkdownColumn()
123 tags = TagColumn(
124 url_name='circuits:circuit_list'
125 )
126
127 class Meta(BaseTable.Meta):
128 model = Circuit
129 fields = (
130 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',
131 'commit_rate', 'description', 'comments', 'tags',
132 )
133 default_columns = (
134 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',
135 )
136
[end of netbox/circuits/tables.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py
--- a/netbox/circuits/tables.py
+++ b/netbox/circuits/tables.py
@@ -22,11 +22,32 @@
{% endif %}
"""
+#
+# Table columns
+#
+
+
+class CommitRateColumn(tables.TemplateColumn):
+ """
+ Humanize the commit rate in the column view
+ """
+
+ template_code = """
+ {% load helpers %}
+ {{ record.commit_rate|humanize_speed }}
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(template_code=self.template_code, *args, **kwargs)
+
+ def value(self, value):
+ return str(value) if value else None
#
# Providers
#
+
class ProviderTable(BaseTable):
pk = ToggleColumn()
name = tables.Column(
@@ -119,6 +140,7 @@
template_code=CIRCUITTERMINATION_LINK,
verbose_name='Side Z'
)
+ commit_rate = CommitRateColumn()
comments = MarkdownColumn()
tags = TagColumn(
url_name='circuits:circuit_list'
| {"golden_diff": "diff --git a/netbox/circuits/tables.py b/netbox/circuits/tables.py\n--- a/netbox/circuits/tables.py\n+++ b/netbox/circuits/tables.py\n@@ -22,11 +22,32 @@\n {% endif %}\n \"\"\"\n \n+#\n+# Table columns\n+#\n+\n+\n+class CommitRateColumn(tables.TemplateColumn):\n+ \"\"\"\n+ Humanize the commit rate in the column view\n+ \"\"\"\n+\n+ template_code = \"\"\"\n+ {% load helpers %}\n+ {{ record.commit_rate|humanize_speed }}\n+ \"\"\"\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(template_code=self.template_code, *args, **kwargs)\n+\n+ def value(self, value):\n+ return str(value) if value else None\n \n #\n # Providers\n #\n \n+\n class ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n@@ -119,6 +140,7 @@\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n+ commit_rate = CommitRateColumn()\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n", "issue": "Circuits list view to display formatted commit rate\n### NetBox version\n\n3.1.2\n\n### Feature type\n\nChange to existing functionality\n\n### Proposed functionality\n\nThe current circuit list view (/circuits/circuits/) has a column called \"Commit Rate (kbps) and shows the rate in kbps i.e. 1000000\r\n\r\nHowever when looking at the circuit details, the commit rate is translated into something more human readable i.e 1 Gbps\r\n\r\nProposing either changing the existing Commit Rate (kbps) column to also translate the commit rate or the addition of an extra column simply called Commit Rate that has the human readable version.\n\n### Use case\n\nEasier for non-technical users to quickly see the commit rate of a circuit in the table view. Brings more parity to the circuit details view.\n\n### Database changes\n\n_No response_\n\n### External dependencies\n\n_No response_\n", "before_files": [{"content": "import django_tables2 as tables\nfrom django_tables2.utils import Accessor\n\nfrom tenancy.tables import TenantColumn\nfrom utilities.tables import BaseTable, ButtonsColumn, ChoiceFieldColumn, MarkdownColumn, TagColumn, ToggleColumn\nfrom .models import *\n\n\n__all__ = (\n 'CircuitTable',\n 'CircuitTypeTable',\n 'ProviderTable',\n 'ProviderNetworkTable',\n)\n\n\nCIRCUITTERMINATION_LINK = \"\"\"\n{% if value.site %}\n <a href=\"{{ value.site.get_absolute_url }}\">{{ value.site }}</a>\n{% elif value.provider_network %}\n <a href=\"{{ value.provider_network.get_absolute_url }}\">{{ value.provider_network }}</a>\n{% endif %}\n\"\"\"\n\n\n#\n# Providers\n#\n\nclass ProviderTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n circuit_count = tables.Column(\n accessor=Accessor('count_circuits'),\n verbose_name='Circuits'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:provider_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Provider\n fields = (\n 'pk', 'id', 'name', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'circuit_count',\n 'comments', 'tags',\n )\n default_columns = ('pk', 'name', 'asn', 'account', 'circuit_count')\n\n\n#\n# Provider networks\n#\n\nclass ProviderNetworkTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n provider = tables.Column(\n linkify=True\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:providernetwork_list'\n )\n\n class Meta(BaseTable.Meta):\n model = ProviderNetwork\n fields = ('pk', 'id', 'name', 'provider', 'description', 'comments', 'tags')\n default_columns = ('pk', 
'name', 'provider', 'description')\n\n\n#\n# Circuit types\n#\n\nclass CircuitTypeTable(BaseTable):\n pk = ToggleColumn()\n name = tables.Column(\n linkify=True\n )\n tags = TagColumn(\n url_name='circuits:circuittype_list'\n )\n circuit_count = tables.Column(\n verbose_name='Circuits'\n )\n actions = ButtonsColumn(CircuitType)\n\n class Meta(BaseTable.Meta):\n model = CircuitType\n fields = ('pk', 'id', 'name', 'circuit_count', 'description', 'slug', 'tags', 'actions')\n default_columns = ('pk', 'name', 'circuit_count', 'description', 'slug', 'actions')\n\n\n#\n# Circuits\n#\n\nclass CircuitTable(BaseTable):\n pk = ToggleColumn()\n cid = tables.Column(\n linkify=True,\n verbose_name='Circuit ID'\n )\n provider = tables.Column(\n linkify=True\n )\n status = ChoiceFieldColumn()\n tenant = TenantColumn()\n termination_a = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side A'\n )\n termination_z = tables.TemplateColumn(\n template_code=CIRCUITTERMINATION_LINK,\n verbose_name='Side Z'\n )\n comments = MarkdownColumn()\n tags = TagColumn(\n url_name='circuits:circuit_list'\n )\n\n class Meta(BaseTable.Meta):\n model = Circuit\n fields = (\n 'pk', 'id', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'install_date',\n 'commit_rate', 'description', 'comments', 'tags',\n )\n default_columns = (\n 'pk', 'cid', 'provider', 'type', 'status', 'tenant', 'termination_a', 'termination_z', 'description',\n )\n", "path": "netbox/circuits/tables.py"}]} | 1,866 | 271 |
gh_patches_debug_34996 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-1618 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make CMakeMake easyblock immune to system boost.
As mentioned in
https://github.com/easybuilders/easybuild-easyconfigs/pull/7149
if an old boost-devel rpm is installed on CentOS 6:
```
boost-devel-1.41.0-25.el6.centos.x86_64
$ rpm -ql boost-devel-1.41.0-25.el6.centos.x86_64 | grep -i cmake
/usr/lib64/boost/Boost-relwithdebinfo.cmake
/usr/lib64/boost/Boost.cmake
/usr/lib64/boost/BoostConfig.cmake
/usr/lib64/boost/BoostConfigVersion.cmake
-- Boost found.
Boost Include: /usr/include
Boost Linkdir: /usr/lib64
```
then anything that combines CMake and Boost gets confused by this, as the EasyBuild-provided Boost does not include any CMake config files.
adding `-DBoost_NO_SYSTEM_PATHS=ON -DBoost_NO_BOOST_CMAKE=ON`
avoids this.
ref:
https://github.com/Kitware/CMake/blob/master/Modules/FindBoost.cmake
(which shows that if it finds a Boost-cmake config file, it's game over)
https://cmake.org/cmake/help/v3.12/module/FindBoost.html
</issue>
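In EasyBuild terms the workaround amounts to pointing CMake at the Boost dependency (when one is present) and telling FindBoost to ignore both system paths and any stray BoostConfig.cmake. A hedged sketch of what that could look like when assembling the CMake options in the easyblock (`options` is the list built up in `configure_step` below):

```python
from easybuild.tools.modules import get_software_root

# Sketch: only pin Boost when it is provided as an EasyBuild dependency,
# otherwise leave CMake's normal Boost detection alone.
boost_root = get_software_root('Boost')
if boost_root:
    options.extend([
        '-DBOOST_ROOT=%s' % boost_root,  # point FindBoost at the EB install
        '-DBoost_NO_SYSTEM_PATHS=ON',    # skip /usr/include, /usr/lib64, ...
        '-DBoost_NO_BOOST_CMAKE=ON',     # ignore the OS Boost*.cmake files
    ])
```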
<code>
[start of easybuild/easyblocks/generic/cmakemake.py]
1 ##
2 # Copyright 2009-2019 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for software that is configured with CMake, implemented as an easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Pieter De Baets (Ghent University)
32 @author: Jens Timmerman (Ghent University)
33 @author: Ward Poelmans (Ghent University)
34 @author: Maxime Boissonneault (Compute Canada - Universite Laval)
35 """
36 import os
37
38 from easybuild.easyblocks.generic.configuremake import ConfigureMake
39 from easybuild.framework.easyconfig import CUSTOM
40 from easybuild.tools.config import build_option
41 from easybuild.tools.filetools import change_dir, mkdir, which
42 from easybuild.tools.environment import setvar
43 from easybuild.tools.run import run_cmd
44 from vsc.utils.missing import nub
45
46
47 DEFAULT_CONFIGURE_CMD = 'cmake'
48
49
50 class CMakeMake(ConfigureMake):
51 """Support for configuring build with CMake instead of traditional configure script"""
52
53 @staticmethod
54 def extra_options(extra_vars=None):
55 """Define extra easyconfig parameters specific to CMakeMake."""
56 extra_vars = ConfigureMake.extra_options(extra_vars)
57 extra_vars.update({
58 'abs_path_compilers': [False, "Specify compilers via absolute file path (not via command names)", CUSTOM],
59 'configure_cmd': [DEFAULT_CONFIGURE_CMD, "Configure command to use", CUSTOM],
60 'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
61 'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
62 })
63 return extra_vars
64
65 def configure_step(self, srcdir=None, builddir=None):
66 """Configure build using cmake"""
67
68 # Set the search paths for CMake
69 tc_ipaths = self.toolchain.get_variable("CPPFLAGS", list)
70 tc_lpaths = self.toolchain.get_variable("LDFLAGS", list)
71 cpaths = os.getenv('CPATH', '').split(os.pathsep)
72 lpaths = os.getenv('LD_LIBRARY_PATH', '').split(os.pathsep)
73 include_paths = os.pathsep.join(nub(tc_ipaths + cpaths))
74 library_paths = os.pathsep.join(nub(tc_lpaths + lpaths))
75 setvar("CMAKE_INCLUDE_PATH", include_paths)
76 setvar("CMAKE_LIBRARY_PATH", library_paths)
77
78 if builddir is None and self.cfg.get('separate_build_dir', False):
79 builddir = os.path.join(self.builddir, 'easybuild_obj')
80
81 if builddir:
82 mkdir(builddir, parents=True)
83 change_dir(builddir)
84 default_srcdir = self.cfg['start_dir']
85 else:
86 default_srcdir = '.'
87
88 if srcdir is None:
89 if self.cfg.get('srcdir', None) is not None:
90 srcdir = self.cfg['srcdir']
91 else:
92 srcdir = default_srcdir
93
94 options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]
95 env_to_options = {
96 'CC': 'CMAKE_C_COMPILER',
97 'CFLAGS': 'CMAKE_C_FLAGS',
98 'CXX': 'CMAKE_CXX_COMPILER',
99 'CXXFLAGS': 'CMAKE_CXX_FLAGS',
100 'F90': 'CMAKE_Fortran_COMPILER',
101 'FFLAGS': 'CMAKE_Fortran_FLAGS',
102 }
103 for env_name, option in env_to_options.items():
104 value = os.getenv(env_name)
105 if value is not None:
106 if option.endswith('_COMPILER') and self.cfg.get('abs_path_compilers', False):
107 value = which(value)
108 self.log.info("Using absolute path to compiler command: %s", value)
109 options.append("-D%s='%s'" % (option, value))
110
111 if build_option('rpath'):
112 # instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...
113 # https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416
114 options.append('-DCMAKE_SKIP_RPATH=ON')
115
116 # show what CMake is doing by default
117 options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
118
119 options_string = ' '.join(options)
120
121 command = ' '.join([
122 self.cfg['preconfigopts'],
123 self.cfg.get('configure_cmd') or DEFAULT_CONFIGURE_CMD,
124 options_string,
125 self.cfg['configopts'],
126 srcdir])
127 (out, _) = run_cmd(command, log_all=True, simple=False)
128
129 return out
130
[end of easybuild/easyblocks/generic/cmakemake.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/easybuild/easyblocks/generic/cmakemake.py b/easybuild/easyblocks/generic/cmakemake.py
--- a/easybuild/easyblocks/generic/cmakemake.py
+++ b/easybuild/easyblocks/generic/cmakemake.py
@@ -40,6 +40,7 @@
from easybuild.tools.config import build_option
from easybuild.tools.filetools import change_dir, mkdir, which
from easybuild.tools.environment import setvar
+from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from vsc.utils.missing import nub
@@ -56,6 +57,8 @@
extra_vars = ConfigureMake.extra_options(extra_vars)
extra_vars.update({
'abs_path_compilers': [False, "Specify compilers via absolute file path (not via command names)", CUSTOM],
+ 'allow_system_boost': [False, "Always allow CMake to pick up on Boost installed in OS "
+ "(even if Boost is included as a dependency)", CUSTOM],
'configure_cmd': [DEFAULT_CONFIGURE_CMD, "Configure command to use", CUSTOM],
'srcdir': [None, "Source directory location to provide to cmake command", CUSTOM],
'separate_build_dir': [False, "Perform build in a separate directory", CUSTOM],
@@ -116,6 +119,19 @@
# show what CMake is doing by default
options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')
+ if not self.cfg.get('allow_system_boost', False):
+ # don't pick up on system Boost if Boost is included as dependency
+ # - specify Boost location via -DBOOST_ROOT
+ # - instruct CMake to not search for Boost headers/libraries in other places
+ # - disable search for Boost CMake package configuration file
+ boost_root = get_software_root('Boost')
+ if boost_root:
+ options.extend([
+ '-DBOOST_ROOT=%s' % boost_root,
+ '-DBoost_NO_SYSTEM_PATHS=ON',
+ '-DBoost_NO_BOOST_CMAKE=ON',
+ ])
+
options_string = ' '.join(options)
command = ' '.join([
| {"golden_diff": "diff --git a/easybuild/easyblocks/generic/cmakemake.py b/easybuild/easyblocks/generic/cmakemake.py\n--- a/easybuild/easyblocks/generic/cmakemake.py\n+++ b/easybuild/easyblocks/generic/cmakemake.py\n@@ -40,6 +40,7 @@\n from easybuild.tools.config import build_option\n from easybuild.tools.filetools import change_dir, mkdir, which\n from easybuild.tools.environment import setvar\n+from easybuild.tools.modules import get_software_root\n from easybuild.tools.run import run_cmd\n from vsc.utils.missing import nub\n \n@@ -56,6 +57,8 @@\n extra_vars = ConfigureMake.extra_options(extra_vars)\n extra_vars.update({\n 'abs_path_compilers': [False, \"Specify compilers via absolute file path (not via command names)\", CUSTOM],\n+ 'allow_system_boost': [False, \"Always allow CMake to pick up on Boost installed in OS \"\n+ \"(even if Boost is included as a dependency)\", CUSTOM],\n 'configure_cmd': [DEFAULT_CONFIGURE_CMD, \"Configure command to use\", CUSTOM],\n 'srcdir': [None, \"Source directory location to provide to cmake command\", CUSTOM],\n 'separate_build_dir': [False, \"Perform build in a separate directory\", CUSTOM],\n@@ -116,6 +119,19 @@\n # show what CMake is doing by default\n options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')\n \n+ if not self.cfg.get('allow_system_boost', False):\n+ # don't pick up on system Boost if Boost is included as dependency\n+ # - specify Boost location via -DBOOST_ROOT\n+ # - instruct CMake to not search for Boost headers/libraries in other places\n+ # - disable search for Boost CMake package configuration file\n+ boost_root = get_software_root('Boost')\n+ if boost_root:\n+ options.extend([\n+ '-DBOOST_ROOT=%s' % boost_root,\n+ '-DBoost_NO_SYSTEM_PATHS=ON',\n+ '-DBoost_NO_BOOST_CMAKE=ON',\n+ ])\n+\n options_string = ' '.join(options)\n \n command = ' '.join([\n", "issue": "Make CMakeMake easyblock immune to system boost.\nAs mentioned in \r\nhttps://github.com/easybuilders/easybuild-easyconfigs/pull/7149\r\nif an old boost-devel rpm installed on CentOS6:\r\n```\r\nboost-devel-1.41.0-25.el6.centos.x86_64\r\n\r\n$ rpm -ql boost-devel-1.41.0-25.el6.centos.x86_64 | grep -i cmake\r\n/usr/lib64/boost/Boost-relwithdebinfo.cmake\r\n/usr/lib64/boost/Boost.cmake\r\n/usr/lib64/boost/BoostConfig.cmake\r\n/usr/lib64/boost/BoostConfigVersion.cmake\r\n\r\n-- Boost found.\r\nBoost Include: /usr/include\r\nBoost Linkdir: /usr/lib64\r\n```\r\nthen anything that combines cmake and boost gets confused by this as the EB boost does not include any cmake files.\r\n\r\nadding `-DBoost_NO_SYSTEM_PATHS=ON -DBoost_NO_BOOST_CMAKE=ON`\r\navoids this.\r\n\r\nref:\r\nhttps://github.com/Kitware/CMake/blob/master/Modules/FindBoost.cmake\r\n(which shows that if it finds a Boost-cmake config file, it's game over)\r\nhttps://cmake.org/cmake/help/v3.12/module/FindBoost.html\n", "before_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# 
EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for software that is configured with CMake, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Ward Poelmans (Ghent University)\n@author: Maxime Boissonneault (Compute Canada - Universite Laval)\n\"\"\"\nimport os\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.filetools import change_dir, mkdir, which\nfrom easybuild.tools.environment import setvar\nfrom easybuild.tools.run import run_cmd\nfrom vsc.utils.missing import nub\n\n\nDEFAULT_CONFIGURE_CMD = 'cmake'\n\n\nclass CMakeMake(ConfigureMake):\n \"\"\"Support for configuring build with CMake instead of traditional configure script\"\"\"\n\n @staticmethod\n def extra_options(extra_vars=None):\n \"\"\"Define extra easyconfig parameters specific to CMakeMake.\"\"\"\n extra_vars = ConfigureMake.extra_options(extra_vars)\n extra_vars.update({\n 'abs_path_compilers': [False, \"Specify compilers via absolute file path (not via command names)\", CUSTOM],\n 'configure_cmd': [DEFAULT_CONFIGURE_CMD, \"Configure command to use\", CUSTOM],\n 'srcdir': [None, \"Source directory location to provide to cmake command\", CUSTOM],\n 'separate_build_dir': [False, \"Perform build in a separate directory\", CUSTOM],\n })\n return extra_vars\n\n def configure_step(self, srcdir=None, builddir=None):\n \"\"\"Configure build using cmake\"\"\"\n\n # Set the search paths for CMake\n tc_ipaths = self.toolchain.get_variable(\"CPPFLAGS\", list)\n tc_lpaths = self.toolchain.get_variable(\"LDFLAGS\", list)\n cpaths = os.getenv('CPATH', '').split(os.pathsep)\n lpaths = os.getenv('LD_LIBRARY_PATH', '').split(os.pathsep)\n include_paths = os.pathsep.join(nub(tc_ipaths + cpaths))\n library_paths = os.pathsep.join(nub(tc_lpaths + lpaths))\n setvar(\"CMAKE_INCLUDE_PATH\", include_paths)\n setvar(\"CMAKE_LIBRARY_PATH\", library_paths)\n\n if builddir is None and self.cfg.get('separate_build_dir', False):\n builddir = os.path.join(self.builddir, 'easybuild_obj')\n\n if builddir:\n mkdir(builddir, parents=True)\n change_dir(builddir)\n default_srcdir = self.cfg['start_dir']\n else:\n default_srcdir = '.'\n\n if srcdir is None:\n if self.cfg.get('srcdir', None) is not None:\n srcdir = self.cfg['srcdir']\n else:\n srcdir = default_srcdir\n\n options = ['-DCMAKE_INSTALL_PREFIX=%s' % self.installdir]\n env_to_options = {\n 'CC': 'CMAKE_C_COMPILER',\n 'CFLAGS': 'CMAKE_C_FLAGS',\n 'CXX': 'CMAKE_CXX_COMPILER',\n 'CXXFLAGS': 'CMAKE_CXX_FLAGS',\n 'F90': 'CMAKE_Fortran_COMPILER',\n 'FFLAGS': 'CMAKE_Fortran_FLAGS',\n }\n for env_name, option in env_to_options.items():\n value = os.getenv(env_name)\n if value is not None:\n if option.endswith('_COMPILER') and self.cfg.get('abs_path_compilers', False):\n value = which(value)\n self.log.info(\"Using absolute path to compiler command: %s\", value)\n options.append(\"-D%s='%s'\" % (option, 
value))\n\n if build_option('rpath'):\n # instruct CMake not to fiddle with RPATH when --rpath is used, since it will undo stuff on install...\n # https://github.com/LLNL/spack/blob/0f6a5cd38538e8969d11bd2167f11060b1f53b43/lib/spack/spack/build_environment.py#L416\n options.append('-DCMAKE_SKIP_RPATH=ON')\n\n # show what CMake is doing by default\n options.append('-DCMAKE_VERBOSE_MAKEFILE=ON')\n\n options_string = ' '.join(options)\n\n command = ' '.join([\n self.cfg['preconfigopts'],\n self.cfg.get('configure_cmd') or DEFAULT_CONFIGURE_CMD,\n options_string,\n self.cfg['configopts'],\n srcdir])\n (out, _) = run_cmd(command, log_all=True, simple=False)\n\n return out\n", "path": "easybuild/easyblocks/generic/cmakemake.py"}]} | 2,439 | 489 |
gh_patches_debug_22839 | rasdani/github-patches | git_diff | beetbox__beets-4086 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
unimported: Add an option to ignore some folders
I use a hard drive as my Beets library 'folder'.
Because of its size I also store some other non-imported music folders on that drive.
I ran into the situation that running 'beets unimported' showed me all the files in those unimported folders.
It's logical that the plugin scans those too, but a more specific scan would be great.
I could circumvent this by placing all Beets folders in another folder instead of the root of the drive, but that would make for a deeper hierarchy, which I wouldn't like.
### Proposed solution
Add extra options for the command line
`beets unimported /specific_folder`
or in config.yaml
```
unimported:
ignore_folders: folder-with-non-imported-files
```
</issue>
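Conceptually the plugin only needs a second ignore list, resolved against the library directory, so that walked directories matching an ignored path are dropped before the comparison with the database. A rough sketch of that filtering step inside `print_unimported` (the option name mirrors the `ignore_subdirectories` setting added in the patch further down, but treat the snippet as illustrative):

```python
import os

# Sketch: resolve configured subdirectories relative to the library root
# (lib.directory is a bytes path in beets, hence the encode()).
ignore_dirs = [
    os.path.join(lib.directory, x.encode())
    for x in self.config['ignore_subdirectories'].as_str_seq()
]

# Drop any walked directory that matches one of the ignored paths.
in_folder = {
    os.path.join(root, name)
    for root, dirs, files in os.walk(lib.directory)
    for name in files
    if root not in ignore_dirs
}
```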
<code>
[start of beetsplug/unimported.py]
1 # This file is part of beets.
2 # Copyright 2019, Joris Jensen
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """
16 List all files in the library folder which are not listed in the
17 beets library database, including art files
18 """
19
20 import os
21
22 from beets import util
23 from beets.plugins import BeetsPlugin
24 from beets.ui import Subcommand, print_
25
26 __author__ = 'https://github.com/MrNuggelz'
27
28
29 class Unimported(BeetsPlugin):
30
31 def __init__(self):
32 super().__init__()
33 self.config.add(
34 {
35 'ignore_extensions': []
36 }
37 )
38
39 def commands(self):
40 def print_unimported(lib, opts, args):
41 ignore_exts = [('.' + x).encode() for x
42 in self.config['ignore_extensions'].as_str_seq()]
43 in_folder = {
44 os.path.join(r, file) for r, d, f in os.walk(lib.directory)
45 for file in f if not any(
46 [file.endswith(extension) for extension in
47 ignore_exts])}
48 in_library = {x.path for x in lib.items()}
49 art_files = {x.artpath for x in lib.albums()}
50 for f in in_folder - in_library - art_files:
51 print_(util.displayable_path(f))
52
53 unimported = Subcommand(
54 'unimported',
55 help='list all files in the library folder which are not listed'
56 ' in the beets library database')
57 unimported.func = print_unimported
58 return [unimported]
59
[end of beetsplug/unimported.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py
--- a/beetsplug/unimported.py
+++ b/beetsplug/unimported.py
@@ -38,13 +38,23 @@
def commands(self):
def print_unimported(lib, opts, args):
- ignore_exts = [('.' + x).encode() for x
- in self.config['ignore_extensions'].as_str_seq()]
+ ignore_exts = [
+ ('.' + x).encode()
+ for x in self.config["ignore_extensions"].as_str_seq()
+ ]
+ ignore_dirs = [
+ os.path.join(lib.directory, x.encode())
+ for x in self.config["ignore_subdirectories"].as_str_seq()
+ ]
in_folder = {
- os.path.join(r, file) for r, d, f in os.walk(lib.directory)
- for file in f if not any(
- [file.endswith(extension) for extension in
- ignore_exts])}
+ os.path.join(r, file)
+ for r, d, f in os.walk(lib.directory)
+ for file in f
+ if not any(
+ [file.endswith(ext) for ext in ignore_exts]
+ + [r in ignore_dirs]
+ )
+ }
in_library = {x.path for x in lib.items()}
art_files = {x.artpath for x in lib.albums()}
for f in in_folder - in_library - art_files:
| {"golden_diff": "diff --git a/beetsplug/unimported.py b/beetsplug/unimported.py\n--- a/beetsplug/unimported.py\n+++ b/beetsplug/unimported.py\n@@ -38,13 +38,23 @@\n \n def commands(self):\n def print_unimported(lib, opts, args):\n- ignore_exts = [('.' + x).encode() for x\n- in self.config['ignore_extensions'].as_str_seq()]\n+ ignore_exts = [\n+ ('.' + x).encode()\n+ for x in self.config[\"ignore_extensions\"].as_str_seq()\n+ ]\n+ ignore_dirs = [\n+ os.path.join(lib.directory, x.encode())\n+ for x in self.config[\"ignore_subdirectories\"].as_str_seq()\n+ ]\n in_folder = {\n- os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n- for file in f if not any(\n- [file.endswith(extension) for extension in\n- ignore_exts])}\n+ os.path.join(r, file)\n+ for r, d, f in os.walk(lib.directory)\n+ for file in f\n+ if not any(\n+ [file.endswith(ext) for ext in ignore_exts]\n+ + [r in ignore_dirs]\n+ )\n+ }\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n", "issue": "unimported: Add an option to ignore some folders\nI use a hard drive as my Beets library 'folder'. \r\nBecause of its size I also store some other non-imported music folders on that drive.\r\nI ran into the situation that running 'beets unimported' showed me all the files in those unimported folders. \r\nIt's logical that the plugin scans those too but a more specific scan would be great.\r\nI could circumvent this by placing all Beets folders in another folder instead of the root of the drive but that would make for a deeper hierarchy which I wouldn't like.\r\n\r\n### Proposed solution\r\n\r\nAdd extra options for the command line\r\n`beets unimported /specific_folder`\r\nor in config.yaml\r\n```\r\nunimported:\r\n ignore_folders: folder-with-non-imported-files\r\n```\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2019, Joris Jensen\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"\nList all files in the library folder which are not listed in the\n beets library database, including art files\n\"\"\"\n\nimport os\n\nfrom beets import util\nfrom beets.plugins import BeetsPlugin\nfrom beets.ui import Subcommand, print_\n\n__author__ = 'https://github.com/MrNuggelz'\n\n\nclass Unimported(BeetsPlugin):\n\n def __init__(self):\n super().__init__()\n self.config.add(\n {\n 'ignore_extensions': []\n }\n )\n\n def commands(self):\n def print_unimported(lib, opts, args):\n ignore_exts = [('.' 
+ x).encode() for x\n in self.config['ignore_extensions'].as_str_seq()]\n in_folder = {\n os.path.join(r, file) for r, d, f in os.walk(lib.directory)\n for file in f if not any(\n [file.endswith(extension) for extension in\n ignore_exts])}\n in_library = {x.path for x in lib.items()}\n art_files = {x.artpath for x in lib.albums()}\n for f in in_folder - in_library - art_files:\n print_(util.displayable_path(f))\n\n unimported = Subcommand(\n 'unimported',\n help='list all files in the library folder which are not listed'\n ' in the beets library database')\n unimported.func = print_unimported\n return [unimported]\n", "path": "beetsplug/unimported.py"}]} | 1,276 | 331 |
gh_patches_debug_2859 | rasdani/github-patches | git_diff | spack__spack-26095 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CentOS 6 image doesn't build with clingo on Dockerhub
### Steps to reproduce
This has to do with a failure on centos:6:
```
Step 17/19 : RUN spack spec hdf5+mpi
---> Running in 8335d48ff53f
==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.
==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.
==> Warning: the original concretizer is currently being used.
Upgrade to "clingo" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0
==> Error: cannot bootstrap the "clingo" Python module from spec "clingo-bootstrap@spack+python %gcc target=x86_64"
Input spec
--------------------------------
hdf5+mpi
Concretized
--------------------------------
==> Bootstrapping clingo from pre-built binaries
The command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3
```
---
So it bootstraps *during* concretization?
</issue>
<code>
[start of lib/spack/spack/schema/container.py]
1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
2 # Spack Project Developers. See the top-level COPYRIGHT file for details.
3 #
4 # SPDX-License-Identifier: (Apache-2.0 OR MIT)
5 """Schema for the 'container' subsection of Spack environments."""
6
7 _stages_from_dockerhub = {
8 'type': 'object',
9 'additionalProperties': False,
10 'properties': {
11 'os': {
12 'type': 'string',
13 'enum': ['ubuntu:18.04',
14 'ubuntu:16.04',
15 'centos:7',
16 'centos:6']
17 },
18 'spack': {
19 'type': 'string',
20 },
21 },
22 'required': ['os', 'spack']
23 }
24
25 _custom_stages = {
26 'type': 'object',
27 'additionalProperties': False,
28 'properties': {
29 'build': {'type': 'string'},
30 'final': {'type': 'string'}
31 },
32 'required': ['build', 'final']
33 }
34
35 #: List of packages for the schema below
36 _list_of_packages = {
37 'type': 'array',
38 'items': {
39 'type': 'string'
40 }
41 }
42
43 #: Schema for the container attribute included in Spack environments
44 container_schema = {
45 'type': 'object',
46 'additionalProperties': False,
47 'properties': {
48 # The recipe formats that are currently supported by the command
49 'format': {
50 'type': 'string',
51 'enum': ['docker', 'singularity']
52 },
53 # Describes the base image to start from and the version
54 # of Spack to be used
55 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},
56 # Whether or not to strip installed binaries
57 'strip': {
58 'type': 'boolean',
59 'default': True
60 },
61 # Additional system packages that are needed at runtime
62 'os_packages': {
63 'type': 'object',
64 'properties': {
65 'command': {'type': 'string', 'enum': ['apt', 'yum']},
66 'update': {'type': 'boolean'},
67 'build': _list_of_packages,
68 'final': _list_of_packages
69 },
70 'additionalProperties': False
71 },
72 # Add labels to the image
73 'labels': {
74 'type': 'object',
75 },
76 # Add a custom extra section at the bottom of a stage
77 'extra_instructions': {
78 'type': 'object',
79 'additionalProperties': False,
80 'properties': {
81 'build': {'type': 'string'},
82 'final': {'type': 'string'}
83 }
84 },
85 # Reserved for properties that are specific to each format
86 'singularity': {
87 'type': 'object',
88 'additionalProperties': False,
89 'default': {},
90 'properties': {
91 'runscript': {'type': 'string'},
92 'startscript': {'type': 'string'},
93 'test': {'type': 'string'},
94 'help': {'type': 'string'}
95 }
96 },
97 'docker': {
98 'type': 'object',
99 'additionalProperties': False,
100 'default': {},
101 }
102 }
103 }
104
105 properties = {'container': container_schema}
106
[end of lib/spack/spack/schema/container.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py
--- a/lib/spack/spack/schema/container.py
+++ b/lib/spack/spack/schema/container.py
@@ -12,8 +12,7 @@
'type': 'string',
'enum': ['ubuntu:18.04',
'ubuntu:16.04',
- 'centos:7',
- 'centos:6']
+ 'centos:7']
},
'spack': {
'type': 'string',
| {"golden_diff": "diff --git a/lib/spack/spack/schema/container.py b/lib/spack/spack/schema/container.py\n--- a/lib/spack/spack/schema/container.py\n+++ b/lib/spack/spack/schema/container.py\n@@ -12,8 +12,7 @@\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n- 'centos:7',\n- 'centos:6']\n+ 'centos:7']\n },\n 'spack': {\n 'type': 'string',\n", "issue": "CentOS 6 image doesn't build with clingo on Dockerhub\n### Steps to reproduce\r\n\r\nHas to do with failure on centos:6\r\n\r\n```\r\nStep 17/19 : RUN spack spec hdf5+mpi\r\n ---> Running in 8335d48ff53f\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: Spack will not check SSL certificates. You need to update your Python to enable certificate verification.\r\n==> Warning: the original concretizer is currently being used.\r\n Upgrade to \"clingo\" at your earliest convenience. The original concretizer will be removed from Spack starting at v0.18.0\r\n==> Error: cannot bootstrap the \"clingo\" Python module from spec \"clingo-bootstrap@spack+python %gcc target=x86_64\"\r\nInput spec\r\n--------------------------------\r\nhdf5+mpi\r\n\r\nConcretized\r\n--------------------------------\r\n==> Bootstrapping clingo from pre-built binaries\r\nThe command 'docker-shell spack spec hdf5+mpi' returned a non-zero code: 3\r\n```\r\n\r\n---\r\n\r\nSo it bootstraps *during* concretization?\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"Schema for the 'container' subsection of Spack environments.\"\"\"\n\n_stages_from_dockerhub = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'os': {\n 'type': 'string',\n 'enum': ['ubuntu:18.04',\n 'ubuntu:16.04',\n 'centos:7',\n 'centos:6']\n },\n 'spack': {\n 'type': 'string',\n },\n },\n 'required': ['os', 'spack']\n}\n\n_custom_stages = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n },\n 'required': ['build', 'final']\n}\n\n#: List of packages for the schema below\n_list_of_packages = {\n 'type': 'array',\n 'items': {\n 'type': 'string'\n }\n}\n\n#: Schema for the container attribute included in Spack environments\ncontainer_schema = {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n # The recipe formats that are currently supported by the command\n 'format': {\n 'type': 'string',\n 'enum': ['docker', 'singularity']\n },\n # Describes the base image to start from and the version\n # of Spack to be used\n 'images': {'anyOf': [_stages_from_dockerhub, _custom_stages]},\n # Whether or not to strip installed binaries\n 'strip': {\n 'type': 'boolean',\n 'default': True\n },\n # Additional system packages that are needed at runtime\n 'os_packages': {\n 'type': 'object',\n 'properties': {\n 'command': {'type': 'string', 'enum': ['apt', 'yum']},\n 'update': {'type': 'boolean'},\n 'build': _list_of_packages,\n 'final': _list_of_packages\n },\n 'additionalProperties': False\n },\n # Add labels to the image\n 'labels': {\n 'type': 'object',\n },\n # Add a custom extra section at the bottom of a stage\n 'extra_instructions': {\n 'type': 'object',\n 'additionalProperties': False,\n 'properties': {\n 'build': {'type': 'string'},\n 'final': {'type': 'string'}\n }\n },\n # Reserved for properties that 
are specific to each format\n 'singularity': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n 'properties': {\n 'runscript': {'type': 'string'},\n 'startscript': {'type': 'string'},\n 'test': {'type': 'string'},\n 'help': {'type': 'string'}\n }\n },\n 'docker': {\n 'type': 'object',\n 'additionalProperties': False,\n 'default': {},\n }\n }\n}\n\nproperties = {'container': container_schema}\n", "path": "lib/spack/spack/schema/container.py"}]} | 1,723 | 125 |
gh_patches_debug_17893 | rasdani/github-patches | git_diff | huggingface__transformers-1911 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation error in GPT2Tokenizer
Documentation page:
https://huggingface.co/transformers/model_doc/gpt2.html#transformers.GPT2Tokenizer
Code place:
https://github.com/huggingface/transformers/blob/d7d36181fdefdabadc53adf51bed4a2680f5880a/transformers/tokenization_gpt2.py#L112-L113
This phrase:
> Otherwise, this tokenizer encode and decode method will not conserve the absence of a space at the beginning of a string: tokenizer.decode(tokenizer.encode(“Hello”)) = ” Hello”
is **NOT** correct.
Actually:
> tokenizer.decode(tokenizer.encode(“Hello”)) = ”Hello”
Try this example:
```
from transformers import GPT2Tokenizer
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
print("'" + tokenizer.decode(tokenizer.encode("Hello")) + "'")
```
Output:
```
'Hello'
```
</issue>
<code>
[start of transformers/tokenization_gpt2.py]
1 # coding=utf-8
2 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Tokenization classes for OpenAI GPT."""
16 from __future__ import (absolute_import, division, print_function,
17 unicode_literals)
18
19 import sys
20 import json
21 import logging
22 import os
23 import regex as re
24 from io import open
25
26 try:
27 from functools import lru_cache
28 except ImportError:
29 # Just a dummy decorator to get the checks to run on python2
30 # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
31 def lru_cache():
32 return lambda func: func
33
34 from .tokenization_utils import PreTrainedTokenizer
35
36 logger = logging.getLogger(__name__)
37
38 VOCAB_FILES_NAMES = {
39 'vocab_file': 'vocab.json',
40 'merges_file': 'merges.txt',
41 }
42
43 PRETRAINED_VOCAB_FILES_MAP = {
44 'vocab_file':
45 {
46 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
47 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json",
48 'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json",
49 'gpt2-xl': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json",
50 'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json",
51 },
52 'merges_file':
53 {
54 'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
55 'gpt2-medium': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt",
56 'gpt2-large': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt",
57 'gpt2-xl': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt",
58 'distilgpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt",
59 },
60 }
61
62 PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
63 'gpt2': 1024,
64 'gpt2-medium': 1024,
65 'gpt2-large': 1024,
66 'gpt2-xl': 1024,
67 'distilgpt2': 1024,
68 }
69
70 @lru_cache()
71 def bytes_to_unicode():
72 """
73     Returns a list of utf-8 bytes and a mapping to unicode strings.
74     We specifically avoid mapping to whitespace/control characters the bpe code barfs on.
75
76 The reversible bpe codes work on unicode strings.
77 This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
78 When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
79     This is a significant percentage of your normal, say, 32K bpe vocab.
80 To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
81 """
82 _chr = unichr if sys.version_info[0] == 2 else chr
83 bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
84 cs = bs[:]
85 n = 0
86 for b in range(2**8):
87 if b not in bs:
88 bs.append(b)
89 cs.append(2**8+n)
90 n += 1
91 cs = [_chr(n) for n in cs]
92 return dict(zip(bs, cs))
93
94 def get_pairs(word):
95 """Return set of symbol pairs in a word.
96
97 Word is represented as tuple of symbols (symbols being variable-length strings).
98 """
99 pairs = set()
100 prev_char = word[0]
101 for char in word[1:]:
102 pairs.add((prev_char, char))
103 prev_char = char
104 return pairs
105
106 class GPT2Tokenizer(PreTrainedTokenizer):
107 """
108 GPT-2 BPE tokenizer. Peculiarities:
109 - Byte-level Byte-Pair-Encoding
110 - Requires a space to start the input string => the encoding methods should be called with the
111 ``add_prefix_space`` flag set to ``True``.
112 Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve
113 the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"`
114 """
115 vocab_files_names = VOCAB_FILES_NAMES
116 pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
117 max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
118
119 def __init__(self, vocab_file, merges_file, errors='replace', unk_token="<|endoftext|>",
120 bos_token="<|endoftext|>", eos_token="<|endoftext|>", **kwargs):
121 super(GPT2Tokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)
122 self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens
123 self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens
124
125 self.encoder = json.load(open(vocab_file, encoding="utf-8"))
126 self.decoder = {v: k for k, v in self.encoder.items()}
127 self.errors = errors # how to handle errors in decoding
128 self.byte_encoder = bytes_to_unicode()
129 self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
130 bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
131 bpe_merges = [tuple(merge.split()) for merge in bpe_data]
132 self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
133 self.cache = {}
134
135         # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
136 self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
137
138 @property
139 def vocab_size(self):
140 return len(self.encoder)
141
142 def bpe(self, token):
143 if token in self.cache:
144 return self.cache[token]
145 word = tuple(token)
146 pairs = get_pairs(word)
147
148 if not pairs:
149 return token
150
151 while True:
152 bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
153 if bigram not in self.bpe_ranks:
154 break
155 first, second = bigram
156 new_word = []
157 i = 0
158 while i < len(word):
159 try:
160 j = word.index(first, i)
161 new_word.extend(word[i:j])
162 i = j
163 except:
164 new_word.extend(word[i:])
165 break
166
167 if word[i] == first and i < len(word)-1 and word[i+1] == second:
168 new_word.append(first+second)
169 i += 2
170 else:
171 new_word.append(word[i])
172 i += 1
173 new_word = tuple(new_word)
174 word = new_word
175 if len(word) == 1:
176 break
177 else:
178 pairs = get_pairs(word)
179 word = ' '.join(word)
180 self.cache[token] = word
181 return word
182
183 def _tokenize(self, text, add_prefix_space=False):
184 """ Tokenize a string.
185 Args:
186 - add_prefix_space (boolean, default False):
187                 Begin the sentence with at least one space to get invariance to word order in GPT-2 (and RoBERTa) tokenizers.
188 """
189 if add_prefix_space:
190 text = ' ' + text
191
192 bpe_tokens = []
193 for token in re.findall(self.pat, text):
194 if sys.version_info[0] == 2:
195                 token = ''.join(self.byte_encoder[ord(b)] for b in token) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
196 else:
197                 token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
198 bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
199 return bpe_tokens
200
201 def _convert_token_to_id(self, token):
202 """ Converts a token (str/unicode) in an id using the vocab. """
203 return self.encoder.get(token, self.encoder.get(self.unk_token))
204
205 def _convert_id_to_token(self, index):
206 """Converts an index (integer) in a token (string/unicode) using the vocab."""
207 return self.decoder.get(index)
208
209 def convert_tokens_to_string(self, tokens):
210 """ Converts a sequence of tokens (string) in a single string. """
211 text = ''.join(tokens)
212 text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
213 return text
214
215 def save_vocabulary(self, save_directory):
216 """Save the tokenizer vocabulary and merge files to a directory."""
217 if not os.path.isdir(save_directory):
218 logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
219 return
220 vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
221 merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])
222
223 with open(vocab_file, 'w', encoding='utf-8') as f:
224 f.write(json.dumps(self.encoder, ensure_ascii=False))
225
226 index = 0
227 with open(merge_file, "w", encoding="utf-8") as writer:
228 writer.write(u'#version: 0.2\n')
229 for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
230 if index != token_index:
231 logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
232 " Please check that the tokenizer is not corrupted!".format(merge_file))
233 index = token_index
234 writer.write(' '.join(bpe_tokens) + u'\n')
235 index += 1
236
237 return vocab_file, merge_file
[end of transformers/tokenization_gpt2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/transformers/tokenization_gpt2.py b/transformers/tokenization_gpt2.py
--- a/transformers/tokenization_gpt2.py
+++ b/transformers/tokenization_gpt2.py
@@ -107,10 +107,10 @@
"""
GPT-2 BPE tokenizer. Peculiarities:
- Byte-level Byte-Pair-Encoding
- - Requires a space to start the input string => the encoding methods should be called with the
+ - Requires a space to start the input string => the encoding and tokenize methods should be called with the
``add_prefix_space`` flag set to ``True``.
- Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve
- the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode("Hello")) = " Hello"`
+ Otherwise, this tokenizer's ``encode``, ``decode``, and ``tokenize`` methods will not conserve
+ the spaces at the beginning of a string: `tokenizer.decode(tokenizer.encode(" Hello")) = "Hello"`
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
| {"golden_diff": "diff --git a/transformers/tokenization_gpt2.py b/transformers/tokenization_gpt2.py\n--- a/transformers/tokenization_gpt2.py\n+++ b/transformers/tokenization_gpt2.py\n@@ -107,10 +107,10 @@\n \"\"\"\n GPT-2 BPE tokenizer. Peculiarities:\n - Byte-level Byte-Pair-Encoding\n- - Requires a space to start the input string => the encoding methods should be called with the\n+ - Requires a space to start the input string => the encoding and tokenize methods should be called with the\n ``add_prefix_space`` flag set to ``True``.\n- Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve\n- the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode(\"Hello\")) = \" Hello\"`\n+ Otherwise, this tokenizer's ``encode``, ``decode``, and ``tokenize`` methods will not conserve\n+ the spaces at the beginning of a string: `tokenizer.decode(tokenizer.encode(\" Hello\")) = \"Hello\"`\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n", "issue": "Documentation error in GPT2Tokenizer\nDocumentation page:\r\nhttps://huggingface.co/transformers/model_doc/gpt2.html#transformers.GPT2Tokenizer\r\nCode place:\r\nhttps://github.com/huggingface/transformers/blob/d7d36181fdefdabadc53adf51bed4a2680f5880a/transformers/tokenization_gpt2.py#L112-L113\r\n\r\nThis phrase:\r\n> Otherwise, this tokenizer encode and decode method will not conserve the absence of a space at the beginning of a string: tokenizer.decode(tokenizer.encode(\u201cHello\u201d)) = \u201d Hello\u201d\r\n\r\nis **NOT** correct.\r\n\r\nActually:\r\n> tokenizer.decode(tokenizer.encode(\u201cHello\u201d)) = \u201dHello\u201d\r\n\r\nTry this example:\r\n```\r\nfrom transformers import GPT2Tokenizer\r\ntokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\nprint(\"'\" + tokenizer.decode(tokenizer.encode(\"Hello\")) + \"'\")\r\n```\r\nOutput:\r\n```\r\n'Hello'\r\n```\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. 
team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tokenization classes for OpenAI GPT.\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport sys\nimport json\nimport logging\nimport os\nimport regex as re\nfrom io import open\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n # Just a dummy decorator to get the checks to run on python2\n # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.\n def lru_cache():\n return lambda func: func\n\nfrom .tokenization_utils import PreTrainedTokenizer\n\nlogger = logging.getLogger(__name__)\n\nVOCAB_FILES_NAMES = {\n 'vocab_file': 'vocab.json',\n 'merges_file': 'merges.txt',\n}\n\nPRETRAINED_VOCAB_FILES_MAP = {\n 'vocab_file':\n {\n 'gpt2': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json\",\n 'gpt2-medium': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-vocab.json\",\n 'gpt2-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-vocab.json\",\n 'gpt2-xl': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-vocab.json\",\n 'distilgpt2': \"https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-vocab.json\",\n },\n 'merges_file':\n {\n 'gpt2': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt\",\n 'gpt2-medium': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-medium-merges.txt\",\n 'gpt2-large': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-large-merges.txt\",\n 'gpt2-xl': \"https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-xl-merges.txt\",\n 'distilgpt2': \"https://s3.amazonaws.com/models.huggingface.co/bert/distilgpt2-merges.txt\",\n },\n}\n\nPRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {\n 'gpt2': 1024,\n 'gpt2-medium': 1024,\n 'gpt2-large': 1024,\n 'gpt2-xl': 1024,\n 'distilgpt2': 1024,\n}\n\n@lru_cache()\ndef bytes_to_unicode():\n \"\"\"\n Returns list of utf-8 byte and a mapping to unicode strings.\n We specifically avoids mapping to whitespace/control characters the bpe code barfs on.\n \n The reversible bpe codes work on unicode strings.\n This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.\n When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.\n This is a signficant percentage of your normal, say, 32K bpe vocab.\n To avoid that, we want lookup tables between utf-8 bytes and unicode strings.\n \"\"\"\n _chr = unichr if sys.version_info[0] == 2 else chr\n bs = list(range(ord(\"!\"), ord(\"~\")+1))+list(range(ord(\"\u00a1\"), ord(\"\u00ac\")+1))+list(range(ord(\"\u00ae\"), ord(\"\u00ff\")+1))\n cs = bs[:]\n n = 0\n for b in range(2**8):\n if b not in bs:\n bs.append(b)\n cs.append(2**8+n)\n n += 1\n cs = [_chr(n) for n in cs]\n return dict(zip(bs, cs))\n\ndef get_pairs(word):\n \"\"\"Return set of symbol pairs in a word.\n\n Word is represented as tuple of symbols (symbols being variable-length strings).\n \"\"\"\n pairs 
= set()\n prev_char = word[0]\n for char in word[1:]:\n pairs.add((prev_char, char))\n prev_char = char\n return pairs\n\nclass GPT2Tokenizer(PreTrainedTokenizer):\n \"\"\"\n GPT-2 BPE tokenizer. Peculiarities:\n - Byte-level Byte-Pair-Encoding\n - Requires a space to start the input string => the encoding methods should be called with the\n ``add_prefix_space`` flag set to ``True``.\n Otherwise, this tokenizer ``encode`` and ``decode`` method will not conserve\n the absence of a space at the beginning of a string: `tokenizer.decode(tokenizer.encode(\"Hello\")) = \" Hello\"`\n \"\"\"\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(self, vocab_file, merges_file, errors='replace', unk_token=\"<|endoftext|>\",\n bos_token=\"<|endoftext|>\", eos_token=\"<|endoftext|>\", **kwargs):\n super(GPT2Tokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs)\n self.max_len_single_sentence = self.max_len # no default special tokens - you can update this value if you add special tokens\n self.max_len_sentences_pair = self.max_len # no default special tokens - you can update this value if you add special tokens\n\n self.encoder = json.load(open(vocab_file, encoding=\"utf-8\"))\n self.decoder = {v: k for k, v in self.encoder.items()}\n self.errors = errors # how to handle errors in decoding\n self.byte_encoder = bytes_to_unicode()\n self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}\n bpe_data = open(merges_file, encoding='utf-8').read().split('\\n')[1:-1]\n bpe_merges = [tuple(merge.split()) for merge in bpe_data]\n self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))\n self.cache = {}\n\n # Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions\n self.pat = re.compile(r\"\"\"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)|\\s+\"\"\")\n\n @property\n def vocab_size(self):\n return len(self.encoder)\n\n def bpe(self, token):\n if token in self.cache:\n return self.cache[token]\n word = tuple(token)\n pairs = get_pairs(word)\n\n if not pairs:\n return token\n\n while True:\n bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))\n if bigram not in self.bpe_ranks:\n break\n first, second = bigram\n new_word = []\n i = 0\n while i < len(word):\n try:\n j = word.index(first, i)\n new_word.extend(word[i:j])\n i = j\n except:\n new_word.extend(word[i:])\n break\n\n if word[i] == first and i < len(word)-1 and word[i+1] == second:\n new_word.append(first+second)\n i += 2\n else:\n new_word.append(word[i])\n i += 1\n new_word = tuple(new_word)\n word = new_word\n if len(word) == 1:\n break\n else:\n pairs = get_pairs(word)\n word = ' '.join(word)\n self.cache[token] = word\n return word\n\n def _tokenize(self, text, add_prefix_space=False):\n \"\"\" Tokenize a string.\n Args:\n - add_prefix_space (boolean, default False):\n Begin the sentence with at least one space toto get invariance to word order in GPT-2 (and RoBERTa) tokenizers.\n \"\"\"\n if add_prefix_space:\n text = ' ' + text\n\n bpe_tokens = []\n for token in re.findall(self.pat, text):\n if sys.version_info[0] == 2:\n token = ''.join(self.byte_encoder[ord(b)] for b in token) # Maps all our bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)\n else:\n token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our 
bytes to unicode strings, avoiding controle tokens of the BPE (spaces in our case)\n bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))\n return bpe_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str/unicode) in an id using the vocab. \"\"\"\n return self.encoder.get(token, self.encoder.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (string/unicode) using the vocab.\"\"\"\n return self.decoder.get(index)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n text = ''.join(tokens)\n text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)\n return text\n\n def save_vocabulary(self, save_directory):\n \"\"\"Save the tokenizer vocabulary and merge files to a directory.\"\"\"\n if not os.path.isdir(save_directory):\n logger.error(\"Vocabulary path ({}) should be a directory\".format(save_directory))\n return\n vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])\n merge_file = os.path.join(save_directory, VOCAB_FILES_NAMES['merges_file'])\n\n with open(vocab_file, 'w', encoding='utf-8') as f:\n f.write(json.dumps(self.encoder, ensure_ascii=False))\n\n index = 0\n with open(merge_file, \"w\", encoding=\"utf-8\") as writer:\n writer.write(u'#version: 0.2\\n')\n for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\"Saving vocabulary to {}: BPE merge indices are not consecutive.\"\n \" Please check that the tokenizer is not corrupted!\".format(merge_file))\n index = token_index\n writer.write(' '.join(bpe_tokens) + u'\\n')\n index += 1\n\n return vocab_file, merge_file", "path": "transformers/tokenization_gpt2.py"}]} | 3,867 | 265 |
gh_patches_debug_15441 | rasdani/github-patches | git_diff | bids-standard__pybids-264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
BIDS Validator data not installed
When installing pybids through pip using an archive (e.g. tarball, zip, instead of from github with `-e`), the data for the `BIDSValidator` is not installed, causing `BIDSLayout` to fail initiating
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.6/dist-packages/bids/layout/layout.py", line 143, in __init__
**kwargs)
File "/usr/local/lib/python3.6/dist-packages/grabbit/core.py", line 419, in __init__
self.index()
File "/usr/local/lib/python3.6/dist-packages/grabbit/core.py", line 610, in index
_index_dir(path, list(domains))
File "/usr/local/lib/python3.6/dist-packages/grabbit/core.py", line 603, in _index_dir
elif self._validate_file(full_path):
File "/usr/local/lib/python3.6/dist-packages/bids/layout/layout.py", line 204, in _validate_file
return self.validator.is_bids(to_check)
File "/usr/local/lib/python3.6/dist-packages/bids/layout/validation.py", line 64, in is_bids
conditions.append(self.is_top_level(path))
File "/usr/local/lib/python3.6/dist-packages/bids/layout/validation.py", line 75, in is_top_level
with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:
FileNotFoundError: [Errno 2] No such file or directory: '/usr/local/lib/python3.6/dist-packages/bids/layout/config/validator/fixed_top_level_names.json'
```
</issue>
<code>
[start of bids/version.py]
1 from __future__ import absolute_import, division, print_function
2 import os
3
4 CLASSIFIERS = ["Development Status :: 3 - Alpha",
5 "Environment :: Console",
6 "Intended Audience :: Science/Research",
7 "License :: OSI Approved :: MIT License",
8 "Operating System :: OS Independent",
9 "Programming Language :: Python",
10 "Topic :: Scientific/Engineering"]
11
12 # Description should be a one-liner:
13 description = "bids: interface with datasets conforming BIDS"
14 # Long description will go up on the pypi page
15 long_description = """
16
17 PyBIDS
18 ======
19 PyBIDS is a Python module to interface with datasets conforming BIDS.
20 See BIDS paper_ and http://bids.neuroimaging.io website for more information.
21
22 .. paper_: http://www.nature.com/articles/sdata201644
23
24 License
25 =======
26 ``pybids`` is licensed under the terms of the MIT license. See the file
27 "LICENSE" for information on the history of this software, terms & conditions
28 for usage, and a DISCLAIMER OF ALL WARRANTIES.
29
30 All trademarks referenced herein are property of their respective holders.
31
32 Copyright (c) 2016--, PyBIDS developers, Planet Earth
33
34 """
35
36 NAME = "pybids"
37 MAINTAINER = "PyBIDS Developers"
38 MAINTAINER_EMAIL = "[email protected]"
39 DESCRIPTION = description
40 LONG_DESCRIPTION = long_description
41 URL = "http://github.com/bids-standard/pybids"
42 DOWNLOAD_URL = ""
43 LICENSE = "MIT"
44 AUTHOR = "PyBIDS developers"
45 AUTHOR_EMAIL = "[email protected]"
46 PLATFORMS = "OS Independent"
47 # No data for now
48 REQUIRES = ["grabbit==0.2.4", "six", "num2words", "numpy", "scipy", "pandas",
49 "nibabel", "patsy"]
50 EXTRAS_REQUIRE = {
51 # Just to not break compatibility with externals requiring
52 # now deprecated installation schemes
53 'analysis': []
54 }
55 TESTS_REQUIRE = ["pytest>=3.3.0"]
56
57
58 def package_files(directory):
59 # from https://stackoverflow.com/questions/27664504/how-to-add-package-data-recursively-in-python-setup-py
60 paths = []
61 for (path, directories, filenames) in os.walk(directory):
62 for filename in filenames:
63 paths.append(os.path.join('..', path, filename))
64 return paths
65
66 extra_files = package_files('path_to/extra_files_dir')
67 PACKAGE_DATA = {
68 'bids.layout': ['config/*.json'],
69 'bids.reports': ['config/*.json'],
70 'bids': package_files('bids/tests/data')
71 }
72
[end of bids/version.py]
[start of bids/layout/validation.py]
1 """Tools for validating BIDS projects."""
2
3 import re
4 import json
5 from os.path import join, abspath, dirname
6
7 __all__ = ['BIDSValidator']
8
9
10 class BIDSValidator():
11 """An object for BIDS (Brain Imaging Data Structure) verification in a data.
12
13 The main method of this class is `is_bids()`. You should use it for
14     checking whether a file path is compatible with BIDS.
15
16 Parameters
17 ----------
18 index_associated : bool, default: True
19 Specifies if an associated data should be checked. If it is true then
20 any file paths in directories `code/`, `derivatives/`, `sourcedata/`
21 and `stimuli/` will pass the validation, else they won't.
22
23 Examples
24 --------
25 >>> from bids.grabbids import BIDSValidator
26 >>> validator = BIDSValidator()
27 >>> filepaths = ["/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz",
28 >>> "/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe", #wrong extension
29 >>> "/participants.tsv"]
30 >>> for filepath in filepaths:
31 >>> print( validator.is_bids(filepath) )
32 True
33 False
34 True
35 """
36
37 def __init__(self, index_associated=True):
38 self.rule_dir = join(dirname(abspath(__file__)), 'config',
39 'validator')
40 self.index_associated = index_associated
41
42 def is_bids(self, path):
43 """Check if a file path appropriate for BIDS.
44
45         Main method of the validator. Uses other class methods for checking
46 different aspects of the file path.
47
48 Parameters
49 ----------
50 path: string
51 A path of a file you want to check.
52
53 Examples
54 --------
55 >>> from bids.grabbids import BIDSValidator
56 >>> validator = BIDSValidator()
57 >>> validator.is_bids("/sub-01/ses-test/anat/sub-01_ses-test_rec-CSD_run-23_T1w.nii.gz")
58 True
59 >>> validator.is_bids("/sub-01/ses-test/sub-01_run-01_dwi.bvec") # missed session in the filename
60 False
61 """
62 conditions = []
63
64 conditions.append(self.is_top_level(path))
65 conditions.append(self.is_associated_data(path))
66 conditions.append(self.is_session_level(path))
67 conditions.append(self.is_subject_level(path))
68 conditions.append(self.is_phenotypic(path))
69 conditions.append(self.is_file(path))
70
71 return (any(conditions))
72
73 def is_top_level(self, path):
74 """Check if the file has appropriate name for a top-level file."""
75 with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:
76 fixed_top_level_json = json.load(f)
77 fixed_top_level_names = fixed_top_level_json['fixed_top_level_names']
78
79 regexps = self.get_regular_expressions('top_level_rules.json')
80
81 conditions = [False if re.compile(x).search(path) is None else True
82 for x in regexps]
83
84 conditions.append(path in fixed_top_level_names)
85
86 return (any(conditions))
87
88 def is_associated_data(self, path):
89 """Check if file is appropriate associated data."""
90 if not self.index_associated:
91 return False
92
93 regexps = self.get_regular_expressions('associated_data_rules.json')
94
95 conditions = [(re.compile(x).search(path) is not None)
96 for x in regexps]
97
98 return any(conditions)
99
100 def is_session_level(self, path):
101 """Check if the file has appropriate name for a session level."""
102 regexps = self.get_regular_expressions('session_level_rules.json')
103
104 conditions = [self.conditional_match(x, path) for x in regexps]
105
106 return (any(conditions))
107
108 def is_subject_level(self, path):
109 """Check if the file has appropriate name for a subject level."""
110 regexps = self.get_regular_expressions('subject_level_rules.json')
111
112 conditions = [(re.compile(x).search(path) is not None)
113 for x in regexps]
114
115 return (any(conditions))
116
117 def is_phenotypic(self, path):
118 """Check if file is phenotypic data."""
119 regexps = self.get_regular_expressions('phenotypic_rules.json')
120
121 conditions = [(re.compile(x).search(path) is not None)
122 for x in regexps]
123
124 return (any(conditions))
125
126 def is_file(self, path):
127 """Check if file is phenotypic data."""
128 regexps = self.get_regular_expressions('file_level_rules.json')
129
130 conditions = [(re.compile(x).search(path) is not None)
131 for x in regexps]
132
133 return (any(conditions))
134
135 def get_regular_expressions(self, filename):
136 """Get regular expressions from file."""
137 regexps = []
138
139 filename = join(self.rule_dir, filename)
140
141 with open(filename, 'r') as f:
142 rules = json.load(f)
143
144 for key in list(rules.keys()):
145 rule = rules[key]
146
147 regexp = rule["regexp"]
148
149 if "tokens" in rule:
150 tokens = rule["tokens"]
151
152 for token in list(tokens):
153 regexp = regexp.replace(token, "|".join(tokens[token]))
154
155 regexps.append(regexp)
156
157 return regexps
158
159 def get_path_values(self, path):
160 """Takes a file path and returns values found for the following path
161 keys:
162 sub-
163 ses-
164 """
165 values = {}
166
167 regexps = self.get_regular_expressions('path.json')
168
169 # capture subject
170 for paths in ['sub', 'ses']:
171 match = re.compile(regexps[paths]).findall(path)
172 values[paths] = match[1] if match & match[1] else None
173
174 return values
175
176 def conditional_match(self, expression, path):
177 match = re.compile(expression).findall(path)
178 match = match[0] if len(match) >= 1 else False
179 # adapted from JS code and JS does not support conditional groups
180 if (match):
181 return ((match[1] == match[2][1:]) | (not match[1]))
182 else:
183 return False
184
[end of bids/layout/validation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bids/layout/validation.py b/bids/layout/validation.py
--- a/bids/layout/validation.py
+++ b/bids/layout/validation.py
@@ -35,8 +35,7 @@
"""
def __init__(self, index_associated=True):
- self.rule_dir = join(dirname(abspath(__file__)), 'config',
- 'validator')
+ self.rule_dir = join(dirname(abspath(__file__)),'config', 'validator')
self.index_associated = index_associated
def is_bids(self, path):
diff --git a/bids/version.py b/bids/version.py
--- a/bids/version.py
+++ b/bids/version.py
@@ -65,7 +65,7 @@
extra_files = package_files('path_to/extra_files_dir')
PACKAGE_DATA = {
- 'bids.layout': ['config/*.json'],
+ 'bids.layout': ['config/*.json', 'config/validator/*.json'],
'bids.reports': ['config/*.json'],
'bids': package_files('bids/tests/data')
}
| {"golden_diff": "diff --git a/bids/layout/validation.py b/bids/layout/validation.py\n--- a/bids/layout/validation.py\n+++ b/bids/layout/validation.py\n@@ -35,8 +35,7 @@\n \"\"\"\n \n def __init__(self, index_associated=True):\n- self.rule_dir = join(dirname(abspath(__file__)), 'config',\n- 'validator')\n+ self.rule_dir = join(dirname(abspath(__file__)),'config', 'validator')\n self.index_associated = index_associated\n \n def is_bids(self, path):\ndiff --git a/bids/version.py b/bids/version.py\n--- a/bids/version.py\n+++ b/bids/version.py\n@@ -65,7 +65,7 @@\n \n extra_files = package_files('path_to/extra_files_dir')\n PACKAGE_DATA = {\n- 'bids.layout': ['config/*.json'],\n+ 'bids.layout': ['config/*.json', 'config/validator/*.json'],\n 'bids.reports': ['config/*.json'],\n 'bids': package_files('bids/tests/data')\n }\n", "issue": "BIDS Validator data not installed\nWhen installing pybids through pip using an archive (e.g. tarball, zip, instead of from github with `-e`), the data for the `BIDSValidator` is not installed, causing `BIDSLayout` to fail initating\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.6/dist-packages/bids/layout/layout.py\", line 143, in __init__\r\n **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/grabbit/core.py\", line 419, in __init__\r\n self.index()\r\n File \"/usr/local/lib/python3.6/dist-packages/grabbit/core.py\", line 610, in index\r\n _index_dir(path, list(domains))\r\n File \"/usr/local/lib/python3.6/dist-packages/grabbit/core.py\", line 603, in _index_dir\r\n elif self._validate_file(full_path):\r\n File \"/usr/local/lib/python3.6/dist-packages/bids/layout/layout.py\", line 204, in _validate_file\r\n return self.validator.is_bids(to_check)\r\n File \"/usr/local/lib/python3.6/dist-packages/bids/layout/validation.py\", line 64, in is_bids\r\n conditions.append(self.is_top_level(path))\r\n File \"/usr/local/lib/python3.6/dist-packages/bids/layout/validation.py\", line 75, in is_top_level\r\n with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/usr/local/lib/python3.6/dist-packages/bids/layout/config/validator/fixed_top_level_names.json'\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\nimport os\n\nCLASSIFIERS = [\"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Topic :: Scientific/Engineering\"]\n\n# Description should be a one-liner:\ndescription = \"bids: interface with datasets conforming BIDS\"\n# Long description will go up on the pypi page\nlong_description = \"\"\"\n\nPyBIDS\n======\nPyBIDS is a Python module to interface with datasets conforming BIDS.\nSee BIDS paper_ and http://bids.neuroimaging.io website for more information.\n\n.. paper_: http://www.nature.com/articles/sdata201644\n\nLicense\n=======\n``pybids`` is licensed under the terms of the MIT license. 
See the file\n\"LICENSE\" for information on the history of this software, terms & conditions\nfor usage, and a DISCLAIMER OF ALL WARRANTIES.\n\nAll trademarks referenced herein are property of their respective holders.\n\nCopyright (c) 2016--, PyBIDS developers, Planet Earth\n\n\"\"\"\n\nNAME = \"pybids\"\nMAINTAINER = \"PyBIDS Developers\"\nMAINTAINER_EMAIL = \"[email protected]\"\nDESCRIPTION = description\nLONG_DESCRIPTION = long_description\nURL = \"http://github.com/bids-standard/pybids\"\nDOWNLOAD_URL = \"\"\nLICENSE = \"MIT\"\nAUTHOR = \"PyBIDS developers\"\nAUTHOR_EMAIL = \"[email protected]\"\nPLATFORMS = \"OS Independent\"\n# No data for now\nREQUIRES = [\"grabbit==0.2.4\", \"six\", \"num2words\", \"numpy\", \"scipy\", \"pandas\",\n \"nibabel\", \"patsy\"]\nEXTRAS_REQUIRE = {\n # Just to not break compatibility with externals requiring\n # now deprecated installation schemes\n 'analysis': []\n}\nTESTS_REQUIRE = [\"pytest>=3.3.0\"]\n\n\ndef package_files(directory):\n # from https://stackoverflow.com/questions/27664504/how-to-add-package-data-recursively-in-python-setup-py\n paths = []\n for (path, directories, filenames) in os.walk(directory):\n for filename in filenames:\n paths.append(os.path.join('..', path, filename))\n return paths\n\nextra_files = package_files('path_to/extra_files_dir')\nPACKAGE_DATA = {\n 'bids.layout': ['config/*.json'],\n 'bids.reports': ['config/*.json'],\n 'bids': package_files('bids/tests/data')\n}\n", "path": "bids/version.py"}, {"content": "\"\"\"Tools for validating BIDS projects.\"\"\"\n\nimport re\nimport json\nfrom os.path import join, abspath, dirname\n\n__all__ = ['BIDSValidator']\n\n\nclass BIDSValidator():\n \"\"\"An object for BIDS (Brain Imaging Data Structure) verification in a data.\n\n The main method of this class is `is_bids()`. You should use it for\n checking whether a file path compatible with BIDS.\n\n Parameters\n ----------\n index_associated : bool, default: True\n Specifies if an associated data should be checked. If it is true then\n any file paths in directories `code/`, `derivatives/`, `sourcedata/`\n and `stimuli/` will pass the validation, else they won't.\n\n Examples\n --------\n >>> from bids.grabbids import BIDSValidator\n >>> validator = BIDSValidator()\n >>> filepaths = [\"/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz\",\n >>> \"/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe\", #wrong extension\n >>> \"/participants.tsv\"]\n >>> for filepath in filepaths:\n >>> print( validator.is_bids(filepath) )\n True\n False\n True\n \"\"\"\n\n def __init__(self, index_associated=True):\n self.rule_dir = join(dirname(abspath(__file__)), 'config',\n 'validator')\n self.index_associated = index_associated\n\n def is_bids(self, path):\n \"\"\"Check if a file path appropriate for BIDS.\n\n Main method of the validator. 
uses other class methods for checking\n different aspects of the file path.\n\n Parameters\n ----------\n path: string\n A path of a file you want to check.\n\n Examples\n --------\n >>> from bids.grabbids import BIDSValidator\n >>> validator = BIDSValidator()\n >>> validator.is_bids(\"/sub-01/ses-test/anat/sub-01_ses-test_rec-CSD_run-23_T1w.nii.gz\")\n True\n >>> validator.is_bids(\"/sub-01/ses-test/sub-01_run-01_dwi.bvec\") # missed session in the filename\n False\n \"\"\"\n conditions = []\n\n conditions.append(self.is_top_level(path))\n conditions.append(self.is_associated_data(path))\n conditions.append(self.is_session_level(path))\n conditions.append(self.is_subject_level(path))\n conditions.append(self.is_phenotypic(path))\n conditions.append(self.is_file(path))\n\n return (any(conditions))\n\n def is_top_level(self, path):\n \"\"\"Check if the file has appropriate name for a top-level file.\"\"\"\n with open(join(self.rule_dir, 'fixed_top_level_names.json'), 'r') as f:\n fixed_top_level_json = json.load(f)\n fixed_top_level_names = fixed_top_level_json['fixed_top_level_names']\n\n regexps = self.get_regular_expressions('top_level_rules.json')\n\n conditions = [False if re.compile(x).search(path) is None else True\n for x in regexps]\n\n conditions.append(path in fixed_top_level_names)\n\n return (any(conditions))\n\n def is_associated_data(self, path):\n \"\"\"Check if file is appropriate associated data.\"\"\"\n if not self.index_associated:\n return False\n\n regexps = self.get_regular_expressions('associated_data_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return any(conditions)\n\n def is_session_level(self, path):\n \"\"\"Check if the file has appropriate name for a session level.\"\"\"\n regexps = self.get_regular_expressions('session_level_rules.json')\n\n conditions = [self.conditional_match(x, path) for x in regexps]\n\n return (any(conditions))\n\n def is_subject_level(self, path):\n \"\"\"Check if the file has appropriate name for a subject level.\"\"\"\n regexps = self.get_regular_expressions('subject_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_phenotypic(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('phenotypic_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def is_file(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions('file_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None)\n for x in regexps]\n\n return (any(conditions))\n\n def get_regular_expressions(self, filename):\n \"\"\"Get regular expressions from file.\"\"\"\n regexps = []\n\n filename = join(self.rule_dir, filename)\n\n with open(filename, 'r') as f:\n rules = json.load(f)\n\n for key in list(rules.keys()):\n rule = rules[key]\n\n regexp = rule[\"regexp\"]\n\n if \"tokens\" in rule:\n tokens = rule[\"tokens\"]\n\n for token in list(tokens):\n regexp = regexp.replace(token, \"|\".join(tokens[token]))\n\n regexps.append(regexp)\n\n return regexps\n\n def get_path_values(self, path):\n \"\"\"Takes a file path and returns values found for the following path\n keys:\n sub-\n ses-\n \"\"\"\n values = {}\n\n regexps = self.get_regular_expressions('path.json')\n\n # capture subject\n for paths in ['sub', 'ses']:\n match = re.compile(regexps[paths]).findall(path)\n 
values[paths] = match[1] if match & match[1] else None\n\n return values\n\n def conditional_match(self, expression, path):\n match = re.compile(expression).findall(path)\n match = match[0] if len(match) >= 1 else False\n # adapted from JS code and JS does not support conditional groups\n if (match):\n return ((match[1] == match[2][1:]) | (not match[1]))\n else:\n return False\n", "path": "bids/layout/validation.py"}]} | 3,469 | 234 |
gh_patches_debug_10429 | rasdani/github-patches | git_diff | safe-global__safe-config-service-1107 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bad logo URL when creating a new Safe App
**Describe the bug**
When inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`.
Re-uploading the image for the Safe App solves the problem.
**To Reproduce**
Steps to reproduce the behavior:
- Create a new Safe App.
- Check the path for the logo image is not correct (it includes `None` as ID).
**Expected behavior**
A correct Safe App `app_id` is added to the logo path instead of `None`.
**Environment (please complete the following information):**
- Staging and production.
</issue>
<code>
[start of src/safe_apps/models.py]
1 import os
2 from enum import Enum
3 from typing import IO, Union
4
5 from django.contrib.postgres.fields import ArrayField
6 from django.core.exceptions import ValidationError
7 from django.core.files.images import get_image_dimensions
8 from django.core.validators import RegexValidator
9 from django.db import models
10
11 _HOSTNAME_VALIDATOR = RegexValidator(
12 r"^(https?:\/\/)?(www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}\/?$",
13 message="Enter a valid hostname (Without a resource path)",
14 code="invalid_hostname",
15 )
16
17
18 def safe_app_icon_path(instance: "SafeApp", filename: str) -> str:
19 _, file_extension = os.path.splitext(filename)
20 return f"safe_apps/{instance.app_id}/icon{file_extension}"
21
22
23 def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:
24 width, height = get_image_dimensions(image)
25 if not width or not height:
26 raise ValidationError(
27 f"Could not get image dimensions. Width={width}, Height={height}"
28 )
29 if width > 512 or height > 512:
30 raise ValidationError("Image width and height need to be at most 512 pixels")
31
32
33 class Provider(models.Model):
34 url = models.URLField(primary_key=True)
35 name = models.CharField(max_length=200)
36
37 def __str__(self) -> str:
38 return f"{self.name} | {self.url}"
39
40
41 class Client(models.Model):
42 url = models.CharField(
43 unique=True,
44 help_text="The domain URL client is hosted at",
45 # The maximum length of a full host name is 253 characters per RFC 1034
46 max_length=255,
47 validators=[_HOSTNAME_VALIDATOR],
48 )
49
50 def __str__(self) -> str:
51 return f"Client: {self.url}"
52
53
54 class SafeApp(models.Model):
55 class AccessControlPolicy(str, Enum):
56 NO_RESTRICTIONS = "NO_RESTRICTIONS"
57 DOMAIN_ALLOWLIST = "DOMAIN_ALLOWLIST"
58
59 app_id = models.BigAutoField(primary_key=True)
60 visible = models.BooleanField(
61 default=True
62 ) # True if this safe-app should be visible from the view. False otherwise
63 url = models.URLField()
64 name = models.CharField(max_length=200)
65 icon_url = models.ImageField(
66 validators=[validate_safe_app_icon_size],
67 upload_to=safe_app_icon_path,
68 max_length=255,
69 null=True,
70 blank=True,
71 )
72 description = models.CharField(max_length=200)
73 chain_ids = ArrayField(models.PositiveBigIntegerField())
74 provider = models.ForeignKey(
75 Provider, null=True, blank=True, on_delete=models.SET_NULL
76 )
77 exclusive_clients = models.ManyToManyField(
78 Client,
79 blank=True,
80 help_text="Clients that are only allowed to use this SafeApp",
81 )
82 developer_website = models.URLField(null=True, blank=True)
83
84 def get_access_control_type(self) -> AccessControlPolicy:
85 if self.exclusive_clients.exists():
86 return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST
87 return SafeApp.AccessControlPolicy.NO_RESTRICTIONS
88
89 def __str__(self) -> str:
90 return f"{self.name} | {self.url} | chain_ids={self.chain_ids}"
91
92
93 class Tag(models.Model):
94 name = models.CharField(max_length=255)
95 safe_apps = models.ManyToManyField(SafeApp, blank=True)
96
97 def __str__(self) -> str:
98 return f"Tag: {self.name}"
99
100
101 class Feature(models.Model):
102 # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled
103 safe_apps = models.ManyToManyField(
104 SafeApp, blank=True, help_text="Safe Apps where this feature is enabled."
105 )
106 key = models.CharField(
107 unique=True,
108 max_length=255,
109 help_text="The unique name/key that identifies this feature",
110 )
111
112 def __str__(self) -> str:
113 return f"Safe App Feature: {self.key}"
114
115
116 class SocialProfile(models.Model):
117 class Platform(models.TextChoices):
118 DISCORD = "DISCORD"
119 GITHUB = "GITHUB"
120 TWITTER = "TWITTER"
121
122 safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)
123 platform = models.CharField(choices=Platform.choices, max_length=255)
124 url = models.URLField()
125
126 def __str__(self) -> str:
127 return f"Social Profile: {self.platform} | {self.url}"
128
[end of src/safe_apps/models.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py
--- a/src/safe_apps/models.py
+++ b/src/safe_apps/models.py
@@ -1,4 +1,5 @@
import os
+import uuid
from enum import Enum
from typing import IO, Union
@@ -17,7 +18,7 @@
def safe_app_icon_path(instance: "SafeApp", filename: str) -> str:
_, file_extension = os.path.splitext(filename)
- return f"safe_apps/{instance.app_id}/icon{file_extension}"
+ return f"safe_apps/{uuid.uuid4()}/icon{file_extension}"
def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:
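A quick sketch of the patched helper's behaviour: because the directory segment now comes from `uuid.uuid4()`, the generated path never contains the (possibly unassigned) `app_id`, and every upload lands in its own directory. The values shown are illustrative.

```python
# Sketch of the patched behaviour; the uuid segment differs on every call.
import os
import uuid
from types import SimpleNamespace


def safe_app_icon_path(instance, filename):
    _, file_extension = os.path.splitext(filename)
    return f"safe_apps/{uuid.uuid4()}/icon{file_extension}"


print(safe_app_icon_path(SimpleNamespace(app_id=None), "logo.png"))
# e.g. "safe_apps/0b1f9d2e-.../icon.png" -- no "None" segment even before the first save
```

The trade-off is that icon paths no longer encode which Safe App they belong to; that association lives only in the `icon_url` column.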
| {"golden_diff": "diff --git a/src/safe_apps/models.py b/src/safe_apps/models.py\n--- a/src/safe_apps/models.py\n+++ b/src/safe_apps/models.py\n@@ -1,4 +1,5 @@\n import os\n+import uuid\n from enum import Enum\n from typing import IO, Union\n \n@@ -17,7 +18,7 @@\n \n def safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n- return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n+ return f\"safe_apps/{uuid.uuid4()}/icon{file_extension}\"\n \n \n def validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n", "issue": "Bad logo URL when creating a new Safe App\n**Describe the bug**\r\nWhen inserting a new Safe App, `None` is added to the logo image URL instead of the `app_id`. \r\n\r\nRe-uploading the image for the Safe App solves the problem.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n- Create a new Safe App.\r\n- Check the path for the logo image is not correct (it includes `None` as ID).\r\n\r\n**Expected behavior**\r\nA correct Safe App `app_id` is added to the logo path instead of `None`.\r\n\r\n**Environment (please complete the following information):**\r\n - Staging and production.\r\n\n", "before_files": [{"content": "import os\nfrom enum import Enum\nfrom typing import IO, Union\n\nfrom django.contrib.postgres.fields import ArrayField\nfrom django.core.exceptions import ValidationError\nfrom django.core.files.images import get_image_dimensions\nfrom django.core.validators import RegexValidator\nfrom django.db import models\n\n_HOSTNAME_VALIDATOR = RegexValidator(\n r\"^(https?:\\/\\/)?(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{2,256}\\.[a-z]{2,6}\\/?$\",\n message=\"Enter a valid hostname (Without a resource path)\",\n code=\"invalid_hostname\",\n)\n\n\ndef safe_app_icon_path(instance: \"SafeApp\", filename: str) -> str:\n _, file_extension = os.path.splitext(filename)\n return f\"safe_apps/{instance.app_id}/icon{file_extension}\"\n\n\ndef validate_safe_app_icon_size(image: Union[str, IO[bytes]]) -> None:\n width, height = get_image_dimensions(image)\n if not width or not height:\n raise ValidationError(\n f\"Could not get image dimensions. Width={width}, Height={height}\"\n )\n if width > 512 or height > 512:\n raise ValidationError(\"Image width and height need to be at most 512 pixels\")\n\n\nclass Provider(models.Model):\n url = models.URLField(primary_key=True)\n name = models.CharField(max_length=200)\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url}\"\n\n\nclass Client(models.Model):\n url = models.CharField(\n unique=True,\n help_text=\"The domain URL client is hosted at\",\n # The maximum length of a full host name is 253 characters per RFC 1034\n max_length=255,\n validators=[_HOSTNAME_VALIDATOR],\n )\n\n def __str__(self) -> str:\n return f\"Client: {self.url}\"\n\n\nclass SafeApp(models.Model):\n class AccessControlPolicy(str, Enum):\n NO_RESTRICTIONS = \"NO_RESTRICTIONS\"\n DOMAIN_ALLOWLIST = \"DOMAIN_ALLOWLIST\"\n\n app_id = models.BigAutoField(primary_key=True)\n visible = models.BooleanField(\n default=True\n ) # True if this safe-app should be visible from the view. 
False otherwise\n url = models.URLField()\n name = models.CharField(max_length=200)\n icon_url = models.ImageField(\n validators=[validate_safe_app_icon_size],\n upload_to=safe_app_icon_path,\n max_length=255,\n null=True,\n blank=True,\n )\n description = models.CharField(max_length=200)\n chain_ids = ArrayField(models.PositiveBigIntegerField())\n provider = models.ForeignKey(\n Provider, null=True, blank=True, on_delete=models.SET_NULL\n )\n exclusive_clients = models.ManyToManyField(\n Client,\n blank=True,\n help_text=\"Clients that are only allowed to use this SafeApp\",\n )\n developer_website = models.URLField(null=True, blank=True)\n\n def get_access_control_type(self) -> AccessControlPolicy:\n if self.exclusive_clients.exists():\n return SafeApp.AccessControlPolicy.DOMAIN_ALLOWLIST\n return SafeApp.AccessControlPolicy.NO_RESTRICTIONS\n\n def __str__(self) -> str:\n return f\"{self.name} | {self.url} | chain_ids={self.chain_ids}\"\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=255)\n safe_apps = models.ManyToManyField(SafeApp, blank=True)\n\n def __str__(self) -> str:\n return f\"Tag: {self.name}\"\n\n\nclass Feature(models.Model):\n # A feature can be enabled for multiple Safe Apps and a Safe App can have multiple features enabled\n safe_apps = models.ManyToManyField(\n SafeApp, blank=True, help_text=\"Safe Apps where this feature is enabled.\"\n )\n key = models.CharField(\n unique=True,\n max_length=255,\n help_text=\"The unique name/key that identifies this feature\",\n )\n\n def __str__(self) -> str:\n return f\"Safe App Feature: {self.key}\"\n\n\nclass SocialProfile(models.Model):\n class Platform(models.TextChoices):\n DISCORD = \"DISCORD\"\n GITHUB = \"GITHUB\"\n TWITTER = \"TWITTER\"\n\n safe_app = models.ForeignKey(SafeApp, on_delete=models.CASCADE)\n platform = models.CharField(choices=Platform.choices, max_length=255)\n url = models.URLField()\n\n def __str__(self) -> str:\n return f\"Social Profile: {self.platform} | {self.url}\"\n", "path": "src/safe_apps/models.py"}]} | 1,945 | 160 |
gh_patches_debug_31871 | rasdani/github-patches | git_diff | pyca__cryptography-2250 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Documentation of DSA signature references incorrect RFC
Documentation of DSA signatures (https://cryptography.io/en/latest/hazmat/primitives/asymmetric/dsa/#signing) references RFC 6979 which sounds strange. Same for the naming of de/encoding functions at https://cryptography.io/en/latest/hazmat/primitives/asymmetric/utils/#cryptography.hazmat.primitives.asymmetric.utils.decode_rfc6979_signature
But that RFC doesn't actually define the {r,s} encoding. The actual ASN.1 module can be found in RFC 3279, which defines both the dsa-with-sha1 signature and Dss-Sig-Value, which is the {r,s} SEQUENCE.
The references to RFC 6979 are actually unfortunate, because it defines deterministic DSA signatures, while cryptography.io exposes the randomised version using openssl's `DSA_sign`.
</issue>
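A short round-trip sketch of what the existing helpers actually emit — the RFC 3279 `Dss-Sig-Value ::= SEQUENCE { r INTEGER, s INTEGER }` structure — assuming a `cryptography` version that still ships the rfc6979-named functions shown in the file below:

```python
# Round-trip through the current helpers; the bytes are a DER-encoded
# Dss-Sig-Value (RFC 3279), not anything defined by RFC 6979.
from cryptography.hazmat.primitives.asymmetric.utils import (
    decode_rfc6979_signature,
    encode_rfc6979_signature,
)

r, s = 1234567890, 987654321
der = encode_rfc6979_signature(r, s)
assert decode_rfc6979_signature(der) == (r, s)
print(der.hex())  # starts with 0x30 (SEQUENCE) followed by two 0x02 (INTEGER) elements
```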
<code>
[start of src/cryptography/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import struct
11 import sys
12 import warnings
13
14
15 DeprecatedIn09 = DeprecationWarning
16
17
18 def read_only_property(name):
19 return property(lambda self: getattr(self, name))
20
21
22 def register_interface(iface):
23 def register_decorator(klass):
24 verify_interface(iface, klass)
25 iface.register(klass)
26 return klass
27 return register_decorator
28
29
30 if hasattr(int, "from_bytes"):
31 int_from_bytes = int.from_bytes
32 else:
33 def int_from_bytes(data, byteorder, signed=False):
34 assert byteorder == 'big'
35 assert not signed
36
37 if len(data) % 4 != 0:
38 data = (b'\x00' * (4 - (len(data) % 4))) + data
39
40 result = 0
41
42 while len(data) > 0:
43 digit, = struct.unpack('>I', data[:4])
44 result = (result << 32) + digit
45 data = data[4:]
46
47 return result
48
49
50 def int_to_bytes(integer):
51 hex_string = '%x' % integer
52 n = len(hex_string)
53 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
54
55
56 class InterfaceNotImplemented(Exception):
57 pass
58
59
60 def verify_interface(iface, klass):
61 for method in iface.__abstractmethods__:
62 if not hasattr(klass, method):
63 raise InterfaceNotImplemented(
64 "{0} is missing a {1!r} method".format(klass, method)
65 )
66 if isinstance(getattr(iface, method), abc.abstractproperty):
67 # Can't properly verify these yet.
68 continue
69 spec = inspect.getargspec(getattr(iface, method))
70 actual = inspect.getargspec(getattr(klass, method))
71 if spec != actual:
72 raise InterfaceNotImplemented(
73 "{0}.{1}'s signature differs from the expected. Expected: "
74 "{2!r}. Received: {3!r}".format(
75 klass, method, spec, actual
76 )
77 )
78
79
80 if sys.version_info >= (2, 7):
81 def bit_length(x):
82 return x.bit_length()
83 else:
84 def bit_length(x):
85 return len(bin(x)) - (2 + (x <= 0))
86
87
88 class _DeprecatedValue(object):
89 def __init__(self, value, message, warning_class):
90 self.value = value
91 self.message = message
92 self.warning_class = warning_class
93
94
95 class _ModuleWithDeprecations(object):
96 def __init__(self, module):
97 self.__dict__["_module"] = module
98
99 def __getattr__(self, attr):
100 obj = getattr(self._module, attr)
101 if isinstance(obj, _DeprecatedValue):
102 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
103 obj = obj.value
104 return obj
105
106 def __setattr__(self, attr, value):
107 setattr(self._module, attr, value)
108
109 def __dir__(self):
110 return ["_module"] + dir(self._module)
111
112
113 def deprecated(value, module_name, message, warning_class):
114 module = sys.modules[module_name]
115 if not isinstance(module, _ModuleWithDeprecations):
116 sys.modules[module_name] = module = _ModuleWithDeprecations(module)
117 return _DeprecatedValue(value, message, warning_class)
118
[end of src/cryptography/utils.py]
[start of src/cryptography/hazmat/primitives/asymmetric/utils.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 from pyasn1.codec.der import decoder, encoder
8 from pyasn1.error import PyAsn1Error
9 from pyasn1.type import namedtype, univ
10
11 import six
12
13
14 class _DSSSigValue(univ.Sequence):
15 componentType = namedtype.NamedTypes(
16 namedtype.NamedType('r', univ.Integer()),
17 namedtype.NamedType('s', univ.Integer())
18 )
19
20
21 def decode_rfc6979_signature(signature):
22 try:
23 data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())
24 except PyAsn1Error:
25 raise ValueError("Invalid signature data. Unable to decode ASN.1")
26
27 if remaining:
28 raise ValueError(
29 "The signature contains bytes after the end of the ASN.1 sequence."
30 )
31
32 r = int(data.getComponentByName('r'))
33 s = int(data.getComponentByName('s'))
34 return (r, s)
35
36
37 def encode_rfc6979_signature(r, s):
38 if (
39 not isinstance(r, six.integer_types) or
40 not isinstance(s, six.integer_types)
41 ):
42 raise ValueError("Both r and s must be integers")
43
44 sig = _DSSSigValue()
45 sig.setComponentByName('r', r)
46 sig.setComponentByName('s', s)
47 return encoder.encode(sig)
48
[end of src/cryptography/hazmat/primitives/asymmetric/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/hazmat/primitives/asymmetric/utils.py b/src/cryptography/hazmat/primitives/asymmetric/utils.py
--- a/src/cryptography/hazmat/primitives/asymmetric/utils.py
+++ b/src/cryptography/hazmat/primitives/asymmetric/utils.py
@@ -4,12 +4,16 @@
from __future__ import absolute_import, division, print_function
+import warnings
+
from pyasn1.codec.der import decoder, encoder
from pyasn1.error import PyAsn1Error
from pyasn1.type import namedtype, univ
import six
+from cryptography import utils
+
class _DSSSigValue(univ.Sequence):
componentType = namedtype.NamedTypes(
@@ -19,6 +23,17 @@
def decode_rfc6979_signature(signature):
+ warnings.warn(
+ "decode_rfc6979_signature is deprecated and will "
+ "be removed in a future version, use decode_dss_signature instead "
+ "instead.",
+ utils.DeprecatedIn10,
+ stacklevel=2
+ )
+ return decode_dss_signature(signature)
+
+
+def decode_dss_signature(signature):
try:
data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())
except PyAsn1Error:
@@ -35,6 +50,17 @@
def encode_rfc6979_signature(r, s):
+ warnings.warn(
+ "encode_rfc6979_signature is deprecated and will "
+ "be removed in a future version, use encode_dss_signature instead "
+ "instead.",
+ utils.DeprecatedIn10,
+ stacklevel=2
+ )
+ return encode_dss_signature(r, s)
+
+
+def encode_dss_signature(r, s):
if (
not isinstance(r, six.integer_types) or
not isinstance(s, six.integer_types)
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -13,6 +13,7 @@
DeprecatedIn09 = DeprecationWarning
+DeprecatedIn10 = PendingDeprecationWarning
def read_only_property(name):
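A usage sketch of the migration path this patch creates, assuming a `cryptography` build that contains it: the `dss`-named functions are the new API, and the old names keep working but emit the `DeprecatedIn10` (`PendingDeprecationWarning`) defined above.

```python
import warnings

from cryptography.hazmat.primitives.asymmetric.utils import (
    encode_dss_signature,
    encode_rfc6979_signature,
)

sig = encode_dss_signature(12, 34)  # preferred name going forward

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = encode_rfc6979_signature(12, 34)  # still works, but warns

assert legacy == sig
print([w.category.__name__ for w in caught])  # ['PendingDeprecationWarning']
```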
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/asymmetric/utils.py b/src/cryptography/hazmat/primitives/asymmetric/utils.py\n--- a/src/cryptography/hazmat/primitives/asymmetric/utils.py\n+++ b/src/cryptography/hazmat/primitives/asymmetric/utils.py\n@@ -4,12 +4,16 @@\n \n from __future__ import absolute_import, division, print_function\n \n+import warnings\n+\n from pyasn1.codec.der import decoder, encoder\n from pyasn1.error import PyAsn1Error\n from pyasn1.type import namedtype, univ\n \n import six\n \n+from cryptography import utils\n+\n \n class _DSSSigValue(univ.Sequence):\n componentType = namedtype.NamedTypes(\n@@ -19,6 +23,17 @@\n \n \n def decode_rfc6979_signature(signature):\n+ warnings.warn(\n+ \"decode_rfc6979_signature is deprecated and will \"\n+ \"be removed in a future version, use decode_dss_signature instead \"\n+ \"instead.\",\n+ utils.DeprecatedIn10,\n+ stacklevel=2\n+ )\n+ return decode_dss_signature(signature)\n+\n+\n+def decode_dss_signature(signature):\n try:\n data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())\n except PyAsn1Error:\n@@ -35,6 +50,17 @@\n \n \n def encode_rfc6979_signature(r, s):\n+ warnings.warn(\n+ \"encode_rfc6979_signature is deprecated and will \"\n+ \"be removed in a future version, use encode_dss_signature instead \"\n+ \"instead.\",\n+ utils.DeprecatedIn10,\n+ stacklevel=2\n+ )\n+ return encode_dss_signature(r, s)\n+\n+\n+def encode_dss_signature(r, s):\n if (\n not isinstance(r, six.integer_types) or\n not isinstance(s, six.integer_types)\ndiff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -13,6 +13,7 @@\n \n \n DeprecatedIn09 = DeprecationWarning\n+DeprecatedIn10 = PendingDeprecationWarning\n \n \n def read_only_property(name):\n", "issue": "Documentation of DSA signature references incorrect RFC\nDocumentation of DSA signatures (https://cryptography.io/en/latest/hazmat/primitives/asymmetric/dsa/#signing) references RFC 6979 which sounds strange. Same for the naming of de/encoding functions at https://cryptography.io/en/latest/hazmat/primitives/asymmetric/utils/#cryptography.hazmat.primitives.asymmetric.utils.decode_rfc6979_signature\n\nBut that RFC doesn't actually define the {r,s} encoding. The actual asn1 module can be found in RFC 3279 which defines both dsa-with-sha1 signature and Dss-Sig-Value which is the {r,s} sequence.\n\nThe references to RFC 6979 are actually unfortunate, because it defines deterministic DSA signatures, while cryptography.io exposes the randomised version using openssl's `DSA_sign`.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom pyasn1.codec.der import decoder, encoder\nfrom pyasn1.error import PyAsn1Error\nfrom pyasn1.type import namedtype, univ\n\nimport six\n\n\nclass _DSSSigValue(univ.Sequence):\n componentType = namedtype.NamedTypes(\n namedtype.NamedType('r', univ.Integer()),\n namedtype.NamedType('s', univ.Integer())\n )\n\n\ndef decode_rfc6979_signature(signature):\n try:\n data, remaining = decoder.decode(signature, asn1Spec=_DSSSigValue())\n except PyAsn1Error:\n raise ValueError(\"Invalid signature data. Unable to decode ASN.1\")\n\n if remaining:\n raise ValueError(\n \"The signature contains bytes after the end of the ASN.1 sequence.\"\n )\n\n r = int(data.getComponentByName('r'))\n s = int(data.getComponentByName('s'))\n return (r, s)\n\n\ndef encode_rfc6979_signature(r, s):\n if (\n not isinstance(r, six.integer_types) or\n not isinstance(s, six.integer_types)\n ):\n raise ValueError(\"Both r and s must be integers\")\n\n sig = _DSSSigValue()\n sig.setComponentByName('r', r)\n sig.setComponentByName('s', s)\n return encoder.encode(sig)\n", "path": "src/cryptography/hazmat/primitives/asymmetric/utils.py"}]} | 2,213 | 500 |
gh_patches_debug_17244 | rasdani/github-patches | git_diff | networkx__networkx-6132 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve test coverage for algorithms in load centrality
Currently we don't have full coverage for the algorithms in load centrality. Code blocks that are highlighted in red on Codecov at https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/centrality/load.py don't have corresponding tests. The tests should be added in https://github.com/networkx/networkx/blob/main/networkx/algorithms/centrality/tests/test_load_centrality.py
### Current Behavior
We don't test all the paths the code can take.
### Expected Behavior
We should be testing everything so there aren't any surprises.
### Steps to Reproduce
Visit https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/centrality/load.py
</issue>
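A sketch of the kind of test that would exercise the undercovered branches (the `v=`, `cutoff=`, `normalized=False` and `weight=` paths), written pytest-style for `test_load_centrality.py`. Parameter names follow the current `nx.load_centrality` API; exact expected values should be pinned against the reference implementation, and the parameter combinations adjusted to match the lines Codecov actually flags.

```python
import networkx as nx


def test_load_centrality_parameter_branches():
    G = nx.path_graph(4)
    nx.set_edge_attributes(G, 1.0, "weight")

    default = nx.load_centrality(G)
    assert set(default) == set(G.nodes)
    assert all(value >= 0.0 for value in default.values())
    # Endpoints of a path are never interior to a shortest path.
    assert default[0] == 0.0 and default[3] == 0.0

    # Parameter combinations likely to hit branches the report flags.
    unnormalized = nx.load_centrality(G, normalized=False)
    weighted = nx.load_centrality(G, weight="weight")
    truncated = nx.load_centrality(G, cutoff=2)
    single = nx.load_centrality(G, v=1)  # single-node form returns a number

    assert set(unnormalized) == set(weighted) == set(truncated) == set(G.nodes)
    assert single >= 0.0
```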
<code>
[start of networkx/readwrite/adjlist.py]
1 """
2 **************
3 Adjacency List
4 **************
5 Read and write NetworkX graphs as adjacency lists.
6
7 Adjacency list format is useful for graphs without data associated
8 with nodes or edges and for nodes that can be meaningfully represented
9 as strings.
10
11 Format
12 ------
13 The adjacency list format consists of lines with node labels. The
14 first label in a line is the source node. Further labels in the line
15 are considered target nodes and are added to the graph along with an edge
16 between the source node and target node.
17
18 The graph with edges a-b, a-c, d-e can be represented as the following
19 adjacency list (anything following the # in a line is a comment)::
20
21 a b c # source target target
22 d e
23 """
24
25 __all__ = ["generate_adjlist", "write_adjlist", "parse_adjlist", "read_adjlist"]
26
27 import networkx as nx
28 from networkx.utils import open_file
29
30
31 def generate_adjlist(G, delimiter=" "):
32 """Generate a single line of the graph G in adjacency list format.
33
34 Parameters
35 ----------
36 G : NetworkX graph
37
38 delimiter : string, optional
39 Separator for node labels
40
41 Returns
42 -------
43 lines : string
44 Lines of data in adjlist format.
45
46 Examples
47 --------
48 >>> G = nx.lollipop_graph(4, 3)
49 >>> for line in nx.generate_adjlist(G):
50 ... print(line)
51 0 1 2 3
52 1 2 3
53 2 3
54 3 4
55 4 5
56 5 6
57 6
58
59 See Also
60 --------
61 write_adjlist, read_adjlist
62
63 """
64 directed = G.is_directed()
65 seen = set()
66 for s, nbrs in G.adjacency():
67 line = str(s) + delimiter
68 for t, data in nbrs.items():
69 if not directed and t in seen:
70 continue
71 if G.is_multigraph():
72 for d in data.values():
73 line += str(t) + delimiter
74 else:
75 line += str(t) + delimiter
76 if not directed:
77 seen.add(s)
78 yield line[: -len(delimiter)]
79
80
81 @open_file(1, mode="wb")
82 def write_adjlist(G, path, comments="#", delimiter=" ", encoding="utf-8"):
83 """Write graph G in single-line adjacency-list format to path.
84
85
86 Parameters
87 ----------
88 G : NetworkX graph
89
90 path : string or file
91 Filename or file handle for data output.
92 Filenames ending in .gz or .bz2 will be compressed.
93
94 comments : string, optional
95 Marker for comment lines
96
97 delimiter : string, optional
98 Separator for node labels
99
100 encoding : string, optional
101 Text encoding.
102
103 Examples
104 --------
105 >>> G = nx.path_graph(4)
106 >>> nx.write_adjlist(G, "test.adjlist")
107
108 The path can be a filehandle or a string with the name of the file. If a
109 filehandle is provided, it has to be opened in 'wb' mode.
110
111 >>> fh = open("test.adjlist", "wb")
112 >>> nx.write_adjlist(G, fh)
113
114 Notes
115 -----
116 This format does not store graph, node, or edge data.
117
118 See Also
119 --------
120 read_adjlist, generate_adjlist
121 """
122 import sys
123 import time
124
125 pargs = comments + " ".join(sys.argv) + "\n"
126 header = (
127 pargs
128 + comments
129 + f" GMT {time.asctime(time.gmtime())}\n"
130 + comments
131 + f" {G.name}\n"
132 )
133 path.write(header.encode(encoding))
134
135 for line in generate_adjlist(G, delimiter):
136 line += "\n"
137 path.write(line.encode(encoding))
138
139
140 def parse_adjlist(
141 lines, comments="#", delimiter=None, create_using=None, nodetype=None
142 ):
143 """Parse lines of a graph adjacency list representation.
144
145 Parameters
146 ----------
147 lines : list or iterator of strings
148 Input data in adjlist format
149
150 create_using : NetworkX graph constructor, optional (default=nx.Graph)
151 Graph type to create. If graph instance, then cleared before populated.
152
153 nodetype : Python type, optional
154 Convert nodes to this type.
155
156 comments : string, optional
157 Marker for comment lines
158
159 delimiter : string, optional
160 Separator for node labels. The default is whitespace.
161
162 Returns
163 -------
164 G: NetworkX graph
165 The graph corresponding to the lines in adjacency list format.
166
167 Examples
168 --------
169 >>> lines = ["1 2 5", "2 3 4", "3 5", "4", "5"]
170 >>> G = nx.parse_adjlist(lines, nodetype=int)
171 >>> nodes = [1, 2, 3, 4, 5]
172 >>> all(node in G for node in nodes)
173 True
174 >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]
175 >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges)
176 True
177
178 See Also
179 --------
180 read_adjlist
181
182 """
183 G = nx.empty_graph(0, create_using)
184 for line in lines:
185 p = line.find(comments)
186 if p >= 0:
187 line = line[:p]
188 if not len(line):
189 continue
190 vlist = line.strip().split(delimiter)
191 u = vlist.pop(0)
192 # convert types
193 if nodetype is not None:
194 try:
195 u = nodetype(u)
196 except BaseException as err:
197 raise TypeError(
198 f"Failed to convert node ({u}) to type " f"{nodetype}"
199 ) from err
200 G.add_node(u)
201 if nodetype is not None:
202 try:
203 vlist = list(map(nodetype, vlist))
204 except BaseException as err:
205 raise TypeError(
206 f"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}"
207 ) from err
208 G.add_edges_from([(u, v) for v in vlist])
209 return G
210
211
212 @open_file(0, mode="rb")
213 def read_adjlist(
214 path,
215 comments="#",
216 delimiter=None,
217 create_using=None,
218 nodetype=None,
219 encoding="utf-8",
220 ):
221 """Read graph in adjacency list format from path.
222
223 Parameters
224 ----------
225 path : string or file
226 Filename or file handle to read.
227 Filenames ending in .gz or .bz2 will be uncompressed.
228
229 create_using : NetworkX graph constructor, optional (default=nx.Graph)
230 Graph type to create. If graph instance, then cleared before populated.
231
232 nodetype : Python type, optional
233 Convert nodes to this type.
234
235 comments : string, optional
236 Marker for comment lines
237
238 delimiter : string, optional
239 Separator for node labels. The default is whitespace.
240
241 Returns
242 -------
243 G: NetworkX graph
244 The graph corresponding to the lines in adjacency list format.
245
246 Examples
247 --------
248 >>> G = nx.path_graph(4)
249 >>> nx.write_adjlist(G, "test.adjlist")
250 >>> G = nx.read_adjlist("test.adjlist")
251
252 The path can be a filehandle or a string with the name of the file. If a
253 filehandle is provided, it has to be opened in 'rb' mode.
254
255 >>> fh = open("test.adjlist", "rb")
256 >>> G = nx.read_adjlist(fh)
257
258 Filenames ending in .gz or .bz2 will be compressed.
259
260 >>> nx.write_adjlist(G, "test.adjlist.gz")
261 >>> G = nx.read_adjlist("test.adjlist.gz")
262
263 The optional nodetype is a function to convert node strings to nodetype.
264
265 For example
266
267 >>> G = nx.read_adjlist("test.adjlist", nodetype=int)
268
269 will attempt to convert all nodes to integer type.
270
271 Since nodes must be hashable, the function nodetype must return hashable
272 types (e.g. int, float, str, frozenset - or tuples of those, etc.)
273
274 The optional create_using parameter indicates the type of NetworkX graph
275 created. The default is `nx.Graph`, an undirected graph.
276 To read the data as a directed graph use
277
278 >>> G = nx.read_adjlist("test.adjlist", create_using=nx.DiGraph)
279
280 Notes
281 -----
282 This format does not store graph or node data.
283
284 See Also
285 --------
286 write_adjlist
287 """
288 lines = (line.decode(encoding) for line in path)
289 return parse_adjlist(
290 lines,
291 comments=comments,
292 delimiter=delimiter,
293 create_using=create_using,
294 nodetype=nodetype,
295 )
296
[end of networkx/readwrite/adjlist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/readwrite/adjlist.py b/networkx/readwrite/adjlist.py
--- a/networkx/readwrite/adjlist.py
+++ b/networkx/readwrite/adjlist.py
@@ -60,6 +60,14 @@
--------
write_adjlist, read_adjlist
+ Notes
+ -----
+ The default `delimiter=" "` will result in unexpected results if node names contain
+ whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are
+ valid in node names.
+
+ NB: This option is not available for data that isn't user-generated.
+
"""
directed = G.is_directed()
seen = set()
@@ -113,6 +121,11 @@
Notes
-----
+ The default `delimiter=" "` will result in unexpected results if node names contain
+ whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are
+ valid in node names.
+ NB: This option is not available for data that isn't user-generated.
+
This format does not store graph, node, or edge data.
See Also
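A sketch of the caveat the added docstring text describes, using `generate_adjlist` and `parse_adjlist` from the file above; the node names are illustrative:

```python
import networkx as nx

G = nx.Graph()
G.add_edge("New York", "Los Angeles")

# Default delimiter=" " cannot round-trip node names that contain spaces.
ambiguous = list(nx.generate_adjlist(G))
H_bad = nx.parse_adjlist(ambiguous)
print(sorted(H_bad.nodes))  # ['Angeles', 'Los', 'New', 'York'] -- four bogus nodes

# An explicit delimiter that cannot appear in node names round-trips cleanly.
safe = list(nx.generate_adjlist(G, delimiter="|"))
H_ok = nx.parse_adjlist(safe, delimiter="|")
assert set(H_ok.nodes) == {"New York", "Los Angeles"}
```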
| {"golden_diff": "diff --git a/networkx/readwrite/adjlist.py b/networkx/readwrite/adjlist.py\n--- a/networkx/readwrite/adjlist.py\n+++ b/networkx/readwrite/adjlist.py\n@@ -60,6 +60,14 @@\n --------\n write_adjlist, read_adjlist\n \n+ Notes\n+ -----\n+ The default `delimiter=\" \"` will result in unexpected results if node names contain\n+ whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are\n+ valid in node names.\n+\n+ NB: This option is not available for data that isn't user-generated.\n+\n \"\"\"\n directed = G.is_directed()\n seen = set()\n@@ -113,6 +121,11 @@\n \n Notes\n -----\n+ The default `delimiter=\" \"` will result in unexpected results if node names contain\n+ whitespace characters. To avoid this problem, specify an alternate delimiter when spaces are\n+ valid in node names.\n+ NB: This option is not available for data that isn't user-generated.\n+\n This format does not store graph, node, or edge data.\n \n See Also\n", "issue": " Improve test coverage for algorithms in load centrality\n<!-- If you have a general question about NetworkX, please use the discussions tab to create a new discussion -->\r\n\r\n<!--- Provide a general summary of the issue in the Title above -->\r\n\r\nCurrently we don't have full coverage for the algorithms in load centrality. Code blocks which are highlighted with red at codcov https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/centrality/load.py don't have corresponding tests. The tests should be added in https://github.com/networkx/networkx/blob/main/networkx/algorithms/centrality/tests/test_load_centrality.py\r\n\r\n### Current Behavior\r\n\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n\r\nWe don't test all the paths the code can take us.\r\n\r\n### Expected Behavior\r\n\r\n<!--- Tell us what should happen -->\r\n\r\nWe should be testing everything so there aren't any surprises.\r\n\r\n### Steps to Reproduce\r\n\r\n<!--- Provide a minimal example that reproduces the bug -->\r\n\r\nVisit https://app.codecov.io/gh/networkx/networkx/blob/main/networkx/algorithms/centrality/load.py\r\n\n", "before_files": [{"content": "\"\"\"\n**************\nAdjacency List\n**************\nRead and write NetworkX graphs as adjacency lists.\n\nAdjacency list format is useful for graphs without data associated\nwith nodes or edges and for nodes that can be meaningfully represented\nas strings.\n\nFormat\n------\nThe adjacency list format consists of lines with node labels. The\nfirst label in a line is the source node. Further labels in the line\nare considered target nodes and are added to the graph along with an edge\nbetween the source node and target node.\n\nThe graph with edges a-b, a-c, d-e can be represented as the following\nadjacency list (anything following the # in a line is a comment)::\n\n a b c # source target target\n d e\n\"\"\"\n\n__all__ = [\"generate_adjlist\", \"write_adjlist\", \"parse_adjlist\", \"read_adjlist\"]\n\nimport networkx as nx\nfrom networkx.utils import open_file\n\n\ndef generate_adjlist(G, delimiter=\" \"):\n \"\"\"Generate a single line of the graph G in adjacency list format.\n\n Parameters\n ----------\n G : NetworkX graph\n\n delimiter : string, optional\n Separator for node labels\n\n Returns\n -------\n lines : string\n Lines of data in adjlist format.\n\n Examples\n --------\n >>> G = nx.lollipop_graph(4, 3)\n >>> for line in nx.generate_adjlist(G):\n ... 
print(line)\n 0 1 2 3\n 1 2 3\n 2 3\n 3 4\n 4 5\n 5 6\n 6\n\n See Also\n --------\n write_adjlist, read_adjlist\n\n \"\"\"\n directed = G.is_directed()\n seen = set()\n for s, nbrs in G.adjacency():\n line = str(s) + delimiter\n for t, data in nbrs.items():\n if not directed and t in seen:\n continue\n if G.is_multigraph():\n for d in data.values():\n line += str(t) + delimiter\n else:\n line += str(t) + delimiter\n if not directed:\n seen.add(s)\n yield line[: -len(delimiter)]\n\n\n@open_file(1, mode=\"wb\")\ndef write_adjlist(G, path, comments=\"#\", delimiter=\" \", encoding=\"utf-8\"):\n \"\"\"Write graph G in single-line adjacency-list format to path.\n\n\n Parameters\n ----------\n G : NetworkX graph\n\n path : string or file\n Filename or file handle for data output.\n Filenames ending in .gz or .bz2 will be compressed.\n\n comments : string, optional\n Marker for comment lines\n\n delimiter : string, optional\n Separator for node labels\n\n encoding : string, optional\n Text encoding.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> nx.write_adjlist(G, \"test.adjlist\")\n\n The path can be a filehandle or a string with the name of the file. If a\n filehandle is provided, it has to be opened in 'wb' mode.\n\n >>> fh = open(\"test.adjlist\", \"wb\")\n >>> nx.write_adjlist(G, fh)\n\n Notes\n -----\n This format does not store graph, node, or edge data.\n\n See Also\n --------\n read_adjlist, generate_adjlist\n \"\"\"\n import sys\n import time\n\n pargs = comments + \" \".join(sys.argv) + \"\\n\"\n header = (\n pargs\n + comments\n + f\" GMT {time.asctime(time.gmtime())}\\n\"\n + comments\n + f\" {G.name}\\n\"\n )\n path.write(header.encode(encoding))\n\n for line in generate_adjlist(G, delimiter):\n line += \"\\n\"\n path.write(line.encode(encoding))\n\n\ndef parse_adjlist(\n lines, comments=\"#\", delimiter=None, create_using=None, nodetype=None\n):\n \"\"\"Parse lines of a graph adjacency list representation.\n\n Parameters\n ----------\n lines : list or iterator of strings\n Input data in adjlist format\n\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n nodetype : Python type, optional\n Convert nodes to this type.\n\n comments : string, optional\n Marker for comment lines\n\n delimiter : string, optional\n Separator for node labels. 
The default is whitespace.\n\n Returns\n -------\n G: NetworkX graph\n The graph corresponding to the lines in adjacency list format.\n\n Examples\n --------\n >>> lines = [\"1 2 5\", \"2 3 4\", \"3 5\", \"4\", \"5\"]\n >>> G = nx.parse_adjlist(lines, nodetype=int)\n >>> nodes = [1, 2, 3, 4, 5]\n >>> all(node in G for node in nodes)\n True\n >>> edges = [(1, 2), (1, 5), (2, 3), (2, 4), (3, 5)]\n >>> all((u, v) in G.edges() or (v, u) in G.edges() for (u, v) in edges)\n True\n\n See Also\n --------\n read_adjlist\n\n \"\"\"\n G = nx.empty_graph(0, create_using)\n for line in lines:\n p = line.find(comments)\n if p >= 0:\n line = line[:p]\n if not len(line):\n continue\n vlist = line.strip().split(delimiter)\n u = vlist.pop(0)\n # convert types\n if nodetype is not None:\n try:\n u = nodetype(u)\n except BaseException as err:\n raise TypeError(\n f\"Failed to convert node ({u}) to type \" f\"{nodetype}\"\n ) from err\n G.add_node(u)\n if nodetype is not None:\n try:\n vlist = list(map(nodetype, vlist))\n except BaseException as err:\n raise TypeError(\n f\"Failed to convert nodes ({','.join(vlist)}) to type {nodetype}\"\n ) from err\n G.add_edges_from([(u, v) for v in vlist])\n return G\n\n\n@open_file(0, mode=\"rb\")\ndef read_adjlist(\n path,\n comments=\"#\",\n delimiter=None,\n create_using=None,\n nodetype=None,\n encoding=\"utf-8\",\n):\n \"\"\"Read graph in adjacency list format from path.\n\n Parameters\n ----------\n path : string or file\n Filename or file handle to read.\n Filenames ending in .gz or .bz2 will be uncompressed.\n\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n nodetype : Python type, optional\n Convert nodes to this type.\n\n comments : string, optional\n Marker for comment lines\n\n delimiter : string, optional\n Separator for node labels. The default is whitespace.\n\n Returns\n -------\n G: NetworkX graph\n The graph corresponding to the lines in adjacency list format.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> nx.write_adjlist(G, \"test.adjlist\")\n >>> G = nx.read_adjlist(\"test.adjlist\")\n\n The path can be a filehandle or a string with the name of the file. If a\n filehandle is provided, it has to be opened in 'rb' mode.\n\n >>> fh = open(\"test.adjlist\", \"rb\")\n >>> G = nx.read_adjlist(fh)\n\n Filenames ending in .gz or .bz2 will be compressed.\n\n >>> nx.write_adjlist(G, \"test.adjlist.gz\")\n >>> G = nx.read_adjlist(\"test.adjlist.gz\")\n\n The optional nodetype is a function to convert node strings to nodetype.\n\n For example\n\n >>> G = nx.read_adjlist(\"test.adjlist\", nodetype=int)\n\n will attempt to convert all nodes to integer type.\n\n Since nodes must be hashable, the function nodetype must return hashable\n types (e.g. int, float, str, frozenset - or tuples of those, etc.)\n\n The optional create_using parameter indicates the type of NetworkX graph\n created. The default is `nx.Graph`, an undirected graph.\n To read the data as a directed graph use\n\n >>> G = nx.read_adjlist(\"test.adjlist\", create_using=nx.DiGraph)\n\n Notes\n -----\n This format does not store graph or node data.\n\n See Also\n --------\n write_adjlist\n \"\"\"\n lines = (line.decode(encoding) for line in path)\n return parse_adjlist(\n lines,\n comments=comments,\n delimiter=delimiter,\n create_using=create_using,\n nodetype=nodetype,\n )\n", "path": "networkx/readwrite/adjlist.py"}]} | 3,526 | 253 |
gh_patches_debug_32996 | rasdani/github-patches | git_diff | openmc-dev__openmc-1755 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Expose the set_contained option through the C-API
In Cardinal, we're setting the temperatures of pebbles (which are filled with many cells representing TRISOs, etc.) by calling `cell->set_temperature(T, instance, true /* set_contained */)`. Unfortunately, the `runtime_error`s thrown by OpenMC in this function do not get routed through MOOSE, so if OpenMC encounters an error, all that shows up on the MOOSE side is an unhelpful `MPI_Abort`.
For instance, if we try to set a temperature of `0` to an OpenMC cell, but don't have data at that temperature, we get this error message:
```
Time Step 1, time = 1, dt = 1
Sending temperature to OpenMC...
application called MPI_Abort(MPI_COMM_WORLD, 1) - process 0
[unset]: write_line error; fd=-1 buf=:cmd=abort exitcode=1
```
even though OpenMC internally is throwing an exception with a helpful message:
*in Cell::set_temperature*:
```
throw std::runtime_error{"Temperature is below minimum temperature at "
"which data is available."};
```
Because the `openmc_cell_set_temperature` function _does_ catch this exception and saves the error message, I'd like to use `openmc_cell_set_temperature`. *But*, the interface right now does not let you control the `set_contained` parameter.
### Desired Change
Allow `openmc_cell_set_temperature` to control the `set_contained` parameter so that we can catch exceptions for cell temperatures in MOOSE.
</issue>
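A sketch of the calling pattern the issue is after, seen from the Python side of `openmc.lib`: every binding in the file below sets `errcheck = _error_handler`, so a nonzero return code from the shared library surfaces as a catchable exception whose message the caller can log, instead of ending in a bare `MPI_Abort`. The cell ID, the temperature, and the assumption that a model and cross sections are already present in the working directory are all illustrative; the exception class is left generic because the exact type depends on the error.

```python
import openmc.lib

openmc.lib.init()  # assumes a valid model and data library in the working directory
try:
    # A temperature below the data's minimum should raise, not abort.
    openmc.lib.cells[1].set_temperature(0.0)
except Exception as err:  # e.g. an exception raised by _error_handler
    print(f"OpenMC rejected the temperature update: {err}")
finally:
    openmc.lib.finalize()
```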
<code>
[start of openmc/lib/cell.py]
1 import sys
2
3 from collections.abc import Mapping, Iterable
4 from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
5 from weakref import WeakValueDictionary
6
7 import numpy as np
8
9 from ..exceptions import AllocationError, InvalidIDError
10 from . import _dll
11 from .core import _FortranObjectWithID
12 from .error import _error_handler
13 from .material import Material
14
15 __all__ = ['Cell', 'cells']
16
17 # Cell functions
18 _dll.openmc_extend_cells.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]
19 _dll.openmc_extend_cells.restype = c_int
20 _dll.openmc_extend_cells.errcheck = _error_handler
21 _dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]
22 _dll.openmc_cell_get_id.restype = c_int
23 _dll.openmc_cell_get_id.errcheck = _error_handler
24 _dll.openmc_cell_get_fill.argtypes = [
25 c_int32, POINTER(c_int), POINTER(POINTER(c_int32)), POINTER(c_int32)]
26 _dll.openmc_cell_get_fill.restype = c_int
27 _dll.openmc_cell_get_fill.errcheck = _error_handler
28 _dll.openmc_cell_get_temperature.argtypes = [
29 c_int32, POINTER(c_int32), POINTER(c_double)]
30 _dll.openmc_cell_get_temperature.restype = c_int
31 _dll.openmc_cell_get_temperature.errcheck = _error_handler
32 _dll.openmc_cell_get_name.argtypes = [c_int32, POINTER(c_char_p)]
33 _dll.openmc_cell_get_name.restype = c_int
34 _dll.openmc_cell_get_name.errcheck = _error_handler
35 _dll.openmc_cell_set_name.argtypes = [c_int32, c_char_p]
36 _dll.openmc_cell_set_name.restype = c_int
37 _dll.openmc_cell_set_name.errcheck = _error_handler
38 _dll.openmc_cell_set_fill.argtypes = [
39 c_int32, c_int, c_int32, POINTER(c_int32)]
40 _dll.openmc_cell_set_fill.restype = c_int
41 _dll.openmc_cell_set_fill.errcheck = _error_handler
42 _dll.openmc_cell_set_id.argtypes = [c_int32, c_int32]
43 _dll.openmc_cell_set_id.restype = c_int
44 _dll.openmc_cell_set_id.errcheck = _error_handler
45 _dll.openmc_cell_set_temperature.argtypes = [
46 c_int32, c_double, POINTER(c_int32)]
47 _dll.openmc_cell_set_temperature.restype = c_int
48 _dll.openmc_cell_set_temperature.errcheck = _error_handler
49 _dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]
50 _dll.openmc_get_cell_index.restype = c_int
51 _dll.openmc_get_cell_index.errcheck = _error_handler
52 _dll.cells_size.restype = c_int
53 _dll.openmc_cell_bounding_box.argtypes = [c_int,
54 POINTER(c_double),
55 POINTER(c_double)]
56 _dll.openmc_cell_bounding_box.restype = c_int
57 _dll.openmc_cell_bounding_box.errcheck = _error_handler
58
59
60 class Cell(_FortranObjectWithID):
61 """Cell stored internally.
62
63 This class exposes a cell that is stored internally in the OpenMC
64 library. To obtain a view of a cell with a given ID, use the
65 :data:`openmc.lib.cells` mapping.
66
67 Parameters
68 ----------
69 index : int
70 Index in the `cells` array.
71
72 Attributes
73 ----------
74 id : int
75 ID of the cell
76
77 """
78 __instances = WeakValueDictionary()
79
80 def __new__(cls, uid=None, new=True, index=None):
81 mapping = cells
82 if index is None:
83 if new:
84 # Determine ID to assign
85 if uid is None:
86 uid = max(mapping, default=0) + 1
87 else:
88 if uid in mapping:
89 raise AllocationError('A cell with ID={} has already '
90 'been allocated.'.format(uid))
91
92 index = c_int32()
93 _dll.openmc_extend_cells(1, index, None)
94 index = index.value
95 else:
96 index = mapping[uid]._index
97
98 if index not in cls.__instances:
99 instance = super().__new__(cls)
100 instance._index = index
101 if uid is not None:
102 instance.id = uid
103 cls.__instances[index] = instance
104
105 return cls.__instances[index]
106
107 @property
108 def id(self):
109 cell_id = c_int32()
110 _dll.openmc_cell_get_id(self._index, cell_id)
111 return cell_id.value
112
113 @id.setter
114 def id(self, cell_id):
115 _dll.openmc_cell_set_id(self._index, cell_id)
116
117 @property
118 def name(self):
119 name = c_char_p()
120 _dll.openmc_cell_get_name(self._index, name)
121 return name.value.decode()
122
123 @name.setter
124 def name(self, name):
125 name_ptr = c_char_p(name.encode())
126 _dll.openmc_cell_set_name(self._index, name_ptr)
127
128 @property
129 def fill(self):
130 fill_type = c_int()
131 indices = POINTER(c_int32)()
132 n = c_int32()
133 _dll.openmc_cell_get_fill(self._index, fill_type, indices, n)
134 if fill_type.value == 0:
135 if n.value > 1:
136 return [Material(index=i) for i in indices[:n.value]]
137 else:
138 index = indices[0]
139 return Material(index=index)
140 else:
141 raise NotImplementedError
142
143 @fill.setter
144 def fill(self, fill):
145 if isinstance(fill, Iterable):
146 n = len(fill)
147 indices = (c_int32*n)(*(m._index if m is not None else -1
148 for m in fill))
149 _dll.openmc_cell_set_fill(self._index, 0, n, indices)
150 elif isinstance(fill, Material):
151 indices = (c_int32*1)(fill._index)
152 _dll.openmc_cell_set_fill(self._index, 0, 1, indices)
153 elif fill is None:
154 indices = (c_int32*1)(-1)
155 _dll.openmc_cell_set_fill(self._index, 0, 1, indices)
156
157 def get_temperature(self, instance=None):
158 """Get the temperature of a cell
159
160 Parameters
161 ----------
162 instance: int or None
163 Which instance of the cell
164
165 """
166
167 if instance is not None:
168 instance = c_int32(instance)
169
170 T = c_double()
171 _dll.openmc_cell_get_temperature(self._index, instance, T)
172 return T.value
173
174 def set_temperature(self, T, instance=None):
175 """Set the temperature of a cell
176
177 Parameters
178 ----------
179 T : float
180 Temperature in K
181 instance : int or None
182 Which instance of the cell
183
184 """
185
186 if instance is not None:
187 instance = c_int32(instance)
188
189 _dll.openmc_cell_set_temperature(self._index, T, instance)
190
191 @property
192 def bounding_box(self):
193 inf = sys.float_info.max
194 llc = np.zeros(3)
195 urc = np.zeros(3)
196 _dll.openmc_cell_bounding_box(self._index,
197 llc.ctypes.data_as(POINTER(c_double)),
198 urc.ctypes.data_as(POINTER(c_double)))
199 llc[llc == inf] = np.inf
200 urc[urc == inf] = np.inf
201 llc[llc == -inf] = -np.inf
202 urc[urc == -inf] = -np.inf
203
204 return llc, urc
205
206 class _CellMapping(Mapping):
207 def __getitem__(self, key):
208 index = c_int32()
209 try:
210 _dll.openmc_get_cell_index(key, index)
211 except (AllocationError, InvalidIDError) as e:
212 # __contains__ expects a KeyError to work correctly
213 raise KeyError(str(e))
214 return Cell(index=index.value)
215
216 def __iter__(self):
217 for i in range(len(self)):
218 yield Cell(index=i).id
219
220 def __len__(self):
221 return _dll.cells_size()
222
223 def __repr__(self):
224 return repr(dict(self))
225
226 cells = _CellMapping()
227
[end of openmc/lib/cell.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/openmc/lib/cell.py b/openmc/lib/cell.py
--- a/openmc/lib/cell.py
+++ b/openmc/lib/cell.py
@@ -1,7 +1,7 @@
import sys
from collections.abc import Mapping, Iterable
-from ctypes import c_int, c_int32, c_double, c_char_p, POINTER
+from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_bool
from weakref import WeakValueDictionary
import numpy as np
@@ -43,7 +43,7 @@
_dll.openmc_cell_set_id.restype = c_int
_dll.openmc_cell_set_id.errcheck = _error_handler
_dll.openmc_cell_set_temperature.argtypes = [
- c_int32, c_double, POINTER(c_int32)]
+ c_int32, c_double, POINTER(c_int32), c_bool]
_dll.openmc_cell_set_temperature.restype = c_int
_dll.openmc_cell_set_temperature.errcheck = _error_handler
_dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]
@@ -171,7 +171,7 @@
_dll.openmc_cell_get_temperature(self._index, instance, T)
return T.value
- def set_temperature(self, T, instance=None):
+ def set_temperature(self, T, instance=None, set_contained=False):
"""Set the temperature of a cell
Parameters
@@ -180,13 +180,16 @@
Temperature in K
instance : int or None
Which instance of the cell
+ set_contained: bool
+ If cell is not filled by a material, whether to set the temperature of
+ all filled cells
"""
if instance is not None:
instance = c_int32(instance)
- _dll.openmc_cell_set_temperature(self._index, T, instance)
+ _dll.openmc_cell_set_temperature(self._index, T, instance, set_contained)
@property
def bounding_box(self):
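With the change above in place, a caller can reach the contained-cell behaviour from Python (and, through the extra trailing `c_bool`, from the C-API) while still catching the library's error message. The cell ID, instance and temperature below are illustrative values, not taken from the issue.

```python
import openmc.lib

pebble = openmc.lib.cells[100]
try:
    # Mirrors the C++ call cell->set_temperature(T, instance, true) quoted in the issue.
    pebble.set_temperature(650.0, instance=0, set_contained=True)
except Exception as err:
    # The library's message (e.g. the "below minimum temperature" error quoted
    # above) is now available to the caller instead of an MPI_Abort.
    print(f"Temperature update rejected: {err}")
```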
| {"golden_diff": "diff --git a/openmc/lib/cell.py b/openmc/lib/cell.py\n--- a/openmc/lib/cell.py\n+++ b/openmc/lib/cell.py\n@@ -1,7 +1,7 @@\n import sys\n \n from collections.abc import Mapping, Iterable\n-from ctypes import c_int, c_int32, c_double, c_char_p, POINTER\n+from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, c_bool\n from weakref import WeakValueDictionary\n \n import numpy as np\n@@ -43,7 +43,7 @@\n _dll.openmc_cell_set_id.restype = c_int\n _dll.openmc_cell_set_id.errcheck = _error_handler\n _dll.openmc_cell_set_temperature.argtypes = [\n- c_int32, c_double, POINTER(c_int32)]\n+ c_int32, c_double, POINTER(c_int32), c_bool]\n _dll.openmc_cell_set_temperature.restype = c_int\n _dll.openmc_cell_set_temperature.errcheck = _error_handler\n _dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]\n@@ -171,7 +171,7 @@\n _dll.openmc_cell_get_temperature(self._index, instance, T)\n return T.value\n \n- def set_temperature(self, T, instance=None):\n+ def set_temperature(self, T, instance=None, set_contained=False):\n \"\"\"Set the temperature of a cell\n \n Parameters\n@@ -180,13 +180,16 @@\n Temperature in K\n instance : int or None\n Which instance of the cell\n+ set_contained: bool\n+ If cell is not filled by a material, whether to set the temperature of\n+ all filled cells\n \n \"\"\"\n \n if instance is not None:\n instance = c_int32(instance)\n \n- _dll.openmc_cell_set_temperature(self._index, T, instance)\n+ _dll.openmc_cell_set_temperature(self._index, T, instance, set_contained)\n \n @property\n def bounding_box(self):\n", "issue": "Expose the set_contained option through the C-API\nIn Cardinal, we're setting the temperatures of pebbles (which are filled with many cells representing TRISOs, etc.) by calling `cell->set_temperature(T, instance, true /* set_contained */)`. Unfortunately, the `runtime_error`s thrown by OpenMC in this function do not get routed through MOOSE, so if OpenMC encounters an error, all that shows up on the MOOSE side is an unhelpful `MPI_Abort`. \r\n\r\nFor instance, if we try to set a temperature of `0` to an OpenMC cell, but don't have data at that temperature, we get this error message:\r\n\r\n```\r\nTime Step 1, time = 1, dt = 1\r\nSending temperature to OpenMC...\r\napplication called MPI_Abort(MPI_COMM_WORLD, 1) - process 0\r\n[unset]: write_line error; fd=-1 buf=:cmd=abort exitcode=1\r\n```\r\n\r\neven though OpenMC internally is throwing an exception with a helpful message:\r\n\r\n*in Cell::set_temperature*:\r\n```\r\n throw std::runtime_error{\"Temperature is below minimum temperature at \"\r\n \"which data is available.\"};\r\n```\r\n\r\nBecause the `openmc_cell_set_temperature` function _does_ catch this exception and saves the error message, I'd like to use `openmc_cell_set_temperature`. *But*, the interface right now does not let you control the `set_contained` parameter.\r\n\r\n### Desired Change\r\n\r\nAllow `openmc_cell_set_temperature` to control the `set_contained` parameter so that we can catch exceptions for cell temperatures in MOOSE.\n", "before_files": [{"content": "import sys\n\nfrom collections.abc import Mapping, Iterable\nfrom ctypes import c_int, c_int32, c_double, c_char_p, POINTER\nfrom weakref import WeakValueDictionary\n\nimport numpy as np\n\nfrom ..exceptions import AllocationError, InvalidIDError\nfrom . 
import _dll\nfrom .core import _FortranObjectWithID\nfrom .error import _error_handler\nfrom .material import Material\n\n__all__ = ['Cell', 'cells']\n\n# Cell functions\n_dll.openmc_extend_cells.argtypes = [c_int32, POINTER(c_int32), POINTER(c_int32)]\n_dll.openmc_extend_cells.restype = c_int\n_dll.openmc_extend_cells.errcheck = _error_handler\n_dll.openmc_cell_get_id.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_cell_get_id.restype = c_int\n_dll.openmc_cell_get_id.errcheck = _error_handler\n_dll.openmc_cell_get_fill.argtypes = [\n c_int32, POINTER(c_int), POINTER(POINTER(c_int32)), POINTER(c_int32)]\n_dll.openmc_cell_get_fill.restype = c_int\n_dll.openmc_cell_get_fill.errcheck = _error_handler\n_dll.openmc_cell_get_temperature.argtypes = [\n c_int32, POINTER(c_int32), POINTER(c_double)]\n_dll.openmc_cell_get_temperature.restype = c_int\n_dll.openmc_cell_get_temperature.errcheck = _error_handler\n_dll.openmc_cell_get_name.argtypes = [c_int32, POINTER(c_char_p)]\n_dll.openmc_cell_get_name.restype = c_int\n_dll.openmc_cell_get_name.errcheck = _error_handler\n_dll.openmc_cell_set_name.argtypes = [c_int32, c_char_p]\n_dll.openmc_cell_set_name.restype = c_int\n_dll.openmc_cell_set_name.errcheck = _error_handler\n_dll.openmc_cell_set_fill.argtypes = [\n c_int32, c_int, c_int32, POINTER(c_int32)]\n_dll.openmc_cell_set_fill.restype = c_int\n_dll.openmc_cell_set_fill.errcheck = _error_handler\n_dll.openmc_cell_set_id.argtypes = [c_int32, c_int32]\n_dll.openmc_cell_set_id.restype = c_int\n_dll.openmc_cell_set_id.errcheck = _error_handler\n_dll.openmc_cell_set_temperature.argtypes = [\n c_int32, c_double, POINTER(c_int32)]\n_dll.openmc_cell_set_temperature.restype = c_int\n_dll.openmc_cell_set_temperature.errcheck = _error_handler\n_dll.openmc_get_cell_index.argtypes = [c_int32, POINTER(c_int32)]\n_dll.openmc_get_cell_index.restype = c_int\n_dll.openmc_get_cell_index.errcheck = _error_handler\n_dll.cells_size.restype = c_int\n_dll.openmc_cell_bounding_box.argtypes = [c_int,\n POINTER(c_double),\n POINTER(c_double)]\n_dll.openmc_cell_bounding_box.restype = c_int\n_dll.openmc_cell_bounding_box.errcheck = _error_handler\n\n\nclass Cell(_FortranObjectWithID):\n \"\"\"Cell stored internally.\n\n This class exposes a cell that is stored internally in the OpenMC\n library. 
To obtain a view of a cell with a given ID, use the\n :data:`openmc.lib.cells` mapping.\n\n Parameters\n ----------\n index : int\n Index in the `cells` array.\n\n Attributes\n ----------\n id : int\n ID of the cell\n\n \"\"\"\n __instances = WeakValueDictionary()\n\n def __new__(cls, uid=None, new=True, index=None):\n mapping = cells\n if index is None:\n if new:\n # Determine ID to assign\n if uid is None:\n uid = max(mapping, default=0) + 1\n else:\n if uid in mapping:\n raise AllocationError('A cell with ID={} has already '\n 'been allocated.'.format(uid))\n\n index = c_int32()\n _dll.openmc_extend_cells(1, index, None)\n index = index.value\n else:\n index = mapping[uid]._index\n\n if index not in cls.__instances:\n instance = super().__new__(cls)\n instance._index = index\n if uid is not None:\n instance.id = uid\n cls.__instances[index] = instance\n\n return cls.__instances[index]\n\n @property\n def id(self):\n cell_id = c_int32()\n _dll.openmc_cell_get_id(self._index, cell_id)\n return cell_id.value\n\n @id.setter\n def id(self, cell_id):\n _dll.openmc_cell_set_id(self._index, cell_id)\n\n @property\n def name(self):\n name = c_char_p()\n _dll.openmc_cell_get_name(self._index, name)\n return name.value.decode()\n\n @name.setter\n def name(self, name):\n name_ptr = c_char_p(name.encode())\n _dll.openmc_cell_set_name(self._index, name_ptr)\n\n @property\n def fill(self):\n fill_type = c_int()\n indices = POINTER(c_int32)()\n n = c_int32()\n _dll.openmc_cell_get_fill(self._index, fill_type, indices, n)\n if fill_type.value == 0:\n if n.value > 1:\n return [Material(index=i) for i in indices[:n.value]]\n else:\n index = indices[0]\n return Material(index=index)\n else:\n raise NotImplementedError\n\n @fill.setter\n def fill(self, fill):\n if isinstance(fill, Iterable):\n n = len(fill)\n indices = (c_int32*n)(*(m._index if m is not None else -1\n for m in fill))\n _dll.openmc_cell_set_fill(self._index, 0, n, indices)\n elif isinstance(fill, Material):\n indices = (c_int32*1)(fill._index)\n _dll.openmc_cell_set_fill(self._index, 0, 1, indices)\n elif fill is None:\n indices = (c_int32*1)(-1)\n _dll.openmc_cell_set_fill(self._index, 0, 1, indices)\n\n def get_temperature(self, instance=None):\n \"\"\"Get the temperature of a cell\n\n Parameters\n ----------\n instance: int or None\n Which instance of the cell\n\n \"\"\"\n\n if instance is not None:\n instance = c_int32(instance)\n\n T = c_double()\n _dll.openmc_cell_get_temperature(self._index, instance, T)\n return T.value\n\n def set_temperature(self, T, instance=None):\n \"\"\"Set the temperature of a cell\n\n Parameters\n ----------\n T : float\n Temperature in K\n instance : int or None\n Which instance of the cell\n\n \"\"\"\n\n if instance is not None:\n instance = c_int32(instance)\n\n _dll.openmc_cell_set_temperature(self._index, T, instance)\n\n @property\n def bounding_box(self):\n inf = sys.float_info.max\n llc = np.zeros(3)\n urc = np.zeros(3)\n _dll.openmc_cell_bounding_box(self._index,\n llc.ctypes.data_as(POINTER(c_double)),\n urc.ctypes.data_as(POINTER(c_double)))\n llc[llc == inf] = np.inf\n urc[urc == inf] = np.inf\n llc[llc == -inf] = -np.inf\n urc[urc == -inf] = -np.inf\n\n return llc, urc\n\nclass _CellMapping(Mapping):\n def __getitem__(self, key):\n index = c_int32()\n try:\n _dll.openmc_get_cell_index(key, index)\n except (AllocationError, InvalidIDError) as e:\n # __contains__ expects a KeyError to work correctly\n raise KeyError(str(e))\n return Cell(index=index.value)\n\n def __iter__(self):\n for i in 
range(len(self)):\n yield Cell(index=i).id\n\n def __len__(self):\n return _dll.cells_size()\n\n def __repr__(self):\n return repr(dict(self))\n\ncells = _CellMapping()\n", "path": "openmc/lib/cell.py"}]} | 3,300 | 461 |
gh_patches_debug_43468 | rasdani/github-patches | git_diff | beetbox__beets-1176 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ftintitle: be less verbose during import
During an import with the ftintitle plugin enabled, the output is very verbose. This can cause the user to miss any prompts that beets shows, and is somewhat annoying when doing a large import.
As seen here:

My suggestion would be to add a configuration option that makes ftintitle less verbose, or to make it quiet by default and add a verbosity configuration option.
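One possible shape for that option is sketched below; the `report` helper is hypothetical (it is not part of the existing plugin) and simply routes messages through the standard `logging` module so that import-time output can stay at a quiet level:

```python
import logging

log = logging.getLogger('beets')

def report(message, during_import=False):
    # Quiet (DEBUG) while the automatic import pipeline runs, loud (INFO)
    # when the user invokes the ftintitle command explicitly.
    level = logging.DEBUG if during_import else logging.INFO
    log.log(level, message)
```

Whether DEBUG messages then reach the console would be left to beets' normal verbosity handling.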
</issue>
<code>
[start of beetsplug/ftintitle.py]
1 # This file is part of beets.
2 # Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle>
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining
5 # a copy of this software and associated documentation files (the
6 # "Software"), to deal in the Software without restriction, including
7 # without limitation the rights to use, copy, modify, merge, publish,
8 # distribute, sublicense, and/or sell copies of the Software, and to
9 # permit persons to whom the Software is furnished to do so, subject to
10 # the following conditions:
11 #
12 # The above copyright notice and this permission notice shall be
13 # included in all copies or substantial portions of the Software.
14
15 """Moves "featured" artists to the title from the artist field.
16 """
17 from beets import plugins
18 from beets import ui
19 from beets.util import displayable_path
20 from beets import config
21 import logging
22 import re
23
24 log = logging.getLogger('beets')
25
26
27 def split_on_feat(artist):
28 """Given an artist string, split the "main" artist from any artist
29 on the right-hand side of a string like "feat". Return the main
30 artist, which is always a string, and the featuring artist, which
31 may be a string or None if none is present.
32 """
33 # split on the first "feat".
34 regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)
35 parts = [s.strip() for s in regex.split(artist, 1)]
36 if len(parts) == 1:
37 return parts[0], None
38 else:
39 return tuple(parts)
40
41
42 def contains_feat(title):
43 """Determine whether the title contains a "featured" marker.
44 """
45 return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
46
47
48 def update_metadata(item, feat_part, drop_feat):
49 """Choose how to add new artists to the title and set the new
50 metadata. Also, print out messages about any changes that are made.
51 If `drop_feat` is set, then do not add the artist to the title; just
52 remove it from the artist field.
53 """
54 # In all cases, update the artist fields.
55 ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))
56 item.artist = item.albumartist
57 if item.artist_sort:
58 # Just strip the featured artist from the sort name.
59 item.artist_sort, _ = split_on_feat(item.artist_sort)
60
61 # Only update the title if it does not already contain a featured
62 # artist and if we do not drop featuring information.
63 if not drop_feat and not contains_feat(item.title):
64 new_title = u"{0} feat. {1}".format(item.title, feat_part)
65 ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))
66 item.title = new_title
67
68
69 def ft_in_title(item, drop_feat):
70 """Look for featured artists in the item's artist fields and move
71 them to the title.
72 """
73 artist = item.artist.strip()
74 albumartist = item.albumartist.strip()
75
76 # Check whether there is a featured artist on this track and the
77 # artist field does not exactly match the album artist field. In
78 # that case, we attempt to move the featured artist to the title.
79 _, featured = split_on_feat(artist)
80 if featured and albumartist != artist and albumartist:
81 ui.print_(displayable_path(item.path))
82 feat_part = None
83
84 # Look for the album artist in the artist field. If it's not
85 # present, give up.
86 albumartist_split = artist.split(albumartist)
87 if len(albumartist_split) <= 1:
88 ui.print_('album artist not present in artist')
89
90 # If the last element of the split (the right-hand side of the
91 # album artist) is nonempty, then it probably contains the
92 # featured artist.
93 elif albumartist_split[-1] != '':
94 # Extract the featured artist from the right-hand side.
95 _, feat_part = split_on_feat(albumartist_split[-1])
96
97 # Otherwise, if there's nothing on the right-hand side, look for a
98 # featuring artist on the left-hand side.
99 else:
100 lhs, rhs = split_on_feat(albumartist_split[0])
101 if rhs:
102 feat_part = lhs
103
104 # If we have a featuring artist, move it to the title.
105 if feat_part:
106 update_metadata(item, feat_part, drop_feat)
107 else:
108 ui.print_(u'no featuring artists found')
109
110 ui.print_()
111
112
113 class FtInTitlePlugin(plugins.BeetsPlugin):
114 def __init__(self):
115 super(FtInTitlePlugin, self).__init__()
116
117 self.config.add({
118 'auto': True,
119 'drop': False,
120 })
121
122 self._command = ui.Subcommand(
123 'ftintitle',
124 help='move featured artists to the title field')
125
126 self._command.parser.add_option(
127 '-d', '--drop', dest='drop',
128 action='store_true', default=False,
129 help='drop featuring from artists and ignore title update')
130
131 if self.config['auto']:
132 self.import_stages = [self.imported]
133
134 def commands(self):
135
136 def func(lib, opts, args):
137 self.config.set_args(opts)
138 drop_feat = self.config['drop'].get(bool)
139 write = config['import']['write'].get(bool)
140
141 for item in lib.items(ui.decargs(args)):
142 ft_in_title(item, drop_feat)
143 item.store()
144 if write:
145 item.try_write()
146
147 self._command.func = func
148 return [self._command]
149
150 def imported(self, session, task):
151 """Import hook for moving featuring artist automatically.
152 """
153 drop_feat = self.config['drop'].get(bool)
154
155 for item in task.imported_items():
156 ft_in_title(item, drop_feat)
157 item.store()
158
[end of beetsplug/ftintitle.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/beetsplug/ftintitle.py b/beetsplug/ftintitle.py
--- a/beetsplug/ftintitle.py
+++ b/beetsplug/ftintitle.py
@@ -45,14 +45,15 @@
return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
-def update_metadata(item, feat_part, drop_feat):
+def update_metadata(item, feat_part, drop_feat, loglevel=logging.DEBUG):
"""Choose how to add new artists to the title and set the new
metadata. Also, print out messages about any changes that are made.
If `drop_feat` is set, then do not add the artist to the title; just
remove it from the artist field.
"""
# In all cases, update the artist fields.
- ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))
+ log.log(loglevel, u'artist: {0} -> {1}'.format(
+ item.artist, item.albumartist))
item.artist = item.albumartist
if item.artist_sort:
# Just strip the featured artist from the sort name.
@@ -62,11 +63,11 @@
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
new_title = u"{0} feat. {1}".format(item.title, feat_part)
- ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))
+ log.log(loglevel, u'title: {0} -> {1}'.format(item.title, new_title))
item.title = new_title
-def ft_in_title(item, drop_feat):
+def ft_in_title(item, drop_feat, loglevel=logging.DEBUG):
"""Look for featured artists in the item's artist fields and move
them to the title.
"""
@@ -78,14 +79,14 @@
# that case, we attempt to move the featured artist to the title.
_, featured = split_on_feat(artist)
if featured and albumartist != artist and albumartist:
- ui.print_(displayable_path(item.path))
+ log.log(loglevel, displayable_path(item.path))
feat_part = None
# Look for the album artist in the artist field. If it's not
# present, give up.
albumartist_split = artist.split(albumartist)
if len(albumartist_split) <= 1:
- ui.print_('album artist not present in artist')
+ log.log(loglevel, 'album artist not present in artist')
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
@@ -103,11 +104,9 @@
# If we have a featuring artist, move it to the title.
if feat_part:
- update_metadata(item, feat_part, drop_feat)
+ update_metadata(item, feat_part, drop_feat, loglevel)
else:
- ui.print_(u'no featuring artists found')
-
- ui.print_()
+ log.log(loglevel, u'no featuring artists found')
class FtInTitlePlugin(plugins.BeetsPlugin):
@@ -139,7 +138,7 @@
write = config['import']['write'].get(bool)
for item in lib.items(ui.decargs(args)):
- ft_in_title(item, drop_feat)
+ ft_in_title(item, drop_feat, logging.INFO)
item.store()
if write:
item.try_write()
@@ -153,5 +152,5 @@
drop_feat = self.config['drop'].get(bool)
for item in task.imported_items():
- ft_in_title(item, drop_feat)
+ ft_in_title(item, drop_feat, logging.DEBUG)
item.store()
| {"golden_diff": "diff --git a/beetsplug/ftintitle.py b/beetsplug/ftintitle.py\n--- a/beetsplug/ftintitle.py\n+++ b/beetsplug/ftintitle.py\n@@ -45,14 +45,15 @@\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))\n \n \n-def update_metadata(item, feat_part, drop_feat):\n+def update_metadata(item, feat_part, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Choose how to add new artists to the title and set the new\n metadata. Also, print out messages about any changes that are made.\n If `drop_feat` is set, then do not add the artist to the title; just\n remove it from the artist field.\n \"\"\"\n # In all cases, update the artist fields.\n- ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))\n+ log.log(loglevel, u'artist: {0} -> {1}'.format(\n+ item.artist, item.albumartist))\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n@@ -62,11 +63,11 @@\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n new_title = u\"{0} feat. {1}\".format(item.title, feat_part)\n- ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))\n+ log.log(loglevel, u'title: {0} -> {1}'.format(item.title, new_title))\n item.title = new_title\n \n \n-def ft_in_title(item, drop_feat):\n+def ft_in_title(item, drop_feat, loglevel=logging.DEBUG):\n \"\"\"Look for featured artists in the item's artist fields and move\n them to the title.\n \"\"\"\n@@ -78,14 +79,14 @@\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n- ui.print_(displayable_path(item.path))\n+ log.log(loglevel, displayable_path(item.path))\n feat_part = None\n \n # Look for the album artist in the artist field. If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist)\n if len(albumartist_split) <= 1:\n- ui.print_('album artist not present in artist')\n+ log.log(loglevel, 'album artist not present in artist')\n \n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n@@ -103,11 +104,9 @@\n \n # If we have a featuring artist, move it to the title.\n if feat_part:\n- update_metadata(item, feat_part, drop_feat)\n+ update_metadata(item, feat_part, drop_feat, loglevel)\n else:\n- ui.print_(u'no featuring artists found')\n-\n- ui.print_()\n+ log.log(loglevel, u'no featuring artists found')\n \n \n class FtInTitlePlugin(plugins.BeetsPlugin):\n@@ -139,7 +138,7 @@\n write = config['import']['write'].get(bool)\n \n for item in lib.items(ui.decargs(args)):\n- ft_in_title(item, drop_feat)\n+ ft_in_title(item, drop_feat, logging.INFO)\n item.store()\n if write:\n item.try_write()\n@@ -153,5 +152,5 @@\n drop_feat = self.config['drop'].get(bool)\n \n for item in task.imported_items():\n- ft_in_title(item, drop_feat)\n+ ft_in_title(item, drop_feat, logging.DEBUG)\n item.store()\n", "issue": "ftintitle: be less verbose during import\nDuring an import, with the ftintitle enabled it seems to have very verbose output. This causes the user to not notice any prompts that beets has, and is somewhat annoying when doing a large import.\n\nAs seen here:\n\n\nMy suggestion would be to add a configuration option that would make ftintitle be less verbose. 
Or, making it not be verbose by default and adding a verbosity configuration option.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Verrus, <github.com/Verrus/beets-plugin-featInTitle>\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Moves \"featured\" artists to the title from the artist field.\n\"\"\"\nfrom beets import plugins\nfrom beets import ui\nfrom beets.util import displayable_path\nfrom beets import config\nimport logging\nimport re\n\nlog = logging.getLogger('beets')\n\n\ndef split_on_feat(artist):\n \"\"\"Given an artist string, split the \"main\" artist from any artist\n on the right-hand side of a string like \"feat\". Return the main\n artist, which is always a string, and the featuring artist, which\n may be a string or None if none is present.\n \"\"\"\n # split on the first \"feat\".\n regex = re.compile(plugins.feat_tokens(), re.IGNORECASE)\n parts = [s.strip() for s in regex.split(artist, 1)]\n if len(parts) == 1:\n return parts[0], None\n else:\n return tuple(parts)\n\n\ndef contains_feat(title):\n \"\"\"Determine whether the title contains a \"featured\" marker.\n \"\"\"\n return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))\n\n\ndef update_metadata(item, feat_part, drop_feat):\n \"\"\"Choose how to add new artists to the title and set the new\n metadata. Also, print out messages about any changes that are made.\n If `drop_feat` is set, then do not add the artist to the title; just\n remove it from the artist field.\n \"\"\"\n # In all cases, update the artist fields.\n ui.print_(u'artist: {0} -> {1}'.format(item.artist, item.albumartist))\n item.artist = item.albumartist\n if item.artist_sort:\n # Just strip the featured artist from the sort name.\n item.artist_sort, _ = split_on_feat(item.artist_sort)\n\n # Only update the title if it does not already contain a featured\n # artist and if we do not drop featuring information.\n if not drop_feat and not contains_feat(item.title):\n new_title = u\"{0} feat. {1}\".format(item.title, feat_part)\n ui.print_(u'title: {0} -> {1}'.format(item.title, new_title))\n item.title = new_title\n\n\ndef ft_in_title(item, drop_feat):\n \"\"\"Look for featured artists in the item's artist fields and move\n them to the title.\n \"\"\"\n artist = item.artist.strip()\n albumartist = item.albumartist.strip()\n\n # Check whether there is a featured artist on this track and the\n # artist field does not exactly match the album artist field. In\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n ui.print_(displayable_path(item.path))\n feat_part = None\n\n # Look for the album artist in the artist field. 
If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist)\n if len(albumartist_split) <= 1:\n ui.print_('album artist not present in artist')\n\n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n # featured artist.\n elif albumartist_split[-1] != '':\n # Extract the featured artist from the right-hand side.\n _, feat_part = split_on_feat(albumartist_split[-1])\n\n # Otherwise, if there's nothing on the right-hand side, look for a\n # featuring artist on the left-hand side.\n else:\n lhs, rhs = split_on_feat(albumartist_split[0])\n if rhs:\n feat_part = lhs\n\n # If we have a featuring artist, move it to the title.\n if feat_part:\n update_metadata(item, feat_part, drop_feat)\n else:\n ui.print_(u'no featuring artists found')\n\n ui.print_()\n\n\nclass FtInTitlePlugin(plugins.BeetsPlugin):\n def __init__(self):\n super(FtInTitlePlugin, self).__init__()\n\n self.config.add({\n 'auto': True,\n 'drop': False,\n })\n\n self._command = ui.Subcommand(\n 'ftintitle',\n help='move featured artists to the title field')\n\n self._command.parser.add_option(\n '-d', '--drop', dest='drop',\n action='store_true', default=False,\n help='drop featuring from artists and ignore title update')\n\n if self.config['auto']:\n self.import_stages = [self.imported]\n\n def commands(self):\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n drop_feat = self.config['drop'].get(bool)\n write = config['import']['write'].get(bool)\n\n for item in lib.items(ui.decargs(args)):\n ft_in_title(item, drop_feat)\n item.store()\n if write:\n item.try_write()\n\n self._command.func = func\n return [self._command]\n\n def imported(self, session, task):\n \"\"\"Import hook for moving featuring artist automatically.\n \"\"\"\n drop_feat = self.config['drop'].get(bool)\n\n for item in task.imported_items():\n ft_in_title(item, drop_feat)\n item.store()\n", "path": "beetsplug/ftintitle.py"}]} | 2,316 | 851 |
gh_patches_debug_12476 | rasdani/github-patches | git_diff | bokeh__bokeh-9068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Development guide missing `test` argument for conda install and pytest install failure on windows
### Missing `test` argument
The current [development guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for Windows setups.
As for OSX / Linux (bash / sh), it is:
- ```conda install `python scripts/deps.py build run test```.
As for Windows, the `test` argument is missing from the `deps.py` calls:
- ```conda install $(python scripts/deps.py build run).split() | where {$_}```
- ```for /F "delims=" %i in ('python scripts\deps.py build run') do (conda install %i)```
Instead, it should be:
- ```conda install $(python scripts/deps.py build run test).split() | where {$_}```
- ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)```
### `pytest<5.0.0` fails
In addition, running ```for /F "delims=" %i in ('python scripts\deps.py build run test') do (conda install %i)``` fails with the error `System can't find given file.`, which is caused by the `pytest<5.0.0` spec. Providing double quotes fixes the issue -> `conda install "pytest<5.0.0"`.
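A minimal sketch of the quoting workaround on the `deps.py` side, assuming `spec` is the list of requirement strings the script prints (the list literal here is only an example):

```python
import platform

spec = ['pytest<5.0.0', 'mock', 'beautifulsoup4']

# On Windows shells, wrap each spec in double quotes so characters such as
# '<' are not interpreted by cmd.exe / PowerShell before conda sees them.
if 'windows' in platform.platform().lower():
    spec = ['"{}"'.format(s) for s in spec]

print(' '.join(spec))
```

With the quotes in place, specs such as `pytest<5.0.0` survive the Windows shell and reach `conda install` intact.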
</issue>
<code>
[start of scripts/deps.py]
1 import sys
2 import jinja2
3 import yaml
4
5
6 def load_setup_py_data():
7 import os
8 import setuptools
9 os.environ['CONDA_BUILD_STATE'] = 'RENDER'
10 data = {}
11
12 def _setup(**kw): data.update(kw)
13 setuptools.setup = _setup
14 return data
15
16 meta_src = jinja2.Template(open("conda.recipe/meta.yaml").read())
17 meta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),
18 Loader=yaml.FullLoader)
19
20 section = {
21 "build" : meta_src["requirements"]["build"],
22 "deploy" : meta_src["extra"]["deploy"],
23 "run" : meta_src["requirements"]["run"],
24 "test" : meta_src["test"]["requires"],
25 }
26
27 spec = []
28 for name in sys.argv[1:]:
29 spec += section[name]
30
31 # bare python unpins python version causing upgrade to latest
32 if 'python' in spec: spec.remove('python')
33
34 deps = ""
35 deps += " ".join(s for s in spec)
36 deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
37 deps = deps.replace(' <', '<')
38 deps = deps.replace(' [unix]', ' ')
39
40 print(deps)
41
[end of scripts/deps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scripts/deps.py b/scripts/deps.py
--- a/scripts/deps.py
+++ b/scripts/deps.py
@@ -1,4 +1,5 @@
import sys
+import platform
import jinja2
import yaml
@@ -31,6 +32,10 @@
# bare python unpins python version causing upgrade to latest
if 'python' in spec: spec.remove('python')
+# add double quotes to specs for windows, fixes #9065
+if "windows" in platform.platform().lower():
+ spec = ['"{}"'.format(s) for s in spec]
+
deps = ""
deps += " ".join(s for s in spec)
deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec
| {"golden_diff": "diff --git a/scripts/deps.py b/scripts/deps.py\n--- a/scripts/deps.py\n+++ b/scripts/deps.py\n@@ -1,4 +1,5 @@\n import sys\n+import platform\n import jinja2\n import yaml\n \n@@ -31,6 +32,10 @@\n # bare python unpins python version causing upgrade to latest\n if 'python' in spec: spec.remove('python')\n \n+# add double quotes to specs for windows, fixes #9065\n+if \"windows\" in platform.platform().lower():\n+ spec = ['\"{}\"'.format(s) for s in spec]\n+\n deps = \"\"\n deps += \" \".join(s for s in spec)\n deps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\n", "issue": "[BUG] Development guide missing `test` argument for conda install and pytest install failure on windows\n### Missing `test` argument\r\n\r\nThe current [developement guide](https://bokeh.pydata.org/en/latest/docs/dev_guide/setup.html#conda-packages) is missing the `test` argument for windows setups. \r\n\r\nAs for OSX / Linux (bash / sh), it is: \r\n- ```conda install `python scripts/deps.py build run test```.\r\n\r\nAs for windows, the `test` argument is missing for the `deps.py`:\r\n- ```conda install $(python scripts/deps.py build run).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run') do (conda install %i)```\r\n\r\nInstead, it should be:\r\n- ```conda install $(python scripts/deps.py build run test).split() | where {$_}```\r\n- ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)```\r\n\r\n### `pytest<5.0.0` fails\r\nIn addition, running ```for /F \"delims=\" %i in ('python scripts\\deps.py build run test') do (conda install %i)``` fails with error `System can't find given file.` which is due to `pytest<5.0.0`. Providing double quotes actually fixes the issue -> `conda install \"pytest<5.0.0\"`.\n", "before_files": [{"content": "import sys\nimport jinja2\nimport yaml\n\n\ndef load_setup_py_data():\n import os\n import setuptools\n os.environ['CONDA_BUILD_STATE'] = 'RENDER'\n data = {}\n\n def _setup(**kw): data.update(kw)\n setuptools.setup = _setup\n return data\n\nmeta_src = jinja2.Template(open(\"conda.recipe/meta.yaml\").read())\nmeta_src = yaml.load(meta_src.render(load_setup_py_data=load_setup_py_data),\n Loader=yaml.FullLoader)\n\nsection = {\n \"build\" : meta_src[\"requirements\"][\"build\"],\n \"deploy\" : meta_src[\"extra\"][\"deploy\"],\n \"run\" : meta_src[\"requirements\"][\"run\"],\n \"test\" : meta_src[\"test\"][\"requires\"],\n}\n\nspec = []\nfor name in sys.argv[1:]:\n spec += section[name]\n\n# bare python unpins python version causing upgrade to latest\nif 'python' in spec: spec.remove('python')\n\ndeps = \"\"\ndeps += \" \".join(s for s in spec)\ndeps = deps.replace(' >=', '>=') # conda syntax doesn't allow spaces b/w pkg name and version spec\ndeps = deps.replace(' <', '<')\ndeps = deps.replace(' [unix]', ' ')\n\nprint(deps)\n", "path": "scripts/deps.py"}]} | 1,196 | 177 |
gh_patches_debug_15941 | rasdani/github-patches | git_diff | statsmodels__statsmodels-2529 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tsa_arma_0 notebook (sunspot data) is outdated
Not so hard to fix (`isinvertible` and `isstationary` are now attributes; `generate_sample` now takes a `nsample` argument instead of `size`), but because the notebook format changed recently I cannot produce a clean patch.
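For reference, a short sketch of the updated calls mentioned above (parameters copied from the notebook; this assumes a statsmodels version where these are plain attributes):

```python
import numpy as np
from statsmodels.tsa.arima_process import ArmaProcess

arma_t = ArmaProcess(np.array([1, .75, -.65, -.55, .9]), np.array([1, .65]))
print(arma_t.isinvertible, arma_t.isstationary)  # attributes, not methods
sample = arma_t.generate_sample(nsample=50)      # keyword is nsample, not size
```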
</issue>
<code>
[start of examples/python/tsa_arma_0.py]
1
2 ## Autoregressive Moving Average (ARMA): Sunspots data
3
4 from __future__ import print_function
5 import numpy as np
6 from scipy import stats
7 import pandas as pd
8 import matplotlib.pyplot as plt
9
10 import statsmodels.api as sm
11
12
13 from statsmodels.graphics.api import qqplot
14
15
16 ### Sunpots Data
17
18 print(sm.datasets.sunspots.NOTE)
19
20
21 dta = sm.datasets.sunspots.load_pandas().data
22
23
24 dta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))
25 del dta["YEAR"]
26
27
28 dta.plot(figsize=(12,8));
29
30
31 fig = plt.figure(figsize=(12,8))
32 ax1 = fig.add_subplot(211)
33 fig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)
34 ax2 = fig.add_subplot(212)
35 fig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)
36
37
38 arma_mod20 = sm.tsa.ARMA(dta, (2,0)).fit()
39 print(arma_mod20.params)
40
41
42 arma_mod30 = sm.tsa.ARMA(dta, (3,0)).fit()
43
44
45 print(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)
46
47
48 print(arma_mod30.params)
49
50
51 print(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)
52
53
54 # * Does our model obey the theory?
55
56 sm.stats.durbin_watson(arma_mod30.resid.values)
57
58
59 fig = plt.figure(figsize=(12,8))
60 ax = fig.add_subplot(111)
61 ax = arma_mod30.resid.plot(ax=ax);
62
63
64 resid = arma_mod30.resid
65
66
67 stats.normaltest(resid)
68
69
70 fig = plt.figure(figsize=(12,8))
71 ax = fig.add_subplot(111)
72 fig = qqplot(resid, line='q', ax=ax, fit=True)
73
74
75 fig = plt.figure(figsize=(12,8))
76 ax1 = fig.add_subplot(211)
77 fig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)
78 ax2 = fig.add_subplot(212)
79 fig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)
80
81
82 r,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True)
83 data = np.c_[range(1,41), r[1:], q, p]
84 table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
85 print(table.set_index('lag'))
86
87
88 # * This indicates a lack of fit.
89
90 # * In-sample dynamic prediction. How good does our model do?
91
92 predict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)
93 print(predict_sunspots)
94
95
96 ax = dta.ix['1950':].plot(figsize=(12,8))
97 ax = predict_sunspots.plot(ax=ax, style='r--', label='Dynamic Prediction')
98 ax.legend()
99 ax.axis((-20.0, 38.0, -4.0, 200.0))
100
101
102 def mean_forecast_err(y, yhat):
103 return y.sub(yhat).mean()
104
105
106 mean_forecast_err(dta.SUNACTIVITY, predict_sunspots)
107
108
109 #### Exercise: Can you obtain a better fit for the Sunspots model? (Hint: sm.tsa.AR has a method select_order)
110
111 #### Simulated ARMA(4,1): Model Identification is Difficult
112
113 from statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess
114
115
116 np.random.seed(1234)
117 # include zero-th lag
118 arparams = np.array([1, .75, -.65, -.55, .9])
119 maparams = np.array([1, .65])
120
121
122 # Let's make sure this model is estimable.
123
124 arma_t = ArmaProcess(arparams, maparams)
125
126
127 arma_t.isinvertible()
128
129
130 arma_t.isstationary()
131
132
133 # * What does this mean?
134
135 fig = plt.figure(figsize=(12,8))
136 ax = fig.add_subplot(111)
137 ax.plot(arma_t.generate_sample(size=50));
138
139
140 arparams = np.array([1, .35, -.15, .55, .1])
141 maparams = np.array([1, .65])
142 arma_t = ArmaProcess(arparams, maparams)
143 arma_t.isstationary()
144
145
146 arma_rvs = arma_t.generate_sample(size=500, burnin=250, scale=2.5)
147
148
149 fig = plt.figure(figsize=(12,8))
150 ax1 = fig.add_subplot(211)
151 fig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)
152 ax2 = fig.add_subplot(212)
153 fig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)
154
155
156 # * For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags.
157 # * The partial autocorrelation function is a mixture of exponentials and dampened sine waves after (p-q) lags.
158
159 arma11 = sm.tsa.ARMA(arma_rvs, (1,1)).fit()
160 resid = arma11.resid
161 r,q,p = sm.tsa.acf(resid, qstat=True)
162 data = np.c_[range(1,41), r[1:], q, p]
163 table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
164 print(table.set_index('lag'))
165
166
167 arma41 = sm.tsa.ARMA(arma_rvs, (4,1)).fit()
168 resid = arma41.resid
169 r,q,p = sm.tsa.acf(resid, qstat=True)
170 data = np.c_[range(1,41), r[1:], q, p]
171 table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"])
172 print(table.set_index('lag'))
173
174
175 #### Exercise: How good of in-sample prediction can you do for another series, say, CPI
176
177 macrodta = sm.datasets.macrodata.load_pandas().data
178 macrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))
179 cpi = macrodta["cpi"]
180
181
182 ##### Hint:
183
184 fig = plt.figure(figsize=(12,8))
185 ax = fig.add_subplot(111)
186 ax = cpi.plot(ax=ax)
187 ax.legend()
188
189
190 # P-value of the unit-root test, resoundly rejects the null of no unit-root.
191
192 print(sm.tsa.adfuller(cpi)[1])
193
194
[end of examples/python/tsa_arma_0.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/python/tsa_arma_0.py b/examples/python/tsa_arma_0.py
--- a/examples/python/tsa_arma_0.py
+++ b/examples/python/tsa_arma_0.py
@@ -124,26 +124,26 @@
arma_t = ArmaProcess(arparams, maparams)
-arma_t.isinvertible()
+arma_t.isinvertible
-arma_t.isstationary()
+arma_t.isstationary
# * What does this mean?
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
-ax.plot(arma_t.generate_sample(size=50));
+ax.plot(arma_t.generate_sample(nsample=50));
arparams = np.array([1, .35, -.15, .55, .1])
maparams = np.array([1, .65])
arma_t = ArmaProcess(arparams, maparams)
-arma_t.isstationary()
+arma_t.isstationary
-arma_rvs = arma_t.generate_sample(size=500, burnin=250, scale=2.5)
+arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)
fig = plt.figure(figsize=(12,8))
| {"golden_diff": "diff --git a/examples/python/tsa_arma_0.py b/examples/python/tsa_arma_0.py\n--- a/examples/python/tsa_arma_0.py\n+++ b/examples/python/tsa_arma_0.py\n@@ -124,26 +124,26 @@\n arma_t = ArmaProcess(arparams, maparams)\n \n \n-arma_t.isinvertible()\n+arma_t.isinvertible\n \n \n-arma_t.isstationary()\n+arma_t.isstationary\n \n \n # * What does this mean?\n \n fig = plt.figure(figsize=(12,8))\n ax = fig.add_subplot(111)\n-ax.plot(arma_t.generate_sample(size=50));\n+ax.plot(arma_t.generate_sample(nsample=50));\n \n \n arparams = np.array([1, .35, -.15, .55, .1])\n maparams = np.array([1, .65])\n arma_t = ArmaProcess(arparams, maparams)\n-arma_t.isstationary()\n+arma_t.isstationary\n \n \n-arma_rvs = arma_t.generate_sample(size=500, burnin=250, scale=2.5)\n+arma_rvs = arma_t.generate_sample(nsample=500, burnin=250, scale=2.5)\n \n \n fig = plt.figure(figsize=(12,8))\n", "issue": "tsa_arma_0 notebook (sunspot data) is outdated\nNot so hard to fix (`isinvertible` and `isstationary` are now attributes; `generate_sample` now takes a `nsample` argument instead of `size`) but because the notebook format changed recently I cannot produce a clean patch.\n\n", "before_files": [{"content": "\n## Autoregressive Moving Average (ARMA): Sunspots data\n\nfrom __future__ import print_function\nimport numpy as np\nfrom scipy import stats\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport statsmodels.api as sm\n\n\nfrom statsmodels.graphics.api import qqplot\n\n\n### Sunpots Data\n\nprint(sm.datasets.sunspots.NOTE)\n\n\ndta = sm.datasets.sunspots.load_pandas().data\n\n\ndta.index = pd.Index(sm.tsa.datetools.dates_from_range('1700', '2008'))\ndel dta[\"YEAR\"]\n\n\ndta.plot(figsize=(12,8));\n\n\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(dta.values.squeeze(), lags=40, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(dta, lags=40, ax=ax2)\n\n\narma_mod20 = sm.tsa.ARMA(dta, (2,0)).fit()\nprint(arma_mod20.params)\n\n\narma_mod30 = sm.tsa.ARMA(dta, (3,0)).fit()\n\n\nprint(arma_mod20.aic, arma_mod20.bic, arma_mod20.hqic)\n\n\nprint(arma_mod30.params)\n\n\nprint(arma_mod30.aic, arma_mod30.bic, arma_mod30.hqic)\n\n\n# * Does our model obey the theory?\n\nsm.stats.durbin_watson(arma_mod30.resid.values)\n\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nax = arma_mod30.resid.plot(ax=ax);\n\n\nresid = arma_mod30.resid\n\n\nstats.normaltest(resid)\n\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nfig = qqplot(resid, line='q', ax=ax, fit=True)\n\n\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(resid.values.squeeze(), lags=40, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(resid, lags=40, ax=ax2)\n\n\nr,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True)\ndata = np.c_[range(1,41), r[1:], q, p]\ntable = pd.DataFrame(data, columns=['lag', \"AC\", \"Q\", \"Prob(>Q)\"])\nprint(table.set_index('lag'))\n\n\n# * This indicates a lack of fit.\n\n# * In-sample dynamic prediction. 
How good does our model do?\n\npredict_sunspots = arma_mod30.predict('1990', '2012', dynamic=True)\nprint(predict_sunspots)\n\n\nax = dta.ix['1950':].plot(figsize=(12,8))\nax = predict_sunspots.plot(ax=ax, style='r--', label='Dynamic Prediction')\nax.legend()\nax.axis((-20.0, 38.0, -4.0, 200.0))\n\n\ndef mean_forecast_err(y, yhat):\n return y.sub(yhat).mean()\n\n\nmean_forecast_err(dta.SUNACTIVITY, predict_sunspots)\n\n\n#### Exercise: Can you obtain a better fit for the Sunspots model? (Hint: sm.tsa.AR has a method select_order)\n\n#### Simulated ARMA(4,1): Model Identification is Difficult\n\nfrom statsmodels.tsa.arima_process import arma_generate_sample, ArmaProcess\n\n\nnp.random.seed(1234)\n# include zero-th lag\narparams = np.array([1, .75, -.65, -.55, .9])\nmaparams = np.array([1, .65])\n\n\n# Let's make sure this model is estimable.\n\narma_t = ArmaProcess(arparams, maparams)\n\n\narma_t.isinvertible()\n\n\narma_t.isstationary()\n\n\n# * What does this mean?\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nax.plot(arma_t.generate_sample(size=50));\n\n\narparams = np.array([1, .35, -.15, .55, .1])\nmaparams = np.array([1, .65])\narma_t = ArmaProcess(arparams, maparams)\narma_t.isstationary()\n\n\narma_rvs = arma_t.generate_sample(size=500, burnin=250, scale=2.5)\n\n\nfig = plt.figure(figsize=(12,8))\nax1 = fig.add_subplot(211)\nfig = sm.graphics.tsa.plot_acf(arma_rvs, lags=40, ax=ax1)\nax2 = fig.add_subplot(212)\nfig = sm.graphics.tsa.plot_pacf(arma_rvs, lags=40, ax=ax2)\n\n\n# * For mixed ARMA processes the Autocorrelation function is a mixture of exponentials and damped sine waves after (q-p) lags.\n# * The partial autocorrelation function is a mixture of exponentials and dampened sine waves after (p-q) lags.\n\narma11 = sm.tsa.ARMA(arma_rvs, (1,1)).fit()\nresid = arma11.resid\nr,q,p = sm.tsa.acf(resid, qstat=True)\ndata = np.c_[range(1,41), r[1:], q, p]\ntable = pd.DataFrame(data, columns=['lag', \"AC\", \"Q\", \"Prob(>Q)\"])\nprint(table.set_index('lag'))\n\n\narma41 = sm.tsa.ARMA(arma_rvs, (4,1)).fit()\nresid = arma41.resid\nr,q,p = sm.tsa.acf(resid, qstat=True)\ndata = np.c_[range(1,41), r[1:], q, p]\ntable = pd.DataFrame(data, columns=['lag', \"AC\", \"Q\", \"Prob(>Q)\"])\nprint(table.set_index('lag'))\n\n\n#### Exercise: How good of in-sample prediction can you do for another series, say, CPI\n\nmacrodta = sm.datasets.macrodata.load_pandas().data\nmacrodta.index = pd.Index(sm.tsa.datetools.dates_from_range('1959Q1', '2009Q3'))\ncpi = macrodta[\"cpi\"]\n\n\n##### Hint:\n\nfig = plt.figure(figsize=(12,8))\nax = fig.add_subplot(111)\nax = cpi.plot(ax=ax)\nax.legend()\n\n\n# P-value of the unit-root test, resoundly rejects the null of no unit-root.\n\nprint(sm.tsa.adfuller(cpi)[1])\n\n", "path": "examples/python/tsa_arma_0.py"}]} | 2,642 | 298 |
gh_patches_debug_4586 | rasdani/github-patches | git_diff | mdn__kuma-1792 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Delete templates used in the old design and replace them with redesign-specific templates (like profile_redesign.html).
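A minimal sketch of what the consolidation could look like in a view, assuming a single template remains after the cleanup (this is an illustrative Django view, not the actual kuma code, and the template name is only an example):

```python
from django.shortcuts import render

def profile_view(request, username):
    # One canonical template; no waffle flag choosing between old and new.
    return render(request, 'devmo/profile.html', {'username': username})
```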
</issue>
<code>
[start of apps/devmo/views.py]
1 from django.conf import settings
2 from django.core.paginator import Paginator
3 from django.shortcuts import get_object_or_404, render
4 from django.http import (HttpResponseRedirect, HttpResponseForbidden)
5
6 from devmo.urlresolvers import reverse
7
8 import constance.config
9 import basket
10 from taggit.utils import parse_tags
11 from waffle import flag_is_active
12
13 from waffle import flag_is_active
14
15 from access.decorators import login_required
16 from demos.models import Submission
17 from teamwork.models import Team
18
19 from . import INTEREST_SUGGESTIONS
20 from .models import Calendar, Event, UserProfile
21 from .forms import (UserProfileEditForm, newsletter_subscribe,
22 get_subscription_details, subscribed_to_newsletter)
23
24
25 DOCS_ACTIVITY_MAX_ITEMS = getattr(settings,
26 'DOCS_ACTIVITY_MAX_ITEMS', 15)
27
28
29 def events(request):
30 """Developer Engagement Calendar"""
31 cal = Calendar.objects.get(shortname='devengage_events')
32 events = Event.objects.filter(calendar=cal)
33 upcoming_events = events.filter(done=False)
34 past_events = events.filter(done=True)
35 google_maps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY',
36 "ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP"
37 "_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ")
38
39 return render(request, 'devmo/calendar.html', {
40 'upcoming_events': upcoming_events,
41 'past_events': past_events,
42 'google_maps_api_key': google_maps_api_key
43 })
44
45
46 def profile_view(request, username):
47 profile = get_object_or_404(UserProfile, user__username=username)
48 user = profile.user
49
50 DEMOS_PAGE_SIZE = getattr(settings, 'DEMOS_PAGE_SIZE', 12)
51 sort_order = request.GET.get('sort', 'created')
52 try:
53 page_number = int(request.GET.get('page', 1))
54 except ValueError:
55 page_number = 1
56 show_hidden = (user == request.user) or user.is_superuser
57
58 demos = Submission.objects.all_sorted(sort_order).filter(
59 creator=profile.user)
60 if not show_hidden:
61 demos = demos.exclude(hidden=True)
62
63 demos_paginator = Paginator(demos, DEMOS_PAGE_SIZE, True)
64 demos_page = demos_paginator.page(page_number)
65
66 wiki_activity, docs_feed_items = None, None
67 wiki_activity = profile.wiki_activity()
68
69 if request.user.is_anonymous():
70 show_manage_roles_button = False
71 else:
72 # TODO: This seems wasteful, just to decide whether to show the button
73 roles_by_team = Team.objects.get_team_roles_managed_by(request.user,
74 user)
75 show_manage_roles_button = (len(roles_by_team) > 0)
76
77 template = 'devmo/profile.html'
78 if flag_is_active(request, 'redesign'):
79 template = 'devmo/profile_redesign.html'
80
81 return render(request, template, dict(
82 profile=profile, demos=demos, demos_paginator=demos_paginator,
83 demos_page=demos_page, docs_feed_items=docs_feed_items,
84 wiki_activity=wiki_activity,
85 show_manage_roles_button=show_manage_roles_button,
86 ))
87
88
89 @login_required
90 def my_profile(request):
91 user = request.user
92 return HttpResponseRedirect(reverse(
93 'devmo.views.profile_view', args=(user.username,)))
94
95
96 def profile_edit(request, username):
97 """View and edit user profile"""
98 profile = get_object_or_404(UserProfile, user__username=username)
99 context = {'profile': profile}
100 if not profile.allows_editing_by(request.user):
101 return HttpResponseForbidden()
102
103 # Map of form field names to tag namespaces
104 field_to_tag_ns = (
105 ('interests', 'profile:interest:'),
106 ('expertise', 'profile:expertise:')
107 )
108
109
110 if request.method != 'POST':
111 initial = dict(email=profile.user.email, beta=profile.beta_tester)
112
113 # Load up initial websites with either user data or required base URL
114 for name, meta in UserProfile.website_choices:
115 initial['websites_%s' % name] = profile.websites.get(name, '')
116
117 # Form fields to receive tags filtered by namespace.
118 for field, ns in field_to_tag_ns:
119 initial[field] = ', '.join(t.name.replace(ns, '')
120 for t in profile.tags.all_ns(ns))
121
122 subscription_details = get_subscription_details(profile.user.email)
123 if subscribed_to_newsletter(subscription_details):
124 initial['newsletter'] = True
125 initial['agree'] = True
126
127 # Finally, set up the forms.
128 form = UserProfileEditForm(request.locale,
129 instance=profile,
130 initial=initial)
131
132 else:
133 form = UserProfileEditForm(request.locale,
134 request.POST,
135 request.FILES,
136 instance=profile)
137 if form.is_valid():
138 profile_new = form.save(commit=False)
139
140 # Gather up all websites defined by the model, save them.
141 sites = dict()
142 for name, meta in UserProfile.website_choices:
143 field_name = 'websites_%s' % name
144 field_value = form.cleaned_data.get(field_name, '')
145 if field_value and field_value != meta['prefix']:
146 sites[name] = field_value
147 profile_new.websites = sites
148
149 # Save the profile record now, since the rest of this deals with
150 # related resources...
151 profile_new.save()
152
153 # Update tags from form fields
154 for field, tag_ns in field_to_tag_ns:
155 tags = [t.lower() for t in parse_tags(
156 form.cleaned_data.get(field, ''))]
157 profile_new.tags.set_ns(tag_ns, *tags)
158
159 newsletter_subscribe(request, profile_new.user.email,
160 form.cleaned_data)
161 return HttpResponseRedirect(reverse(
162 'devmo.views.profile_view', args=(profile.user.username,)))
163 context['form'] = form
164 context['INTEREST_SUGGESTIONS'] = INTEREST_SUGGESTIONS
165
166 return render(request, 'devmo/profile_edit.html', context)
167
168
169 @login_required
170 def my_profile_edit(request):
171 user = request.user
172 return HttpResponseRedirect(reverse(
173 'devmo.views.profile_edit', args=(user.username,)))
174
[end of apps/devmo/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/devmo/views.py b/apps/devmo/views.py
--- a/apps/devmo/views.py
+++ b/apps/devmo/views.py
@@ -75,8 +75,6 @@
show_manage_roles_button = (len(roles_by_team) > 0)
template = 'devmo/profile.html'
- if flag_is_active(request, 'redesign'):
- template = 'devmo/profile_redesign.html'
return render(request, template, dict(
profile=profile, demos=demos, demos_paginator=demos_paginator,
| {"golden_diff": "diff --git a/apps/devmo/views.py b/apps/devmo/views.py\n--- a/apps/devmo/views.py\n+++ b/apps/devmo/views.py\n@@ -75,8 +75,6 @@\n show_manage_roles_button = (len(roles_by_team) > 0)\n \n template = 'devmo/profile.html'\n- if flag_is_active(request, 'redesign'):\n- template = 'devmo/profile_redesign.html'\n \n return render(request, template, dict(\n profile=profile, demos=demos, demos_paginator=demos_paginator,\n", "issue": "Delete templates used in the old design, replace them with redesign-specific templates (like profile_redesign.html)\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, render\nfrom django.http import (HttpResponseRedirect, HttpResponseForbidden)\n\nfrom devmo.urlresolvers import reverse\n\nimport constance.config\nimport basket\nfrom taggit.utils import parse_tags\nfrom waffle import flag_is_active\n\nfrom waffle import flag_is_active\n\nfrom access.decorators import login_required\nfrom demos.models import Submission\nfrom teamwork.models import Team\n\nfrom . import INTEREST_SUGGESTIONS\nfrom .models import Calendar, Event, UserProfile\nfrom .forms import (UserProfileEditForm, newsletter_subscribe,\n get_subscription_details, subscribed_to_newsletter)\n\n\nDOCS_ACTIVITY_MAX_ITEMS = getattr(settings,\n 'DOCS_ACTIVITY_MAX_ITEMS', 15)\n\n\ndef events(request):\n \"\"\"Developer Engagement Calendar\"\"\"\n cal = Calendar.objects.get(shortname='devengage_events')\n events = Event.objects.filter(calendar=cal)\n upcoming_events = events.filter(done=False)\n past_events = events.filter(done=True)\n google_maps_api_key = getattr(settings, 'GOOGLE_MAPS_API_KEY',\n \"ABQIAAAAijZqBZcz-rowoXZC1tt9iRT5rHVQFKUGOHoyfP\"\n \"_4KyrflbHKcRTt9kQJVST5oKMRj8vKTQS2b7oNjQ\")\n\n return render(request, 'devmo/calendar.html', {\n 'upcoming_events': upcoming_events,\n 'past_events': past_events,\n 'google_maps_api_key': google_maps_api_key\n })\n\n\ndef profile_view(request, username):\n profile = get_object_or_404(UserProfile, user__username=username)\n user = profile.user\n\n DEMOS_PAGE_SIZE = getattr(settings, 'DEMOS_PAGE_SIZE', 12)\n sort_order = request.GET.get('sort', 'created')\n try:\n page_number = int(request.GET.get('page', 1))\n except ValueError:\n page_number = 1\n show_hidden = (user == request.user) or user.is_superuser\n\n demos = Submission.objects.all_sorted(sort_order).filter(\n creator=profile.user)\n if not show_hidden:\n demos = demos.exclude(hidden=True)\n\n demos_paginator = Paginator(demos, DEMOS_PAGE_SIZE, True)\n demos_page = demos_paginator.page(page_number)\n\n wiki_activity, docs_feed_items = None, None\n wiki_activity = profile.wiki_activity()\n\n if request.user.is_anonymous():\n show_manage_roles_button = False\n else:\n # TODO: This seems wasteful, just to decide whether to show the button\n roles_by_team = Team.objects.get_team_roles_managed_by(request.user,\n user)\n show_manage_roles_button = (len(roles_by_team) > 0)\n\n template = 'devmo/profile.html'\n if flag_is_active(request, 'redesign'):\n template = 'devmo/profile_redesign.html'\n\n return render(request, template, dict(\n profile=profile, demos=demos, demos_paginator=demos_paginator,\n demos_page=demos_page, docs_feed_items=docs_feed_items,\n wiki_activity=wiki_activity,\n show_manage_roles_button=show_manage_roles_button,\n ))\n\n\n@login_required\ndef my_profile(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', 
args=(user.username,)))\n\n\ndef profile_edit(request, username):\n \"\"\"View and edit user profile\"\"\"\n profile = get_object_or_404(UserProfile, user__username=username)\n context = {'profile': profile}\n if not profile.allows_editing_by(request.user):\n return HttpResponseForbidden()\n\n # Map of form field names to tag namespaces\n field_to_tag_ns = (\n ('interests', 'profile:interest:'),\n ('expertise', 'profile:expertise:')\n )\n\n\n if request.method != 'POST':\n initial = dict(email=profile.user.email, beta=profile.beta_tester)\n\n # Load up initial websites with either user data or required base URL\n for name, meta in UserProfile.website_choices:\n initial['websites_%s' % name] = profile.websites.get(name, '')\n\n # Form fields to receive tags filtered by namespace.\n for field, ns in field_to_tag_ns:\n initial[field] = ', '.join(t.name.replace(ns, '')\n for t in profile.tags.all_ns(ns))\n\n subscription_details = get_subscription_details(profile.user.email)\n if subscribed_to_newsletter(subscription_details):\n initial['newsletter'] = True\n initial['agree'] = True\n\n # Finally, set up the forms.\n form = UserProfileEditForm(request.locale,\n instance=profile,\n initial=initial)\n\n else:\n form = UserProfileEditForm(request.locale,\n request.POST,\n request.FILES,\n instance=profile)\n if form.is_valid():\n profile_new = form.save(commit=False)\n\n # Gather up all websites defined by the model, save them.\n sites = dict()\n for name, meta in UserProfile.website_choices:\n field_name = 'websites_%s' % name\n field_value = form.cleaned_data.get(field_name, '')\n if field_value and field_value != meta['prefix']:\n sites[name] = field_value\n profile_new.websites = sites\n\n # Save the profile record now, since the rest of this deals with\n # related resources...\n profile_new.save()\n\n # Update tags from form fields\n for field, tag_ns in field_to_tag_ns:\n tags = [t.lower() for t in parse_tags(\n form.cleaned_data.get(field, ''))]\n profile_new.tags.set_ns(tag_ns, *tags)\n\n newsletter_subscribe(request, profile_new.user.email,\n form.cleaned_data)\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_view', args=(profile.user.username,)))\n context['form'] = form\n context['INTEREST_SUGGESTIONS'] = INTEREST_SUGGESTIONS\n\n return render(request, 'devmo/profile_edit.html', context)\n\n\n@login_required\ndef my_profile_edit(request):\n user = request.user\n return HttpResponseRedirect(reverse(\n 'devmo.views.profile_edit', args=(user.username,)))\n", "path": "apps/devmo/views.py"}]} | 2,322 | 122 |
gh_patches_debug_5888 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-6605 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AttributeError: 'ClientPlayback' object has no attribute 'options'
#### Problem Description
Client playback does not seem to work anymore, since 10.2.0.
On 10.1.6 it still works.
I am getting the following error:
>Traceback (most recent call last):
File "mitmproxy/addons/clientplayback.py", line 177, in playback
AttributeError: 'ClientPlayback' object has no attribute 'options'
#### Steps to reproduce the behavior:
1. Save a recorded flow
2. Replay it using `mitmdump -C stored.flow`
3. See error
#### System Information
Mitmproxy: 10.2.1 binary
Python: 3.12.1
OpenSSL: OpenSSL 3.1.4 24 Oct 2023
Platform: macOS-14.2.1-x86_64-i386-64bit
</issue>
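The traceback above points at `playback()` reading `self.options` before `running()` has assigned it. Purely for illustration — the snippet below is a hypothetical, simplified sketch, not mitmproxy code — this is the general hazard of scheduling a task before assigning an attribute that the task reads:

```python
import asyncio

class Addon:
    async def start(self):
        # The worker task is scheduled first ...
        self.task = asyncio.create_task(self.worker())
        await asyncio.sleep(0)             # ... the event loop gets a chance to run it ...
        self.options = {"concurrency": 1}  # ... and only then is the attribute assigned.

    async def worker(self):
        # Runs during the sleep(0) above, i.e. before the assignment:
        print(self.options)  # AttributeError: 'Addon' object has no attribute 'options'

async def main():
    addon = Addon()
    await addon.start()
    await addon.task

asyncio.run(main())
```

Assigning the attribute before creating the task removes the ordering dependency, which is what the patch shown further below does for `self.options` in `ClientPlayback.running()`.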
<code>
[start of mitmproxy/addons/clientplayback.py]
1 from __future__ import annotations
2
3 import asyncio
4 import logging
5 import time
6 from collections.abc import Sequence
7 from types import TracebackType
8 from typing import cast
9 from typing import Literal
10
11 import mitmproxy.types
12 from mitmproxy import command
13 from mitmproxy import ctx
14 from mitmproxy import exceptions
15 from mitmproxy import flow
16 from mitmproxy import http
17 from mitmproxy import io
18 from mitmproxy.connection import ConnectionState
19 from mitmproxy.connection import Server
20 from mitmproxy.hooks import UpdateHook
21 from mitmproxy.log import ALERT
22 from mitmproxy.options import Options
23 from mitmproxy.proxy import commands
24 from mitmproxy.proxy import events
25 from mitmproxy.proxy import layers
26 from mitmproxy.proxy import server
27 from mitmproxy.proxy.context import Context
28 from mitmproxy.proxy.layer import CommandGenerator
29 from mitmproxy.proxy.layers.http import HTTPMode
30 from mitmproxy.proxy.mode_specs import UpstreamMode
31 from mitmproxy.utils import asyncio_utils
32
33 logger = logging.getLogger(__name__)
34
35
36 class MockServer(layers.http.HttpConnection):
37 """
38 A mock HTTP "server" that just pretends it received a full HTTP request,
39 which is then processed by the proxy core.
40 """
41
42 flow: http.HTTPFlow
43
44 def __init__(self, flow: http.HTTPFlow, context: Context):
45 super().__init__(context, context.client)
46 self.flow = flow
47
48 def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
49 if isinstance(event, events.Start):
50 content = self.flow.request.raw_content
51 self.flow.request.timestamp_start = (
52 self.flow.request.timestamp_end
53 ) = time.time()
54 yield layers.http.ReceiveHttp(
55 layers.http.RequestHeaders(
56 1,
57 self.flow.request,
58 end_stream=not (content or self.flow.request.trailers),
59 replay_flow=self.flow,
60 )
61 )
62 if content:
63 yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))
64 if self.flow.request.trailers: # pragma: no cover
65 # TODO: Cover this once we support HTTP/1 trailers.
66 yield layers.http.ReceiveHttp(
67 layers.http.RequestTrailers(1, self.flow.request.trailers)
68 )
69 yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))
70 elif isinstance(
71 event,
72 (
73 layers.http.ResponseHeaders,
74 layers.http.ResponseData,
75 layers.http.ResponseTrailers,
76 layers.http.ResponseEndOfMessage,
77 layers.http.ResponseProtocolError,
78 ),
79 ):
80 pass
81 else: # pragma: no cover
82 logger.warning(f"Unexpected event during replay: {event}")
83
84
85 class ReplayHandler(server.ConnectionHandler):
86 layer: layers.HttpLayer
87
88 def __init__(self, flow: http.HTTPFlow, options: Options) -> None:
89 client = flow.client_conn.copy()
90 client.state = ConnectionState.OPEN
91
92 context = Context(client, options)
93 context.server = Server(address=(flow.request.host, flow.request.port))
94 if flow.request.scheme == "https":
95 context.server.tls = True
96 context.server.sni = flow.request.pretty_host
97 if options.mode and options.mode[0].startswith("upstream:"):
98 mode = UpstreamMode.parse(options.mode[0])
99 assert isinstance(mode, UpstreamMode) # remove once mypy supports Self.
100 context.server.via = flow.server_conn.via = (mode.scheme, mode.address)
101
102 super().__init__(context)
103
104 if options.mode and options.mode[0].startswith("upstream:"):
105 self.layer = layers.HttpLayer(context, HTTPMode.upstream)
106 else:
107 self.layer = layers.HttpLayer(context, HTTPMode.transparent)
108 self.layer.connections[client] = MockServer(flow, context.fork())
109 self.flow = flow
110 self.done = asyncio.Event()
111
112 async def replay(self) -> None:
113 self.server_event(events.Start())
114 await self.done.wait()
115
116 def log(
117 self,
118 message: str,
119 level: int = logging.INFO,
120 exc_info: Literal[True]
121 | tuple[type[BaseException] | None, BaseException | None, TracebackType | None]
122 | None = None,
123 ) -> None:
124 assert isinstance(level, int)
125 logger.log(level=level, msg=f"[replay] {message}")
126
127 async def handle_hook(self, hook: commands.StartHook) -> None:
128 (data,) = hook.args()
129 await ctx.master.addons.handle_lifecycle(hook)
130 if isinstance(data, flow.Flow):
131 await data.wait_for_resume()
132 if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):
133 if self.transports:
134 # close server connections
135 for x in self.transports.values():
136 if x.handler:
137 x.handler.cancel()
138 await asyncio.wait(
139 [x.handler for x in self.transports.values() if x.handler]
140 )
141 # signal completion
142 self.done.set()
143
144
145 class ClientPlayback:
146 playback_task: asyncio.Task | None = None
147 inflight: http.HTTPFlow | None
148 queue: asyncio.Queue
149 options: Options
150 replay_tasks: set[asyncio.Task]
151
152 def __init__(self):
153 self.queue = asyncio.Queue()
154 self.inflight = None
155 self.task = None
156 self.replay_tasks = set()
157
158 def running(self):
159 self.playback_task = asyncio_utils.create_task(
160 self.playback(), name="client playback"
161 )
162 self.options = ctx.options
163
164 async def done(self):
165 if self.playback_task:
166 self.playback_task.cancel()
167 try:
168 await self.playback_task
169 except asyncio.CancelledError:
170 pass
171
172 async def playback(self):
173 while True:
174 self.inflight = await self.queue.get()
175 try:
176 assert self.inflight
177 h = ReplayHandler(self.inflight, self.options)
178 if ctx.options.client_replay_concurrency == -1:
179 t = asyncio_utils.create_task(
180 h.replay(), name="client playback awaiting response"
181 )
182 # keep a reference so this is not garbage collected
183 self.replay_tasks.add(t)
184 t.add_done_callback(self.replay_tasks.remove)
185 else:
186 await h.replay()
187 except Exception:
188 logger.exception(f"Client replay has crashed!")
189 self.queue.task_done()
190 self.inflight = None
191
192 def check(self, f: flow.Flow) -> str | None:
193 if f.live or f == self.inflight:
194 return "Can't replay live flow."
195 if f.intercepted:
196 return "Can't replay intercepted flow."
197 if isinstance(f, http.HTTPFlow):
198 if not f.request:
199 return "Can't replay flow with missing request."
200 if f.request.raw_content is None:
201 return "Can't replay flow with missing content."
202 if f.websocket is not None:
203 return "Can't replay WebSocket flows."
204 else:
205 return "Can only replay HTTP flows."
206 return None
207
208 def load(self, loader):
209 loader.add_option(
210 "client_replay",
211 Sequence[str],
212 [],
213 "Replay client requests from a saved file.",
214 )
215 loader.add_option(
216 "client_replay_concurrency",
217 int,
218 1,
219 "Concurrency limit on in-flight client replay requests. Currently the only valid values are 1 and -1 (no limit).",
220 )
221
222 def configure(self, updated):
223 if "client_replay" in updated and ctx.options.client_replay:
224 try:
225 flows = io.read_flows_from_paths(ctx.options.client_replay)
226 except exceptions.FlowReadException as e:
227 raise exceptions.OptionsError(str(e))
228 self.start_replay(flows)
229
230 if "client_replay_concurrency" in updated:
231 if ctx.options.client_replay_concurrency not in [-1, 1]:
232 raise exceptions.OptionsError(
233 "Currently the only valid client_replay_concurrency values are -1 and 1."
234 )
235
236 @command.command("replay.client.count")
237 def count(self) -> int:
238 """
239 Approximate number of flows queued for replay.
240 """
241 return self.queue.qsize() + int(bool(self.inflight))
242
243 @command.command("replay.client.stop")
244 def stop_replay(self) -> None:
245 """
246 Clear the replay queue.
247 """
248 updated = []
249 while True:
250 try:
251 f = self.queue.get_nowait()
252 except asyncio.QueueEmpty:
253 break
254 else:
255 self.queue.task_done()
256 f.revert()
257 updated.append(f)
258
259 ctx.master.addons.trigger(UpdateHook(updated))
260 logger.log(ALERT, "Client replay queue cleared.")
261
262 @command.command("replay.client")
263 def start_replay(self, flows: Sequence[flow.Flow]) -> None:
264 """
265 Add flows to the replay queue, skipping flows that can't be replayed.
266 """
267 updated: list[http.HTTPFlow] = []
268 for f in flows:
269 err = self.check(f)
270 if err:
271 logger.warning(err)
272 continue
273
274 http_flow = cast(http.HTTPFlow, f)
275
276 # Prepare the flow for replay
277 http_flow.backup()
278 http_flow.is_replay = "request"
279 http_flow.response = None
280 http_flow.error = None
281 self.queue.put_nowait(http_flow)
282 updated.append(http_flow)
283 ctx.master.addons.trigger(UpdateHook(updated))
284
285 @command.command("replay.client.file")
286 def load_file(self, path: mitmproxy.types.Path) -> None:
287 """
288 Load flows from file, and add them to the replay queue.
289 """
290 try:
291 flows = io.read_flows_from_paths([path])
292 except exceptions.FlowReadException as e:
293 raise exceptions.CommandError(str(e))
294 self.start_replay(flows)
295
[end of mitmproxy/addons/clientplayback.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py
--- a/mitmproxy/addons/clientplayback.py
+++ b/mitmproxy/addons/clientplayback.py
@@ -156,10 +156,10 @@
self.replay_tasks = set()
def running(self):
+ self.options = ctx.options
self.playback_task = asyncio_utils.create_task(
self.playback(), name="client playback"
)
- self.options = ctx.options
async def done(self):
if self.playback_task:
| {"golden_diff": "diff --git a/mitmproxy/addons/clientplayback.py b/mitmproxy/addons/clientplayback.py\n--- a/mitmproxy/addons/clientplayback.py\n+++ b/mitmproxy/addons/clientplayback.py\n@@ -156,10 +156,10 @@\n self.replay_tasks = set()\n \n def running(self):\n+ self.options = ctx.options\n self.playback_task = asyncio_utils.create_task(\n self.playback(), name=\"client playback\"\n )\n- self.options = ctx.options\n \n async def done(self):\n if self.playback_task:\n", "issue": "AttributeError: 'ClientPlayback' object has no attribute 'options'\n#### Problem Description\r\nClient playback does not seem to work anymore, since 10.2.0. \r\nOn 10.1.6 it still works.\r\n\r\nI am getting the following error:\r\n>Traceback (most recent call last):\r\n File \"mitmproxy/addons/clientplayback.py\", line 177, in playback\r\nAttributeError: 'ClientPlayback' object has no attribute 'options'\r\n\r\n#### Steps to reproduce the behavior:\r\n1. Save a recorded flow\r\n2. Replay it using `mitmdump -C stored.flow` \r\n3. See error\r\n\r\n#### System Information\r\nMitmproxy: 10.2.1 binary\r\nPython: 3.12.1\r\nOpenSSL: OpenSSL 3.1.4 24 Oct 2023\r\nPlatform: macOS-14.2.1-x86_64-i386-64bit\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport asyncio\nimport logging\nimport time\nfrom collections.abc import Sequence\nfrom types import TracebackType\nfrom typing import cast\nfrom typing import Literal\n\nimport mitmproxy.types\nfrom mitmproxy import command\nfrom mitmproxy import ctx\nfrom mitmproxy import exceptions\nfrom mitmproxy import flow\nfrom mitmproxy import http\nfrom mitmproxy import io\nfrom mitmproxy.connection import ConnectionState\nfrom mitmproxy.connection import Server\nfrom mitmproxy.hooks import UpdateHook\nfrom mitmproxy.log import ALERT\nfrom mitmproxy.options import Options\nfrom mitmproxy.proxy import commands\nfrom mitmproxy.proxy import events\nfrom mitmproxy.proxy import layers\nfrom mitmproxy.proxy import server\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.layer import CommandGenerator\nfrom mitmproxy.proxy.layers.http import HTTPMode\nfrom mitmproxy.proxy.mode_specs import UpstreamMode\nfrom mitmproxy.utils import asyncio_utils\n\nlogger = logging.getLogger(__name__)\n\n\nclass MockServer(layers.http.HttpConnection):\n \"\"\"\n A mock HTTP \"server\" that just pretends it received a full HTTP request,\n which is then processed by the proxy core.\n \"\"\"\n\n flow: http.HTTPFlow\n\n def __init__(self, flow: http.HTTPFlow, context: Context):\n super().__init__(context, context.client)\n self.flow = flow\n\n def _handle_event(self, event: events.Event) -> CommandGenerator[None]:\n if isinstance(event, events.Start):\n content = self.flow.request.raw_content\n self.flow.request.timestamp_start = (\n self.flow.request.timestamp_end\n ) = time.time()\n yield layers.http.ReceiveHttp(\n layers.http.RequestHeaders(\n 1,\n self.flow.request,\n end_stream=not (content or self.flow.request.trailers),\n replay_flow=self.flow,\n )\n )\n if content:\n yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))\n if self.flow.request.trailers: # pragma: no cover\n # TODO: Cover this once we support HTTP/1 trailers.\n yield layers.http.ReceiveHttp(\n layers.http.RequestTrailers(1, self.flow.request.trailers)\n )\n yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))\n elif isinstance(\n event,\n (\n layers.http.ResponseHeaders,\n layers.http.ResponseData,\n layers.http.ResponseTrailers,\n layers.http.ResponseEndOfMessage,\n 
layers.http.ResponseProtocolError,\n ),\n ):\n pass\n else: # pragma: no cover\n logger.warning(f\"Unexpected event during replay: {event}\")\n\n\nclass ReplayHandler(server.ConnectionHandler):\n layer: layers.HttpLayer\n\n def __init__(self, flow: http.HTTPFlow, options: Options) -> None:\n client = flow.client_conn.copy()\n client.state = ConnectionState.OPEN\n\n context = Context(client, options)\n context.server = Server(address=(flow.request.host, flow.request.port))\n if flow.request.scheme == \"https\":\n context.server.tls = True\n context.server.sni = flow.request.pretty_host\n if options.mode and options.mode[0].startswith(\"upstream:\"):\n mode = UpstreamMode.parse(options.mode[0])\n assert isinstance(mode, UpstreamMode) # remove once mypy supports Self.\n context.server.via = flow.server_conn.via = (mode.scheme, mode.address)\n\n super().__init__(context)\n\n if options.mode and options.mode[0].startswith(\"upstream:\"):\n self.layer = layers.HttpLayer(context, HTTPMode.upstream)\n else:\n self.layer = layers.HttpLayer(context, HTTPMode.transparent)\n self.layer.connections[client] = MockServer(flow, context.fork())\n self.flow = flow\n self.done = asyncio.Event()\n\n async def replay(self) -> None:\n self.server_event(events.Start())\n await self.done.wait()\n\n def log(\n self,\n message: str,\n level: int = logging.INFO,\n exc_info: Literal[True]\n | tuple[type[BaseException] | None, BaseException | None, TracebackType | None]\n | None = None,\n ) -> None:\n assert isinstance(level, int)\n logger.log(level=level, msg=f\"[replay] {message}\")\n\n async def handle_hook(self, hook: commands.StartHook) -> None:\n (data,) = hook.args()\n await ctx.master.addons.handle_lifecycle(hook)\n if isinstance(data, flow.Flow):\n await data.wait_for_resume()\n if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):\n if self.transports:\n # close server connections\n for x in self.transports.values():\n if x.handler:\n x.handler.cancel()\n await asyncio.wait(\n [x.handler for x in self.transports.values() if x.handler]\n )\n # signal completion\n self.done.set()\n\n\nclass ClientPlayback:\n playback_task: asyncio.Task | None = None\n inflight: http.HTTPFlow | None\n queue: asyncio.Queue\n options: Options\n replay_tasks: set[asyncio.Task]\n\n def __init__(self):\n self.queue = asyncio.Queue()\n self.inflight = None\n self.task = None\n self.replay_tasks = set()\n\n def running(self):\n self.playback_task = asyncio_utils.create_task(\n self.playback(), name=\"client playback\"\n )\n self.options = ctx.options\n\n async def done(self):\n if self.playback_task:\n self.playback_task.cancel()\n try:\n await self.playback_task\n except asyncio.CancelledError:\n pass\n\n async def playback(self):\n while True:\n self.inflight = await self.queue.get()\n try:\n assert self.inflight\n h = ReplayHandler(self.inflight, self.options)\n if ctx.options.client_replay_concurrency == -1:\n t = asyncio_utils.create_task(\n h.replay(), name=\"client playback awaiting response\"\n )\n # keep a reference so this is not garbage collected\n self.replay_tasks.add(t)\n t.add_done_callback(self.replay_tasks.remove)\n else:\n await h.replay()\n except Exception:\n logger.exception(f\"Client replay has crashed!\")\n self.queue.task_done()\n self.inflight = None\n\n def check(self, f: flow.Flow) -> str | None:\n if f.live or f == self.inflight:\n return \"Can't replay live flow.\"\n if f.intercepted:\n return \"Can't replay intercepted flow.\"\n if isinstance(f, http.HTTPFlow):\n if not 
f.request:\n return \"Can't replay flow with missing request.\"\n if f.request.raw_content is None:\n return \"Can't replay flow with missing content.\"\n if f.websocket is not None:\n return \"Can't replay WebSocket flows.\"\n else:\n return \"Can only replay HTTP flows.\"\n return None\n\n def load(self, loader):\n loader.add_option(\n \"client_replay\",\n Sequence[str],\n [],\n \"Replay client requests from a saved file.\",\n )\n loader.add_option(\n \"client_replay_concurrency\",\n int,\n 1,\n \"Concurrency limit on in-flight client replay requests. Currently the only valid values are 1 and -1 (no limit).\",\n )\n\n def configure(self, updated):\n if \"client_replay\" in updated and ctx.options.client_replay:\n try:\n flows = io.read_flows_from_paths(ctx.options.client_replay)\n except exceptions.FlowReadException as e:\n raise exceptions.OptionsError(str(e))\n self.start_replay(flows)\n\n if \"client_replay_concurrency\" in updated:\n if ctx.options.client_replay_concurrency not in [-1, 1]:\n raise exceptions.OptionsError(\n \"Currently the only valid client_replay_concurrency values are -1 and 1.\"\n )\n\n @command.command(\"replay.client.count\")\n def count(self) -> int:\n \"\"\"\n Approximate number of flows queued for replay.\n \"\"\"\n return self.queue.qsize() + int(bool(self.inflight))\n\n @command.command(\"replay.client.stop\")\n def stop_replay(self) -> None:\n \"\"\"\n Clear the replay queue.\n \"\"\"\n updated = []\n while True:\n try:\n f = self.queue.get_nowait()\n except asyncio.QueueEmpty:\n break\n else:\n self.queue.task_done()\n f.revert()\n updated.append(f)\n\n ctx.master.addons.trigger(UpdateHook(updated))\n logger.log(ALERT, \"Client replay queue cleared.\")\n\n @command.command(\"replay.client\")\n def start_replay(self, flows: Sequence[flow.Flow]) -> None:\n \"\"\"\n Add flows to the replay queue, skipping flows that can't be replayed.\n \"\"\"\n updated: list[http.HTTPFlow] = []\n for f in flows:\n err = self.check(f)\n if err:\n logger.warning(err)\n continue\n\n http_flow = cast(http.HTTPFlow, f)\n\n # Prepare the flow for replay\n http_flow.backup()\n http_flow.is_replay = \"request\"\n http_flow.response = None\n http_flow.error = None\n self.queue.put_nowait(http_flow)\n updated.append(http_flow)\n ctx.master.addons.trigger(UpdateHook(updated))\n\n @command.command(\"replay.client.file\")\n def load_file(self, path: mitmproxy.types.Path) -> None:\n \"\"\"\n Load flows from file, and add them to the replay queue.\n \"\"\"\n try:\n flows = io.read_flows_from_paths([path])\n except exceptions.FlowReadException as e:\n raise exceptions.CommandError(str(e))\n self.start_replay(flows)\n", "path": "mitmproxy/addons/clientplayback.py"}]} | 3,643 | 130 |
gh_patches_debug_24085 | rasdani/github-patches | git_diff | conan-io__conan-2870 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CONAN_LOGIN_USERNAME and CONAN_PASSWORD are ignored in non-interactive mode
Conan 1.3.2
To reproduce:
1. Run `conan user -c`
2. Set `CONAN_LOGIN_USERNAME`, `CONAN_PASSWORD` and `CONAN_NON_INTERACTIVE` environment variables.
3. Run `conan upload -r staging` to remove which requires authentication.
**Expected result**: Conan uses credentials stored in environment variables (as was in Conan 1.2)
**Actual result**: Upload fails with message:
```
Please log in to "staging" to perform this action. Execute "conan user" command.
ERROR: Conan interactive mode disabled. [Remote: staging]
```
My best guess is that in `userio.py`
```python
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
self._raise_if_non_interactive()
```
the call to `self._raise_if_non_interactive()` is unnecessary, as this check is already performed by the functions called from it (having the prompt printed is not that ugly; alternatively, move this check closer to the `conan user` command).
An option to always require an explicit `conan user` is also possible, although it feels a little inconsistent and makes the credential environment variables pointless.
</issue>
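For reference, the environment-variable lookups themselves already exist in `UserIO.get_username()` / `UserIO.get_password()` (via `_get_env_username` / `_get_env_password` in the code below); it is only the early `_raise_if_non_interactive()` call that prevents them from ever being reached. A rough, hypothetical sketch of the intended non-interactive behaviour — the variable naming follows those helpers, everything else is simplified:

```python
import os

def env_credentials(remote_name):
    """Return (username, password) from CONAN_* variables, or None if unset."""
    key = remote_name.replace("-", "_").upper()
    username = os.getenv("CONAN_LOGIN_USERNAME_%s" % key) or os.getenv("CONAN_LOGIN_USERNAME")
    password = os.getenv("CONAN_PASSWORD_%s" % key) or os.getenv("CONAN_PASSWORD")
    if username and password:
        return username, password
    return None

# Non-interactive mode should only become an error when no credentials
# can be found in the environment and a prompt would actually be required:
if env_credentials("staging") is None:
    raise RuntimeError("Conan interactive mode disabled")
```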
<code>
[start of conans/client/userio.py]
1 import os
2 import sys
3 from conans.client.output import ConanOutput
4 from conans.model.username import Username
5 from conans.errors import InvalidNameException, ConanException
6 import getpass
7 from six.moves import input as raw_input
8
9
10 class UserIO(object):
11 """Class to interact with the user, used to show messages and ask for information"""
12
13 def __init__(self, ins=sys.stdin, out=None):
14 """
15 Params:
16 ins: input stream
17 out: ConanOutput, should have "write" method
18 """
19 self._ins = ins
20 if not out:
21 out = ConanOutput(sys.stdout)
22 self.out = out
23 self._interactive = True
24
25 def disable_input(self):
26 self._interactive = False
27
28 def _raise_if_non_interactive(self):
29 if not self._interactive:
30 raise ConanException("Conan interactive mode disabled")
31
32 def raw_input(self):
33 self._raise_if_non_interactive()
34 return raw_input()
35
36 def get_pass(self):
37 self._raise_if_non_interactive()
38 return getpass.getpass("")
39
40 def request_login(self, remote_name, username=None):
41 """Request user to input their name and password
42 :param username If username is specified it only request password"""
43 self._raise_if_non_interactive()
44 user_input = ''
45 while not username:
46 try:
47 self.out.write("Remote '%s' username: " % remote_name)
48 user_input = self.get_username(remote_name)
49 username = Username(user_input)
50 except InvalidNameException:
51 self.out.error('%s is not a valid username' % user_input)
52
53 self.out.write('Please enter a password for "%s" account: ' % username)
54 try:
55 pwd = self.get_password(remote_name)
56 except ConanException:
57 raise
58 except Exception as e:
59 raise ConanException('Cancelled pass %s' % e)
60 return username, pwd
61
62 def get_username(self, remote_name):
63 """Overridable for testing purpose"""
64 return self._get_env_username(remote_name) or self.raw_input()
65
66 def get_password(self, remote_name):
67 """Overridable for testing purpose"""
68 return self._get_env_password(remote_name) or self.get_pass()
69
70 def request_string(self, msg, default_value=None):
71 """Request user to input a msg
72 :param msg Name of the msg
73 """
74 self._raise_if_non_interactive()
75
76 if default_value:
77 self.out.input_text('%s (%s): ' % (msg, default_value))
78 else:
79 self.out.input_text('%s: ' % msg)
80 s = self._ins.readline().replace("\n", "")
81 if default_value is not None and s == '':
82 return default_value
83 return s
84
85 def request_boolean(self, msg, default_option=None):
86 """Request user to input a boolean"""
87 ret = None
88 while ret is None:
89 if default_option is True:
90 s = self.request_string("%s (YES/no)" % msg)
91 elif default_option is False:
92 s = self.request_string("%s (NO/yes)" % msg)
93 else:
94 s = self.request_string("%s (yes/no)" % msg)
95 if default_option is not None and s == '':
96 return default_option
97 if s.lower() in ['yes', 'y']:
98 ret = True
99 elif s.lower() in ['no', 'n']:
100 ret = False
101 else:
102 self.out.error("%s is not a valid answer" % s)
103 return ret
104
105 def _get_env_password(self, remote_name):
106 """
107 Try CONAN_PASSWORD_REMOTE_NAME or CONAN_PASSWORD or return None
108 """
109 remote_name = remote_name.replace("-", "_").upper()
110 var_name = "CONAN_PASSWORD_%s" % remote_name
111 ret = os.getenv(var_name, None) or os.getenv("CONAN_PASSWORD", None)
112 if ret:
113 self.out.info("Got password '******' from environment")
114 return ret
115
116 def _get_env_username(self, remote_name):
117 """
118 Try CONAN_LOGIN_USERNAME_REMOTE_NAME or CONAN_LOGIN_USERNAME or return None
119 """
120 remote_name = remote_name.replace("-", "_").upper()
121 var_name = "CONAN_LOGIN_USERNAME_%s" % remote_name
122 ret = os.getenv(var_name, None) or os.getenv("CONAN_LOGIN_USERNAME", None)
123
124 if ret:
125 self.out.info("Got username '%s' from environment" % ret)
126 return ret
127
[end of conans/client/userio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conans/client/userio.py b/conans/client/userio.py
--- a/conans/client/userio.py
+++ b/conans/client/userio.py
@@ -40,17 +40,17 @@
def request_login(self, remote_name, username=None):
"""Request user to input their name and password
:param username If username is specified it only request password"""
- self._raise_if_non_interactive()
user_input = ''
while not username:
try:
- self.out.write("Remote '%s' username: " % remote_name)
+ if self._interactive:
+ self.out.write("Remote '%s' username: " % remote_name)
user_input = self.get_username(remote_name)
username = Username(user_input)
except InvalidNameException:
self.out.error('%s is not a valid username' % user_input)
-
- self.out.write('Please enter a password for "%s" account: ' % username)
+ if self._interactive:
+ self.out.write('Please enter a password for "%s" account: ' % username)
try:
pwd = self.get_password(remote_name)
except ConanException:
| {"golden_diff": "diff --git a/conans/client/userio.py b/conans/client/userio.py\n--- a/conans/client/userio.py\n+++ b/conans/client/userio.py\n@@ -40,17 +40,17 @@\n def request_login(self, remote_name, username=None):\n \"\"\"Request user to input their name and password\n :param username If username is specified it only request password\"\"\"\n- self._raise_if_non_interactive()\n user_input = ''\n while not username:\n try:\n- self.out.write(\"Remote '%s' username: \" % remote_name)\n+ if self._interactive:\n+ self.out.write(\"Remote '%s' username: \" % remote_name)\n user_input = self.get_username(remote_name)\n username = Username(user_input)\n except InvalidNameException:\n self.out.error('%s is not a valid username' % user_input)\n-\n- self.out.write('Please enter a password for \"%s\" account: ' % username)\n+ if self._interactive:\n+ self.out.write('Please enter a password for \"%s\" account: ' % username)\n try:\n pwd = self.get_password(remote_name)\n except ConanException:\n", "issue": "CONAN_LOGIN_USERNAME and CONAN_PASSWORD are ignored in non-interactive mode\nConan 1.3.2\r\n\r\nTo reproduce:\r\n1. Run `conan user -c`\r\n2. Set `CONAN_LOGIN_USERNAME`, `CONAN_PASSWORD` and `CONAN_NON_INTERACTIVE` environment variables.\r\n3. Run `conan upload -r staging` to remove which requires authentication.\r\n\r\n**Expected result**: Conan uses credentials stored in environment variables (as was in Conan 1.2)\r\n**Actual result**: Upload fails with message:\r\n```\r\nPlease log in to \"staging\" to perform this action. Execute \"conan user\" command.\r\nERROR: Conan interactive mode disabled. [Remote: staging]\r\n```\r\n\r\nMy best guess is that in `userio.py`\r\n```python\r\n def request_login(self, remote_name, username=None):\r\n \"\"\"Request user to input their name and password\r\n :param username If username is specified it only request password\"\"\"\r\n self._raise_if_non_interactive()\r\n```\r\ncall to `self._raise_if_non_interactive()` is unnecessary as this check performed by functions called from it (Having prompt printed is not that ugly, or move this check closer to `conan user` command).\r\n\r\nOption to always require explicit `conan user` is also possible, although feels little bit inconsistent and makes use of credentials variables pointless.\n", "before_files": [{"content": "import os\nimport sys\nfrom conans.client.output import ConanOutput\nfrom conans.model.username import Username\nfrom conans.errors import InvalidNameException, ConanException\nimport getpass\nfrom six.moves import input as raw_input\n\n\nclass UserIO(object):\n \"\"\"Class to interact with the user, used to show messages and ask for information\"\"\"\n\n def __init__(self, ins=sys.stdin, out=None):\n \"\"\"\n Params:\n ins: input stream\n out: ConanOutput, should have \"write\" method\n \"\"\"\n self._ins = ins\n if not out:\n out = ConanOutput(sys.stdout)\n self.out = out\n self._interactive = True\n\n def disable_input(self):\n self._interactive = False\n\n def _raise_if_non_interactive(self):\n if not self._interactive:\n raise ConanException(\"Conan interactive mode disabled\")\n\n def raw_input(self):\n self._raise_if_non_interactive()\n return raw_input()\n\n def get_pass(self):\n self._raise_if_non_interactive()\n return getpass.getpass(\"\")\n\n def request_login(self, remote_name, username=None):\n \"\"\"Request user to input their name and password\n :param username If username is specified it only request password\"\"\"\n self._raise_if_non_interactive()\n user_input = ''\n while not 
username:\n try:\n self.out.write(\"Remote '%s' username: \" % remote_name)\n user_input = self.get_username(remote_name)\n username = Username(user_input)\n except InvalidNameException:\n self.out.error('%s is not a valid username' % user_input)\n\n self.out.write('Please enter a password for \"%s\" account: ' % username)\n try:\n pwd = self.get_password(remote_name)\n except ConanException:\n raise\n except Exception as e:\n raise ConanException('Cancelled pass %s' % e)\n return username, pwd\n\n def get_username(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_username(remote_name) or self.raw_input()\n\n def get_password(self, remote_name):\n \"\"\"Overridable for testing purpose\"\"\"\n return self._get_env_password(remote_name) or self.get_pass()\n\n def request_string(self, msg, default_value=None):\n \"\"\"Request user to input a msg\n :param msg Name of the msg\n \"\"\"\n self._raise_if_non_interactive()\n\n if default_value:\n self.out.input_text('%s (%s): ' % (msg, default_value))\n else:\n self.out.input_text('%s: ' % msg)\n s = self._ins.readline().replace(\"\\n\", \"\")\n if default_value is not None and s == '':\n return default_value\n return s\n\n def request_boolean(self, msg, default_option=None):\n \"\"\"Request user to input a boolean\"\"\"\n ret = None\n while ret is None:\n if default_option is True:\n s = self.request_string(\"%s (YES/no)\" % msg)\n elif default_option is False:\n s = self.request_string(\"%s (NO/yes)\" % msg)\n else:\n s = self.request_string(\"%s (yes/no)\" % msg)\n if default_option is not None and s == '':\n return default_option\n if s.lower() in ['yes', 'y']:\n ret = True\n elif s.lower() in ['no', 'n']:\n ret = False\n else:\n self.out.error(\"%s is not a valid answer\" % s)\n return ret\n\n def _get_env_password(self, remote_name):\n \"\"\"\n Try CONAN_PASSWORD_REMOTE_NAME or CONAN_PASSWORD or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_PASSWORD_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_PASSWORD\", None)\n if ret:\n self.out.info(\"Got password '******' from environment\")\n return ret\n\n def _get_env_username(self, remote_name):\n \"\"\"\n Try CONAN_LOGIN_USERNAME_REMOTE_NAME or CONAN_LOGIN_USERNAME or return None\n \"\"\"\n remote_name = remote_name.replace(\"-\", \"_\").upper()\n var_name = \"CONAN_LOGIN_USERNAME_%s\" % remote_name\n ret = os.getenv(var_name, None) or os.getenv(\"CONAN_LOGIN_USERNAME\", None)\n\n if ret:\n self.out.info(\"Got username '%s' from environment\" % ret)\n return ret\n", "path": "conans/client/userio.py"}]} | 2,059 | 254 |
gh_patches_debug_44509 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-361 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pickle dump/load is used for handling the signature file
</issue>
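Unpickling data is effectively arbitrary code execution if the file has been tampered with, which defeats the purpose of a signature file. The signatures produced by `sign_all()` (see `nvflare/lighter/utils.py` below) are plain `bytes`, so they can be stored in a format with no code-execution surface instead — for example base64-encoded strings inside JSON, which is the direction the fix below takes. A minimal sketch of that round trip (the helper names here are made up for illustration):

```python
import json
from base64 import b64encode, b64decode

def dump_signatures(signatures, path):
    # bytes -> base64 text so the mapping is JSON-serializable
    encoded = {name: b64encode(sig).decode("utf-8") for name, sig in signatures.items()}
    with open(path, "wt") as f:
        json.dump(encoded, f)

def load_signatures(path):
    with open(path, "rt") as f:
        encoded = json.load(f)
    # back to the raw bytes expected by public_key.verify(...)
    return {name: b64decode(value.encode("utf-8")) for name, value in encoded.items()}
```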
<code>
[start of nvflare/lighter/impl/signature.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import pickle
17
18 from nvflare.lighter.spec import Builder, Study
19 from nvflare.lighter.utils import sign_all
20
21
22 class SignatureBuilder(Builder):
23 """Sign files with rootCA's private key.
24
25 Creates signatures for all the files signed with the root CA for the startup kits so that they
26 can be cryptographically verified to ensure any tampering is detected. This builder writes the signature.pkl file.
27 """
28
29 def build(self, study: Study, ctx: dict):
30 servers = study.get_participants_by_type("server", first_only=False)
31 for server in servers:
32 dest_dir = self.get_kit_dir(server, ctx)
33 root_pri_key = ctx.get("root_pri_key")
34 signatures = sign_all(dest_dir, root_pri_key)
35 pickle.dump(signatures, open(os.path.join(dest_dir, "signature.pkl"), "wb"))
36 for p in study.get_participants_by_type("client", first_only=False):
37 dest_dir = self.get_kit_dir(p, ctx)
38 root_pri_key = ctx.get("root_pri_key")
39 signatures = sign_all(dest_dir, root_pri_key)
40 pickle.dump(signatures, open(os.path.join(dest_dir, "signature.pkl"), "wb"))
41
[end of nvflare/lighter/impl/signature.py]
[start of nvflare/lighter/utils.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import random
17
18 import yaml
19 from cryptography.hazmat.primitives import hashes
20 from cryptography.hazmat.primitives.asymmetric import padding
21
22
23 def generate_password():
24 s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ"
25 passlen = 16
26 p = "".join(random.sample(s, passlen))
27 return p
28
29
30 def sign_all(content_folder, signing_pri_key):
31 signatures = dict()
32 for f in os.listdir(content_folder):
33 path = os.path.join(content_folder, f)
34 if os.path.isfile(path):
35 signature = signing_pri_key.sign(
36 data=open(path, "rb").read(),
37 padding=padding.PSS(
38 mgf=padding.MGF1(hashes.SHA256()),
39 salt_length=padding.PSS.MAX_LENGTH,
40 ),
41 algorithm=hashes.SHA256(),
42 )
43 signatures[f] = signature
44 return signatures
45
46
47 def load_yaml(file_name):
48 return yaml.load(open(file_name, "r"), Loader=yaml.Loader)
49
50
51 def sh_replace(src, mapping_dict):
52 result = src
53 for k, v in mapping_dict.items():
54 result = result.replace("{~~" + k + "~~}", str(v))
55 return result
56
[end of nvflare/lighter/utils.py]
[start of nvflare/fuel/sec/security_content_service.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import json
16 import os
17 import pickle
18 from enum import Enum
19
20 from cryptography import x509
21 from cryptography.exceptions import InvalidSignature
22 from cryptography.hazmat.backends import default_backend
23 from cryptography.hazmat.primitives import hashes
24 from cryptography.hazmat.primitives.asymmetric import padding
25
26
27 class LoadResult(Enum):
28 """Constants for different results when loading secure content."""
29
30 OK = "ok"
31 NOT_MANAGED = "notManaged"
32 NO_SUCH_CONTENT = "noSuchContent"
33 NOT_SIGNED = "notSigned"
34 INVALID_SIGNATURE = "invalidSignature"
35 INVALID_CONTENT = "invalidContent"
36
37
38 class SecurityContentManager(object):
39 def __init__(self, content_folder, signature_filename="signature.pkl", root_cert="rootCA.pem"):
40 """Content manager used by SecurityContentService to load secure content.
41
42 Args:
43 content_folder (str): the folder path that includes signature file
44 signature_filename (str, optional): the signature file (pickled dictionary). Defaults to "signature.pkl".
45 root_cert (str, optional): root CA certificate filename. Defaults to "rootCA.pem".
46 """
47 self.content_folder = content_folder
48 signature_path = os.path.join(self.content_folder, signature_filename)
49 rootCA_cert_path = os.path.join(self.content_folder, root_cert)
50 if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):
51 self.signature = pickle.load(open(signature_path, "rb"))
52 cert = x509.load_pem_x509_certificate(open(rootCA_cert_path, "rb").read(), default_backend())
53 self.public_key = cert.public_key()
54 self.valid_config = True
55 else:
56 self.signature = dict()
57 self.valid_config = False
58
59 def load_content(self, file_under_verification):
60 """Loads the data of the file under verification and verifies that the signature is valid.
61
62 Args:
63 file_under_verification: file to load and verify
64
65 Returns: Tuple of the file data and the LoadResult. File data may be None if the data cannot be loaded.
66
67 """
68 full_path = os.path.join(self.content_folder, file_under_verification)
69 data = None
70 if not os.path.exists(full_path):
71 return data, LoadResult.NO_SUCH_CONTENT
72
73 with open(full_path, "rb") as f:
74 data = f.read()
75 if not data:
76 return data, LoadResult.NO_SUCH_CONTENT
77
78 if self.valid_config and file_under_verification in self.signature:
79 signature = self.signature[file_under_verification]
80 try:
81 self.public_key.verify(
82 signature=signature,
83 data=data,
84 padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),
85 algorithm=hashes.SHA256(),
86 )
87 result = LoadResult.OK
88 except InvalidSignature:
89 result = LoadResult.INVALID_SIGNATURE
90 else:
91 result = LoadResult.NOT_SIGNED
92 return data, result
93
94
95 class SecurityContentService(object):
96 """Uses SecurityContentManager to load secure content."""
97
98 security_content_manager = None
99
100 @staticmethod
101 def initialize(content_folder: str, signature_filename="signature.pkl", root_cert="rootCA.pem"):
102 if SecurityContentService.security_content_manager is None:
103 SecurityContentService.security_content_manager = SecurityContentManager(
104 content_folder, signature_filename, root_cert
105 )
106
107 @staticmethod
108 def load_content(file_under_verification):
109 if not SecurityContentService.security_content_manager:
110 return None, LoadResult.NOT_MANAGED
111
112 return SecurityContentService.security_content_manager.load_content(file_under_verification)
113
114 @staticmethod
115 def load_json(file_under_verification):
116 json_data = None
117
118 data_bytes, result = SecurityContentService.security_content_manager.load_content(file_under_verification)
119
120 if data_bytes:
121 try:
122 data_text = data_bytes.decode("ascii")
123 json_data = json.loads(data_text)
124 except json.JSONDecodeError:
125 return None, LoadResult.INVALID_CONTENT
126
127 return json_data, result
128
[end of nvflare/fuel/sec/security_content_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/fuel/sec/security_content_service.py b/nvflare/fuel/sec/security_content_service.py
--- a/nvflare/fuel/sec/security_content_service.py
+++ b/nvflare/fuel/sec/security_content_service.py
@@ -14,7 +14,7 @@
import json
import os
-import pickle
+from base64 import b64decode
from enum import Enum
from cryptography import x509
@@ -36,19 +36,21 @@
class SecurityContentManager(object):
- def __init__(self, content_folder, signature_filename="signature.pkl", root_cert="rootCA.pem"):
+ def __init__(self, content_folder, signature_filename="signature.json", root_cert="rootCA.pem"):
"""Content manager used by SecurityContentService to load secure content.
Args:
content_folder (str): the folder path that includes signature file
- signature_filename (str, optional): the signature file (pickled dictionary). Defaults to "signature.pkl".
+ signature_filename (str, optional): the signature file (signed dictionary). Defaults to "signature.json".
root_cert (str, optional): root CA certificate filename. Defaults to "rootCA.pem".
"""
self.content_folder = content_folder
signature_path = os.path.join(self.content_folder, signature_filename)
rootCA_cert_path = os.path.join(self.content_folder, root_cert)
if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):
- self.signature = pickle.load(open(signature_path, "rb"))
+ self.signature = json.load(open(signature_path, "rt"))
+ for k in self.signature:
+ self.signature[k] = b64decode(self.signature[k].encode("utf-8"))
cert = x509.load_pem_x509_certificate(open(rootCA_cert_path, "rb").read(), default_backend())
self.public_key = cert.public_key()
self.valid_config = True
@@ -98,7 +100,7 @@
security_content_manager = None
@staticmethod
- def initialize(content_folder: str, signature_filename="signature.pkl", root_cert="rootCA.pem"):
+ def initialize(content_folder: str, signature_filename="signature.json", root_cert="rootCA.pem"):
if SecurityContentService.security_content_manager is None:
SecurityContentService.security_content_manager = SecurityContentManager(
content_folder, signature_filename, root_cert
diff --git a/nvflare/lighter/impl/signature.py b/nvflare/lighter/impl/signature.py
--- a/nvflare/lighter/impl/signature.py
+++ b/nvflare/lighter/impl/signature.py
@@ -12,8 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import json
import os
-import pickle
from nvflare.lighter.spec import Builder, Study
from nvflare.lighter.utils import sign_all
@@ -32,9 +32,9 @@
dest_dir = self.get_kit_dir(server, ctx)
root_pri_key = ctx.get("root_pri_key")
signatures = sign_all(dest_dir, root_pri_key)
- pickle.dump(signatures, open(os.path.join(dest_dir, "signature.pkl"), "wb"))
+ json.dump(signatures, open(os.path.join(dest_dir, "signature.json"), "wt"))
for p in study.get_participants_by_type("client", first_only=False):
dest_dir = self.get_kit_dir(p, ctx)
root_pri_key = ctx.get("root_pri_key")
signatures = sign_all(dest_dir, root_pri_key)
- pickle.dump(signatures, open(os.path.join(dest_dir, "signature.pkl"), "wb"))
+ json.dump(signatures, open(os.path.join(dest_dir, "signature.json"), "wt"))
diff --git a/nvflare/lighter/utils.py b/nvflare/lighter/utils.py
--- a/nvflare/lighter/utils.py
+++ b/nvflare/lighter/utils.py
@@ -14,6 +14,7 @@
import os
import random
+from base64 import b64encode
import yaml
from cryptography.hazmat.primitives import hashes
@@ -40,7 +41,7 @@
),
algorithm=hashes.SHA256(),
)
- signatures[f] = signature
+ signatures[f] = b64encode(signature).decode("utf-8")
return signatures
| {"golden_diff": "diff --git a/nvflare/fuel/sec/security_content_service.py b/nvflare/fuel/sec/security_content_service.py\n--- a/nvflare/fuel/sec/security_content_service.py\n+++ b/nvflare/fuel/sec/security_content_service.py\n@@ -14,7 +14,7 @@\n \n import json\n import os\n-import pickle\n+from base64 import b64decode\n from enum import Enum\n \n from cryptography import x509\n@@ -36,19 +36,21 @@\n \n \n class SecurityContentManager(object):\n- def __init__(self, content_folder, signature_filename=\"signature.pkl\", root_cert=\"rootCA.pem\"):\n+ def __init__(self, content_folder, signature_filename=\"signature.json\", root_cert=\"rootCA.pem\"):\n \"\"\"Content manager used by SecurityContentService to load secure content.\n \n Args:\n content_folder (str): the folder path that includes signature file\n- signature_filename (str, optional): the signature file (pickled dictionary). Defaults to \"signature.pkl\".\n+ signature_filename (str, optional): the signature file (signed dictionary). Defaults to \"signature.json\".\n root_cert (str, optional): root CA certificate filename. Defaults to \"rootCA.pem\".\n \"\"\"\n self.content_folder = content_folder\n signature_path = os.path.join(self.content_folder, signature_filename)\n rootCA_cert_path = os.path.join(self.content_folder, root_cert)\n if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):\n- self.signature = pickle.load(open(signature_path, \"rb\"))\n+ self.signature = json.load(open(signature_path, \"rt\"))\n+ for k in self.signature:\n+ self.signature[k] = b64decode(self.signature[k].encode(\"utf-8\"))\n cert = x509.load_pem_x509_certificate(open(rootCA_cert_path, \"rb\").read(), default_backend())\n self.public_key = cert.public_key()\n self.valid_config = True\n@@ -98,7 +100,7 @@\n security_content_manager = None\n \n @staticmethod\n- def initialize(content_folder: str, signature_filename=\"signature.pkl\", root_cert=\"rootCA.pem\"):\n+ def initialize(content_folder: str, signature_filename=\"signature.json\", root_cert=\"rootCA.pem\"):\n if SecurityContentService.security_content_manager is None:\n SecurityContentService.security_content_manager = SecurityContentManager(\n content_folder, signature_filename, root_cert\ndiff --git a/nvflare/lighter/impl/signature.py b/nvflare/lighter/impl/signature.py\n--- a/nvflare/lighter/impl/signature.py\n+++ b/nvflare/lighter/impl/signature.py\n@@ -12,8 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import json\n import os\n-import pickle\n \n from nvflare.lighter.spec import Builder, Study\n from nvflare.lighter.utils import sign_all\n@@ -32,9 +32,9 @@\n dest_dir = self.get_kit_dir(server, ctx)\n root_pri_key = ctx.get(\"root_pri_key\")\n signatures = sign_all(dest_dir, root_pri_key)\n- pickle.dump(signatures, open(os.path.join(dest_dir, \"signature.pkl\"), \"wb\"))\n+ json.dump(signatures, open(os.path.join(dest_dir, \"signature.json\"), \"wt\"))\n for p in study.get_participants_by_type(\"client\", first_only=False):\n dest_dir = self.get_kit_dir(p, ctx)\n root_pri_key = ctx.get(\"root_pri_key\")\n signatures = sign_all(dest_dir, root_pri_key)\n- pickle.dump(signatures, open(os.path.join(dest_dir, \"signature.pkl\"), \"wb\"))\n+ json.dump(signatures, open(os.path.join(dest_dir, \"signature.json\"), \"wt\"))\ndiff --git a/nvflare/lighter/utils.py b/nvflare/lighter/utils.py\n--- a/nvflare/lighter/utils.py\n+++ b/nvflare/lighter/utils.py\n@@ -14,6 +14,7 @@\n \n import os\n import random\n+from base64 
import b64encode\n \n import yaml\n from cryptography.hazmat.primitives import hashes\n@@ -40,7 +41,7 @@\n ),\n algorithm=hashes.SHA256(),\n )\n- signatures[f] = signature\n+ signatures[f] = b64encode(signature).decode(\"utf-8\")\n return signatures\n", "issue": "Pickle dump/load is used on handling the signature file\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport pickle\n\nfrom nvflare.lighter.spec import Builder, Study\nfrom nvflare.lighter.utils import sign_all\n\n\nclass SignatureBuilder(Builder):\n \"\"\"Sign files with rootCA's private key.\n\n Creates signatures for all the files signed with the root CA for the startup kits so that they\n can be cryptographically verified to ensure any tampering is detected. This builder writes the signature.pkl file.\n \"\"\"\n\n def build(self, study: Study, ctx: dict):\n servers = study.get_participants_by_type(\"server\", first_only=False)\n for server in servers:\n dest_dir = self.get_kit_dir(server, ctx)\n root_pri_key = ctx.get(\"root_pri_key\")\n signatures = sign_all(dest_dir, root_pri_key)\n pickle.dump(signatures, open(os.path.join(dest_dir, \"signature.pkl\"), \"wb\"))\n for p in study.get_participants_by_type(\"client\", first_only=False):\n dest_dir = self.get_kit_dir(p, ctx)\n root_pri_key = ctx.get(\"root_pri_key\")\n signatures = sign_all(dest_dir, root_pri_key)\n pickle.dump(signatures, open(os.path.join(dest_dir, \"signature.pkl\"), \"wb\"))\n", "path": "nvflare/lighter/impl/signature.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport random\n\nimport yaml\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\ndef generate_password():\n s = \"abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n passlen = 16\n p = \"\".join(random.sample(s, passlen))\n return p\n\n\ndef sign_all(content_folder, signing_pri_key):\n signatures = dict()\n for f in os.listdir(content_folder):\n path = os.path.join(content_folder, f)\n if os.path.isfile(path):\n signature = signing_pri_key.sign(\n data=open(path, \"rb\").read(),\n padding=padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH,\n ),\n algorithm=hashes.SHA256(),\n )\n signatures[f] = signature\n return signatures\n\n\ndef load_yaml(file_name):\n return yaml.load(open(file_name, \"r\"), Loader=yaml.Loader)\n\n\ndef sh_replace(src, mapping_dict):\n result = src\n for k, v in mapping_dict.items():\n result = result.replace(\"{~~\" + k + \"~~}\", str(v))\n return result\n", "path": "nvflare/lighter/utils.py"}, {"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport pickle\nfrom enum import Enum\n\nfrom cryptography import x509\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import padding\n\n\nclass LoadResult(Enum):\n \"\"\"Constants for different results when loading secure content.\"\"\"\n\n OK = \"ok\"\n NOT_MANAGED = \"notManaged\"\n NO_SUCH_CONTENT = \"noSuchContent\"\n NOT_SIGNED = \"notSigned\"\n INVALID_SIGNATURE = \"invalidSignature\"\n INVALID_CONTENT = \"invalidContent\"\n\n\nclass SecurityContentManager(object):\n def __init__(self, content_folder, signature_filename=\"signature.pkl\", root_cert=\"rootCA.pem\"):\n \"\"\"Content manager used by SecurityContentService to load secure content.\n\n Args:\n content_folder (str): the folder path that includes signature file\n signature_filename (str, optional): the signature file (pickled dictionary). Defaults to \"signature.pkl\".\n root_cert (str, optional): root CA certificate filename. 
Defaults to \"rootCA.pem\".\n \"\"\"\n self.content_folder = content_folder\n signature_path = os.path.join(self.content_folder, signature_filename)\n rootCA_cert_path = os.path.join(self.content_folder, root_cert)\n if os.path.exists(signature_path) and os.path.exists(rootCA_cert_path):\n self.signature = pickle.load(open(signature_path, \"rb\"))\n cert = x509.load_pem_x509_certificate(open(rootCA_cert_path, \"rb\").read(), default_backend())\n self.public_key = cert.public_key()\n self.valid_config = True\n else:\n self.signature = dict()\n self.valid_config = False\n\n def load_content(self, file_under_verification):\n \"\"\"Loads the data of the file under verification and verifies that the signature is valid.\n\n Args:\n file_under_verification: file to load and verify\n\n Returns: Tuple of the file data and the LoadResult. File data may be None if the data cannot be loaded.\n\n \"\"\"\n full_path = os.path.join(self.content_folder, file_under_verification)\n data = None\n if not os.path.exists(full_path):\n return data, LoadResult.NO_SUCH_CONTENT\n\n with open(full_path, \"rb\") as f:\n data = f.read()\n if not data:\n return data, LoadResult.NO_SUCH_CONTENT\n\n if self.valid_config and file_under_verification in self.signature:\n signature = self.signature[file_under_verification]\n try:\n self.public_key.verify(\n signature=signature,\n data=data,\n padding=padding.PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH),\n algorithm=hashes.SHA256(),\n )\n result = LoadResult.OK\n except InvalidSignature:\n result = LoadResult.INVALID_SIGNATURE\n else:\n result = LoadResult.NOT_SIGNED\n return data, result\n\n\nclass SecurityContentService(object):\n \"\"\"Uses SecurityContentManager to load secure content.\"\"\"\n\n security_content_manager = None\n\n @staticmethod\n def initialize(content_folder: str, signature_filename=\"signature.pkl\", root_cert=\"rootCA.pem\"):\n if SecurityContentService.security_content_manager is None:\n SecurityContentService.security_content_manager = SecurityContentManager(\n content_folder, signature_filename, root_cert\n )\n\n @staticmethod\n def load_content(file_under_verification):\n if not SecurityContentService.security_content_manager:\n return None, LoadResult.NOT_MANAGED\n\n return SecurityContentService.security_content_manager.load_content(file_under_verification)\n\n @staticmethod\n def load_json(file_under_verification):\n json_data = None\n\n data_bytes, result = SecurityContentService.security_content_manager.load_content(file_under_verification)\n\n if data_bytes:\n try:\n data_text = data_bytes.decode(\"ascii\")\n json_data = json.loads(data_text)\n except json.JSONDecodeError:\n return None, LoadResult.INVALID_CONTENT\n\n return json_data, result\n", "path": "nvflare/fuel/sec/security_content_service.py"}]} | 2,880 | 965 |
gh_patches_debug_12473 | rasdani/github-patches | git_diff | urllib3__urllib3-2216 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is HTTPHeaderDict a public API to make requests?
`HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers.
* Should it be documented?
* How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right.
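For concreteness, here is a small sketch of what request-side usage looks like today. This is only illustrative: the two import locations are exactly the ones mentioned above, and whether `PoolManager.request` is meant to accept an `HTTPHeaderDict` instead of a plain dict is part of what I am asking.

```python
# Illustrative only -- neither import path below is documented as public today.
from urllib3._collections import HTTPHeaderDict
# from urllib3.response import HTTPHeaderDict  # also works, but reads oddly for requests

import urllib3

headers = HTTPHeaderDict()
headers.add("Accept", "application/json")
headers.add("X-Trace", "abc")
headers.add("X-Trace", "def")  # repeated header names are the reason to prefer this over a dict

http = urllib3.PoolManager()
resp = http.request("GET", "https://example.com/", headers=headers)
print(resp.status)
```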
</issue>
<code>
[start of src/urllib3/__init__.py]
1 """
2 Python HTTP library with thread-safe connection pooling, file post support, user friendly, and more
3 """
4
5 # Set default logging handler to avoid "No handler found" warnings.
6 import logging
7 import warnings
8 from logging import NullHandler
9
10 from . import exceptions
11 from ._version import __version__
12 from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
13 from .filepost import encode_multipart_formdata
14 from .poolmanager import PoolManager, ProxyManager, proxy_from_url
15 from .response import HTTPResponse
16 from .util.request import make_headers
17 from .util.retry import Retry
18 from .util.timeout import Timeout
19
20 __author__ = "Andrey Petrov ([email protected])"
21 __license__ = "MIT"
22 __version__ = __version__
23
24 __all__ = (
25 "HTTPConnectionPool",
26 "HTTPSConnectionPool",
27 "PoolManager",
28 "ProxyManager",
29 "HTTPResponse",
30 "Retry",
31 "Timeout",
32 "add_stderr_logger",
33 "connection_from_url",
34 "disable_warnings",
35 "encode_multipart_formdata",
36 "make_headers",
37 "proxy_from_url",
38 "request",
39 )
40
41 logging.getLogger(__name__).addHandler(NullHandler())
42
43
44 def add_stderr_logger(level=logging.DEBUG):
45 """
46 Helper for quickly adding a StreamHandler to the logger. Useful for
47 debugging.
48
49 Returns the handler after adding it.
50 """
51 # This method needs to be in this __init__.py to get the __name__ correct
52 # even if urllib3 is vendored within another package.
53 logger = logging.getLogger(__name__)
54 handler = logging.StreamHandler()
55 handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
56 logger.addHandler(handler)
57 logger.setLevel(level)
58 logger.debug("Added a stderr logging handler to logger: %s", __name__)
59 return handler
60
61
62 # ... Clean up.
63 del NullHandler
64
65
66 # All warning filters *must* be appended unless you're really certain that they
67 # shouldn't be: otherwise, it's very hard for users to use most Python
68 # mechanisms to silence them.
69 # SecurityWarning's always go off by default.
70 warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
71 # InsecurePlatformWarning's don't vary between requests, so we keep it default.
72 warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
73 # SNIMissingWarnings should go off only once.
74 warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
75
76
77 def disable_warnings(category=exceptions.HTTPWarning):
78 """
79 Helper for quickly disabling all urllib3 warnings.
80 """
81 warnings.simplefilter("ignore", category)
82
83
84 _DEFAULT_POOL = PoolManager()
85
86
87 def request(method, url, fields=None, headers=None):
88 """
89 A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.
90 Therefore, its side effects could be shared across dependencies relying on it.
91 To avoid side effects create a new ``PoolManager`` instance and use it instead.
92 The method does not accept low-level ``**urlopen_kw`` keyword arguments.
93 """
94
95 return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)
96
[end of src/urllib3/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py
--- a/src/urllib3/__init__.py
+++ b/src/urllib3/__init__.py
@@ -8,6 +8,7 @@
from logging import NullHandler
from . import exceptions
+from ._collections import HTTPHeaderDict
from ._version import __version__
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
from .filepost import encode_multipart_formdata
@@ -23,6 +24,7 @@
__all__ = (
"HTTPConnectionPool",
+ "HTTPHeaderDict",
"HTTPSConnectionPool",
"PoolManager",
"ProxyManager",
| {"golden_diff": "diff --git a/src/urllib3/__init__.py b/src/urllib3/__init__.py\n--- a/src/urllib3/__init__.py\n+++ b/src/urllib3/__init__.py\n@@ -8,6 +8,7 @@\n from logging import NullHandler\n \n from . import exceptions\n+from ._collections import HTTPHeaderDict\n from ._version import __version__\n from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\n from .filepost import encode_multipart_formdata\n@@ -23,6 +24,7 @@\n \n __all__ = (\n \"HTTPConnectionPool\",\n+ \"HTTPHeaderDict\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n", "issue": "Is HTTPHeaderDict a public API to make requests?\n`HTTPHeaderDict` was initially designed to handle response headers, and is not documented for request headers.\r\n\r\n * Should it be documented?\r\n * How should it be imported? The current options are `from urllib3.response import HTTPHeaderDict` and `from urllib3._collections import HTTPHeaderDict`, and they don't feel right.\n", "before_files": [{"content": "\"\"\"\nPython HTTP library with thread-safe connection pooling, file post support, user friendly, and more\n\"\"\"\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nimport logging\nimport warnings\nfrom logging import NullHandler\n\nfrom . import exceptions\nfrom ._version import __version__\nfrom .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url\nfrom .filepost import encode_multipart_formdata\nfrom .poolmanager import PoolManager, ProxyManager, proxy_from_url\nfrom .response import HTTPResponse\nfrom .util.request import make_headers\nfrom .util.retry import Retry\nfrom .util.timeout import Timeout\n\n__author__ = \"Andrey Petrov ([email protected])\"\n__license__ = \"MIT\"\n__version__ = __version__\n\n__all__ = (\n \"HTTPConnectionPool\",\n \"HTTPSConnectionPool\",\n \"PoolManager\",\n \"ProxyManager\",\n \"HTTPResponse\",\n \"Retry\",\n \"Timeout\",\n \"add_stderr_logger\",\n \"connection_from_url\",\n \"disable_warnings\",\n \"encode_multipart_formdata\",\n \"make_headers\",\n \"proxy_from_url\",\n \"request\",\n)\n\nlogging.getLogger(__name__).addHandler(NullHandler())\n\n\ndef add_stderr_logger(level=logging.DEBUG):\n \"\"\"\n Helper for quickly adding a StreamHandler to the logger. Useful for\n debugging.\n\n Returns the handler after adding it.\n \"\"\"\n # This method needs to be in this __init__.py to get the __name__ correct\n # even if urllib3 is vendored within another package.\n logger = logging.getLogger(__name__)\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\"))\n logger.addHandler(handler)\n logger.setLevel(level)\n logger.debug(\"Added a stderr logging handler to logger: %s\", __name__)\n return handler\n\n\n# ... 
Clean up.\ndel NullHandler\n\n\n# All warning filters *must* be appended unless you're really certain that they\n# shouldn't be: otherwise, it's very hard for users to use most Python\n# mechanisms to silence them.\n# SecurityWarning's always go off by default.\nwarnings.simplefilter(\"always\", exceptions.SecurityWarning, append=True)\n# InsecurePlatformWarning's don't vary between requests, so we keep it default.\nwarnings.simplefilter(\"default\", exceptions.InsecurePlatformWarning, append=True)\n# SNIMissingWarnings should go off only once.\nwarnings.simplefilter(\"default\", exceptions.SNIMissingWarning, append=True)\n\n\ndef disable_warnings(category=exceptions.HTTPWarning):\n \"\"\"\n Helper for quickly disabling all urllib3 warnings.\n \"\"\"\n warnings.simplefilter(\"ignore\", category)\n\n\n_DEFAULT_POOL = PoolManager()\n\n\ndef request(method, url, fields=None, headers=None):\n \"\"\"\n A convenience, top-level request method. It uses a module-global ``PoolManager`` instance.\n Therefore, its side effects could be shared across dependencies relying on it.\n To avoid side effects create a new ``PoolManager`` instance and use it instead.\n The method does not accept low-level ``**urlopen_kw`` keyword arguments.\n \"\"\"\n\n return _DEFAULT_POOL.request(method, url, fields=fields, headers=headers)\n", "path": "src/urllib3/__init__.py"}]} | 1,491 | 159 |
gh_patches_debug_24919 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[DOC]: Clarify More about Datasets for Chat Training Stage 3
### 📚 The doc issue
As raised by some users, the current guide to training Stage 3 is not yet clear about the required datasets. One user has composed a [guide](https://juejin.cn/post/7222250219570757690) which still mixes up the datasets.
Some clarification is needed in this [section](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat/examples#stage3---training-model-using-prompts-with-rl), regarding how to download and set `prompt_path` and `pretrain_dataset`.
Below is an example user query I received on Slack.
```
torchrun --standalone --nproc_per_node=4 train_prompts.py \
--pretrain "bigscience/bloom-560m" \
--model 'bloom' \
--strategy colossalai_zero2 \
--prompt_path /data/chenhao/train/ColossalAI/prompt_dataset/data.json \ -------Where is the data.json data
--pretrain_dataset /data/chenhao/train/ColossalAI/pretrain_dataset/data.json \ -----------Where is the data.json data
--rm_pretrain /data/chenhao/train/ColossalAI/Coati-7B \
--rm_path /data/chenhao/train/ColossalAI/rmstatic.pt \
--train_batch_size 4 \
--experience_batch_size 4 \
--max_epochs 1 \
--num_episodes 1
```
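While the docs are being clarified, below is a small, hedged sanity check for those two files. The paths are placeholders for wherever you saved them, and it assumes each file is a JSON list of records; it deliberately does not assert any field names, since the authoritative schema is whatever `PromptDataset` and `SupervisedDataset` in `coati.dataset` expect.

```python
import json

# Placeholder paths -- point these at the files passed via
# --prompt_path and --pretrain_dataset in the command above.
paths = {
    "prompt": "prompt_dataset/data.json",
    "pretrain": "pretrain_dataset/data.json",
}

for name, path in paths.items():
    with open(path) as f:
        records = json.load(f)
    print(f"{name}: {len(records)} records; first record keys: {sorted(records[0].keys())}")
```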
</issue>
<code>
[start of applications/Chat/examples/train_prompts.py]
1 import argparse
2
3 import pandas as pd
4 import torch
5 import torch.distributed as dist
6 from coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset
7 from coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic
8 from coati.models.gpt import GPTRM, GPTActor, GPTCritic
9 from coati.models.llama import LlamaActor, LlamaCritic, LlamaRM
10 from coati.models.opt import OPTRM, OPTActor, OPTCritic
11 from coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM
12 from coati.trainer import PPOTrainer
13 from coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy
14 from coati.utils import prepare_llama_tokenizer_and_embedding
15 from torch.optim import Adam
16 from torch.utils.data import DataLoader
17 from torch.utils.data.distributed import DistributedSampler
18 from transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer
19
20 from colossalai.nn.optimizer import HybridAdam
21
22
23 def main(args):
24 # configure strategy
25 if args.strategy == 'naive':
26 strategy = NaiveStrategy()
27 elif args.strategy == 'ddp':
28 strategy = DDPStrategy()
29 elif args.strategy == 'colossalai_gemini':
30 strategy = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5)
31 elif args.strategy == 'colossalai_zero2':
32 strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')
33 else:
34 raise ValueError(f'Unsupported strategy "{args.strategy}"')
35
36 if args.rm_path is not None:
37 state_dict = torch.load(args.rm_path, map_location='cpu')
38
39 # configure model
40 if args.model == 'gpt2':
41 initial_model = GPTActor(pretrained=args.pretrain)
42 elif args.model == 'bloom':
43 initial_model = BLOOMActor(pretrained=args.pretrain)
44 elif args.model == 'opt':
45 initial_model = OPTActor(pretrained=args.pretrain)
46 elif args.model == 'llama':
47 initial_model = LlamaActor(pretrained=args.pretrain)
48 elif args.model == 'roberta':
49 initial_model = RoBERTaActor(pretrained=args.pretrain)
50 else:
51 raise ValueError(f'Unsupported actor model "{args.model}"')
52
53 if args.rm_model == None:
54 rm_model_name = args.model
55 else:
56 rm_model_name = args.rm_model
57
58 if rm_model_name == 'gpt2':
59 reward_model = GPTRM(pretrained=args.rm_pretrain)
60 elif rm_model_name == 'bloom':
61 reward_model = BLOOMRM(pretrained=args.rm_pretrain)
62 elif rm_model_name == 'opt':
63 reward_model = OPTRM(pretrained=args.rm_pretrain)
64 elif rm_model_name == 'llama':
65 reward_model = LlamaRM(pretrained=args.rm_pretrain)
66 elif rm_model_name == 'roberta':
67 reward_model = RoBERTaRM(pretrained=args.rm_pretrain)
68 else:
69 raise ValueError(f'Unsupported reward model "{rm_model_name}"')
70
71 if args.rm_path is not None:
72 reward_model.load_state_dict(state_dict)
73
74 initial_model.to(torch.float16).to(torch.cuda.current_device())
75 reward_model.to(torch.float16).to(torch.cuda.current_device())
76
77 with strategy.model_init_context():
78 if args.model == 'gpt2':
79 actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
80 elif args.model == 'bloom':
81 actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
82 elif args.model == 'opt':
83 actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
84 elif args.model == 'llama':
85 actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
86 elif args.model == 'roberta':
87 actor = RoBERTaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)
88 else:
89 raise ValueError(f'Unsupported actor model "{args.model}"')
90
91 if rm_model_name == 'gpt2':
92 critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
93 elif rm_model_name == 'bloom':
94 critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
95 elif rm_model_name == 'opt':
96 critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
97 elif rm_model_name == 'llama':
98 critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
99 elif rm_model_name == 'roberta':
100 critic = RoBERTaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)
101 else:
102 raise ValueError(f'Unsupported reward model "{rm_model_name}"')
103
104 if args.rm_path is not None:
105 critic.load_state_dict(state_dict)
106 del state_dict
107
108 if args.strategy != 'colossalai_gemini':
109 critic.to(torch.float16).to(torch.cuda.current_device())
110 actor.to(torch.float16).to(torch.cuda.current_device())
111
112 # configure optimizer
113 if args.strategy.startswith('colossalai'):
114 actor_optim = HybridAdam(actor.parameters(), lr=1e-7)
115 critic_optim = HybridAdam(critic.parameters(), lr=1e-7)
116 else:
117 actor_optim = Adam(actor.parameters(), lr=1e-7)
118 critic_optim = Adam(critic.parameters(), lr=1e-7)
119
120 # configure tokenizer
121 if args.model == 'gpt2':
122 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
123 elif args.model == 'bloom':
124 tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')
125 elif args.model == 'opt':
126 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
127 elif args.model == 'llama':
128 tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)
129 tokenizer.eos_token = '<\s>'
130 elif args.model == 'roberta':
131 tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
132 else:
133 raise ValueError(f'Unsupported model "{args.model}"')
134
135 if args.model == 'llama':
136 tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, actor)
137 else:
138 tokenizer.pad_token = tokenizer.eos_token
139
140 data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
141
142 prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)
143 if dist.is_initialized() and dist.get_world_size() > 1:
144 prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
145 else:
146 prompt_sampler = None
147 prompt_dataloader = DataLoader(prompt_dataset,
148 shuffle=(prompt_sampler is None),
149 sampler=prompt_sampler,
150 batch_size=args.experience_batch_size)
151
152 pretrain_dataset = SupervisedDataset(tokenizer=tokenizer,
153 data_path=args.pretrain_dataset,
154 max_datasets_size=16384,
155 max_length=args.max_input_len)
156 if dist.is_initialized() and dist.get_world_size() > 1:
157 pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)
158 else:
159 pretrain_sampler = None
160 pretrain_dataloader = DataLoader(pretrain_dataset,
161 shuffle=(pretrain_sampler is None),
162 sampler=pretrain_sampler,
163 batch_size=args.ptx_batch_size,
164 collate_fn=data_collator)
165
166 (actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))
167
168 # configure trainer
169 trainer = PPOTrainer(
170 strategy,
171 actor,
172 critic,
173 reward_model,
174 initial_model,
175 actor_optim,
176 critic_optim,
177 kl_coef=args.kl_coef,
178 ptx_coef=args.ptx_coef,
179 max_epochs=args.max_epochs,
180 train_batch_size=args.train_batch_size,
181 max_length=args.max_seq_len,
182 use_cache=True,
183 do_sample=True,
184 temperature=1.0,
185 top_k=50,
186 pad_token_id=tokenizer.pad_token_id,
187 eos_token_id=tokenizer.eos_token_id,
188 )
189
190 trainer.fit(prompt_dataloader=prompt_dataloader,
191 pretrain_dataloader=pretrain_dataloader,
192 num_episodes=args.num_episodes,
193 max_timesteps=args.max_timesteps,
194 update_timesteps=args.update_timesteps)
195
196 # save model checkpoint after fitting
197 trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)
198 # save optimizer checkpoint on all ranks
199 if args.need_optim_ckpt:
200 strategy.save_optimizer(actor_optim,
201 'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),
202 only_rank0=False)
203
204
205 if __name__ == '__main__':
206 parser = argparse.ArgumentParser()
207 parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')
208 parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')
209 parser.add_argument('--strategy',
210 choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],
211 default='naive',
212 help='strategy to use')
213 parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])
214 parser.add_argument('--pretrain', type=str, default=None)
215 parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])
216 parser.add_argument('--rm_path', type=str, default=None)
217 parser.add_argument('--rm_pretrain', type=str, default=None)
218 parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')
219 parser.add_argument('--need_optim_ckpt', type=bool, default=False)
220 parser.add_argument('--num_episodes', type=int, default=10)
221 parser.add_argument('--max_timesteps', type=int, default=10)
222 parser.add_argument('--update_timesteps', type=int, default=10)
223 parser.add_argument('--max_epochs', type=int, default=5)
224 parser.add_argument('--train_batch_size', type=int, default=8)
225 parser.add_argument('--ptx_batch_size', type=int, default=1)
226 parser.add_argument('--experience_batch_size', type=int, default=8)
227 parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
228 parser.add_argument('--kl_coef', type=float, default=0.1)
229 parser.add_argument('--ptx_coef', type=float, default=0.9)
230 parser.add_argument('--max_input_len', type=int, default=96)
231 parser.add_argument('--max_seq_len', type=int, default=128)
232 args = parser.parse_args()
233 main(args)
234
[end of applications/Chat/examples/train_prompts.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/Chat/examples/train_prompts.py b/applications/Chat/examples/train_prompts.py
--- a/applications/Chat/examples/train_prompts.py
+++ b/applications/Chat/examples/train_prompts.py
@@ -139,7 +139,7 @@
data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)
- prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)
+ prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_dataset, max_datasets_size=16384)
if dist.is_initialized() and dist.get_world_size() > 1:
prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)
else:
@@ -204,7 +204,7 @@
if __name__ == '__main__':
parser = argparse.ArgumentParser()
- parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')
+ parser.add_argument('--prompt_dataset', type=str, default=None, help='path to the prompt dataset')
parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')
parser.add_argument('--strategy',
choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],
| {"golden_diff": "diff --git a/applications/Chat/examples/train_prompts.py b/applications/Chat/examples/train_prompts.py\n--- a/applications/Chat/examples/train_prompts.py\n+++ b/applications/Chat/examples/train_prompts.py\n@@ -139,7 +139,7 @@\n \n data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)\n \n- prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)\n+ prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_dataset, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)\n else:\n@@ -204,7 +204,7 @@\n \n if __name__ == '__main__':\n parser = argparse.ArgumentParser()\n- parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')\n+ parser.add_argument('--prompt_dataset', type=str, default=None, help='path to the prompt dataset')\n parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')\n parser.add_argument('--strategy',\n choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],\n", "issue": "[DOC]: Clarify More about Datasets for Chat Training Stage 3\n### \ud83d\udcda The doc issue\n\nAs raised by some users, the current guide to training Stages 3 regarding the required datasets is yet clear. Some user has composed a [guide](https://juejin.cn/post/7222250219570757690) which still confuses the datasets. \r\n\r\nSome clarification is needed in this [section](https://github.com/hpcaitech/ColossalAI/tree/main/applications/Chat/examples#stage3---training-model-using-prompts-with-rl), regarding how to download and set `prompt_path` and `pretrain_dataset`.\r\n\r\nBelow is an example user query I received on Slack.\r\n\r\n```\r\ntorchrun --standalone --nproc_per_node=4 train_prompts.py \\\r\n --pretrain \"bigscience/bloom-560m\" \\\r\n --model 'bloom' \\\r\n --strategy colossalai_zero2 \\\r\n --prompt_path /data/chenhao/train/ColossalAI/prompt_dataset/data.json \\ -------Where is the data.json data\r\n --pretrain_dataset /data/chenhao/train/ColossalAI/pretrain_dataset/data.json \\ -----------Where is the data.json data\r\n --rm_pretrain /data/chenhao/train/ColossalAI/Coati-7B \\\r\n --rm_path /data/chenhao/train/ColossalAI/rmstatic.pt \\\r\n --train_batch_size 4 \\\r\n --experience_batch_size 4 \\\r\n --max_epochs 1 \\\r\n --num_episodes 1\r\n```\r\n\r\n\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\n\nimport pandas as pd\nimport torch\nimport torch.distributed as dist\nfrom coati.dataset import DataCollatorForSupervisedDataset, PromptDataset, SupervisedDataset\nfrom coati.models.bloom import BLOOMRM, BLOOMActor, BLOOMCritic\nfrom coati.models.gpt import GPTRM, GPTActor, GPTCritic\nfrom coati.models.llama import LlamaActor, LlamaCritic, LlamaRM\nfrom coati.models.opt import OPTRM, OPTActor, OPTCritic\nfrom coati.models.roberta import RoBERTaActor, RoBERTaCritic, RoBERTaRM\nfrom coati.trainer import PPOTrainer\nfrom coati.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy\nfrom coati.utils import prepare_llama_tokenizer_and_embedding\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\nfrom transformers import AutoTokenizer, BloomTokenizerFast, GPT2Tokenizer, LlamaTokenizer, RobertaTokenizer\n\nfrom 
colossalai.nn.optimizer import HybridAdam\n\n\ndef main(args):\n # configure strategy\n if args.strategy == 'naive':\n strategy = NaiveStrategy()\n elif args.strategy == 'ddp':\n strategy = DDPStrategy()\n elif args.strategy == 'colossalai_gemini':\n strategy = ColossalAIStrategy(stage=3, placement_policy='cuda', initial_scale=2**5)\n elif args.strategy == 'colossalai_zero2':\n strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')\n else:\n raise ValueError(f'Unsupported strategy \"{args.strategy}\"')\n\n if args.rm_path is not None:\n state_dict = torch.load(args.rm_path, map_location='cpu')\n\n # configure model\n if args.model == 'gpt2':\n initial_model = GPTActor(pretrained=args.pretrain)\n elif args.model == 'bloom':\n initial_model = BLOOMActor(pretrained=args.pretrain)\n elif args.model == 'opt':\n initial_model = OPTActor(pretrained=args.pretrain)\n elif args.model == 'llama':\n initial_model = LlamaActor(pretrained=args.pretrain)\n elif args.model == 'roberta':\n initial_model = RoBERTaActor(pretrained=args.pretrain)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if args.rm_model == None:\n rm_model_name = args.model\n else:\n rm_model_name = args.rm_model\n\n if rm_model_name == 'gpt2':\n reward_model = GPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'bloom':\n reward_model = BLOOMRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'opt':\n reward_model = OPTRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'llama':\n reward_model = LlamaRM(pretrained=args.rm_pretrain)\n elif rm_model_name == 'roberta':\n reward_model = RoBERTaRM(pretrained=args.rm_pretrain)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n reward_model.load_state_dict(state_dict)\n\n initial_model.to(torch.float16).to(torch.cuda.current_device())\n reward_model.to(torch.float16).to(torch.cuda.current_device())\n\n with strategy.model_init_context():\n if args.model == 'gpt2':\n actor = GPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'bloom':\n actor = BLOOMActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'opt':\n actor = OPTActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'llama':\n actor = LlamaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n elif args.model == 'roberta':\n actor = RoBERTaActor(pretrained=args.pretrain, lora_rank=args.lora_rank)\n else:\n raise ValueError(f'Unsupported actor model \"{args.model}\"')\n\n if rm_model_name == 'gpt2':\n critic = GPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'bloom':\n critic = BLOOMCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'opt':\n critic = OPTCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'llama':\n critic = LlamaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n elif rm_model_name == 'roberta':\n critic = RoBERTaCritic(pretrained=args.rm_pretrain, lora_rank=args.lora_rank, use_action_mask=True)\n else:\n raise ValueError(f'Unsupported reward model \"{rm_model_name}\"')\n\n if args.rm_path is not None:\n critic.load_state_dict(state_dict)\n del state_dict\n\n if args.strategy != 'colossalai_gemini':\n critic.to(torch.float16).to(torch.cuda.current_device())\n actor.to(torch.float16).to(torch.cuda.current_device())\n\n # 
configure optimizer\n if args.strategy.startswith('colossalai'):\n actor_optim = HybridAdam(actor.parameters(), lr=1e-7)\n critic_optim = HybridAdam(critic.parameters(), lr=1e-7)\n else:\n actor_optim = Adam(actor.parameters(), lr=1e-7)\n critic_optim = Adam(critic.parameters(), lr=1e-7)\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n elif args.model == 'bloom':\n tokenizer = BloomTokenizerFast.from_pretrained('bigscience/bloom-560m')\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n elif args.model == 'llama':\n tokenizer = LlamaTokenizer.from_pretrained(args.pretrain)\n tokenizer.eos_token = '<\\s>'\n elif args.model == 'roberta':\n tokenizer = RobertaTokenizer.from_pretrained(\"roberta-base\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n if args.model == 'llama':\n tokenizer = prepare_llama_tokenizer_and_embedding(tokenizer, actor)\n else:\n tokenizer.pad_token = tokenizer.eos_token\n\n data_collator = DataCollatorForSupervisedDataset(tokenizer=tokenizer)\n\n prompt_dataset = PromptDataset(tokenizer=tokenizer, data_path=args.prompt_path, max_datasets_size=16384)\n if dist.is_initialized() and dist.get_world_size() > 1:\n prompt_sampler = DistributedSampler(prompt_dataset, shuffle=True, seed=42, drop_last=True)\n else:\n prompt_sampler = None\n prompt_dataloader = DataLoader(prompt_dataset,\n shuffle=(prompt_sampler is None),\n sampler=prompt_sampler,\n batch_size=args.experience_batch_size)\n\n pretrain_dataset = SupervisedDataset(tokenizer=tokenizer,\n data_path=args.pretrain_dataset,\n max_datasets_size=16384,\n max_length=args.max_input_len)\n if dist.is_initialized() and dist.get_world_size() > 1:\n pretrain_sampler = DistributedSampler(pretrain_dataset, shuffle=True, seed=42, drop_last=True)\n else:\n pretrain_sampler = None\n pretrain_dataloader = DataLoader(pretrain_dataset,\n shuffle=(pretrain_sampler is None),\n sampler=pretrain_sampler,\n batch_size=args.ptx_batch_size,\n collate_fn=data_collator)\n\n (actor, actor_optim), (critic, critic_optim) = strategy.prepare((actor, actor_optim), (critic, critic_optim))\n\n # configure trainer\n trainer = PPOTrainer(\n strategy,\n actor,\n critic,\n reward_model,\n initial_model,\n actor_optim,\n critic_optim,\n kl_coef=args.kl_coef,\n ptx_coef=args.ptx_coef,\n max_epochs=args.max_epochs,\n train_batch_size=args.train_batch_size,\n max_length=args.max_seq_len,\n use_cache=True,\n do_sample=True,\n temperature=1.0,\n top_k=50,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=tokenizer.eos_token_id,\n )\n\n trainer.fit(prompt_dataloader=prompt_dataloader,\n pretrain_dataloader=pretrain_dataloader,\n num_episodes=args.num_episodes,\n max_timesteps=args.max_timesteps,\n update_timesteps=args.update_timesteps)\n\n # save model checkpoint after fitting\n trainer.save_model(args.save_path, only_rank0=True, tokenizer=tokenizer)\n # save optimizer checkpoint on all ranks\n if args.need_optim_ckpt:\n strategy.save_optimizer(actor_optim,\n 'actor_optim_checkpoint_prompts_%d.pt' % (torch.cuda.current_device()),\n only_rank0=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--prompt_path', type=str, default=None, help='path to the prompt dataset')\n parser.add_argument('--pretrain_dataset', type=str, default=None, help='path to the pretrained dataset')\n parser.add_argument('--strategy',\n choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],\n 
default='naive',\n help='strategy to use')\n parser.add_argument('--model', default='gpt2', choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--rm_model', default=None, choices=['gpt2', 'bloom', 'opt', 'llama', 'roberta'])\n parser.add_argument('--rm_path', type=str, default=None)\n parser.add_argument('--rm_pretrain', type=str, default=None)\n parser.add_argument('--save_path', type=str, default='actor_checkpoint_prompts')\n parser.add_argument('--need_optim_ckpt', type=bool, default=False)\n parser.add_argument('--num_episodes', type=int, default=10)\n parser.add_argument('--max_timesteps', type=int, default=10)\n parser.add_argument('--update_timesteps', type=int, default=10)\n parser.add_argument('--max_epochs', type=int, default=5)\n parser.add_argument('--train_batch_size', type=int, default=8)\n parser.add_argument('--ptx_batch_size', type=int, default=1)\n parser.add_argument('--experience_batch_size', type=int, default=8)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n parser.add_argument('--kl_coef', type=float, default=0.1)\n parser.add_argument('--ptx_coef', type=float, default=0.9)\n parser.add_argument('--max_input_len', type=int, default=96)\n parser.add_argument('--max_seq_len', type=int, default=128)\n args = parser.parse_args()\n main(args)\n", "path": "applications/Chat/examples/train_prompts.py"}]} | 3,990 | 306 |
gh_patches_debug_10599 | rasdani/github-patches | git_diff | saleor__saleor-13172 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug: wrong usage in a dataloader
### What are you trying to achieve?
Please take a look at this code:
https://github.com/saleor/saleor/blob/e3078c188b1ff270f533a71d49ce7d178cda79e4/saleor/graphql/shipping/dataloaders.py#L99
The `keys` parameter of the `batch_load` method has the type:
```python
List[Tuple[str, str]]
```
So we must extract the shipping zone ids from the `keys` before using them to filter `ShippingMethod`.
I think it's wrong to filter by the whole `keys` param here.
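A minimal sketch of the fix I have in mind is below. The helper name `_zone_ids_from_keys` is just mine, and I am assuming the keys really are `(shipping_zone_id, channel_slug)` tuples, which is what the grouping code right below the filter implies.

```python
from typing import List, Tuple


def _zone_ids_from_keys(keys: List[Tuple[int, str]]) -> List[int]:
    """Extract the shipping zone ids from (shipping_zone_id, channel_slug) keys."""
    return [zone_id for zone_id, _channel_slug in keys]


# and then, inside ShippingMethodsByShippingZoneIdAndChannelSlugLoader.batch_load:
#     shipping_methods = (
#         ShippingMethod.objects.using(self.database_connection_name)
#         .filter(shipping_zone_id__in=_zone_ids_from_keys(keys))
#         .annotate(channel_slug=F("channel_listings__channel__slug"))
#     )
```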
If I was wrong, please kindly let me know and I will close this issue.
Thank you for reaching out, Mr. @patrys.
### Steps to reproduce the problem
Just look at the code and you will understand right away.
### What did you expect to happen?
I think we must extract the `keys` param properly first
### Logs
_No response_
### Environment
Saleor version: Main branch
OS and version: anything
</issue>
<code>
[start of saleor/graphql/shipping/dataloaders.py]
1 from collections import defaultdict
2
3 from django.db.models import Exists, F, OuterRef
4
5 from ...channel.models import Channel
6 from ...shipping.models import (
7 ShippingMethod,
8 ShippingMethodChannelListing,
9 ShippingMethodPostalCodeRule,
10 ShippingZone,
11 )
12 from ..core.dataloaders import DataLoader
13
14
15 class ShippingMethodByIdLoader(DataLoader):
16 context_key = "shippingmethod_by_id"
17
18 def batch_load(self, keys):
19 shipping_methods = ShippingMethod.objects.using(
20 self.database_connection_name
21 ).in_bulk(keys)
22 return [shipping_methods.get(shipping_method_id) for shipping_method_id in keys]
23
24
25 class ShippingZoneByIdLoader(DataLoader):
26 context_key = "shippingzone_by_id"
27
28 def batch_load(self, keys):
29 shipping_zones = ShippingZone.objects.using(
30 self.database_connection_name
31 ).in_bulk(keys)
32 return [shipping_zones.get(shipping_zone_id) for shipping_zone_id in keys]
33
34
35 class ShippingZonesByChannelIdLoader(DataLoader):
36 context_key = "shippingzone_by_channel_id"
37
38 def batch_load(self, keys):
39 shipping_zones_channel = ShippingZone.channels.through.objects.using(
40 self.database_connection_name
41 ).filter(channel_id__in=keys)
42 shipping_zones_map = (
43 ShippingZone.objects.using(self.database_connection_name)
44 .filter(
45 Exists(shipping_zones_channel.filter(shippingzone_id=OuterRef("pk")))
46 )
47 .in_bulk()
48 )
49
50 shipping_zones_by_channel_map = defaultdict(list)
51 for shipping_zone_id, channel_id in shipping_zones_channel.values_list(
52 "shippingzone_id", "channel_id"
53 ):
54 shipping_zones_by_channel_map[channel_id].append(
55 shipping_zones_map[shipping_zone_id]
56 )
57 return [
58 shipping_zones_by_channel_map.get(channel_id, []) for channel_id in keys
59 ]
60
61
62 class ShippingMethodsByShippingZoneIdLoader(DataLoader):
63 context_key = "shippingmethod_by_shippingzone"
64
65 def batch_load(self, keys):
66 shipping_methods = ShippingMethod.objects.using(
67 self.database_connection_name
68 ).filter(shipping_zone_id__in=keys)
69 shipping_methods_by_shipping_zone_map = defaultdict(list)
70 for shipping_method in shipping_methods:
71 shipping_methods_by_shipping_zone_map[
72 shipping_method.shipping_zone_id
73 ].append(shipping_method)
74 return [
75 shipping_methods_by_shipping_zone_map.get(shipping_zone_id, [])
76 for shipping_zone_id in keys
77 ]
78
79
80 class PostalCodeRulesByShippingMethodIdLoader(DataLoader):
81 context_key = "postal_code_rules_by_shipping_method"
82
83 def batch_load(self, keys):
84 postal_code_rules = (
85 ShippingMethodPostalCodeRule.objects.using(self.database_connection_name)
86 .filter(shipping_method_id__in=keys)
87 .order_by("id")
88 )
89
90 postal_code_rules_map = defaultdict(list)
91 for postal_code in postal_code_rules:
92 postal_code_rules_map[postal_code.shipping_method_id].append(postal_code)
93 return [
94 postal_code_rules_map.get(shipping_method_id, [])
95 for shipping_method_id in keys
96 ]
97
98
99 class ShippingMethodsByShippingZoneIdAndChannelSlugLoader(DataLoader):
100 context_key = "shippingmethod_by_shippingzone_and_channel"
101
102 def batch_load(self, keys):
103 shipping_methods = (
104 ShippingMethod.objects.using(self.database_connection_name)
105 .filter(shipping_zone_id__in=keys)
106 .annotate(channel_slug=F("channel_listings__channel__slug"))
107 )
108
109 shipping_methods_by_shipping_zone_and_channel_map = defaultdict(list)
110 for shipping_method in shipping_methods:
111 key = (
112 shipping_method.shipping_zone_id,
113 getattr(shipping_method, "channel_slug"), # annotation
114 )
115 shipping_methods_by_shipping_zone_and_channel_map[key].append(
116 shipping_method
117 )
118 return [
119 shipping_methods_by_shipping_zone_and_channel_map.get(key, [])
120 for key in keys
121 ]
122
123
124 class ShippingMethodChannelListingByShippingMethodIdLoader(DataLoader):
125 context_key = "shippingmethodchannellisting_by_shippingmethod"
126
127 def batch_load(self, keys):
128 shipping_method_channel_listings = ShippingMethodChannelListing.objects.using(
129 self.database_connection_name
130 ).filter(shipping_method_id__in=keys)
131 shipping_method_channel_listings_by_shipping_method_map = defaultdict(list)
132 for shipping_method_channel_listing in shipping_method_channel_listings:
133 shipping_method_channel_listings_by_shipping_method_map[
134 shipping_method_channel_listing.shipping_method_id
135 ].append(shipping_method_channel_listing)
136 return [
137 shipping_method_channel_listings_by_shipping_method_map.get(
138 shipping_method_id, []
139 )
140 for shipping_method_id in keys
141 ]
142
143
144 class ShippingMethodChannelListingByChannelSlugLoader(DataLoader):
145 context_key = "shippingmethodchannellisting_by_channel"
146
147 def batch_load(self, keys):
148 shipping_method_channel_listings = (
149 ShippingMethodChannelListing.objects.using(self.database_connection_name)
150 .filter(channel__slug__in=keys)
151 .annotate(channel_slug=F("channel__slug"))
152 )
153 shipping_method_channel_listings_by_channel_slug = defaultdict(list)
154 for shipping_method_channel_listing in shipping_method_channel_listings:
155 shipping_method_channel_listings_by_channel_slug[
156 shipping_method_channel_listing.channel_slug
157 ].append(shipping_method_channel_listing)
158 return [
159 shipping_method_channel_listings_by_channel_slug.get(channel_slug, [])
160 for channel_slug in keys
161 ]
162
163
164 class ShippingMethodChannelListingByShippingMethodIdAndChannelSlugLoader(DataLoader):
165 context_key = "shippingmethodchannellisting_by_shippingmethod_and_channel"
166
167 def batch_load(self, keys):
168 shipping_method_ids = [key[0] for key in keys]
169 channel_slugs = [key[1] for key in keys]
170
171 def _find_listing_by_shipping_method_id(listings_by_channel):
172 listings_by_method = []
173 for method_id, listings in zip(shipping_method_ids, listings_by_channel):
174 for listing in listings:
175 if method_id == listing.shipping_method_id:
176 listings_by_method.append(listing)
177 break
178 else:
179 listings_by_method.append(None)
180
181 return listings_by_method
182
183 return (
184 ShippingMethodChannelListingByChannelSlugLoader(self.context)
185 .load_many(channel_slugs)
186 .then(_find_listing_by_shipping_method_id)
187 )
188
189
190 class ChannelsByShippingZoneIdLoader(DataLoader):
191 context_key = "channels_by_shippingzone"
192
193 def batch_load(self, keys):
194 from ..channel.dataloaders import ChannelByIdLoader
195
196 channel_and_zone_is_pairs = (
197 Channel.objects.using(self.database_connection_name)
198 .filter(shipping_zones__id__in=keys)
199 .values_list("pk", "shipping_zones__id")
200 )
201 shipping_zone_channel_map = defaultdict(list)
202 for channel_id, zone_id in channel_and_zone_is_pairs:
203 shipping_zone_channel_map[zone_id].append(channel_id)
204
205 def map_channels(channels):
206 channel_map = {channel.pk: channel for channel in channels}
207 return [
208 [
209 channel_map[channel_id]
210 for channel_id in shipping_zone_channel_map.get(zone_id, [])
211 ]
212 for zone_id in keys
213 ]
214
215 return (
216 ChannelByIdLoader(self.context)
217 .load_many({pk for pk, _ in channel_and_zone_is_pairs})
218 .then(map_channels)
219 )
220
[end of saleor/graphql/shipping/dataloaders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/saleor/graphql/shipping/dataloaders.py b/saleor/graphql/shipping/dataloaders.py
--- a/saleor/graphql/shipping/dataloaders.py
+++ b/saleor/graphql/shipping/dataloaders.py
@@ -100,9 +100,10 @@
context_key = "shippingmethod_by_shippingzone_and_channel"
def batch_load(self, keys):
+ shipping_zone_ids = [zone_id for (zone_id, _) in keys]
shipping_methods = (
ShippingMethod.objects.using(self.database_connection_name)
- .filter(shipping_zone_id__in=keys)
+ .filter(shipping_zone_id__in=shipping_zone_ids)
.annotate(channel_slug=F("channel_listings__channel__slug"))
)
| {"golden_diff": "diff --git a/saleor/graphql/shipping/dataloaders.py b/saleor/graphql/shipping/dataloaders.py\n--- a/saleor/graphql/shipping/dataloaders.py\n+++ b/saleor/graphql/shipping/dataloaders.py\n@@ -100,9 +100,10 @@\n context_key = \"shippingmethod_by_shippingzone_and_channel\"\n \n def batch_load(self, keys):\n+ shipping_zone_ids = [zone_id for (zone_id, _) in keys]\n shipping_methods = (\n ShippingMethod.objects.using(self.database_connection_name)\n- .filter(shipping_zone_id__in=keys)\n+ .filter(shipping_zone_id__in=shipping_zone_ids)\n .annotate(channel_slug=F(\"channel_listings__channel__slug\"))\n )\n", "issue": "Bug: wrong usage in a dataloader\n### What are you trying to achieve?\r\n\r\nPlease take a look at this code:\r\nhttps://github.com/saleor/saleor/blob/e3078c188b1ff270f533a71d49ce7d178cda79e4/saleor/graphql/shipping/dataloaders.py#L99\r\n\r\nThe keys parameter of batch_load method has type of:\r\n\r\n```python\r\nList[Tuple[str, str]]\r\n```\r\n\r\nSo we must extract shipping method ids from the `keys` before using them to filter `ShippingMethod`\r\nI think it's wrong to filter by the whole `keys` param here.\r\n\r\nIf I was wrong, please kindly let me know and I will close this issue.\r\n\r\nThank you for reaching out. Mr. @patrys \r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\nJust look at the code and you will understand right away.\r\n\r\n### What did you expect to happen?\r\n\r\nI think we must extract the `keys` param properly first\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nSaleor version: Main branch\r\nOS and version: anything\r\n\n", "before_files": [{"content": "from collections import defaultdict\n\nfrom django.db.models import Exists, F, OuterRef\n\nfrom ...channel.models import Channel\nfrom ...shipping.models import (\n ShippingMethod,\n ShippingMethodChannelListing,\n ShippingMethodPostalCodeRule,\n ShippingZone,\n)\nfrom ..core.dataloaders import DataLoader\n\n\nclass ShippingMethodByIdLoader(DataLoader):\n context_key = \"shippingmethod_by_id\"\n\n def batch_load(self, keys):\n shipping_methods = ShippingMethod.objects.using(\n self.database_connection_name\n ).in_bulk(keys)\n return [shipping_methods.get(shipping_method_id) for shipping_method_id in keys]\n\n\nclass ShippingZoneByIdLoader(DataLoader):\n context_key = \"shippingzone_by_id\"\n\n def batch_load(self, keys):\n shipping_zones = ShippingZone.objects.using(\n self.database_connection_name\n ).in_bulk(keys)\n return [shipping_zones.get(shipping_zone_id) for shipping_zone_id in keys]\n\n\nclass ShippingZonesByChannelIdLoader(DataLoader):\n context_key = \"shippingzone_by_channel_id\"\n\n def batch_load(self, keys):\n shipping_zones_channel = ShippingZone.channels.through.objects.using(\n self.database_connection_name\n ).filter(channel_id__in=keys)\n shipping_zones_map = (\n ShippingZone.objects.using(self.database_connection_name)\n .filter(\n Exists(shipping_zones_channel.filter(shippingzone_id=OuterRef(\"pk\")))\n )\n .in_bulk()\n )\n\n shipping_zones_by_channel_map = defaultdict(list)\n for shipping_zone_id, channel_id in shipping_zones_channel.values_list(\n \"shippingzone_id\", \"channel_id\"\n ):\n shipping_zones_by_channel_map[channel_id].append(\n shipping_zones_map[shipping_zone_id]\n )\n return [\n shipping_zones_by_channel_map.get(channel_id, []) for channel_id in keys\n ]\n\n\nclass ShippingMethodsByShippingZoneIdLoader(DataLoader):\n context_key = \"shippingmethod_by_shippingzone\"\n\n def batch_load(self, keys):\n shipping_methods = 
ShippingMethod.objects.using(\n self.database_connection_name\n ).filter(shipping_zone_id__in=keys)\n shipping_methods_by_shipping_zone_map = defaultdict(list)\n for shipping_method in shipping_methods:\n shipping_methods_by_shipping_zone_map[\n shipping_method.shipping_zone_id\n ].append(shipping_method)\n return [\n shipping_methods_by_shipping_zone_map.get(shipping_zone_id, [])\n for shipping_zone_id in keys\n ]\n\n\nclass PostalCodeRulesByShippingMethodIdLoader(DataLoader):\n context_key = \"postal_code_rules_by_shipping_method\"\n\n def batch_load(self, keys):\n postal_code_rules = (\n ShippingMethodPostalCodeRule.objects.using(self.database_connection_name)\n .filter(shipping_method_id__in=keys)\n .order_by(\"id\")\n )\n\n postal_code_rules_map = defaultdict(list)\n for postal_code in postal_code_rules:\n postal_code_rules_map[postal_code.shipping_method_id].append(postal_code)\n return [\n postal_code_rules_map.get(shipping_method_id, [])\n for shipping_method_id in keys\n ]\n\n\nclass ShippingMethodsByShippingZoneIdAndChannelSlugLoader(DataLoader):\n context_key = \"shippingmethod_by_shippingzone_and_channel\"\n\n def batch_load(self, keys):\n shipping_methods = (\n ShippingMethod.objects.using(self.database_connection_name)\n .filter(shipping_zone_id__in=keys)\n .annotate(channel_slug=F(\"channel_listings__channel__slug\"))\n )\n\n shipping_methods_by_shipping_zone_and_channel_map = defaultdict(list)\n for shipping_method in shipping_methods:\n key = (\n shipping_method.shipping_zone_id,\n getattr(shipping_method, \"channel_slug\"), # annotation\n )\n shipping_methods_by_shipping_zone_and_channel_map[key].append(\n shipping_method\n )\n return [\n shipping_methods_by_shipping_zone_and_channel_map.get(key, [])\n for key in keys\n ]\n\n\nclass ShippingMethodChannelListingByShippingMethodIdLoader(DataLoader):\n context_key = \"shippingmethodchannellisting_by_shippingmethod\"\n\n def batch_load(self, keys):\n shipping_method_channel_listings = ShippingMethodChannelListing.objects.using(\n self.database_connection_name\n ).filter(shipping_method_id__in=keys)\n shipping_method_channel_listings_by_shipping_method_map = defaultdict(list)\n for shipping_method_channel_listing in shipping_method_channel_listings:\n shipping_method_channel_listings_by_shipping_method_map[\n shipping_method_channel_listing.shipping_method_id\n ].append(shipping_method_channel_listing)\n return [\n shipping_method_channel_listings_by_shipping_method_map.get(\n shipping_method_id, []\n )\n for shipping_method_id in keys\n ]\n\n\nclass ShippingMethodChannelListingByChannelSlugLoader(DataLoader):\n context_key = \"shippingmethodchannellisting_by_channel\"\n\n def batch_load(self, keys):\n shipping_method_channel_listings = (\n ShippingMethodChannelListing.objects.using(self.database_connection_name)\n .filter(channel__slug__in=keys)\n .annotate(channel_slug=F(\"channel__slug\"))\n )\n shipping_method_channel_listings_by_channel_slug = defaultdict(list)\n for shipping_method_channel_listing in shipping_method_channel_listings:\n shipping_method_channel_listings_by_channel_slug[\n shipping_method_channel_listing.channel_slug\n ].append(shipping_method_channel_listing)\n return [\n shipping_method_channel_listings_by_channel_slug.get(channel_slug, [])\n for channel_slug in keys\n ]\n\n\nclass ShippingMethodChannelListingByShippingMethodIdAndChannelSlugLoader(DataLoader):\n context_key = \"shippingmethodchannellisting_by_shippingmethod_and_channel\"\n\n def batch_load(self, keys):\n shipping_method_ids = [key[0] for 
key in keys]\n channel_slugs = [key[1] for key in keys]\n\n def _find_listing_by_shipping_method_id(listings_by_channel):\n listings_by_method = []\n for method_id, listings in zip(shipping_method_ids, listings_by_channel):\n for listing in listings:\n if method_id == listing.shipping_method_id:\n listings_by_method.append(listing)\n break\n else:\n listings_by_method.append(None)\n\n return listings_by_method\n\n return (\n ShippingMethodChannelListingByChannelSlugLoader(self.context)\n .load_many(channel_slugs)\n .then(_find_listing_by_shipping_method_id)\n )\n\n\nclass ChannelsByShippingZoneIdLoader(DataLoader):\n context_key = \"channels_by_shippingzone\"\n\n def batch_load(self, keys):\n from ..channel.dataloaders import ChannelByIdLoader\n\n channel_and_zone_is_pairs = (\n Channel.objects.using(self.database_connection_name)\n .filter(shipping_zones__id__in=keys)\n .values_list(\"pk\", \"shipping_zones__id\")\n )\n shipping_zone_channel_map = defaultdict(list)\n for channel_id, zone_id in channel_and_zone_is_pairs:\n shipping_zone_channel_map[zone_id].append(channel_id)\n\n def map_channels(channels):\n channel_map = {channel.pk: channel for channel in channels}\n return [\n [\n channel_map[channel_id]\n for channel_id in shipping_zone_channel_map.get(zone_id, [])\n ]\n for zone_id in keys\n ]\n\n return (\n ChannelByIdLoader(self.context)\n .load_many({pk for pk, _ in channel_and_zone_is_pairs})\n .then(map_channels)\n )\n", "path": "saleor/graphql/shipping/dataloaders.py"}]} | 2,918 | 172 |
gh_patches_debug_26276 | rasdani/github-patches | git_diff | elastic__apm-agent-python-994 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
APM agent crashes if the SQL query is too large
**Describe the bug**: ...
Same as https://github.com/elastic/apm-agent-python/issues/827, but with the `psycopg2` library instead of the Django ORM.
**To Reproduce**
1. Use any of the `psycopg2` extras utilities, for example `execute_values`, with a query that is sufficiently long.
**Environment (please complete the following information)**
- OS: mac
- Python version: 3.7
- APM Server version: ?
- Agent version: 5.10.0
**Additional context**
The issue is that `psycopg2` passes the SQL as a bytes object (byte array). The agent's SQL instrumentation [code](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/instrumentation/packages/dbapi2.py#L222-L231) calls the [`shorten()`](https://github.com/elastic/apm-agent-python/blob/f8977bf5c5d9921f83eed174b94cc48922bc72b2/elasticapm/utils/encoding.py#L182-L217) function, which does not support bytes objects.
I'm not sure whether the same behavior applies to psycopg2's core `execute()` function, or whether the query is passed as a bytes object only in the "extras" helpers.
</issue>
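A minimal sketch of the decoding step the report calls for — and which the fix reproduced later in this record applies inside `PGCursorProxy._bake_sql` — is shown below. If the baked SQL arrives as `bytes`, it is decoded with the client connection's encoding (via `psycopg2.extensions.encodings`) before being handed to `shorten()`/`extract_signature()`:

```python
from psycopg2 import extensions as psycopg2_extensions


def bake_sql_to_text(cursor, sql):
    """Return `sql` as text, decoding bytes with the connection's encoding."""
    # Composable objects (psycopg2.sql.SQL, Identifier, ...) render themselves.
    if hasattr(sql, "as_string"):
        sql = sql.as_string(cursor)
    if isinstance(sql, bytes):
        # psycopg2 exposes PostgreSQL encoding names; map them to Python codecs.
        codec = psycopg2_extensions.encodings[cursor.connection.encoding]
        sql = sql.decode(codec)
    return sql
```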
<code>
[start of elasticapm/instrumentation/packages/psycopg2.py]
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from elasticapm.instrumentation.packages.dbapi2 import (
32 ConnectionProxy,
33 CursorProxy,
34 DbApi2Instrumentation,
35 extract_signature,
36 )
37 from elasticapm.traces import capture_span
38 from elasticapm.utils import compat, default_ports
39
40
41 class PGCursorProxy(CursorProxy):
42 provider_name = "postgresql"
43
44 def _bake_sql(self, sql):
45 # if this is a Composable object, use its `as_string` method
46 # see http://initd.org/psycopg/docs/sql.html
47 if hasattr(sql, "as_string"):
48 return sql.as_string(self.__wrapped__)
49 return sql
50
51 def extract_signature(self, sql):
52 return extract_signature(sql)
53
54 def __enter__(self):
55 return PGCursorProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info)
56
57
58 class PGConnectionProxy(ConnectionProxy):
59 cursor_proxy = PGCursorProxy
60
61 def __enter__(self):
62 return PGConnectionProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info)
63
64
65 class Psycopg2Instrumentation(DbApi2Instrumentation):
66 name = "psycopg2"
67
68 instrument_list = [("psycopg2", "connect")]
69
70 def call(self, module, method, wrapped, instance, args, kwargs):
71 signature = "psycopg2.connect"
72
73 host = kwargs.get("host")
74 if host:
75 signature += " " + compat.text_type(host)
76
77 port = kwargs.get("port")
78 if port:
79 port = str(port)
80 if int(port) != default_ports.get("postgresql"):
81 host += ":" + port
82 signature += " " + compat.text_type(host)
83 else:
84 # Parse connection string and extract host/port
85 pass
86 destination_info = {
87 "address": kwargs.get("host", "localhost"),
88 "port": int(kwargs.get("port", default_ports.get("postgresql"))),
89 "service": {"name": "postgresql", "resource": "postgresql", "type": "db"},
90 }
91 with capture_span(
92 signature,
93 span_type="db",
94 span_subtype="postgresql",
95 span_action="connect",
96 extra={"destination": destination_info},
97 ):
98 return PGConnectionProxy(wrapped(*args, **kwargs), destination_info=destination_info)
99
100
101 class Psycopg2ExtensionsInstrumentation(DbApi2Instrumentation):
102 """
103 Some extensions do a type check on the Connection/Cursor in C-code, which our
104 proxy fails. For these extensions, we need to ensure that the unwrapped
105 Connection/Cursor is passed.
106 """
107
108 name = "psycopg2"
109
110 instrument_list = [
111 ("psycopg2.extensions", "register_type"),
112 # specifically instrument `register_json` as it bypasses `register_type`
113 ("psycopg2._json", "register_json"),
114 ("psycopg2.extensions", "quote_ident"),
115 ("psycopg2.extensions", "encrypt_password"),
116 ]
117
118 def call(self, module, method, wrapped, instance, args, kwargs):
119 if "conn_or_curs" in kwargs and hasattr(kwargs["conn_or_curs"], "__wrapped__"):
120 kwargs["conn_or_curs"] = kwargs["conn_or_curs"].__wrapped__
121 # register_type takes the connection as second argument
122 elif len(args) == 2 and hasattr(args[1], "__wrapped__"):
123 args = (args[0], args[1].__wrapped__)
124 # register_json takes the connection as first argument, and can have
125 # several more arguments
126 elif method == "register_json":
127 if args and hasattr(args[0], "__wrapped__"):
128 args = (args[0].__wrapped__,) + args[1:]
129
130 elif method == "encrypt_password":
131 # connection/cursor is either 3rd argument, or "scope" keyword argument
132 if len(args) >= 3 and hasattr(args[2], "__wrapped__"):
133 args = args[:2] + (args[2].__wrapped__,) + args[3:]
134 elif "scope" in kwargs and hasattr(kwargs["scope"], "__wrapped__"):
135 kwargs["scope"] = kwargs["scope"].__wrapped__
136
137 return wrapped(*args, **kwargs)
138
[end of elasticapm/instrumentation/packages/psycopg2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/elasticapm/instrumentation/packages/psycopg2.py b/elasticapm/instrumentation/packages/psycopg2.py
--- a/elasticapm/instrumentation/packages/psycopg2.py
+++ b/elasticapm/instrumentation/packages/psycopg2.py
@@ -27,6 +27,7 @@
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+from __future__ import absolute_import
from elasticapm.instrumentation.packages.dbapi2 import (
ConnectionProxy,
@@ -42,10 +43,15 @@
provider_name = "postgresql"
def _bake_sql(self, sql):
+ from psycopg2 import extensions as psycopg2_extensions
+
# if this is a Composable object, use its `as_string` method
# see http://initd.org/psycopg/docs/sql.html
if hasattr(sql, "as_string"):
- return sql.as_string(self.__wrapped__)
+ sql = sql.as_string(self.__wrapped__)
+ # if the sql string is already a byte string, we need to decode it using the connection encoding
+ if isinstance(sql, compat.binary_type):
+ sql = sql.decode(psycopg2_extensions.encodings[self.__wrapped__.connection.encoding])
return sql
def extract_signature(self, sql):
| {"golden_diff": "diff --git a/elasticapm/instrumentation/packages/psycopg2.py b/elasticapm/instrumentation/packages/psycopg2.py\n--- a/elasticapm/instrumentation/packages/psycopg2.py\n+++ b/elasticapm/instrumentation/packages/psycopg2.py\n@@ -27,6 +27,7 @@\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n+from __future__ import absolute_import\n \n from elasticapm.instrumentation.packages.dbapi2 import (\n ConnectionProxy,\n@@ -42,10 +43,15 @@\n provider_name = \"postgresql\"\n \n def _bake_sql(self, sql):\n+ from psycopg2 import extensions as psycopg2_extensions\n+\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n- return sql.as_string(self.__wrapped__)\n+ sql = sql.as_string(self.__wrapped__)\n+ # if the sql string is already a byte string, we need to decode it using the connection encoding\n+ if isinstance(sql, compat.binary_type):\n+ sql = sql.decode(psycopg2_extensions.encodings[self.__wrapped__.connection.encoding])\n return sql\n \n def extract_signature(self, sql):\n", "issue": "APM agent crashes if the SQL query is too large\n**Describe the bug**: ...\r\nSame as https://github.com/elastic/apm-agent-python/issues/827, but with `psycopg2` library instead of Django ORM.\r\n\r\n**To Reproduce**\r\n\r\n1. use any of `psycopg2` extra utilities, for example `execute_values`, with a query that is sufficiently long.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: mac\r\n- Python version: 3.7\r\n- APM Server version: ?\r\n- Agent version: 5.10.0\r\n\r\n\r\n**Additional context**\r\n\r\nThe issue is that `psycopg2` is passing the sql as bytes object (bytes array). The agent sql instrumentation [code](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/instrumentation/packages/dbapi2.py#L222-L231) is calling the [`shorten()`](https://github.com/elastic/apm-agent-python/blob/f8977bf5c5d9921f83eed174b94cc48922bc72b2/elasticapm/utils/encoding.py#L182-L217) function which does not support bytes object.\r\n\r\nI'm not sure if same behavior applies to `psycopg2` core `execute()` function, or only in the \"extras\" the query is passed as bytes object.\nAPM agent crashes if the SQL query is too large\n**Describe the bug**: ...\r\nSame as https://github.com/elastic/apm-agent-python/issues/827, but with `psycopg2` library instead of Django ORM.\r\n\r\n**To Reproduce**\r\n\r\n1. use any of `psycopg2` extra utilities, for example `execute_values`, with a query that is sufficiently long.\r\n\r\n**Environment (please complete the following information)**\r\n- OS: mac\r\n- Python version: 3.7\r\n- APM Server version: ?\r\n- Agent version: 5.10.0\r\n\r\n\r\n**Additional context**\r\n\r\nThe issue is that `psycopg2` is passing the sql as bytes object (bytes array). 
The agent sql instrumentation [code](https://github.com/elastic/apm-agent-python/blob/master/elasticapm/instrumentation/packages/dbapi2.py#L222-L231) is calling the [`shorten()`](https://github.com/elastic/apm-agent-python/blob/f8977bf5c5d9921f83eed174b94cc48922bc72b2/elasticapm/utils/encoding.py#L182-L217) function which does not support bytes object.\r\n\r\nI'm not sure if same behavior applies to `psycopg2` core `execute()` function, or only in the \"extras\" the query is passed as bytes object.\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.instrumentation.packages.dbapi2 import (\n ConnectionProxy,\n CursorProxy,\n DbApi2Instrumentation,\n extract_signature,\n)\nfrom elasticapm.traces import capture_span\nfrom elasticapm.utils import compat, default_ports\n\n\nclass PGCursorProxy(CursorProxy):\n provider_name = \"postgresql\"\n\n def _bake_sql(self, sql):\n # if this is a Composable object, use its `as_string` method\n # see http://initd.org/psycopg/docs/sql.html\n if hasattr(sql, \"as_string\"):\n return sql.as_string(self.__wrapped__)\n return sql\n\n def extract_signature(self, sql):\n return extract_signature(sql)\n\n def __enter__(self):\n return PGCursorProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info)\n\n\nclass PGConnectionProxy(ConnectionProxy):\n cursor_proxy = PGCursorProxy\n\n def __enter__(self):\n return PGConnectionProxy(self.__wrapped__.__enter__(), destination_info=self._self_destination_info)\n\n\nclass Psycopg2Instrumentation(DbApi2Instrumentation):\n name = \"psycopg2\"\n\n instrument_list = [(\"psycopg2\", \"connect\")]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n signature = \"psycopg2.connect\"\n\n host = kwargs.get(\"host\")\n if host:\n signature += \" \" + compat.text_type(host)\n\n port = kwargs.get(\"port\")\n if port:\n port = str(port)\n if int(port) != default_ports.get(\"postgresql\"):\n host += \":\" + port\n signature += \" \" + 
compat.text_type(host)\n else:\n # Parse connection string and extract host/port\n pass\n destination_info = {\n \"address\": kwargs.get(\"host\", \"localhost\"),\n \"port\": int(kwargs.get(\"port\", default_ports.get(\"postgresql\"))),\n \"service\": {\"name\": \"postgresql\", \"resource\": \"postgresql\", \"type\": \"db\"},\n }\n with capture_span(\n signature,\n span_type=\"db\",\n span_subtype=\"postgresql\",\n span_action=\"connect\",\n extra={\"destination\": destination_info},\n ):\n return PGConnectionProxy(wrapped(*args, **kwargs), destination_info=destination_info)\n\n\nclass Psycopg2ExtensionsInstrumentation(DbApi2Instrumentation):\n \"\"\"\n Some extensions do a type check on the Connection/Cursor in C-code, which our\n proxy fails. For these extensions, we need to ensure that the unwrapped\n Connection/Cursor is passed.\n \"\"\"\n\n name = \"psycopg2\"\n\n instrument_list = [\n (\"psycopg2.extensions\", \"register_type\"),\n # specifically instrument `register_json` as it bypasses `register_type`\n (\"psycopg2._json\", \"register_json\"),\n (\"psycopg2.extensions\", \"quote_ident\"),\n (\"psycopg2.extensions\", \"encrypt_password\"),\n ]\n\n def call(self, module, method, wrapped, instance, args, kwargs):\n if \"conn_or_curs\" in kwargs and hasattr(kwargs[\"conn_or_curs\"], \"__wrapped__\"):\n kwargs[\"conn_or_curs\"] = kwargs[\"conn_or_curs\"].__wrapped__\n # register_type takes the connection as second argument\n elif len(args) == 2 and hasattr(args[1], \"__wrapped__\"):\n args = (args[0], args[1].__wrapped__)\n # register_json takes the connection as first argument, and can have\n # several more arguments\n elif method == \"register_json\":\n if args and hasattr(args[0], \"__wrapped__\"):\n args = (args[0].__wrapped__,) + args[1:]\n\n elif method == \"encrypt_password\":\n # connection/cursor is either 3rd argument, or \"scope\" keyword argument\n if len(args) >= 3 and hasattr(args[2], \"__wrapped__\"):\n args = args[:2] + (args[2].__wrapped__,) + args[3:]\n elif \"scope\" in kwargs and hasattr(kwargs[\"scope\"], \"__wrapped__\"):\n kwargs[\"scope\"] = kwargs[\"scope\"].__wrapped__\n\n return wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/psycopg2.py"}]} | 2,708 | 326 |
gh_patches_debug_3103 | rasdani/github-patches | git_diff | conan-io__conan-center-index-1534 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[conan.io/center] parallel-hashmap/1.31 merged but not found in conan center
Even though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in the web UI or with `conan search`.
</issue>
<code>
[start of recipes/parallel-hashmap/all/conanfile.py]
1 import os
2
3 from conans import ConanFile, tools
4
5 class ParallelHashmapConan(ConanFile):
6 name = "parallel-hashmap"
7 description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
8 license = "Apache-2.0"
9 topics = ("conan", "parallel-hashmap", "parallel", "hashmap", "btree")
10 homepage = "https://github.com/greg7mdp/parallel-hashmap"
11 url = "https://github.com/conan-io/conan-center-index"
12 no_copy_source = True
13
14 @property
15 def _source_subfolder(self):
16 return "source_subfolder"
17
18 def source(self):
19 tools.get(**self.conan_data["sources"][self.version])
20 os.rename(self.name + "-" + self.version, self._source_subfolder)
21
22 def package(self):
23 self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
24 self.copy("*.h",
25 dst=os.path.join("include", "parallel_hashmap"),
26 src=os.path.join(self._source_subfolder, "parallel_hashmap"))
27 self.copy("phmap.natvis", dst="res", src=self._source_subfolder)
28
29 def package_id(self):
30 self.info.header_only()
31
[end of recipes/parallel-hashmap/all/conanfile.py]
</code>
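As a usage note for the header-only recipe above: consumers simply add the packaged include directory and use the headers, which conan-center verifies with a small `test_package` recipe. A rough sketch of such a test recipe under the Conan 1.x API (the executable name and CMake plumbing are assumptions, not part of this record):

```python
import os

from conans import CMake, ConanFile, tools


class TestPackageConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        if not tools.cross_building(self.settings):
            # Assumed binary name; the accompanying CMakeLists.txt would compile a
            # source file that includes <parallel_hashmap/phmap.h>.
            self.run(os.path.join("bin", "test_package"), run_environment=True)
```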
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py
--- a/recipes/parallel-hashmap/all/conanfile.py
+++ b/recipes/parallel-hashmap/all/conanfile.py
@@ -1,7 +1,7 @@
import os
-
from conans import ConanFile, tools
+
class ParallelHashmapConan(ConanFile):
name = "parallel-hashmap"
description = "A family of header-only, very fast and memory-friendly hashmap and btree containers."
| {"golden_diff": "diff --git a/recipes/parallel-hashmap/all/conanfile.py b/recipes/parallel-hashmap/all/conanfile.py\n--- a/recipes/parallel-hashmap/all/conanfile.py\n+++ b/recipes/parallel-hashmap/all/conanfile.py\n@@ -1,7 +1,7 @@\n import os\n-\n from conans import ConanFile, tools\n \n+\n class ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n", "issue": "[conan.io/center] parallel-hashmap/1.31 merged but not found in conan center\nEven though https://github.com/conan-io/conan-center-index/pull/1253 has been merged, `parallel-hashmap/1.31` can't be found in Web UI or with `conan search`\r\n\n", "before_files": [{"content": "import os\n\nfrom conans import ConanFile, tools\n\nclass ParallelHashmapConan(ConanFile):\n name = \"parallel-hashmap\"\n description = \"A family of header-only, very fast and memory-friendly hashmap and btree containers.\"\n license = \"Apache-2.0\"\n topics = (\"conan\", \"parallel-hashmap\", \"parallel\", \"hashmap\", \"btree\")\n homepage = \"https://github.com/greg7mdp/parallel-hashmap\"\n url = \"https://github.com/conan-io/conan-center-index\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(self.name + \"-\" + self.version, self._source_subfolder)\n\n def package(self):\n self.copy(\"LICENSE\", dst=\"licenses\", src=self._source_subfolder)\n self.copy(\"*.h\",\n dst=os.path.join(\"include\", \"parallel_hashmap\"),\n src=os.path.join(self._source_subfolder, \"parallel_hashmap\"))\n self.copy(\"phmap.natvis\", dst=\"res\", src=self._source_subfolder)\n\n def package_id(self):\n self.info.header_only()\n", "path": "recipes/parallel-hashmap/all/conanfile.py"}]} | 949 | 120 |
gh_patches_debug_12578 | rasdani/github-patches | git_diff | Kinto__kinto-986 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Heartbeat leaves tombstones
</issue>
<code>
[start of kinto/core/storage/__init__.py]
1 import random
2 from collections import namedtuple
3 from pyramid.settings import asbool
4
5 from kinto.core.logs import logger
6 from . import generators
7
8
9 Filter = namedtuple('Filter', ['field', 'value', 'operator'])
10 """Filtering properties."""
11
12 Sort = namedtuple('Sort', ['field', 'direction'])
13 """Sorting properties."""
14
15 DEFAULT_ID_FIELD = 'id'
16 DEFAULT_MODIFIED_FIELD = 'last_modified'
17 DEFAULT_DELETED_FIELD = 'deleted'
18
19 _HEARTBEAT_DELETE_RATE = 0.6
20 _HEARTBEAT_COLLECTION_ID = '__heartbeat__'
21 _HEART_PARENT_ID = _HEARTBEAT_COLLECTION_ID
22 _HEARTBEAT_RECORD = {'__heartbeat__': True}
23
24
25 class StorageBase(object):
26 """Storage abstraction used by resource views.
27
28 It is meant to be instantiated at application startup.
29 Any operation may raise a `HTTPServiceUnavailable` error if an error
30 occurs with the underlying service.
31
32 Configuration can be changed to choose which storage backend will
33 persist the objects.
34
35 :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPServiceUnavailable`
36 """
37
38 id_generator = generators.UUID4()
39 """Id generator used when no one is provided for create."""
40
41 def initialize_schema(self, dry_run=False):
42 """Create every necessary objects (like tables or indices) in the
43 backend.
44
45 This is executed when the ``kinto migrate`` command is run.
46
47 :param bool dry_run: simulate instead of executing the operations.
48 """
49 raise NotImplementedError
50
51 def flush(self, auth=None):
52 """Remove **every** object from this storage.
53 """
54 raise NotImplementedError
55
56 def collection_timestamp(self, collection_id, parent_id, auth=None):
57 """Get the highest timestamp of every objects in this `collection_id` for
58 this `parent_id`.
59
60 .. note::
61
62 This should take deleted objects into account.
63
64 :param str collection_id: the collection id.
65 :param str parent_id: the collection parent.
66
67 :returns: the latest timestamp of the collection.
68 :rtype: int
69 """
70 raise NotImplementedError
71
72 def create(self, collection_id, parent_id, record, id_generator=None,
73 id_field=DEFAULT_ID_FIELD,
74 modified_field=DEFAULT_MODIFIED_FIELD,
75 auth=None):
76 """Create the specified `object` in this `collection_id` for this `parent_id`.
77 Assign the id to the object, using the attribute
78 :attr:`kinto.core.resource.model.Model.id_field`.
79
80 .. note::
81
82 This will update the collection timestamp.
83
84 :raises: :exc:`kinto.core.storage.exceptions.UnicityError`
85
86 :param str collection_id: the collection id.
87 :param str parent_id: the collection parent.
88 :param dict record: the object to create.
89
90 :returns: the newly created object.
91 :rtype: dict
92 """
93 raise NotImplementedError
94
95 def get(self, collection_id, parent_id, object_id,
96 id_field=DEFAULT_ID_FIELD,
97 modified_field=DEFAULT_MODIFIED_FIELD,
98 auth=None):
99 """Retrieve the object with specified `object_id`, or raise error
100 if not found.
101
102 :raises: :exc:`kinto.core.storage.exceptions.RecordNotFoundError`
103
104 :param str collection_id: the collection id.
105 :param str parent_id: the collection parent.
106
107 :param str object_id: unique identifier of the object
108
109 :returns: the object object.
110 :rtype: dict
111 """
112 raise NotImplementedError
113
114 def update(self, collection_id, parent_id, object_id, record,
115 id_field=DEFAULT_ID_FIELD,
116 modified_field=DEFAULT_MODIFIED_FIELD,
117 auth=None):
118 """Overwrite the `object` with the specified `object_id`.
119
120 If the specified id is not found, the object is created with the
121 specified id.
122
123 .. note::
124
125 This will update the collection timestamp.
126
127 :param str collection_id: the collection id.
128 :param str parent_id: the collection parent.
129 :param str object_id: unique identifier of the object
130 :param dict record: the object to update or create.
131
132 :returns: the updated object.
133 :rtype: dict
134 """
135 raise NotImplementedError
136
137 def delete(self, collection_id, parent_id, object_id,
138 id_field=DEFAULT_ID_FIELD, with_deleted=True,
139 modified_field=DEFAULT_MODIFIED_FIELD,
140 deleted_field=DEFAULT_DELETED_FIELD,
141 auth=None, last_modified=None):
142 """Delete the object with specified `object_id`, and raise error
143 if not found.
144
145 Deleted objects must be removed from the database, but their ids and
146 timestamps of deletion must be tracked for synchronization purposes.
147 (See :meth:`kinto.core.storage.StorageBase.get_all`)
148
149 .. note::
150
151 This will update the collection timestamp.
152
153 :raises: :exc:`kinto.core.storage.exceptions.RecordNotFoundError`
154
155 :param str collection_id: the collection id.
156 :param str parent_id: the collection parent.
157
158 :param str object_id: unique identifier of the object
159 :param bool with_deleted: track deleted record with a tombstone
160
161 :returns: the deleted object, with minimal set of attributes.
162 :rtype: dict
163 """
164 raise NotImplementedError
165
166 def delete_all(self, collection_id, parent_id, filters=None,
167 id_field=DEFAULT_ID_FIELD, with_deleted=True,
168 modified_field=DEFAULT_MODIFIED_FIELD,
169 deleted_field=DEFAULT_DELETED_FIELD,
170 auth=None):
171 """Delete all objects in this `collection_id` for this `parent_id`.
172
173 :param str collection_id: the collection id.
174 :param str parent_id: the collection parent.
175
176 :param filters: Optionnally filter the objects to delete.
177 :type filters: list of :class:`kinto.core.storage.Filter`
178 :param bool with_deleted: track deleted records with a tombstone
179
180 :returns: the list of deleted objects, with minimal set of attributes.
181 :rtype: list
182 """
183 raise NotImplementedError
184
185 def purge_deleted(self, collection_id, parent_id, before=None,
186 id_field=DEFAULT_ID_FIELD,
187 modified_field=DEFAULT_MODIFIED_FIELD,
188 auth=None):
189 """Delete all deleted object tombstones in this `collection_id`
190 for this `parent_id`.
191
192 :param str collection_id: the collection id.
193 :param str parent_id: the collection parent.
194
195 :param int before: Optionnal timestamp to limit deletion (exclusive)
196
197 :returns: The number of deleted objects.
198 :rtype: int
199
200 """
201 raise NotImplementedError
202
203 def get_all(self, collection_id, parent_id, filters=None, sorting=None,
204 pagination_rules=None, limit=None, include_deleted=False,
205 id_field=DEFAULT_ID_FIELD,
206 modified_field=DEFAULT_MODIFIED_FIELD,
207 deleted_field=DEFAULT_DELETED_FIELD,
208 auth=None):
209 """Retrieve all objects in this `collection_id` for this `parent_id`.
210
211 :param str collection_id: the collection id.
212 :param str parent_id: the collection parent.
213
214 :param filters: Optionally filter the objects by their attribute.
215 Each filter in this list is a tuple of a field, a value and a
216 comparison (see `kinto.core.utils.COMPARISON`). All filters
217 are combined using *AND*.
218 :type filters: list of :class:`kinto.core.storage.Filter`
219
220 :param sorting: Optionnally sort the objects by attribute.
221 Each sort instruction in this list refers to a field and a
222 direction (negative means descending). All sort instructions are
223 cumulative.
224 :type sorting: list of :class:`kinto.core.storage.Sort`
225
226 :param pagination_rules: Optionnally paginate the list of objects.
227 This list of rules aims to reduce the set of objects to the current
228 page. A rule is a list of filters (see `filters` parameter),
229 and all rules are combined using *OR*.
230 :type pagination_rules: list of list of
231 :class:`kinto.core.storage.Filter`
232
233 :param int limit: Optionnally limit the number of objects to be
234 retrieved.
235
236 :param bool include_deleted: Optionnally include the deleted objects
237 that match the filters.
238
239 :returns: the limited list of objects, and the total number of
240 matching objects in the collection (deleted ones excluded).
241 :rtype: tuple
242 """
243 raise NotImplementedError
244
245
246 def heartbeat(backend):
247 def ping(request):
248 """Test that storage is operationnal.
249
250 :param request: current request object
251 :type request: :class:`~pyramid:pyramid.request.Request`
252 :returns: ``True`` is everything is ok, ``False`` otherwise.
253 :rtype: bool
254 """
255 try:
256 auth = request.headers.get('Authorization')
257 if asbool(request.registry.settings.get('readonly')):
258 # Do not try to write in readonly mode.
259 backend.get_all(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,
260 auth=auth)
261 else:
262 if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
263 backend.delete_all(_HEARTBEAT_COLLECTION_ID,
264 _HEART_PARENT_ID, auth=auth)
265 else:
266 backend.create(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,
267 _HEARTBEAT_RECORD, auth=auth)
268 return True
269 except:
270 logger.exception("Heartbeat Error")
271 return False
272
273 return ping
274
[end of kinto/core/storage/__init__.py]
</code>
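Reading the report together with the fix reproduced later in this record: the heartbeat's write path randomly calls `delete_all()` on the `__heartbeat__` collection, and since `with_deleted` defaults to `True`, every such call records tombstones instead of simply removing the heartbeat records. A sketch of the corrected write branch:

```python
import random

_HEARTBEAT_DELETE_RATE = 0.6
_HEARTBEAT_COLLECTION_ID = '__heartbeat__'
_HEART_PARENT_ID = _HEARTBEAT_COLLECTION_ID
_HEARTBEAT_RECORD = {'__heartbeat__': True}


def heartbeat_write(backend, auth=None):
    """Exercise the storage backend without accumulating tombstones."""
    if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
        # with_deleted=False removes the heartbeat records outright instead of
        # replacing each of them with a tombstone on every ping.
        backend.delete_all(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,
                           with_deleted=False, auth=auth)
    else:
        backend.create(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,
                       _HEARTBEAT_RECORD, auth=auth)
```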
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kinto/core/storage/__init__.py b/kinto/core/storage/__init__.py
--- a/kinto/core/storage/__init__.py
+++ b/kinto/core/storage/__init__.py
@@ -261,7 +261,8 @@
else:
if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:
backend.delete_all(_HEARTBEAT_COLLECTION_ID,
- _HEART_PARENT_ID, auth=auth)
+ _HEART_PARENT_ID, with_deleted=False,
+ auth=auth)
else:
backend.create(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,
_HEARTBEAT_RECORD, auth=auth)
| {"golden_diff": "diff --git a/kinto/core/storage/__init__.py b/kinto/core/storage/__init__.py\n--- a/kinto/core/storage/__init__.py\n+++ b/kinto/core/storage/__init__.py\n@@ -261,7 +261,8 @@\n else:\n if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:\n backend.delete_all(_HEARTBEAT_COLLECTION_ID,\n- _HEART_PARENT_ID, auth=auth)\n+ _HEART_PARENT_ID, with_deleted=False,\n+ auth=auth)\n else:\n backend.create(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,\n _HEARTBEAT_RECORD, auth=auth)\n", "issue": "Heartbeat leaves tombstones\n\nHeartbeat leaves tombstones\n\n", "before_files": [{"content": "import random\nfrom collections import namedtuple\nfrom pyramid.settings import asbool\n\nfrom kinto.core.logs import logger\nfrom . import generators\n\n\nFilter = namedtuple('Filter', ['field', 'value', 'operator'])\n\"\"\"Filtering properties.\"\"\"\n\nSort = namedtuple('Sort', ['field', 'direction'])\n\"\"\"Sorting properties.\"\"\"\n\nDEFAULT_ID_FIELD = 'id'\nDEFAULT_MODIFIED_FIELD = 'last_modified'\nDEFAULT_DELETED_FIELD = 'deleted'\n\n_HEARTBEAT_DELETE_RATE = 0.6\n_HEARTBEAT_COLLECTION_ID = '__heartbeat__'\n_HEART_PARENT_ID = _HEARTBEAT_COLLECTION_ID\n_HEARTBEAT_RECORD = {'__heartbeat__': True}\n\n\nclass StorageBase(object):\n \"\"\"Storage abstraction used by resource views.\n\n It is meant to be instantiated at application startup.\n Any operation may raise a `HTTPServiceUnavailable` error if an error\n occurs with the underlying service.\n\n Configuration can be changed to choose which storage backend will\n persist the objects.\n\n :raises: :exc:`~pyramid:pyramid.httpexceptions.HTTPServiceUnavailable`\n \"\"\"\n\n id_generator = generators.UUID4()\n \"\"\"Id generator used when no one is provided for create.\"\"\"\n\n def initialize_schema(self, dry_run=False):\n \"\"\"Create every necessary objects (like tables or indices) in the\n backend.\n\n This is executed when the ``kinto migrate`` command is run.\n\n :param bool dry_run: simulate instead of executing the operations.\n \"\"\"\n raise NotImplementedError\n\n def flush(self, auth=None):\n \"\"\"Remove **every** object from this storage.\n \"\"\"\n raise NotImplementedError\n\n def collection_timestamp(self, collection_id, parent_id, auth=None):\n \"\"\"Get the highest timestamp of every objects in this `collection_id` for\n this `parent_id`.\n\n .. note::\n\n This should take deleted objects into account.\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :returns: the latest timestamp of the collection.\n :rtype: int\n \"\"\"\n raise NotImplementedError\n\n def create(self, collection_id, parent_id, record, id_generator=None,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n \"\"\"Create the specified `object` in this `collection_id` for this `parent_id`.\n Assign the id to the object, using the attribute\n :attr:`kinto.core.resource.model.Model.id_field`.\n\n .. 
note::\n\n This will update the collection timestamp.\n\n :raises: :exc:`kinto.core.storage.exceptions.UnicityError`\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n :param dict record: the object to create.\n\n :returns: the newly created object.\n :rtype: dict\n \"\"\"\n raise NotImplementedError\n\n def get(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n \"\"\"Retrieve the object with specified `object_id`, or raise error\n if not found.\n\n :raises: :exc:`kinto.core.storage.exceptions.RecordNotFoundError`\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :param str object_id: unique identifier of the object\n\n :returns: the object object.\n :rtype: dict\n \"\"\"\n raise NotImplementedError\n\n def update(self, collection_id, parent_id, object_id, record,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n \"\"\"Overwrite the `object` with the specified `object_id`.\n\n If the specified id is not found, the object is created with the\n specified id.\n\n .. note::\n\n This will update the collection timestamp.\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n :param str object_id: unique identifier of the object\n :param dict record: the object to update or create.\n\n :returns: the updated object.\n :rtype: dict\n \"\"\"\n raise NotImplementedError\n\n def delete(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD, with_deleted=True,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None, last_modified=None):\n \"\"\"Delete the object with specified `object_id`, and raise error\n if not found.\n\n Deleted objects must be removed from the database, but their ids and\n timestamps of deletion must be tracked for synchronization purposes.\n (See :meth:`kinto.core.storage.StorageBase.get_all`)\n\n .. 
note::\n\n This will update the collection timestamp.\n\n :raises: :exc:`kinto.core.storage.exceptions.RecordNotFoundError`\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :param str object_id: unique identifier of the object\n :param bool with_deleted: track deleted record with a tombstone\n\n :returns: the deleted object, with minimal set of attributes.\n :rtype: dict\n \"\"\"\n raise NotImplementedError\n\n def delete_all(self, collection_id, parent_id, filters=None,\n id_field=DEFAULT_ID_FIELD, with_deleted=True,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None):\n \"\"\"Delete all objects in this `collection_id` for this `parent_id`.\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :param filters: Optionnally filter the objects to delete.\n :type filters: list of :class:`kinto.core.storage.Filter`\n :param bool with_deleted: track deleted records with a tombstone\n\n :returns: the list of deleted objects, with minimal set of attributes.\n :rtype: list\n \"\"\"\n raise NotImplementedError\n\n def purge_deleted(self, collection_id, parent_id, before=None,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n \"\"\"Delete all deleted object tombstones in this `collection_id`\n for this `parent_id`.\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :param int before: Optionnal timestamp to limit deletion (exclusive)\n\n :returns: The number of deleted objects.\n :rtype: int\n\n \"\"\"\n raise NotImplementedError\n\n def get_all(self, collection_id, parent_id, filters=None, sorting=None,\n pagination_rules=None, limit=None, include_deleted=False,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None):\n \"\"\"Retrieve all objects in this `collection_id` for this `parent_id`.\n\n :param str collection_id: the collection id.\n :param str parent_id: the collection parent.\n\n :param filters: Optionally filter the objects by their attribute.\n Each filter in this list is a tuple of a field, a value and a\n comparison (see `kinto.core.utils.COMPARISON`). All filters\n are combined using *AND*.\n :type filters: list of :class:`kinto.core.storage.Filter`\n\n :param sorting: Optionnally sort the objects by attribute.\n Each sort instruction in this list refers to a field and a\n direction (negative means descending). All sort instructions are\n cumulative.\n :type sorting: list of :class:`kinto.core.storage.Sort`\n\n :param pagination_rules: Optionnally paginate the list of objects.\n This list of rules aims to reduce the set of objects to the current\n page. 
A rule is a list of filters (see `filters` parameter),\n and all rules are combined using *OR*.\n :type pagination_rules: list of list of\n :class:`kinto.core.storage.Filter`\n\n :param int limit: Optionnally limit the number of objects to be\n retrieved.\n\n :param bool include_deleted: Optionnally include the deleted objects\n that match the filters.\n\n :returns: the limited list of objects, and the total number of\n matching objects in the collection (deleted ones excluded).\n :rtype: tuple\n \"\"\"\n raise NotImplementedError\n\n\ndef heartbeat(backend):\n def ping(request):\n \"\"\"Test that storage is operationnal.\n\n :param request: current request object\n :type request: :class:`~pyramid:pyramid.request.Request`\n :returns: ``True`` is everything is ok, ``False`` otherwise.\n :rtype: bool\n \"\"\"\n try:\n auth = request.headers.get('Authorization')\n if asbool(request.registry.settings.get('readonly')):\n # Do not try to write in readonly mode.\n backend.get_all(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,\n auth=auth)\n else:\n if random.SystemRandom().random() < _HEARTBEAT_DELETE_RATE:\n backend.delete_all(_HEARTBEAT_COLLECTION_ID,\n _HEART_PARENT_ID, auth=auth)\n else:\n backend.create(_HEARTBEAT_COLLECTION_ID, _HEART_PARENT_ID,\n _HEARTBEAT_RECORD, auth=auth)\n return True\n except:\n logger.exception(\"Heartbeat Error\")\n return False\n\n return ping\n", "path": "kinto/core/storage/__init__.py"}]} | 3,301 | 152 |
gh_patches_debug_25403 | rasdani/github-patches | git_diff | encode__uvicorn-701 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
UnicodeDecodeError when decoding bad headers
Someone (or some bot) was spamming my server with requests probing for potential vulnerabilities.
One of the attacks targets a potential vulnerability in PHP and sets the `x-forwarded-for` header to the following value:
```
}__test|O:21:"JDatabaseDriverMysqli":3:{s:2:"fc";O:17:"JSimplepieFactory":0:{}s:21:"\\0\\0\\0disconnectHandlers";a:1:{i:0;a:2:{i:0;O:9:"SimplePie":5:{s:8:"sanitize";O:20:"JDatabaseDriverMysql":0:{}s:8:"feed_url";s:56:"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit";s:19:"cache_name_function";s:6:"assert";s:5:"cache";b:1;s:11:"cache_class";O:20:"JDatabaseDriverMysql":0:{}}i:1;s:4:"init";}}s:13:"\\0\\0\\0connection";b:1;}\xf0\xfd\xfd\xfd, ...
```
This leads to this exception:
```
Exception in ASGI application
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py", line 385, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py", line 40, in __call__
x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
UnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128)
```
As it's due to a malformed header from the client, I would expect this to be a 400 error instead?
</issue>
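The project's fix, reproduced later in this record, side-steps the crash by decoding the header as Latin-1 rather than ASCII: Latin-1 maps every byte 0x00–0xFF to a code point, so the decode can never raise `UnicodeDecodeError` regardless of what a client sends. A minimal sketch of that approach:

```python
def client_from_forwarded_for(headers):
    """Extract (host, port) from X-Forwarded-For without risking a decode error."""
    raw = headers.get(b"x-forwarded-for", b"")
    # latin-1 is a total mapping over bytes; ascii rejects anything >= 0x80.
    x_forwarded_for = raw.decode("latin1")
    host = x_forwarded_for.split(",")[-1].strip()
    return (host, 0)
```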
<code>
[start of uvicorn/middleware/proxy_headers.py]
1 """
2 This middleware can be used when a known proxy is fronting the application,
3 and is trusted to be properly setting the `X-Forwarded-Proto` and
4 `X-Forwarded-For` headers with the connecting client information.
5
6 Modifies the `client` and `scheme` information so that they reference
7 the connecting client, rather that the connecting proxy.
8
9 https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies
10 """
11
12
13 class ProxyHeadersMiddleware:
14 def __init__(self, app, trusted_hosts="127.0.0.1"):
15 self.app = app
16 if isinstance(trusted_hosts, str):
17 self.trusted_hosts = [item.strip() for item in trusted_hosts.split(",")]
18 else:
19 self.trusted_hosts = trusted_hosts
20 self.always_trust = "*" in self.trusted_hosts
21
22 async def __call__(self, scope, receive, send):
23 if scope["type"] in ("http", "websocket"):
24 client_addr = scope.get("client")
25 client_host = client_addr[0] if client_addr else None
26
27 if self.always_trust or client_host in self.trusted_hosts:
28 headers = dict(scope["headers"])
29
30 if b"x-forwarded-proto" in headers:
31 # Determine if the incoming request was http or https based on
32 # the X-Forwarded-Proto header.
33 x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii")
34 scope["scheme"] = x_forwarded_proto.strip()
35
36 if b"x-forwarded-for" in headers:
37 # Determine the client address from the last trusted IP in the
38 # X-Forwarded-For header. We've lost the connecting client's port
39 # information by now, so only include the host.
40 x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
41 host = x_forwarded_for.split(",")[-1].strip()
42 port = 0
43 scope["client"] = (host, port)
44
45 return await self.app(scope, receive, send)
46
[end of uvicorn/middleware/proxy_headers.py]
</code>
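For context, wiring the middleware around an ASGI application uses the constructor shown above; the tiny `app` below is only a placeholder:

```python
async def app(scope, receive, send):
    # Placeholder ASGI application.
    await send({"type": "http.response.start", "status": 200, "headers": []})
    await send({"type": "http.response.body", "body": b"ok"})


# Trust every proxy ("*"), or pass a comma-separated string / list of hosts.
wrapped_app = ProxyHeadersMiddleware(app, trusted_hosts="*")
```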
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py
--- a/uvicorn/middleware/proxy_headers.py
+++ b/uvicorn/middleware/proxy_headers.py
@@ -30,14 +30,14 @@
if b"x-forwarded-proto" in headers:
# Determine if the incoming request was http or https based on
# the X-Forwarded-Proto header.
- x_forwarded_proto = headers[b"x-forwarded-proto"].decode("ascii")
+ x_forwarded_proto = headers[b"x-forwarded-proto"].decode("latin1")
scope["scheme"] = x_forwarded_proto.strip()
if b"x-forwarded-for" in headers:
# Determine the client address from the last trusted IP in the
# X-Forwarded-For header. We've lost the connecting client's port
# information by now, so only include the host.
- x_forwarded_for = headers[b"x-forwarded-for"].decode("ascii")
+ x_forwarded_for = headers[b"x-forwarded-for"].decode("latin1")
host = x_forwarded_for.split(",")[-1].strip()
port = 0
scope["client"] = (host, port)
| {"golden_diff": "diff --git a/uvicorn/middleware/proxy_headers.py b/uvicorn/middleware/proxy_headers.py\n--- a/uvicorn/middleware/proxy_headers.py\n+++ b/uvicorn/middleware/proxy_headers.py\n@@ -30,14 +30,14 @@\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n- x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n+ x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"latin1\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n \n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n- x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n+ x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"latin1\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n", "issue": "UnicodeDecodeError when decoding bad headers\nSomeone (or some bot) was spamming my sever with requests to potential vulnerabilities.\r\n\r\nOne of the attacks is for a potential vulnerability in php, which sets the the `x-forwarded-for` header to the following value:\r\n```\r\n}__test|O:21:\"JDatabaseDriverMysqli\":3:{s:2:\"fc\";O:17:\"JSimplepieFactory\":0:{}s:21:\"\\\\0\\\\0\\\\0disconnectHandlers\";a:1:{i:0;a:2:{i:0;O:9:\"SimplePie\":5:{s:8:\"sanitize\";O:20:\"JDatabaseDriverMysql\":0:{}s:8:\"feed_url\";s:56:\"die(md5(DIRECTORY_SEPARATOR));JFactory::getConfig();exit\";s:19:\"cache_name_function\";s:6:\"assert\";s:5:\"cache\";b:1;s:11:\"cache_class\";O:20:\"JDatabaseDriverMysql\":0:{}}i:1;s:4:\"init\";}}s:13:\"\\\\0\\\\0\\\\0connection\";b:1;}\\xf0\\xfd\\xfd\\xfd, ...\r\n```\r\n\r\nThis leads to this exception:\r\n\r\n```\r\nException in ASGI application\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/protocols/http/httptools_impl.py\", line 385, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/usr/local/lib/python3.8/site-packages/uvicorn/middleware/proxy_headers.py\", line 40, in __call__\r\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xf0 in position 427: ordinal not in range(128)\r\n```\r\n\r\nAs it's due to malformed header from the client, I would expect this should be a 400 error instead?\n", "before_files": [{"content": "\"\"\"\nThis middleware can be used when a known proxy is fronting the application,\nand is trusted to be properly setting the `X-Forwarded-Proto` and\n`X-Forwarded-For` headers with the connecting client information.\n\nModifies the `client` and `scheme` information so that they reference\nthe connecting client, rather that the connecting proxy.\n\nhttps://developer.mozilla.org/en-US/docs/Web/HTTP/Headers#Proxies\n\"\"\"\n\n\nclass ProxyHeadersMiddleware:\n def __init__(self, app, trusted_hosts=\"127.0.0.1\"):\n self.app = app\n if isinstance(trusted_hosts, str):\n self.trusted_hosts = [item.strip() for item in trusted_hosts.split(\",\")]\n else:\n self.trusted_hosts = trusted_hosts\n self.always_trust = \"*\" in self.trusted_hosts\n\n async def __call__(self, scope, receive, send):\n if scope[\"type\"] in (\"http\", \"websocket\"):\n client_addr = scope.get(\"client\")\n client_host = client_addr[0] if client_addr else None\n\n if self.always_trust or client_host in self.trusted_hosts:\n headers = 
dict(scope[\"headers\"])\n\n if b\"x-forwarded-proto\" in headers:\n # Determine if the incoming request was http or https based on\n # the X-Forwarded-Proto header.\n x_forwarded_proto = headers[b\"x-forwarded-proto\"].decode(\"ascii\")\n scope[\"scheme\"] = x_forwarded_proto.strip()\n\n if b\"x-forwarded-for\" in headers:\n # Determine the client address from the last trusted IP in the\n # X-Forwarded-For header. We've lost the connecting client's port\n # information by now, so only include the host.\n x_forwarded_for = headers[b\"x-forwarded-for\"].decode(\"ascii\")\n host = x_forwarded_for.split(\",\")[-1].strip()\n port = 0\n scope[\"client\"] = (host, port)\n\n return await self.app(scope, receive, send)\n", "path": "uvicorn/middleware/proxy_headers.py"}]} | 1,509 | 276 |
gh_patches_debug_3991 | rasdani/github-patches | git_diff | sublimelsp__LSP-450 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't move cursor at the end when populating the diagnostics panel
When the diagnostics are populated, the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56).
Is there a reason for this?
Because of this I can't use the `f4` keybinding to go to the next result when the panel is opened.
Instead I first need to press `shift + f4`, which goes in the backward direction.
Here is a simple solution.
```diff
def run(self, edit, characters):
self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
- # Move cursor to the end
+ # Clear the selection
selection = self.view.sel()
selection.clear()
- selection.add(sublime.Region(self.view.size(), self.view.size()))
```
</issue>
<code>
[start of plugin/core/panels.py]
1 import sublime
2 import sublime_plugin
3
4
5 OUTPUT_PANEL_SETTINGS = {
6 "auto_indent": False,
7 "draw_indent_guides": False,
8 "draw_white_space": "None",
9 "gutter": False,
10 'is_widget': True,
11 "line_numbers": False,
12 "margin": 3,
13 "match_brackets": False,
14 "scroll_past_end": False,
15 "tab_size": 4,
16 "translate_tabs_to_spaces": False,
17 "word_wrap": False
18 }
19
20
21 def create_output_panel(window: sublime.Window, name: str) -> sublime.View:
22 panel = window.create_output_panel(name)
23 settings = panel.settings()
24 for key, value in OUTPUT_PANEL_SETTINGS.items():
25 settings.set(key, value)
26 return panel
27
28
29 def destroy_output_panels(window: sublime.Window):
30 for panel_name in ["references", "diagnostics"]:
31 window.destroy_output_panel(panel_name)
32
33
34 class LspClearPanelCommand(sublime_plugin.TextCommand):
35 """
36 A clear_panel command to clear the error panel.
37 """
38
39 def run(self, edit):
40 self.view.set_read_only(False)
41 self.view.erase(edit, sublime.Region(0, self.view.size()))
42 self.view.set_read_only(True)
43
44
45 class LspUpdatePanelCommand(sublime_plugin.TextCommand):
46 """
47 A update_panel command to update the error panel with new text.
48 """
49
50 def run(self, edit, characters):
51 self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
52
53 # Move cursor to the end
54 selection = self.view.sel()
55 selection.clear()
56 selection.add(sublime.Region(self.view.size(), self.view.size()))
57
[end of plugin/core/panels.py]
</code>
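Putting the report's suggestion next to the command above: the corrected `run()` clears the selection without re-adding a caret at the end, so a subsequent `F4` ("next result") starts from the first diagnostic. A sketch of the adjusted command, with the usual way such a `TextCommand` is invoked shown as a comment (the argument name `characters` matches the class above; the caller shown is assumed):

```python
import sublime
import sublime_plugin


class LspUpdatePanelCommand(sublime_plugin.TextCommand):
    """Replace the panel contents without leaving the caret at the end."""

    def run(self, edit, characters):
        self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
        # Clear the selection; do not add a region at self.view.size().
        self.view.sel().clear()


# Typical invocation from elsewhere in the plugin:
#   panel.run_command("lsp_update_panel", {"characters": new_panel_text})
```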
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/plugin/core/panels.py b/plugin/core/panels.py
--- a/plugin/core/panels.py
+++ b/plugin/core/panels.py
@@ -50,7 +50,6 @@
     def run(self, edit, characters):
         self.view.replace(edit, sublime.Region(0, self.view.size()), characters)
 
-        # Move cursor to the end
+        # Clear the selection
         selection = self.view.sel()
         selection.clear()
-        selection.add(sublime.Region(self.view.size(), self.view.size()))
| {"golden_diff": "diff --git a/plugin/core/panels.py b/plugin/core/panels.py\n--- a/plugin/core/panels.py\n+++ b/plugin/core/panels.py\n@@ -50,7 +50,6 @@\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n \n- # Move cursor to the end\n+ # Clear the selection\n selection = self.view.sel()\n selection.clear()\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "issue": "Don't move cursor at the end when populating the diagnostics panel\nWhen the diagnostics are populated the cursor in the panel is moved to the end. See [here](https://github.com/tomv564/LSP/blob/2869978d8b46d717da27eb0ac7a7e234840b218d/plugin/core/panels.py#L53-L56).\r\n\r\nIs there a reason for this? \r\n\r\nBecause I can't use the `f4` keybinding to goto the next result when it is opened.\r\nInstead I need first to press the `shift + f4`, which is the backward direction.\r\n\r\n\r\nHere is a simple solution.\r\n```diff\r\n def run(self, edit, characters):\r\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\r\n \r\n- # Move cursor to the end\r\n+ # Clear the selection\r\n selection = self.view.sel()\r\n selection.clear()\r\n- selection.add(sublime.Region(self.view.size(), self.view.size()))\r\n```\r\n\n", "before_files": [{"content": "import sublime\nimport sublime_plugin\n\n\nOUTPUT_PANEL_SETTINGS = {\n \"auto_indent\": False,\n \"draw_indent_guides\": False,\n \"draw_white_space\": \"None\",\n \"gutter\": False,\n 'is_widget': True,\n \"line_numbers\": False,\n \"margin\": 3,\n \"match_brackets\": False,\n \"scroll_past_end\": False,\n \"tab_size\": 4,\n \"translate_tabs_to_spaces\": False,\n \"word_wrap\": False\n}\n\n\ndef create_output_panel(window: sublime.Window, name: str) -> sublime.View:\n panel = window.create_output_panel(name)\n settings = panel.settings()\n for key, value in OUTPUT_PANEL_SETTINGS.items():\n settings.set(key, value)\n return panel\n\n\ndef destroy_output_panels(window: sublime.Window):\n for panel_name in [\"references\", \"diagnostics\"]:\n window.destroy_output_panel(panel_name)\n\n\nclass LspClearPanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A clear_panel command to clear the error panel.\n \"\"\"\n\n def run(self, edit):\n self.view.set_read_only(False)\n self.view.erase(edit, sublime.Region(0, self.view.size()))\n self.view.set_read_only(True)\n\n\nclass LspUpdatePanelCommand(sublime_plugin.TextCommand):\n \"\"\"\n A update_panel command to update the error panel with new text.\n \"\"\"\n\n def run(self, edit, characters):\n self.view.replace(edit, sublime.Region(0, self.view.size()), characters)\n\n # Move cursor to the end\n selection = self.view.sel()\n selection.clear()\n selection.add(sublime.Region(self.view.size(), self.view.size()))\n", "path": "plugin/core/panels.py"}]} | 1,227 | 113 |
gh_patches_debug_33059 | rasdani/github-patches | git_diff | translate__pootle-6734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Adjust initdb to use {POOTLE_TRANSLATION_DIRECTORY} placeholder
</issue>
<code>
[start of pootle/core/initdb.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10 import os
11
12 from django.conf import settings
13 from django.contrib.auth import get_user_model
14 from django.contrib.auth.models import Permission
15 from django.contrib.contenttypes.models import ContentType
16 from django.db import transaction
17 from django.utils.translation import ugettext_noop as _
18
19 from pootle.core.contextmanagers import keep_data
20 from pootle.core.models import Revision
21 from pootle.core.signals import update_revisions
22 from pootle_app.models import Directory
23 from pootle_app.models.permissions import PermissionSet, get_pootle_permission
24 from pootle_format.models import Format
25 from pootle_fs.utils import FSPlugin
26 from pootle_language.models import Language
27 from pootle_project.models import Project
28 from staticpages.models import StaticPage as Announcement
29
30
31 logger = logging.getLogger(__name__)
32
33
34 class InitDB(object):
35
36 def init_db(self, create_projects=True):
37 with transaction.atomic():
38 with keep_data(signals=(update_revisions, )):
39 self._init_db(create_projects)
40
41 def _init_db(self, create_projects=True):
42 """Populate the database with default initial data.
43
44 This creates the default database to get a working Pootle installation.
45 """
46 self.create_formats()
47 self.create_revision()
48 self.create_essential_users()
49 self.create_root_directories()
50 self.create_template_languages()
51 self.create_default_languages()
52 if create_projects:
53 self.create_terminology_project()
54 self.create_pootle_permissions()
55 self.create_pootle_permission_sets()
56 if create_projects:
57 self.create_default_projects()
58
59 def create_formats(self):
60 from pootle.core.delegate import formats
61
62 formats.get().initialize()
63
64 def _create_object(self, model_klass, **criteria):
65 instance, created = model_klass.objects.get_or_create(**criteria)
66 if created:
67 logger.debug(
68 "Created %s: '%s'",
69 instance.__class__.__name__, instance)
70 else:
71 logger.debug(
72 "%s already exists - skipping: '%s'",
73 instance.__class__.__name__, instance)
74 return instance, created
75
76 def _create_pootle_user(self, **criteria):
77 user, created = self._create_object(get_user_model(), **criteria)
78 if created:
79 user.set_unusable_password()
80 user.save()
81 return user
82
83 def _create_pootle_permission_set(self, permissions, **criteria):
84 permission_set, created = self._create_object(PermissionSet,
85 **criteria)
86 if created:
87 permission_set.positive_permissions.set(permissions)
88 permission_set.save()
89 return permission_set
90
91 def create_revision(self):
92 Revision.initialize()
93
94 def create_essential_users(self):
95 """Create the 'default' and 'nobody' User instances.
96
97 These users are required for Pootle's permission system.
98 """
99 # The nobody user is used to represent an anonymous user in cases
100 # where we need to associate model information with such a user. An
101 # example is in the permission system: we need a way to store rights
102 # for anonymous users; thus we use the nobody user.
103 criteria = {
104 'username': u"nobody",
105 'full_name': u"any anonymous user",
106 'is_active': True,
107 }
108 self._create_pootle_user(**criteria)
109
110 # The 'default' user represents any valid, non-anonymous user and is
111 # used to associate information any such user. An example is in the
112 # permission system: we need a way to store default rights for users.
113 # We use the 'default' user for this.
114 #
115 # In a future version of Pootle we should think about using Django's
116 # groups to do better permissions handling.
117 criteria = {
118 'username': u"default",
119 'full_name': u"any authenticated user",
120 'is_active': True,
121 }
122 self._create_pootle_user(**criteria)
123
124 def create_pootle_permissions(self):
125 """Create Pootle's directory level permissions."""
126
127 args = {
128 'app_label': "pootle_app",
129 'model': "directory",
130 }
131
132 pootle_content_type = self._create_object(ContentType, **args)[0]
133 pootle_content_type.save()
134
135 # Create the permissions.
136 permissions = [
137 {
138 'name': _("Can access a project"),
139 'codename': "view",
140 },
141 {
142 'name': _("Cannot access a project"),
143 'codename': "hide",
144 },
145 {
146 'name': _("Can make a suggestion for a translation"),
147 'codename': "suggest",
148 },
149 {
150 'name': _("Can submit a translation"),
151 'codename': "translate",
152 },
153 {
154 'name': _("Can review suggestions"),
155 'codename': "review",
156 },
157 {
158 'name': _("Can perform administrative tasks"),
159 'codename': "administrate",
160 },
161 ]
162
163 criteria = {
164 'content_type': pootle_content_type,
165 }
166
167 for permission in permissions:
168 criteria.update(permission)
169 self._create_object(Permission, **criteria)
170
171 def create_pootle_permission_sets(self):
172 """Create the default permission set for the 'nobody' and 'default' users.
173
174 'nobody' is the anonymous (non-logged in) user, and 'default' is the
175 logged in user.
176 """
177 User = get_user_model()
178
179 nobody = User.objects.get(username='nobody')
180 default = User.objects.get(username='default')
181
182 view = get_pootle_permission('view')
183 suggest = get_pootle_permission('suggest')
184 translate = get_pootle_permission('translate')
185
186 # Default permissions for tree root.
187 criteria = {
188 'user': nobody,
189 'directory': Directory.objects.root,
190 }
191 self._create_pootle_permission_set([view, suggest], **criteria)
192
193 criteria['user'] = default
194 self._create_pootle_permission_set(
195 [view, suggest, translate], **criteria)
196
197 # Default permissions for templates language.
198 # Override with no permissions for templates language.
199 criteria = {
200 'user': nobody,
201 'directory': Directory.objects.get(pootle_path="/templates/"),
202 }
203 self._create_pootle_permission_set([], **criteria)
204
205 criteria['user'] = default
206 self._create_pootle_permission_set([], **criteria)
207
208 def require_english(self):
209 """Create the English Language item."""
210 criteria = {
211 'code': "en",
212 'fullname': u"English",
213 'nplurals': 2,
214 'pluralequation': "(n != 1)",
215 }
216 en = self._create_object(Language, **criteria)[0]
217 return en
218
219 def create_root_directories(self):
220 """Create the root Directory items."""
221 root = self._create_object(Directory, **dict(name=""))[0]
222 self._create_object(Directory, **dict(name="projects", parent=root))
223
224 def create_template_languages(self):
225 """Create the 'templates' and English languages.
226
227 The 'templates' language is used to give users access to the
228 untranslated template files.
229 """
230 self._create_object(
231 Language, **dict(code="templates", fullname="Templates"))
232 self.require_english()
233
234 def create_terminology_project(self):
235 """Create the terminology project.
236
237 The terminology project is used to display terminology suggestions
238 while translating.
239 """
240 criteria = {
241 'code': "terminology",
242 'fullname': u"Terminology",
243 'source_language': self.require_english(),
244 'checkstyle': "terminology",
245 }
246 po = Format.objects.get(name="po")
247 terminology = self._create_object(Project, **criteria)[0]
248 terminology.filetypes.add(po)
249 terminology.config["pootle_fs.fs_url"] = os.path.join(
250 settings.POOTLE_TRANSLATION_DIRECTORY,
251 terminology.code)
252 terminology.config["pootle_fs.fs_type"] = "localfs"
253 terminology.config["pootle_fs.translation_mappings"] = dict(
254 default="/<language_code>/<dir_path>/<filename>.<ext>")
255 plugin = FSPlugin(terminology)
256 plugin.fetch()
257 plugin.add()
258 plugin.sync()
259
260 def create_default_projects(self):
261 """Create the default projects that we host.
262
263 You might want to add your projects here, although you can also add
264 things through the web interface later.
265 """
266 en = self.require_english()
267 po = Format.objects.get(name="po")
268
269 criteria = {
270 'code': u"tutorial",
271 'source_language': en,
272 'fullname': u"Tutorial",
273 'checkstyle': "standard"}
274 tutorial = self._create_object(Project, **criteria)[0]
275 tutorial.filetypes.add(po)
276 tutorial.config["pootle_fs.fs_url"] = os.path.join(
277 settings.POOTLE_TRANSLATION_DIRECTORY,
278 tutorial.code)
279 tutorial.config["pootle_fs.fs_type"] = "localfs"
280 tutorial.config["pootle_fs.translation_mappings"] = dict(
281 default="/<language_code>/<dir_path>/<filename>.<ext>")
282 plugin = FSPlugin(tutorial)
283 plugin.fetch()
284 plugin.add()
285 plugin.sync()
286 criteria = {
287 'active': True,
288 'title': "Project instructions",
289 'body': (
290 'Tutorial project where users can play with Pootle and learn '
291 'more about translation and localisation.\n'
292 '\n'
293 'For more help on localisation, visit the [localization '
294 'guide](http://docs.translatehouse.org/projects/'
295 'localization-guide/en/latest/guide/start.html).'),
296 'virtual_path': "announcements/projects/"+tutorial.code,
297 }
298 self._create_object(Announcement, **criteria)
299
300 def create_default_languages(self):
301 """Create the default languages."""
302 from translate.lang import data, factory
303
304 # import languages from toolkit
305 for code in data.languages.keys():
306 ttk_lang = factory.getlanguage(code)
307 criteria = {
308 'code': code,
309 'fullname': ttk_lang.fullname,
310 'nplurals': ttk_lang.nplurals,
311 'pluralequation': ttk_lang.pluralequation}
312 if hasattr(ttk_lang, "specialchars"):
313 criteria['specialchars'] = ttk_lang.specialchars
314 self._create_object(Language, **criteria)
315
[end of pootle/core/initdb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py
--- a/pootle/core/initdb.py
+++ b/pootle/core/initdb.py
@@ -7,9 +7,7 @@
 # AUTHORS file for copyright and authorship information.
 
 import logging
-import os
 
-from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
@@ -246,9 +244,9 @@
po = Format.objects.get(name="po")
terminology = self._create_object(Project, **criteria)[0]
terminology.filetypes.add(po)
- terminology.config["pootle_fs.fs_url"] = os.path.join(
- settings.POOTLE_TRANSLATION_DIRECTORY,
- terminology.code)
+ terminology.config["pootle_fs.fs_url"] = (
+ "{POOTLE_TRANSLATION_DIRECTORY}%s"
+ % terminology.code)
terminology.config["pootle_fs.fs_type"] = "localfs"
terminology.config["pootle_fs.translation_mappings"] = dict(
default="/<language_code>/<dir_path>/<filename>.<ext>")
@@ -273,9 +271,9 @@
'checkstyle': "standard"}
tutorial = self._create_object(Project, **criteria)[0]
tutorial.filetypes.add(po)
- tutorial.config["pootle_fs.fs_url"] = os.path.join(
- settings.POOTLE_TRANSLATION_DIRECTORY,
- tutorial.code)
+ tutorial.config["pootle_fs.fs_url"] = (
+ "{POOTLE_TRANSLATION_DIRECTORY}%s"
+ % tutorial.code)
tutorial.config["pootle_fs.fs_type"] = "localfs"
tutorial.config["pootle_fs.translation_mappings"] = dict(
default="/<language_code>/<dir_path>/<filename>.<ext>")
| {"golden_diff": "diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py\n--- a/pootle/core/initdb.py\n+++ b/pootle/core/initdb.py\n@@ -7,9 +7,7 @@\n # AUTHORS file for copyright and authorship information.\n \n import logging\n-import os\n \n-from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.contrib.auth.models import Permission\n from django.contrib.contenttypes.models import ContentType\n@@ -246,9 +244,9 @@\n po = Format.objects.get(name=\"po\")\n terminology = self._create_object(Project, **criteria)[0]\n terminology.filetypes.add(po)\n- terminology.config[\"pootle_fs.fs_url\"] = os.path.join(\n- settings.POOTLE_TRANSLATION_DIRECTORY,\n- terminology.code)\n+ terminology.config[\"pootle_fs.fs_url\"] = (\n+ \"{POOTLE_TRANSLATION_DIRECTORY}%s\"\n+ % terminology.code)\n terminology.config[\"pootle_fs.fs_type\"] = \"localfs\"\n terminology.config[\"pootle_fs.translation_mappings\"] = dict(\n default=\"/<language_code>/<dir_path>/<filename>.<ext>\")\n@@ -273,9 +271,9 @@\n 'checkstyle': \"standard\"}\n tutorial = self._create_object(Project, **criteria)[0]\n tutorial.filetypes.add(po)\n- tutorial.config[\"pootle_fs.fs_url\"] = os.path.join(\n- settings.POOTLE_TRANSLATION_DIRECTORY,\n- tutorial.code)\n+ tutorial.config[\"pootle_fs.fs_url\"] = (\n+ \"{POOTLE_TRANSLATION_DIRECTORY}%s\"\n+ % tutorial.code)\n tutorial.config[\"pootle_fs.fs_type\"] = \"localfs\"\n tutorial.config[\"pootle_fs.translation_mappings\"] = dict(\n default=\"/<language_code>/<dir_path>/<filename>.<ext>\")\n", "issue": "Adjust initdb to use {POOTLE_TRANSLATION_DIRECTORY} placeholder\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import transaction\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.contextmanagers import keep_data\nfrom pootle.core.models import Revision\nfrom pootle.core.signals import update_revisions\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_format.models import Format\nfrom pootle_fs.utils import FSPlugin\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom staticpages.models import StaticPage as Announcement\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InitDB(object):\n\n def init_db(self, create_projects=True):\n with transaction.atomic():\n with keep_data(signals=(update_revisions, )):\n self._init_db(create_projects)\n\n def _init_db(self, create_projects=True):\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n self.create_formats()\n self.create_revision()\n self.create_essential_users()\n self.create_root_directories()\n self.create_template_languages()\n self.create_default_languages()\n if create_projects:\n self.create_terminology_project()\n self.create_pootle_permissions()\n self.create_pootle_permission_sets()\n if create_projects:\n self.create_default_projects()\n\n def create_formats(self):\n from pootle.core.delegate import formats\n\n formats.get().initialize()\n\n def _create_object(self, model_klass, **criteria):\n instance, created = model_klass.objects.get_or_create(**criteria)\n if created:\n logger.debug(\n \"Created %s: '%s'\",\n instance.__class__.__name__, instance)\n else:\n logger.debug(\n \"%s already exists - skipping: '%s'\",\n instance.__class__.__name__, instance)\n return instance, created\n\n def _create_pootle_user(self, **criteria):\n user, created = self._create_object(get_user_model(), **criteria)\n if created:\n user.set_unusable_password()\n user.save()\n return user\n\n def _create_pootle_permission_set(self, permissions, **criteria):\n permission_set, created = self._create_object(PermissionSet,\n **criteria)\n if created:\n permission_set.positive_permissions.set(permissions)\n permission_set.save()\n return permission_set\n\n def create_revision(self):\n Revision.initialize()\n\n def create_essential_users(self):\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n # The nobody user is used to represent an anonymous user in cases\n # where we need to associate model information with such a user. An\n # example is in the permission system: we need a way to store rights\n # for anonymous users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n self._create_pootle_user(**criteria)\n\n # The 'default' user represents any valid, non-anonymous user and is\n # used to associate information any such user. 
An example is in the\n # permission system: we need a way to store default rights for users.\n # We use the 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n self._create_pootle_user(**criteria)\n\n def create_pootle_permissions(self):\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n\n pootle_content_type = self._create_object(ContentType, **args)[0]\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review suggestions\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can perform administrative tasks\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n self._create_object(Permission, **criteria)\n\n def create_pootle_permission_sets(self):\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the\n logged in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n self._create_pootle_permission_set([view, suggest], **criteria)\n\n criteria['user'] = default\n self._create_pootle_permission_set(\n [view, suggest, translate], **criteria)\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n self._create_pootle_permission_set([], **criteria)\n\n criteria['user'] = default\n self._create_pootle_permission_set([], **criteria)\n\n def require_english(self):\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en = self._create_object(Language, **criteria)[0]\n return en\n\n def create_root_directories(self):\n \"\"\"Create the root Directory items.\"\"\"\n root = self._create_object(Directory, **dict(name=\"\"))[0]\n self._create_object(Directory, **dict(name=\"projects\", parent=root))\n\n def create_template_languages(self):\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the\n untranslated template files.\n \"\"\"\n self._create_object(\n Language, **dict(code=\"templates\", fullname=\"Templates\"))\n self.require_english()\n\n def create_terminology_project(self):\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions\n while translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': 
u\"Terminology\",\n 'source_language': self.require_english(),\n 'checkstyle': \"terminology\",\n }\n po = Format.objects.get(name=\"po\")\n terminology = self._create_object(Project, **criteria)[0]\n terminology.filetypes.add(po)\n terminology.config[\"pootle_fs.fs_url\"] = os.path.join(\n settings.POOTLE_TRANSLATION_DIRECTORY,\n terminology.code)\n terminology.config[\"pootle_fs.fs_type\"] = \"localfs\"\n terminology.config[\"pootle_fs.translation_mappings\"] = dict(\n default=\"/<language_code>/<dir_path>/<filename>.<ext>\")\n plugin = FSPlugin(terminology)\n plugin.fetch()\n plugin.add()\n plugin.sync()\n\n def create_default_projects(self):\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add\n things through the web interface later.\n \"\"\"\n en = self.require_english()\n po = Format.objects.get(name=\"po\")\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\"}\n tutorial = self._create_object(Project, **criteria)[0]\n tutorial.filetypes.add(po)\n tutorial.config[\"pootle_fs.fs_url\"] = os.path.join(\n settings.POOTLE_TRANSLATION_DIRECTORY,\n tutorial.code)\n tutorial.config[\"pootle_fs.fs_type\"] = \"localfs\"\n tutorial.config[\"pootle_fs.translation_mappings\"] = dict(\n default=\"/<language_code>/<dir_path>/<filename>.<ext>\")\n plugin = FSPlugin(tutorial)\n plugin.fetch()\n plugin.add()\n plugin.sync()\n criteria = {\n 'active': True,\n 'title': \"Project instructions\",\n 'body': (\n 'Tutorial project where users can play with Pootle and learn '\n 'more about translation and localisation.\\n'\n '\\n'\n 'For more help on localisation, visit the [localization '\n 'guide](http://docs.translatehouse.org/projects/'\n 'localization-guide/en/latest/guide/start.html).'),\n 'virtual_path': \"announcements/projects/\"+tutorial.code,\n }\n self._create_object(Announcement, **criteria)\n\n def create_default_languages(self):\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n # import languages from toolkit\n for code in data.languages.keys():\n ttk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': ttk_lang.fullname,\n 'nplurals': ttk_lang.nplurals,\n 'pluralequation': ttk_lang.pluralequation}\n if hasattr(ttk_lang, \"specialchars\"):\n criteria['specialchars'] = ttk_lang.specialchars\n self._create_object(Language, **criteria)\n", "path": "pootle/core/initdb.py"}]} | 3,714 | 405 |
gh_patches_debug_35334 | rasdani/github-patches | git_diff | fedora-infra__bodhi-423 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
enhance release page with moar links
Like this page https://bodhi.fedoraproject.org/releases/F23
Let's extend the table on the left to include some more stats:
- number of updates in stable
- number of updates in testing
- number of security updates
- number of bugfix updates
etc.. every type, status, and request.. let's put them there.
_importantly_, let's make each one of those entries _also_ a link to a page that shows you the _list_ of each of those kinds of updates, so people don't have to manually frob the querystring to find what they want.
</issue>
<code>
[start of bodhi/services/releases.py]
1 # This program is free software; you can redistribute it and/or
2 # modify it under the terms of the GNU General Public License
3 # as published by the Free Software Foundation; either version 2
4 # of the License, or (at your option) any later version.
5 #
6 # This program is distributed in the hope that it will be useful,
7 # but WITHOUT ANY WARRANTY; without even the implied warranty of
8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 # GNU General Public License for more details.
10 #
11 # You should have received a copy of the GNU General Public License
12 # along with this program; if not, write to the Free Software
13 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14
15 import math
16
17 from cornice import Service
18 from pyramid.exceptions import HTTPNotFound
19 from sqlalchemy import func, distinct
20 from sqlalchemy.sql import or_
21
22 from bodhi import log
23 from bodhi.models import Update, Build, Package, Release
24 import bodhi.schemas
25 import bodhi.security
26 from bodhi.validators import (
27 validate_tags,
28 validate_enums,
29 validate_updates,
30 validate_packages,
31 validate_release,
32 )
33
34
35 release = Service(name='release', path='/releases/{name}',
36 description='Fedora Releases',
37 cors_origins=bodhi.security.cors_origins_ro)
38 releases = Service(name='releases', path='/releases/',
39 description='Fedora Releases',
40 # Note, this 'rw' is not a typo. the @comments service has
41 # a ``post`` section at the bottom.
42 cors_origins=bodhi.security.cors_origins_rw)
43
44 @release.get(accept="text/html", renderer="release.html")
45 def get_release_html(request):
46 id = request.matchdict.get('name')
47 release = Release.get(id, request.db)
48 if not release:
49 request.errors.add('body', 'name', 'No such release')
50 request.errors.status = HTTPNotFound.code
51 updates = request.db.query(Update).filter(
52 Update.release==release).order_by(
53 Update.date_submitted.desc())
54
55 updates_count = request.db.query(Update.date_submitted, Update.type).filter(
56 Update.release==release).order_by(
57 Update.date_submitted.desc())
58
59 date_commits = {}
60 dates = set()
61
62 for update in updates_count.all():
63 d = update.date_submitted
64 yearmonth = str(d.year) + '/' + str(d.month).zfill(2)
65 dates.add(yearmonth)
66 if not update.type.description in date_commits:
67 date_commits[update.type.description] = {}
68 if yearmonth in date_commits[update.type.description]:
69 date_commits[update.type.description][yearmonth] += 1
70 else:
71 date_commits[update.type.description][yearmonth] = 0
72
73 return dict(release=release,
74 latest_updates=updates.limit(25).all(),
75 count=updates.count(),
76 date_commits=date_commits,
77 dates = sorted(dates))
78
79 @release.get(accept=('application/json', 'text/json'), renderer='json')
80 @release.get(accept=('application/javascript'), renderer='jsonp')
81 def get_release_json(request):
82 id = request.matchdict.get('name')
83 release = Release.get(id, request.db)
84 if not release:
85 request.errors.add('body', 'name', 'No such release')
86 request.errors.status = HTTPNotFound.code
87 return release
88
89 @releases.get(accept="text/html", schema=bodhi.schemas.ListReleaseSchema,
90 renderer='releases.html',
91 validators=(validate_release, validate_updates,
92 validate_packages))
93 def query_releases_html(request):
94 def collect_releases(releases):
95 x = {}
96 for r in releases:
97 if r['state'] in x:
98 x[r['state']].append(r)
99 else:
100 x[r['state']] = [r]
101 return x
102
103 db = request.db
104 releases = db.query(Release).order_by(Release.id.desc()).all()
105 return dict(releases=collect_releases(releases))
106
107 @releases.get(accept=('application/json', 'text/json'),
108 schema=bodhi.schemas.ListReleaseSchema, renderer='json',
109 validators=(validate_release, validate_updates,
110 validate_packages))
111 def query_releases_json(request):
112 db = request.db
113 data = request.validated
114 query = db.query(Release)
115
116 name = data.get('name')
117 if name is not None:
118 query = query.filter(Release.name.like(name))
119
120 updates = data.get('updates')
121 if updates is not None:
122 query = query.join(Release.builds).join(Build.update)
123 args = \
124 [Update.title == update.title for update in updates] +\
125 [Update.alias == update.alias for update in updates]
126 query = query.filter(or_(*args))
127
128 packages = data.get('packages')
129 if packages is not None:
130 query = query.join(Release.builds).join(Build.package)
131 query = query.filter(or_(*[Package.id == p.id for p in packages]))
132
133 # We can't use ``query.count()`` here because it is naive with respect to
134 # all the joins that we're doing above.
135 count_query = query.with_labels().statement\
136 .with_only_columns([func.count(distinct(Release.id))])\
137 .order_by(None)
138 total = db.execute(count_query).scalar()
139
140 page = data.get('page')
141 rows_per_page = data.get('rows_per_page')
142 pages = int(math.ceil(total / float(rows_per_page)))
143 query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)
144
145 return dict(
146 releases=query.all(),
147 page=page,
148 pages=pages,
149 rows_per_page=rows_per_page,
150 total=total,
151 )
152
153 @releases.post(schema=bodhi.schemas.SaveReleaseSchema,
154 acl=bodhi.security.admin_only_acl, renderer='json',
155 validators=(validate_tags, validate_enums)
156 )
157 def save_release(request):
158 """Save a release
159
160 This entails either creating a new release, or editing an existing one. To
161 edit an existing release, the release's original name must be specified in
162 the ``edited`` parameter.
163 """
164 data = request.validated
165
166 edited = data.pop("edited", None)
167
168 # This has already been validated at this point, but we need to ditch
169 # it since the models don't care about a csrf argument.
170 data.pop('csrf_token')
171
172 try:
173 if edited is None:
174 log.info("Creating a new release: %s" % data['name'])
175 r = Release(**data)
176
177 else:
178 log.info("Editing release: %s" % edited)
179 r = request.db.query(Release).filter(Release.name==edited).one()
180 for k, v in data.items():
181 setattr(r, k, v)
182
183 except Exception as e:
184 log.exception(e)
185 request.errors.add('body', 'release',
186 'Unable to create update: %s' % e)
187 return
188
189
190 request.db.add(r)
191 request.db.flush()
192
193 return r
194
[end of bodhi/services/releases.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bodhi/services/releases.py b/bodhi/services/releases.py
--- a/bodhi/services/releases.py
+++ b/bodhi/services/releases.py
@@ -20,7 +20,14 @@
 from sqlalchemy.sql import or_
 
 from bodhi import log
-from bodhi.models import Update, Build, Package, Release
+from bodhi.models import (
+ Update,
+ UpdateStatus,
+ UpdateType,
+ Build,
+ Package,
+ Release,
+)
import bodhi.schemas
import bodhi.security
from bodhi.validators import (
@@ -70,11 +77,46 @@
else:
date_commits[update.type.description][yearmonth] = 0
+ base_count_query = request.db.query(Update)\
+ .filter(Update.release==release)
+
+ num_updates_pending = base_count_query\
+ .filter(Update.status==UpdateStatus.pending).count()
+ num_updates_testing = base_count_query\
+ .filter(Update.status==UpdateStatus.testing).count()
+ num_updates_stable = base_count_query\
+ .filter(Update.status==UpdateStatus.stable).count()
+ num_updates_unpushed = base_count_query\
+ .filter(Update.status==UpdateStatus.unpushed).count()
+ num_updates_obsolete = base_count_query\
+ .filter(Update.status==UpdateStatus.obsolete).count()
+
+ num_updates_security = base_count_query\
+ .filter(Update.type==UpdateType.security).count()
+ num_updates_bugfix = base_count_query\
+ .filter(Update.type==UpdateType.bugfix).count()
+ num_updates_enhancement = base_count_query\
+ .filter(Update.type==UpdateType.enhancement).count()
+ num_updates_newpackage = base_count_query\
+ .filter(Update.type==UpdateType.newpackage).count()
+
return dict(release=release,
latest_updates=updates.limit(25).all(),
count=updates.count(),
date_commits=date_commits,
- dates = sorted(dates))
+ dates=sorted(dates),
+
+ num_updates_pending=num_updates_pending,
+ num_updates_testing=num_updates_testing,
+ num_updates_stable=num_updates_stable,
+ num_updates_unpushed=num_updates_unpushed,
+ num_updates_obsolete=num_updates_obsolete,
+
+ num_updates_security=num_updates_security,
+ num_updates_bugfix=num_updates_bugfix,
+ num_updates_enhancement=num_updates_enhancement,
+ num_updates_newpackage=num_updates_newpackage,
+ )
@release.get(accept=('application/json', 'text/json'), renderer='json')
@release.get(accept=('application/javascript'), renderer='jsonp')
| {"golden_diff": "diff --git a/bodhi/services/releases.py b/bodhi/services/releases.py\n--- a/bodhi/services/releases.py\n+++ b/bodhi/services/releases.py\n@@ -20,7 +20,14 @@\n from sqlalchemy.sql import or_\n \n from bodhi import log\n-from bodhi.models import Update, Build, Package, Release\n+from bodhi.models import (\n+ Update,\n+ UpdateStatus,\n+ UpdateType,\n+ Build,\n+ Package,\n+ Release,\n+)\n import bodhi.schemas\n import bodhi.security\n from bodhi.validators import (\n@@ -70,11 +77,46 @@\n else:\n date_commits[update.type.description][yearmonth] = 0\n \n+ base_count_query = request.db.query(Update)\\\n+ .filter(Update.release==release)\n+\n+ num_updates_pending = base_count_query\\\n+ .filter(Update.status==UpdateStatus.pending).count()\n+ num_updates_testing = base_count_query\\\n+ .filter(Update.status==UpdateStatus.testing).count()\n+ num_updates_stable = base_count_query\\\n+ .filter(Update.status==UpdateStatus.stable).count()\n+ num_updates_unpushed = base_count_query\\\n+ .filter(Update.status==UpdateStatus.unpushed).count()\n+ num_updates_obsolete = base_count_query\\\n+ .filter(Update.status==UpdateStatus.obsolete).count()\n+\n+ num_updates_security = base_count_query\\\n+ .filter(Update.type==UpdateType.security).count()\n+ num_updates_bugfix = base_count_query\\\n+ .filter(Update.type==UpdateType.bugfix).count()\n+ num_updates_enhancement = base_count_query\\\n+ .filter(Update.type==UpdateType.enhancement).count()\n+ num_updates_newpackage = base_count_query\\\n+ .filter(Update.type==UpdateType.newpackage).count()\n+\n return dict(release=release,\n latest_updates=updates.limit(25).all(),\n count=updates.count(),\n date_commits=date_commits,\n- dates = sorted(dates))\n+ dates=sorted(dates),\n+\n+ num_updates_pending=num_updates_pending,\n+ num_updates_testing=num_updates_testing,\n+ num_updates_stable=num_updates_stable,\n+ num_updates_unpushed=num_updates_unpushed,\n+ num_updates_obsolete=num_updates_obsolete,\n+\n+ num_updates_security=num_updates_security,\n+ num_updates_bugfix=num_updates_bugfix,\n+ num_updates_enhancement=num_updates_enhancement,\n+ num_updates_newpackage=num_updates_newpackage,\n+ )\n \n @release.get(accept=('application/json', 'text/json'), renderer='json')\n @release.get(accept=('application/javascript'), renderer='jsonp')\n", "issue": "enhance release page with moar links\nLike this page https://bodhi.fedoraproject.org/releases/F23\n\nLet's extend the table on the left to include some more stats:\n- number of updates in stable\n- number of updates in testing\n- number of security updates\n- number of bugfix updates\n\netc.. every type, status, and request.. let's put them there.\n\n_importantly_, let's make each one of those entries _also_ a link to a page that shows you the _list_ of each of those kinds of updates, so people don't have to manually frob the querystring to find what they want.\n\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport math\n\nfrom cornice import Service\nfrom pyramid.exceptions import HTTPNotFound\nfrom sqlalchemy import func, distinct\nfrom sqlalchemy.sql import or_\n\nfrom bodhi import log\nfrom bodhi.models import Update, Build, Package, Release\nimport bodhi.schemas\nimport bodhi.security\nfrom bodhi.validators import (\n validate_tags,\n validate_enums,\n validate_updates,\n validate_packages,\n validate_release,\n)\n\n\nrelease = Service(name='release', path='/releases/{name}',\n description='Fedora Releases',\n cors_origins=bodhi.security.cors_origins_ro)\nreleases = Service(name='releases', path='/releases/',\n description='Fedora Releases',\n # Note, this 'rw' is not a typo. the @comments service has\n # a ``post`` section at the bottom.\n cors_origins=bodhi.security.cors_origins_rw)\n\[email protected](accept=\"text/html\", renderer=\"release.html\")\ndef get_release_html(request):\n id = request.matchdict.get('name')\n release = Release.get(id, request.db)\n if not release:\n request.errors.add('body', 'name', 'No such release')\n request.errors.status = HTTPNotFound.code\n updates = request.db.query(Update).filter(\n Update.release==release).order_by(\n Update.date_submitted.desc())\n\n updates_count = request.db.query(Update.date_submitted, Update.type).filter(\n Update.release==release).order_by(\n Update.date_submitted.desc())\n\n date_commits = {}\n dates = set()\n\n for update in updates_count.all():\n d = update.date_submitted\n yearmonth = str(d.year) + '/' + str(d.month).zfill(2)\n dates.add(yearmonth)\n if not update.type.description in date_commits:\n date_commits[update.type.description] = {}\n if yearmonth in date_commits[update.type.description]:\n date_commits[update.type.description][yearmonth] += 1\n else:\n date_commits[update.type.description][yearmonth] = 0\n\n return dict(release=release,\n latest_updates=updates.limit(25).all(),\n count=updates.count(),\n date_commits=date_commits,\n dates = sorted(dates))\n\[email protected](accept=('application/json', 'text/json'), renderer='json')\[email protected](accept=('application/javascript'), renderer='jsonp')\ndef get_release_json(request):\n id = request.matchdict.get('name')\n release = Release.get(id, request.db)\n if not release:\n request.errors.add('body', 'name', 'No such release')\n request.errors.status = HTTPNotFound.code\n return release\n\[email protected](accept=\"text/html\", schema=bodhi.schemas.ListReleaseSchema,\n renderer='releases.html',\n validators=(validate_release, validate_updates,\n validate_packages))\ndef query_releases_html(request):\n def collect_releases(releases):\n x = {}\n for r in releases:\n if r['state'] in x:\n x[r['state']].append(r)\n else:\n x[r['state']] = [r]\n return x\n\n db = request.db\n releases = db.query(Release).order_by(Release.id.desc()).all()\n return dict(releases=collect_releases(releases))\n\[email protected](accept=('application/json', 'text/json'),\n schema=bodhi.schemas.ListReleaseSchema, renderer='json',\n validators=(validate_release, validate_updates,\n validate_packages))\ndef query_releases_json(request):\n db = request.db\n data = request.validated\n query = db.query(Release)\n\n name = data.get('name')\n if name is not None:\n query = query.filter(Release.name.like(name))\n\n updates 
= data.get('updates')\n if updates is not None:\n query = query.join(Release.builds).join(Build.update)\n args = \\\n [Update.title == update.title for update in updates] +\\\n [Update.alias == update.alias for update in updates]\n query = query.filter(or_(*args))\n\n packages = data.get('packages')\n if packages is not None:\n query = query.join(Release.builds).join(Build.package)\n query = query.filter(or_(*[Package.id == p.id for p in packages]))\n\n # We can't use ``query.count()`` here because it is naive with respect to\n # all the joins that we're doing above.\n count_query = query.with_labels().statement\\\n .with_only_columns([func.count(distinct(Release.id))])\\\n .order_by(None)\n total = db.execute(count_query).scalar()\n\n page = data.get('page')\n rows_per_page = data.get('rows_per_page')\n pages = int(math.ceil(total / float(rows_per_page)))\n query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)\n\n return dict(\n releases=query.all(),\n page=page,\n pages=pages,\n rows_per_page=rows_per_page,\n total=total,\n )\n\[email protected](schema=bodhi.schemas.SaveReleaseSchema,\n acl=bodhi.security.admin_only_acl, renderer='json',\n validators=(validate_tags, validate_enums)\n )\ndef save_release(request):\n \"\"\"Save a release\n\n This entails either creating a new release, or editing an existing one. To\n edit an existing release, the release's original name must be specified in\n the ``edited`` parameter.\n \"\"\"\n data = request.validated\n\n edited = data.pop(\"edited\", None)\n\n # This has already been validated at this point, but we need to ditch\n # it since the models don't care about a csrf argument.\n data.pop('csrf_token')\n\n try:\n if edited is None:\n log.info(\"Creating a new release: %s\" % data['name'])\n r = Release(**data)\n\n else:\n log.info(\"Editing release: %s\" % edited)\n r = request.db.query(Release).filter(Release.name==edited).one()\n for k, v in data.items():\n setattr(r, k, v)\n\n except Exception as e:\n log.exception(e)\n request.errors.add('body', 'release',\n 'Unable to create update: %s' % e)\n return\n\n\n request.db.add(r)\n request.db.flush()\n\n return r\n", "path": "bodhi/services/releases.py"}]} | 2,707 | 601 |
gh_patches_debug_55627 | rasdani/github-patches | git_diff | xonsh__xonsh-3527 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Weird completion issue
<!--- Provide a general summary of the issue in the Title above -->
<!--- If you have a question along the lines of "How do I do this Bash command in xonsh"
please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html
If you don't find an answer there, please do open an issue! -->
## xonfig
<details>
```
$ xonfig
+------------------+-----------------+
| xonsh | 0.9.12 |
| Python | 3.7.4 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 2.0.9 |
| shell type | prompt_toolkit2 |
| pygments | 2.4.2 |
| on posix | True |
| on linux | False |
| on darwin | True |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
+------------------+-----------------+
```
</details>
## Expected Behavior
<!--- Tell us what should happen -->
Tab completion behind shell command `vim` should work
## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error
To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.
On Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->
existing file is not being found by completion (see screenshot).
As you can see in the lower part of the screenshot, the file `pip_packages_to_install.txt` exists in the current folder but isn't found when used behind the shell command `vim` (but does work behind `cat`).
Is this maybe created by interfering completions installed elsewhere? Maybe some vim completions from homebrew?
<img width="822" alt="Screenshot 2019-10-31 14 11 02" src="https://user-images.githubusercontent.com/69774/67982582-99090380-fbe8-11e9-839a-b6fd0536a3ed.png">
</issue>
<code>
[start of xonsh/completers/pip.py]
1 """Completers for pip."""
2 # pylint: disable=invalid-name, missing-docstring, unsupported-membership-test
3 # pylint: disable=unused-argument, not-an-iterable
4 import re
5 import subprocess
6
7 import xonsh.lazyasd as xl
8
9
10 @xl.lazyobject
11 def PIP_RE():
12 return re.compile(r"\bx?pip(?:\d|\.)*")
13
14
15 @xl.lazyobject
16 def PIP_LIST_RE():
17 return re.compile(r"\bx?pip(?:\d|\.)* (?:uninstall|show)")
18
19
20 @xl.lazyobject
21 def ALL_COMMANDS():
22 try:
23 help_text = str(
24 subprocess.check_output(["pip", "--help"], stderr=subprocess.DEVNULL)
25 )
26 except FileNotFoundError:
27 return []
28 commands = re.findall(r" (\w+) ", help_text)
29 return [c for c in commands if c not in ["completion", "help"]]
30
31
32 def complete_pip(prefix, line, begidx, endidx, ctx):
33 """Completes python's package manager pip"""
34 line_len = len(line.split())
35 if (
36 (line_len > 3)
37 or (line_len > 2 and line.endswith(" "))
38 or (not PIP_RE.search(line))
39 ):
40 return
41 if PIP_LIST_RE.search(line):
42 try:
43 items = subprocess.check_output(["pip", "list"], stderr=subprocess.DEVNULL)
44 except FileNotFoundError:
45 return set()
46 items = items.decode("utf-8").splitlines()
47 return set(i.split()[0] for i in items if i.split()[0].startswith(prefix))
48
49 if (line_len > 1 and line.endswith(" ")) or line_len > 2:
50 # "pip show " -> no complete (note space)
51 return
52 if prefix not in ALL_COMMANDS:
53 suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]
54 if suggestions:
55 return suggestions, len(prefix)
56 return ALL_COMMANDS, len(prefix)
57
[end of xonsh/completers/pip.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py
--- a/xonsh/completers/pip.py
+++ b/xonsh/completers/pip.py
@@ -9,12 +9,12 @@
 
 @xl.lazyobject
 def PIP_RE():
-    return re.compile(r"\bx?pip(?:\d|\.)*")
+    return re.compile(r"\bx?pip(?:\d|\.)*\b")
 
 
 @xl.lazyobject
 def PIP_LIST_RE():
-    return re.compile(r"\bx?pip(?:\d|\.)* (?:uninstall|show)")
+    return re.compile(r"\bx?pip(?:\d|\.)*\b (?:uninstall|show)")
 
 
 @xl.lazyobject
| {"golden_diff": "diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py\n--- a/xonsh/completers/pip.py\n+++ b/xonsh/completers/pip.py\n@@ -9,12 +9,12 @@\n \n @xl.lazyobject\n def PIP_RE():\n- return re.compile(r\"\\bx?pip(?:\\d|\\.)*\")\n+ return re.compile(r\"\\bx?pip(?:\\d|\\.)*\\b\")\n \n \n @xl.lazyobject\n def PIP_LIST_RE():\n- return re.compile(r\"\\bx?pip(?:\\d|\\.)* (?:uninstall|show)\")\n+ return re.compile(r\"\\bx?pip(?:\\d|\\.)*\\b (?:uninstall|show)\")\n \n \n @xl.lazyobject\n", "issue": "Weird completion issue\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n$ xonfig\r\n+------------------+-----------------+\r\n| xonsh | 0.9.12 |\r\n| Python | 3.7.4 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 2.0.9 |\r\n| shell type | prompt_toolkit2 |\r\n| pygments | 2.4.2 |\r\n| on posix | True |\r\n| on linux | False |\r\n| on darwin | True |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n+------------------+-----------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\n<!--- Tell us what should happen -->\r\nTab completion behind shell command `vim` should work\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.\r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\nexisting file is not being found by completion (see screenshot).\r\nAs you can see in the lower part of the screenshot, the file `pip_packages_to_install.txt` exists in the current folder but isn't found when used behind the shell command `vim` (but does work behind `cat`).\r\nIs this maybe created by interfering completions installed elsewhere? 
Maybe some vim completions from homebrew?\r\n\r\n\r\n<img width=\"822\" alt=\"Screenshot 2019-10-31 14 11 02\" src=\"https://user-images.githubusercontent.com/69774/67982582-99090380-fbe8-11e9-839a-b6fd0536a3ed.png\">\n", "before_files": [{"content": "\"\"\"Completers for pip.\"\"\"\n# pylint: disable=invalid-name, missing-docstring, unsupported-membership-test\n# pylint: disable=unused-argument, not-an-iterable\nimport re\nimport subprocess\n\nimport xonsh.lazyasd as xl\n\n\[email protected]\ndef PIP_RE():\n return re.compile(r\"\\bx?pip(?:\\d|\\.)*\")\n\n\[email protected]\ndef PIP_LIST_RE():\n return re.compile(r\"\\bx?pip(?:\\d|\\.)* (?:uninstall|show)\")\n\n\[email protected]\ndef ALL_COMMANDS():\n try:\n help_text = str(\n subprocess.check_output([\"pip\", \"--help\"], stderr=subprocess.DEVNULL)\n )\n except FileNotFoundError:\n return []\n commands = re.findall(r\" (\\w+) \", help_text)\n return [c for c in commands if c not in [\"completion\", \"help\"]]\n\n\ndef complete_pip(prefix, line, begidx, endidx, ctx):\n \"\"\"Completes python's package manager pip\"\"\"\n line_len = len(line.split())\n if (\n (line_len > 3)\n or (line_len > 2 and line.endswith(\" \"))\n or (not PIP_RE.search(line))\n ):\n return\n if PIP_LIST_RE.search(line):\n try:\n items = subprocess.check_output([\"pip\", \"list\"], stderr=subprocess.DEVNULL)\n except FileNotFoundError:\n return set()\n items = items.decode(\"utf-8\").splitlines()\n return set(i.split()[0] for i in items if i.split()[0].startswith(prefix))\n\n if (line_len > 1 and line.endswith(\" \")) or line_len > 2:\n # \"pip show \" -> no complete (note space)\n return\n if prefix not in ALL_COMMANDS:\n suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)]\n if suggestions:\n return suggestions, len(prefix)\n return ALL_COMMANDS, len(prefix)\n", "path": "xonsh/completers/pip.py"}]} | 1,648 | 173 |
gh_patches_debug_35217 | rasdani/github-patches | git_diff | fonttools__fonttools-799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[ttx] Flexible manual page install directory
https://sourceforge.net/p/fonttools/bugs/51/
"""
The BSDs are patching setup.py during their build process so that they get the manual page installed in /usr/man instead of /usr/share/man. The setup.py script should either know where to install manual pages on BSD or have a --mandir argument that can be used to set the right directory for the manual page.
"""
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2
3 from __future__ import print_function
4 import io
5 import sys
6 from setuptools import setup, find_packages, Command
7 from distutils import log
8 import subprocess as sp
9 import contextlib
10
11 # Force distutils to use py_compile.compile() function with 'doraise' argument
12 # set to True, in order to raise an exception on compilation errors
13 import py_compile
14 orig_py_compile = py_compile.compile
15
16 def doraise_py_compile(file, cfile=None, dfile=None, doraise=False):
17 orig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True)
18
19 py_compile.compile = doraise_py_compile
20
21 needs_pytest = {'pytest', 'test'}.intersection(sys.argv)
22 pytest_runner = ['pytest_runner'] if needs_pytest else []
23 needs_wheel = {'bdist_wheel'}.intersection(sys.argv)
24 wheel = ['wheel'] if needs_wheel else []
25 needs_bumpversion = {'release'}.intersection(sys.argv)
26 bumpversion = ['bumpversion'] if needs_bumpversion else []
27
28 # Trove classifiers for PyPI
29 classifiers = {"classifiers": [
30 "Development Status :: 5 - Production/Stable",
31 "Environment :: Console",
32 "Environment :: Other Environment",
33 "Intended Audience :: Developers",
34 "Intended Audience :: End Users/Desktop",
35 "License :: OSI Approved :: BSD License",
36 "Natural Language :: English",
37 "Operating System :: OS Independent",
38 "Programming Language :: Python",
39 "Programming Language :: Python :: 2",
40 "Programming Language :: Python :: 3",
41 "Topic :: Text Processing :: Fonts",
42 "Topic :: Multimedia :: Graphics",
43 "Topic :: Multimedia :: Graphics :: Graphics Conversion",
44 ]}
45
46 long_description = """\
47 FontTools/TTX is a library to manipulate font files from Python.
48 It supports reading and writing of TrueType/OpenType fonts, reading
49 and writing of AFM files, reading (and partially writing) of PS Type 1
50 fonts. The package also contains a tool called "TTX" which converts
51 TrueType/OpenType fonts to and from an XML-based format.
52 """
53
54
55 @contextlib.contextmanager
56 def capture_logger(name):
57 """ Context manager to capture a logger output with a StringIO stream.
58 """
59 import logging
60
61 logger = logging.getLogger(name)
62 try:
63 import StringIO
64 stream = StringIO.StringIO()
65 except ImportError:
66 stream = io.StringIO()
67 handler = logging.StreamHandler(stream)
68 logger.addHandler(handler)
69 try:
70 yield stream
71 finally:
72 logger.removeHandler(handler)
73
74
75 class release(Command):
76 """
77 Tag a new release with a single command, using the 'bumpversion' tool
78 to update all the version strings in the source code.
79 The version scheme conforms to 'SemVer' and PEP 440 specifications.
80
81 Firstly, the pre-release '.devN' suffix is dropped to signal that this is
82 a stable release. If '--major' or '--minor' options are passed, the
83 the first or second 'semver' digit is also incremented. Major is usually
84 for backward-incompatible API changes, while minor is used when adding
85 new backward-compatible functionalities. No options imply 'patch' or bug-fix
86 release.
87
88 A new header is also added to the changelog file ("NEWS"), containing the
89 new version string and the current 'YYYY-MM-DD' date.
90
91 All changes are committed, and an annotated git tag is generated. With the
92 --sign option, the tag is GPG-signed with the user's default key.
93
94 Finally, the 'patch' part of the version string is bumped again, and a
95 pre-release suffix '.dev0' is appended to mark the opening of a new
96 development cycle.
97
98 Links:
99 - http://semver.org/
100 - https://www.python.org/dev/peps/pep-0440/
101 - https://github.com/peritus/bumpversion
102 """
103
104 description = "update version strings for release"
105
106 user_options = [
107 ("major", None, "bump the first digit (incompatible API changes)"),
108 ("minor", None, "bump the second digit (new backward-compatible features)"),
109 ("sign", "s", "make a GPG-signed tag, using the default key"),
110 ("allow-dirty", None, "don't abort if working directory is dirty"),
111 ]
112
113 changelog_name = "NEWS"
114 changelog_header = u"## TTX/FontTools Version "
115 changelog_date_fmt = "%Y-%m-%d"
116 commit_message = "Release {new_version}"
117 tag_name = "{new_version}"
118 version_files = [
119 "setup.cfg",
120 "setup.py",
121 "Lib/fontTools/__init__.py",
122 ]
123
124 def initialize_options(self):
125 self.minor = False
126 self.major = False
127 self.sign = False
128 self.allow_dirty = False
129
130 def finalize_options(self):
131 if all([self.major, self.minor]):
132 from distutils.errors import DistutilsOptionError
133 raise DistutilsOptionError("--major/--minor are mutually exclusive")
134 self.part = "major" if self.major else "minor" if self.minor else None
135
136 def run(self):
137 if self.part is not None:
138 log.info("bumping '%s' version" % self.part)
139 self.bumpversion(self.part, commit=False)
140 release_version = self.bumpversion(
141 "release", commit=False, allow_dirty=True)
142 else:
143 log.info("stripping pre-release suffix")
144 release_version = self.bumpversion("release")
145 log.info(" version = %s" % release_version)
146
147 changes = self.format_changelog(release_version)
148
149 self.git_commit(release_version)
150 self.git_tag(release_version, changes, self.sign)
151
152 log.info("bumping 'patch' version and pre-release suffix")
153 next_dev_version = self.bumpversion('patch', commit=True)
154 log.info(" version = %s" % next_dev_version)
155
156 def git_commit(self, version):
157 """ Stage and commit all relevant version files, and format the commit
158 message with specified 'version' string.
159 """
160 files = self.version_files + [self.changelog_name]
161
162 log.info("committing changes")
163 for f in files:
164 log.info(" %s" % f)
165 if self.dry_run:
166 return
167 sp.check_call(["git", "add"] + files)
168 msg = self.commit_message.format(new_version=version)
169 sp.check_call(["git", "commit", "-m", msg], stdout=sp.PIPE)
170
171 def git_tag(self, version, message, sign=False):
172 """ Create annotated git tag with given 'version' and 'message'.
173 Optionally 'sign' the tag with the user's GPG key.
174 """
175 log.info("creating %s git tag '%s'" % (
176 "signed" if sign else "annotated", version))
177 if self.dry_run:
178 return
179 # create an annotated (or signed) tag from the new version
180 tag_opt = "-s" if sign else "-a"
181 tag_name = self.tag_name.format(new_version=version)
182 proc = sp.Popen(
183 ["git", "tag", tag_opt, "-F", "-", tag_name], stdin=sp.PIPE)
184 # use the latest changes from the changelog file as the tag message
185 tag_message = u"%s\n\n%s" % (tag_name, message)
186 proc.communicate(tag_message.encode('utf-8'))
187 if proc.returncode != 0:
188 sys.exit(proc.returncode)
189
190 def bumpversion(self, part, commit=False, message=None, allow_dirty=None):
191 """ Run bumpversion.main() with the specified arguments, and return the
192 new computed version string (cf. 'bumpversion --help' for more info)
193 """
194 import bumpversion
195
196 args = (
197 (['--verbose'] if self.verbose > 1 else []) +
198 (['--dry-run'] if self.dry_run else []) +
199 (['--allow-dirty'] if (allow_dirty or self.allow_dirty) else []) +
200 (['--commit'] if commit else ['--no-commit']) +
201 (['--message', message] if message is not None else []) +
202 ['--list', part]
203 )
204 log.debug("$ bumpversion %s" % " ".join(a.replace(" ", "\\ ") for a in args))
205
206 with capture_logger("bumpversion.list") as out:
207 bumpversion.main(args)
208
209 last_line = out.getvalue().splitlines()[-1]
210 new_version = last_line.replace("new_version=", "")
211 return new_version
212
213 def format_changelog(self, version):
214 """ Write new header at beginning of changelog file with the specified
215 'version' and the current date.
216 Return the changelog content for the current release.
217 """
218 from datetime import datetime
219
220 log.info("formatting changelog")
221
222 changes = []
223 with io.open(self.changelog_name, "r+", encoding="utf-8") as f:
224 for ln in f:
225 if ln.startswith(self.changelog_header):
226 break
227 else:
228 changes.append(ln)
229 if not self.dry_run:
230 f.seek(0)
231 content = f.read()
232 f.seek(0)
233 f.write(u"%s%s\n\n%s\n\n%s" % (
234 self.changelog_header,
235 version,
236 datetime.today().strftime(self.changelog_date_fmt),
237 content))
238
239 return u"".join(changes)
240
241
242 class PassCommand(Command):
243 """ This is used with Travis `dpl` tool so that it skips creating sdist
244 and wheel packages, but simply uploads to PyPI the files found in ./dist
245 folder, that were previously built inside the tox 'bdist' environment.
246 This ensures that the same files are uploaded to Github Releases and PyPI.
247 """
248
249 description = "do nothing"
250 user_options = []
251
252 def initialize_options(self):
253 pass
254
255 def finalize_options(self):
256 pass
257
258 def run(self):
259 pass
260
261
262 setup(
263 name="fonttools",
264 version="3.5.0.dev0",
265 description="Tools to manipulate font files",
266 author="Just van Rossum",
267 author_email="[email protected]",
268 maintainer="Behdad Esfahbod",
269 maintainer_email="[email protected]",
270 url="http://github.com/fonttools/fonttools",
271 license="OpenSource, BSD-style",
272 platforms=["Any"],
273 long_description=long_description,
274 package_dir={'': 'Lib'},
275 packages=find_packages("Lib"),
276 include_package_data=True,
277 data_files=[
278 ('share/man/man1', ["Doc/ttx.1"])
279 ] if sys.platform.startswith('linux') else [],
280 setup_requires=pytest_runner + wheel + bumpversion,
281 tests_require=[
282 'pytest>=2.8',
283 ],
284 entry_points={
285 'console_scripts': [
286 "fonttools = fontTools.__main__:main",
287 "ttx = fontTools.ttx:main",
288 "pyftsubset = fontTools.subset:main",
289 "pyftmerge = fontTools.merge:main",
290 "pyftinspect = fontTools.inspect:main"
291 ]
292 },
293 cmdclass={
294 "release": release,
295 'pass': PassCommand,
296 },
297 **classifiers
298 )
299
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,8 +3,12 @@
from __future__ import print_function
import io
import sys
+import os
+from os.path import isfile, join as pjoin
+from glob import glob
from setuptools import setup, find_packages, Command
from distutils import log
+from distutils.util import convert_path
import subprocess as sp
import contextlib
@@ -259,6 +263,49 @@
pass
+def find_data_files(manpath="share/man"):
+ """ Find FontTools's data_files (just man pages at this point).
+
+ By default, we install man pages to "share/man" directory relative to the
+ base installation directory for data_files. The latter can be changed with
+ the --install-data option of 'setup.py install' sub-command.
+
+ E.g., if the data files installation directory is "/usr", the default man
+ page installation directory will be "/usr/share/man".
+
+ You can override this via the $FONTTOOLS_MANPATH environment variable.
+
+ E.g., on some BSD systems man pages are installed to 'man' instead of
+ 'share/man'; you can export $FONTTOOLS_MANPATH variable just before
+ installing:
+
+ $ FONTTOOLS_MANPATH="man" pip install -v .
+ [...]
+ running install_data
+ copying Doc/man/ttx.1 -> /usr/man/man1
+
+ When installing from PyPI, for this variable to have effect you need to
+ force pip to install from the source distribution instead of the wheel
+ package (otherwise setup.py is not run), by using the --no-binary option:
+
+ $ FONTTOOLS_MANPATH="man" pip install --no-binary=fonttools fonttools
+
+ Note that you can only override the base man path, i.e. without the
+ section number (man1, man3, etc.). The latter is always implied to be 1,
+ for "general commands".
+ """
+
+ # get base installation directory for man pages
+ manpagebase = os.environ.get('FONTTOOLS_MANPATH', convert_path(manpath))
+ # all our man pages go to section 1
+ manpagedir = pjoin(manpagebase, 'man1')
+
+ manpages = [f for f in glob(pjoin('Doc', 'man', 'man1', '*.1')) if isfile(f)]
+
+ data_files = [(manpagedir, manpages)]
+ return data_files
+
+
setup(
name="fonttools",
version="3.5.0.dev0",
@@ -274,9 +321,7 @@
package_dir={'': 'Lib'},
packages=find_packages("Lib"),
include_package_data=True,
- data_files=[
- ('share/man/man1', ["Doc/ttx.1"])
- ] if sys.platform.startswith('linux') else [],
+ data_files=find_data_files(),
setup_requires=pytest_runner + wheel + bumpversion,
tests_require=[
'pytest>=2.8',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,8 +3,12 @@\n from __future__ import print_function\n import io\n import sys\n+import os\n+from os.path import isfile, join as pjoin\n+from glob import glob\n from setuptools import setup, find_packages, Command\n from distutils import log\n+from distutils.util import convert_path\n import subprocess as sp\n import contextlib\n \n@@ -259,6 +263,49 @@\n \t\tpass\n \n \n+def find_data_files(manpath=\"share/man\"):\n+\t\"\"\" Find FontTools's data_files (just man pages at this point).\n+\n+\tBy default, we install man pages to \"share/man\" directory relative to the\n+\tbase installation directory for data_files. The latter can be changed with\n+\tthe --install-data option of 'setup.py install' sub-command.\n+\n+\tE.g., if the data files installation directory is \"/usr\", the default man\n+\tpage installation directory will be \"/usr/share/man\".\n+\n+\tYou can override this via the $FONTTOOLS_MANPATH environment variable.\n+\n+\tE.g., on some BSD systems man pages are installed to 'man' instead of\n+\t'share/man'; you can export $FONTTOOLS_MANPATH variable just before\n+\tinstalling:\n+\n+\t$ FONTTOOLS_MANPATH=\"man\" pip install -v .\n+\t [...]\n+\t running install_data\n+\t copying Doc/man/ttx.1 -> /usr/man/man1\n+\n+\tWhen installing from PyPI, for this variable to have effect you need to\n+\tforce pip to install from the source distribution instead of the wheel\n+\tpackage (otherwise setup.py is not run), by using the --no-binary option:\n+\n+\t$ FONTTOOLS_MANPATH=\"man\" pip install --no-binary=fonttools fonttools\n+\n+\tNote that you can only override the base man path, i.e. without the\n+\tsection number (man1, man3, etc.). The latter is always implied to be 1,\n+\tfor \"general commands\".\n+\t\"\"\"\n+\n+\t# get base installation directory for man pages\n+\tmanpagebase = os.environ.get('FONTTOOLS_MANPATH', convert_path(manpath))\n+\t# all our man pages go to section 1\n+\tmanpagedir = pjoin(manpagebase, 'man1')\n+\n+\tmanpages = [f for f in glob(pjoin('Doc', 'man', 'man1', '*.1')) if isfile(f)]\n+\n+\tdata_files = [(manpagedir, manpages)]\n+\treturn data_files\n+\n+\n setup(\n \tname=\"fonttools\",\n \tversion=\"3.5.0.dev0\",\n@@ -274,9 +321,7 @@\n \tpackage_dir={'': 'Lib'},\n \tpackages=find_packages(\"Lib\"),\n \tinclude_package_data=True,\n-\tdata_files=[\n-\t\t('share/man/man1', [\"Doc/ttx.1\"])\n-\t] if sys.platform.startswith('linux') else [],\n+\tdata_files=find_data_files(),\n \tsetup_requires=pytest_runner + wheel + bumpversion,\n \ttests_require=[\n \t\t'pytest>=2.8',\n", "issue": "[ttx] Flexible manual page install directory \nhttps://sourceforge.net/p/fonttools/bugs/51/\n\n\"\"\"\nThe BSDs are patching setup.py during their build process so that they get the manual page installed in /usr/man instead of /usr/share/man. The setup.py script should either know where to install manual pages on BSD or have a --mandir argument that can be used to set the right directory for the manual page. \n\"\"\"\n\n", "before_files": [{"content": "#! 
/usr/bin/env python\n\nfrom __future__ import print_function\nimport io\nimport sys\nfrom setuptools import setup, find_packages, Command\nfrom distutils import log\nimport subprocess as sp\nimport contextlib\n\n# Force distutils to use py_compile.compile() function with 'doraise' argument\n# set to True, in order to raise an exception on compilation errors\nimport py_compile\norig_py_compile = py_compile.compile\n\ndef doraise_py_compile(file, cfile=None, dfile=None, doraise=False):\n\torig_py_compile(file, cfile=cfile, dfile=dfile, doraise=True)\n\npy_compile.compile = doraise_py_compile\n\nneeds_pytest = {'pytest', 'test'}.intersection(sys.argv)\npytest_runner = ['pytest_runner'] if needs_pytest else []\nneeds_wheel = {'bdist_wheel'}.intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\nneeds_bumpversion = {'release'}.intersection(sys.argv)\nbumpversion = ['bumpversion'] if needs_bumpversion else []\n\n# Trove classifiers for PyPI\nclassifiers = {\"classifiers\": [\n\t\"Development Status :: 5 - Production/Stable\",\n\t\"Environment :: Console\",\n\t\"Environment :: Other Environment\",\n\t\"Intended Audience :: Developers\",\n\t\"Intended Audience :: End Users/Desktop\",\n\t\"License :: OSI Approved :: BSD License\",\n\t\"Natural Language :: English\",\n\t\"Operating System :: OS Independent\",\n\t\"Programming Language :: Python\",\n\t\"Programming Language :: Python :: 2\",\n\t\"Programming Language :: Python :: 3\",\n\t\"Topic :: Text Processing :: Fonts\",\n\t\"Topic :: Multimedia :: Graphics\",\n\t\"Topic :: Multimedia :: Graphics :: Graphics Conversion\",\n]}\n\nlong_description = \"\"\"\\\nFontTools/TTX is a library to manipulate font files from Python.\nIt supports reading and writing of TrueType/OpenType fonts, reading\nand writing of AFM files, reading (and partially writing) of PS Type 1\nfonts. The package also contains a tool called \"TTX\" which converts\nTrueType/OpenType fonts to and from an XML-based format.\n\"\"\"\n\n\[email protected]\ndef capture_logger(name):\n\t\"\"\" Context manager to capture a logger output with a StringIO stream.\n\t\"\"\"\n\timport logging\n\n\tlogger = logging.getLogger(name)\n\ttry:\n\t\timport StringIO\n\t\tstream = StringIO.StringIO()\n\texcept ImportError:\n\t\tstream = io.StringIO()\n\thandler = logging.StreamHandler(stream)\n\tlogger.addHandler(handler)\n\ttry:\n\t\tyield stream\n\tfinally:\n\t\tlogger.removeHandler(handler)\n\n\nclass release(Command):\n\t\"\"\"\n\tTag a new release with a single command, using the 'bumpversion' tool\n\tto update all the version strings in the source code.\n\tThe version scheme conforms to 'SemVer' and PEP 440 specifications.\n\n\tFirstly, the pre-release '.devN' suffix is dropped to signal that this is\n\ta stable release. If '--major' or '--minor' options are passed, the\n\tthe first or second 'semver' digit is also incremented. Major is usually\n\tfor backward-incompatible API changes, while minor is used when adding\n\tnew backward-compatible functionalities. No options imply 'patch' or bug-fix\n\trelease.\n\n\tA new header is also added to the changelog file (\"NEWS\"), containing the\n\tnew version string and the current 'YYYY-MM-DD' date.\n\n\tAll changes are committed, and an annotated git tag is generated. 
With the\n\t--sign option, the tag is GPG-signed with the user's default key.\n\n\tFinally, the 'patch' part of the version string is bumped again, and a\n\tpre-release suffix '.dev0' is appended to mark the opening of a new\n\tdevelopment cycle.\n\n\tLinks:\n\t- http://semver.org/\n\t- https://www.python.org/dev/peps/pep-0440/\n\t- https://github.com/peritus/bumpversion\n\t\"\"\"\n\n\tdescription = \"update version strings for release\"\n\n\tuser_options = [\n\t\t(\"major\", None, \"bump the first digit (incompatible API changes)\"),\n\t\t(\"minor\", None, \"bump the second digit (new backward-compatible features)\"),\n\t\t(\"sign\", \"s\", \"make a GPG-signed tag, using the default key\"),\n\t\t(\"allow-dirty\", None, \"don't abort if working directory is dirty\"),\n\t]\n\n\tchangelog_name = \"NEWS\"\n\tchangelog_header = u\"## TTX/FontTools Version \"\n\tchangelog_date_fmt = \"%Y-%m-%d\"\n\tcommit_message = \"Release {new_version}\"\n\ttag_name = \"{new_version}\"\n\tversion_files = [\n\t\t\"setup.cfg\",\n\t\t\"setup.py\",\n\t\t\"Lib/fontTools/__init__.py\",\n\t]\n\n\tdef initialize_options(self):\n\t\tself.minor = False\n\t\tself.major = False\n\t\tself.sign = False\n\t\tself.allow_dirty = False\n\n\tdef finalize_options(self):\n\t\tif all([self.major, self.minor]):\n\t\t\tfrom distutils.errors import DistutilsOptionError\n\t\t\traise DistutilsOptionError(\"--major/--minor are mutually exclusive\")\n\t\tself.part = \"major\" if self.major else \"minor\" if self.minor else None\n\n\tdef run(self):\n\t\tif self.part is not None:\n\t\t\tlog.info(\"bumping '%s' version\" % self.part)\n\t\t\tself.bumpversion(self.part, commit=False)\n\t\t\trelease_version = self.bumpversion(\n\t\t\t\t\"release\", commit=False, allow_dirty=True)\n\t\telse:\n\t\t\tlog.info(\"stripping pre-release suffix\")\n\t\t\trelease_version = self.bumpversion(\"release\")\n\t\tlog.info(\" version = %s\" % release_version)\n\n\t\tchanges = self.format_changelog(release_version)\n\n\t\tself.git_commit(release_version)\n\t\tself.git_tag(release_version, changes, self.sign)\n\n\t\tlog.info(\"bumping 'patch' version and pre-release suffix\")\n\t\tnext_dev_version = self.bumpversion('patch', commit=True)\n\t\tlog.info(\" version = %s\" % next_dev_version)\n\n\tdef git_commit(self, version):\n\t\t\"\"\" Stage and commit all relevant version files, and format the commit\n\t\tmessage with specified 'version' string.\n\t\t\"\"\"\n\t\tfiles = self.version_files + [self.changelog_name]\n\n\t\tlog.info(\"committing changes\")\n\t\tfor f in files:\n\t\t\tlog.info(\" %s\" % f)\n\t\tif self.dry_run:\n\t\t\treturn\n\t\tsp.check_call([\"git\", \"add\"] + files)\n\t\tmsg = self.commit_message.format(new_version=version)\n\t\tsp.check_call([\"git\", \"commit\", \"-m\", msg], stdout=sp.PIPE)\n\n\tdef git_tag(self, version, message, sign=False):\n\t\t\"\"\" Create annotated git tag with given 'version' and 'message'.\n\t\tOptionally 'sign' the tag with the user's GPG key.\n\t\t\"\"\"\n\t\tlog.info(\"creating %s git tag '%s'\" % (\n\t\t\t\"signed\" if sign else \"annotated\", version))\n\t\tif self.dry_run:\n\t\t\treturn\n\t\t# create an annotated (or signed) tag from the new version\n\t\ttag_opt = \"-s\" if sign else \"-a\"\n\t\ttag_name = self.tag_name.format(new_version=version)\n\t\tproc = sp.Popen(\n\t\t\t[\"git\", \"tag\", tag_opt, \"-F\", \"-\", tag_name], stdin=sp.PIPE)\n\t\t# use the latest changes from the changelog file as the tag message\n\t\ttag_message = u\"%s\\n\\n%s\" % (tag_name, 
message)\n\t\tproc.communicate(tag_message.encode('utf-8'))\n\t\tif proc.returncode != 0:\n\t\t\tsys.exit(proc.returncode)\n\n\tdef bumpversion(self, part, commit=False, message=None, allow_dirty=None):\n\t\t\"\"\" Run bumpversion.main() with the specified arguments, and return the\n\t\tnew computed version string (cf. 'bumpversion --help' for more info)\n\t\t\"\"\"\n\t\timport bumpversion\n\n\t\targs = (\n\t\t\t(['--verbose'] if self.verbose > 1 else []) +\n\t\t\t(['--dry-run'] if self.dry_run else []) +\n\t\t\t(['--allow-dirty'] if (allow_dirty or self.allow_dirty) else []) +\n\t\t\t(['--commit'] if commit else ['--no-commit']) +\n\t\t\t(['--message', message] if message is not None else []) +\n\t\t\t['--list', part]\n\t\t)\n\t\tlog.debug(\"$ bumpversion %s\" % \" \".join(a.replace(\" \", \"\\\\ \") for a in args))\n\n\t\twith capture_logger(\"bumpversion.list\") as out:\n\t\t\tbumpversion.main(args)\n\n\t\tlast_line = out.getvalue().splitlines()[-1]\n\t\tnew_version = last_line.replace(\"new_version=\", \"\")\n\t\treturn new_version\n\n\tdef format_changelog(self, version):\n\t\t\"\"\" Write new header at beginning of changelog file with the specified\n\t\t'version' and the current date.\n\t\tReturn the changelog content for the current release.\n\t\t\"\"\"\n\t\tfrom datetime import datetime\n\n\t\tlog.info(\"formatting changelog\")\n\n\t\tchanges = []\n\t\twith io.open(self.changelog_name, \"r+\", encoding=\"utf-8\") as f:\n\t\t\tfor ln in f:\n\t\t\t\tif ln.startswith(self.changelog_header):\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tchanges.append(ln)\n\t\t\tif not self.dry_run:\n\t\t\t\tf.seek(0)\n\t\t\t\tcontent = f.read()\n\t\t\t\tf.seek(0)\n\t\t\t\tf.write(u\"%s%s\\n\\n%s\\n\\n%s\" % (\n\t\t\t\t\tself.changelog_header,\n\t\t\t\t\tversion,\n\t\t\t\t\tdatetime.today().strftime(self.changelog_date_fmt),\n\t\t\t\t\tcontent))\n\n\t\treturn u\"\".join(changes)\n\n\nclass PassCommand(Command):\n\t\"\"\" This is used with Travis `dpl` tool so that it skips creating sdist\n\tand wheel packages, but simply uploads to PyPI the files found in ./dist\n\tfolder, that were previously built inside the tox 'bdist' environment.\n\tThis ensures that the same files are uploaded to Github Releases and PyPI.\n\t\"\"\"\n\n\tdescription = \"do nothing\"\n\tuser_options = []\n\n\tdef initialize_options(self):\n\t\tpass\n\n\tdef finalize_options(self):\n\t\tpass\n\n\tdef run(self):\n\t\tpass\n\n\nsetup(\n\tname=\"fonttools\",\n\tversion=\"3.5.0.dev0\",\n\tdescription=\"Tools to manipulate font files\",\n\tauthor=\"Just van Rossum\",\n\tauthor_email=\"[email protected]\",\n\tmaintainer=\"Behdad Esfahbod\",\n\tmaintainer_email=\"[email protected]\",\n\turl=\"http://github.com/fonttools/fonttools\",\n\tlicense=\"OpenSource, BSD-style\",\n\tplatforms=[\"Any\"],\n\tlong_description=long_description,\n\tpackage_dir={'': 'Lib'},\n\tpackages=find_packages(\"Lib\"),\n\tinclude_package_data=True,\n\tdata_files=[\n\t\t('share/man/man1', [\"Doc/ttx.1\"])\n\t] if sys.platform.startswith('linux') else [],\n\tsetup_requires=pytest_runner + wheel + bumpversion,\n\ttests_require=[\n\t\t'pytest>=2.8',\n\t],\n\tentry_points={\n\t\t'console_scripts': [\n\t\t\t\"fonttools = fontTools.__main__:main\",\n\t\t\t\"ttx = fontTools.ttx:main\",\n\t\t\t\"pyftsubset = fontTools.subset:main\",\n\t\t\t\"pyftmerge = fontTools.merge:main\",\n\t\t\t\"pyftinspect = fontTools.inspect:main\"\n\t\t]\n\t},\n\tcmdclass={\n\t\t\"release\": release,\n\t\t'pass': PassCommand,\n\t},\n\t**classifiers\n)\n", "path": "setup.py"}]} | 3,975 | 691 |
gh_patches_debug_33399 | rasdani/github-patches | git_diff | plotly__plotly.py-1832 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Does Plotly 4.2.0 depend on scikit-image?
I failed to `import plotly.figure_factory` because plotly seems not to install `scikit-image` when running `pip install -U plotly`. After I manually installed `scikit-image`, `import plotly.figure_factory` worked.
This was not a problem in version 4.1.1.
But the source code shows that `plotly.figure_factory` depends on it:
https://github.com/plotly/plotly.py/blob/b7ad5433c4e0882715781fa6c4816fc7fff62965/packages/python/plotly/plotly/figure_factory/_ternary_contour.py#L11
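For reference, a minimal sketch of an optional-import guard that addresses this (it mirrors the golden diff further down; `optional_imports.get_module` is the helper `figure_factory/__init__.py` already uses for pandas):

```
# Sketch: only import the skimage-backed factory when scikit-image is present,
# otherwise expose a stub that explains the missing optional dependency.
from plotly import optional_imports

if optional_imports.get_module("skimage") is not None:
    from plotly.figure_factory._ternary_contour import create_ternary_contour
else:
    def create_ternary_contour(*args, **kwargs):
        raise ImportError("Please install scikit-image to use `create_ternary_contour`")
```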
</issue>
<code>
[start of packages/python/plotly/plotly/express/__init__.py]
1 """
2 `plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \
3 data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express
4 """
5
6 from ._chart_types import ( # noqa: F401
7 scatter,
8 scatter_3d,
9 scatter_polar,
10 scatter_ternary,
11 scatter_mapbox,
12 scatter_geo,
13 line,
14 line_3d,
15 line_polar,
16 line_ternary,
17 line_mapbox,
18 line_geo,
19 area,
20 bar,
21 bar_polar,
22 violin,
23 box,
24 strip,
25 histogram,
26 scatter_matrix,
27 parallel_coordinates,
28 parallel_categories,
29 choropleth,
30 density_contour,
31 density_heatmap,
32 )
33
34 from ._core import ( # noqa: F401
35 set_mapbox_access_token,
36 defaults,
37 get_trendline_results,
38 )
39
40 from . import data, colors # noqa: F401
41
42 __all__ = [
43 "scatter",
44 "scatter_3d",
45 "scatter_polar",
46 "scatter_ternary",
47 "scatter_mapbox",
48 "scatter_geo",
49 "scatter_matrix",
50 "density_contour",
51 "density_heatmap",
52 "line",
53 "line_3d",
54 "line_polar",
55 "line_ternary",
56 "line_mapbox",
57 "line_geo",
58 "parallel_coordinates",
59 "parallel_categories",
60 "area",
61 "bar",
62 "bar_polar",
63 "violin",
64 "box",
65 "strip",
66 "histogram",
67 "choropleth",
68 "data",
69 "colors",
70 "set_mapbox_access_token",
71 "get_trendline_results",
72 ]
73
[end of packages/python/plotly/plotly/express/__init__.py]
[start of packages/python/plotly/plotly/figure_factory/__init__.py]
1 from __future__ import absolute_import
2
3 from plotly import optional_imports
4
5 # Require that numpy exists for figure_factory
6 np = optional_imports.get_module("numpy")
7 if np is None:
8 raise ImportError(
9 """\
10 The figure factory module requires the numpy package"""
11 )
12
13
14 from plotly.figure_factory._2d_density import create_2d_density
15 from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap
16 from plotly.figure_factory._bullet import create_bullet
17 from plotly.figure_factory._candlestick import create_candlestick
18 from plotly.figure_factory._dendrogram import create_dendrogram
19 from plotly.figure_factory._distplot import create_distplot
20 from plotly.figure_factory._facet_grid import create_facet_grid
21 from plotly.figure_factory._gantt import create_gantt
22 from plotly.figure_factory._ohlc import create_ohlc
23 from plotly.figure_factory._quiver import create_quiver
24 from plotly.figure_factory._scatterplot import create_scatterplotmatrix
25 from plotly.figure_factory._streamline import create_streamline
26 from plotly.figure_factory._table import create_table
27 from plotly.figure_factory._ternary_contour import create_ternary_contour
28 from plotly.figure_factory._trisurf import create_trisurf
29 from plotly.figure_factory._violin import create_violin
30
31 if optional_imports.get_module("pandas") is not None:
32 from plotly.figure_factory._county_choropleth import create_choropleth
33
34 __all__ = [
35 "create_2d_density",
36 "create_annotated_heatmap",
37 "create_bullet",
38 "create_candlestick",
39 "create_dendrogram",
40 "create_distplot",
41 "create_facet_grid",
42 "create_gantt",
43 "create_ohlc",
44 "create_quiver",
45 "create_scatterplotmatrix",
46 "create_streamline",
47 "create_table",
48 "create_ternary_contour",
49 "create_trisurf",
50 "create_violin",
51 ]
52
[end of packages/python/plotly/plotly/figure_factory/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py
--- a/packages/python/plotly/plotly/express/__init__.py
+++ b/packages/python/plotly/plotly/express/__init__.py
@@ -2,6 +2,16 @@
`plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \
data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express
"""
+from __future__ import absolute_import
+from plotly import optional_imports
+
+pd = optional_imports.get_module("pandas")
+if pd is None:
+ raise ImportError(
+ """\
+Plotly express requires pandas to be installed."""
+ )
+
from ._chart_types import ( # noqa: F401
scatter,
diff --git a/packages/python/plotly/plotly/figure_factory/__init__.py b/packages/python/plotly/plotly/figure_factory/__init__.py
--- a/packages/python/plotly/plotly/figure_factory/__init__.py
+++ b/packages/python/plotly/plotly/figure_factory/__init__.py
@@ -24,18 +24,31 @@
from plotly.figure_factory._scatterplot import create_scatterplotmatrix
from plotly.figure_factory._streamline import create_streamline
from plotly.figure_factory._table import create_table
-from plotly.figure_factory._ternary_contour import create_ternary_contour
from plotly.figure_factory._trisurf import create_trisurf
from plotly.figure_factory._violin import create_violin
if optional_imports.get_module("pandas") is not None:
from plotly.figure_factory._county_choropleth import create_choropleth
+else:
+
+ def create_choropleth(*args, **kwargs):
+ raise ImportError("Please install pandas to use `create_choropleth`")
+
+
+if optional_imports.get_module("skimage") is not None:
+ from plotly.figure_factory._ternary_contour import create_ternary_contour
+else:
+
+ def create_ternary_contour(*args, **kwargs):
+ raise ImportError("Please install scikit-image to use `create_ternary_contour`")
+
__all__ = [
"create_2d_density",
"create_annotated_heatmap",
"create_bullet",
"create_candlestick",
+ "create_choropleth",
"create_dendrogram",
"create_distplot",
"create_facet_grid",
| {"golden_diff": "diff --git a/packages/python/plotly/plotly/express/__init__.py b/packages/python/plotly/plotly/express/__init__.py\n--- a/packages/python/plotly/plotly/express/__init__.py\n+++ b/packages/python/plotly/plotly/express/__init__.py\n@@ -2,6 +2,16 @@\n `plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \\\n data exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express\n \"\"\"\n+from __future__ import absolute_import\n+from plotly import optional_imports\n+\n+pd = optional_imports.get_module(\"pandas\")\n+if pd is None:\n+ raise ImportError(\n+ \"\"\"\\\n+Plotly express requires pandas to be installed.\"\"\"\n+ )\n+\n \n from ._chart_types import ( # noqa: F401\n scatter,\ndiff --git a/packages/python/plotly/plotly/figure_factory/__init__.py b/packages/python/plotly/plotly/figure_factory/__init__.py\n--- a/packages/python/plotly/plotly/figure_factory/__init__.py\n+++ b/packages/python/plotly/plotly/figure_factory/__init__.py\n@@ -24,18 +24,31 @@\n from plotly.figure_factory._scatterplot import create_scatterplotmatrix\n from plotly.figure_factory._streamline import create_streamline\n from plotly.figure_factory._table import create_table\n-from plotly.figure_factory._ternary_contour import create_ternary_contour\n from plotly.figure_factory._trisurf import create_trisurf\n from plotly.figure_factory._violin import create_violin\n \n if optional_imports.get_module(\"pandas\") is not None:\n from plotly.figure_factory._county_choropleth import create_choropleth\n+else:\n+\n+ def create_choropleth(*args, **kwargs):\n+ raise ImportError(\"Please install pandas to use `create_choropleth`\")\n+\n+\n+if optional_imports.get_module(\"skimage\") is not None:\n+ from plotly.figure_factory._ternary_contour import create_ternary_contour\n+else:\n+\n+ def create_ternary_contour(*args, **kwargs):\n+ raise ImportError(\"Please install scikit-image to use `create_ternary_contour`\")\n+\n \n __all__ = [\n \"create_2d_density\",\n \"create_annotated_heatmap\",\n \"create_bullet\",\n \"create_candlestick\",\n+ \"create_choropleth\",\n \"create_dendrogram\",\n \"create_distplot\",\n \"create_facet_grid\",\n", "issue": "Does Plotly 4.2.0 depend on scikit-image?\nI failed to `import plotly.figure_factory` because plotly seems not to install `scikit-image` when running `pip install -U plotly`. After I manually installed `scikit-image`, `import plotly.figure_factory` worked.\r\n\r\nThis was not a problem in version 4.1.1.\r\n\r\nBut the source code shows it depends on it.\r\nhttps://github.com/plotly/plotly.py/blob/b7ad5433c4e0882715781fa6c4816fc7fff62965/packages/python/plotly/plotly/figure_factory/_ternary_contour.py#L11\n", "before_files": [{"content": "\"\"\"\n`plotly_express` is a terse, consistent, high-level wrapper around `plotly` for rapid \\\ndata exploration and figure generation. See the gallery at https://plotly.github.io/plotly_express\n\"\"\"\n\nfrom ._chart_types import ( # noqa: F401\n scatter,\n scatter_3d,\n scatter_polar,\n scatter_ternary,\n scatter_mapbox,\n scatter_geo,\n line,\n line_3d,\n line_polar,\n line_ternary,\n line_mapbox,\n line_geo,\n area,\n bar,\n bar_polar,\n violin,\n box,\n strip,\n histogram,\n scatter_matrix,\n parallel_coordinates,\n parallel_categories,\n choropleth,\n density_contour,\n density_heatmap,\n)\n\nfrom ._core import ( # noqa: F401\n set_mapbox_access_token,\n defaults,\n get_trendline_results,\n)\n\nfrom . 
import data, colors # noqa: F401\n\n__all__ = [\n \"scatter\",\n \"scatter_3d\",\n \"scatter_polar\",\n \"scatter_ternary\",\n \"scatter_mapbox\",\n \"scatter_geo\",\n \"scatter_matrix\",\n \"density_contour\",\n \"density_heatmap\",\n \"line\",\n \"line_3d\",\n \"line_polar\",\n \"line_ternary\",\n \"line_mapbox\",\n \"line_geo\",\n \"parallel_coordinates\",\n \"parallel_categories\",\n \"area\",\n \"bar\",\n \"bar_polar\",\n \"violin\",\n \"box\",\n \"strip\",\n \"histogram\",\n \"choropleth\",\n \"data\",\n \"colors\",\n \"set_mapbox_access_token\",\n \"get_trendline_results\",\n]\n", "path": "packages/python/plotly/plotly/express/__init__.py"}, {"content": "from __future__ import absolute_import\n\nfrom plotly import optional_imports\n\n# Require that numpy exists for figure_factory\nnp = optional_imports.get_module(\"numpy\")\nif np is None:\n raise ImportError(\n \"\"\"\\\nThe figure factory module requires the numpy package\"\"\"\n )\n\n\nfrom plotly.figure_factory._2d_density import create_2d_density\nfrom plotly.figure_factory._annotated_heatmap import create_annotated_heatmap\nfrom plotly.figure_factory._bullet import create_bullet\nfrom plotly.figure_factory._candlestick import create_candlestick\nfrom plotly.figure_factory._dendrogram import create_dendrogram\nfrom plotly.figure_factory._distplot import create_distplot\nfrom plotly.figure_factory._facet_grid import create_facet_grid\nfrom plotly.figure_factory._gantt import create_gantt\nfrom plotly.figure_factory._ohlc import create_ohlc\nfrom plotly.figure_factory._quiver import create_quiver\nfrom plotly.figure_factory._scatterplot import create_scatterplotmatrix\nfrom plotly.figure_factory._streamline import create_streamline\nfrom plotly.figure_factory._table import create_table\nfrom plotly.figure_factory._ternary_contour import create_ternary_contour\nfrom plotly.figure_factory._trisurf import create_trisurf\nfrom plotly.figure_factory._violin import create_violin\n\nif optional_imports.get_module(\"pandas\") is not None:\n from plotly.figure_factory._county_choropleth import create_choropleth\n\n__all__ = [\n \"create_2d_density\",\n \"create_annotated_heatmap\",\n \"create_bullet\",\n \"create_candlestick\",\n \"create_dendrogram\",\n \"create_distplot\",\n \"create_facet_grid\",\n \"create_gantt\",\n \"create_ohlc\",\n \"create_quiver\",\n \"create_scatterplotmatrix\",\n \"create_streamline\",\n \"create_table\",\n \"create_ternary_contour\",\n \"create_trisurf\",\n \"create_violin\",\n]\n", "path": "packages/python/plotly/plotly/figure_factory/__init__.py"}]} | 1,808 | 587 |
gh_patches_debug_15492 | rasdani/github-patches | git_diff | AnalogJ__lexicon-447 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
LuaDNS _list_records fails due to missing "id" in API response
I'm using certbot with the luadns plugin which in turn uses lexicon.
I've been experiencing the error reported here: https://community.letsencrypt.org/t/luadns-renew-error/89634
```
2019-08-29 06:32:43,517:DEBUG:urllib3.connectionpool:https://api.luadns.com:443 "GET /v1/zones/[XXXXXX]/records HTTP/1.1" 200 None
2019-08-29 06:32:43,520:ERROR:certbot.error_handler:Encountered exception during recovery:
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/error_handler.py", line 124, in _call_registered
self.funcs[-1]()
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/auth_handler.py", line 220, in _cleanup_challenges
self.auth.cleanup(achalls)
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common.py", line 77, in cleanup
self._cleanup(domain, validation_domain_name, validation)
File "/usr/local/lib/python2.7/dist-packages/certbot_dns_luadns/dns_luadns.py", line 55, in _cleanup
self._get_luadns_client().del_txt_record(domain, validation_name, validation)
File "/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common_lexicon.py", line 65, in del_txt_record
self.provider.delete_record(type='TXT', name=record_name, content=record_content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/base.py", line 118, in delete_record
return self._delete_record(identifier=identifier, rtype=rtype, name=name, content=content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py", line 111, in _delete_record
records = self._list_records(rtype, name, content)
File "/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py", line 71, in _list_records
'id': record['id']
KeyError: 'id'
```
I've replaced the zone id with [XXXXXX].
Code line that errors: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/luadns.py#L71
Running the call directly against the LuaDNS API shows that my REDIRECT record does not return an "id" -> no wonder it fails.
```
...
[
{
"id": XXXXXXXXX,
"name": "sub.some.domain.",
"type": "A",
"content": "111.111.111.111",
"ttl": 300,
"zone_id": XXXXX,
"generated": false,
"created_at": "2019-08-27T05:59:34.219211Z",
"updated_at": "2019-08-27T05:59:34.219213Z"
},
{
"name": "some.domain.",
"type": "REDIRECT",
"content": "1 https://sub.some.domain",
"ttl": 300,
"zone_id": XXXXX,
"generated": false,
"created_at": "2019-08-29T07:05:09.757404499Z",
"updated_at": "2019-08-29T07:05:09.757406053Z"
}
]
...
```
My question is: How do I go forward with this? (for now, I've just deleted the REDIRECT record and everything works fine)
1) Send a PR to lexicon to ignore records that do not have an id? (`if 'id' in record`, as sketched below)
2) Reach out to LuaDNS for an API fix?
Any input would be great :)
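For option 1, a minimal sketch of the guard inside `_list_records` (this matches the direction of the golden diff further down; records the API returns without an `id`, like the REDIRECT entry above, are simply skipped):

```
records = []
for record in payload:
    if 'id' in record:  # skip entries returned without an id (e.g. REDIRECT)
        records.append({
            'id': record['id'],
            'type': record['type'],
            'name': self._full_name(record['name']),
            'ttl': record['ttl'],
            'content': record['content'],
        })
```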
</issue>
<code>
[start of lexicon/providers/luadns.py]
1 """Module provider for luadns"""
2 from __future__ import absolute_import
3 import json
4 import logging
5
6 import requests
7 from lexicon.providers.base import Provider as BaseProvider
8
9
10 LOGGER = logging.getLogger(__name__)
11
12 NAMESERVER_DOMAINS = ['luadns.com']
13
14
15 def provider_parser(subparser):
16 """Configure provider parser for luadns"""
17 subparser.add_argument(
18 "--auth-username", help="specify email address for authentication")
19 subparser.add_argument(
20 "--auth-token", help="specify token for authentication")
21
22
23 class Provider(BaseProvider):
24 """Provider class for luadns"""
25 def __init__(self, config):
26 super(Provider, self).__init__(config)
27 self.domain_id = None
28 self.api_endpoint = 'https://api.luadns.com/v1'
29
30 def _authenticate(self):
31 payload = self._get('/zones')
32
33 domain_info = next(
34 (domain for domain in payload if domain['name'] == self.domain), None)
35
36 if not domain_info:
37 raise Exception('No domain found')
38
39 self.domain_id = domain_info['id']
40
41 # Create record. If record already exists with the same content, do nothing'
42
43 def _create_record(self, rtype, name, content):
44 # check if record already exists
45 existing_records = self._list_records(rtype, name, content)
46 if len(existing_records) == 1:
47 return True
48
49 self._post('/zones/{0}/records'.format(self.domain_id),
50 {'type': rtype,
51 'name': self._fqdn_name(name),
52 'content': content,
53 'ttl': self._get_lexicon_option('ttl')})
54
55 LOGGER.debug('create_record: %s', True)
56 return True
57
58 # List all records. Return an empty list if no records found
59 # type, name and content are used to filter records.
60 # If possible filter during the query, otherwise filter after response is received.
61 def _list_records(self, rtype=None, name=None, content=None):
62 payload = self._get('/zones/{0}/records'.format(self.domain_id))
63
64 records = []
65 for record in payload:
66 processed_record = {
67 'type': record['type'],
68 'name': self._full_name(record['name']),
69 'ttl': record['ttl'],
70 'content': record['content'],
71 'id': record['id']
72 }
73 records.append(processed_record)
74
75 if rtype:
76 records = [record for record in records if record['type'] == rtype]
77 if name:
78 records = [record for record in records if record['name']
79 == self._full_name(name)]
80 if content:
81 records = [
82 record for record in records if record['content'] == content]
83
84 LOGGER.debug('list_records: %s', records)
85 return records
86
87 # Create or update a record.
88 def _update_record(self, identifier, rtype=None, name=None, content=None):
89
90 data = {
91 'ttl': self._get_lexicon_option('ttl')
92 }
93 if rtype:
94 data['type'] = rtype
95 if name:
96 data['name'] = self._fqdn_name(name)
97 if content:
98 data['content'] = content
99
100 self._put(
101 '/zones/{0}/records/{1}'.format(self.domain_id, identifier), data)
102
103 LOGGER.debug('update_record: %s', True)
104 return True
105
106 # Delete an existing record.
107 # If record does not exist, do nothing.
108 def _delete_record(self, identifier=None, rtype=None, name=None, content=None):
109 delete_record_id = []
110 if not identifier:
111 records = self._list_records(rtype, name, content)
112 delete_record_id = [record['id'] for record in records]
113 else:
114 delete_record_id.append(identifier)
115
116 LOGGER.debug('delete_records: %s', delete_record_id)
117
118 for record_id in delete_record_id:
119 self._delete(
120 '/zones/{0}/records/{1}'.format(self.domain_id, record_id))
121
122 LOGGER.debug('delete_record: %s', True)
123 return True
124
125 # Helpers
126 def _request(self, action='GET', url='/', data=None, query_params=None):
127 if data is None:
128 data = {}
129 if query_params is None:
130 query_params = {}
131 response = requests.request(action, self.api_endpoint + url, params=query_params,
132 data=json.dumps(data),
133 auth=requests.auth.HTTPBasicAuth(self._get_provider_option(
134 'auth_username'), self._get_provider_option('auth_token')),
135 headers={
136 'Content-Type': 'application/json',
137 'Accept': 'application/json'
138 })
139 # if the request fails for any reason, throw an error.
140 response.raise_for_status()
141 return response.json()
142
[end of lexicon/providers/luadns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lexicon/providers/luadns.py b/lexicon/providers/luadns.py
--- a/lexicon/providers/luadns.py
+++ b/lexicon/providers/luadns.py
@@ -63,14 +63,15 @@
records = []
for record in payload:
- processed_record = {
- 'type': record['type'],
- 'name': self._full_name(record['name']),
- 'ttl': record['ttl'],
- 'content': record['content'],
- 'id': record['id']
- }
- records.append(processed_record)
+ if 'id' in record:
+ processed_record = {
+ 'id': record['id'],
+ 'type': record['type'],
+ 'name': self._full_name(record['name']),
+ 'ttl': record['ttl'],
+ 'content': record['content']
+ }
+ records.append(processed_record)
if rtype:
records = [record for record in records if record['type'] == rtype]
| {"golden_diff": "diff --git a/lexicon/providers/luadns.py b/lexicon/providers/luadns.py\n--- a/lexicon/providers/luadns.py\n+++ b/lexicon/providers/luadns.py\n@@ -63,14 +63,15 @@\n \n records = []\n for record in payload:\n- processed_record = {\n- 'type': record['type'],\n- 'name': self._full_name(record['name']),\n- 'ttl': record['ttl'],\n- 'content': record['content'],\n- 'id': record['id']\n- }\n- records.append(processed_record)\n+ if 'id' in record:\n+ processed_record = {\n+ 'id': record['id'],\n+ 'type': record['type'],\n+ 'name': self._full_name(record['name']),\n+ 'ttl': record['ttl'],\n+ 'content': record['content']\n+ }\n+ records.append(processed_record)\n \n if rtype:\n records = [record for record in records if record['type'] == rtype]\n", "issue": "LuaDNS _list_records fails due to missing \"id\" in API response\nI'm using certbot with the luadns plugin which in turn uses lexicon.\r\nI've been experiencing the error reported here: https://community.letsencrypt.org/t/luadns-renew-error/89634\r\n\r\n```\r\n2019-08-29 06:32:43,517:DEBUG:urllib3.connectionpool:https://api.luadns.com:443 \"GET /v1/zones/[XXXXXX]/records HTTP/1.1\" 200 None\r\n2019-08-29 06:32:43,520:ERROR:certbot.error_handler:Encountered exception during recovery: \r\nTraceback (most recent call last):\r\n File \"/home/ubuntu/.local/lib/python2.7/site-packages/certbot/error_handler.py\", line 124, in _call_registered\r\n self.funcs[-1]()\r\n File \"/home/ubuntu/.local/lib/python2.7/site-packages/certbot/auth_handler.py\", line 220, in _cleanup_challenges\r\n self.auth.cleanup(achalls)\r\n File \"/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common.py\", line 77, in cleanup\r\n self._cleanup(domain, validation_domain_name, validation)\r\n File \"/usr/local/lib/python2.7/dist-packages/certbot_dns_luadns/dns_luadns.py\", line 55, in _cleanup\r\n self._get_luadns_client().del_txt_record(domain, validation_name, validation)\r\n File \"/home/ubuntu/.local/lib/python2.7/site-packages/certbot/plugins/dns_common_lexicon.py\", line 65, in del_txt_record\r\n self.provider.delete_record(type='TXT', name=record_name, content=record_content)\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/providers/base.py\", line 118, in delete_record\r\n return self._delete_record(identifier=identifier, rtype=rtype, name=name, content=content)\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py\", line 111, in _delete_record\r\n records = self._list_records(rtype, name, content)\r\n File \"/usr/local/lib/python2.7/dist-packages/lexicon/providers/luadns.py\", line 71, in _list_records\r\n 'id': record['id']\r\nKeyError: 'id'\r\n```\r\nI've replaced the zone id with [XXXXXX].\r\n\r\nCode line that errors: https://github.com/AnalogJ/lexicon/blob/master/lexicon/providers/luadns.py#L71\r\n\r\nRunning the call directly towards the LuaDNS api shows that my REDIRECT record does not return an \"id\" -> no wonder it fails.\r\n\r\n```\r\n...\r\n[\r\n {\r\n \"id\": XXXXXXXXX,\r\n \"name\": \"sub.some.domain.\",\r\n \"type\": \"A\",\r\n \"content\": \"111.111.111.111\",\r\n \"ttl\": 300,\r\n \"zone_id\": XXXXX,\r\n \"generated\": false,\r\n \"created_at\": \"2019-08-27T05:59:34.219211Z\",\r\n \"updated_at\": \"2019-08-27T05:59:34.219213Z\"\r\n },\r\n {\r\n \"name\": \"some.domain.\",\r\n \"type\": \"REDIRECT\",\r\n \"content\": \"1 https://sub.some.domain\",\r\n \"ttl\": 300,\r\n \"zone_id\": XXXXX,\r\n \"generated\": false,\r\n \"created_at\": \"2019-08-29T07:05:09.757404499Z\",\r\n 
\"updated_at\": \"2019-08-29T07:05:09.757406053Z\"\r\n }\r\n]\r\n...\r\n```\r\n\r\nMy question is: How do I go forward with this? (for now, I've just deleted the REDIRECT record and everything works fine)\r\n1) Send PR to lexicon to ignore records that does not have id? ( if 'id' in record )\r\n2) Reach out to LuaDNS for API fix?\r\n\r\nAny input would be great :)\n", "before_files": [{"content": "\"\"\"Module provider for luadns\"\"\"\nfrom __future__ import absolute_import\nimport json\nimport logging\n\nimport requests\nfrom lexicon.providers.base import Provider as BaseProvider\n\n\nLOGGER = logging.getLogger(__name__)\n\nNAMESERVER_DOMAINS = ['luadns.com']\n\n\ndef provider_parser(subparser):\n \"\"\"Configure provider parser for luadns\"\"\"\n subparser.add_argument(\n \"--auth-username\", help=\"specify email address for authentication\")\n subparser.add_argument(\n \"--auth-token\", help=\"specify token for authentication\")\n\n\nclass Provider(BaseProvider):\n \"\"\"Provider class for luadns\"\"\"\n def __init__(self, config):\n super(Provider, self).__init__(config)\n self.domain_id = None\n self.api_endpoint = 'https://api.luadns.com/v1'\n\n def _authenticate(self):\n payload = self._get('/zones')\n\n domain_info = next(\n (domain for domain in payload if domain['name'] == self.domain), None)\n\n if not domain_info:\n raise Exception('No domain found')\n\n self.domain_id = domain_info['id']\n\n # Create record. If record already exists with the same content, do nothing'\n\n def _create_record(self, rtype, name, content):\n # check if record already exists\n existing_records = self._list_records(rtype, name, content)\n if len(existing_records) == 1:\n return True\n\n self._post('/zones/{0}/records'.format(self.domain_id),\n {'type': rtype,\n 'name': self._fqdn_name(name),\n 'content': content,\n 'ttl': self._get_lexicon_option('ttl')})\n\n LOGGER.debug('create_record: %s', True)\n return True\n\n # List all records. 
Return an empty list if no records found\n # type, name and content are used to filter records.\n # If possible filter during the query, otherwise filter after response is received.\n def _list_records(self, rtype=None, name=None, content=None):\n payload = self._get('/zones/{0}/records'.format(self.domain_id))\n\n records = []\n for record in payload:\n processed_record = {\n 'type': record['type'],\n 'name': self._full_name(record['name']),\n 'ttl': record['ttl'],\n 'content': record['content'],\n 'id': record['id']\n }\n records.append(processed_record)\n\n if rtype:\n records = [record for record in records if record['type'] == rtype]\n if name:\n records = [record for record in records if record['name']\n == self._full_name(name)]\n if content:\n records = [\n record for record in records if record['content'] == content]\n\n LOGGER.debug('list_records: %s', records)\n return records\n\n # Create or update a record.\n def _update_record(self, identifier, rtype=None, name=None, content=None):\n\n data = {\n 'ttl': self._get_lexicon_option('ttl')\n }\n if rtype:\n data['type'] = rtype\n if name:\n data['name'] = self._fqdn_name(name)\n if content:\n data['content'] = content\n\n self._put(\n '/zones/{0}/records/{1}'.format(self.domain_id, identifier), data)\n\n LOGGER.debug('update_record: %s', True)\n return True\n\n # Delete an existing record.\n # If record does not exist, do nothing.\n def _delete_record(self, identifier=None, rtype=None, name=None, content=None):\n delete_record_id = []\n if not identifier:\n records = self._list_records(rtype, name, content)\n delete_record_id = [record['id'] for record in records]\n else:\n delete_record_id.append(identifier)\n\n LOGGER.debug('delete_records: %s', delete_record_id)\n\n for record_id in delete_record_id:\n self._delete(\n '/zones/{0}/records/{1}'.format(self.domain_id, record_id))\n\n LOGGER.debug('delete_record: %s', True)\n return True\n\n # Helpers\n def _request(self, action='GET', url='/', data=None, query_params=None):\n if data is None:\n data = {}\n if query_params is None:\n query_params = {}\n response = requests.request(action, self.api_endpoint + url, params=query_params,\n data=json.dumps(data),\n auth=requests.auth.HTTPBasicAuth(self._get_provider_option(\n 'auth_username'), self._get_provider_option('auth_token')),\n headers={\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n })\n # if the request fails for any reason, throw an error.\n response.raise_for_status()\n return response.json()\n", "path": "lexicon/providers/luadns.py"}]} | 2,896 | 238 |
gh_patches_debug_40629 | rasdani/github-patches | git_diff | pypi__warehouse-1471 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Optimize reindexing
It's possible to tweak the index settings before and after a full reindex to speed up reindexing.
Things you can do before reindexing:
- Set `num_replicas` to `0`. This stops Elasticsearch from replicating your data across the nodes while you reindex, avoiding that network traffic; the replication can then happen in bulk once you set `num_replicas` back to a value greater than `0` after reindexing is finished.
- Set `refresh_interval` to `-1`, i.e. disable index refreshes completely during bulk indexing. (See: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html#bulk)
Things you can do after reindexing:
- Perform a `force_merge` on the index. The defaults should be fine. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html
- Set the `num_replicas` back to its default value.
- Set the `refresh_interval` back to its default value.
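A minimal sketch of these settings changes, assuming the official `elasticsearch` Python client (the endpoint, index name and restored values below are illustrative, not taken from Warehouse's configuration):
```python
from elasticsearch import Elasticsearch

client = Elasticsearch(["http://localhost:9200"])  # hypothetical endpoint
index = "warehouse-reindex-tmp"                    # hypothetical index name

# Before bulk indexing: no replicas, no automatic refreshes.
client.indices.put_settings(
    index=index,
    body={"index": {"number_of_replicas": 0, "refresh_interval": "-1"}},
)

# ... run the bulk indexing here ...

# After bulk indexing: force-merge, then restore normal settings.
client.indices.forcemerge(index=index)
client.indices.put_settings(
    index=index,
    body={"index": {"number_of_replicas": 1, "refresh_interval": "1s"}},
)
```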
[WIP] Add some optimizations to reindex
WORK IN PROGRESS
I consider the code to be relatively complete (besides the tests), but local testing reveals a lot of timeout errors when connecting to the Elasticsearch cluster. I don't know whether a change here causes this or whether it is something else.
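One mitigation for intermittent timeouts like these, assuming the official Python client is in use, is to raise the request timeout and let the client retry timed-out requests (endpoint and values are illustrative):
```python
from elasticsearch import Elasticsearch

client = Elasticsearch(
    ["http://localhost:9200"],  # hypothetical endpoint
    timeout=30,                 # allow slower responses during heavy indexing
    retry_on_timeout=True,      # retry instead of failing immediately
)
```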
</issue>
<code>
[start of warehouse/search.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import urllib.parse
14
15 import certifi
16 import elasticsearch
17 import venusian
18
19 from elasticsearch_dsl import Index
20
21
22 def doc_type(cls):
23 def callback(scanner, _name, item):
24 types_ = scanner.config.registry.setdefault("search.doc_types", set())
25 types_.add(item)
26
27 venusian.attach(cls, callback)
28
29 return cls
30
31
32 def get_index(name, doc_types, *, using, shards=1, replicas=0):
33 index = Index(name, using=using)
34 for doc_type in doc_types:
35 index.doc_type(doc_type)
36 index.settings(number_of_shards=shards, number_of_replicas=replicas)
37 return index
38
39
40 def es(request):
41 client = request.registry["elasticsearch.client"]
42 doc_types = request.registry.get("search.doc_types", set())
43 index_name = request.registry["elasticsearch.index"]
44 index = get_index(
45 index_name,
46 doc_types,
47 using=client,
48 shards=request.registry.get("elasticsearch.shards", 1),
49 replicas=request.registry.get("elasticsearch.replicas", 0),
50 )
51 return index.search()
52
53
54 def includeme(config):
55 p = urllib.parse.urlparse(config.registry.settings["elasticsearch.url"])
56 qs = urllib.parse.parse_qs(p.query)
57 config.registry["elasticsearch.client"] = elasticsearch.Elasticsearch(
58 [urllib.parse.urlunparse(p[:2] + ("",) * 4)],
59 verify_certs=True,
60 ca_certs=certifi.where(),
61 )
62 config.registry["elasticsearch.index"] = p.path.strip("/")
63 config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
64 config.registry["elasticsearch.replicas"] = \
65 int(qs.get("replicas", ["0"])[0])
66 config.add_request_method(es, name="es", reify=True)
67
[end of warehouse/search.py]
[start of warehouse/cli/search/reindex.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import binascii
14 import os
15
16 import click
17
18 from elasticsearch.helpers import parallel_bulk
19 from sqlalchemy.orm import lazyload, joinedload, load_only
20
21 from warehouse.cli.search import search
22 from warehouse.db import Session
23 from warehouse.packaging.models import Release, Project
24 from warehouse.packaging.search import Project as ProjectDocType
25 from warehouse.search import get_index
26 from warehouse.utils.db import windowed_query
27
28
29 def _project_docs(db):
30 releases = (
31 db.query(Release)
32 .options(load_only(
33 "summary", "description", "author",
34 "author_email", "maintainer", "maintainer_email",
35 "home_page", "download_url", "keywords", "platform",
36 "created"))
37 .options(lazyload("*"),
38 (joinedload(Release.project)
39 .load_only("normalized_name", "name")
40 .joinedload(Project.releases)
41 .load_only("version")),
42 joinedload(Release._classifiers).load_only("classifier"))
43 .distinct(Release.name)
44 .order_by(Release.name, Release._pypi_ordering.desc())
45 )
46 for release in windowed_query(releases, Release.name, 1000):
47 p = ProjectDocType.from_db(release)
48 p.full_clean()
49 yield p.to_dict(include_meta=True)
50
51
52 @search.command()
53 @click.pass_obj
54 def reindex(config, **kwargs):
55 """
56 Recreate the Search Index.
57 """
58 client = config.registry["elasticsearch.client"]
59 db = Session(bind=config.registry["sqlalchemy.engine"])
60
61 # We use a randomly named index so that we can do a zero downtime reindex.
62 # Essentially we'll use a randomly named index which we will use until all
63 # of the data has been reindexed, at which point we'll point an alias at
64 # our randomly named index, and then delete the old randomly named index.
65
66 # Create the new index and associate all of our doc types with it.
67 index_base = config.registry["elasticsearch.index"]
68 random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
69 new_index_name = "{}-{}".format(index_base, random_token)
70 doc_types = config.registry.get("search.doc_types", set())
71 new_index = get_index(
72 new_index_name,
73 doc_types,
74 using=client,
75 shards=config.registry.get("elasticsearch.shards", 1),
76 replicas=config.registry.get("elasticsearch.replicas", 0),
77 )
78 new_index.create()
79
80 # From this point on, if any error occurs, we want to be able to delete our
81 # in progress index.
82 try:
83 db.execute("SET statement_timeout = '600s'")
84
85 for _ in parallel_bulk(client, _project_docs(db)):
86 pass
87 except:
88 new_index.delete()
89 raise
90 finally:
91 db.rollback()
92 db.close()
93
94 # Now that we've finished indexing all of our data, we'll point the alias
95 # at our new randomly named index and delete the old index.
96 if client.indices.exists_alias(name=index_base):
97 to_delete = set()
98 actions = []
99 for name in client.indices.get_alias(name=index_base):
100 to_delete.add(name)
101 actions.append({"remove": {"index": name, "alias": index_base}})
102 actions.append({"add": {"index": new_index_name, "alias": index_base}})
103 client.indices.update_aliases({"actions": actions})
104 client.indices.delete(",".join(to_delete))
105 else:
106 client.indices.put_alias(name=index_base, index=new_index_name)
107
[end of warehouse/cli/search/reindex.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/warehouse/cli/search/reindex.py b/warehouse/cli/search/reindex.py
--- a/warehouse/cli/search/reindex.py
+++ b/warehouse/cli/search/reindex.py
@@ -57,6 +57,8 @@
"""
client = config.registry["elasticsearch.client"]
db = Session(bind=config.registry["sqlalchemy.engine"])
+ number_of_replicas = config.registry.get("elasticsearch.replicas", 0)
+ refresh_interval = config.registry.get("elasticsearch.interval", "1s")
# We use a randomly named index so that we can do a zero downtime reindex.
# Essentially we'll use a randomly named index which we will use until all
@@ -68,12 +70,16 @@
random_token = binascii.hexlify(os.urandom(5)).decode("ascii")
new_index_name = "{}-{}".format(index_base, random_token)
doc_types = config.registry.get("search.doc_types", set())
+
+ # Create the new index with zero replicas and index refreshes disabled
+ # while we are bulk indexing.
new_index = get_index(
new_index_name,
doc_types,
using=client,
shards=config.registry.get("elasticsearch.shards", 1),
- replicas=config.registry.get("elasticsearch.replicas", 0),
+ replicas=0,
+ interval="-1",
)
new_index.create()
@@ -91,8 +97,20 @@
db.rollback()
db.close()
- # Now that we've finished indexing all of our data, we'll point the alias
- # at our new randomly named index and delete the old index.
+ # Now that we've finished indexing all of our data we can optimize it and
+ # update the replicas and refresh intervals.
+ client.indices.forcemerge(index=new_index_name)
+ client.indices.put_settings(
+ index=new_index_name,
+ body={
+ "index": {
+ "number_of_replicas": number_of_replicas,
+ "refresh_interval": refresh_interval,
+ }
+ }
+ )
+
+ # Point the alias at our new randomly named index and delete the old index.
if client.indices.exists_alias(name=index_base):
to_delete = set()
actions = []
diff --git a/warehouse/search.py b/warehouse/search.py
--- a/warehouse/search.py
+++ b/warehouse/search.py
@@ -29,11 +29,15 @@
return cls
-def get_index(name, doc_types, *, using, shards=1, replicas=0):
+def get_index(name, doc_types, *, using, shards=1, replicas=0, interval="1s"):
index = Index(name, using=using)
for doc_type in doc_types:
index.doc_type(doc_type)
- index.settings(number_of_shards=shards, number_of_replicas=replicas)
+ index.settings(
+ number_of_shards=shards,
+ number_of_replicas=replicas,
+ refresh_interval=interval,
+ )
return index
@@ -58,6 +62,8 @@
[urllib.parse.urlunparse(p[:2] + ("",) * 4)],
verify_certs=True,
ca_certs=certifi.where(),
+ timeout=30,
+ retry_on_timeout=True,
)
config.registry["elasticsearch.index"] = p.path.strip("/")
config.registry["elasticsearch.shards"] = int(qs.get("shards", ["1"])[0])
| {"golden_diff": "diff --git a/warehouse/cli/search/reindex.py b/warehouse/cli/search/reindex.py\n--- a/warehouse/cli/search/reindex.py\n+++ b/warehouse/cli/search/reindex.py\n@@ -57,6 +57,8 @@\n \"\"\"\n client = config.registry[\"elasticsearch.client\"]\n db = Session(bind=config.registry[\"sqlalchemy.engine\"])\n+ number_of_replicas = config.registry.get(\"elasticsearch.replicas\", 0)\n+ refresh_interval = config.registry.get(\"elasticsearch.interval\", \"1s\")\n \n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n@@ -68,12 +70,16 @@\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = config.registry.get(\"search.doc_types\", set())\n+\n+ # Create the new index with zero replicas and index refreshes disabled\n+ # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=config.registry.get(\"elasticsearch.shards\", 1),\n- replicas=config.registry.get(\"elasticsearch.replicas\", 0),\n+ replicas=0,\n+ interval=\"-1\",\n )\n new_index.create()\n \n@@ -91,8 +97,20 @@\n db.rollback()\n db.close()\n \n- # Now that we've finished indexing all of our data, we'll point the alias\n- # at our new randomly named index and delete the old index.\n+ # Now that we've finished indexing all of our data we can optimize it and\n+ # update the replicas and refresh intervals.\n+ client.indices.forcemerge(index=new_index_name)\n+ client.indices.put_settings(\n+ index=new_index_name,\n+ body={\n+ \"index\": {\n+ \"number_of_replicas\": number_of_replicas,\n+ \"refresh_interval\": refresh_interval,\n+ }\n+ }\n+ )\n+\n+ # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\ndiff --git a/warehouse/search.py b/warehouse/search.py\n--- a/warehouse/search.py\n+++ b/warehouse/search.py\n@@ -29,11 +29,15 @@\n return cls\n \n \n-def get_index(name, doc_types, *, using, shards=1, replicas=0):\n+def get_index(name, doc_types, *, using, shards=1, replicas=0, interval=\"1s\"):\n index = Index(name, using=using)\n for doc_type in doc_types:\n index.doc_type(doc_type)\n- index.settings(number_of_shards=shards, number_of_replicas=replicas)\n+ index.settings(\n+ number_of_shards=shards,\n+ number_of_replicas=replicas,\n+ refresh_interval=interval,\n+ )\n return index\n \n \n@@ -58,6 +62,8 @@\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n+ timeout=30,\n+ retry_on_timeout=True,\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n", "issue": "Optimize reindexing\nIt's possible to tweak the index settings before and after a full reindex speed up reindexing.\n\nThings you can do before reindexing:\n- Set `num_replicas` to `0`. During reindexing this will stop the network traffic of Elasticsearch replicating your data across the nodes. This can happen in bulk when you set `num_replicas` back to a value greater than `0` after reindexing is finished.\n- Set `refresh_interval` to `-1`. I.e. disable index refreshes completely during bulk indexing. (See: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-update-settings.html#bulk)\n\nThings you can do after reindexing:\n- Perform a `force_merge` on the index. 
The defaults should be fine. See: https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html\n- Set the `num_replicas` back to its default value.\n- Set the `refresh_interval` back to its default value.\n\n[WIP] Add some optimizations to reindex\nWORK IN PROGRESS\n\nI consider the code to be relatively complete (besides the tests), but local testing is revealing a lot of timeout errors connecting to the Elasticsearch cluster. I don't know if a change here causes this or something else.\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport urllib.parse\n\nimport certifi\nimport elasticsearch\nimport venusian\n\nfrom elasticsearch_dsl import Index\n\n\ndef doc_type(cls):\n def callback(scanner, _name, item):\n types_ = scanner.config.registry.setdefault(\"search.doc_types\", set())\n types_.add(item)\n\n venusian.attach(cls, callback)\n\n return cls\n\n\ndef get_index(name, doc_types, *, using, shards=1, replicas=0):\n index = Index(name, using=using)\n for doc_type in doc_types:\n index.doc_type(doc_type)\n index.settings(number_of_shards=shards, number_of_replicas=replicas)\n return index\n\n\ndef es(request):\n client = request.registry[\"elasticsearch.client\"]\n doc_types = request.registry.get(\"search.doc_types\", set())\n index_name = request.registry[\"elasticsearch.index\"]\n index = get_index(\n index_name,\n doc_types,\n using=client,\n shards=request.registry.get(\"elasticsearch.shards\", 1),\n replicas=request.registry.get(\"elasticsearch.replicas\", 0),\n )\n return index.search()\n\n\ndef includeme(config):\n p = urllib.parse.urlparse(config.registry.settings[\"elasticsearch.url\"])\n qs = urllib.parse.parse_qs(p.query)\n config.registry[\"elasticsearch.client\"] = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n )\n config.registry[\"elasticsearch.index\"] = p.path.strip(\"/\")\n config.registry[\"elasticsearch.shards\"] = int(qs.get(\"shards\", [\"1\"])[0])\n config.registry[\"elasticsearch.replicas\"] = \\\n int(qs.get(\"replicas\", [\"0\"])[0])\n config.add_request_method(es, name=\"es\", reify=True)\n", "path": "warehouse/search.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport os\n\nimport click\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom sqlalchemy.orm import lazyload, joinedload, load_only\n\nfrom warehouse.cli.search import search\nfrom warehouse.db import Session\nfrom 
warehouse.packaging.models import Release, Project\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search import get_index\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n releases = (\n db.query(Release)\n .options(load_only(\n \"summary\", \"description\", \"author\",\n \"author_email\", \"maintainer\", \"maintainer_email\",\n \"home_page\", \"download_url\", \"keywords\", \"platform\",\n \"created\"))\n .options(lazyload(\"*\"),\n (joinedload(Release.project)\n .load_only(\"normalized_name\", \"name\")\n .joinedload(Project.releases)\n .load_only(\"version\")),\n joinedload(Release._classifiers).load_only(\"classifier\"))\n .distinct(Release.name)\n .order_by(Release.name, Release._pypi_ordering.desc())\n )\n for release in windowed_query(releases, Release.name, 1000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected]()\[email protected]_obj\ndef reindex(config, **kwargs):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n client = config.registry[\"elasticsearch.client\"]\n db = Session(bind=config.registry[\"sqlalchemy.engine\"])\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = config.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = config.registry.get(\"search.doc_types\", set())\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=config.registry.get(\"elasticsearch.shards\", 1),\n replicas=config.registry.get(\"elasticsearch.replicas\", 0),\n )\n new_index.create()\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(db)):\n pass\n except:\n new_index.delete()\n raise\n finally:\n db.rollback()\n db.close()\n\n # Now that we've finished indexing all of our data, we'll point the alias\n # at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/cli/search/reindex.py"}]} | 2,572 | 774 |
gh_patches_debug_6837 | rasdani/github-patches | git_diff | ckan__ckan-5032 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Resource URL gets changed to ___
### CKAN Version if known (or site URL)
latest master
### The problem
When I create or edit a resource using the web form, whatever I set the resource.url to be, it gets saved as something like this: `http://myckansite.com/dataset/713c8c32-cbd8-46ce-90fb-86de1f0811d2/resource/59c0e90c-11e2-4d7f-af43-e098660745dc/download/___` (!)
It shouldn't change the URL at all, and the URL it saves instead returns a 404.
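For context, a plausible mechanism (my reading, not verified): when the form is submitted without picking a file, the browser still sends an empty file field, which reaches the uploader as an empty Werkzeug `FileStorage`; that object passes an `isinstance` check even though its filename is empty, and the empty filename is then munged into `___`. A small sketch of the distinction that matters here:
```python
# Sketch: an empty FileStorage is still an instance of the allowed upload types,
# but it is falsy because its filename is empty.
from werkzeug.datastructures import FileStorage

empty = FileStorage(filename="")
print(isinstance(empty, FileStorage))  # True  -> an instance check alone accepts it
print(bool(empty))                     # False -> a truthiness check would filter it out
```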
</issue>
<code>
[start of ckan/lib/uploader.py]
1 # encoding: utf-8
2
3 import os
4 import cgi
5 import datetime
6 import logging
7 import magic
8 import mimetypes
9
10 from werkzeug.datastructures import FileStorage as FlaskFileStorage
11
12 import ckan.lib.munge as munge
13 import ckan.logic as logic
14 import ckan.plugins as plugins
15 from ckan.common import config
16
17 ALLOWED_UPLOAD_TYPES = (cgi.FieldStorage, FlaskFileStorage)
18 MB = 1 << 20
19
20 log = logging.getLogger(__name__)
21
22 _storage_path = None
23 _max_resource_size = None
24 _max_image_size = None
25
26
27 def _copy_file(input_file, output_file, max_size):
28 input_file.seek(0)
29 current_size = 0
30 while True:
31 current_size = current_size + 1
32 # MB chunks
33 data = input_file.read(MB)
34
35 if not data:
36 break
37 output_file.write(data)
38 if current_size > max_size:
39 raise logic.ValidationError({'upload': ['File upload too large']})
40
41
42 def _get_underlying_file(wrapper):
43 if isinstance(wrapper, FlaskFileStorage):
44 return wrapper.stream
45 return wrapper.file
46
47
48 def get_uploader(upload_to, old_filename=None):
49 '''Query IUploader plugins and return an uploader instance for general
50 files.'''
51 upload = None
52 for plugin in plugins.PluginImplementations(plugins.IUploader):
53 upload = plugin.get_uploader(upload_to, old_filename)
54
55 # default uploader
56 if upload is None:
57 upload = Upload(upload_to, old_filename)
58
59 return upload
60
61
62 def get_resource_uploader(data_dict):
63 '''Query IUploader plugins and return a resource uploader instance.'''
64 upload = None
65 for plugin in plugins.PluginImplementations(plugins.IUploader):
66 upload = plugin.get_resource_uploader(data_dict)
67
68 # default uploader
69 if upload is None:
70 upload = ResourceUpload(data_dict)
71
72 return upload
73
74
75 def get_storage_path():
76 '''Function to cache storage path'''
77 global _storage_path
78
79 # None means it has not been set. False means not in config.
80 if _storage_path is None:
81 storage_path = config.get('ckan.storage_path')
82 if storage_path:
83 _storage_path = storage_path
84 else:
85 log.critical('''Please specify a ckan.storage_path in your config
86 for your uploads''')
87 _storage_path = False
88
89 return _storage_path
90
91
92 def get_max_image_size():
93 global _max_image_size
94 if _max_image_size is None:
95 _max_image_size = int(config.get('ckan.max_image_size', 2))
96 return _max_image_size
97
98
99 def get_max_resource_size():
100 global _max_resource_size
101 if _max_resource_size is None:
102 _max_resource_size = int(config.get('ckan.max_resource_size', 10))
103 return _max_resource_size
104
105
106 class Upload(object):
107 def __init__(self, object_type, old_filename=None):
108 ''' Setup upload by creating a subdirectory of the storage directory
109 of name object_type. old_filename is the name of the file in the url
110 field last time'''
111
112 self.storage_path = None
113 self.filename = None
114 self.filepath = None
115 path = get_storage_path()
116 if not path:
117 return
118 self.storage_path = os.path.join(path, 'storage',
119 'uploads', object_type)
120 try:
121 os.makedirs(self.storage_path)
122 except OSError as e:
123 # errno 17 is file already exists
124 if e.errno != 17:
125 raise
126 self.object_type = object_type
127 self.old_filename = old_filename
128 if old_filename:
129 self.old_filepath = os.path.join(self.storage_path, old_filename)
130
131 def update_data_dict(self, data_dict, url_field, file_field, clear_field):
132 ''' Manipulate data from the data_dict. url_field is the name of the
133 field where the upload is going to be. file_field is name of the key
134 where the FieldStorage is kept (i.e the field where the file data
135 actually is). clear_field is the name of a boolean field which
136 requests the upload to be deleted. This needs to be called before
137 it reaches any validators'''
138
139 self.url = data_dict.get(url_field, '')
140 self.clear = data_dict.pop(clear_field, None)
141 self.file_field = file_field
142 self.upload_field_storage = data_dict.pop(file_field, None)
143
144 if not self.storage_path:
145 return
146
147 if isinstance(self.upload_field_storage, (ALLOWED_UPLOAD_TYPES,)):
148 if self.upload_field_storage.filename:
149 self.filename = self.upload_field_storage.filename
150 self.filename = str(datetime.datetime.utcnow()) + self.filename
151 self.filename = munge.munge_filename_legacy(self.filename)
152 self.filepath = os.path.join(self.storage_path, self.filename)
153 data_dict[url_field] = self.filename
154 self.upload_file = _get_underlying_file(
155 self.upload_field_storage)
156 self.tmp_filepath = self.filepath + '~'
157 # keep the file if there has been no change
158 elif self.old_filename and not self.old_filename.startswith('http'):
159 if not self.clear:
160 data_dict[url_field] = self.old_filename
161 if self.clear and self.url == self.old_filename:
162 data_dict[url_field] = ''
163
164 def upload(self, max_size=2):
165 ''' Actually upload the file.
166 This should happen just before a commit but after the data has
167 been validated and flushed to the db. This is so we do not store
168 anything unless the request is actually good.
169 max_size is size in MB maximum of the file'''
170
171 if self.filename:
172 with open(self.tmp_filepath, 'wb+') as output_file:
173 try:
174 _copy_file(self.upload_file, output_file, max_size)
175 except logic.ValidationError:
176 os.remove(self.tmp_filepath)
177 raise
178 finally:
179 self.upload_file.close()
180 os.rename(self.tmp_filepath, self.filepath)
181 self.clear = True
182
183 if (self.clear and self.old_filename
184 and not self.old_filename.startswith('http')):
185 try:
186 os.remove(self.old_filepath)
187 except OSError:
188 pass
189
190
191 class ResourceUpload(object):
192 def __init__(self, resource):
193 path = get_storage_path()
194 config_mimetype_guess = config.get('ckan.mimetype_guess', 'file_ext')
195
196 if not path:
197 self.storage_path = None
198 return
199 self.storage_path = os.path.join(path, 'resources')
200 try:
201 os.makedirs(self.storage_path)
202 except OSError as e:
203 # errno 17 is file already exists
204 if e.errno != 17:
205 raise
206 self.filename = None
207 self.mimetype = None
208
209 url = resource.get('url')
210
211 upload_field_storage = resource.pop('upload', None)
212 self.clear = resource.pop('clear_upload', None)
213
214 if url and config_mimetype_guess == 'file_ext':
215 self.mimetype = mimetypes.guess_type(url)[0]
216
217 if isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):
218 self.filesize = 0 # bytes
219
220 self.filename = upload_field_storage.filename
221 self.filename = munge.munge_filename(self.filename)
222 resource['url'] = self.filename
223 resource['url_type'] = 'upload'
224 resource['last_modified'] = datetime.datetime.utcnow()
225 self.upload_file = _get_underlying_file(upload_field_storage)
226 self.upload_file.seek(0, os.SEEK_END)
227 self.filesize = self.upload_file.tell()
228 # go back to the beginning of the file buffer
229 self.upload_file.seek(0, os.SEEK_SET)
230
231 # check if the mimetype failed from guessing with the url
232 if not self.mimetype and config_mimetype_guess == 'file_ext':
233 self.mimetype = mimetypes.guess_type(self.filename)[0]
234
235 if not self.mimetype and config_mimetype_guess == 'file_contents':
236 try:
237 self.mimetype = magic.from_buffer(self.upload_file.read(),
238 mime=True)
239 self.upload_file.seek(0, os.SEEK_SET)
240 except IOError as e:
241 # Not that important if call above fails
242 self.mimetype = None
243
244 elif self.clear:
245 resource['url_type'] = ''
246
247 def get_directory(self, id):
248 directory = os.path.join(self.storage_path,
249 id[0:3], id[3:6])
250 return directory
251
252 def get_path(self, id):
253 directory = self.get_directory(id)
254 filepath = os.path.join(directory, id[6:])
255 return filepath
256
257 def upload(self, id, max_size=10):
258 '''Actually upload the file.
259
260 :returns: ``'file uploaded'`` if a new file was successfully uploaded
261 (whether it overwrote a previously uploaded file or not),
262 ``'file deleted'`` if an existing uploaded file was deleted,
263 or ``None`` if nothing changed
264 :rtype: ``string`` or ``None``
265
266 '''
267 if not self.storage_path:
268 return
269
270 # Get directory and filepath on the system
271 # where the file for this resource will be stored
272 directory = self.get_directory(id)
273 filepath = self.get_path(id)
274
275 # If a filename has been provided (a file is being uploaded)
276 # we write it to the filepath (and overwrite it if it already
277 # exists). This way the uploaded file will always be stored
278 # in the same location
279 if self.filename:
280 try:
281 os.makedirs(directory)
282 except OSError as e:
283 # errno 17 is file already exists
284 if e.errno != 17:
285 raise
286 tmp_filepath = filepath + '~'
287 with open(tmp_filepath, 'wb+') as output_file:
288 try:
289 _copy_file(self.upload_file, output_file, max_size)
290 except logic.ValidationError:
291 os.remove(tmp_filepath)
292 raise
293 finally:
294 self.upload_file.close()
295 os.rename(tmp_filepath, filepath)
296 return
297
298 # The resource form only sets self.clear (via the input clear_upload)
299 # to True when an uploaded file is not replaced by another uploaded
300 # file, only if it is replaced by a link to file.
301 # If the uploaded file is replaced by a link, we should remove the
302 # previously uploaded file to clean up the file system.
303 if self.clear:
304 try:
305 os.remove(filepath)
306 except OSError as e:
307 pass
308
[end of ckan/lib/uploader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/ckan/lib/uploader.py b/ckan/lib/uploader.py
--- a/ckan/lib/uploader.py
+++ b/ckan/lib/uploader.py
@@ -214,7 +214,8 @@
if url and config_mimetype_guess == 'file_ext':
self.mimetype = mimetypes.guess_type(url)[0]
- if isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):
+ if bool(upload_field_storage) and \
+ isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):
self.filesize = 0 # bytes
self.filename = upload_field_storage.filename
| {"golden_diff": "diff --git a/ckan/lib/uploader.py b/ckan/lib/uploader.py\n--- a/ckan/lib/uploader.py\n+++ b/ckan/lib/uploader.py\n@@ -214,7 +214,8 @@\n if url and config_mimetype_guess == 'file_ext':\n self.mimetype = mimetypes.guess_type(url)[0]\n \n- if isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):\n+ if bool(upload_field_storage) and \\\n+ isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):\n self.filesize = 0 # bytes\n \n self.filename = upload_field_storage.filename\n", "issue": "Resource URL gets changed to ___\n### CKAN Version if known (or site URL)\r\nlatest master\r\n\r\n### The probem\r\nWhen I create or edit a resource using the web form, whatever I set the resource.url to be, it gets saved as something like this: `http://myckansite.com/dataset/713c8c32-cbd8-46ce-90fb-86de1f0811d2/resource/59c0e90c-11e2-4d7f-af43-e098660745dc/download/___` (!)\r\nIt shouldn't change it. And the URL it sets it to gives a 404.\n", "before_files": [{"content": "# encoding: utf-8\n\nimport os\nimport cgi\nimport datetime\nimport logging\nimport magic\nimport mimetypes\n\nfrom werkzeug.datastructures import FileStorage as FlaskFileStorage\n\nimport ckan.lib.munge as munge\nimport ckan.logic as logic\nimport ckan.plugins as plugins\nfrom ckan.common import config\n\nALLOWED_UPLOAD_TYPES = (cgi.FieldStorage, FlaskFileStorage)\nMB = 1 << 20\n\nlog = logging.getLogger(__name__)\n\n_storage_path = None\n_max_resource_size = None\n_max_image_size = None\n\n\ndef _copy_file(input_file, output_file, max_size):\n input_file.seek(0)\n current_size = 0\n while True:\n current_size = current_size + 1\n # MB chunks\n data = input_file.read(MB)\n\n if not data:\n break\n output_file.write(data)\n if current_size > max_size:\n raise logic.ValidationError({'upload': ['File upload too large']})\n\n\ndef _get_underlying_file(wrapper):\n if isinstance(wrapper, FlaskFileStorage):\n return wrapper.stream\n return wrapper.file\n\n\ndef get_uploader(upload_to, old_filename=None):\n '''Query IUploader plugins and return an uploader instance for general\n files.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_uploader(upload_to, old_filename)\n\n # default uploader\n if upload is None:\n upload = Upload(upload_to, old_filename)\n\n return upload\n\n\ndef get_resource_uploader(data_dict):\n '''Query IUploader plugins and return a resource uploader instance.'''\n upload = None\n for plugin in plugins.PluginImplementations(plugins.IUploader):\n upload = plugin.get_resource_uploader(data_dict)\n\n # default uploader\n if upload is None:\n upload = ResourceUpload(data_dict)\n\n return upload\n\n\ndef get_storage_path():\n '''Function to cache storage path'''\n global _storage_path\n\n # None means it has not been set. 
False means not in config.\n if _storage_path is None:\n storage_path = config.get('ckan.storage_path')\n if storage_path:\n _storage_path = storage_path\n else:\n log.critical('''Please specify a ckan.storage_path in your config\n for your uploads''')\n _storage_path = False\n\n return _storage_path\n\n\ndef get_max_image_size():\n global _max_image_size\n if _max_image_size is None:\n _max_image_size = int(config.get('ckan.max_image_size', 2))\n return _max_image_size\n\n\ndef get_max_resource_size():\n global _max_resource_size\n if _max_resource_size is None:\n _max_resource_size = int(config.get('ckan.max_resource_size', 10))\n return _max_resource_size\n\n\nclass Upload(object):\n def __init__(self, object_type, old_filename=None):\n ''' Setup upload by creating a subdirectory of the storage directory\n of name object_type. old_filename is the name of the file in the url\n field last time'''\n\n self.storage_path = None\n self.filename = None\n self.filepath = None\n path = get_storage_path()\n if not path:\n return\n self.storage_path = os.path.join(path, 'storage',\n 'uploads', object_type)\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.object_type = object_type\n self.old_filename = old_filename\n if old_filename:\n self.old_filepath = os.path.join(self.storage_path, old_filename)\n\n def update_data_dict(self, data_dict, url_field, file_field, clear_field):\n ''' Manipulate data from the data_dict. url_field is the name of the\n field where the upload is going to be. file_field is name of the key\n where the FieldStorage is kept (i.e the field where the file data\n actually is). clear_field is the name of a boolean field which\n requests the upload to be deleted. This needs to be called before\n it reaches any validators'''\n\n self.url = data_dict.get(url_field, '')\n self.clear = data_dict.pop(clear_field, None)\n self.file_field = file_field\n self.upload_field_storage = data_dict.pop(file_field, None)\n\n if not self.storage_path:\n return\n\n if isinstance(self.upload_field_storage, (ALLOWED_UPLOAD_TYPES,)):\n if self.upload_field_storage.filename:\n self.filename = self.upload_field_storage.filename\n self.filename = str(datetime.datetime.utcnow()) + self.filename\n self.filename = munge.munge_filename_legacy(self.filename)\n self.filepath = os.path.join(self.storage_path, self.filename)\n data_dict[url_field] = self.filename\n self.upload_file = _get_underlying_file(\n self.upload_field_storage)\n self.tmp_filepath = self.filepath + '~'\n # keep the file if there has been no change\n elif self.old_filename and not self.old_filename.startswith('http'):\n if not self.clear:\n data_dict[url_field] = self.old_filename\n if self.clear and self.url == self.old_filename:\n data_dict[url_field] = ''\n\n def upload(self, max_size=2):\n ''' Actually upload the file.\n This should happen just before a commit but after the data has\n been validated and flushed to the db. 
This is so we do not store\n anything unless the request is actually good.\n max_size is size in MB maximum of the file'''\n\n if self.filename:\n with open(self.tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(self.tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(self.tmp_filepath, self.filepath)\n self.clear = True\n\n if (self.clear and self.old_filename\n and not self.old_filename.startswith('http')):\n try:\n os.remove(self.old_filepath)\n except OSError:\n pass\n\n\nclass ResourceUpload(object):\n def __init__(self, resource):\n path = get_storage_path()\n config_mimetype_guess = config.get('ckan.mimetype_guess', 'file_ext')\n\n if not path:\n self.storage_path = None\n return\n self.storage_path = os.path.join(path, 'resources')\n try:\n os.makedirs(self.storage_path)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n self.filename = None\n self.mimetype = None\n\n url = resource.get('url')\n\n upload_field_storage = resource.pop('upload', None)\n self.clear = resource.pop('clear_upload', None)\n\n if url and config_mimetype_guess == 'file_ext':\n self.mimetype = mimetypes.guess_type(url)[0]\n\n if isinstance(upload_field_storage, ALLOWED_UPLOAD_TYPES):\n self.filesize = 0 # bytes\n\n self.filename = upload_field_storage.filename\n self.filename = munge.munge_filename(self.filename)\n resource['url'] = self.filename\n resource['url_type'] = 'upload'\n resource['last_modified'] = datetime.datetime.utcnow()\n self.upload_file = _get_underlying_file(upload_field_storage)\n self.upload_file.seek(0, os.SEEK_END)\n self.filesize = self.upload_file.tell()\n # go back to the beginning of the file buffer\n self.upload_file.seek(0, os.SEEK_SET)\n\n # check if the mimetype failed from guessing with the url\n if not self.mimetype and config_mimetype_guess == 'file_ext':\n self.mimetype = mimetypes.guess_type(self.filename)[0]\n\n if not self.mimetype and config_mimetype_guess == 'file_contents':\n try:\n self.mimetype = magic.from_buffer(self.upload_file.read(),\n mime=True)\n self.upload_file.seek(0, os.SEEK_SET)\n except IOError as e:\n # Not that important if call above fails\n self.mimetype = None\n\n elif self.clear:\n resource['url_type'] = ''\n\n def get_directory(self, id):\n directory = os.path.join(self.storage_path,\n id[0:3], id[3:6])\n return directory\n\n def get_path(self, id):\n directory = self.get_directory(id)\n filepath = os.path.join(directory, id[6:])\n return filepath\n\n def upload(self, id, max_size=10):\n '''Actually upload the file.\n\n :returns: ``'file uploaded'`` if a new file was successfully uploaded\n (whether it overwrote a previously uploaded file or not),\n ``'file deleted'`` if an existing uploaded file was deleted,\n or ``None`` if nothing changed\n :rtype: ``string`` or ``None``\n\n '''\n if not self.storage_path:\n return\n\n # Get directory and filepath on the system\n # where the file for this resource will be stored\n directory = self.get_directory(id)\n filepath = self.get_path(id)\n\n # If a filename has been provided (a file is being uploaded)\n # we write it to the filepath (and overwrite it if it already\n # exists). 
This way the uploaded file will always be stored\n # in the same location\n if self.filename:\n try:\n os.makedirs(directory)\n except OSError as e:\n # errno 17 is file already exists\n if e.errno != 17:\n raise\n tmp_filepath = filepath + '~'\n with open(tmp_filepath, 'wb+') as output_file:\n try:\n _copy_file(self.upload_file, output_file, max_size)\n except logic.ValidationError:\n os.remove(tmp_filepath)\n raise\n finally:\n self.upload_file.close()\n os.rename(tmp_filepath, filepath)\n return\n\n # The resource form only sets self.clear (via the input clear_upload)\n # to True when an uploaded file is not replaced by another uploaded\n # file, only if it is replaced by a link to file.\n # If the uploaded file is replaced by a link, we should remove the\n # previously uploaded file to clean up the file system.\n if self.clear:\n try:\n os.remove(filepath)\n except OSError as e:\n pass\n", "path": "ckan/lib/uploader.py"}]} | 3,788 | 137 |
gh_patches_debug_33729 | rasdani/github-patches | git_diff | translate__pootle-5882 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't create the project if the command fails in init_fs_project
At the moment, if this command fails for any reason it leaves a project behind.
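A minimal sketch of the behaviour I'd expect instead (the helper names are placeholders, not the real code): create the project, then delete it again if a later step fails.
```python
from django.core.management import CommandError

def create_project_or_roll_back(create_project, sync_project):
    """Sketch: delete the freshly created project if the follow-up sync fails."""
    project = create_project()      # e.g. Project.objects.create(...)
    try:
        sync_project(project)       # e.g. the FS plugin fetch/add/sync calls
    except Exception as error:
        project.delete()            # don't leave a stray project behind
        raise CommandError(error)
    return project
```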
</issue>
<code>
[start of pootle/apps/pootle_fs/management/commands/init_fs_project.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10
11 from django.core.exceptions import ValidationError
12 from django.core.management import BaseCommand, CommandError
13
14 from pootle_format.models import Format
15 from pootle_fs.utils import FSPlugin, parse_fs_url
16 from pootle_language.models import Language
17 from pootle_project.models import Project
18
19
20 logger = logging.getLogger('pootle.fs')
21
22
23 class Command(BaseCommand):
24 help = "Init a new Pootle FS project."
25
26 def add_arguments(self, parser):
27 parser.add_argument(
28 'code',
29 metavar='CODE',
30 help='Project code'
31 )
32 parser.add_argument(
33 'fs',
34 metavar='FS_URL',
35 help='FS url "filesystem_type+/repo/path/"'
36 )
37 parser.add_argument(
38 'translation_mapping',
39 help='Translation mapping "<language_code>/<filename>.<ext>"',
40 metavar='TRANSLATION_MAPPING'
41 )
42 parser.add_argument(
43 '-n', '--name',
44 action='store',
45 dest='name',
46 nargs='?',
47 help='Project name',
48 )
49 parser.add_argument(
50 '--filetypes',
51 action='append',
52 dest='filetypes',
53 help='File types',
54 )
55 parser.add_argument(
56 '--checkstyle',
57 action='store',
58 dest='checkstyle',
59 help='Checkstyle',
60 nargs='?',
61 default='standard'
62 )
63 parser.add_argument(
64 '-l', '--source-language',
65 action='store',
66 dest='source_language',
67 help="Code for the project's source language",
68 nargs='?',
69 default='en'
70 )
71 parser.add_argument(
72 '--nosync',
73 action='store_false',
74 dest='sync',
75 help='Flag if sync is unnecessary',
76 default=True
77 )
78
79 def handle(self, **options):
80 source_language_code = options['source_language']
81 try:
82 source_language = Language.objects.get(code=source_language_code)
83 except Language.DoesNotExist as e:
84 self.stdout.write('%s: Unknown language code.' %
85 source_language_code)
86 raise CommandError(e)
87
88 fs_type, fs_url = parse_fs_url(options['fs'])
89 code = options['code']
90 name = options['name'] or code.capitalize()
91
92 try:
93 project = Project.objects.create(
94 code=code,
95 fullname=name,
96 treestyle='pootle_fs',
97 checkstyle=options['checkstyle'],
98 source_language=source_language)
99 except ValidationError as e:
100 raise CommandError(e)
101
102 for filetype in options["filetypes"] or ["po"]:
103 try:
104 filetype = Format.objects.get(name=filetype)
105 project.filetypes.add(filetype)
106 except Format.DoesNotExist as e:
107 raise CommandError(e)
108
109 project.config['pootle_fs.fs_type'] = fs_type
110 project.config['pootle_fs.fs_url'] = fs_url
111 project.config['pootle_fs.translation_mappings'] = {
112 'default': options['translation_mapping']
113 }
114 if options['sync']:
115 plugin = FSPlugin(project)
116 plugin.fetch()
117 plugin.add()
118 plugin.sync()
119
[end of pootle/apps/pootle_fs/management/commands/init_fs_project.py]
[start of pootle/apps/pootle_fs/localfs.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 import logging
10 import uuid
11
12 import dirsync
13
14 from django import forms
15
16 from pootle.core.delegate import revision
17 from pootle_project.models import Project
18
19 from .plugin import Plugin
20
21
22 class LocalFSPlugin(Plugin):
23
24 fs_type = "localfs"
25 _pulled = False
26
27 @property
28 def latest_hash(self):
29 return revision.get(Project)(
30 self.project).get(key="pootle.fs.fs_hash")
31
32 def push(self, response):
33 dirsync.sync(
34 self.project.local_fs_path,
35 self.fs_url,
36 "sync",
37 purge=True,
38 logger=logging.getLogger(dirsync.__name__))
39 return response
40
41 def fetch(self):
42 synced = dirsync.sync(
43 self.fs_url,
44 self.project.local_fs_path,
45 "sync",
46 create=True,
47 purge=True,
48 logger=logging.getLogger(dirsync.__name__))
49 if synced:
50 revision.get(Project)(self.project).set(
51 keys=["pootle.fs.fs_hash"], value=uuid.uuid4().hex)
52
53
54 class LocalFSUrlValidator(object):
55
56 help_text = "Enter an absolute path to a directory on your filesystem"
57
58 def validate(self, url):
59 if not url.startswith("/"):
60 raise forms.ValidationError(self.help_text)
61
[end of pootle/apps/pootle_fs/localfs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pootle/apps/pootle_fs/localfs.py b/pootle/apps/pootle_fs/localfs.py
--- a/pootle/apps/pootle_fs/localfs.py
+++ b/pootle/apps/pootle_fs/localfs.py
@@ -16,6 +16,7 @@
from pootle.core.delegate import revision
from pootle_project.models import Project
+from .exceptions import FSFetchError
from .plugin import Plugin
@@ -39,13 +40,16 @@
return response
def fetch(self):
- synced = dirsync.sync(
- self.fs_url,
- self.project.local_fs_path,
- "sync",
- create=True,
- purge=True,
- logger=logging.getLogger(dirsync.__name__))
+ try:
+ synced = dirsync.sync(
+ self.fs_url,
+ self.project.local_fs_path,
+ "sync",
+ create=True,
+ purge=True,
+ logger=logging.getLogger(dirsync.__name__))
+ except ValueError as e:
+ raise FSFetchError(e)
if synced:
revision.get(Project)(self.project).set(
keys=["pootle.fs.fs_hash"], value=uuid.uuid4().hex)
diff --git a/pootle/apps/pootle_fs/management/commands/init_fs_project.py b/pootle/apps/pootle_fs/management/commands/init_fs_project.py
--- a/pootle/apps/pootle_fs/management/commands/init_fs_project.py
+++ b/pootle/apps/pootle_fs/management/commands/init_fs_project.py
@@ -12,6 +12,7 @@
from django.core.management import BaseCommand, CommandError
from pootle_format.models import Format
+from pootle_fs.exceptions import FSFetchError
from pootle_fs.utils import FSPlugin, parse_fs_url
from pootle_language.models import Language
from pootle_project.models import Project
@@ -112,7 +113,11 @@
'default': options['translation_mapping']
}
if options['sync']:
- plugin = FSPlugin(project)
- plugin.fetch()
- plugin.add()
- plugin.sync()
+ try:
+ plugin = FSPlugin(project)
+ plugin.fetch()
+ plugin.add()
+ plugin.sync()
+ except FSFetchError as e:
+ project.delete()
+ raise CommandError(e)
| {"golden_diff": "diff --git a/pootle/apps/pootle_fs/localfs.py b/pootle/apps/pootle_fs/localfs.py\n--- a/pootle/apps/pootle_fs/localfs.py\n+++ b/pootle/apps/pootle_fs/localfs.py\n@@ -16,6 +16,7 @@\n from pootle.core.delegate import revision\n from pootle_project.models import Project\n \n+from .exceptions import FSFetchError\n from .plugin import Plugin\n \n \n@@ -39,13 +40,16 @@\n return response\n \n def fetch(self):\n- synced = dirsync.sync(\n- self.fs_url,\n- self.project.local_fs_path,\n- \"sync\",\n- create=True,\n- purge=True,\n- logger=logging.getLogger(dirsync.__name__))\n+ try:\n+ synced = dirsync.sync(\n+ self.fs_url,\n+ self.project.local_fs_path,\n+ \"sync\",\n+ create=True,\n+ purge=True,\n+ logger=logging.getLogger(dirsync.__name__))\n+ except ValueError as e:\n+ raise FSFetchError(e)\n if synced:\n revision.get(Project)(self.project).set(\n keys=[\"pootle.fs.fs_hash\"], value=uuid.uuid4().hex)\ndiff --git a/pootle/apps/pootle_fs/management/commands/init_fs_project.py b/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n--- a/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n+++ b/pootle/apps/pootle_fs/management/commands/init_fs_project.py\n@@ -12,6 +12,7 @@\n from django.core.management import BaseCommand, CommandError\n \n from pootle_format.models import Format\n+from pootle_fs.exceptions import FSFetchError\n from pootle_fs.utils import FSPlugin, parse_fs_url\n from pootle_language.models import Language\n from pootle_project.models import Project\n@@ -112,7 +113,11 @@\n 'default': options['translation_mapping']\n }\n if options['sync']:\n- plugin = FSPlugin(project)\n- plugin.fetch()\n- plugin.add()\n- plugin.sync()\n+ try:\n+ plugin = FSPlugin(project)\n+ plugin.fetch()\n+ plugin.add()\n+ plugin.sync()\n+ except FSFetchError as e:\n+ project.delete()\n+ raise CommandError(e)\n", "issue": "Dont create project if command fails in init_fs_project\natm if for some reason this command fails it leaves a project behind\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.management import BaseCommand, CommandError\n\nfrom pootle_format.models import Format\nfrom pootle_fs.utils import FSPlugin, parse_fs_url\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\n\n\nlogger = logging.getLogger('pootle.fs')\n\n\nclass Command(BaseCommand):\n help = \"Init a new Pootle FS project.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n 'code',\n metavar='CODE',\n help='Project code'\n )\n parser.add_argument(\n 'fs',\n metavar='FS_URL',\n help='FS url \"filesystem_type+/repo/path/\"'\n )\n parser.add_argument(\n 'translation_mapping',\n help='Translation mapping \"<language_code>/<filename>.<ext>\"',\n metavar='TRANSLATION_MAPPING'\n )\n parser.add_argument(\n '-n', '--name',\n action='store',\n dest='name',\n nargs='?',\n help='Project name',\n )\n parser.add_argument(\n '--filetypes',\n action='append',\n dest='filetypes',\n help='File types',\n )\n parser.add_argument(\n '--checkstyle',\n action='store',\n dest='checkstyle',\n help='Checkstyle',\n nargs='?',\n default='standard'\n )\n parser.add_argument(\n '-l', '--source-language',\n action='store',\n dest='source_language',\n help=\"Code for the project's source language\",\n nargs='?',\n default='en'\n )\n parser.add_argument(\n '--nosync',\n action='store_false',\n dest='sync',\n help='Flag if sync is unnecessary',\n default=True\n )\n\n def handle(self, **options):\n source_language_code = options['source_language']\n try:\n source_language = Language.objects.get(code=source_language_code)\n except Language.DoesNotExist as e:\n self.stdout.write('%s: Unknown language code.' %\n source_language_code)\n raise CommandError(e)\n\n fs_type, fs_url = parse_fs_url(options['fs'])\n code = options['code']\n name = options['name'] or code.capitalize()\n\n try:\n project = Project.objects.create(\n code=code,\n fullname=name,\n treestyle='pootle_fs',\n checkstyle=options['checkstyle'],\n source_language=source_language)\n except ValidationError as e:\n raise CommandError(e)\n\n for filetype in options[\"filetypes\"] or [\"po\"]:\n try:\n filetype = Format.objects.get(name=filetype)\n project.filetypes.add(filetype)\n except Format.DoesNotExist as e:\n raise CommandError(e)\n\n project.config['pootle_fs.fs_type'] = fs_type\n project.config['pootle_fs.fs_url'] = fs_url\n project.config['pootle_fs.translation_mappings'] = {\n 'default': options['translation_mapping']\n }\n if options['sync']:\n plugin = FSPlugin(project)\n plugin.fetch()\n plugin.add()\n plugin.sync()\n", "path": "pootle/apps/pootle_fs/management/commands/init_fs_project.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport uuid\n\nimport dirsync\n\nfrom django import forms\n\nfrom pootle.core.delegate import revision\nfrom pootle_project.models import Project\n\nfrom .plugin import Plugin\n\n\nclass LocalFSPlugin(Plugin):\n\n fs_type = \"localfs\"\n _pulled = False\n\n @property\n def latest_hash(self):\n return revision.get(Project)(\n self.project).get(key=\"pootle.fs.fs_hash\")\n\n def push(self, response):\n dirsync.sync(\n self.project.local_fs_path,\n self.fs_url,\n \"sync\",\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n return response\n\n def fetch(self):\n synced = dirsync.sync(\n self.fs_url,\n self.project.local_fs_path,\n \"sync\",\n create=True,\n purge=True,\n logger=logging.getLogger(dirsync.__name__))\n if synced:\n revision.get(Project)(self.project).set(\n keys=[\"pootle.fs.fs_hash\"], value=uuid.uuid4().hex)\n\n\nclass LocalFSUrlValidator(object):\n\n help_text = \"Enter an absolute path to a directory on your filesystem\"\n\n def validate(self, url):\n if not url.startswith(\"/\"):\n raise forms.ValidationError(self.help_text)\n", "path": "pootle/apps/pootle_fs/localfs.py"}]} | 2,044 | 525 |
gh_patches_debug_37806 | rasdani/github-patches | git_diff | ansible__ansible-19454 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
include_role tasks has ansible-playbook seek roles before the actual task is run
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
include_role
##### ANSIBLE VERSION
```
ansible 2.2.0.0
reproduced on ansible 2.2.1.0 (rc2) as well
```
##### CONFIGURATION
Not relevant
##### OS / ENVIRONMENT
Not relevant
##### SUMMARY
If a role is installed as part of a playbook run (i.e., a task that runs ``ansible-galaxy`` or something similar) and we then attempt to use ``include_role`` in a later task, ansible-playbook will not run at all because the role referenced by ``include_role`` is missing (it has not been installed yet).
It's sort of like the static parameter of the ``include`` task where a ``static: no`` on the include can allow you to add a ``when: <something>`` clause to conditionally/dynamically run the include.
There *is* a [documented](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/utilities/logic/include_role.py) static parameter in the ``include_role`` task; however, it does not alleviate this issue.
The issue is present even when running the role setup and the include role in two different plays of the same playbook. The only workaround for now is to run completely separated playbooks.
It's like ``ansible-playbook`` tries to "compile" all the roles before starting the run.
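For reference, the dynamic ``include`` pattern mentioned above looks roughly like this (just a sketch; the task file name `setup_roles.yml` and the variable are made up):

```yaml
# Sketch only: with static: no the include is resolved at runtime,
# so the when: condition can decide dynamically whether it runs.
- include: setup_roles.yml
  static: no
  when: roles_need_setup | default(false)
```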
The obvious answer would be:
> Why don't you use ansible-galaxy and requirements.yml file and *\<insert other alternative here\>* before running your playbook ?
This is a more complicated use case where there are actual setup tasks required and I would like them to be embedded as ansible tasks.
##### STEPS TO REPRODUCE
```yaml
- name: Setup and use a role
hosts: localhost
tasks:
- name: Get geerlingguy.apache
command: ansible-galaxy install geerlingguy.apache
- name: Include geerlingguy.apache
include_role:
name: geerlingguy.apache
```
##### EXPECTED RESULTS
I would expect the include_role task to seek the role only when the actual include_role task runs, and thus this example playbook should work.
##### ACTUAL RESULTS
```
# ansible-playbook -i 'localhost' playbook.yml
[WARNING]: Host file not found: localhost
[WARNING]: provided hosts list is empty, only localhost is available
ERROR! the role 'geerlingguy.apache' was not found in /home/centos/roles:/etc/ansible/roles:/home/centos
The error appears to have been in '/home/centos/playbook.yml': line 9, column 15, but may
be elsewhere in the file depending on the exact syntax problem.
The offending line appears to be:
include_role:
name: geerlingguy.apache
^ here
```
</issue>
<code>
[start of lib/ansible/playbook/role_include.py]
1
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 # Make coding more python3-ish
19 from __future__ import (absolute_import, division, print_function)
20 __metaclass__ = type
21
22 from os.path import basename
23
24 from ansible.errors import AnsibleParserError
25 from ansible.playbook.attribute import FieldAttribute
26 from ansible.playbook.task import Task
27 from ansible.playbook.role import Role
28 from ansible.playbook.role.include import RoleInclude
29
30 try:
31 from __main__ import display
32 except ImportError:
33 from ansible.utils.display import Display
34 display = Display()
35
36 __all__ = ['IncludeRole']
37
38
39 class IncludeRole(Task):
40
41 """
42 A Role include is derived from a regular role to handle the special
43 circumstances related to the `- include_role: ...`
44 """
45
46 # =================================================================================
47 # ATTRIBUTES
48
49 # private as this is a 'module options' vs a task property
50 _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
51 _static = FieldAttribute(isa='bool', default=None, private=True)
52 _private = FieldAttribute(isa='bool', default=None, private=True)
53
54 def __init__(self, block=None, role=None, task_include=None):
55
56 super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)
57
58 self.statically_loaded = False
59 self._from_files = {}
60 self._parent_role = role
61 self._role_name = None
62
63
64 def get_block_list(self, play=None, variable_manager=None, loader=None):
65
66 # only need play passed in when dynamic
67 if play is None:
68 myplay = self._parent._play
69 else:
70 myplay = play
71
72 ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader)
73 ri.vars.update(self.vars)
74
75 # build role
76 actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files)
77 actual_role._metadata.allow_duplicates = self.allow_duplicates
78
79 # compile role with parent roles as dependencies to ensure they inherit
80 # variables
81 if not self._parent_role:
82 dep_chain = []
83 else:
84 dep_chain = list(self._parent_role._parents)
85 dep_chain.extend(self._parent_role.get_all_dependencies())
86 dep_chain.append(self._parent_role)
87
88 blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)
89 for b in blocks:
90 b._parent = self
91
92 # updated available handlers in play
93 myplay.handlers = myplay.handlers + actual_role.get_handler_blocks(play=myplay)
94
95 return blocks
96
97 @staticmethod
98 def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
99
100 ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)
101
102 ### Process options
103 # name is needed, or use role as alias
104 ir._role_name = ir.args.get('name', ir.args.get('role'))
105 if ir._role_name is None:
106 raise AnsibleParserError("'name' is a required field for include_role.")
107
108 # build options for role includes
109 for key in ['tasks', 'vars', 'defaults']:
110 from_key ='%s_from' % key
111 if ir.args.get(from_key):
112 ir._from_files[key] = basename(ir.args.get(from_key))
113
114 #FIXME: find a way to make this list come from object ( attributes does not work as per below)
115 # manual list as otherwise the options would set other task parameters we don't want.
116 for option in ['static', 'private', 'allow_duplicates']:
117 if option in ir.args:
118 setattr(ir, option, ir.args.get(option))
119
120 return ir.load_data(data, variable_manager=variable_manager, loader=loader)
121
122 def copy(self, exclude_parent=False, exclude_tasks=False):
123
124 new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)
125 new_me.statically_loaded = self.statically_loaded
126 new_me._from_files = self._from_files.copy()
127 new_me._parent_role = self._parent_role
128 new_me._role_name = self._role_name
129
130 return new_me
131
132 def get_include_params(self):
133 v = super(IncludeRole, self).get_include_params()
134 if self._parent_role:
135 v.update(self._parent_role.get_role_params())
136 return v
137
[end of lib/ansible/playbook/role_include.py]
[start of lib/ansible/modules/utilities/logic/include_role.py]
1 #!/usr/bin/python
2 # -*- mode: python -*-
3 # Ansible is distributed in the hope that it will be useful,
4 # but WITHOUT ANY WARRANTY; without even the implied warranty of
5 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
6 # GNU General Public License for more details.
7 #
8 # You should have received a copy of the GNU General Public License
9 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
10
11 ANSIBLE_METADATA = {'status': ['preview'],
12 'supported_by': 'core',
13 'version': '1.0'}
14
15 DOCUMENTATION = '''
16 ---
17 author:
18 - "Ansible Core Team (@ansible)"
19 module: include_role
20 short_description: Load and execute a role
21 description:
22 - "Loads and executes a role as a task, this frees roles from the `role:` directive and allows them to be treated more as tasks."
23 version_added: "2.2"
24 options:
25 name:
26 description:
27 - The name of the role to be executed.
28 required: True
29 tasks_from:
30 description:
31 - "File to load from a Role's tasks/ directory."
32 required: False
33 default: 'main'
34 vars_from:
35 description:
36 - "File to load from a Role's vars/ directory."
37 required: False
38 default: 'main'
39 defaults_from:
40 description:
41 - "File to load from a Role's defaults/ directory."
42 required: False
43 default: 'main'
44 static:
45 description:
46 - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options.
47 required: False
48 default: None
49 allow_duplicates:
50 description:
51 - Overrides the role's metadata setting to allow using a role more than once with the same parameters.
52 required: False
53 default: True
54 private:
55 description:
56 - If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.
57 default: None
58 notes:
59 - THIS IS EARLY PREVIEW, THINGS MAY CHANGE
60 - Handlers are made available to the whole play.
61 - simple dependencies seem to work fine.
62 - "Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this."
63 '''
64
65 EXAMPLES = """
66 - include_role:
67 name: myrole
68
69 - name: Run tasks/other.yml instead of 'main'
70 include_role:
71 name: myrole
72 tasks_from: other
73
74 - name: Pass variables to role
75 include_role:
76 name: myrole
77 vars:
78 rolevar1: 'value from task'
79
80 - name: Use role in loop
81 include_role:
82 name: myrole
83 with_items:
84 - '{{ roleinput1 }}'
85 - '{{ roleinput2 }}'
86 loop_control:
87 loop_var: roleinputvar
88
89 - name: conditional role
90 include_role:
91 name: myrole
92 when: not idontwanttorun
93 """
94
95 RETURN = """
96 # this module does not return anything except tasks to execute
97 """
98
[end of lib/ansible/modules/utilities/logic/include_role.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lib/ansible/modules/utilities/logic/include_role.py b/lib/ansible/modules/utilities/logic/include_role.py
--- a/lib/ansible/modules/utilities/logic/include_role.py
+++ b/lib/ansible/modules/utilities/logic/include_role.py
@@ -41,11 +41,6 @@
- "File to load from a Role's defaults/ directory."
required: False
default: 'main'
- static:
- description:
- - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options.
- required: False
- default: None
allow_duplicates:
description:
- Overrides the role's metadata setting to allow using a role more than once with the same parameters.
@@ -56,10 +51,9 @@
- If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.
default: None
notes:
- - THIS IS EARLY PREVIEW, THINGS MAY CHANGE
- Handlers are made available to the whole play.
- simple dependencies seem to work fine.
- - "Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this."
+ - As with C(include) this task can be static or dynamic, If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options. Ansible will try to autodetect what is needed, but you can set `static: yes|no` at task level to control this.
'''
EXAMPLES = """
diff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py
--- a/lib/ansible/playbook/role_include.py
+++ b/lib/ansible/playbook/role_include.py
@@ -48,7 +48,6 @@
# private as this is a 'module options' vs a task property
_allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)
- _static = FieldAttribute(isa='bool', default=None, private=True)
_private = FieldAttribute(isa='bool', default=None, private=True)
def __init__(self, block=None, role=None, task_include=None):
@@ -113,7 +112,7 @@
#FIXME: find a way to make this list come from object ( attributes does not work as per below)
# manual list as otherwise the options would set other task parameters we don't want.
- for option in ['static', 'private', 'allow_duplicates']:
+ for option in ['private', 'allow_duplicates']:
if option in ir.args:
setattr(ir, option, ir.args.get(option))
| {"golden_diff": "diff --git a/lib/ansible/modules/utilities/logic/include_role.py b/lib/ansible/modules/utilities/logic/include_role.py\n--- a/lib/ansible/modules/utilities/logic/include_role.py\n+++ b/lib/ansible/modules/utilities/logic/include_role.py\n@@ -41,11 +41,6 @@\n - \"File to load from a Role's defaults/ directory.\"\n required: False\n default: 'main'\n- static:\n- description:\n- - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options.\n- required: False\n- default: None\n allow_duplicates:\n description:\n - Overrides the role's metadata setting to allow using a role more than once with the same parameters.\n@@ -56,10 +51,9 @@\n - If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.\n default: None\n notes:\n- - THIS IS EARLY PREVIEW, THINGS MAY CHANGE\n - Handlers are made available to the whole play.\n - simple dependencies seem to work fine.\n- - \"Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this.\"\n+ - As with C(include) this task can be static or dynamic, If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options. Ansible will try to autodetect what is needed, but you can set `static: yes|no` at task level to control this.\n '''\n \n EXAMPLES = \"\"\"\ndiff --git a/lib/ansible/playbook/role_include.py b/lib/ansible/playbook/role_include.py\n--- a/lib/ansible/playbook/role_include.py\n+++ b/lib/ansible/playbook/role_include.py\n@@ -48,7 +48,6 @@\n \n # private as this is a 'module options' vs a task property\n _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)\n- _static = FieldAttribute(isa='bool', default=None, private=True)\n _private = FieldAttribute(isa='bool', default=None, private=True)\n \n def __init__(self, block=None, role=None, task_include=None):\n@@ -113,7 +112,7 @@\n \n #FIXME: find a way to make this list come from object ( attributes does not work as per below)\n # manual list as otherwise the options would set other task parameters we don't want.\n- for option in ['static', 'private', 'allow_duplicates']:\n+ for option in ['private', 'allow_duplicates']:\n if option in ir.args:\n setattr(ir, option, ir.args.get(option))\n", "issue": "include_role tasks has ansible-playbook seek roles before the actual task is run\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\ninclude_role\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible 2.2.0.0\r\nreproduced on ansible 2.2.1.0 (rc2) as well\r\n```\r\n\r\n##### CONFIGURATION\r\nNot relevant\r\n\r\n##### OS / ENVIRONMENT\r\nNot relevant\r\n\r\n##### SUMMARY\r\nIf a role is installed as part of a playbook run (i.e, a task that runs ``ansible-galaxy`` or something similar) and then we attempt to use ``include_role`` in a later task, ansible-playbook will not run at all because the role from ``include_role`` is missing (has not been installed yet).\r\n\r\nIt's sort of like the static parameter of the ``include`` task where a ``static: no`` on the include can allow you to add a ``when: <something>`` clause to conditionally/dynamically run the include.\r\n\r\nThere *is* a [documented](https://github.com/ansible/ansible/blob/devel/lib/ansible/modules/utilities/logic/include_role.py) static parameter in the ``include_role`` task, however it does not 
alleviate this issue.\r\n\r\nThe issue is present even when running the role setup and the include role in two different plays of the same playbook. The only workaround for now is to run completely separated playbooks.\r\n\r\nIt's like ``ansible-playbook`` tries to \"compile\" all the roles before starting the run.\r\n\r\nThe obvious answer would be:\r\n\r\n> Why don't you use ansible-galaxy and requirements.yml file and *\\<insert other alternative here\\>* before running your playbook ?\r\n\r\nThis is a more complicated use case where there are actual setup tasks required and I would like them to be embedded as ansible tasks.\r\n\r\n##### STEPS TO REPRODUCE\r\n```yaml\r\n- name: Setup and use a role\r\n hosts: localhost\r\n tasks:\r\n - name: Get geerlingguy.apache\r\n command: ansible-galaxy install geerlingguy.apache\r\n\r\n - name: Include geerlingguy.apache\r\n include_role:\r\n name: geerlingguy.apache\r\n```\r\n\r\n##### EXPECTED RESULTS\r\nI would expect the include_role task to seek the role only when running the actual include_role task and thus this example playbook should work.\r\n\r\n##### ACTUAL RESULTS\r\n```\r\n# ansible-playbook -i 'localhost' playbook.yml \r\n [WARNING]: Host file not found: localhost\r\n\r\n [WARNING]: provided hosts list is empty, only localhost is available\r\n\r\nERROR! the role 'geerlingguy.apache' was not found in /home/centos/roles:/etc/ansible/roles:/home/centos\r\n\r\nThe error appears to have been in '/home/centos/playbook.yml': line 9, column 15, but may\r\nbe elsewhere in the file depending on the exact syntax problem.\r\n\r\nThe offending line appears to be:\r\n\r\n include_role:\r\n name: geerlingguy.apache\r\n ^ here\r\n```\r\n\n", "before_files": [{"content": "\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom os.path import basename\n\nfrom ansible.errors import AnsibleParserError\nfrom ansible.playbook.attribute import FieldAttribute\nfrom ansible.playbook.task import Task\nfrom ansible.playbook.role import Role\nfrom ansible.playbook.role.include import RoleInclude\n\ntry:\n from __main__ import display\nexcept ImportError:\n from ansible.utils.display import Display\n display = Display()\n\n__all__ = ['IncludeRole']\n\n\nclass IncludeRole(Task):\n\n \"\"\"\n A Role include is derived from a regular role to handle the special\n circumstances related to the `- include_role: ...`\n \"\"\"\n\n # =================================================================================\n # ATTRIBUTES\n\n # private as this is a 'module options' vs a task property\n _allow_duplicates = FieldAttribute(isa='bool', default=True, private=True)\n _static = FieldAttribute(isa='bool', default=None, private=True)\n _private = FieldAttribute(isa='bool', default=None, private=True)\n\n def __init__(self, block=None, role=None, task_include=None):\n\n super(IncludeRole, self).__init__(block=block, role=role, task_include=task_include)\n\n self.statically_loaded = False\n self._from_files = {}\n self._parent_role = role\n self._role_name = None\n\n\n def get_block_list(self, play=None, variable_manager=None, loader=None):\n\n # only need play passed in when dynamic\n if play is None:\n myplay = self._parent._play\n else:\n myplay = play\n\n ri = RoleInclude.load(self._role_name, play=myplay, variable_manager=variable_manager, loader=loader)\n ri.vars.update(self.vars)\n\n # build role\n actual_role = Role.load(ri, myplay, parent_role=self._parent_role, from_files=self._from_files)\n actual_role._metadata.allow_duplicates = self.allow_duplicates\n\n # compile role with parent roles as dependencies to ensure they inherit\n # variables\n if not self._parent_role:\n dep_chain = []\n else:\n dep_chain = list(self._parent_role._parents)\n dep_chain.extend(self._parent_role.get_all_dependencies())\n dep_chain.append(self._parent_role)\n\n blocks = actual_role.compile(play=myplay, dep_chain=dep_chain)\n for b in blocks:\n b._parent = self\n\n # updated available handlers in play\n myplay.handlers = myplay.handlers + actual_role.get_handler_blocks(play=myplay)\n\n return blocks\n\n @staticmethod\n def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):\n\n ir = IncludeRole(block, role, task_include=task_include).load_data(data, variable_manager=variable_manager, loader=loader)\n\n ### Process options\n # name is needed, or use role as alias\n ir._role_name = ir.args.get('name', ir.args.get('role'))\n if ir._role_name is None:\n raise AnsibleParserError(\"'name' is a required field for include_role.\")\n\n # build options for role includes\n for key in ['tasks', 'vars', 'defaults']:\n from_key ='%s_from' % key\n if ir.args.get(from_key):\n ir._from_files[key] = basename(ir.args.get(from_key))\n\n #FIXME: find a way to make this list come from object ( attributes does not work as per below)\n # manual list as otherwise the options would set other task parameters we don't want.\n for option in ['static', 'private', 'allow_duplicates']:\n if option in ir.args:\n setattr(ir, option, ir.args.get(option))\n\n return ir.load_data(data, variable_manager=variable_manager, loader=loader)\n\n def copy(self, exclude_parent=False, 
exclude_tasks=False):\n\n new_me = super(IncludeRole, self).copy(exclude_parent=exclude_parent, exclude_tasks=exclude_tasks)\n new_me.statically_loaded = self.statically_loaded\n new_me._from_files = self._from_files.copy()\n new_me._parent_role = self._parent_role\n new_me._role_name = self._role_name\n\n return new_me\n\n def get_include_params(self):\n v = super(IncludeRole, self).get_include_params()\n if self._parent_role:\n v.update(self._parent_role.get_role_params())\n return v\n", "path": "lib/ansible/playbook/role_include.py"}, {"content": "#!/usr/bin/python\n# -*- mode: python -*-\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'status': ['preview'],\n 'supported_by': 'core',\n 'version': '1.0'}\n\nDOCUMENTATION = '''\n---\nauthor:\n - \"Ansible Core Team (@ansible)\"\nmodule: include_role\nshort_description: Load and execute a role\ndescription:\n - \"Loads and executes a role as a task, this frees roles from the `role:` directive and allows them to be treated more as tasks.\"\nversion_added: \"2.2\"\noptions:\n name:\n description:\n - The name of the role to be executed.\n required: True\n tasks_from:\n description:\n - \"File to load from a Role's tasks/ directory.\"\n required: False\n default: 'main'\n vars_from:\n description:\n - \"File to load from a Role's vars/ directory.\"\n required: False\n default: 'main'\n defaults_from:\n description:\n - \"File to load from a Role's defaults/ directory.\"\n required: False\n default: 'main'\n static:\n description:\n - Gives Ansible a hint if this is a 'static' include or not. If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options.\n required: False\n default: None\n allow_duplicates:\n description:\n - Overrides the role's metadata setting to allow using a role more than once with the same parameters.\n required: False\n default: True\n private:\n description:\n - If True the variables from defaults/ and vars/ in a role will not be made available to the rest of the play.\n default: None\nnotes:\n - THIS IS EARLY PREVIEW, THINGS MAY CHANGE\n - Handlers are made available to the whole play.\n - simple dependencies seem to work fine.\n - \"Things not tested (yet): plugin overrides, nesting includes, used as handler, other stuff I did not think of when I wrote this.\"\n'''\n\nEXAMPLES = \"\"\"\n- include_role:\n name: myrole\n\n- name: Run tasks/other.yml instead of 'main'\n include_role:\n name: myrole\n tasks_from: other\n\n- name: Pass variables to role\n include_role:\n name: myrole\n vars:\n rolevar1: 'value from task'\n\n- name: Use role in loop\n include_role:\n name: myrole\n with_items:\n - '{{ roleinput1 }}'\n - '{{ roleinput2 }}'\n loop_control:\n loop_var: roleinputvar\n\n- name: conditional role\n include_role:\n name: myrole\n when: not idontwanttorun\n\"\"\"\n\nRETURN = \"\"\"\n# this module does not return anything except tasks to execute\n\"\"\"\n", "path": "lib/ansible/modules/utilities/logic/include_role.py"}]} | 3,528 | 630 |
gh_patches_debug_39250 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-6429 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Signing up with Facebook creates a username without accented letters
If I sign up from my Facebook account, whose name is « Clémentine Sanpépins », my username on Zeste de Savoir will be « ClmentineSanppins ».
This is the case locally on `upstream/dev` as of the creation of this ticket. I don't know whether it is also the case on the beta and in production.
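For illustration, a cleaning step that only strips characters needing more than 3 bytes in UTF-8 (the utf8mb4 case) while keeping accented letters could look roughly like the sketch below. This is not the project's actual code: the function name is made up, and the character-range regex is simply the one already used in `zds/utils/misc.py`.

```python
import re

# Matches only characters outside the Basic Multilingual Plane,
# i.e. those that need 4 bytes in UTF-8 (the "utf8mb4" characters).
UTF8MB4_RE = re.compile("[^\u0000-\uD7FF\uE000-\uFFFF]", re.UNICODE)


def clean_username_keep_accents(username):
    # Drops 4-byte characters (emoji, etc.) but keeps accented letters such as "é".
    return UTF8MB4_RE.sub("", username)


# Hypothetical usage:
# clean_username_keep_accents("Clémentine Sanpépins") == "Clémentine Sanpépins"
```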
</issue>
<code>
[start of zds/settings/abstract_base/requirements.py]
1 from .config import config
2
3 # best quality, 100 is the same but documentation says
4 # ' values up to 100 are allowed, but this is not recommended'
5 # so let's use 95
6 THUMBNAIL_QUALITY = 95
7 # Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None
8 THUMBNAIL_PRESERVE_EXTENSIONS = ("svg",)
9
10
11 social_auth_config = config.get("social_auth", {})
12
13 SOCIAL_AUTH_RAISE_EXCEPTIONS = False
14
15 SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"]
16
17 SOCIAL_AUTH_PIPELINE = (
18 "social_core.pipeline.social_auth.social_details",
19 "social_core.pipeline.social_auth.social_uid",
20 "social_core.pipeline.social_auth.auth_allowed",
21 "social_core.pipeline.social_auth.social_user",
22 "social_core.pipeline.user.get_username",
23 "social_core.pipeline.social_auth.associate_by_email",
24 "social_core.pipeline.user.create_user",
25 "zds.member.models.save_profile",
26 "social_core.pipeline.social_auth.associate_user",
27 "social_core.pipeline.social_auth.load_extra_data",
28 "social_core.pipeline.user.user_details",
29 )
30
31 # Before adding new providers such as Facebook and Google,
32 # you need to make sure they validate the user's email address on sign up!
33 # If they don't, a malicious person could take control of someone else account!
34 SOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get("facebook_key", "")
35 SOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get("facebook_secret", "")
36 SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get(
37 "google_oauth2_key",
38 "696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com",
39 )
40 SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get(
41 "google_oauth2_secret",
42 "mApWNh3stCsYHwsGuWdbZWP8",
43 )
44
45 SOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get(
46 "sanitize_redirects",
47 False,
48 )
49
50
51 recaptcha_config = config.get("recaptcha", {})
52
53 USE_CAPTCHA = recaptcha_config.get("use_captcha", False)
54 RECAPTCHA_PUBLIC_KEY = recaptcha_config.get("public_key", "dummy")
55 RECAPTCHA_PRIVATE_KEY = recaptcha_config.get("private_key", "dummy")
56
57
58 OAUTH2_PROVIDER = {"OAUTH2_BACKEND_CLASS": "oauth2_provider.oauth2_backends.JSONOAuthLibCore"}
59
[end of zds/settings/abstract_base/requirements.py]
[start of zds/utils/misc.py]
1 import hashlib
2 import re
3
4 from django.contrib.auth import get_user_model
5 from django.http import HttpRequest
6
7 THUMB_MAX_WIDTH = 80
8 THUMB_MAX_HEIGHT = 80
9
10 MEDIUM_MAX_WIDTH = 200
11 MEDIUM_MAX_HEIGHT = 200
12
13
14 def compute_hash(filenames):
15 """returns a md5 hexdigest of group of files to check if they have change"""
16 md5_hash = hashlib.md5()
17 for filename in filenames:
18 if filename:
19 file_handle = open(filename, "rb")
20 must_continue = True
21 while must_continue:
22 read_bytes = file_handle.read(8096)
23 if not read_bytes:
24 must_continue = False
25 else:
26 md5_hash.update(read_bytes)
27 return md5_hash.hexdigest()
28
29
30 def content_has_changed(filenames, md5):
31 return md5 != compute_hash(filenames)
32
33
34 def has_changed(instance, field, manager="objects"):
35 """Returns true if a field has changed in a model May be used in a
36 model.save() method."""
37 if not instance.pk:
38 return True
39 manager = getattr(instance.__class__, manager)
40 old = getattr(manager.get(pk=instance.pk), field)
41 return not getattr(instance, field) == old
42
43
44 def convert_camel_to_underscore(camel_case):
45 """
46 Converts a name in camel case to underscore.
47 """
48 s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", camel_case)
49 return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
50
51
52 def contains_utf8mb4(s):
53 """
54 This string contains at least one character of more than 3 bytes
55 """
56 if not isinstance(s, str):
57 s = str(s, "utf-8")
58 re_pattern = re.compile("[^\u0000-\uD7FF\uE000-\uFFFF]", re.UNICODE)
59 return s != re_pattern.sub("\uFFFD", s)
60
61
62 def check_essential_accounts():
63 """
64 Verify that essential accounts are present in the database.
65 Raise an exception if it is not the case.
66 """
67
68 from django.conf import settings
69
70 User = get_user_model()
71 essential_accounts = ("bot_account", "anonymous_account", "external_account")
72
73 for account in essential_accounts:
74 username = settings.ZDS_APP["member"][account]
75 try:
76 User.objects.get(username=username)
77 except User.DoesNotExist:
78 raise Exception(
79 f"User {username!r} does not exist. You must create it to run the server. "
80 f"On a development instance, load the fixtures to solve this issue."
81 )
82
83
84 def is_ajax(request: HttpRequest):
85 """
86 Check whether the request was sent asynchronously.
87
88 The function returns True for :
89
90 * requests sent using jQuery.ajax() since it sets the header `X-Requested-With`
91 to `XMLHttpRequest` by default ;
92 * requests sent using the tools provided by `ajax.js`, which reproduce the behavior
93 described above to ease the progressive removal of jQuery from the codebase.
94
95 The function returns False for requests without the appropriate header.
96 These requests will not be recognized as AJAX.
97
98 The function replaces `request.is_ajax()`, which is removed starting from Django 4.0.
99 """
100 return request.META.get("HTTP_X_REQUESTED_WITH") == "XMLHttpRequest"
101
[end of zds/utils/misc.py]
[start of zds/member/validators.py]
1 from django.contrib.auth.models import User
2 from django.core.exceptions import ValidationError
3 from django.core.validators import EmailValidator, ProhibitNullCharactersValidator
4 from django.utils.encoding import force_str
5 from django.utils.translation import gettext_lazy as _
6
7 from zds.utils.misc import contains_utf8mb4
8 from zds.member.models import BannedEmailProvider, Profile
9
10
11 def validate_not_empty(value):
12 """
13 Fields cannot be empty or only contain spaces.
14
15 :param value: value to validate (str or None)
16 :return:
17 """
18 if value is None or not value.strip():
19 raise ValidationError(_("Le champs ne peut être vide"))
20
21
22 class ZdSEmailValidator(EmailValidator):
23 """
24 Based on https://docs.djangoproject.com/en/1.8/_modules/django/core/validators/#EmailValidator
25 Changed :
26 - check if provider is not if blacklisted
27 - check if email is not used by another user
28 - remove whitelist check
29 - add custom errors and translate them into French
30 """
31
32 message = _("Utilisez une adresse de courriel valide.")
33
34 def __call__(self, value, check_username_available=True):
35 value = force_str(value)
36
37 if not value or "@" not in value:
38 raise ValidationError(self.message, code=self.code)
39
40 user_part, domain_part = value.rsplit("@", 1)
41
42 if not self.user_regex.match(user_part) or contains_utf8mb4(user_part):
43 raise ValidationError(self.message, code=self.code)
44
45 # check if provider is blacklisted
46 blacklist = BannedEmailProvider.objects.values_list("provider", flat=True)
47 for provider in blacklist:
48 if f"@{provider}" in value.lower():
49 raise ValidationError(_("Ce fournisseur ne peut pas être utilisé."), code=self.code)
50
51 # check if email is used by another user
52 user_count = User.objects.filter(email=value).count()
53 if check_username_available and user_count > 0:
54 raise ValidationError(_("Cette adresse courriel est déjà utilisée"), code=self.code)
55 # check if email exists in database
56 elif not check_username_available and user_count == 0:
57 raise ValidationError(_("Cette adresse courriel n'existe pas"), code=self.code)
58
59 if domain_part and not self.validate_domain_part(domain_part):
60 # Try for possible IDN domain-part
61 try:
62 domain_part = domain_part.encode("idna").decode("ascii")
63 if self.validate_domain_part(domain_part):
64 return
65 except UnicodeError:
66 pass
67 raise ValidationError(self.message, code=self.code)
68
69
70 validate_zds_email = ZdSEmailValidator()
71
72
73 def validate_zds_username(value, check_username_available=True):
74 """
75 Check if username is used by another user
76
77 :param value: value to validate (str or None)
78 :return:
79 """
80
81 # If the character \x00 is in the username, the homoglyphs library called
82 # in Profile.find_username_skeleton() will raise a ValueError (the bug has
83 # been reported: https://github.com/yamatt/homoglyphs/issues/6). To prevent
84 # this, we call this validator which will raise a ValidationError if \x00 is
85 # in the username.
86 ProhibitNullCharactersValidator()(value)
87
88 msg = None
89 user_count = User.objects.filter(username=value).count()
90 skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()
91 if "," in value:
92 msg = _("Le nom d'utilisateur ne peut contenir de virgules")
93 elif "/" in value:
94 msg = _("Le nom d'utilisateur ne peut contenir de barres obliques")
95 elif contains_utf8mb4(value):
96 msg = _("Le nom d'utilisateur ne peut pas contenir des caractères utf8mb4")
97 elif check_username_available and user_count > 0:
98 msg = _("Ce nom d'utilisateur est déjà utilisé")
99 elif check_username_available and skeleton_user_count > 0:
100 msg = _("Un nom d'utilisateur visuellement proche du votre existe déjà")
101 elif not check_username_available and user_count == 0:
102 msg = _("Ce nom d'utilisateur n'existe pas")
103 if msg is not None:
104 raise ValidationError(msg)
105
106
107 def validate_raw_zds_username(data):
108 """
109 Check if raw username hasn't space on left or right
110 """
111 msg = None
112 username = data.get("username", None)
113 if username is None:
114 msg = _("Le nom d'utilisateur n'est pas fourni")
115 elif username != username.strip():
116 msg = _("Le nom d'utilisateur ne peut commencer ou finir par des espaces")
117
118 if msg is not None:
119 raise ValidationError(msg)
120
121
122 def validate_zds_password(value):
123 """
124
125 :param value:
126 :return:
127 """
128 if contains_utf8mb4(value):
129 raise ValidationError(_("Le mot de passe ne peut pas contenir des caractères utf8mb4"))
130
131
132 def validate_passwords(
133 cleaned_data, password_label="password", password_confirm_label="password_confirm", username=None
134 ):
135 """
136 Chek if cleaned_data['password'] == cleaned_data['password_confirm'] and password is not username.
137 :param cleaned_data:
138 :param password_label:
139 :param password_confirm_label:
140 :return:
141 """
142
143 password = cleaned_data.get(password_label)
144 password_confirm = cleaned_data.get(password_confirm_label)
145 msg = None
146
147 if username is None:
148 username = cleaned_data.get("username")
149
150 if not password_confirm == password:
151 msg = _("Les mots de passe sont différents")
152
153 if password_label in cleaned_data:
154 del cleaned_data[password_label]
155
156 if password_confirm_label in cleaned_data:
157 del cleaned_data[password_confirm_label]
158
159 if username is not None:
160 # Check that password != username
161 if password == username:
162 msg = _("Le mot de passe doit être différent du pseudo")
163 if password_label in cleaned_data:
164 del cleaned_data[password_label]
165 if password_confirm_label in cleaned_data:
166 del cleaned_data[password_confirm_label]
167
168 if msg is not None:
169 raise ValidationError(msg)
170
171 return cleaned_data
172
[end of zds/member/validators.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/zds/member/validators.py b/zds/member/validators.py
--- a/zds/member/validators.py
+++ b/zds/member/validators.py
@@ -4,7 +4,7 @@
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
-from zds.utils.misc import contains_utf8mb4
+from zds.utils.misc import contains_utf8mb4, remove_utf8mb4
from zds.member.models import BannedEmailProvider, Profile
@@ -70,6 +70,15 @@
validate_zds_email = ZdSEmailValidator()
+def clean_username_social_auth(username):
+ """
+ Clean username of accounts created using social auth.
+ """
+ # These three conditions are the same as the first three in the "validate_zds_username" function below.
+ # If you modify one of them here, make sure you do the same there!
+ return remove_utf8mb4(username).replace(",", "").replace("/", "")
+
+
def validate_zds_username(value, check_username_available=True):
"""
Check if username is used by another user
@@ -88,6 +97,9 @@
msg = None
user_count = User.objects.filter(username=value).count()
skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()
+
+ # These first three conditions are the same as those in the "clean_username_social_auth" function above.
+ # If you modify one of them here, make sure you do the same there!
if "," in value:
msg = _("Le nom d'utilisateur ne peut contenir de virgules")
elif "/" in value:
diff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py
--- a/zds/settings/abstract_base/requirements.py
+++ b/zds/settings/abstract_base/requirements.py
@@ -10,9 +10,12 @@
social_auth_config = config.get("social_auth", {})
+SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION = "zds.member.validators.clean_username_social_auth"
+
SOCIAL_AUTH_RAISE_EXCEPTIONS = False
SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"]
+SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {"fields": "name,email"}
SOCIAL_AUTH_PIPELINE = (
"social_core.pipeline.social_auth.social_details",
diff --git a/zds/utils/misc.py b/zds/utils/misc.py
--- a/zds/utils/misc.py
+++ b/zds/utils/misc.py
@@ -49,14 +49,21 @@
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
-def contains_utf8mb4(s):
+def remove_utf8mb4(s):
"""
- This string contains at least one character of more than 3 bytes
+ Remove characters of more than 3 bytes.
"""
if not isinstance(s, str):
s = str(s, "utf-8")
re_pattern = re.compile("[^\u0000-\uD7FF\uE000-\uFFFF]", re.UNICODE)
- return s != re_pattern.sub("\uFFFD", s)
+ return re_pattern.sub("", s)
+
+
+def contains_utf8mb4(s):
+ """
+ Check if this string contains at least one character of more than 3 bytes.
+ """
+ return s != remove_utf8mb4(s)
def check_essential_accounts():
| {"golden_diff": "diff --git a/zds/member/validators.py b/zds/member/validators.py\n--- a/zds/member/validators.py\n+++ b/zds/member/validators.py\n@@ -4,7 +4,7 @@\n from django.utils.encoding import force_str\n from django.utils.translation import gettext_lazy as _\n \n-from zds.utils.misc import contains_utf8mb4\n+from zds.utils.misc import contains_utf8mb4, remove_utf8mb4\n from zds.member.models import BannedEmailProvider, Profile\n \n \n@@ -70,6 +70,15 @@\n validate_zds_email = ZdSEmailValidator()\n \n \n+def clean_username_social_auth(username):\n+ \"\"\"\n+ Clean username of accounts created using social auth.\n+ \"\"\"\n+ # These three conditions are the same as the first three in the \"validate_zds_username\" function below.\n+ # If you modify one of them here, make sure you do the same there!\n+ return remove_utf8mb4(username).replace(\",\", \"\").replace(\"/\", \"\")\n+\n+\n def validate_zds_username(value, check_username_available=True):\n \"\"\"\n Check if username is used by another user\n@@ -88,6 +97,9 @@\n msg = None\n user_count = User.objects.filter(username=value).count()\n skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()\n+\n+ # These first three conditions are the same as those in the \"clean_username_social_auth\" function above.\n+ # If you modify one of them here, make sure you do the same there!\n if \",\" in value:\n msg = _(\"Le nom d'utilisateur ne peut contenir de virgules\")\n elif \"/\" in value:\ndiff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py\n--- a/zds/settings/abstract_base/requirements.py\n+++ b/zds/settings/abstract_base/requirements.py\n@@ -10,9 +10,12 @@\n \n social_auth_config = config.get(\"social_auth\", {})\n \n+SOCIAL_AUTH_CLEAN_USERNAME_FUNCTION = \"zds.member.validators.clean_username_social_auth\"\n+\n SOCIAL_AUTH_RAISE_EXCEPTIONS = False\n \n SOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n+SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {\"fields\": \"name,email\"}\n \n SOCIAL_AUTH_PIPELINE = (\n \"social_core.pipeline.social_auth.social_details\",\ndiff --git a/zds/utils/misc.py b/zds/utils/misc.py\n--- a/zds/utils/misc.py\n+++ b/zds/utils/misc.py\n@@ -49,14 +49,21 @@\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n \n \n-def contains_utf8mb4(s):\n+def remove_utf8mb4(s):\n \"\"\"\n- This string contains at least one character of more than 3 bytes\n+ Remove characters of more than 3 bytes.\n \"\"\"\n if not isinstance(s, str):\n s = str(s, \"utf-8\")\n re_pattern = re.compile(\"[^\\u0000-\\uD7FF\\uE000-\\uFFFF]\", re.UNICODE)\n- return s != re_pattern.sub(\"\\uFFFD\", s)\n+ return re_pattern.sub(\"\", s)\n+\n+\n+def contains_utf8mb4(s):\n+ \"\"\"\n+ Check if this string contains at least one character of more than 3 bytes.\n+ \"\"\"\n+ return s != remove_utf8mb4(s)\n \n \n def check_essential_accounts():\n", "issue": "S'inscrire avec Facebook cr\u00e9e un pseudo sans les lettres accentu\u00e9es\nSi je me m'inscrit \u00e0 partir de mon compte Facebook dont le nom est \u00ab Cl\u00e9mentine Sanp\u00e9pins \u00bb, mon pseudo sur Zeste de Savoir sera \u00ab ClmentineSanppins \u00bb.\r\n\r\nC'est le cas en local sur `upstream/dev` \u00e0 la cr\u00e9ation de ce ticket. 
Je ne sais pas si c'est le cas en b\u00eata et en production.\n", "before_files": [{"content": "from .config import config\n\n# best quality, 100 is the same but documentation says\n# ' values up to 100 are allowed, but this is not recommended'\n# so let's use 95\nTHUMBNAIL_QUALITY = 95\n# Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None\nTHUMBNAIL_PRESERVE_EXTENSIONS = (\"svg\",)\n\n\nsocial_auth_config = config.get(\"social_auth\", {})\n\nSOCIAL_AUTH_RAISE_EXCEPTIONS = False\n\nSOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n\nSOCIAL_AUTH_PIPELINE = (\n \"social_core.pipeline.social_auth.social_details\",\n \"social_core.pipeline.social_auth.social_uid\",\n \"social_core.pipeline.social_auth.auth_allowed\",\n \"social_core.pipeline.social_auth.social_user\",\n \"social_core.pipeline.user.get_username\",\n \"social_core.pipeline.social_auth.associate_by_email\",\n \"social_core.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social_core.pipeline.social_auth.associate_user\",\n \"social_core.pipeline.social_auth.load_extra_data\",\n \"social_core.pipeline.user.user_details\",\n)\n\n# Before adding new providers such as Facebook and Google,\n# you need to make sure they validate the user's email address on sign up!\n# If they don't, a malicious person could take control of someone else account!\nSOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get(\"facebook_key\", \"\")\nSOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get(\"facebook_secret\", \"\")\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get(\n \"google_oauth2_key\",\n \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\",\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get(\n \"google_oauth2_secret\",\n \"mApWNh3stCsYHwsGuWdbZWP8\",\n)\n\nSOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get(\n \"sanitize_redirects\",\n False,\n)\n\n\nrecaptcha_config = config.get(\"recaptcha\", {})\n\nUSE_CAPTCHA = recaptcha_config.get(\"use_captcha\", False)\nRECAPTCHA_PUBLIC_KEY = recaptcha_config.get(\"public_key\", \"dummy\")\nRECAPTCHA_PRIVATE_KEY = recaptcha_config.get(\"private_key\", \"dummy\")\n\n\nOAUTH2_PROVIDER = {\"OAUTH2_BACKEND_CLASS\": \"oauth2_provider.oauth2_backends.JSONOAuthLibCore\"}\n", "path": "zds/settings/abstract_base/requirements.py"}, {"content": "import hashlib\nimport re\n\nfrom django.contrib.auth import get_user_model\nfrom django.http import HttpRequest\n\nTHUMB_MAX_WIDTH = 80\nTHUMB_MAX_HEIGHT = 80\n\nMEDIUM_MAX_WIDTH = 200\nMEDIUM_MAX_HEIGHT = 200\n\n\ndef compute_hash(filenames):\n \"\"\"returns a md5 hexdigest of group of files to check if they have change\"\"\"\n md5_hash = hashlib.md5()\n for filename in filenames:\n if filename:\n file_handle = open(filename, \"rb\")\n must_continue = True\n while must_continue:\n read_bytes = file_handle.read(8096)\n if not read_bytes:\n must_continue = False\n else:\n md5_hash.update(read_bytes)\n return md5_hash.hexdigest()\n\n\ndef content_has_changed(filenames, md5):\n return md5 != compute_hash(filenames)\n\n\ndef has_changed(instance, field, manager=\"objects\"):\n \"\"\"Returns true if a field has changed in a model May be used in a\n model.save() method.\"\"\"\n if not instance.pk:\n return True\n manager = getattr(instance.__class__, manager)\n old = getattr(manager.get(pk=instance.pk), field)\n return not getattr(instance, field) == old\n\n\ndef convert_camel_to_underscore(camel_case):\n \"\"\"\n Converts a name in camel case to underscore.\n 
\"\"\"\n s1 = re.sub(\"(.)([A-Z][a-z]+)\", r\"\\1_\\2\", camel_case)\n return re.sub(\"([a-z0-9])([A-Z])\", r\"\\1_\\2\", s1).lower()\n\n\ndef contains_utf8mb4(s):\n \"\"\"\n This string contains at least one character of more than 3 bytes\n \"\"\"\n if not isinstance(s, str):\n s = str(s, \"utf-8\")\n re_pattern = re.compile(\"[^\\u0000-\\uD7FF\\uE000-\\uFFFF]\", re.UNICODE)\n return s != re_pattern.sub(\"\\uFFFD\", s)\n\n\ndef check_essential_accounts():\n \"\"\"\n Verify that essential accounts are present in the database.\n Raise an exception if it is not the case.\n \"\"\"\n\n from django.conf import settings\n\n User = get_user_model()\n essential_accounts = (\"bot_account\", \"anonymous_account\", \"external_account\")\n\n for account in essential_accounts:\n username = settings.ZDS_APP[\"member\"][account]\n try:\n User.objects.get(username=username)\n except User.DoesNotExist:\n raise Exception(\n f\"User {username!r} does not exist. You must create it to run the server. \"\n f\"On a development instance, load the fixtures to solve this issue.\"\n )\n\n\ndef is_ajax(request: HttpRequest):\n \"\"\"\n Check whether the request was sent asynchronously.\n\n The function returns True for :\n\n * requests sent using jQuery.ajax() since it sets the header `X-Requested-With`\n to `XMLHttpRequest` by default ;\n * requests sent using the tools provided by `ajax.js`, which reproduce the behavior\n described above to ease the progressive removal of jQuery from the codebase.\n\n The function returns False for requests without the appropriate header.\n These requests will not be recognized as AJAX.\n\n The function replaces `request.is_ajax()`, which is removed starting from Django 4.0.\n \"\"\"\n return request.META.get(\"HTTP_X_REQUESTED_WITH\") == \"XMLHttpRequest\"\n", "path": "zds/utils/misc.py"}, {"content": "from django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import EmailValidator, ProhibitNullCharactersValidator\nfrom django.utils.encoding import force_str\nfrom django.utils.translation import gettext_lazy as _\n\nfrom zds.utils.misc import contains_utf8mb4\nfrom zds.member.models import BannedEmailProvider, Profile\n\n\ndef validate_not_empty(value):\n \"\"\"\n Fields cannot be empty or only contain spaces.\n\n :param value: value to validate (str or None)\n :return:\n \"\"\"\n if value is None or not value.strip():\n raise ValidationError(_(\"Le champs ne peut \u00eatre vide\"))\n\n\nclass ZdSEmailValidator(EmailValidator):\n \"\"\"\n Based on https://docs.djangoproject.com/en/1.8/_modules/django/core/validators/#EmailValidator\n Changed :\n - check if provider is not if blacklisted\n - check if email is not used by another user\n - remove whitelist check\n - add custom errors and translate them into French\n \"\"\"\n\n message = _(\"Utilisez une adresse de courriel valide.\")\n\n def __call__(self, value, check_username_available=True):\n value = force_str(value)\n\n if not value or \"@\" not in value:\n raise ValidationError(self.message, code=self.code)\n\n user_part, domain_part = value.rsplit(\"@\", 1)\n\n if not self.user_regex.match(user_part) or contains_utf8mb4(user_part):\n raise ValidationError(self.message, code=self.code)\n\n # check if provider is blacklisted\n blacklist = BannedEmailProvider.objects.values_list(\"provider\", flat=True)\n for provider in blacklist:\n if f\"@{provider}\" in value.lower():\n raise ValidationError(_(\"Ce fournisseur ne peut pas \u00eatre utilis\u00e9.\"), 
code=self.code)\n\n # check if email is used by another user\n user_count = User.objects.filter(email=value).count()\n if check_username_available and user_count > 0:\n raise ValidationError(_(\"Cette adresse courriel est d\u00e9j\u00e0 utilis\u00e9e\"), code=self.code)\n # check if email exists in database\n elif not check_username_available and user_count == 0:\n raise ValidationError(_(\"Cette adresse courriel n'existe pas\"), code=self.code)\n\n if domain_part and not self.validate_domain_part(domain_part):\n # Try for possible IDN domain-part\n try:\n domain_part = domain_part.encode(\"idna\").decode(\"ascii\")\n if self.validate_domain_part(domain_part):\n return\n except UnicodeError:\n pass\n raise ValidationError(self.message, code=self.code)\n\n\nvalidate_zds_email = ZdSEmailValidator()\n\n\ndef validate_zds_username(value, check_username_available=True):\n \"\"\"\n Check if username is used by another user\n\n :param value: value to validate (str or None)\n :return:\n \"\"\"\n\n # If the character \\x00 is in the username, the homoglyphs library called\n # in Profile.find_username_skeleton() will raise a ValueError (the bug has\n # been reported: https://github.com/yamatt/homoglyphs/issues/6). To prevent\n # this, we call this validator which will raise a ValidationError if \\x00 is\n # in the username.\n ProhibitNullCharactersValidator()(value)\n\n msg = None\n user_count = User.objects.filter(username=value).count()\n skeleton_user_count = Profile.objects.filter(username_skeleton=Profile.find_username_skeleton(value)).count()\n if \",\" in value:\n msg = _(\"Le nom d'utilisateur ne peut contenir de virgules\")\n elif \"/\" in value:\n msg = _(\"Le nom d'utilisateur ne peut contenir de barres obliques\")\n elif contains_utf8mb4(value):\n msg = _(\"Le nom d'utilisateur ne peut pas contenir des caract\u00e8res utf8mb4\")\n elif check_username_available and user_count > 0:\n msg = _(\"Ce nom d'utilisateur est d\u00e9j\u00e0 utilis\u00e9\")\n elif check_username_available and skeleton_user_count > 0:\n msg = _(\"Un nom d'utilisateur visuellement proche du votre existe d\u00e9j\u00e0\")\n elif not check_username_available and user_count == 0:\n msg = _(\"Ce nom d'utilisateur n'existe pas\")\n if msg is not None:\n raise ValidationError(msg)\n\n\ndef validate_raw_zds_username(data):\n \"\"\"\n Check if raw username hasn't space on left or right\n \"\"\"\n msg = None\n username = data.get(\"username\", None)\n if username is None:\n msg = _(\"Le nom d'utilisateur n'est pas fourni\")\n elif username != username.strip():\n msg = _(\"Le nom d'utilisateur ne peut commencer ou finir par des espaces\")\n\n if msg is not None:\n raise ValidationError(msg)\n\n\ndef validate_zds_password(value):\n \"\"\"\n\n :param value:\n :return:\n \"\"\"\n if contains_utf8mb4(value):\n raise ValidationError(_(\"Le mot de passe ne peut pas contenir des caract\u00e8res utf8mb4\"))\n\n\ndef validate_passwords(\n cleaned_data, password_label=\"password\", password_confirm_label=\"password_confirm\", username=None\n):\n \"\"\"\n Chek if cleaned_data['password'] == cleaned_data['password_confirm'] and password is not username.\n :param cleaned_data:\n :param password_label:\n :param password_confirm_label:\n :return:\n \"\"\"\n\n password = cleaned_data.get(password_label)\n password_confirm = cleaned_data.get(password_confirm_label)\n msg = None\n\n if username is None:\n username = cleaned_data.get(\"username\")\n\n if not password_confirm == password:\n msg = _(\"Les mots de passe sont diff\u00e9rents\")\n\n 
if password_label in cleaned_data:\n del cleaned_data[password_label]\n\n if password_confirm_label in cleaned_data:\n del cleaned_data[password_confirm_label]\n\n if username is not None:\n # Check that password != username\n if password == username:\n msg = _(\"Le mot de passe doit \u00eatre diff\u00e9rent du pseudo\")\n if password_label in cleaned_data:\n del cleaned_data[password_label]\n if password_confirm_label in cleaned_data:\n del cleaned_data[password_confirm_label]\n\n if msg is not None:\n raise ValidationError(msg)\n\n return cleaned_data\n", "path": "zds/member/validators.py"}]} | 4,014 | 760 |