problem_id | source | task_type | in_source_id | prompt | golden_diff | verification_info
---|---|---|---|---|---|---
string | string | string | string | string | string | string
gh_patches_debug_3606 | rasdani/github-patches | git_diff | OBOFoundry__OBOFoundry.github.io-802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
travis on master failing, due to metadata violations from new jsonschema checks
There are two things wrong:
- the validate script assumes a util/reports folder
- hp is failing; we already know that hp has a custom license and this should be reported elsewhere and is not a schema violation
--- END ISSUE ---
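The first bullet comes down to working-directory sensitivity: the script builds its data, schema, and report paths as plain relative strings, so they resolve against whatever directory the CI job happens to run from rather than against the script's own location. A minimal illustration (assuming a checkout with `util/` and `registry/` at the top level; the relative strings are the ones used in the script quoted below):
```python
import os

# The same relative strings point at different files depending on os.getcwd():
paths = [
    "../registry/ontologies.jsonld",    # correct only when run from inside util/
    "metadata-schema.json",             # likewise expects util/ as the cwd
    "reports/metadata-violations.csv",  # expects a reports/ folder under the cwd
]
for rel in paths:
    print(f"{rel!r} -> {os.path.abspath(rel)} (exists: {os.path.exists(rel)})")
```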
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `util/validate-metadata.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import ast
4 import sys
5 import json
6 import jsonschema
7 import re
8
9 # file paths
10 data_file = "../registry/ontologies.jsonld"
11 schema_file = "metadata-schema.json"
12 schema_lite_file = "metadata-schema-lite.json"
13 report_file = "reports/metadata-violations.csv"
14
15 # ultra-escaped regex strings
16 email_sub = 'does not match \'\\^\\[\\^@\\]\\+\\$\''
17 fmt_sub = ('does not match \'\\^\\[0\\-9A\\-Za\\-z\\-_\\\\\\\\/\\]\\+'
18 '\\\\\\\\.\\(owl\\|obo\\|json\\|omn\\|ofn\\|owx\\|ttl\\|owl'
19 '\\\\\\\\.gz\\)\\$\'')
20
21 def validate():
22 """
23 Validate registry metadata.
24 """
25 print("--- validating metadata against {0} ---".format(schema_file))
26 data = load_data()
27 schema = load_schema()
28 # validate each object
29 errors = {}
30 for item in data["ontologies"]:
31 if 'is_obsolete' in item and item["is_obsolete"] is True:
32 continue
33 # skip any 'validate: false' ontologies
34 if 'validate' in item and item["validate"] is False:
35 continue
36 ont_id = item["id"]
37 try:
38 jsonschema.validate(item, schema)
39 except jsonschema.exceptions.ValidationError as ve:
40 print("ERROR in {0}".format(ont_id))
41 errors[ont_id] = format_msg(ve)
42 if errors:
43 write_errors(errors)
44 else:
45 print("SUCCESS - no errors found in metadata")
46 sys.exit(0)
47
48 def format_msg(ve):
49 """
50 Format exception message from jsonchema.validate(...).
51 """
52 # replace u characters
53 replace_u = re.sub('u\'', '\'', ve.message)
54 # replace scary regex strings
55 replace_email = re.sub(
56 email_sub, 'is not valid for \'contact.label\'', replace_u)
57 msg = re.sub(fmt_sub, 'is not valid for \'products.id\'', replace_email)
58
59 # check if output is for license error
60 is_license = re.search('({\'url\'.+?\'label\'.+?})', msg)
61 if is_license:
62 return format_license_msg(is_license.group(1))
63
64 # check if output is for list error
65 is_list = re.search('(\\[.+?\\]) is not of type \'string\'', msg)
66 if is_list:
67 return format_list_msg(is_list.group(1), ve)
68
69 # otherwise return the message
70 return msg
71
72 def format_license_msg(substr):
73 """
74 Format an exception message for a license issue.
75 """
76 # process to dict
77 d = json.loads(substr.replace('\'', '"'))
78 url = d['url']
79 label = d['label']
80 return '\'{0}\' <{1}> is not valid for \'license\''.format(label, url)
81
82 def format_list_msg(substr, ve):
83 """
84 Format an exception for an unexpected list.
85 """
86 l = json.loads(substr.replace('\'', '"'))
87 # use the full message to find the violating property
88 prop_find = re.search('On instance\\[(\'.+?\')\\]', str(ve))
89 if prop_find:
90 prop = prop_find.group(1)
91 return '{0} expects one value, got {1}'.format(prop, len(l))
92 else:
93 return substr
94
95 def load_schema():
96 """
97 Load the schema to validate against.
98 """
99 # read the schema
100 with open(schema_file) as f:
101 schema = json.load(f)
102 return schema
103
104 def load_data():
105 """
106 Load the data to validate.
107 """
108 # read the JSON-LD data
109 with open(data_file) as f:
110 data = json.load(f)
111 return data
112
113 def write_errors(errors):
114 """
115 Write validation errors to a user-friendly report.
116 """
117 with open(report_file, 'w+') as f:
118 f.write("ID,ERROR\n")
119 for ont_id, msg in errors.items():
120 f.write('"' + ont_id + '","' + msg + '"\n')
121 print(
122 "VALIDATION FAILED: {0} errors - see {1} for details".format(
123 len(errors), report_file))
124 sys.exit(1)
125
126 # run the process!
127 if __name__ == '__main__':
128 validate()
129
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/util/validate-metadata.py b/util/validate-metadata.py
--- a/util/validate-metadata.py
+++ b/util/validate-metadata.py
@@ -7,9 +7,9 @@
 import re
 
 # file paths
-data_file = "../registry/ontologies.jsonld"
-schema_file = "metadata-schema.json"
-schema_lite_file = "metadata-schema-lite.json"
+data_file = "registry/ontologies.jsonld"
+schema_file = "util/metadata-schema.json"
+schema_lite_file = "util/metadata-schema-lite.json"
 report_file = "reports/metadata-violations.csv"
 
 # ultra-escaped regex strings
| {"golden_diff": "diff --git a/util/validate-metadata.py b/util/validate-metadata.py\n--- a/util/validate-metadata.py\n+++ b/util/validate-metadata.py\n@@ -7,9 +7,9 @@\n import re\n \n # file paths\n-data_file = \"../registry/ontologies.jsonld\"\n-schema_file = \"metadata-schema.json\"\n-schema_lite_file = \"metadata-schema-lite.json\"\n+data_file = \"registry/ontologies.jsonld\"\n+schema_file = \"util/metadata-schema.json\"\n+schema_lite_file = \"util/metadata-schema-lite.json\"\n report_file = \"reports/metadata-violations.csv\"\n \n # ultra-escaped regex strings\n", "issue": "travis on master failing, due to metadata violations from new jsonschema checks\nThere are two things wrong:\r\n\r\n - the validate script assumes a util/reports folder\r\n - hp is failing; we already know that hp has a custom license and this should be reported elsewhere and is not a schema violation\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport ast\nimport sys\nimport json\nimport jsonschema\nimport re\n\n# file paths\ndata_file = \"../registry/ontologies.jsonld\"\nschema_file = \"metadata-schema.json\"\nschema_lite_file = \"metadata-schema-lite.json\"\nreport_file = \"reports/metadata-violations.csv\"\n\n# ultra-escaped regex strings\nemail_sub = 'does not match \\'\\\\^\\\\[\\\\^@\\\\]\\\\+\\\\$\\''\nfmt_sub = ('does not match \\'\\\\^\\\\[0\\\\-9A\\\\-Za\\\\-z\\\\-_\\\\\\\\\\\\\\\\/\\\\]\\\\+'\n '\\\\\\\\\\\\\\\\.\\\\(owl\\\\|obo\\\\|json\\\\|omn\\\\|ofn\\\\|owx\\\\|ttl\\\\|owl'\n '\\\\\\\\\\\\\\\\.gz\\\\)\\\\$\\'')\n\ndef validate():\n\t\"\"\"\n\tValidate registry metadata.\n\t\"\"\"\n\tprint(\"--- validating metadata against {0} ---\".format(schema_file))\n\tdata = load_data()\n\tschema = load_schema()\n\t# validate each object\n\terrors = {}\n\tfor item in data[\"ontologies\"]:\n\t\tif 'is_obsolete' in item and item[\"is_obsolete\"] is True:\n\t\t\tcontinue\n\t\t# skip any 'validate: false' ontologies\n\t\tif 'validate' in item and item[\"validate\"] is False:\n\t\t\tcontinue\n\t\tont_id = item[\"id\"]\n\t\ttry:\n\t\t\tjsonschema.validate(item, schema)\n\t\texcept jsonschema.exceptions.ValidationError as ve:\n\t\t\tprint(\"ERROR in {0}\".format(ont_id))\n\t\t\terrors[ont_id] = format_msg(ve)\n\tif errors:\n\t\twrite_errors(errors)\n\telse:\n\t\tprint(\"SUCCESS - no errors found in metadata\")\n\t\tsys.exit(0)\n\ndef format_msg(ve):\n\t\"\"\"\n\tFormat exception message from jsonchema.validate(...).\n\t\"\"\"\n\t# replace u characters\n\treplace_u = re.sub('u\\'', '\\'', ve.message)\n\t# replace scary regex strings\n\treplace_email = re.sub(\n\t\temail_sub, 'is not valid for \\'contact.label\\'', replace_u)\n\tmsg = re.sub(fmt_sub, 'is not valid for \\'products.id\\'', replace_email)\n\n\t# check if output is for license error\n\tis_license = re.search('({\\'url\\'.+?\\'label\\'.+?})', msg)\n\tif is_license:\n\t\treturn format_license_msg(is_license.group(1))\n\n\t# check if output is for list error\n\tis_list = re.search('(\\\\[.+?\\\\]) is not of type \\'string\\'', msg)\n\tif is_list:\n\t\treturn format_list_msg(is_list.group(1), ve)\n\n\t# otherwise return the message\n\treturn msg\n\ndef format_license_msg(substr):\n\t\"\"\"\n\tFormat an exception message for a license issue.\n\t\"\"\"\n\t# process to dict\n\td = json.loads(substr.replace('\\'', '\"'))\n\turl = d['url']\n\tlabel = d['label']\n\treturn '\\'{0}\\' <{1}> is not valid for \\'license\\''.format(label, url)\n\ndef format_list_msg(substr, ve):\n\t\"\"\"\n\tFormat an exception for an unexpected list.\n\t\"\"\"\n\tl = 
json.loads(substr.replace('\\'', '\"'))\n\t# use the full message to find the violating property\n\tprop_find = re.search('On instance\\\\[(\\'.+?\\')\\\\]', str(ve))\n\tif prop_find:\n\t\tprop = prop_find.group(1)\n\t\treturn '{0} expects one value, got {1}'.format(prop, len(l))\n\telse:\n\t\treturn substr\n\ndef load_schema():\n\t\"\"\"\n\tLoad the schema to validate against.\n\t\"\"\"\n\t# read the schema\n\twith open(schema_file) as f:\n\t\tschema = json.load(f)\n\treturn schema\n\ndef load_data():\n\t\"\"\"\n\tLoad the data to validate.\n\t\"\"\"\n\t# read the JSON-LD data\n\twith open(data_file) as f:\n\t\tdata = json.load(f)\n\treturn data\n\ndef write_errors(errors):\n\t\"\"\"\n\tWrite validation errors to a user-friendly report.\n\t\"\"\"\n\twith open(report_file, 'w+') as f:\n\t\tf.write(\"ID,ERROR\\n\")\n\t\tfor ont_id, msg in errors.items():\n\t\t\tf.write('\"' + ont_id + '\",\"' + msg + '\"\\n')\n\tprint(\n\t\t\"VALIDATION FAILED: {0} errors - see {1} for details\".format(\n\t\t\tlen(errors), report_file))\n\tsys.exit(1)\n\n# run the process!\nif __name__ == '__main__':\n\tvalidate()\n", "path": "util/validate-metadata.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport ast\nimport sys\nimport json\nimport jsonschema\nimport re\n\n# file paths\ndata_file = \"registry/ontologies.jsonld\"\nschema_file = \"util/metadata-schema.json\"\nschema_lite_file = \"util/metadata-schema-lite.json\"\nreport_file = \"reports/metadata-violations.csv\"\n\n# ultra-escaped regex strings\nemail_sub = 'does not match \\'\\\\^\\\\[\\\\^@\\\\]\\\\+\\\\$\\''\nfmt_sub = ('does not match \\'\\\\^\\\\[0\\\\-9A\\\\-Za\\\\-z\\\\-_\\\\\\\\\\\\\\\\/\\\\]\\\\+'\n '\\\\\\\\\\\\\\\\.\\\\(owl\\\\|obo\\\\|json\\\\|omn\\\\|ofn\\\\|owx\\\\|ttl\\\\|owl'\n '\\\\\\\\\\\\\\\\.gz\\\\)\\\\$\\'')\n\ndef validate():\n\t\"\"\"\n\tValidate registry metadata.\n\t\"\"\"\n\tprint(\"--- validating metadata against {0} ---\".format(schema_file))\n\tdata = load_data()\n\tschema = load_schema()\n\t# validate each object\n\terrors = {}\n\tfor item in data[\"ontologies\"]:\n\t\tif 'is_obsolete' in item and item[\"is_obsolete\"] is True:\n\t\t\tcontinue\n\t\t# skip any 'validate: false' ontologies\n\t\tif 'validate' in item and item[\"validate\"] is False:\n\t\t\tcontinue\n\t\tont_id = item[\"id\"]\n\t\ttry:\n\t\t\tjsonschema.validate(item, schema)\n\t\texcept jsonschema.exceptions.ValidationError as ve:\n\t\t\tprint(\"ERROR in {0}\".format(ont_id))\n\t\t\terrors[ont_id] = format_msg(ve)\n\tif errors:\n\t\twrite_errors(errors)\n\telse:\n\t\tprint(\"SUCCESS - no errors found in metadata\")\n\t\tsys.exit(0)\n\ndef format_msg(ve):\n\t\"\"\"\n\tFormat exception message from jsonchema.validate(...).\n\t\"\"\"\n\t# replace u characters\n\treplace_u = re.sub('u\\'', '\\'', ve.message)\n\t# replace scary regex strings\n\treplace_email = re.sub(\n\t\temail_sub, 'is not valid for \\'contact.label\\'', replace_u)\n\tmsg = re.sub(fmt_sub, 'is not valid for \\'products.id\\'', replace_email)\n\n\t# check if output is for license error\n\tis_license = re.search('({\\'url\\'.+?\\'label\\'.+?})', msg)\n\tif is_license:\n\t\treturn format_license_msg(is_license.group(1))\n\n\t# check if output is for list error\n\tis_list = re.search('(\\\\[.+?\\\\]) is not of type \\'string\\'', msg)\n\tif is_list:\n\t\treturn format_list_msg(is_list.group(1), ve)\n\n\t# otherwise return the message\n\treturn msg\n\ndef format_license_msg(substr):\n\t\"\"\"\n\tFormat an exception message for a license issue.\n\t\"\"\"\n\t# process to dict\n\td = 
json.loads(substr.replace('\\'', '\"'))\n\turl = d['url']\n\tlabel = d['label']\n\treturn '\\'{0}\\' <{1}> is not valid for \\'license\\''.format(label, url)\n\ndef format_list_msg(substr, ve):\n\t\"\"\"\n\tFormat an exception for an unexpected list.\n\t\"\"\"\n\tl = json.loads(substr.replace('\\'', '\"'))\n\t# use the full message to find the violating property\n\tprop_find = re.search('On instance\\\\[(\\'.+?\\')\\\\]', str(ve))\n\tif prop_find:\n\t\tprop = prop_find.group(1)\n\t\treturn '{0} expects one value, got {1}'.format(prop, len(l))\n\telse:\n\t\treturn substr\n\ndef load_schema():\n\t\"\"\"\n\tLoad the schema to validate against.\n\t\"\"\"\n\t# read the schema\n\twith open(schema_file) as f:\n\t\tschema = json.load(f)\n\treturn schema\n\ndef load_data():\n\t\"\"\"\n\tLoad the data to validate.\n\t\"\"\"\n\t# read the JSON-LD data\n\twith open(data_file) as f:\n\t\tdata = json.load(f)\n\treturn data\n\ndef write_errors(errors):\n\t\"\"\"\n\tWrite validation errors to a user-friendly report.\n\t\"\"\"\n\twith open(report_file, 'w+') as f:\n\t\tf.write(\"ID,ERROR\\n\")\n\t\tfor ont_id, msg in errors.items():\n\t\t\tf.write('\"' + ont_id + '\",\"' + msg + '\"\\n')\n\tprint(\n\t\t\"VALIDATION FAILED: {0} errors - see {1} for details\".format(\n\t\t\tlen(errors), report_file))\n\tsys.exit(1)\n\n# run the process!\nif __name__ == '__main__':\n\tvalidate()\n", "path": "util/validate-metadata.py"}]} |
gh_patches_debug_53523 | rasdani/github-patches | git_diff | Pycord-Development__pycord-1563 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Registered commands not recognized without sync_commands call
### Summary
Previously registered commands are not pulled from the internal cache when an unknown command is received. Instead, a call to sync_commands is made every time.
### Reproduction Steps
1. Run MRC Part 1
2. Run MRC Part 2
3. Use the `/test` command twice
4. Observe results
### Minimal Reproducible Code
```python
import discord
import logging
from discord.ext import commands
bot = commands.Bot(command_prefix=",")
@bot.event
async def on_ready():
    print("ready")
@bot.slash_command(name="test")
async def _test(ctx):
    await ctx.respond("hello world")
bot.run()
```
Part 2:
```python
import discord
import logging
from discord.ext import commands
bot = commands.Bot(command_prefix=",")
logging.basicConfig(level="DEBUG")
@bot.event
async def on_ready():
    print("ready")
@bot.event
async def on_connect():
    print("connected")
    print("skipping call to sync_commands")
@bot.slash_command(name="test")
async def _test(ctx):
    await ctx.respond("hello world")
bot.run("")
```
### Expected Results
Command should be pulled from the internal cache without a call to `sync_commands`
### Actual Results
`sync_commands` is called.
### Intents
Any
### System Information
- Python v3.8.9-finalv
- py-cord v2.1.0-final
- aiohttp v3.8.1
- system info: Darwin 21.5.0 Darwin Kernel Version 21.5.0: Tue Apr 26 21:08:29 PDT 2022; root:xnu-8020.121.3~4/RELEASE_ARM64_T8101
### Checklist
- [X] I have searched the open issues for duplicates.
- [X] I have shown the entire traceback, if possible.
- [X] I have removed my token from display, if visible.
### Additional Context
_No response_
--- END ISSUE ---
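For orientation, a rough sketch of the cache lookup the reporter expects: resolve the interaction locally (first by the Discord-assigned command id, then by name against the commands declared in code) and only fall back to a fresh sync when neither matches. All names below are hypothetical and are not pycord's API:
```python
from typing import Dict, List, Optional

class FakeCommand:
    def __init__(self, name: str, command_id: Optional[int] = None):
        self.name = name
        self.id = command_id

def resolve(data: dict, by_id: Dict[int, FakeCommand], declared: List[FakeCommand]) -> Optional[FakeCommand]:
    # 1) fast path: id-keyed cache, which is only populated after a registration/sync
    cmd = by_id.get(int(data["id"]))
    if cmd is not None:
        return cmd
    # 2) fallback: match by name against the commands declared in code, so a
    #    restart that skipped sync_commands can still find the command
    return next((c for c in declared if c.name == data["name"]), None)

# After a fresh start with no sync, by_id is empty but the declared command is still found:
found = resolve({"id": "123", "name": "test"}, {}, [FakeCommand("test")])
print(found.name if found else "would trigger a sync")
```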
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `discord/bot.py`
Content:
```
1 """
2 The MIT License (MIT)
3
4 Copyright (c) 2015-2021 Rapptz
5 Copyright (c) 2021-present Pycord Development
6
7 Permission is hereby granted, free of charge, to any person obtaining a
8 copy of this software and associated documentation files (the "Software"),
9 to deal in the Software without restriction, including without limitation
10 the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 and/or sell copies of the Software, and to permit persons to whom the
12 Software is furnished to do so, subject to the following conditions:
13
14 The above copyright notice and this permission notice shall be included in
15 all copies or substantial portions of the Software.
16
17 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
20 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 DEALINGS IN THE SOFTWARE.
24 """
25
26 from __future__ import annotations
27
28 import asyncio
29 import collections
30 import copy
31 import inspect
32 import logging
33 import sys
34 import traceback
35 from abc import ABC, abstractmethod
36 from typing import (
37 Any,
38 Callable,
39 Coroutine,
40 Dict,
41 Generator,
42 List,
43 Literal,
44 Optional,
45 Type,
46 TypeVar,
47 Union,
48 )
49
50 from .client import Client
51 from .cog import CogMixin
52 from .commands import (
53 ApplicationCommand,
54 ApplicationContext,
55 AutocompleteContext,
56 MessageCommand,
57 SlashCommand,
58 SlashCommandGroup,
59 UserCommand,
60 command,
61 )
62 from .enums import InteractionType
63 from .errors import CheckFailure, DiscordException
64 from .interactions import Interaction
65 from .shard import AutoShardedClient
66 from .types import interactions
67 from .user import User
68 from .utils import MISSING, async_all, find, get
69
70 CoroFunc = Callable[..., Coroutine[Any, Any, Any]]
71 CFT = TypeVar("CFT", bound=CoroFunc)
72
73 __all__ = (
74 "ApplicationCommandMixin",
75 "Bot",
76 "AutoShardedBot",
77 )
78
79 _log = logging.getLogger(__name__)
80
81
82 class ApplicationCommandMixin(ABC):
83 """A mixin that implements common functionality for classes that need
84 application command compatibility.
85
86 Attributes
87 -----------
88 application_commands: :class:`dict`
89 A mapping of command id string to :class:`.ApplicationCommand` objects.
90 pending_application_commands: :class:`list`
91 A list of commands that have been added but not yet registered. This is read-only and is modified via other
92 methods.
93 """
94
95 def __init__(self, *args, **kwargs) -> None:
96 super().__init__(*args, **kwargs)
97 self._pending_application_commands = []
98 self._application_commands = {}
99
100 @property
101 def all_commands(self):
102 return self._application_commands
103
104 @property
105 def pending_application_commands(self):
106 return self._pending_application_commands
107
108 @property
109 def commands(self) -> List[Union[ApplicationCommand, Any]]:
110 commands = self.application_commands
111 if self._bot._supports_prefixed_commands and hasattr(self._bot, "prefixed_commands"):
112 commands += self._bot.prefixed_commands
113 return commands
114
115 @property
116 def application_commands(self) -> List[ApplicationCommand]:
117 return list(self._application_commands.values())
118
119 def add_application_command(self, command: ApplicationCommand) -> None:
120 """Adds a :class:`.ApplicationCommand` into the internal list of commands.
121
122 This is usually not called, instead the :meth:`command` or
123 other shortcut decorators are used instead.
124
125 .. versionadded:: 2.0
126
127 Parameters
128 -----------
129 command: :class:`.ApplicationCommand`
130 The command to add.
131 """
132 if isinstance(command, SlashCommand) and command.is_subcommand:
133 raise TypeError("The provided command is a sub-command of group")
134
135 if self._bot.debug_guilds and command.guild_ids is None:
136 command.guild_ids = self._bot.debug_guilds
137
138 for cmd in self.pending_application_commands:
139 if cmd == command:
140 command.id = cmd.id
141 self._application_commands[command.id] = command
142 break
143 self._pending_application_commands.append(command)
144
145 def remove_application_command(self, command: ApplicationCommand) -> Optional[ApplicationCommand]:
146 """Remove a :class:`.ApplicationCommand` from the internal list
147 of commands.
148
149 .. versionadded:: 2.0
150
151 Parameters
152 -----------
153 command: :class:`.ApplicationCommand`
154 The command to remove.
155
156 Returns
157 --------
158 Optional[:class:`.ApplicationCommand`]
159 The command that was removed. If the name is not valid then
160 ``None`` is returned instead.
161 """
162 if command.id is None:
163 try:
164 index = self._pending_application_commands.index(command)
165 except ValueError:
166 return None
167 return self._pending_application_commands.pop(index)
168 return self._application_commands.pop(command.id, None)
169
170 @property
171 def get_command(self):
172 """Shortcut for :meth:`.get_application_command`.
173
174 .. note::
175 Overridden in :class:`ext.commands.Bot`.
176
177 .. versionadded:: 2.0
178 """
179 # TODO: Do something like we did in self.commands for this
180 return self.get_application_command
181
182 def get_application_command(
183 self,
184 name: str,
185 guild_ids: Optional[List[int]] = None,
186 type: Type[ApplicationCommand] = SlashCommand,
187 ) -> Optional[ApplicationCommand]:
188 """Get a :class:`.ApplicationCommand` from the internal list
189 of commands.
190
191 .. versionadded:: 2.0
192
193 Parameters
194 -----------
195 name: :class:`str`
196 The name of the command to get.
197 guild_ids: List[:class:`int`]
198 The guild ids associated to the command to get.
199 type: Type[:class:`.ApplicationCommand`]
200 The type of the command to get. Defaults to :class:`.SlashCommand`.
201
202 Returns
203 --------
204 Optional[:class:`.ApplicationCommand`]
205 The command that was requested. If not found, returns ``None``.
206 """
207
208 for command in self._application_commands.values():
209 if command.name == name and isinstance(command, type):
210 if guild_ids is not None and command.guild_ids != guild_ids:
211 return
212 return command
213
214 async def get_desynced_commands(
215 self,
216 guild_id: Optional[int] = None,
217 prefetched: Optional[List[ApplicationCommand]] = None
218 ) -> List[Dict[str, Any]]:
219 """|coro|
220
221 Gets the list of commands that are desynced from discord. If ``guild_id`` is specified, it will only return
222 guild commands that are desynced from said guild, else it will return global commands.
223
224 .. note::
225 This function is meant to be used internally, and should only be used if you want to override the default
226 command registration behavior.
227
228 .. versionadded:: 2.0
229
230
231 Parameters
232 ----------
233 guild_id: Optional[:class:`int`]
234 The guild id to get the desynced commands for, else global commands if unspecified.
235 prefetched: Optional[List[:class:`.ApplicationCommand`]]
236 If you already fetched the commands, you can pass them here to be used. Not recommended for typical usage.
237
238 Returns
239 -------
240 List[Dict[:class:`str`, Any]]
241 A list of the desynced commands. Each will come with at least the ``cmd`` and ``action`` keys, which
242 respectively contain the command and the action to perform. Other keys may also be present depending on
243 the action, including ``id``.
244 """
245
246 # We can suggest the user to upsert, edit, delete, or bulk upsert the commands
247
248 def _check_command(cmd: ApplicationCommand, match: Dict) -> bool:
249 if isinstance(cmd, SlashCommandGroup):
250 if len(cmd.subcommands) != len(match.get("options", [])):
251 return True
252 for i, subcommand in enumerate(cmd.subcommands):
253 match_ = next(
254 (data for data in match["options"] if data["name"] == subcommand.name),
255 MISSING,
256 )
257 if match_ is not MISSING and _check_command(subcommand, match_):
258 return True
259 else:
260 as_dict = cmd.to_dict()
261 to_check = {
262 "dm_permission": None,
263 "default_member_permissions": None,
264 "name": None,
265 "description": None,
266 "name_localizations": None,
267 "description_localizations": None,
268 "options": [
269 "type",
270 "name",
271 "description",
272 "autocomplete",
273 "choices",
274 "name_localizations",
275 "description_localizations",
276 ],
277 }
278 for check, value in to_check.items():
279 if type(to_check[check]) == list:
280 # We need to do some falsy conversion here
281 # The API considers False (autocomplete) and [] (choices) to be falsy values
282 falsy_vals = (False, [])
283 for opt in value:
284 cmd_vals = [val.get(opt, MISSING) for val in as_dict[check]] if check in as_dict else []
285 for i, val in enumerate(cmd_vals):
286 if val in falsy_vals:
287 cmd_vals[i] = MISSING
288 if match.get(check, MISSING) is not MISSING and cmd_vals != [
289 val.get(opt, MISSING) for val in match[check]
290 ]:
291 # We have a difference
292 return True
293 elif getattr(cmd, check, None) != match.get(check):
294 # We have a difference
295 if check == "default_permission" and getattr(cmd, check) is True and match.get(check) is None:
296 # This is a special case
297 # TODO: Remove for perms v2
298 continue
299 return True
300 return False
301
302 return_value = []
303 cmds = self.pending_application_commands.copy()
304
305 if guild_id is None:
306 if prefetched is not None:
307 registered_commands = prefetched
308 else:
309 registered_commands = await self._bot.http.get_global_commands(self.user.id)
310 pending = [cmd for cmd in cmds if cmd.guild_ids is None]
311 else:
312 if prefetched is not None:
313 registered_commands = prefetched
314 else:
315 registered_commands = await self._bot.http.get_guild_commands(self.user.id, guild_id)
316 pending = [cmd for cmd in cmds if cmd.guild_ids is not None and guild_id in cmd.guild_ids]
317
318 registered_commands_dict = {cmd["name"]: cmd for cmd in registered_commands}
319 # First let's check if the commands we have locally are the same as the ones on discord
320 for cmd in pending:
321 match = registered_commands_dict.get(cmd.name)
322 if match is None:
323 # We don't have this command registered
324 return_value.append({"command": cmd, "action": "upsert"})
325 elif _check_command(cmd, match):
326 return_value.append(
327 {
328 "command": cmd,
329 "action": "edit",
330 "id": int(registered_commands_dict[cmd.name]["id"]),
331 }
332 )
333 else:
334 # We have this command registered but it's the same
335 return_value.append({"command": cmd, "action": None, "id": int(match["id"])})
336
337 # Now let's see if there are any commands on discord that we need to delete
338 for cmd, value_ in registered_commands_dict.items():
339 match = get(pending, name=registered_commands_dict[cmd]["name"])
340 if match is None:
341 # We have this command registered but not in our list
342 return_value.append(
343 {
344 "command": registered_commands_dict[cmd]["name"],
345 "id": int(value_["id"]),
346 "action": "delete",
347 }
348 )
349
350 continue
351
352 return return_value
353
354 async def register_command(
355 self,
356 command: ApplicationCommand,
357 force: bool = True,
358 guild_ids: List[int] = None,
359 ) -> None:
360 """|coro|
361
362 Registers a command. If the command has ``guild_ids`` set, or if the ``guild_ids`` parameter is passed,
363 the command will be registered as a guild command for those guilds.
364
365 Parameters
366 ----------
367 command: :class:`~.ApplicationCommand`
368 The command to register.
369 force: :class:`bool`
370 Whether to force the command to be registered. If this is set to False, the command will only be registered
371 if it seems to already be registered and up to date with our internal cache. Defaults to True.
372 guild_ids: :class:`list`
373 A list of guild ids to register the command for. If this is not set, the command's
374 :attr:`ApplicationCommand.guild_ids` attribute will be used.
375
376 Returns
377 -------
378 :class:`~.ApplicationCommand`
379 The command that was registered
380 """
381 # TODO: Write this
382 raise RuntimeError("This function has not been implemented yet")
383
384 async def register_commands(
385 self,
386 commands: Optional[List[ApplicationCommand]] = None,
387 guild_id: Optional[int] = None,
388 method: Literal["individual", "bulk", "auto"] = "bulk",
389 force: bool = False,
390 delete_existing: bool = True,
391 ) -> List[interactions.ApplicationCommand]:
392 """|coro|
393
394 Register a list of commands.
395
396 .. versionadded:: 2.0
397
398 Parameters
399 ----------
400 commands: Optional[List[:class:`~.ApplicationCommand`]]
401 A list of commands to register. If this is not set (``None``), then all commands will be registered.
402 guild_id: Optional[int]
403 If this is set, the commands will be registered as a guild command for the respective guild. If it is not
404 set, the commands will be registered according to their :attr:`ApplicationCommand.guild_ids` attribute.
405 method: Literal['individual', 'bulk', 'auto']
406 The method to use when registering the commands. If this is set to "individual", then each command will be
407 registered individually. If this is set to "bulk", then all commands will be registered in bulk. If this is
408 set to "auto", then the method will be determined automatically. Defaults to "bulk".
409 force: :class:`bool`
410 Registers the commands regardless of the state of the command on Discord. This uses one less API call, but
411 can result in hitting rate limits more often. Defaults to False.
412 delete_existing: :class:`bool`
413 Whether to delete existing commands that are not in the list of commands to register. Defaults to True.
414 """
415 if commands is None:
416 commands = self.pending_application_commands
417
418 commands = [copy.copy(cmd) for cmd in commands]
419
420 if guild_id is not None:
421 for cmd in commands:
422 to_rep_with = [guild_id]
423 cmd.guild_ids = to_rep_with
424
425 is_global = guild_id is None
426
427 registered = []
428
429 if is_global:
430 pending = list(filter(lambda c: c.guild_ids is None, commands))
431 registration_methods = {
432 "bulk": self._bot.http.bulk_upsert_global_commands,
433 "upsert": self._bot.http.upsert_global_command,
434 "delete": self._bot.http.delete_global_command,
435 "edit": self._bot.http.edit_global_command,
436 }
437
438 def _register(method: Literal["bulk", "upsert", "delete", "edit"], *args, **kwargs):
439 return registration_methods[method](self._bot.user.id, *args, **kwargs)
440
441 else:
442 pending = list(
443 filter(
444 lambda c: c.guild_ids is not None and guild_id in c.guild_ids,
445 commands,
446 )
447 )
448 registration_methods = {
449 "bulk": self._bot.http.bulk_upsert_guild_commands,
450 "upsert": self._bot.http.upsert_guild_command,
451 "delete": self._bot.http.delete_guild_command,
452 "edit": self._bot.http.edit_guild_command,
453 }
454
455 def _register(method: Literal["bulk", "upsert", "delete", "edit"], *args, **kwargs):
456 return registration_methods[method](self._bot.user.id, guild_id, *args, **kwargs)
457
458 def register(method: Literal["bulk", "upsert", "delete", "edit"], *args, **kwargs):
459 if kwargs.pop("_log", True):
460 if method == "bulk":
461 _log.debug(f"Bulk updating commands {[c['name'] for c in args[0]]} for guild {guild_id}")
462 elif method == "upsert":
463 _log.debug(f"Creating command {cmd['name']} for guild {guild_id}")
464 elif method == "edit":
465 _log.debug(f"Editing command {cmd['name']} for guild {guild_id}")
466 elif method == "delete":
467 _log.debug(f"Deleting command {cmd['name']} for guild {guild_id}")
468 return _register(method, *args, **kwargs)
469
470 pending_actions = []
471
472 if not force:
473 if guild_id is None:
474 prefetched_commands = await self.http.get_global_commands(self.user.id)
475 else:
476 prefetched_commands = await self.http.get_guild_commands(self.user.id, guild_id)
477 desynced = await self.get_desynced_commands(guild_id=guild_id, prefetched=prefetched_commands)
478
479 for cmd in desynced:
480 if cmd["action"] == "delete":
481 pending_actions.append(
482 {
483 "action": "delete" if delete_existing else None,
484 "command": collections.namedtuple("Command", ["name"])(name=cmd["command"]),
485 "id": cmd["id"],
486 }
487 )
488 continue
489 # We can assume the command item is a command, since it's only a string if action is delete
490 match = get(pending, name=cmd["command"].name, type=cmd["command"].type)
491 if match is None:
492 continue
493 if cmd["action"] == "edit":
494 pending_actions.append(
495 {
496 "action": "edit",
497 "command": match,
498 "id": cmd["id"],
499 }
500 )
501 elif cmd["action"] == "upsert":
502 pending_actions.append(
503 {
504 "action": "upsert",
505 "command": match,
506 }
507 )
508 elif cmd["action"] is None:
509 pending_actions.append(
510 {
511 "action": None,
512 "command": match,
513 }
514 )
515 else:
516 raise ValueError(f"Unknown action: {cmd['action']}")
517 filtered_no_action = list(filter(lambda c: c["action"] is not None, pending_actions))
518 filtered_deleted = list(filter(lambda a: a["action"] != "delete", pending_actions))
519 if method == "bulk" or (method == "auto" and len(filtered_deleted) == len(pending)):
520 # Either the method is bulk or all the commands need to be modified, so we can just do a bulk upsert
521 data = [cmd["command"].to_dict() for cmd in filtered_deleted]
522 # If there's nothing to update, don't bother
523 if len(filtered_no_action) == 0:
524 _log.debug("Skipping bulk command update: Commands are up to date")
525 registered = prefetched_commands
526 else:
527 _log.debug(
528 f"Bulk updating commands %s for guild %s",
529 {c["command"].name: c["action"] for c in pending_actions},
530 guild_id,
531 )
532 registered = await register("bulk", data, _log=False)
533 else:
534 if not filtered_no_action:
535 registered = []
536 for cmd in filtered_no_action:
537 if cmd["action"] == "delete":
538 await register("delete", cmd["command"])
539 continue
540 if cmd["action"] == "edit":
541 registered.append(await register("edit", cmd["id"], cmd["command"].to_dict()))
542 elif cmd["action"] == "upsert":
543 registered.append(await register("upsert", cmd["command"].to_dict()))
544 else:
545 raise ValueError(f"Unknown action: {cmd['action']}")
546
547 # TODO: Our lists dont work sometimes, see if that can be fixed so we can avoid this second API call
548 if method != "bulk":
549 if guild_id is None:
550 registered = await self._bot.http.get_global_commands(self._bot.user.id)
551 else:
552 registered = await self._bot.http.get_guild_commands(self._bot.user.id, guild_id)
553 else:
554 data = [cmd.to_dict() for cmd in pending]
555 registered = await register("bulk", data)
556
557 for i in registered:
558 cmd = get(
559 self.pending_application_commands,
560 name=i["name"],
561 type=i["type"],
562 )
563 if not cmd:
564 raise ValueError(f"Registered command {i['name']}, type {i['type']} not found in pending commands")
565 cmd.id = i["id"]
566 self._application_commands[cmd.id] = cmd
567
568 return registered
569
570 async def sync_commands(
571 self,
572 commands: Optional[List[ApplicationCommand]] = None,
573 method: Literal["individual", "bulk", "auto"] = "bulk",
574 force: bool = False,
575 guild_ids: Optional[List[int]] = None,
576 register_guild_commands: bool = True,
577 check_guilds: Optional[List[int]] = [],
578 delete_existing: bool = True,
579 ) -> None:
580 """|coro|
581
582 Registers all commands that have been added through :meth:`.add_application_command`. This method cleans up all
583 commands over the API and should sync them with the internal cache of commands. It attempts to register the
584 commands in the most efficient way possible, unless ``force`` is set to ``True``, in which case it will always
585 register all commands.
586
587 By default, this coroutine is called inside the :func:`.on_connect` event. If you choose to override the
588 :func:`.on_connect` event, then you should invoke this coroutine as well.
589
590 .. note::
591 If you remove all guild commands from a particular guild, the library may not be able to detect and update
592 the commands accordingly, as it would have to individually check for each guild. To force the library to
593 unregister a guild's commands, call this function with ``commands=[]`` and ``guild_ids=[guild_id]``.
594
595 .. versionadded:: 2.0
596
597 Parameters
598 ----------
599 commands: Optional[List[:class:`~.ApplicationCommand`]]
600 A list of commands to register. If this is not set (None), then all commands will be registered.
601 method: Literal['individual', 'bulk', 'auto']
602 The method to use when registering the commands. If this is set to "individual", then each command will be
603 registered individually. If this is set to "bulk", then all commands will be registered in bulk. If this is
604 set to "auto", then the method will be determined automatically. Defaults to "bulk".
605 force: :class:`bool`
606 Registers the commands regardless of the state of the command on Discord. This uses one less API call, but
607 can result in hitting rate limits more often. Defaults to False.
608 guild_ids: Optional[List[:class:`int`]]
609 A list of guild ids to register the commands for. If this is not set, the commands'
610 :attr:`~.ApplicationCommand.guild_ids` attribute will be used.
611 register_guild_commands: :class:`bool`
612 Whether to register guild commands. Defaults to True.
613 check_guilds: Optional[List[:class:`int`]]
614 A list of guilds ids to check for commands to unregister, since the bot would otherwise have to check all
615 guilds. Unlike ``guild_ids``, this does not alter the commands' :attr:`~.ApplicationCommand.guild_ids`
616 attribute, instead it adds the guild ids to a list of guilds to sync commands for. If
617 ``register_guild_commands`` is set to False, then this parameter is ignored.
618 delete_existing: :class:`bool`
619 Whether to delete existing commands that are not in the list of commands to register. Defaults to True.
620 """
621
622 check_guilds = list(set((check_guilds or []) + (self.debug_guilds or [])))
623
624 if commands is None:
625 commands = self.pending_application_commands
626
627 if guild_ids is not None:
628 for cmd in commands:
629 cmd.guild_ids = guild_ids
630
631 global_commands = [cmd for cmd in commands if cmd.guild_ids is None]
632 registered_commands = await self.register_commands(
633 global_commands, method=method, force=force, delete_existing=delete_existing
634 )
635
636 registered_guild_commands = {}
637
638 if register_guild_commands:
639 cmd_guild_ids = []
640 for cmd in commands:
641 if cmd.guild_ids is not None:
642 cmd_guild_ids.extend(cmd.guild_ids)
643 if check_guilds is not None:
644 cmd_guild_ids.extend(check_guilds)
645 for guild_id in set(cmd_guild_ids):
646 guild_commands = [cmd for cmd in commands if cmd.guild_ids is not None and guild_id in cmd.guild_ids]
647 registered_guild_commands[guild_id] = await self.register_commands(
648 guild_commands, guild_id=guild_id, method=method, force=force, delete_existing=delete_existing
649 )
650
651 for i in registered_commands:
652 cmd = get(
653 self.pending_application_commands,
654 name=i["name"],
655 guild_ids=None,
656 type=i["type"],
657 )
658 if cmd:
659 cmd.id = i["id"]
660 self._application_commands[cmd.id] = cmd
661
662 for guild_id, commands in registered_guild_commands.items():
663 for i in commands:
664 cmd = find(
665 lambda cmd: cmd.name == i["name"]
666 and cmd.type == i["type"]
667 and cmd.guild_ids is not None
668 and int(i["guild_id"]) in cmd.guild_ids,
669 self.pending_application_commands,
670 )
671 if not cmd:
672 # command has not been added yet
673 continue
674 cmd.id = i["id"]
675 self._application_commands[cmd.id] = cmd
676
677 async def process_application_commands(self, interaction: Interaction, auto_sync: bool = None) -> None:
678 """|coro|
679
680 This function processes the commands that have been registered
681 to the bot and other groups. Without this coroutine, none of the
682 commands will be triggered.
683
684 By default, this coroutine is called inside the :func:`.on_interaction`
685 event. If you choose to override the :func:`.on_interaction` event, then
686 you should invoke this coroutine as well.
687
688 This function finds a registered command matching the interaction id from
689 application commands and invokes it. If no matching command was
690 found, it replies to the interaction with a default message.
691
692 .. versionadded:: 2.0
693
694 Parameters
695 -----------
696 interaction: :class:`discord.Interaction`
697 The interaction to process
698 auto_sync: :class:`bool`
699 Whether to automatically sync and unregister the command if it is not found in the internal cache. This will
700 invoke the :meth:`~.Bot.sync_commands` method on the context of the command, either globally or per-guild,
701 based on the type of the command, respectively. Defaults to :attr:`.Bot.auto_sync_commands`.
702 """
703 if auto_sync is None:
704 auto_sync = self._bot.auto_sync_commands
705 if interaction.type not in (
706 InteractionType.application_command,
707 InteractionType.auto_complete,
708 ):
709 return
710
711 try:
712 command = self._application_commands[interaction.data["id"]]
713 except KeyError:
714 for cmd in self.application_commands:
715 guild_id = interaction.data.get("guild_id")
716 if guild_id:
717 guild_id = int(guild_id)
718 if cmd.name == interaction.data["name"] and (
719 guild_id == cmd.guild_ids or (isinstance(cmd.guild_ids, list) and guild_id in cmd.guild_ids)
720 ):
721 command = cmd
722 break
723 else:
724 if auto_sync:
725 guild_id = interaction.data.get("guild_id")
726 if guild_id is None:
727 await self.sync_commands()
728 else:
729
730 await self.sync_commands(check_guilds=[guild_id])
731 return self._bot.dispatch("unknown_application_command", interaction)
732
733 if interaction.type is InteractionType.auto_complete:
734 return self.dispatch("application_command_auto_complete", interaction, command)
735
736 ctx = await self.get_application_context(interaction)
737 ctx.command = command
738 await self.invoke_application_command(ctx)
739
740 async def on_application_command_auto_complete(self, interaction: Interaction, command: ApplicationCommand) -> None:
741 async def callback() -> None:
742 ctx = await self.get_autocomplete_context(interaction)
743 ctx.command = command
744 return await command.invoke_autocomplete_callback(ctx)
745
746 autocomplete_task = self.loop.create_task(callback())
747 try:
748 await self.wait_for("application_command_auto_complete", check=lambda i, c: c == command, timeout=3)
749 except asyncio.TimeoutError:
750 return
751 else:
752 if not autocomplete_task.done():
753 autocomplete_task.cancel()
754
755 def slash_command(self, **kwargs):
756 """A shortcut decorator that invokes :func:`command` and adds it to
757 the internal command list via :meth:`add_application_command`.
758 This shortcut is made specifically for :class:`.SlashCommand`.
759
760 .. versionadded:: 2.0
761
762 Returns
763 --------
764 Callable[..., :class:`SlashCommand`]
765 A decorator that converts the provided method into a :class:`.SlashCommand`, adds it to the bot,
766 then returns it.
767 """
768 return self.application_command(cls=SlashCommand, **kwargs)
769
770 def user_command(self, **kwargs):
771 """A shortcut decorator that invokes :func:`command` and adds it to
772 the internal command list via :meth:`add_application_command`.
773 This shortcut is made specifically for :class:`.UserCommand`.
774
775 .. versionadded:: 2.0
776
777 Returns
778 --------
779 Callable[..., :class:`UserCommand`]
780 A decorator that converts the provided method into a :class:`.UserCommand`, adds it to the bot,
781 then returns it.
782 """
783 return self.application_command(cls=UserCommand, **kwargs)
784
785 def message_command(self, **kwargs):
786 """A shortcut decorator that invokes :func:`command` and adds it to
787 the internal command list via :meth:`add_application_command`.
788 This shortcut is made specifically for :class:`.MessageCommand`.
789
790 .. versionadded:: 2.0
791
792 Returns
793 --------
794 Callable[..., :class:`MessageCommand`]
795 A decorator that converts the provided method into a :class:`.MessageCommand`, adds it to the bot,
796 then returns it.
797 """
798 return self.application_command(cls=MessageCommand, **kwargs)
799
800 def application_command(self, **kwargs):
801 """A shortcut decorator that invokes :func:`command` and adds it to
802 the internal command list via :meth:`~.Bot.add_application_command`.
803
804 .. versionadded:: 2.0
805
806 Returns
807 --------
808 Callable[..., :class:`ApplicationCommand`]
809 A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,
810 then returns it.
811 """
812
813 def decorator(func) -> ApplicationCommand:
814 result = command(**kwargs)(func)
815 self.add_application_command(result)
816 return result
817
818 return decorator
819
820 def command(self, **kwargs):
821 """An alias for :meth:`application_command`.
822
823 .. note::
824
825 This decorator is overridden by :class:`discord.ext.commands.Bot`.
826
827 .. versionadded:: 2.0
828
829 Returns
830 --------
831 Callable[..., :class:`ApplicationCommand`]
832 A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,
833 then returns it.
834 """
835 return self.application_command(**kwargs)
836
837 def create_group(
838 self, name: str, description: Optional[str] = None, guild_ids: Optional[List[int]] = None, **kwargs
839 ) -> SlashCommandGroup:
840 """A shortcut method that creates a slash command group with no subcommands and adds it to the internal
841 command list via :meth:`add_application_command`.
842
843 .. versionadded:: 2.0
844
845 Parameters
846 ----------
847 name: :class:`str`
848 The name of the group to create.
849 description: Optional[:class:`str`]
850 The description of the group to create.
851 guild_ids: Optional[List[:class:`int`]]
852 A list of the IDs of each guild this group should be added to, making it a guild command.
853 This will be a global command if ``None`` is passed.
854 kwargs:
855 Any additional keyword arguments to pass to :class:`.SlashCommandGroup`.
856
857 Returns
858 --------
859 SlashCommandGroup
860 The slash command group that was created.
861 """
862 description = description or "No description provided."
863 group = SlashCommandGroup(name, description, guild_ids, **kwargs)
864 self.add_application_command(group)
865 return group
866
867 def group(
868 self,
869 name: Optional[str] = None,
870 description: Optional[str] = None,
871 guild_ids: Optional[List[int]] = None,
872 ) -> Callable[[Type[SlashCommandGroup]], SlashCommandGroup]:
873 """A shortcut decorator that initializes the provided subclass of :class:`.SlashCommandGroup`
874 and adds it to the internal command list via :meth:`add_application_command`.
875
876 .. versionadded:: 2.0
877
878 Parameters
879 ----------
880 name: Optional[:class:`str`]
881 The name of the group to create. This will resolve to the name of the decorated class if ``None`` is passed.
882 description: Optional[:class:`str`]
883 The description of the group to create.
884 guild_ids: Optional[List[:class:`int`]]
885 A list of the IDs of each guild this group should be added to, making it a guild command.
886 This will be a global command if ``None`` is passed.
887
888 Returns
889 --------
890 Callable[[Type[SlashCommandGroup]], SlashCommandGroup]
891 The slash command group that was created.
892 """
893
894 def inner(cls: Type[SlashCommandGroup]) -> SlashCommandGroup:
895 group = cls(
896 name or cls.__name__,
897 (
898 description or inspect.cleandoc(cls.__doc__).splitlines()[0]
899 if cls.__doc__ is not None
900 else "No description provided"
901 ),
902 guild_ids=guild_ids,
903 )
904 self.add_application_command(group)
905 return group
906
907 return inner
908
909 slash_group = group
910
911 def walk_application_commands(self) -> Generator[ApplicationCommand, None, None]:
912 """An iterator that recursively walks through all application commands and subcommands.
913
914 Yields
915 ------
916 :class:`.ApplicationCommand`
917 An application command from the internal list of application commands.
918 """
919 for command in self.application_commands:
920 if isinstance(command, SlashCommandGroup):
921 yield from command.walk_commands()
922 yield command
923
924 async def get_application_context(self, interaction: Interaction, cls=None) -> ApplicationContext:
925 r"""|coro|
926
927 Returns the invocation context from the interaction.
928
929 This is a more low-level counter-part for :meth:`.process_application_commands`
930 to allow users more fine-grained control over the processing.
931
932 Parameters
933 -----------
934 interaction: :class:`discord.Interaction`
935 The interaction to get the invocation context from.
936 cls
937 The factory class that will be used to create the context.
938 By default, this is :class:`.ApplicationContext`. Should a custom
939 class be provided, it must be similar enough to
940 :class:`.ApplicationContext`\'s interface.
941
942 Returns
943 --------
944 :class:`.ApplicationContext`
945 The invocation context. The type of this can change via the
946 ``cls`` parameter.
947 """
948 if cls is None:
949 cls = ApplicationContext
950 return cls(self, interaction)
951
952 async def get_autocomplete_context(self, interaction: Interaction, cls=None) -> AutocompleteContext:
953 r"""|coro|
954
955 Returns the autocomplete context from the interaction.
956
957 This is a more low-level counter-part for :meth:`.process_application_commands`
958 to allow users more fine-grained control over the processing.
959
960 Parameters
961 -----------
962 interaction: :class:`discord.Interaction`
963 The interaction to get the invocation context from.
964 cls
965 The factory class that will be used to create the context.
966 By default, this is :class:`.AutocompleteContext`. Should a custom
967 class be provided, it must be similar enough to
968 :class:`.AutocompleteContext`\'s interface.
969
970 Returns
971 --------
972 :class:`.AutocompleteContext`
973 The autocomplete context. The type of this can change via the
974 ``cls`` parameter.
975 """
976 if cls is None:
977 cls = AutocompleteContext
978 return cls(self, interaction)
979
980 async def invoke_application_command(self, ctx: ApplicationContext) -> None:
981 """|coro|
982
983 Invokes the application command given under the invocation
984 context and handles all the internal event dispatch mechanisms.
985
986 Parameters
987 -----------
988 ctx: :class:`.ApplicationCommand`
989 The invocation context to invoke.
990 """
991 self._bot.dispatch("application_command", ctx)
992 try:
993 if await self._bot.can_run(ctx, call_once=True):
994 await ctx.command.invoke(ctx)
995 else:
996 raise CheckFailure("The global check once functions failed.")
997 except DiscordException as exc:
998 await ctx.command.dispatch_error(ctx, exc)
999 else:
1000 self._bot.dispatch("application_command_completion", ctx)
1001
1002 @property
1003 @abstractmethod
1004 def _bot(self) -> Union["Bot", "AutoShardedBot"]:
1005 ...
1006
1007
1008 class BotBase(ApplicationCommandMixin, CogMixin, ABC):
1009 _supports_prefixed_commands = False
1010
1011 def __init__(self, description=None, *args, **options):
1012 super().__init__(*args, **options)
1013 self.extra_events = {} # TYPE: Dict[str, List[CoroFunc]]
1014 self.__cogs = {} # TYPE: Dict[str, Cog]
1015 self.__extensions = {} # TYPE: Dict[str, types.ModuleType]
1016 self._checks = [] # TYPE: List[Check]
1017 self._check_once = []
1018 self._before_invoke = None
1019 self._after_invoke = None
1020 self.description = inspect.cleandoc(description) if description else ""
1021 self.owner_id = options.get("owner_id")
1022 self.owner_ids = options.get("owner_ids", set())
1023 self.auto_sync_commands = options.get("auto_sync_commands", True)
1024
1025 self.debug_guilds = options.pop("debug_guilds", None)
1026
1027 if self.owner_id and self.owner_ids:
1028 raise TypeError("Both owner_id and owner_ids are set.")
1029
1030 if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):
1031 raise TypeError(f"owner_ids must be a collection not {self.owner_ids.__class__!r}")
1032
1033 self._checks = []
1034 self._check_once = []
1035 self._before_invoke = None
1036 self._after_invoke = None
1037
1038 async def on_connect(self):
1039 if self.auto_sync_commands:
1040 await self.sync_commands()
1041
1042 async def on_interaction(self, interaction):
1043 await self.process_application_commands(interaction)
1044
1045 async def on_application_command_error(self, context: ApplicationContext, exception: DiscordException) -> None:
1046 """|coro|
1047
1048 The default command error handler provided by the bot.
1049
1050 By default, this prints to :data:`sys.stderr` however it could be
1051 overridden to have a different implementation.
1052
1053 This only fires if you do not specify any listeners for command error.
1054 """
1055 if self.extra_events.get("on_application_command_error", None):
1056 return
1057
1058 command = context.command
1059 if command and command.has_error_handler():
1060 return
1061
1062 cog = context.cog
1063 if cog and cog.has_error_handler():
1064 return
1065
1066 print(f"Ignoring exception in command {context.command}:", file=sys.stderr)
1067 traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)
1068
1069 # global check registration
1070 # TODO: Remove these from commands.Bot
1071
1072 def check(self, func):
1073 """A decorator that adds a global check to the bot. A global check is similar to a :func:`.check` that is
1074 applied on a per-command basis except it is run before any command checks have been verified and applies to
1075 every command the bot has.
1076
1077 .. note::
1078
1079 This function can either be a regular function or a coroutine. Similar to a command :func:`.check`, this
1080 takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from
1081 :exc:`.ApplicationCommandError`.
1082
1083 Example
1084 ---------
1085 .. code-block:: python3
1086
1087 @bot.check
1088 def check_commands(ctx):
1089 return ctx.command.qualified_name in allowed_commands
1090
1091 """
1092 # T was used instead of Check to ensure the type matches on return
1093 self.add_check(func) # type: ignore
1094 return func
1095
1096 def add_check(self, func, *, call_once: bool = False) -> None:
1097 """Adds a global check to the bot. This is the non-decorator interface to :meth:`.check` and
1098 :meth:`.check_once`.
1099
1100 Parameters
1101 -----------
1102 func
1103 The function that was used as a global check.
1104 call_once: :class:`bool`
1105 If the function should only be called once per :meth:`.Bot.invoke` call.
1106
1107 """
1108
1109 if call_once:
1110 self._check_once.append(func)
1111 else:
1112 self._checks.append(func)
1113
1114 def remove_check(self, func, *, call_once: bool = False) -> None:
1115 """Removes a global check from the bot.
1116 This function is idempotent and will not raise an exception
1117 if the function is not in the global checks.
1118
1119 Parameters
1120 -----------
1121 func
1122 The function to remove from the global checks.
1123 call_once: :class:`bool`
1124 If the function was added with ``call_once=True`` in
1125 the :meth:`.Bot.add_check` call or using :meth:`.check_once`.
1126
1127 """
1128 checks = self._check_once if call_once else self._checks
1129
1130 try:
1131 checks.remove(func)
1132 except ValueError:
1133 pass
1134
1135 def check_once(self, func):
1136 """A decorator that adds a "call once" global check to the bot. Unlike regular global checks, this one is called
1137 only once per :meth:`.Bot.invoke` call. Regular global checks are called whenever a command is called or
1138 :meth:`.Command.can_run` is called. This type of check bypasses that and ensures that it's called only once,
1139 even inside the default help command.
1140
1141 .. note::
1142
1143 When using this function the :class:`.Context` sent to a group subcommand may only parse the parent command
1144 and not the subcommands due to it being invoked once per :meth:`.Bot.invoke` call.
1145
1146 .. note::
1147
1148 This function can either be a regular function or a coroutine. Similar to a command :func:`.check`,
1149 this takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from
1150 :exc:`.ApplicationCommandError`.
1151
1152 Example
1153 ---------
1154 .. code-block:: python3
1155
1156 @bot.check_once
1157 def whitelist(ctx):
1158 return ctx.message.author.id in my_whitelist
1159
1160 """
1161 self.add_check(func, call_once=True)
1162 return func
1163
1164 async def can_run(self, ctx: ApplicationContext, *, call_once: bool = False) -> bool:
1165 data = self._check_once if call_once else self._checks
1166
1167 if not data:
1168 return True
1169
1170 # type-checker doesn't distinguish between functions and methods
1171 return await async_all(f(ctx) for f in data) # type: ignore
1172
1173 # listener registration
1174
1175 def add_listener(self, func: CoroFunc, name: str = MISSING) -> None:
1176 """The non-decorator alternative to :meth:`.listen`.
1177
1178 Parameters
1179 -----------
1180 func: :ref:`coroutine <coroutine>`
1181 The function to call.
1182 name: :class:`str`
1183 The name of the event to listen for. Defaults to ``func.__name__``.
1184
1185 Example
1186 --------
1187
1188 .. code-block:: python3
1189
1190 async def on_ready(): pass
1191 async def my_message(message): pass
1192
1193 bot.add_listener(on_ready)
1194 bot.add_listener(my_message, 'on_message')
1195 """
1196 name = func.__name__ if name is MISSING else name
1197
1198 if not asyncio.iscoroutinefunction(func):
1199 raise TypeError("Listeners must be coroutines")
1200
1201 if name in self.extra_events:
1202 self.extra_events[name].append(func)
1203 else:
1204 self.extra_events[name] = [func]
1205
1206 def remove_listener(self, func: CoroFunc, name: str = MISSING) -> None:
1207 """Removes a listener from the pool of listeners.
1208
1209 Parameters
1210 -----------
1211 func
1212 The function that was used as a listener to remove.
1213 name: :class:`str`
1214 The name of the event we want to remove. Defaults to
1215 ``func.__name__``.
1216 """
1217
1218 name = func.__name__ if name is MISSING else name
1219
1220 if name in self.extra_events:
1221 try:
1222 self.extra_events[name].remove(func)
1223 except ValueError:
1224 pass
1225
1226 def listen(self, name: str = MISSING) -> Callable[[CFT], CFT]:
1227 """A decorator that registers another function as an external
1228 event listener. Basically this allows you to listen to multiple
1229 events from different places, e.g. :func:`.on_ready`
1230
1231 The functions being listened to must be a :ref:`coroutine <coroutine>`.
1232
1233 Example
1234 --------
1235
1236 .. code-block:: python3
1237
1238 @bot.listen()
1239 async def on_message(message):
1240 print('one')
1241
1242 # in some other file...
1243
1244 @bot.listen('on_message')
1245 async def my_message(message):
1246 print('two')
1247
1248 Would print one and two in an unspecified order.
1249
1250 Raises
1251 -------
1252 TypeError
1253 The function being listened to is not a coroutine.
1254 """
1255
1256 def decorator(func: CFT) -> CFT:
1257 self.add_listener(func, name)
1258 return func
1259
1260 return decorator
1261
1262 def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> None:
1263 # super() will resolve to Client
1264 super().dispatch(event_name, *args, **kwargs) # type: ignore
1265 ev = f"on_{event_name}"
1266 for event in self.extra_events.get(ev, []):
1267 self._schedule_event(event, ev, *args, **kwargs) # type: ignore
1268
1269 def before_invoke(self, coro):
1270 """A decorator that registers a coroutine as a pre-invoke hook.
1271 A pre-invoke hook is called directly before the command is
1272 called. This makes it a useful function to set up database
1273 connections or any type of setup required.
1274 This pre-invoke hook takes a sole parameter, a :class:`.Context`.
1275
1276 .. note::
1277
1278 The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are
1279 only called if all checks and argument parsing procedures pass
1280 without error. If any check or argument parsing procedures fail
1281 then the hooks are not called.
1282
1283 Parameters
1284 -----------
1285 coro: :ref:`coroutine <coroutine>`
1286 The coroutine to register as the pre-invoke hook.
1287
1288 Raises
1289 -------
1290 TypeError
1291 The coroutine passed is not actually a coroutine.
1292 """
1293 if not asyncio.iscoroutinefunction(coro):
1294 raise TypeError("The pre-invoke hook must be a coroutine.")
1295
1296 self._before_invoke = coro
1297 return coro
1298
1299 def after_invoke(self, coro):
1300 r"""A decorator that registers a coroutine as a post-invoke hook.
1301 A post-invoke hook is called directly after the command is
1302 called. This makes it a useful function to clean up database
1303 connections or any type of clean-up required.
1304 This post-invoke hook takes a sole parameter, a :class:`.Context`.
1305
1306 .. note::
1307
1308 Similar to :meth:`~.Bot.before_invoke`\, this is not called unless
1309 checks and argument parsing procedures succeed. This hook is,
1310 however, **always** called regardless of the internal command
1311 callback raising an error (i.e. :exc:`.CommandInvokeError`\).
1312 This makes it ideal for clean-up scenarios.
1313
1314 Parameters
1315 -----------
1316 coro: :ref:`coroutine <coroutine>`
1317 The coroutine to register as the post-invoke hook.
1318
1319 Raises
1320 -------
1321 TypeError
1322 The coroutine passed is not actually a coroutine.
1323
1324 """
1325 if not asyncio.iscoroutinefunction(coro):
1326 raise TypeError("The post-invoke hook must be a coroutine.")
1327
1328 self._after_invoke = coro
1329 return coro
1330
1331 async def is_owner(self, user: User) -> bool:
1332 """|coro|
1333
1334 Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of
1335 this bot.
1336
1337 If an :attr:`owner_id` is not set, it is fetched automatically
1338 through the use of :meth:`~.Bot.application_info`.
1339
1340 .. versionchanged:: 1.3
1341 The function also checks if the application is team-owned if
1342 :attr:`owner_ids` is not set.
1343
1344 Parameters
1345 -----------
1346 user: :class:`.abc.User`
1347 The user to check for.
1348
1349 Returns
1350 --------
1351 :class:`bool`
1352 Whether the user is the owner.
1353 """
1354
1355 if self.owner_id:
1356 return user.id == self.owner_id
1357 elif self.owner_ids:
1358 return user.id in self.owner_ids
1359 else:
1360 app = await self.application_info() # type: ignore
1361 if app.team:
1362 self.owner_ids = ids = {m.id for m in app.team.members}
1363 return user.id in ids
1364 else:
1365 self.owner_id = owner_id = app.owner.id
1366 return user.id == owner_id
1367
1368
1369 class Bot(BotBase, Client):
1370 """Represents a discord bot.
1371
1372 This class is a subclass of :class:`discord.Client` and as a result
1373 anything that you can do with a :class:`discord.Client` you can do with
1374 this bot.
1375
1376 This class also subclasses ``ApplicationCommandMixin`` to provide the functionality
1377 to manage commands.
1378
1379 .. versionadded:: 2.0
1380
1381 Attributes
1382 -----------
1383 description: :class:`str`
1384 The content prefixed into the default help message.
1385 owner_id: Optional[:class:`int`]
1386 The user ID that owns the bot. If this is not set and is then queried via
1387 :meth:`.is_owner` then it is fetched automatically using
1388 :meth:`~.Bot.application_info`.
1389 owner_ids: Optional[Collection[:class:`int`]]
1390 The user IDs that own the bot. This is similar to :attr:`owner_id`.
1391 If this is not set and the application is team based, then it is
1392 fetched automatically using :meth:`~.Bot.application_info`.
1393 For performance reasons it is recommended to use a :class:`set`
1394 for the collection. You cannot set both ``owner_id`` and ``owner_ids``.
1395
1396 .. versionadded:: 1.3
1397 debug_guilds: Optional[List[:class:`int`]]
1398 Guild IDs of guilds to use for testing commands.
1399 The bot will not create any global commands if debug guild IDs are passed.
1400
1401 .. versionadded:: 2.0
1402 auto_sync_commands: :class:`bool`
1403 Whether to automatically sync slash commands. This will call sync_commands in on_connect, and in
1404 :attr:`.process_application_commands` if the command is not found. Defaults to ``True``.
1405
1406 .. versionadded:: 2.0
1407 """
1408
1409 @property
1410 def _bot(self) -> "Bot":
1411 return self
1412
1413
1414 class AutoShardedBot(BotBase, AutoShardedClient):
1415 """This is similar to :class:`.Bot` except that it is inherited from
1416 :class:`discord.AutoShardedClient` instead.
1417
1418 .. versionadded:: 2.0
1419 """
1420
1421 @property
1422 def _bot(self) -> "AutoShardedBot":
1423 return self
1424
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/discord/bot.py b/discord/bot.py
--- a/discord/bot.py
+++ b/discord/bot.py
@@ -711,7 +711,7 @@
try:
command = self._application_commands[interaction.data["id"]]
except KeyError:
- for cmd in self.application_commands:
+ for cmd in self.application_commands + self.pending_application_commands:
guild_id = interaction.data.get("guild_id")
if guild_id:
guild_id = int(guild_id)
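
The patch above widens the fallback lookup in `process_application_commands`: when the interaction's command id is not in the id-keyed cache, the name/guild matching loop now searches `self.pending_application_commands` as well as `self.application_commands`, so a command that was added locally but not yet synced can still be matched without another `sync_commands` round trip. The sketch below is a minimal, self-contained illustration of that lookup change; the `Command` and `CommandCache` names are hypothetical and this is not the py-cord implementation.

```python
# Standalone illustration (hypothetical names, not py-cord's API) of why searching
# both the registered and the pending command pools avoids the extra sync_commands call.
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Command:
    name: str
    guild_ids: Optional[List[int]] = None  # None means a global command


@dataclass
class CommandCache:
    registered: List[Command] = field(default_factory=list)  # already synced with Discord
    pending: List[Command] = field(default_factory=list)     # added locally, not yet synced

    def find(self, name: str, guild_id: Optional[int]) -> Optional[Command]:
        # The one-line fix: iterate over registered + pending instead of registered only.
        for cmd in self.registered + self.pending:
            if cmd.name != name:
                continue
            if cmd.guild_ids is None or guild_id in cmd.guild_ids:
                return cmd
        return None  # caller would then dispatch unknown_application_command or re-sync


# A command that was added but never synced is still found, so no sync_commands call is needed.
cache = CommandCache(pending=[Command("test")])
assert cache.find("test", guild_id=None) is not None
```

In the actual patch the same idea is applied inside the existing `for cmd in ...` loop, with the id-based cache (`self._application_commands`) still consulted first via the `try`/`except KeyError` shown in the diff.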
| {"golden_diff": "diff --git a/discord/bot.py b/discord/bot.py\n--- a/discord/bot.py\n+++ b/discord/bot.py\n@@ -711,7 +711,7 @@\n try:\n command = self._application_commands[interaction.data[\"id\"]]\n except KeyError:\n- for cmd in self.application_commands:\n+ for cmd in self.application_commands + self.pending_application_commands:\n guild_id = interaction.data.get(\"guild_id\")\n if guild_id:\n guild_id = int(guild_id)\n", "issue": "Registered commands not recognized without sync_commands call\n### Summary\r\n\r\nPreviously registered commands are not pulled from the internal cache when an unknown command is recieved. Instead, a call to sync_commands is made every time.\r\n\r\n### Reproduction Steps\r\n\r\n1. Run MRC Part 1\r\n2. Run MRC Part 2\r\n3. Use the `/test` command twice\r\n4. Observe results\r\n\r\n\r\n### Minimal Reproducible Code\r\n\r\n```python\r\nimport discord\r\nimport logging\r\n\r\nfrom discord.ext import commands\r\n\r\nbot = commands.Bot(command_prefix=\",\")\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(\"ready\")\r\n\r\n\r\[email protected]_command(name=\"test\")\r\nasync def _test(ctx):\r\n await ctx.respond(\"hello world\")\r\n\r\n \r\nbot.run()\r\n```\r\nPart 2: \r\n```python\r\nimport discord\r\nimport logging\r\n\r\nfrom discord.ext import commands\r\n\r\nbot = commands.Bot(command_prefix=\",\")\r\n\r\nlogging.basicConfig(level=\"DEBUG\")\r\n\r\[email protected]\r\nasync def on_ready():\r\n print(\"ready\")\r\n\r\[email protected]\r\nasync def on_connect():\r\n print(\"connected\")\r\n print(\"skipping call to sync_commands\")\r\n\r\n\r\[email protected]_command(name=\"test\")\r\nasync def _test(ctx):\r\n await ctx.respond(\"hello world\")\r\n\r\n \r\nbot.run(\"\")\r\n```\r\n\r\n\r\n### Expected Results\r\n\r\nCommand should be pulled from the internal cache without a call to `sync_commands`\r\n\r\n### Actual Results\r\n\r\n`sync_commands` is called.\r\n\r\n### Intents\r\n\r\nAny\r\n\r\n### System Information\r\n\r\n- Python v3.8.9-finalv\r\n- py-cord v2.1.0-final\r\n- aiohttp v3.8.1\r\n- system info: Darwin 21.5.0 Darwin Kernel Version 21.5.0: Tue Apr 26 21:08:29 PDT 2022; root:xnu-8020.121.3~4/RELEASE_ARM64_T8101\r\n\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the open issues for duplicates.\r\n- [X] I have shown the entire traceback, if possible.\r\n- [X] I have removed my token from display, if visible.\r\n\r\n### Additional Context\r\n\r\n_No response_\n", "before_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport collections\nimport copy\nimport inspect\nimport logging\nimport sys\nimport traceback\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Dict,\n Generator,\n List,\n Literal,\n Optional,\n Type,\n TypeVar,\n Union,\n)\n\nfrom .client import Client\nfrom .cog import CogMixin\nfrom .commands import (\n ApplicationCommand,\n ApplicationContext,\n AutocompleteContext,\n MessageCommand,\n SlashCommand,\n SlashCommandGroup,\n UserCommand,\n command,\n)\nfrom .enums import InteractionType\nfrom .errors import CheckFailure, DiscordException\nfrom .interactions import Interaction\nfrom .shard import AutoShardedClient\nfrom .types import interactions\nfrom .user import User\nfrom .utils import MISSING, async_all, find, get\n\nCoroFunc = Callable[..., Coroutine[Any, Any, Any]]\nCFT = TypeVar(\"CFT\", bound=CoroFunc)\n\n__all__ = (\n \"ApplicationCommandMixin\",\n \"Bot\",\n \"AutoShardedBot\",\n)\n\n_log = logging.getLogger(__name__)\n\n\nclass ApplicationCommandMixin(ABC):\n \"\"\"A mixin that implements common functionality for classes that need\n application command compatibility.\n\n Attributes\n -----------\n application_commands: :class:`dict`\n A mapping of command id string to :class:`.ApplicationCommand` objects.\n pending_application_commands: :class:`list`\n A list of commands that have been added but not yet registered. This is read-only and is modified via other\n methods.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._pending_application_commands = []\n self._application_commands = {}\n\n @property\n def all_commands(self):\n return self._application_commands\n\n @property\n def pending_application_commands(self):\n return self._pending_application_commands\n\n @property\n def commands(self) -> List[Union[ApplicationCommand, Any]]:\n commands = self.application_commands\n if self._bot._supports_prefixed_commands and hasattr(self._bot, \"prefixed_commands\"):\n commands += self._bot.prefixed_commands\n return commands\n\n @property\n def application_commands(self) -> List[ApplicationCommand]:\n return list(self._application_commands.values())\n\n def add_application_command(self, command: ApplicationCommand) -> None:\n \"\"\"Adds a :class:`.ApplicationCommand` into the internal list of commands.\n\n This is usually not called, instead the :meth:`command` or\n other shortcut decorators are used instead.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n command: :class:`.ApplicationCommand`\n The command to add.\n \"\"\"\n if isinstance(command, SlashCommand) and command.is_subcommand:\n raise TypeError(\"The provided command is a sub-command of group\")\n\n if self._bot.debug_guilds and command.guild_ids is None:\n command.guild_ids = self._bot.debug_guilds\n\n for cmd in self.pending_application_commands:\n if cmd == command:\n command.id = cmd.id\n self._application_commands[command.id] = command\n break\n self._pending_application_commands.append(command)\n\n def remove_application_command(self, command: ApplicationCommand) -> Optional[ApplicationCommand]:\n \"\"\"Remove a :class:`.ApplicationCommand` from the internal list\n of commands.\n\n .. 
versionadded:: 2.0\n\n Parameters\n -----------\n command: :class:`.ApplicationCommand`\n The command to remove.\n\n Returns\n --------\n Optional[:class:`.ApplicationCommand`]\n The command that was removed. If the name is not valid then\n ``None`` is returned instead.\n \"\"\"\n if command.id is None:\n try:\n index = self._pending_application_commands.index(command)\n except ValueError:\n return None\n return self._pending_application_commands.pop(index)\n return self._application_commands.pop(command.id, None)\n\n @property\n def get_command(self):\n \"\"\"Shortcut for :meth:`.get_application_command`.\n\n .. note::\n Overridden in :class:`ext.commands.Bot`.\n\n .. versionadded:: 2.0\n \"\"\"\n # TODO: Do something like we did in self.commands for this\n return self.get_application_command\n\n def get_application_command(\n self,\n name: str,\n guild_ids: Optional[List[int]] = None,\n type: Type[ApplicationCommand] = SlashCommand,\n ) -> Optional[ApplicationCommand]:\n \"\"\"Get a :class:`.ApplicationCommand` from the internal list\n of commands.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n name: :class:`str`\n The name of the command to get.\n guild_ids: List[:class:`int`]\n The guild ids associated to the command to get.\n type: Type[:class:`.ApplicationCommand`]\n The type of the command to get. Defaults to :class:`.SlashCommand`.\n\n Returns\n --------\n Optional[:class:`.ApplicationCommand`]\n The command that was requested. If not found, returns ``None``.\n \"\"\"\n\n for command in self._application_commands.values():\n if command.name == name and isinstance(command, type):\n if guild_ids is not None and command.guild_ids != guild_ids:\n return\n return command\n\n async def get_desynced_commands(\n self,\n guild_id: Optional[int] = None,\n prefetched: Optional[List[ApplicationCommand]] = None\n ) -> List[Dict[str, Any]]:\n \"\"\"|coro|\n\n Gets the list of commands that are desynced from discord. If ``guild_id`` is specified, it will only return\n guild commands that are desynced from said guild, else it will return global commands.\n\n .. note::\n This function is meant to be used internally, and should only be used if you want to override the default\n command registration behavior.\n\n .. versionadded:: 2.0\n\n\n Parameters\n ----------\n guild_id: Optional[:class:`int`]\n The guild id to get the desynced commands for, else global commands if unspecified.\n prefetched: Optional[List[:class:`.ApplicationCommand`]]\n If you already fetched the commands, you can pass them here to be used. Not recommended for typical usage.\n\n Returns\n -------\n List[Dict[:class:`str`, Any]]\n A list of the desynced commands. Each will come with at least the ``cmd`` and ``action`` keys, which\n respectively contain the command and the action to perform. 
Other keys may also be present depending on\n the action, including ``id``.\n \"\"\"\n\n # We can suggest the user to upsert, edit, delete, or bulk upsert the commands\n\n def _check_command(cmd: ApplicationCommand, match: Dict) -> bool:\n if isinstance(cmd, SlashCommandGroup):\n if len(cmd.subcommands) != len(match.get(\"options\", [])):\n return True\n for i, subcommand in enumerate(cmd.subcommands):\n match_ = next(\n (data for data in match[\"options\"] if data[\"name\"] == subcommand.name),\n MISSING,\n )\n if match_ is not MISSING and _check_command(subcommand, match_):\n return True\n else:\n as_dict = cmd.to_dict()\n to_check = {\n \"dm_permission\": None,\n \"default_member_permissions\": None,\n \"name\": None,\n \"description\": None,\n \"name_localizations\": None,\n \"description_localizations\": None,\n \"options\": [\n \"type\",\n \"name\",\n \"description\",\n \"autocomplete\",\n \"choices\",\n \"name_localizations\",\n \"description_localizations\",\n ],\n }\n for check, value in to_check.items():\n if type(to_check[check]) == list:\n # We need to do some falsy conversion here\n # The API considers False (autocomplete) and [] (choices) to be falsy values\n falsy_vals = (False, [])\n for opt in value:\n cmd_vals = [val.get(opt, MISSING) for val in as_dict[check]] if check in as_dict else []\n for i, val in enumerate(cmd_vals):\n if val in falsy_vals:\n cmd_vals[i] = MISSING\n if match.get(check, MISSING) is not MISSING and cmd_vals != [\n val.get(opt, MISSING) for val in match[check]\n ]:\n # We have a difference\n return True\n elif getattr(cmd, check, None) != match.get(check):\n # We have a difference\n if check == \"default_permission\" and getattr(cmd, check) is True and match.get(check) is None:\n # This is a special case\n # TODO: Remove for perms v2\n continue\n return True\n return False\n\n return_value = []\n cmds = self.pending_application_commands.copy()\n\n if guild_id is None:\n if prefetched is not None:\n registered_commands = prefetched\n else:\n registered_commands = await self._bot.http.get_global_commands(self.user.id)\n pending = [cmd for cmd in cmds if cmd.guild_ids is None]\n else:\n if prefetched is not None:\n registered_commands = prefetched\n else:\n registered_commands = await self._bot.http.get_guild_commands(self.user.id, guild_id)\n pending = [cmd for cmd in cmds if cmd.guild_ids is not None and guild_id in cmd.guild_ids]\n\n registered_commands_dict = {cmd[\"name\"]: cmd for cmd in registered_commands}\n # First let's check if the commands we have locally are the same as the ones on discord\n for cmd in pending:\n match = registered_commands_dict.get(cmd.name)\n if match is None:\n # We don't have this command registered\n return_value.append({\"command\": cmd, \"action\": \"upsert\"})\n elif _check_command(cmd, match):\n return_value.append(\n {\n \"command\": cmd,\n \"action\": \"edit\",\n \"id\": int(registered_commands_dict[cmd.name][\"id\"]),\n }\n )\n else:\n # We have this command registered but it's the same\n return_value.append({\"command\": cmd, \"action\": None, \"id\": int(match[\"id\"])})\n\n # Now let's see if there are any commands on discord that we need to delete\n for cmd, value_ in registered_commands_dict.items():\n match = get(pending, name=registered_commands_dict[cmd][\"name\"])\n if match is None:\n # We have this command registered but not in our list\n return_value.append(\n {\n \"command\": registered_commands_dict[cmd][\"name\"],\n \"id\": int(value_[\"id\"]),\n \"action\": \"delete\",\n }\n )\n\n continue\n\n 
return return_value\n\n async def register_command(\n self,\n command: ApplicationCommand,\n force: bool = True,\n guild_ids: List[int] = None,\n ) -> None:\n \"\"\"|coro|\n\n Registers a command. If the command has ``guild_ids`` set, or if the ``guild_ids`` parameter is passed,\n the command will be registered as a guild command for those guilds.\n\n Parameters\n ----------\n command: :class:`~.ApplicationCommand`\n The command to register.\n force: :class:`bool`\n Whether to force the command to be registered. If this is set to False, the command will only be registered\n if it seems to already be registered and up to date with our internal cache. Defaults to True.\n guild_ids: :class:`list`\n A list of guild ids to register the command for. If this is not set, the command's\n :attr:`ApplicationCommand.guild_ids` attribute will be used.\n\n Returns\n -------\n :class:`~.ApplicationCommand`\n The command that was registered\n \"\"\"\n # TODO: Write this\n raise RuntimeError(\"This function has not been implemented yet\")\n\n async def register_commands(\n self,\n commands: Optional[List[ApplicationCommand]] = None,\n guild_id: Optional[int] = None,\n method: Literal[\"individual\", \"bulk\", \"auto\"] = \"bulk\",\n force: bool = False,\n delete_existing: bool = True,\n ) -> List[interactions.ApplicationCommand]:\n \"\"\"|coro|\n\n Register a list of commands.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n commands: Optional[List[:class:`~.ApplicationCommand`]]\n A list of commands to register. If this is not set (``None``), then all commands will be registered.\n guild_id: Optional[int]\n If this is set, the commands will be registered as a guild command for the respective guild. If it is not\n set, the commands will be registered according to their :attr:`ApplicationCommand.guild_ids` attribute.\n method: Literal['individual', 'bulk', 'auto']\n The method to use when registering the commands. If this is set to \"individual\", then each command will be\n registered individually. If this is set to \"bulk\", then all commands will be registered in bulk. If this is\n set to \"auto\", then the method will be determined automatically. Defaults to \"bulk\".\n force: :class:`bool`\n Registers the commands regardless of the state of the command on Discord. This uses one less API call, but\n can result in hitting rate limits more often. Defaults to False.\n delete_existing: :class:`bool`\n Whether to delete existing commands that are not in the list of commands to register. 
Defaults to True.\n \"\"\"\n if commands is None:\n commands = self.pending_application_commands\n\n commands = [copy.copy(cmd) for cmd in commands]\n\n if guild_id is not None:\n for cmd in commands:\n to_rep_with = [guild_id]\n cmd.guild_ids = to_rep_with\n\n is_global = guild_id is None\n\n registered = []\n\n if is_global:\n pending = list(filter(lambda c: c.guild_ids is None, commands))\n registration_methods = {\n \"bulk\": self._bot.http.bulk_upsert_global_commands,\n \"upsert\": self._bot.http.upsert_global_command,\n \"delete\": self._bot.http.delete_global_command,\n \"edit\": self._bot.http.edit_global_command,\n }\n\n def _register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n return registration_methods[method](self._bot.user.id, *args, **kwargs)\n\n else:\n pending = list(\n filter(\n lambda c: c.guild_ids is not None and guild_id in c.guild_ids,\n commands,\n )\n )\n registration_methods = {\n \"bulk\": self._bot.http.bulk_upsert_guild_commands,\n \"upsert\": self._bot.http.upsert_guild_command,\n \"delete\": self._bot.http.delete_guild_command,\n \"edit\": self._bot.http.edit_guild_command,\n }\n\n def _register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n return registration_methods[method](self._bot.user.id, guild_id, *args, **kwargs)\n\n def register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n if kwargs.pop(\"_log\", True):\n if method == \"bulk\":\n _log.debug(f\"Bulk updating commands {[c['name'] for c in args[0]]} for guild {guild_id}\")\n elif method == \"upsert\":\n _log.debug(f\"Creating command {cmd['name']} for guild {guild_id}\")\n elif method == \"edit\":\n _log.debug(f\"Editing command {cmd['name']} for guild {guild_id}\")\n elif method == \"delete\":\n _log.debug(f\"Deleting command {cmd['name']} for guild {guild_id}\")\n return _register(method, *args, **kwargs)\n\n pending_actions = []\n\n if not force:\n if guild_id is None:\n prefetched_commands = await self.http.get_global_commands(self.user.id)\n else:\n prefetched_commands = await self.http.get_guild_commands(self.user.id, guild_id)\n desynced = await self.get_desynced_commands(guild_id=guild_id, prefetched=prefetched_commands)\n\n for cmd in desynced:\n if cmd[\"action\"] == \"delete\":\n pending_actions.append(\n {\n \"action\": \"delete\" if delete_existing else None,\n \"command\": collections.namedtuple(\"Command\", [\"name\"])(name=cmd[\"command\"]),\n \"id\": cmd[\"id\"],\n }\n )\n continue\n # We can assume the command item is a command, since it's only a string if action is delete\n match = get(pending, name=cmd[\"command\"].name, type=cmd[\"command\"].type)\n if match is None:\n continue\n if cmd[\"action\"] == \"edit\":\n pending_actions.append(\n {\n \"action\": \"edit\",\n \"command\": match,\n \"id\": cmd[\"id\"],\n }\n )\n elif cmd[\"action\"] == \"upsert\":\n pending_actions.append(\n {\n \"action\": \"upsert\",\n \"command\": match,\n }\n )\n elif cmd[\"action\"] is None:\n pending_actions.append(\n {\n \"action\": None,\n \"command\": match,\n }\n )\n else:\n raise ValueError(f\"Unknown action: {cmd['action']}\")\n filtered_no_action = list(filter(lambda c: c[\"action\"] is not None, pending_actions))\n filtered_deleted = list(filter(lambda a: a[\"action\"] != \"delete\", pending_actions))\n if method == \"bulk\" or (method == \"auto\" and len(filtered_deleted) == len(pending)):\n # Either the method is bulk or all the commands need to be modified, so we can just do a bulk 
upsert\n data = [cmd[\"command\"].to_dict() for cmd in filtered_deleted]\n # If there's nothing to update, don't bother\n if len(filtered_no_action) == 0:\n _log.debug(\"Skipping bulk command update: Commands are up to date\")\n registered = prefetched_commands\n else:\n _log.debug(\n f\"Bulk updating commands %s for guild %s\",\n {c[\"command\"].name: c[\"action\"] for c in pending_actions},\n guild_id,\n )\n registered = await register(\"bulk\", data, _log=False)\n else:\n if not filtered_no_action:\n registered = []\n for cmd in filtered_no_action:\n if cmd[\"action\"] == \"delete\":\n await register(\"delete\", cmd[\"command\"])\n continue\n if cmd[\"action\"] == \"edit\":\n registered.append(await register(\"edit\", cmd[\"id\"], cmd[\"command\"].to_dict()))\n elif cmd[\"action\"] == \"upsert\":\n registered.append(await register(\"upsert\", cmd[\"command\"].to_dict()))\n else:\n raise ValueError(f\"Unknown action: {cmd['action']}\")\n\n # TODO: Our lists dont work sometimes, see if that can be fixed so we can avoid this second API call\n if method != \"bulk\":\n if guild_id is None:\n registered = await self._bot.http.get_global_commands(self._bot.user.id)\n else:\n registered = await self._bot.http.get_guild_commands(self._bot.user.id, guild_id)\n else:\n data = [cmd.to_dict() for cmd in pending]\n registered = await register(\"bulk\", data)\n\n for i in registered:\n cmd = get(\n self.pending_application_commands,\n name=i[\"name\"],\n type=i[\"type\"],\n )\n if not cmd:\n raise ValueError(f\"Registered command {i['name']}, type {i['type']} not found in pending commands\")\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n return registered\n\n async def sync_commands(\n self,\n commands: Optional[List[ApplicationCommand]] = None,\n method: Literal[\"individual\", \"bulk\", \"auto\"] = \"bulk\",\n force: bool = False,\n guild_ids: Optional[List[int]] = None,\n register_guild_commands: bool = True,\n check_guilds: Optional[List[int]] = [],\n delete_existing: bool = True,\n ) -> None:\n \"\"\"|coro|\n\n Registers all commands that have been added through :meth:`.add_application_command`. This method cleans up all\n commands over the API and should sync them with the internal cache of commands. It attempts to register the\n commands in the most efficient way possible, unless ``force`` is set to ``True``, in which case it will always\n register all commands.\n\n By default, this coroutine is called inside the :func:`.on_connect` event. If you choose to override the\n :func:`.on_connect` event, then you should invoke this coroutine as well.\n\n .. note::\n If you remove all guild commands from a particular guild, the library may not be able to detect and update\n the commands accordingly, as it would have to individually check for each guild. To force the library to\n unregister a guild's commands, call this function with ``commands=[]`` and ``guild_ids=[guild_id]``.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n commands: Optional[List[:class:`~.ApplicationCommand`]]\n A list of commands to register. If this is not set (None), then all commands will be registered.\n method: Literal['individual', 'bulk', 'auto']\n The method to use when registering the commands. If this is set to \"individual\", then each command will be\n registered individually. If this is set to \"bulk\", then all commands will be registered in bulk. If this is\n set to \"auto\", then the method will be determined automatically. 
Defaults to \"bulk\".\n force: :class:`bool`\n Registers the commands regardless of the state of the command on Discord. This uses one less API call, but\n can result in hitting rate limits more often. Defaults to False.\n guild_ids: Optional[List[:class:`int`]]\n A list of guild ids to register the commands for. If this is not set, the commands'\n :attr:`~.ApplicationCommand.guild_ids` attribute will be used.\n register_guild_commands: :class:`bool`\n Whether to register guild commands. Defaults to True.\n check_guilds: Optional[List[:class:`int`]]\n A list of guilds ids to check for commands to unregister, since the bot would otherwise have to check all\n guilds. Unlike ``guild_ids``, this does not alter the commands' :attr:`~.ApplicationCommand.guild_ids`\n attribute, instead it adds the guild ids to a list of guilds to sync commands for. If\n ``register_guild_commands`` is set to False, then this parameter is ignored.\n delete_existing: :class:`bool`\n Whether to delete existing commands that are not in the list of commands to register. Defaults to True.\n \"\"\"\n\n check_guilds = list(set((check_guilds or []) + (self.debug_guilds or [])))\n\n if commands is None:\n commands = self.pending_application_commands\n\n if guild_ids is not None:\n for cmd in commands:\n cmd.guild_ids = guild_ids\n\n global_commands = [cmd for cmd in commands if cmd.guild_ids is None]\n registered_commands = await self.register_commands(\n global_commands, method=method, force=force, delete_existing=delete_existing\n )\n\n registered_guild_commands = {}\n\n if register_guild_commands:\n cmd_guild_ids = []\n for cmd in commands:\n if cmd.guild_ids is not None:\n cmd_guild_ids.extend(cmd.guild_ids)\n if check_guilds is not None:\n cmd_guild_ids.extend(check_guilds)\n for guild_id in set(cmd_guild_ids):\n guild_commands = [cmd for cmd in commands if cmd.guild_ids is not None and guild_id in cmd.guild_ids]\n registered_guild_commands[guild_id] = await self.register_commands(\n guild_commands, guild_id=guild_id, method=method, force=force, delete_existing=delete_existing\n )\n\n for i in registered_commands:\n cmd = get(\n self.pending_application_commands,\n name=i[\"name\"],\n guild_ids=None,\n type=i[\"type\"],\n )\n if cmd:\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n for guild_id, commands in registered_guild_commands.items():\n for i in commands:\n cmd = find(\n lambda cmd: cmd.name == i[\"name\"]\n and cmd.type == i[\"type\"]\n and cmd.guild_ids is not None\n and int(i[\"guild_id\"]) in cmd.guild_ids,\n self.pending_application_commands,\n )\n if not cmd:\n # command has not been added yet\n continue\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n async def process_application_commands(self, interaction: Interaction, auto_sync: bool = None) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_interaction`\n event. If you choose to override the :func:`.on_interaction` event, then\n you should invoke this coroutine as well.\n\n This function finds a registered command matching the interaction id from\n application commands and invokes it. If no matching command was \n found, it replies to the interaction with a default message.\n\n .. 
versionadded:: 2.0\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to process\n auto_sync: :class:`bool`\n Whether to automatically sync and unregister the command if it is not found in the internal cache. This will\n invoke the :meth:`~.Bot.sync_commands` method on the context of the command, either globally or per-guild,\n based on the type of the command, respectively. Defaults to :attr:`.Bot.auto_sync_commands`.\n \"\"\"\n if auto_sync is None:\n auto_sync = self._bot.auto_sync_commands\n if interaction.type not in (\n InteractionType.application_command,\n InteractionType.auto_complete,\n ):\n return\n\n try:\n command = self._application_commands[interaction.data[\"id\"]]\n except KeyError:\n for cmd in self.application_commands:\n guild_id = interaction.data.get(\"guild_id\")\n if guild_id:\n guild_id = int(guild_id)\n if cmd.name == interaction.data[\"name\"] and (\n guild_id == cmd.guild_ids or (isinstance(cmd.guild_ids, list) and guild_id in cmd.guild_ids)\n ):\n command = cmd\n break\n else:\n if auto_sync:\n guild_id = interaction.data.get(\"guild_id\")\n if guild_id is None:\n await self.sync_commands()\n else:\n\n await self.sync_commands(check_guilds=[guild_id])\n return self._bot.dispatch(\"unknown_application_command\", interaction)\n\n if interaction.type is InteractionType.auto_complete:\n return self.dispatch(\"application_command_auto_complete\", interaction, command)\n\n ctx = await self.get_application_context(interaction)\n ctx.command = command\n await self.invoke_application_command(ctx)\n\n async def on_application_command_auto_complete(self, interaction: Interaction, command: ApplicationCommand) -> None:\n async def callback() -> None:\n ctx = await self.get_autocomplete_context(interaction)\n ctx.command = command\n return await command.invoke_autocomplete_callback(ctx)\n\n autocomplete_task = self.loop.create_task(callback())\n try:\n await self.wait_for(\"application_command_auto_complete\", check=lambda i, c: c == command, timeout=3)\n except asyncio.TimeoutError:\n return\n else:\n if not autocomplete_task.done():\n autocomplete_task.cancel()\n\n def slash_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.SlashCommand`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`SlashCommand`]\n A decorator that converts the provided method into a :class:`.SlashCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=SlashCommand, **kwargs)\n\n def user_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.UserCommand`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`UserCommand`]\n A decorator that converts the provided method into a :class:`.UserCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=UserCommand, **kwargs)\n\n def message_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.MessageCommand`.\n\n .. 
versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`MessageCommand`]\n A decorator that converts the provided method into a :class:`.MessageCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=MessageCommand, **kwargs)\n\n def application_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`~.Bot.add_application_command`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`ApplicationCommand`]\n A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n\n def decorator(func) -> ApplicationCommand:\n result = command(**kwargs)(func)\n self.add_application_command(result)\n return result\n\n return decorator\n\n def command(self, **kwargs):\n \"\"\"An alias for :meth:`application_command`.\n\n .. note::\n\n This decorator is overridden by :class:`discord.ext.commands.Bot`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`ApplicationCommand`]\n A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(**kwargs)\n\n def create_group(\n self, name: str, description: Optional[str] = None, guild_ids: Optional[List[int]] = None, **kwargs\n ) -> SlashCommandGroup:\n \"\"\"A shortcut method that creates a slash command group with no subcommands and adds it to the internal\n command list via :meth:`add_application_command`.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n name: :class:`str`\n The name of the group to create.\n description: Optional[:class:`str`]\n The description of the group to create.\n guild_ids: Optional[List[:class:`int`]]\n A list of the IDs of each guild this group should be added to, making it a guild command.\n This will be a global command if ``None`` is passed.\n kwargs:\n Any additional keyword arguments to pass to :class:`.SlashCommandGroup`.\n\n Returns\n --------\n SlashCommandGroup\n The slash command group that was created.\n \"\"\"\n description = description or \"No description provided.\"\n group = SlashCommandGroup(name, description, guild_ids, **kwargs)\n self.add_application_command(group)\n return group\n\n def group(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n guild_ids: Optional[List[int]] = None,\n ) -> Callable[[Type[SlashCommandGroup]], SlashCommandGroup]:\n \"\"\"A shortcut decorator that initializes the provided subclass of :class:`.SlashCommandGroup`\n and adds it to the internal command list via :meth:`add_application_command`.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n name: Optional[:class:`str`]\n The name of the group to create. 
This will resolve to the name of the decorated class if ``None`` is passed.\n description: Optional[:class:`str`]\n The description of the group to create.\n guild_ids: Optional[List[:class:`int`]]\n A list of the IDs of each guild this group should be added to, making it a guild command.\n This will be a global command if ``None`` is passed.\n\n Returns\n --------\n Callable[[Type[SlashCommandGroup]], SlashCommandGroup]\n The slash command group that was created.\n \"\"\"\n\n def inner(cls: Type[SlashCommandGroup]) -> SlashCommandGroup:\n group = cls(\n name or cls.__name__,\n (\n description or inspect.cleandoc(cls.__doc__).splitlines()[0]\n if cls.__doc__ is not None\n else \"No description provided\"\n ),\n guild_ids=guild_ids,\n )\n self.add_application_command(group)\n return group\n\n return inner\n\n slash_group = group\n\n def walk_application_commands(self) -> Generator[ApplicationCommand, None, None]:\n \"\"\"An iterator that recursively walks through all application commands and subcommands.\n\n Yields\n ------\n :class:`.ApplicationCommand`\n An application command from the internal list of application commands.\n \"\"\"\n for command in self.application_commands:\n if isinstance(command, SlashCommandGroup):\n yield from command.walk_commands()\n yield command\n\n async def get_application_context(self, interaction: Interaction, cls=None) -> ApplicationContext:\n r\"\"\"|coro|\n\n Returns the invocation context from the interaction.\n\n This is a more low-level counter-part for :meth:`.process_application_commands`\n to allow users more fine-grained control over the processing.\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.ApplicationContext`. Should a custom\n class be provided, it must be similar enough to\n :class:`.ApplicationContext`\\'s interface.\n\n Returns\n --------\n :class:`.ApplicationContext`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n if cls is None:\n cls = ApplicationContext\n return cls(self, interaction)\n\n async def get_autocomplete_context(self, interaction: Interaction, cls=None) -> AutocompleteContext:\n r\"\"\"|coro|\n\n Returns the autocomplete context from the interaction.\n\n This is a more low-level counter-part for :meth:`.process_application_commands`\n to allow users more fine-grained control over the processing.\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.AutocompleteContext`. Should a custom\n class be provided, it must be similar enough to\n :class:`.AutocompleteContext`\\'s interface.\n\n Returns\n --------\n :class:`.AutocompleteContext`\n The autocomplete context. 
The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n if cls is None:\n cls = AutocompleteContext\n return cls(self, interaction)\n\n async def invoke_application_command(self, ctx: ApplicationContext) -> None:\n \"\"\"|coro|\n\n Invokes the application command given under the invocation\n context and handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.ApplicationCommand`\n The invocation context to invoke.\n \"\"\"\n self._bot.dispatch(\"application_command\", ctx)\n try:\n if await self._bot.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise CheckFailure(\"The global check once functions failed.\")\n except DiscordException as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self._bot.dispatch(\"application_command_completion\", ctx)\n\n @property\n @abstractmethod\n def _bot(self) -> Union[\"Bot\", \"AutoShardedBot\"]:\n ...\n\n\nclass BotBase(ApplicationCommandMixin, CogMixin, ABC):\n _supports_prefixed_commands = False\n\n def __init__(self, description=None, *args, **options):\n super().__init__(*args, **options)\n self.extra_events = {} # TYPE: Dict[str, List[CoroFunc]]\n self.__cogs = {} # TYPE: Dict[str, Cog]\n self.__extensions = {} # TYPE: Dict[str, types.ModuleType]\n self._checks = [] # TYPE: List[Check]\n self._check_once = []\n self._before_invoke = None\n self._after_invoke = None\n self.description = inspect.cleandoc(description) if description else \"\"\n self.owner_id = options.get(\"owner_id\")\n self.owner_ids = options.get(\"owner_ids\", set())\n self.auto_sync_commands = options.get(\"auto_sync_commands\", True)\n\n self.debug_guilds = options.pop(\"debug_guilds\", None)\n\n if self.owner_id and self.owner_ids:\n raise TypeError(\"Both owner_id and owner_ids are set.\")\n\n if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):\n raise TypeError(f\"owner_ids must be a collection not {self.owner_ids.__class__!r}\")\n\n self._checks = []\n self._check_once = []\n self._before_invoke = None\n self._after_invoke = None\n\n async def on_connect(self):\n if self.auto_sync_commands:\n await self.sync_commands()\n\n async def on_interaction(self, interaction):\n await self.process_application_commands(interaction)\n\n async def on_application_command_error(self, context: ApplicationContext, exception: DiscordException) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default, this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get(\"on_application_command_error\", None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f\"Ignoring exception in command {context.command}:\", file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n # global check registration\n # TODO: Remove these from commands.Bot\n\n def check(self, func):\n \"\"\"A decorator that adds a global check to the bot. A global check is similar to a :func:`.check` that is\n applied on a per-command basis except it is run before any command checks have been verified and applies to\n every command the bot has.\n\n .. note::\n\n This function can either be a regular function or a coroutine. 
Similar to a command :func:`.check`, this\n takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from\n :exc:`.ApplicationCommandError`.\n\n Example\n ---------\n .. code-block:: python3\n\n @bot.check\n def check_commands(ctx):\n return ctx.command.qualified_name in allowed_commands\n\n \"\"\"\n # T was used instead of Check to ensure the type matches on return\n self.add_check(func) # type: ignore\n return func\n\n def add_check(self, func, *, call_once: bool = False) -> None:\n \"\"\"Adds a global check to the bot. This is the non-decorator interface to :meth:`.check` and\n :meth:`.check_once`.\n\n Parameters\n -----------\n func\n The function that was used as a global check.\n call_once: :class:`bool`\n If the function should only be called once per :meth:`.Bot.invoke` call.\n\n \"\"\"\n\n if call_once:\n self._check_once.append(func)\n else:\n self._checks.append(func)\n\n def remove_check(self, func, *, call_once: bool = False) -> None:\n \"\"\"Removes a global check from the bot.\n This function is idempotent and will not raise an exception\n if the function is not in the global checks.\n\n Parameters\n -----------\n func\n The function to remove from the global checks.\n call_once: :class:`bool`\n If the function was added with ``call_once=True`` in\n the :meth:`.Bot.add_check` call or using :meth:`.check_once`.\n\n \"\"\"\n checks = self._check_once if call_once else self._checks\n\n try:\n checks.remove(func)\n except ValueError:\n pass\n\n def check_once(self, func):\n \"\"\"A decorator that adds a \"call once\" global check to the bot. Unlike regular global checks, this one is called\n only once per :meth:`.Bot.invoke` call. Regular global checks are called whenever a command is called or\n :meth:`.Command.can_run` is called. This type of check bypasses that and ensures that it's called only once,\n even inside the default help command.\n\n .. note::\n\n When using this function the :class:`.Context` sent to a group subcommand may only parse the parent command\n and not the subcommands due to it being invoked once per :meth:`.Bot.invoke` call.\n\n .. note::\n\n This function can either be a regular function or a coroutine. Similar to a command :func:`.check`,\n this takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from\n :exc:`.ApplicationCommandError`.\n\n Example\n ---------\n .. code-block:: python3\n\n @bot.check_once\n def whitelist(ctx):\n return ctx.message.author.id in my_whitelist\n\n \"\"\"\n self.add_check(func, call_once=True)\n return func\n\n async def can_run(self, ctx: ApplicationContext, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if not data:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await async_all(f(ctx) for f in data) # type: ignore\n\n # listener registration\n\n def add_listener(self, func: CoroFunc, name: str = MISSING) -> None:\n \"\"\"The non decorator alternative to :meth:`.listen`.\n\n Parameters\n -----------\n func: :ref:`coroutine <coroutine>`\n The function to call.\n name: :class:`str`\n The name of the event to listen for. Defaults to ``func.__name__``.\n\n Example\n --------\n\n .. 
code-block:: python3\n\n async def on_ready(): pass\n async def my_message(message): pass\n\n bot.add_listener(on_ready)\n bot.add_listener(my_message, 'on_message')\n \"\"\"\n name = func.__name__ if name is MISSING else name\n\n if not asyncio.iscoroutinefunction(func):\n raise TypeError(\"Listeners must be coroutines\")\n\n if name in self.extra_events:\n self.extra_events[name].append(func)\n else:\n self.extra_events[name] = [func]\n\n def remove_listener(self, func: CoroFunc, name: str = MISSING) -> None:\n \"\"\"Removes a listener from the pool of listeners.\n\n Parameters\n -----------\n func\n The function that was used as a listener to remove.\n name: :class:`str`\n The name of the event we want to remove. Defaults to\n ``func.__name__``.\n \"\"\"\n\n name = func.__name__ if name is MISSING else name\n\n if name in self.extra_events:\n try:\n self.extra_events[name].remove(func)\n except ValueError:\n pass\n\n def listen(self, name: str = MISSING) -> Callable[[CFT], CFT]:\n \"\"\"A decorator that registers another function as an external\n event listener. Basically this allows you to listen to multiple\n events from different places e.g. such as :func:`.on_ready`\n\n The functions being listened to must be a :ref:`coroutine <coroutine>`.\n\n Example\n --------\n\n .. code-block:: python3\n\n @bot.listen()\n async def on_message(message):\n print('one')\n\n # in some other file...\n\n @bot.listen('on_message')\n async def my_message(message):\n print('two')\n\n Would print one and two in an unspecified order.\n\n Raises\n -------\n TypeError\n The function being listened to is not a coroutine.\n \"\"\"\n\n def decorator(func: CFT) -> CFT:\n self.add_listener(func, name)\n return func\n\n return decorator\n\n def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> None:\n # super() will resolve to Client\n super().dispatch(event_name, *args, **kwargs) # type: ignore\n ev = f\"on_{event_name}\"\n for event in self.extra_events.get(ev, []):\n self._schedule_event(event, ev, *args, **kwargs) # type: ignore\n\n def before_invoke(self, coro):\n \"\"\"A decorator that registers a coroutine as a pre-invoke hook.\n A pre-invoke hook is called directly before the command is\n called. This makes it a useful function to set up database\n connections or any type of set up required.\n This pre-invoke hook takes a sole parameter, a :class:`.Context`.\n\n .. note::\n\n The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are\n only called if all checks and argument parsing procedures pass\n without error. If any check or argument parsing procedures fail\n then the hooks are not called.\n\n Parameters\n -----------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register as the pre-invoke hook.\n\n Raises\n -------\n TypeError\n The coroutine passed is not actually a coroutine.\n \"\"\"\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError(\"The pre-invoke hook must be a coroutine.\")\n\n self._before_invoke = coro\n return coro\n\n def after_invoke(self, coro):\n r\"\"\"A decorator that registers a coroutine as a post-invoke hook.\n A post-invoke hook is called directly after the command is\n called. This makes it a useful function to clean-up database\n connections or any type of clean up required.\n This post-invoke hook takes a sole parameter, a :class:`.Context`.\n\n .. note::\n\n Similar to :meth:`~.Bot.before_invoke`\\, this is not called unless\n checks and argument parsing procedures succeed. 
This hook is,\n however, **always** called regardless of the internal command\n callback raising an error (i.e. :exc:`.CommandInvokeError`\\).\n This makes it ideal for clean-up scenarios.\n\n Parameters\n -----------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register as the post-invoke hook.\n\n Raises\n -------\n TypeError\n The coroutine passed is not actually a coroutine.\n\n \"\"\"\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError(\"The post-invoke hook must be a coroutine.\")\n\n self._after_invoke = coro\n return coro\n\n async def is_owner(self, user: User) -> bool:\n \"\"\"|coro|\n\n Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of\n this bot.\n\n If an :attr:`owner_id` is not set, it is fetched automatically\n through the use of :meth:`~.Bot.application_info`.\n\n .. versionchanged:: 1.3\n The function also checks if the application is team-owned if\n :attr:`owner_ids` is not set.\n\n Parameters\n -----------\n user: :class:`.abc.User`\n The user to check for.\n\n Returns\n --------\n :class:`bool`\n Whether the user is the owner.\n \"\"\"\n\n if self.owner_id:\n return user.id == self.owner_id\n elif self.owner_ids:\n return user.id in self.owner_ids\n else:\n app = await self.application_info() # type: ignore\n if app.team:\n self.owner_ids = ids = {m.id for m in app.team.members}\n return user.id in ids\n else:\n self.owner_id = owner_id = app.owner.id\n return user.id == owner_id\n\n\nclass Bot(BotBase, Client):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Client` and as a result\n anything that you can do with a :class:`discord.Client` you can do with\n this bot.\n\n This class also subclasses ``ApplicationCommandMixin`` to provide the functionality\n to manage commands.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n description: :class:`str`\n The content prefixed into the default help message.\n owner_id: Optional[:class:`int`]\n The user ID that owns the bot. If this is not set and is then queried via\n :meth:`.is_owner` then it is fetched automatically using\n :meth:`~.Bot.application_info`.\n owner_ids: Optional[Collection[:class:`int`]]\n The user IDs that owns the bot. This is similar to :attr:`owner_id`.\n If this is not set and the application is team based, then it is\n fetched automatically using :meth:`~.Bot.application_info`.\n For performance reasons it is recommended to use a :class:`set`\n for the collection. You cannot set both ``owner_id`` and ``owner_ids``.\n\n .. versionadded:: 1.3\n debug_guilds: Optional[List[:class:`int`]]\n Guild IDs of guilds to use for testing commands.\n The bot will not create any global commands if debug guild IDs are passed.\n\n .. versionadded:: 2.0\n auto_sync_commands: :class:`bool`\n Whether to automatically sync slash commands. This will call sync_commands in on_connect, and in\n :attr:`.process_application_commands` if the command is not found. Defaults to ``True``.\n\n .. versionadded:: 2.0\n \"\"\"\n\n @property\n def _bot(self) -> \"Bot\":\n return self\n\n\nclass AutoShardedBot(BotBase, AutoShardedClient):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedClient` instead.\n\n .. 
versionadded:: 2.0\n \"\"\"\n\n @property\n def _bot(self) -> \"AutoShardedBot\":\n return self\n", "path": "discord/bot.py"}], "after_files": [{"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport collections\nimport copy\nimport inspect\nimport logging\nimport sys\nimport traceback\nfrom abc import ABC, abstractmethod\nfrom typing import (\n Any,\n Callable,\n Coroutine,\n Dict,\n Generator,\n List,\n Literal,\n Optional,\n Type,\n TypeVar,\n Union,\n)\n\nfrom .client import Client\nfrom .cog import CogMixin\nfrom .commands import (\n ApplicationCommand,\n ApplicationContext,\n AutocompleteContext,\n MessageCommand,\n SlashCommand,\n SlashCommandGroup,\n UserCommand,\n command,\n)\nfrom .enums import InteractionType\nfrom .errors import CheckFailure, DiscordException\nfrom .interactions import Interaction\nfrom .shard import AutoShardedClient\nfrom .types import interactions\nfrom .user import User\nfrom .utils import MISSING, async_all, find, get\n\nCoroFunc = Callable[..., Coroutine[Any, Any, Any]]\nCFT = TypeVar(\"CFT\", bound=CoroFunc)\n\n__all__ = (\n \"ApplicationCommandMixin\",\n \"Bot\",\n \"AutoShardedBot\",\n)\n\n_log = logging.getLogger(__name__)\n\n\nclass ApplicationCommandMixin(ABC):\n \"\"\"A mixin that implements common functionality for classes that need\n application command compatibility.\n\n Attributes\n -----------\n application_commands: :class:`dict`\n A mapping of command id string to :class:`.ApplicationCommand` objects.\n pending_application_commands: :class:`list`\n A list of commands that have been added but not yet registered. 
This is read-only and is modified via other\n methods.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._pending_application_commands = []\n self._application_commands = {}\n\n @property\n def all_commands(self):\n return self._application_commands\n\n @property\n def pending_application_commands(self):\n return self._pending_application_commands\n\n @property\n def commands(self) -> List[Union[ApplicationCommand, Any]]:\n commands = self.application_commands\n if self._bot._supports_prefixed_commands and hasattr(self._bot, \"prefixed_commands\"):\n commands += self._bot.prefixed_commands\n return commands\n\n @property\n def application_commands(self) -> List[ApplicationCommand]:\n return list(self._application_commands.values())\n\n def add_application_command(self, command: ApplicationCommand) -> None:\n \"\"\"Adds a :class:`.ApplicationCommand` into the internal list of commands.\n\n This is usually not called, instead the :meth:`command` or\n other shortcut decorators are used instead.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n command: :class:`.ApplicationCommand`\n The command to add.\n \"\"\"\n if isinstance(command, SlashCommand) and command.is_subcommand:\n raise TypeError(\"The provided command is a sub-command of group\")\n\n if self._bot.debug_guilds and command.guild_ids is None:\n command.guild_ids = self._bot.debug_guilds\n\n for cmd in self.pending_application_commands:\n if cmd == command:\n command.id = cmd.id\n self._application_commands[command.id] = command\n break\n self._pending_application_commands.append(command)\n\n def remove_application_command(self, command: ApplicationCommand) -> Optional[ApplicationCommand]:\n \"\"\"Remove a :class:`.ApplicationCommand` from the internal list\n of commands.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n command: :class:`.ApplicationCommand`\n The command to remove.\n\n Returns\n --------\n Optional[:class:`.ApplicationCommand`]\n The command that was removed. If the name is not valid then\n ``None`` is returned instead.\n \"\"\"\n if command.id is None:\n try:\n index = self._pending_application_commands.index(command)\n except ValueError:\n return None\n return self._pending_application_commands.pop(index)\n return self._application_commands.pop(command.id, None)\n\n @property\n def get_command(self):\n \"\"\"Shortcut for :meth:`.get_application_command`.\n\n .. note::\n Overridden in :class:`ext.commands.Bot`.\n\n .. versionadded:: 2.0\n \"\"\"\n # TODO: Do something like we did in self.commands for this\n return self.get_application_command\n\n def get_application_command(\n self,\n name: str,\n guild_ids: Optional[List[int]] = None,\n type: Type[ApplicationCommand] = SlashCommand,\n ) -> Optional[ApplicationCommand]:\n \"\"\"Get a :class:`.ApplicationCommand` from the internal list\n of commands.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n name: :class:`str`\n The name of the command to get.\n guild_ids: List[:class:`int`]\n The guild ids associated to the command to get.\n type: Type[:class:`.ApplicationCommand`]\n The type of the command to get. Defaults to :class:`.SlashCommand`.\n\n Returns\n --------\n Optional[:class:`.ApplicationCommand`]\n The command that was requested. 
If not found, returns ``None``.\n \"\"\"\n\n for command in self._application_commands.values():\n if command.name == name and isinstance(command, type):\n if guild_ids is not None and command.guild_ids != guild_ids:\n return\n return command\n\n async def get_desynced_commands(\n self,\n guild_id: Optional[int] = None,\n prefetched: Optional[List[ApplicationCommand]] = None\n ) -> List[Dict[str, Any]]:\n \"\"\"|coro|\n\n Gets the list of commands that are desynced from discord. If ``guild_id`` is specified, it will only return\n guild commands that are desynced from said guild, else it will return global commands.\n\n .. note::\n This function is meant to be used internally, and should only be used if you want to override the default\n command registration behavior.\n\n .. versionadded:: 2.0\n\n\n Parameters\n ----------\n guild_id: Optional[:class:`int`]\n The guild id to get the desynced commands for, else global commands if unspecified.\n prefetched: Optional[List[:class:`.ApplicationCommand`]]\n If you already fetched the commands, you can pass them here to be used. Not recommended for typical usage.\n\n Returns\n -------\n List[Dict[:class:`str`, Any]]\n A list of the desynced commands. Each will come with at least the ``cmd`` and ``action`` keys, which\n respectively contain the command and the action to perform. Other keys may also be present depending on\n the action, including ``id``.\n \"\"\"\n\n # We can suggest the user to upsert, edit, delete, or bulk upsert the commands\n\n def _check_command(cmd: ApplicationCommand, match: Dict) -> bool:\n if isinstance(cmd, SlashCommandGroup):\n if len(cmd.subcommands) != len(match.get(\"options\", [])):\n return True\n for i, subcommand in enumerate(cmd.subcommands):\n match_ = next(\n (data for data in match[\"options\"] if data[\"name\"] == subcommand.name),\n MISSING,\n )\n if match_ is not MISSING and _check_command(subcommand, match_):\n return True\n else:\n as_dict = cmd.to_dict()\n to_check = {\n \"dm_permission\": None,\n \"default_member_permissions\": None,\n \"name\": None,\n \"description\": None,\n \"name_localizations\": None,\n \"description_localizations\": None,\n \"options\": [\n \"type\",\n \"name\",\n \"description\",\n \"autocomplete\",\n \"choices\",\n \"name_localizations\",\n \"description_localizations\",\n ],\n }\n for check, value in to_check.items():\n if type(to_check[check]) == list:\n # We need to do some falsy conversion here\n # The API considers False (autocomplete) and [] (choices) to be falsy values\n falsy_vals = (False, [])\n for opt in value:\n cmd_vals = [val.get(opt, MISSING) for val in as_dict[check]] if check in as_dict else []\n for i, val in enumerate(cmd_vals):\n if val in falsy_vals:\n cmd_vals[i] = MISSING\n if match.get(check, MISSING) is not MISSING and cmd_vals != [\n val.get(opt, MISSING) for val in match[check]\n ]:\n # We have a difference\n return True\n elif getattr(cmd, check, None) != match.get(check):\n # We have a difference\n if check == \"default_permission\" and getattr(cmd, check) is True and match.get(check) is None:\n # This is a special case\n # TODO: Remove for perms v2\n continue\n return True\n return False\n\n return_value = []\n cmds = self.pending_application_commands.copy()\n\n if guild_id is None:\n if prefetched is not None:\n registered_commands = prefetched\n else:\n registered_commands = await self._bot.http.get_global_commands(self.user.id)\n pending = [cmd for cmd in cmds if cmd.guild_ids is None]\n else:\n if prefetched is not None:\n registered_commands = 
prefetched\n else:\n registered_commands = await self._bot.http.get_guild_commands(self.user.id, guild_id)\n pending = [cmd for cmd in cmds if cmd.guild_ids is not None and guild_id in cmd.guild_ids]\n\n registered_commands_dict = {cmd[\"name\"]: cmd for cmd in registered_commands}\n # First let's check if the commands we have locally are the same as the ones on discord\n for cmd in pending:\n match = registered_commands_dict.get(cmd.name)\n if match is None:\n # We don't have this command registered\n return_value.append({\"command\": cmd, \"action\": \"upsert\"})\n elif _check_command(cmd, match):\n return_value.append(\n {\n \"command\": cmd,\n \"action\": \"edit\",\n \"id\": int(registered_commands_dict[cmd.name][\"id\"]),\n }\n )\n else:\n # We have this command registered but it's the same\n return_value.append({\"command\": cmd, \"action\": None, \"id\": int(match[\"id\"])})\n\n # Now let's see if there are any commands on discord that we need to delete\n for cmd, value_ in registered_commands_dict.items():\n match = get(pending, name=registered_commands_dict[cmd][\"name\"])\n if match is None:\n # We have this command registered but not in our list\n return_value.append(\n {\n \"command\": registered_commands_dict[cmd][\"name\"],\n \"id\": int(value_[\"id\"]),\n \"action\": \"delete\",\n }\n )\n\n continue\n\n return return_value\n\n async def register_command(\n self,\n command: ApplicationCommand,\n force: bool = True,\n guild_ids: List[int] = None,\n ) -> None:\n \"\"\"|coro|\n\n Registers a command. If the command has ``guild_ids`` set, or if the ``guild_ids`` parameter is passed,\n the command will be registered as a guild command for those guilds.\n\n Parameters\n ----------\n command: :class:`~.ApplicationCommand`\n The command to register.\n force: :class:`bool`\n Whether to force the command to be registered. If this is set to False, the command will only be registered\n if it seems to already be registered and up to date with our internal cache. Defaults to True.\n guild_ids: :class:`list`\n A list of guild ids to register the command for. If this is not set, the command's\n :attr:`ApplicationCommand.guild_ids` attribute will be used.\n\n Returns\n -------\n :class:`~.ApplicationCommand`\n The command that was registered\n \"\"\"\n # TODO: Write this\n raise RuntimeError(\"This function has not been implemented yet\")\n\n async def register_commands(\n self,\n commands: Optional[List[ApplicationCommand]] = None,\n guild_id: Optional[int] = None,\n method: Literal[\"individual\", \"bulk\", \"auto\"] = \"bulk\",\n force: bool = False,\n delete_existing: bool = True,\n ) -> List[interactions.ApplicationCommand]:\n \"\"\"|coro|\n\n Register a list of commands.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n commands: Optional[List[:class:`~.ApplicationCommand`]]\n A list of commands to register. If this is not set (``None``), then all commands will be registered.\n guild_id: Optional[int]\n If this is set, the commands will be registered as a guild command for the respective guild. If it is not\n set, the commands will be registered according to their :attr:`ApplicationCommand.guild_ids` attribute.\n method: Literal['individual', 'bulk', 'auto']\n The method to use when registering the commands. If this is set to \"individual\", then each command will be\n registered individually. If this is set to \"bulk\", then all commands will be registered in bulk. If this is\n set to \"auto\", then the method will be determined automatically. 
Defaults to \"bulk\".\n force: :class:`bool`\n Registers the commands regardless of the state of the command on Discord. This uses one less API call, but\n can result in hitting rate limits more often. Defaults to False.\n delete_existing: :class:`bool`\n Whether to delete existing commands that are not in the list of commands to register. Defaults to True.\n \"\"\"\n if commands is None:\n commands = self.pending_application_commands\n\n commands = [copy.copy(cmd) for cmd in commands]\n\n if guild_id is not None:\n for cmd in commands:\n to_rep_with = [guild_id]\n cmd.guild_ids = to_rep_with\n\n is_global = guild_id is None\n\n registered = []\n\n if is_global:\n pending = list(filter(lambda c: c.guild_ids is None, commands))\n registration_methods = {\n \"bulk\": self._bot.http.bulk_upsert_global_commands,\n \"upsert\": self._bot.http.upsert_global_command,\n \"delete\": self._bot.http.delete_global_command,\n \"edit\": self._bot.http.edit_global_command,\n }\n\n def _register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n return registration_methods[method](self._bot.user.id, *args, **kwargs)\n\n else:\n pending = list(\n filter(\n lambda c: c.guild_ids is not None and guild_id in c.guild_ids,\n commands,\n )\n )\n registration_methods = {\n \"bulk\": self._bot.http.bulk_upsert_guild_commands,\n \"upsert\": self._bot.http.upsert_guild_command,\n \"delete\": self._bot.http.delete_guild_command,\n \"edit\": self._bot.http.edit_guild_command,\n }\n\n def _register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n return registration_methods[method](self._bot.user.id, guild_id, *args, **kwargs)\n\n def register(method: Literal[\"bulk\", \"upsert\", \"delete\", \"edit\"], *args, **kwargs):\n if kwargs.pop(\"_log\", True):\n if method == \"bulk\":\n _log.debug(f\"Bulk updating commands {[c['name'] for c in args[0]]} for guild {guild_id}\")\n elif method == \"upsert\":\n _log.debug(f\"Creating command {cmd['name']} for guild {guild_id}\")\n elif method == \"edit\":\n _log.debug(f\"Editing command {cmd['name']} for guild {guild_id}\")\n elif method == \"delete\":\n _log.debug(f\"Deleting command {cmd['name']} for guild {guild_id}\")\n return _register(method, *args, **kwargs)\n\n pending_actions = []\n\n if not force:\n if guild_id is None:\n prefetched_commands = await self.http.get_global_commands(self.user.id)\n else:\n prefetched_commands = await self.http.get_guild_commands(self.user.id, guild_id)\n desynced = await self.get_desynced_commands(guild_id=guild_id, prefetched=prefetched_commands)\n\n for cmd in desynced:\n if cmd[\"action\"] == \"delete\":\n pending_actions.append(\n {\n \"action\": \"delete\" if delete_existing else None,\n \"command\": collections.namedtuple(\"Command\", [\"name\"])(name=cmd[\"command\"]),\n \"id\": cmd[\"id\"],\n }\n )\n continue\n # We can assume the command item is a command, since it's only a string if action is delete\n match = get(pending, name=cmd[\"command\"].name, type=cmd[\"command\"].type)\n if match is None:\n continue\n if cmd[\"action\"] == \"edit\":\n pending_actions.append(\n {\n \"action\": \"edit\",\n \"command\": match,\n \"id\": cmd[\"id\"],\n }\n )\n elif cmd[\"action\"] == \"upsert\":\n pending_actions.append(\n {\n \"action\": \"upsert\",\n \"command\": match,\n }\n )\n elif cmd[\"action\"] is None:\n pending_actions.append(\n {\n \"action\": None,\n \"command\": match,\n }\n )\n else:\n raise ValueError(f\"Unknown action: {cmd['action']}\")\n filtered_no_action = 
list(filter(lambda c: c[\"action\"] is not None, pending_actions))\n filtered_deleted = list(filter(lambda a: a[\"action\"] != \"delete\", pending_actions))\n if method == \"bulk\" or (method == \"auto\" and len(filtered_deleted) == len(pending)):\n # Either the method is bulk or all the commands need to be modified, so we can just do a bulk upsert\n data = [cmd[\"command\"].to_dict() for cmd in filtered_deleted]\n # If there's nothing to update, don't bother\n if len(filtered_no_action) == 0:\n _log.debug(\"Skipping bulk command update: Commands are up to date\")\n registered = prefetched_commands\n else:\n _log.debug(\n f\"Bulk updating commands %s for guild %s\",\n {c[\"command\"].name: c[\"action\"] for c in pending_actions},\n guild_id,\n )\n registered = await register(\"bulk\", data, _log=False)\n else:\n if not filtered_no_action:\n registered = []\n for cmd in filtered_no_action:\n if cmd[\"action\"] == \"delete\":\n await register(\"delete\", cmd[\"command\"])\n continue\n if cmd[\"action\"] == \"edit\":\n registered.append(await register(\"edit\", cmd[\"id\"], cmd[\"command\"].to_dict()))\n elif cmd[\"action\"] == \"upsert\":\n registered.append(await register(\"upsert\", cmd[\"command\"].to_dict()))\n else:\n raise ValueError(f\"Unknown action: {cmd['action']}\")\n\n # TODO: Our lists dont work sometimes, see if that can be fixed so we can avoid this second API call\n if method != \"bulk\":\n if guild_id is None:\n registered = await self._bot.http.get_global_commands(self._bot.user.id)\n else:\n registered = await self._bot.http.get_guild_commands(self._bot.user.id, guild_id)\n else:\n data = [cmd.to_dict() for cmd in pending]\n registered = await register(\"bulk\", data)\n\n for i in registered:\n cmd = get(\n self.pending_application_commands,\n name=i[\"name\"],\n type=i[\"type\"],\n )\n if not cmd:\n raise ValueError(f\"Registered command {i['name']}, type {i['type']} not found in pending commands\")\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n return registered\n\n async def sync_commands(\n self,\n commands: Optional[List[ApplicationCommand]] = None,\n method: Literal[\"individual\", \"bulk\", \"auto\"] = \"bulk\",\n force: bool = False,\n guild_ids: Optional[List[int]] = None,\n register_guild_commands: bool = True,\n check_guilds: Optional[List[int]] = [],\n delete_existing: bool = True,\n ) -> None:\n \"\"\"|coro|\n\n Registers all commands that have been added through :meth:`.add_application_command`. This method cleans up all\n commands over the API and should sync them with the internal cache of commands. It attempts to register the\n commands in the most efficient way possible, unless ``force`` is set to ``True``, in which case it will always\n register all commands.\n\n By default, this coroutine is called inside the :func:`.on_connect` event. If you choose to override the\n :func:`.on_connect` event, then you should invoke this coroutine as well.\n\n .. note::\n If you remove all guild commands from a particular guild, the library may not be able to detect and update\n the commands accordingly, as it would have to individually check for each guild. To force the library to\n unregister a guild's commands, call this function with ``commands=[]`` and ``guild_ids=[guild_id]``.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n commands: Optional[List[:class:`~.ApplicationCommand`]]\n A list of commands to register. 
If this is not set (None), then all commands will be registered.\n method: Literal['individual', 'bulk', 'auto']\n The method to use when registering the commands. If this is set to \"individual\", then each command will be\n registered individually. If this is set to \"bulk\", then all commands will be registered in bulk. If this is\n set to \"auto\", then the method will be determined automatically. Defaults to \"bulk\".\n force: :class:`bool`\n Registers the commands regardless of the state of the command on Discord. This uses one less API call, but\n can result in hitting rate limits more often. Defaults to False.\n guild_ids: Optional[List[:class:`int`]]\n A list of guild ids to register the commands for. If this is not set, the commands'\n :attr:`~.ApplicationCommand.guild_ids` attribute will be used.\n register_guild_commands: :class:`bool`\n Whether to register guild commands. Defaults to True.\n check_guilds: Optional[List[:class:`int`]]\n A list of guilds ids to check for commands to unregister, since the bot would otherwise have to check all\n guilds. Unlike ``guild_ids``, this does not alter the commands' :attr:`~.ApplicationCommand.guild_ids`\n attribute, instead it adds the guild ids to a list of guilds to sync commands for. If\n ``register_guild_commands`` is set to False, then this parameter is ignored.\n delete_existing: :class:`bool`\n Whether to delete existing commands that are not in the list of commands to register. Defaults to True.\n \"\"\"\n\n check_guilds = list(set((check_guilds or []) + (self.debug_guilds or [])))\n\n if commands is None:\n commands = self.pending_application_commands\n\n if guild_ids is not None:\n for cmd in commands:\n cmd.guild_ids = guild_ids\n\n global_commands = [cmd for cmd in commands if cmd.guild_ids is None]\n registered_commands = await self.register_commands(\n global_commands, method=method, force=force, delete_existing=delete_existing\n )\n\n registered_guild_commands = {}\n\n if register_guild_commands:\n cmd_guild_ids = []\n for cmd in commands:\n if cmd.guild_ids is not None:\n cmd_guild_ids.extend(cmd.guild_ids)\n if check_guilds is not None:\n cmd_guild_ids.extend(check_guilds)\n for guild_id in set(cmd_guild_ids):\n guild_commands = [cmd for cmd in commands if cmd.guild_ids is not None and guild_id in cmd.guild_ids]\n registered_guild_commands[guild_id] = await self.register_commands(\n guild_commands, guild_id=guild_id, method=method, force=force, delete_existing=delete_existing\n )\n\n for i in registered_commands:\n cmd = get(\n self.pending_application_commands,\n name=i[\"name\"],\n guild_ids=None,\n type=i[\"type\"],\n )\n if cmd:\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n for guild_id, commands in registered_guild_commands.items():\n for i in commands:\n cmd = find(\n lambda cmd: cmd.name == i[\"name\"]\n and cmd.type == i[\"type\"]\n and cmd.guild_ids is not None\n and int(i[\"guild_id\"]) in cmd.guild_ids,\n self.pending_application_commands,\n )\n if not cmd:\n # command has not been added yet\n continue\n cmd.id = i[\"id\"]\n self._application_commands[cmd.id] = cmd\n\n async def process_application_commands(self, interaction: Interaction, auto_sync: bool = None) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_interaction`\n event. 
If you choose to override the :func:`.on_interaction` event, then\n you should invoke this coroutine as well.\n\n This function finds a registered command matching the interaction id from\n application commands and invokes it. If no matching command was \n found, it replies to the interaction with a default message.\n\n .. versionadded:: 2.0\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to process\n auto_sync: :class:`bool`\n Whether to automatically sync and unregister the command if it is not found in the internal cache. This will\n invoke the :meth:`~.Bot.sync_commands` method on the context of the command, either globally or per-guild,\n based on the type of the command, respectively. Defaults to :attr:`.Bot.auto_sync_commands`.\n \"\"\"\n if auto_sync is None:\n auto_sync = self._bot.auto_sync_commands\n if interaction.type not in (\n InteractionType.application_command,\n InteractionType.auto_complete,\n ):\n return\n\n try:\n command = self._application_commands[interaction.data[\"id\"]]\n except KeyError:\n for cmd in self.application_commands + self.pending_application_commands:\n guild_id = interaction.data.get(\"guild_id\")\n if guild_id:\n guild_id = int(guild_id)\n if cmd.name == interaction.data[\"name\"] and (\n guild_id == cmd.guild_ids or (isinstance(cmd.guild_ids, list) and guild_id in cmd.guild_ids)\n ):\n command = cmd\n break\n else:\n if auto_sync:\n guild_id = interaction.data.get(\"guild_id\")\n if guild_id is None:\n await self.sync_commands()\n else:\n\n await self.sync_commands(check_guilds=[guild_id])\n return self._bot.dispatch(\"unknown_application_command\", interaction)\n\n if interaction.type is InteractionType.auto_complete:\n return self.dispatch(\"application_command_auto_complete\", interaction, command)\n\n ctx = await self.get_application_context(interaction)\n ctx.command = command\n await self.invoke_application_command(ctx)\n\n async def on_application_command_auto_complete(self, interaction: Interaction, command: ApplicationCommand) -> None:\n async def callback() -> None:\n ctx = await self.get_autocomplete_context(interaction)\n ctx.command = command\n return await command.invoke_autocomplete_callback(ctx)\n\n autocomplete_task = self.loop.create_task(callback())\n try:\n await self.wait_for(\"application_command_auto_complete\", check=lambda i, c: c == command, timeout=3)\n except asyncio.TimeoutError:\n return\n else:\n if not autocomplete_task.done():\n autocomplete_task.cancel()\n\n def slash_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.SlashCommand`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`SlashCommand`]\n A decorator that converts the provided method into a :class:`.SlashCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=SlashCommand, **kwargs)\n\n def user_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.UserCommand`.\n\n .. 
versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`UserCommand`]\n A decorator that converts the provided method into a :class:`.UserCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=UserCommand, **kwargs)\n\n def message_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`add_application_command`.\n This shortcut is made specifically for :class:`.MessageCommand`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`MessageCommand`]\n A decorator that converts the provided method into a :class:`.MessageCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(cls=MessageCommand, **kwargs)\n\n def application_command(self, **kwargs):\n \"\"\"A shortcut decorator that invokes :func:`command` and adds it to\n the internal command list via :meth:`~.Bot.add_application_command`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`ApplicationCommand`]\n A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n\n def decorator(func) -> ApplicationCommand:\n result = command(**kwargs)(func)\n self.add_application_command(result)\n return result\n\n return decorator\n\n def command(self, **kwargs):\n \"\"\"An alias for :meth:`application_command`.\n\n .. note::\n\n This decorator is overridden by :class:`discord.ext.commands.Bot`.\n\n .. versionadded:: 2.0\n\n Returns\n --------\n Callable[..., :class:`ApplicationCommand`]\n A decorator that converts the provided method into an :class:`.ApplicationCommand`, adds it to the bot,\n then returns it.\n \"\"\"\n return self.application_command(**kwargs)\n\n def create_group(\n self, name: str, description: Optional[str] = None, guild_ids: Optional[List[int]] = None, **kwargs\n ) -> SlashCommandGroup:\n \"\"\"A shortcut method that creates a slash command group with no subcommands and adds it to the internal\n command list via :meth:`add_application_command`.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n name: :class:`str`\n The name of the group to create.\n description: Optional[:class:`str`]\n The description of the group to create.\n guild_ids: Optional[List[:class:`int`]]\n A list of the IDs of each guild this group should be added to, making it a guild command.\n This will be a global command if ``None`` is passed.\n kwargs:\n Any additional keyword arguments to pass to :class:`.SlashCommandGroup`.\n\n Returns\n --------\n SlashCommandGroup\n The slash command group that was created.\n \"\"\"\n description = description or \"No description provided.\"\n group = SlashCommandGroup(name, description, guild_ids, **kwargs)\n self.add_application_command(group)\n return group\n\n def group(\n self,\n name: Optional[str] = None,\n description: Optional[str] = None,\n guild_ids: Optional[List[int]] = None,\n ) -> Callable[[Type[SlashCommandGroup]], SlashCommandGroup]:\n \"\"\"A shortcut decorator that initializes the provided subclass of :class:`.SlashCommandGroup`\n and adds it to the internal command list via :meth:`add_application_command`.\n\n .. versionadded:: 2.0\n\n Parameters\n ----------\n name: Optional[:class:`str`]\n The name of the group to create. 
This will resolve to the name of the decorated class if ``None`` is passed.\n description: Optional[:class:`str`]\n The description of the group to create.\n guild_ids: Optional[List[:class:`int`]]\n A list of the IDs of each guild this group should be added to, making it a guild command.\n This will be a global command if ``None`` is passed.\n\n Returns\n --------\n Callable[[Type[SlashCommandGroup]], SlashCommandGroup]\n The slash command group that was created.\n \"\"\"\n\n def inner(cls: Type[SlashCommandGroup]) -> SlashCommandGroup:\n group = cls(\n name or cls.__name__,\n (\n description or inspect.cleandoc(cls.__doc__).splitlines()[0]\n if cls.__doc__ is not None\n else \"No description provided\"\n ),\n guild_ids=guild_ids,\n )\n self.add_application_command(group)\n return group\n\n return inner\n\n slash_group = group\n\n def walk_application_commands(self) -> Generator[ApplicationCommand, None, None]:\n \"\"\"An iterator that recursively walks through all application commands and subcommands.\n\n Yields\n ------\n :class:`.ApplicationCommand`\n An application command from the internal list of application commands.\n \"\"\"\n for command in self.application_commands:\n if isinstance(command, SlashCommandGroup):\n yield from command.walk_commands()\n yield command\n\n async def get_application_context(self, interaction: Interaction, cls=None) -> ApplicationContext:\n r\"\"\"|coro|\n\n Returns the invocation context from the interaction.\n\n This is a more low-level counter-part for :meth:`.process_application_commands`\n to allow users more fine-grained control over the processing.\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.ApplicationContext`. Should a custom\n class be provided, it must be similar enough to\n :class:`.ApplicationContext`\\'s interface.\n\n Returns\n --------\n :class:`.ApplicationContext`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n if cls is None:\n cls = ApplicationContext\n return cls(self, interaction)\n\n async def get_autocomplete_context(self, interaction: Interaction, cls=None) -> AutocompleteContext:\n r\"\"\"|coro|\n\n Returns the autocomplete context from the interaction.\n\n This is a more low-level counter-part for :meth:`.process_application_commands`\n to allow users more fine-grained control over the processing.\n\n Parameters\n -----------\n interaction: :class:`discord.Interaction`\n The interaction to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.AutocompleteContext`. Should a custom\n class be provided, it must be similar enough to\n :class:`.AutocompleteContext`\\'s interface.\n\n Returns\n --------\n :class:`.AutocompleteContext`\n The autocomplete context. 
The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n if cls is None:\n cls = AutocompleteContext\n return cls(self, interaction)\n\n async def invoke_application_command(self, ctx: ApplicationContext) -> None:\n \"\"\"|coro|\n\n Invokes the application command given under the invocation\n context and handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.ApplicationCommand`\n The invocation context to invoke.\n \"\"\"\n self._bot.dispatch(\"application_command\", ctx)\n try:\n if await self._bot.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise CheckFailure(\"The global check once functions failed.\")\n except DiscordException as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self._bot.dispatch(\"application_command_completion\", ctx)\n\n @property\n @abstractmethod\n def _bot(self) -> Union[\"Bot\", \"AutoShardedBot\"]:\n ...\n\n\nclass BotBase(ApplicationCommandMixin, CogMixin, ABC):\n _supports_prefixed_commands = False\n\n def __init__(self, description=None, *args, **options):\n super().__init__(*args, **options)\n self.extra_events = {} # TYPE: Dict[str, List[CoroFunc]]\n self.__cogs = {} # TYPE: Dict[str, Cog]\n self.__extensions = {} # TYPE: Dict[str, types.ModuleType]\n self._checks = [] # TYPE: List[Check]\n self._check_once = []\n self._before_invoke = None\n self._after_invoke = None\n self.description = inspect.cleandoc(description) if description else \"\"\n self.owner_id = options.get(\"owner_id\")\n self.owner_ids = options.get(\"owner_ids\", set())\n self.auto_sync_commands = options.get(\"auto_sync_commands\", True)\n\n self.debug_guilds = options.pop(\"debug_guilds\", None)\n\n if self.owner_id and self.owner_ids:\n raise TypeError(\"Both owner_id and owner_ids are set.\")\n\n if self.owner_ids and not isinstance(self.owner_ids, collections.abc.Collection):\n raise TypeError(f\"owner_ids must be a collection not {self.owner_ids.__class__!r}\")\n\n self._checks = []\n self._check_once = []\n self._before_invoke = None\n self._after_invoke = None\n\n async def on_connect(self):\n if self.auto_sync_commands:\n await self.sync_commands()\n\n async def on_interaction(self, interaction):\n await self.process_application_commands(interaction)\n\n async def on_application_command_error(self, context: ApplicationContext, exception: DiscordException) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default, this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get(\"on_application_command_error\", None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f\"Ignoring exception in command {context.command}:\", file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n # global check registration\n # TODO: Remove these from commands.Bot\n\n def check(self, func):\n \"\"\"A decorator that adds a global check to the bot. A global check is similar to a :func:`.check` that is\n applied on a per-command basis except it is run before any command checks have been verified and applies to\n every command the bot has.\n\n .. note::\n\n This function can either be a regular function or a coroutine. 
Similar to a command :func:`.check`, this\n takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from\n :exc:`.ApplicationCommandError`.\n\n Example\n ---------\n .. code-block:: python3\n\n @bot.check\n def check_commands(ctx):\n return ctx.command.qualified_name in allowed_commands\n\n \"\"\"\n # T was used instead of Check to ensure the type matches on return\n self.add_check(func) # type: ignore\n return func\n\n def add_check(self, func, *, call_once: bool = False) -> None:\n \"\"\"Adds a global check to the bot. This is the non-decorator interface to :meth:`.check` and\n :meth:`.check_once`.\n\n Parameters\n -----------\n func\n The function that was used as a global check.\n call_once: :class:`bool`\n If the function should only be called once per :meth:`.Bot.invoke` call.\n\n \"\"\"\n\n if call_once:\n self._check_once.append(func)\n else:\n self._checks.append(func)\n\n def remove_check(self, func, *, call_once: bool = False) -> None:\n \"\"\"Removes a global check from the bot.\n This function is idempotent and will not raise an exception\n if the function is not in the global checks.\n\n Parameters\n -----------\n func\n The function to remove from the global checks.\n call_once: :class:`bool`\n If the function was added with ``call_once=True`` in\n the :meth:`.Bot.add_check` call or using :meth:`.check_once`.\n\n \"\"\"\n checks = self._check_once if call_once else self._checks\n\n try:\n checks.remove(func)\n except ValueError:\n pass\n\n def check_once(self, func):\n \"\"\"A decorator that adds a \"call once\" global check to the bot. Unlike regular global checks, this one is called\n only once per :meth:`.Bot.invoke` call. Regular global checks are called whenever a command is called or\n :meth:`.Command.can_run` is called. This type of check bypasses that and ensures that it's called only once,\n even inside the default help command.\n\n .. note::\n\n When using this function the :class:`.Context` sent to a group subcommand may only parse the parent command\n and not the subcommands due to it being invoked once per :meth:`.Bot.invoke` call.\n\n .. note::\n\n This function can either be a regular function or a coroutine. Similar to a command :func:`.check`,\n this takes a single parameter of type :class:`.Context` and can only raise exceptions inherited from\n :exc:`.ApplicationCommandError`.\n\n Example\n ---------\n .. code-block:: python3\n\n @bot.check_once\n def whitelist(ctx):\n return ctx.message.author.id in my_whitelist\n\n \"\"\"\n self.add_check(func, call_once=True)\n return func\n\n async def can_run(self, ctx: ApplicationContext, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if not data:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await async_all(f(ctx) for f in data) # type: ignore\n\n # listener registration\n\n def add_listener(self, func: CoroFunc, name: str = MISSING) -> None:\n \"\"\"The non decorator alternative to :meth:`.listen`.\n\n Parameters\n -----------\n func: :ref:`coroutine <coroutine>`\n The function to call.\n name: :class:`str`\n The name of the event to listen for. Defaults to ``func.__name__``.\n\n Example\n --------\n\n .. 
code-block:: python3\n\n async def on_ready(): pass\n async def my_message(message): pass\n\n bot.add_listener(on_ready)\n bot.add_listener(my_message, 'on_message')\n \"\"\"\n name = func.__name__ if name is MISSING else name\n\n if not asyncio.iscoroutinefunction(func):\n raise TypeError(\"Listeners must be coroutines\")\n\n if name in self.extra_events:\n self.extra_events[name].append(func)\n else:\n self.extra_events[name] = [func]\n\n def remove_listener(self, func: CoroFunc, name: str = MISSING) -> None:\n \"\"\"Removes a listener from the pool of listeners.\n\n Parameters\n -----------\n func\n The function that was used as a listener to remove.\n name: :class:`str`\n The name of the event we want to remove. Defaults to\n ``func.__name__``.\n \"\"\"\n\n name = func.__name__ if name is MISSING else name\n\n if name in self.extra_events:\n try:\n self.extra_events[name].remove(func)\n except ValueError:\n pass\n\n def listen(self, name: str = MISSING) -> Callable[[CFT], CFT]:\n \"\"\"A decorator that registers another function as an external\n event listener. Basically this allows you to listen to multiple\n events from different places e.g. such as :func:`.on_ready`\n\n The functions being listened to must be a :ref:`coroutine <coroutine>`.\n\n Example\n --------\n\n .. code-block:: python3\n\n @bot.listen()\n async def on_message(message):\n print('one')\n\n # in some other file...\n\n @bot.listen('on_message')\n async def my_message(message):\n print('two')\n\n Would print one and two in an unspecified order.\n\n Raises\n -------\n TypeError\n The function being listened to is not a coroutine.\n \"\"\"\n\n def decorator(func: CFT) -> CFT:\n self.add_listener(func, name)\n return func\n\n return decorator\n\n def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> None:\n # super() will resolve to Client\n super().dispatch(event_name, *args, **kwargs) # type: ignore\n ev = f\"on_{event_name}\"\n for event in self.extra_events.get(ev, []):\n self._schedule_event(event, ev, *args, **kwargs) # type: ignore\n\n def before_invoke(self, coro):\n \"\"\"A decorator that registers a coroutine as a pre-invoke hook.\n A pre-invoke hook is called directly before the command is\n called. This makes it a useful function to set up database\n connections or any type of set up required.\n This pre-invoke hook takes a sole parameter, a :class:`.Context`.\n\n .. note::\n\n The :meth:`~.Bot.before_invoke` and :meth:`~.Bot.after_invoke` hooks are\n only called if all checks and argument parsing procedures pass\n without error. If any check or argument parsing procedures fail\n then the hooks are not called.\n\n Parameters\n -----------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register as the pre-invoke hook.\n\n Raises\n -------\n TypeError\n The coroutine passed is not actually a coroutine.\n \"\"\"\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError(\"The pre-invoke hook must be a coroutine.\")\n\n self._before_invoke = coro\n return coro\n\n def after_invoke(self, coro):\n r\"\"\"A decorator that registers a coroutine as a post-invoke hook.\n A post-invoke hook is called directly after the command is\n called. This makes it a useful function to clean-up database\n connections or any type of clean up required.\n This post-invoke hook takes a sole parameter, a :class:`.Context`.\n\n .. note::\n\n Similar to :meth:`~.Bot.before_invoke`\\, this is not called unless\n checks and argument parsing procedures succeed. 
This hook is,\n however, **always** called regardless of the internal command\n callback raising an error (i.e. :exc:`.CommandInvokeError`\\).\n This makes it ideal for clean-up scenarios.\n\n Parameters\n -----------\n coro: :ref:`coroutine <coroutine>`\n The coroutine to register as the post-invoke hook.\n\n Raises\n -------\n TypeError\n The coroutine passed is not actually a coroutine.\n\n \"\"\"\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError(\"The post-invoke hook must be a coroutine.\")\n\n self._after_invoke = coro\n return coro\n\n async def is_owner(self, user: User) -> bool:\n \"\"\"|coro|\n\n Checks if a :class:`~discord.User` or :class:`~discord.Member` is the owner of\n this bot.\n\n If an :attr:`owner_id` is not set, it is fetched automatically\n through the use of :meth:`~.Bot.application_info`.\n\n .. versionchanged:: 1.3\n The function also checks if the application is team-owned if\n :attr:`owner_ids` is not set.\n\n Parameters\n -----------\n user: :class:`.abc.User`\n The user to check for.\n\n Returns\n --------\n :class:`bool`\n Whether the user is the owner.\n \"\"\"\n\n if self.owner_id:\n return user.id == self.owner_id\n elif self.owner_ids:\n return user.id in self.owner_ids\n else:\n app = await self.application_info() # type: ignore\n if app.team:\n self.owner_ids = ids = {m.id for m in app.team.members}\n return user.id in ids\n else:\n self.owner_id = owner_id = app.owner.id\n return user.id == owner_id\n\n\nclass Bot(BotBase, Client):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Client` and as a result\n anything that you can do with a :class:`discord.Client` you can do with\n this bot.\n\n This class also subclasses ``ApplicationCommandMixin`` to provide the functionality\n to manage commands.\n\n .. versionadded:: 2.0\n\n Attributes\n -----------\n description: :class:`str`\n The content prefixed into the default help message.\n owner_id: Optional[:class:`int`]\n The user ID that owns the bot. If this is not set and is then queried via\n :meth:`.is_owner` then it is fetched automatically using\n :meth:`~.Bot.application_info`.\n owner_ids: Optional[Collection[:class:`int`]]\n The user IDs that owns the bot. This is similar to :attr:`owner_id`.\n If this is not set and the application is team based, then it is\n fetched automatically using :meth:`~.Bot.application_info`.\n For performance reasons it is recommended to use a :class:`set`\n for the collection. You cannot set both ``owner_id`` and ``owner_ids``.\n\n .. versionadded:: 1.3\n debug_guilds: Optional[List[:class:`int`]]\n Guild IDs of guilds to use for testing commands.\n The bot will not create any global commands if debug guild IDs are passed.\n\n .. versionadded:: 2.0\n auto_sync_commands: :class:`bool`\n Whether to automatically sync slash commands. This will call sync_commands in on_connect, and in\n :attr:`.process_application_commands` if the command is not found. Defaults to ``True``.\n\n .. versionadded:: 2.0\n \"\"\"\n\n @property\n def _bot(self) -> \"Bot\":\n return self\n\n\nclass AutoShardedBot(BotBase, AutoShardedClient):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedClient` instead.\n\n .. versionadded:: 2.0\n \"\"\"\n\n @property\n def _bot(self) -> \"AutoShardedBot\":\n return self\n", "path": "discord/bot.py"}]} |
gh_patches_debug_38720 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1488 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update Jaeger exporter status code to error tag
According to the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/jaeger.md#status), the Jaeger exporter's status code tag must carry the string representation of the status code enum.
Related:
https://github.com/open-telemetry/opentelemetry-python/issues/1402
--- END ISSUE ---
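To make the requirement above concrete, here is a minimal, self-contained sketch of the tag value the spec asks for: the enum member's string name (e.g. "ERROR") rather than its integer value. The `StatusCode` class below is only a stand-in mirroring the SDK's `StatusCode` enum, and the helper name is hypothetical; this illustrates the issue and is not the repository's actual patch.

```python
from enum import Enum


class StatusCode(Enum):
    """Stand-in mirroring the SDK's StatusCode enum values (assumption)."""

    UNSET = 0
    OK = 1
    ERROR = 2


def status_code_tag_value(status_code: StatusCode) -> str:
    """Return the string form the spec expects, e.g. "ERROR" for StatusCode.ERROR."""
    return status_code.name


if __name__ == "__main__":
    # The exporter currently emits the integer value (2); the spec expects the name ("ERROR").
    assert status_code_tag_value(StatusCode.ERROR) == "ERROR"
```

In the exporter code below, the relevant spots are the `status.code` tags built in `_extract_tags` of both translators, which currently pass `span.status.status_code.value` (an integer) to a long/int tag helper.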
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Optional, Sequence
16
17 from google.protobuf.duration_pb2 import Duration
18 from google.protobuf.timestamp_pb2 import Timestamp
19
20 from opentelemetry.exporter.jaeger.gen import model_pb2
21 from opentelemetry.exporter.jaeger.translate import (
22 NAME_KEY,
23 OTLP_JAEGER_SPAN_KIND,
24 VERSION_KEY,
25 Translator,
26 )
27 from opentelemetry.sdk.trace import Span
28 from opentelemetry.util import types
29
30 # pylint: disable=no-member,too-many-locals,no-self-use
31
32
33 def _trace_id_to_bytes(trace_id: int) -> bytes:
34 """Returns bytes representation of trace id."""
35 return trace_id.to_bytes(16, "big")
36
37
38 def _span_id_to_bytes(span_id: int) -> bytes:
39 """Returns bytes representation of span id"""
40 return span_id.to_bytes(8, "big")
41
42
43 def _get_string_key_value(key, value: str) -> model_pb2.KeyValue:
44 """Returns jaeger string KeyValue."""
45 return model_pb2.KeyValue(
46 key=key, v_str=value, v_type=model_pb2.ValueType.STRING
47 )
48
49
50 def _get_bool_key_value(key: str, value: bool) -> model_pb2.KeyValue:
51 """Returns jaeger boolean KeyValue."""
52 return model_pb2.KeyValue(
53 key=key, v_bool=value, v_type=model_pb2.ValueType.BOOL
54 )
55
56
57 def _get_long_key_value(key: str, value: int) -> model_pb2.KeyValue:
58 """Returns jaeger long KeyValue."""
59 return model_pb2.KeyValue(
60 key=key, v_int64=value, v_type=model_pb2.ValueType.INT64
61 )
62
63
64 def _get_double_key_value(key: str, value: float) -> model_pb2.KeyValue:
65 """Returns jaeger double KeyValue."""
66 return model_pb2.KeyValue(
67 key=key, v_float64=value, v_type=model_pb2.ValueType.FLOAT64
68 )
69
70
71 def _get_binary_key_value(key: str, value: bytes) -> model_pb2.KeyValue:
72     """Returns jaeger binary KeyValue."""
73 return model_pb2.KeyValue(
74 key=key, v_binary=value, v_type=model_pb2.ValueType.BINARY
75 )
76
77
78 def _translate_attribute(
79 key: str, value: types.AttributeValue
80 ) -> Optional[model_pb2.KeyValue]:
81 """Convert the attributes to jaeger keyvalues."""
82 translated = None
83 if isinstance(value, bool):
84 translated = _get_bool_key_value(key, value)
85 elif isinstance(value, str):
86 translated = _get_string_key_value(key, value)
87 elif isinstance(value, int):
88 translated = _get_long_key_value(key, value)
89 elif isinstance(value, float):
90 translated = _get_double_key_value(key, value)
91 elif isinstance(value, tuple):
92 translated = _get_string_key_value(key, str(value))
93 return translated
94
95
96 def _extract_resource_tags(span: Span) -> Sequence[model_pb2.KeyValue]:
97 """Extracts resource attributes from span and returns
98 list of jaeger keyvalues.
99
100 Args:
101 span: span to extract keyvalues
102 """
103 tags = []
104 for key, value in span.resource.attributes.items():
105 tag = _translate_attribute(key, value)
106 if tag:
107 tags.append(tag)
108 return tags
109
110
111 def _duration_from_two_time_stamps(
112 start: Timestamp, end: Timestamp
113 ) -> Duration:
114 """Compute Duration from two Timestamps.
115
116 See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration
117 """
118 duration = Duration(
119 seconds=end.seconds - start.seconds, nanos=end.nanos - start.nanos,
120 )
121 # pylint: disable=chained-comparison
122 if duration.seconds < 0 and duration.nanos > 0:
123 duration.seconds += 1
124 duration.nanos -= 1000000000
125 elif duration.seconds > 0 and duration.nanos < 0:
126 duration.seconds -= 1
127 duration.nanos += 1000000000
128 return duration
129
130
131 def _proto_timestamp_from_epoch_nanos(nsec: int) -> Timestamp:
132 """Create a Timestamp from the number of nanoseconds elapsed from the epoch.
133
134 See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#timestamp
135 """
136 nsec_time = nsec / 1e9
137 seconds = int(nsec_time)
138 nanos = int((nsec_time - seconds) * 1e9)
139 return Timestamp(seconds=seconds, nanos=nanos)
140
141
142 class ProtobufTranslator(Translator):
143 def __init__(self, svc_name):
144 self.svc_name = svc_name
145
146 def _translate_span(self, span: Span) -> model_pb2.Span:
147
148 ctx = span.get_span_context()
149 # pb2 span expects in byte format
150 trace_id = _trace_id_to_bytes(ctx.trace_id)
151 span_id = _span_id_to_bytes(ctx.span_id)
152
153 start_time = _proto_timestamp_from_epoch_nanos(span.start_time)
154 end_time = _proto_timestamp_from_epoch_nanos(span.end_time)
155 duration = _duration_from_two_time_stamps(start_time, end_time)
156
157 tags = self._extract_tags(span)
158 refs = self._extract_refs(span)
159 logs = self._extract_logs(span)
160
161 flags = int(ctx.trace_flags)
162
163 process = model_pb2.Process(
164 service_name=self.svc_name, tags=_extract_resource_tags(span)
165 )
166 jaeger_span = model_pb2.Span(
167 trace_id=trace_id,
168 span_id=span_id,
169 operation_name=span.name,
170 references=refs,
171 flags=flags,
172 start_time=start_time,
173 duration=duration,
174 tags=tags,
175 logs=logs,
176 process=process,
177 )
178 return jaeger_span
179
180 def _extract_tags(self, span: Span) -> Sequence[model_pb2.KeyValue]:
181 translated = []
182 if span.attributes:
183 for key, value in span.attributes.items():
184 key_value = _translate_attribute(key, value)
185 if key_value is not None:
186 translated.append(key_value)
187 if span.resource.attributes:
188 for key, value in span.resource.attributes.items():
189 key_value = _translate_attribute(key, value)
190 if key_value:
191 translated.append(key_value)
192
193 code = _get_long_key_value(
194 "status.code", span.status.status_code.value
195 )
196 message = _get_string_key_value(
197 "status.message", span.status.description
198 )
199 kind = _get_string_key_value(
200 "span.kind", OTLP_JAEGER_SPAN_KIND[span.kind]
201 )
202 translated.extend([code, message, kind])
203
204 # Instrumentation info KeyValues
205 if span.instrumentation_info:
206 name = _get_string_key_value(
207 NAME_KEY, span.instrumentation_info.name
208 )
209 version = _get_string_key_value(
210 VERSION_KEY, span.instrumentation_info.version
211 )
212 translated.extend([name, version])
213
214 # Make sure to add "error" tag if span status is not OK
215 if not span.status.is_ok:
216 translated.append(_get_bool_key_value("error", True))
217
218 return translated
219
220 def _extract_refs(
221 self, span: Span
222 ) -> Optional[Sequence[model_pb2.SpanRef]]:
223 if not span.links:
224 return None
225
226 refs = []
227 for link in span.links:
228 trace_id = link.context.trace_id
229 span_id = link.context.span_id
230 refs.append(
231 model_pb2.SpanRef(
232 ref_type=model_pb2.SpanRefType.FOLLOWS_FROM,
233 trace_id=_trace_id_to_bytes(trace_id),
234 span_id=_span_id_to_bytes(span_id),
235 )
236 )
237 return refs
238
239 def _extract_logs(self, span: Span) -> Optional[Sequence[model_pb2.Log]]:
240 if not span.events:
241 return None
242
243 logs = []
244 for event in span.events:
245 fields = []
246 for key, value in event.attributes.items():
247 tag = _translate_attribute(key, value)
248 if tag:
249 fields.append(tag)
250
251 fields.append(
252 _get_string_key_value(key="message", value=event.name,)
253 )
254 event_ts = _proto_timestamp_from_epoch_nanos(event.timestamp)
255 logs.append(model_pb2.Log(timestamp=event_ts, fields=fields))
256
257 return logs
258
```
Path: `exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # pylint: disable=no-self-use
15 from typing import Optional, Sequence
16
17 from opentelemetry.exporter.jaeger.gen.jaeger import Collector as TCollector
18 from opentelemetry.exporter.jaeger.translate import (
19 NAME_KEY,
20 OTLP_JAEGER_SPAN_KIND,
21 VERSION_KEY,
22 Translator,
23 _convert_int_to_i64,
24 _nsec_to_usec_round,
25 )
26 from opentelemetry.sdk.trace import Span
27 from opentelemetry.util import types
28
29
30 def _get_string_tag(key, value: str) -> TCollector.Tag:
31 """Returns jaeger string tag."""
32 return TCollector.Tag(key=key, vStr=value, vType=TCollector.TagType.STRING)
33
34
35 def _get_bool_tag(key: str, value: bool) -> TCollector.Tag:
36 """Returns jaeger boolean tag."""
37 return TCollector.Tag(key=key, vBool=value, vType=TCollector.TagType.BOOL)
38
39
40 def _get_long_tag(key: str, value: int) -> TCollector.Tag:
41 """Returns jaeger long tag."""
42 return TCollector.Tag(key=key, vLong=value, vType=TCollector.TagType.LONG)
43
44
45 def _get_double_tag(key: str, value: float) -> TCollector.Tag:
46 """Returns jaeger double tag."""
47 return TCollector.Tag(
48 key=key, vDouble=value, vType=TCollector.TagType.DOUBLE
49 )
50
51
52 def _get_trace_id_low(trace_id):
53 return _convert_int_to_i64(trace_id & 0xFFFFFFFFFFFFFFFF)
54
55
56 def _get_trace_id_high(trace_id):
57 return _convert_int_to_i64((trace_id >> 64) & 0xFFFFFFFFFFFFFFFF)
58
59
60 def _translate_attribute(
61 key: str, value: types.AttributeValue
62 ) -> Optional[TCollector.Tag]:
63 """Convert the attributes to jaeger tags."""
64 if isinstance(value, bool):
65 return _get_bool_tag(key, value)
66 if isinstance(value, str):
67 return _get_string_tag(key, value)
68 if isinstance(value, int):
69 return _get_long_tag(key, value)
70 if isinstance(value, float):
71 return _get_double_tag(key, value)
72 if isinstance(value, tuple):
73 return _get_string_tag(key, str(value))
74 return None
75
76
77 class ThriftTranslator(Translator):
78 def _translate_span(self, span: Span) -> TCollector.Span:
79 ctx = span.get_span_context()
80 trace_id = ctx.trace_id
81 span_id = ctx.span_id
82
83 start_time_us = _nsec_to_usec_round(span.start_time)
84 duration_us = _nsec_to_usec_round(span.end_time - span.start_time)
85
86 parent_id = span.parent.span_id if span.parent else 0
87
88 tags = self._extract_tags(span)
89 refs = self._extract_refs(span)
90 logs = self._extract_logs(span)
91
92 flags = int(ctx.trace_flags)
93
94 jaeger_span = TCollector.Span(
95 traceIdHigh=_get_trace_id_high(trace_id),
96 traceIdLow=_get_trace_id_low(trace_id),
97 spanId=_convert_int_to_i64(span_id),
98 operationName=span.name,
99 startTime=start_time_us,
100 duration=duration_us,
101 tags=tags,
102 logs=logs,
103 references=refs,
104 flags=flags,
105 parentSpanId=_convert_int_to_i64(parent_id),
106 )
107 return jaeger_span
108
109 def _extract_tags(self, span: Span) -> Sequence[TCollector.Tag]:
110
111 translated = []
112 if span.attributes:
113 for key, value in span.attributes.items():
114 tag = _translate_attribute(key, value)
115 if tag:
116 translated.append(tag)
117 if span.resource.attributes:
118 for key, value in span.resource.attributes.items():
119 tag = _translate_attribute(key, value)
120 if tag:
121 translated.append(tag)
122
123 code = _get_long_tag("status.code", span.status.status_code.value)
124 message = _get_string_tag("status.message", span.status.description)
125 kind = _get_string_tag("span.kind", OTLP_JAEGER_SPAN_KIND[span.kind])
126 translated.extend([code, message, kind])
127
128 # Instrumentation info tags
129 if span.instrumentation_info:
130 name = _get_string_tag(NAME_KEY, span.instrumentation_info.name)
131 version = _get_string_tag(
132 VERSION_KEY, span.instrumentation_info.version
133 )
134 translated.extend([name, version])
135
136 # Make sure to add "error" tag if span status is not OK
137 if not span.status.is_ok:
138 translated.append(_get_bool_tag("error", True))
139
140 return translated
141
142 def _extract_refs(
143 self, span: Span
144 ) -> Optional[Sequence[TCollector.SpanRef]]:
145 if not span.links:
146 return None
147
148 refs = []
149 for link in span.links:
150 trace_id = link.context.trace_id
151 span_id = link.context.span_id
152 refs.append(
153 TCollector.SpanRef(
154 refType=TCollector.SpanRefType.FOLLOWS_FROM,
155 traceIdHigh=_get_trace_id_high(trace_id),
156 traceIdLow=_get_trace_id_low(trace_id),
157 spanId=_convert_int_to_i64(span_id),
158 )
159 )
160 return refs
161
162 def _extract_logs(self, span: Span) -> Optional[Sequence[TCollector.Log]]:
163 """Returns jaeger logs if events exists, otherwise None.
164
165 Args:
166 span: span to extract logs
167 """
168 if not span.events:
169 return None
170
171 logs = []
172 for event in span.events:
173 fields = []
174 for key, value in event.attributes.items():
175 tag = _translate_attribute(key, value)
176 if tag:
177 fields.append(tag)
178
179 fields.append(
180 TCollector.Tag(
181 key="message",
182 vType=TCollector.TagType.STRING,
183 vStr=event.name,
184 )
185 )
186
187 event_timestamp_us = _nsec_to_usec_round(event.timestamp)
188 logs.append(
189 TCollector.Log(
190 timestamp=int(event_timestamp_us), fields=fields
191 )
192 )
193
194 return logs
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py
--- a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py
+++ b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py
@@ -24,7 +24,7 @@
VERSION_KEY,
Translator,
)
-from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace import Span, StatusCode
from opentelemetry.util import types
# pylint: disable=no-member,too-many-locals,no-self-use
@@ -190,16 +190,24 @@
if key_value:
translated.append(key_value)
- code = _get_long_key_value(
- "status.code", span.status.status_code.value
- )
- message = _get_string_key_value(
- "status.message", span.status.description
- )
- kind = _get_string_key_value(
- "span.kind", OTLP_JAEGER_SPAN_KIND[span.kind]
+ status = span.status
+ if status.status_code is not StatusCode.UNSET:
+ translated.append(
+ _get_string_key_value(
+ "otel.status_code", status.status_code.name
+ )
+ )
+ if status.description is not None:
+ translated.append(
+ _get_string_key_value(
+ "otel.status_description", status.description
+ )
+ )
+ translated.append(
+ _get_string_key_value(
+ "span.kind", OTLP_JAEGER_SPAN_KIND[span.kind]
+ )
)
- translated.extend([code, message, kind])
# Instrumentation info KeyValues
if span.instrumentation_info:
diff --git a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py
--- a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py
+++ b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py
@@ -23,7 +23,7 @@
_convert_int_to_i64,
_nsec_to_usec_round,
)
-from opentelemetry.sdk.trace import Span
+from opentelemetry.sdk.trace import Span, StatusCode
from opentelemetry.util import types
@@ -120,10 +120,21 @@
if tag:
translated.append(tag)
- code = _get_long_tag("status.code", span.status.status_code.value)
- message = _get_string_tag("status.message", span.status.description)
- kind = _get_string_tag("span.kind", OTLP_JAEGER_SPAN_KIND[span.kind])
- translated.extend([code, message, kind])
+ status = span.status
+ if status.status_code is not StatusCode.UNSET:
+ translated.append(
+ _get_string_tag("otel.status_code", status.status_code.name)
+ )
+ if status.description is not None:
+ translated.append(
+ _get_string_tag(
+ "otel.status_description", status.description
+ )
+ )
+
+ translated.append(
+ _get_string_tag("span.kind", OTLP_JAEGER_SPAN_KIND[span.kind])
+ )
# Instrumentation info tags
if span.instrumentation_info:
| {"golden_diff": "diff --git a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py\n--- a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py\n+++ b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py\n@@ -24,7 +24,7 @@\n VERSION_KEY,\n Translator,\n )\n-from opentelemetry.sdk.trace import Span\n+from opentelemetry.sdk.trace import Span, StatusCode\n from opentelemetry.util import types\n \n # pylint: disable=no-member,too-many-locals,no-self-use\n@@ -190,16 +190,24 @@\n if key_value:\n translated.append(key_value)\n \n- code = _get_long_key_value(\n- \"status.code\", span.status.status_code.value\n- )\n- message = _get_string_key_value(\n- \"status.message\", span.status.description\n- )\n- kind = _get_string_key_value(\n- \"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind]\n+ status = span.status\n+ if status.status_code is not StatusCode.UNSET:\n+ translated.append(\n+ _get_string_key_value(\n+ \"otel.status_code\", status.status_code.name\n+ )\n+ )\n+ if status.description is not None:\n+ translated.append(\n+ _get_string_key_value(\n+ \"otel.status_description\", status.description\n+ )\n+ )\n+ translated.append(\n+ _get_string_key_value(\n+ \"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind]\n+ )\n )\n- translated.extend([code, message, kind])\n \n # Instrumentation info KeyValues\n if span.instrumentation_info:\ndiff --git a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py\n--- a/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py\n+++ b/exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py\n@@ -23,7 +23,7 @@\n _convert_int_to_i64,\n _nsec_to_usec_round,\n )\n-from opentelemetry.sdk.trace import Span\n+from opentelemetry.sdk.trace import Span, StatusCode\n from opentelemetry.util import types\n \n \n@@ -120,10 +120,21 @@\n if tag:\n translated.append(tag)\n \n- code = _get_long_tag(\"status.code\", span.status.status_code.value)\n- message = _get_string_tag(\"status.message\", span.status.description)\n- kind = _get_string_tag(\"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind])\n- translated.extend([code, message, kind])\n+ status = span.status\n+ if status.status_code is not StatusCode.UNSET:\n+ translated.append(\n+ _get_string_tag(\"otel.status_code\", status.status_code.name)\n+ )\n+ if status.description is not None:\n+ translated.append(\n+ _get_string_tag(\n+ \"otel.status_description\", status.description\n+ )\n+ )\n+\n+ translated.append(\n+ _get_string_tag(\"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind])\n+ )\n \n # Instrumentation info tags\n if span.instrumentation_info:\n", "issue": "Update Jaeger exporter status code to error tag\nFrom the [specs](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/sdk_exporters/jaeger.md#status) Jaeger exporter tags for status code must be the string representation of the status code enum.\r\n\r\nRelated:\r\nhttps://github.com/open-telemetry/opentelemetry-python/issues/1402\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance 
with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Sequence\n\nfrom google.protobuf.duration_pb2 import Duration\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\nfrom opentelemetry.exporter.jaeger.gen import model_pb2\nfrom opentelemetry.exporter.jaeger.translate import (\n NAME_KEY,\n OTLP_JAEGER_SPAN_KIND,\n VERSION_KEY,\n Translator,\n)\nfrom opentelemetry.sdk.trace import Span\nfrom opentelemetry.util import types\n\n# pylint: disable=no-member,too-many-locals,no-self-use\n\n\ndef _trace_id_to_bytes(trace_id: int) -> bytes:\n \"\"\"Returns bytes representation of trace id.\"\"\"\n return trace_id.to_bytes(16, \"big\")\n\n\ndef _span_id_to_bytes(span_id: int) -> bytes:\n \"\"\"Returns bytes representation of span id\"\"\"\n return span_id.to_bytes(8, \"big\")\n\n\ndef _get_string_key_value(key, value: str) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger string KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_str=value, v_type=model_pb2.ValueType.STRING\n )\n\n\ndef _get_bool_key_value(key: str, value: bool) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger boolean KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_bool=value, v_type=model_pb2.ValueType.BOOL\n )\n\n\ndef _get_long_key_value(key: str, value: int) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger long KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_int64=value, v_type=model_pb2.ValueType.INT64\n )\n\n\ndef _get_double_key_value(key: str, value: float) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger double KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_float64=value, v_type=model_pb2.ValueType.FLOAT64\n )\n\n\ndef _get_binary_key_value(key: str, value: bytes) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger double KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_binary=value, v_type=model_pb2.ValueType.BINARY\n )\n\n\ndef _translate_attribute(\n key: str, value: types.AttributeValue\n) -> Optional[model_pb2.KeyValue]:\n \"\"\"Convert the attributes to jaeger keyvalues.\"\"\"\n translated = None\n if isinstance(value, bool):\n translated = _get_bool_key_value(key, value)\n elif isinstance(value, str):\n translated = _get_string_key_value(key, value)\n elif isinstance(value, int):\n translated = _get_long_key_value(key, value)\n elif isinstance(value, float):\n translated = _get_double_key_value(key, value)\n elif isinstance(value, tuple):\n translated = _get_string_key_value(key, str(value))\n return translated\n\n\ndef _extract_resource_tags(span: Span) -> Sequence[model_pb2.KeyValue]:\n \"\"\"Extracts resource attributes from span and returns\n list of jaeger keyvalues.\n\n Args:\n span: span to extract keyvalues\n \"\"\"\n tags = []\n for key, value in span.resource.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n tags.append(tag)\n return tags\n\n\ndef _duration_from_two_time_stamps(\n start: Timestamp, end: Timestamp\n) -> Duration:\n \"\"\"Compute Duration from two Timestamps.\n\n See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration\n \"\"\"\n duration = Duration(\n seconds=end.seconds - start.seconds, 
nanos=end.nanos - start.nanos,\n )\n # pylint: disable=chained-comparison\n if duration.seconds < 0 and duration.nanos > 0:\n duration.seconds += 1\n duration.nanos -= 1000000000\n elif duration.seconds > 0 and duration.nanos < 0:\n duration.seconds -= 1\n duration.nanos += 1000000000\n return duration\n\n\ndef _proto_timestamp_from_epoch_nanos(nsec: int) -> Timestamp:\n \"\"\"Create a Timestamp from the number of nanoseconds elapsed from the epoch.\n\n See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#timestamp\n \"\"\"\n nsec_time = nsec / 1e9\n seconds = int(nsec_time)\n nanos = int((nsec_time - seconds) * 1e9)\n return Timestamp(seconds=seconds, nanos=nanos)\n\n\nclass ProtobufTranslator(Translator):\n def __init__(self, svc_name):\n self.svc_name = svc_name\n\n def _translate_span(self, span: Span) -> model_pb2.Span:\n\n ctx = span.get_span_context()\n # pb2 span expects in byte format\n trace_id = _trace_id_to_bytes(ctx.trace_id)\n span_id = _span_id_to_bytes(ctx.span_id)\n\n start_time = _proto_timestamp_from_epoch_nanos(span.start_time)\n end_time = _proto_timestamp_from_epoch_nanos(span.end_time)\n duration = _duration_from_two_time_stamps(start_time, end_time)\n\n tags = self._extract_tags(span)\n refs = self._extract_refs(span)\n logs = self._extract_logs(span)\n\n flags = int(ctx.trace_flags)\n\n process = model_pb2.Process(\n service_name=self.svc_name, tags=_extract_resource_tags(span)\n )\n jaeger_span = model_pb2.Span(\n trace_id=trace_id,\n span_id=span_id,\n operation_name=span.name,\n references=refs,\n flags=flags,\n start_time=start_time,\n duration=duration,\n tags=tags,\n logs=logs,\n process=process,\n )\n return jaeger_span\n\n def _extract_tags(self, span: Span) -> Sequence[model_pb2.KeyValue]:\n translated = []\n if span.attributes:\n for key, value in span.attributes.items():\n key_value = _translate_attribute(key, value)\n if key_value is not None:\n translated.append(key_value)\n if span.resource.attributes:\n for key, value in span.resource.attributes.items():\n key_value = _translate_attribute(key, value)\n if key_value:\n translated.append(key_value)\n\n code = _get_long_key_value(\n \"status.code\", span.status.status_code.value\n )\n message = _get_string_key_value(\n \"status.message\", span.status.description\n )\n kind = _get_string_key_value(\n \"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind]\n )\n translated.extend([code, message, kind])\n\n # Instrumentation info KeyValues\n if span.instrumentation_info:\n name = _get_string_key_value(\n NAME_KEY, span.instrumentation_info.name\n )\n version = _get_string_key_value(\n VERSION_KEY, span.instrumentation_info.version\n )\n translated.extend([name, version])\n\n # Make sure to add \"error\" tag if span status is not OK\n if not span.status.is_ok:\n translated.append(_get_bool_key_value(\"error\", True))\n\n return translated\n\n def _extract_refs(\n self, span: Span\n ) -> Optional[Sequence[model_pb2.SpanRef]]:\n if not span.links:\n return None\n\n refs = []\n for link in span.links:\n trace_id = link.context.trace_id\n span_id = link.context.span_id\n refs.append(\n model_pb2.SpanRef(\n ref_type=model_pb2.SpanRefType.FOLLOWS_FROM,\n trace_id=_trace_id_to_bytes(trace_id),\n span_id=_span_id_to_bytes(span_id),\n )\n )\n return refs\n\n def _extract_logs(self, span: Span) -> Optional[Sequence[model_pb2.Log]]:\n if not span.events:\n return None\n\n logs = []\n for event in span.events:\n fields = []\n for key, value in event.attributes.items():\n tag = 
_translate_attribute(key, value)\n if tag:\n fields.append(tag)\n\n fields.append(\n _get_string_key_value(key=\"message\", value=event.name,)\n )\n event_ts = _proto_timestamp_from_epoch_nanos(event.timestamp)\n logs.append(model_pb2.Log(timestamp=event_ts, fields=fields))\n\n return logs\n", "path": "exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=no-self-use\nfrom typing import Optional, Sequence\n\nfrom opentelemetry.exporter.jaeger.gen.jaeger import Collector as TCollector\nfrom opentelemetry.exporter.jaeger.translate import (\n NAME_KEY,\n OTLP_JAEGER_SPAN_KIND,\n VERSION_KEY,\n Translator,\n _convert_int_to_i64,\n _nsec_to_usec_round,\n)\nfrom opentelemetry.sdk.trace import Span\nfrom opentelemetry.util import types\n\n\ndef _get_string_tag(key, value: str) -> TCollector.Tag:\n \"\"\"Returns jaeger string tag.\"\"\"\n return TCollector.Tag(key=key, vStr=value, vType=TCollector.TagType.STRING)\n\n\ndef _get_bool_tag(key: str, value: bool) -> TCollector.Tag:\n \"\"\"Returns jaeger boolean tag.\"\"\"\n return TCollector.Tag(key=key, vBool=value, vType=TCollector.TagType.BOOL)\n\n\ndef _get_long_tag(key: str, value: int) -> TCollector.Tag:\n \"\"\"Returns jaeger long tag.\"\"\"\n return TCollector.Tag(key=key, vLong=value, vType=TCollector.TagType.LONG)\n\n\ndef _get_double_tag(key: str, value: float) -> TCollector.Tag:\n \"\"\"Returns jaeger double tag.\"\"\"\n return TCollector.Tag(\n key=key, vDouble=value, vType=TCollector.TagType.DOUBLE\n )\n\n\ndef _get_trace_id_low(trace_id):\n return _convert_int_to_i64(trace_id & 0xFFFFFFFFFFFFFFFF)\n\n\ndef _get_trace_id_high(trace_id):\n return _convert_int_to_i64((trace_id >> 64) & 0xFFFFFFFFFFFFFFFF)\n\n\ndef _translate_attribute(\n key: str, value: types.AttributeValue\n) -> Optional[TCollector.Tag]:\n \"\"\"Convert the attributes to jaeger tags.\"\"\"\n if isinstance(value, bool):\n return _get_bool_tag(key, value)\n if isinstance(value, str):\n return _get_string_tag(key, value)\n if isinstance(value, int):\n return _get_long_tag(key, value)\n if isinstance(value, float):\n return _get_double_tag(key, value)\n if isinstance(value, tuple):\n return _get_string_tag(key, str(value))\n return None\n\n\nclass ThriftTranslator(Translator):\n def _translate_span(self, span: Span) -> TCollector.Span:\n ctx = span.get_span_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n start_time_us = _nsec_to_usec_round(span.start_time)\n duration_us = _nsec_to_usec_round(span.end_time - span.start_time)\n\n parent_id = span.parent.span_id if span.parent else 0\n\n tags = self._extract_tags(span)\n refs = self._extract_refs(span)\n logs = self._extract_logs(span)\n\n flags = int(ctx.trace_flags)\n\n jaeger_span = TCollector.Span(\n traceIdHigh=_get_trace_id_high(trace_id),\n traceIdLow=_get_trace_id_low(trace_id),\n spanId=_convert_int_to_i64(span_id),\n operationName=span.name,\n 
startTime=start_time_us,\n duration=duration_us,\n tags=tags,\n logs=logs,\n references=refs,\n flags=flags,\n parentSpanId=_convert_int_to_i64(parent_id),\n )\n return jaeger_span\n\n def _extract_tags(self, span: Span) -> Sequence[TCollector.Tag]:\n\n translated = []\n if span.attributes:\n for key, value in span.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n translated.append(tag)\n if span.resource.attributes:\n for key, value in span.resource.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n translated.append(tag)\n\n code = _get_long_tag(\"status.code\", span.status.status_code.value)\n message = _get_string_tag(\"status.message\", span.status.description)\n kind = _get_string_tag(\"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind])\n translated.extend([code, message, kind])\n\n # Instrumentation info tags\n if span.instrumentation_info:\n name = _get_string_tag(NAME_KEY, span.instrumentation_info.name)\n version = _get_string_tag(\n VERSION_KEY, span.instrumentation_info.version\n )\n translated.extend([name, version])\n\n # Make sure to add \"error\" tag if span status is not OK\n if not span.status.is_ok:\n translated.append(_get_bool_tag(\"error\", True))\n\n return translated\n\n def _extract_refs(\n self, span: Span\n ) -> Optional[Sequence[TCollector.SpanRef]]:\n if not span.links:\n return None\n\n refs = []\n for link in span.links:\n trace_id = link.context.trace_id\n span_id = link.context.span_id\n refs.append(\n TCollector.SpanRef(\n refType=TCollector.SpanRefType.FOLLOWS_FROM,\n traceIdHigh=_get_trace_id_high(trace_id),\n traceIdLow=_get_trace_id_low(trace_id),\n spanId=_convert_int_to_i64(span_id),\n )\n )\n return refs\n\n def _extract_logs(self, span: Span) -> Optional[Sequence[TCollector.Log]]:\n \"\"\"Returns jaeger logs if events exists, otherwise None.\n\n Args:\n span: span to extract logs\n \"\"\"\n if not span.events:\n return None\n\n logs = []\n for event in span.events:\n fields = []\n for key, value in event.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n fields.append(tag)\n\n fields.append(\n TCollector.Tag(\n key=\"message\",\n vType=TCollector.TagType.STRING,\n vStr=event.name,\n )\n )\n\n event_timestamp_us = _nsec_to_usec_round(event.timestamp)\n logs.append(\n TCollector.Log(\n timestamp=int(event_timestamp_us), fields=fields\n )\n )\n\n return logs\n", "path": "exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Optional, Sequence\n\nfrom google.protobuf.duration_pb2 import Duration\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\nfrom opentelemetry.exporter.jaeger.gen import model_pb2\nfrom opentelemetry.exporter.jaeger.translate import (\n NAME_KEY,\n OTLP_JAEGER_SPAN_KIND,\n VERSION_KEY,\n Translator,\n)\nfrom opentelemetry.sdk.trace import Span, StatusCode\nfrom opentelemetry.util import 
types\n\n# pylint: disable=no-member,too-many-locals,no-self-use\n\n\ndef _trace_id_to_bytes(trace_id: int) -> bytes:\n \"\"\"Returns bytes representation of trace id.\"\"\"\n return trace_id.to_bytes(16, \"big\")\n\n\ndef _span_id_to_bytes(span_id: int) -> bytes:\n \"\"\"Returns bytes representation of span id\"\"\"\n return span_id.to_bytes(8, \"big\")\n\n\ndef _get_string_key_value(key, value: str) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger string KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_str=value, v_type=model_pb2.ValueType.STRING\n )\n\n\ndef _get_bool_key_value(key: str, value: bool) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger boolean KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_bool=value, v_type=model_pb2.ValueType.BOOL\n )\n\n\ndef _get_long_key_value(key: str, value: int) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger long KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_int64=value, v_type=model_pb2.ValueType.INT64\n )\n\n\ndef _get_double_key_value(key: str, value: float) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger double KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_float64=value, v_type=model_pb2.ValueType.FLOAT64\n )\n\n\ndef _get_binary_key_value(key: str, value: bytes) -> model_pb2.KeyValue:\n \"\"\"Returns jaeger double KeyValue.\"\"\"\n return model_pb2.KeyValue(\n key=key, v_binary=value, v_type=model_pb2.ValueType.BINARY\n )\n\n\ndef _translate_attribute(\n key: str, value: types.AttributeValue\n) -> Optional[model_pb2.KeyValue]:\n \"\"\"Convert the attributes to jaeger keyvalues.\"\"\"\n translated = None\n if isinstance(value, bool):\n translated = _get_bool_key_value(key, value)\n elif isinstance(value, str):\n translated = _get_string_key_value(key, value)\n elif isinstance(value, int):\n translated = _get_long_key_value(key, value)\n elif isinstance(value, float):\n translated = _get_double_key_value(key, value)\n elif isinstance(value, tuple):\n translated = _get_string_key_value(key, str(value))\n return translated\n\n\ndef _extract_resource_tags(span: Span) -> Sequence[model_pb2.KeyValue]:\n \"\"\"Extracts resource attributes from span and returns\n list of jaeger keyvalues.\n\n Args:\n span: span to extract keyvalues\n \"\"\"\n tags = []\n for key, value in span.resource.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n tags.append(tag)\n return tags\n\n\ndef _duration_from_two_time_stamps(\n start: Timestamp, end: Timestamp\n) -> Duration:\n \"\"\"Compute Duration from two Timestamps.\n\n See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration\n \"\"\"\n duration = Duration(\n seconds=end.seconds - start.seconds, nanos=end.nanos - start.nanos,\n )\n # pylint: disable=chained-comparison\n if duration.seconds < 0 and duration.nanos > 0:\n duration.seconds += 1\n duration.nanos -= 1000000000\n elif duration.seconds > 0 and duration.nanos < 0:\n duration.seconds -= 1\n duration.nanos += 1000000000\n return duration\n\n\ndef _proto_timestamp_from_epoch_nanos(nsec: int) -> Timestamp:\n \"\"\"Create a Timestamp from the number of nanoseconds elapsed from the epoch.\n\n See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#timestamp\n \"\"\"\n nsec_time = nsec / 1e9\n seconds = int(nsec_time)\n nanos = int((nsec_time - seconds) * 1e9)\n return Timestamp(seconds=seconds, nanos=nanos)\n\n\nclass ProtobufTranslator(Translator):\n def __init__(self, svc_name):\n self.svc_name = svc_name\n\n def _translate_span(self, span: Span) -> 
model_pb2.Span:\n\n ctx = span.get_span_context()\n # pb2 span expects in byte format\n trace_id = _trace_id_to_bytes(ctx.trace_id)\n span_id = _span_id_to_bytes(ctx.span_id)\n\n start_time = _proto_timestamp_from_epoch_nanos(span.start_time)\n end_time = _proto_timestamp_from_epoch_nanos(span.end_time)\n duration = _duration_from_two_time_stamps(start_time, end_time)\n\n tags = self._extract_tags(span)\n refs = self._extract_refs(span)\n logs = self._extract_logs(span)\n\n flags = int(ctx.trace_flags)\n\n process = model_pb2.Process(\n service_name=self.svc_name, tags=_extract_resource_tags(span)\n )\n jaeger_span = model_pb2.Span(\n trace_id=trace_id,\n span_id=span_id,\n operation_name=span.name,\n references=refs,\n flags=flags,\n start_time=start_time,\n duration=duration,\n tags=tags,\n logs=logs,\n process=process,\n )\n return jaeger_span\n\n def _extract_tags(self, span: Span) -> Sequence[model_pb2.KeyValue]:\n translated = []\n if span.attributes:\n for key, value in span.attributes.items():\n key_value = _translate_attribute(key, value)\n if key_value is not None:\n translated.append(key_value)\n if span.resource.attributes:\n for key, value in span.resource.attributes.items():\n key_value = _translate_attribute(key, value)\n if key_value:\n translated.append(key_value)\n\n status = span.status\n if status.status_code is not StatusCode.UNSET:\n translated.append(\n _get_string_key_value(\n \"otel.status_code\", status.status_code.name\n )\n )\n if status.description is not None:\n translated.append(\n _get_string_key_value(\n \"otel.status_description\", status.description\n )\n )\n translated.append(\n _get_string_key_value(\n \"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind]\n )\n )\n\n # Instrumentation info KeyValues\n if span.instrumentation_info:\n name = _get_string_key_value(\n NAME_KEY, span.instrumentation_info.name\n )\n version = _get_string_key_value(\n VERSION_KEY, span.instrumentation_info.version\n )\n translated.extend([name, version])\n\n # Make sure to add \"error\" tag if span status is not OK\n if not span.status.is_ok:\n translated.append(_get_bool_key_value(\"error\", True))\n\n return translated\n\n def _extract_refs(\n self, span: Span\n ) -> Optional[Sequence[model_pb2.SpanRef]]:\n if not span.links:\n return None\n\n refs = []\n for link in span.links:\n trace_id = link.context.trace_id\n span_id = link.context.span_id\n refs.append(\n model_pb2.SpanRef(\n ref_type=model_pb2.SpanRefType.FOLLOWS_FROM,\n trace_id=_trace_id_to_bytes(trace_id),\n span_id=_span_id_to_bytes(span_id),\n )\n )\n return refs\n\n def _extract_logs(self, span: Span) -> Optional[Sequence[model_pb2.Log]]:\n if not span.events:\n return None\n\n logs = []\n for event in span.events:\n fields = []\n for key, value in event.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n fields.append(tag)\n\n fields.append(\n _get_string_key_value(key=\"message\", value=event.name,)\n )\n event_ts = _proto_timestamp_from_epoch_nanos(event.timestamp)\n logs.append(model_pb2.Log(timestamp=event_ts, fields=fields))\n\n return logs\n", "path": "exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/protobuf.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, 
software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# pylint: disable=no-self-use\nfrom typing import Optional, Sequence\n\nfrom opentelemetry.exporter.jaeger.gen.jaeger import Collector as TCollector\nfrom opentelemetry.exporter.jaeger.translate import (\n NAME_KEY,\n OTLP_JAEGER_SPAN_KIND,\n VERSION_KEY,\n Translator,\n _convert_int_to_i64,\n _nsec_to_usec_round,\n)\nfrom opentelemetry.sdk.trace import Span, StatusCode\nfrom opentelemetry.util import types\n\n\ndef _get_string_tag(key, value: str) -> TCollector.Tag:\n \"\"\"Returns jaeger string tag.\"\"\"\n return TCollector.Tag(key=key, vStr=value, vType=TCollector.TagType.STRING)\n\n\ndef _get_bool_tag(key: str, value: bool) -> TCollector.Tag:\n \"\"\"Returns jaeger boolean tag.\"\"\"\n return TCollector.Tag(key=key, vBool=value, vType=TCollector.TagType.BOOL)\n\n\ndef _get_long_tag(key: str, value: int) -> TCollector.Tag:\n \"\"\"Returns jaeger long tag.\"\"\"\n return TCollector.Tag(key=key, vLong=value, vType=TCollector.TagType.LONG)\n\n\ndef _get_double_tag(key: str, value: float) -> TCollector.Tag:\n \"\"\"Returns jaeger double tag.\"\"\"\n return TCollector.Tag(\n key=key, vDouble=value, vType=TCollector.TagType.DOUBLE\n )\n\n\ndef _get_trace_id_low(trace_id):\n return _convert_int_to_i64(trace_id & 0xFFFFFFFFFFFFFFFF)\n\n\ndef _get_trace_id_high(trace_id):\n return _convert_int_to_i64((trace_id >> 64) & 0xFFFFFFFFFFFFFFFF)\n\n\ndef _translate_attribute(\n key: str, value: types.AttributeValue\n) -> Optional[TCollector.Tag]:\n \"\"\"Convert the attributes to jaeger tags.\"\"\"\n if isinstance(value, bool):\n return _get_bool_tag(key, value)\n if isinstance(value, str):\n return _get_string_tag(key, value)\n if isinstance(value, int):\n return _get_long_tag(key, value)\n if isinstance(value, float):\n return _get_double_tag(key, value)\n if isinstance(value, tuple):\n return _get_string_tag(key, str(value))\n return None\n\n\nclass ThriftTranslator(Translator):\n def _translate_span(self, span: Span) -> TCollector.Span:\n ctx = span.get_span_context()\n trace_id = ctx.trace_id\n span_id = ctx.span_id\n\n start_time_us = _nsec_to_usec_round(span.start_time)\n duration_us = _nsec_to_usec_round(span.end_time - span.start_time)\n\n parent_id = span.parent.span_id if span.parent else 0\n\n tags = self._extract_tags(span)\n refs = self._extract_refs(span)\n logs = self._extract_logs(span)\n\n flags = int(ctx.trace_flags)\n\n jaeger_span = TCollector.Span(\n traceIdHigh=_get_trace_id_high(trace_id),\n traceIdLow=_get_trace_id_low(trace_id),\n spanId=_convert_int_to_i64(span_id),\n operationName=span.name,\n startTime=start_time_us,\n duration=duration_us,\n tags=tags,\n logs=logs,\n references=refs,\n flags=flags,\n parentSpanId=_convert_int_to_i64(parent_id),\n )\n return jaeger_span\n\n def _extract_tags(self, span: Span) -> Sequence[TCollector.Tag]:\n\n translated = []\n if span.attributes:\n for key, value in span.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n translated.append(tag)\n if span.resource.attributes:\n for key, value in span.resource.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n translated.append(tag)\n\n status = span.status\n if status.status_code is not StatusCode.UNSET:\n translated.append(\n _get_string_tag(\"otel.status_code\", status.status_code.name)\n 
)\n if status.description is not None:\n translated.append(\n _get_string_tag(\n \"otel.status_description\", status.description\n )\n )\n\n translated.append(\n _get_string_tag(\"span.kind\", OTLP_JAEGER_SPAN_KIND[span.kind])\n )\n\n # Instrumentation info tags\n if span.instrumentation_info:\n name = _get_string_tag(NAME_KEY, span.instrumentation_info.name)\n version = _get_string_tag(\n VERSION_KEY, span.instrumentation_info.version\n )\n translated.extend([name, version])\n\n # Make sure to add \"error\" tag if span status is not OK\n if not span.status.is_ok:\n translated.append(_get_bool_tag(\"error\", True))\n\n return translated\n\n def _extract_refs(\n self, span: Span\n ) -> Optional[Sequence[TCollector.SpanRef]]:\n if not span.links:\n return None\n\n refs = []\n for link in span.links:\n trace_id = link.context.trace_id\n span_id = link.context.span_id\n refs.append(\n TCollector.SpanRef(\n refType=TCollector.SpanRefType.FOLLOWS_FROM,\n traceIdHigh=_get_trace_id_high(trace_id),\n traceIdLow=_get_trace_id_low(trace_id),\n spanId=_convert_int_to_i64(span_id),\n )\n )\n return refs\n\n def _extract_logs(self, span: Span) -> Optional[Sequence[TCollector.Log]]:\n \"\"\"Returns jaeger logs if events exists, otherwise None.\n\n Args:\n span: span to extract logs\n \"\"\"\n if not span.events:\n return None\n\n logs = []\n for event in span.events:\n fields = []\n for key, value in event.attributes.items():\n tag = _translate_attribute(key, value)\n if tag:\n fields.append(tag)\n\n fields.append(\n TCollector.Tag(\n key=\"message\",\n vType=TCollector.TagType.STRING,\n vStr=event.name,\n )\n )\n\n event_timestamp_us = _nsec_to_usec_round(event.timestamp)\n logs.append(\n TCollector.Log(\n timestamp=int(event_timestamp_us), fields=fields\n )\n )\n\n return logs\n", "path": "exporter/opentelemetry-exporter-jaeger/src/opentelemetry/exporter/jaeger/translate/thrift.py"}]} |
gh_patches_debug_3375 | rasdani/github-patches | git_diff | mitmproxy__mitmproxy-1988 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change view._store from dictionary to OrderedDict
##### Steps to reproduce the problem:
1. Mark multiple flows in the flow list using `m` key
2. Toggle to show-marked-only view using `M`
3. mitmproxy rearranges flow list when `M` is toggled
##### Any other comments? What have you tried so far?
This is an issue with the flow list mentioned earlier in #1960. The marked flows can get randomly rearranged whenever M or Z is pressed, as it iterates through `view._store` which is a dictionary, to refresh the view.
##### System information
Mitmproxy version: 2.0.0 (v1.0.1-45-g0022c81)
Python version: 3.5.3
Platform: Linux-4.8.2-c9-x86_64-with-debian-jessie-sid
SSL version: OpenSSL 1.0.1f 6 Jan 2014
Linux distro: debian jessie/sid
<!--
Cut and paste the output of "mitmproxy --version".
If you're using an older version if mitmproxy, please specify the version
and OS.
-->
--- END ISSUE ---
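To make the ordering behaviour described in the issue concrete, here is a minimal, self-contained sketch. It is not taken from the mitmproxy codebase; the flow ids are invented, and it assumes Python 3.5 (the interpreter in the report), where a plain `dict` does not preserve insertion order while `collections.OrderedDict` does — which is why iterating the store during a view refresh can shuffle flows:
```python
# Illustrative only: shows why iterating a plain dict on Python 3.5 can yield
# flows in an arbitrary order, while an OrderedDict keeps insertion order.
import collections

plain_store = {}                           # what view._store currently is
ordered_store = collections.OrderedDict()  # what the issue asks for instead

for flow_id in ("flow-aaa", "flow-bbb", "flow-ccc"):
    plain_store[flow_id] = flow_id
    ordered_store[flow_id] = flow_id

print(list(plain_store))    # on 3.5 this may print the ids in any order
print(list(ordered_store))  # always ['flow-aaa', 'flow-bbb', 'flow-ccc']
```
(On CPython 3.7+ plain dicts also keep insertion order, but the reported interpreter is 3.5, so the distinction matters here.)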
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/addons/view.py`
Content:
```
1 """
2 The View:
3
4 - Keeps track of a store of flows
5 - Maintains a filtered, ordered view onto that list of flows
6 - Exposes a number of signals so the view can be monitored
7 - Tracks focus within the view
8 - Exposes a settings store for flows that automatically expires if the flow is
9 removed from the store.
10 """
11 import collections
12 import typing
13 import datetime
14
15 import blinker
16 import sortedcontainers
17
18 import mitmproxy.flow
19 from mitmproxy import flowfilter
20 from mitmproxy import exceptions
21
22 # The underlying sorted list implementation expects the sort key to be stable
23 # for the lifetime of the object. However, if we sort by size, for instance,
24 # the sort order changes as the flow progresses through its lifecycle. We
25 # address this through two means:
26 #
27 # - Let order keys cache the sort value by flow ID.
28 #
29 # - Add a facility to refresh items in the list by removing and re-adding them
30 # when they are updated.
31
32
33 class _OrderKey:
34 def __init__(self, view):
35 self.view = view
36
37 def generate(self, f: mitmproxy.flow.Flow) -> typing.Any: # pragma: no cover
38 pass
39
40 def refresh(self, f):
41 k = self._key()
42 old = self.view.settings[f][k]
43 new = self.generate(f)
44 if old != new:
45 self.view._view.remove(f)
46 self.view.settings[f][k] = new
47 self.view._view.add(f)
48 self.view.sig_view_refresh.send(self.view)
49
50 def _key(self):
51 return "_order_%s" % id(self)
52
53 def __call__(self, f):
54 if f.id in self.view._store:
55 k = self._key()
56 s = self.view.settings[f]
57 if k in s:
58 return s[k]
59 val = self.generate(f)
60 s[k] = val
61 return val
62 else:
63 return self.generate(f)
64
65
66 class OrderRequestStart(_OrderKey):
67 def generate(self, f: mitmproxy.flow.Flow) -> datetime.datetime:
68 return f.request.timestamp_start or 0
69
70
71 class OrderRequestMethod(_OrderKey):
72 def generate(self, f: mitmproxy.flow.Flow) -> str:
73 return f.request.method
74
75
76 class OrderRequestURL(_OrderKey):
77 def generate(self, f: mitmproxy.flow.Flow) -> str:
78 return f.request.url
79
80
81 class OrderKeySize(_OrderKey):
82 def generate(self, f: mitmproxy.flow.Flow) -> int:
83 s = 0
84 if f.request.raw_content:
85 s += len(f.request.raw_content)
86 if f.response and f.response.raw_content:
87 s += len(f.response.raw_content)
88 return s
89
90
91 matchall = flowfilter.parse(".")
92
93
94 orders = [
95 ("t", "time"),
96 ("m", "method"),
97 ("u", "url"),
98 ("z", "size"),
99 ]
100
101
102 class View(collections.Sequence):
103 def __init__(self):
104 super().__init__()
105 self._store = {}
106 self.filter = matchall
107 # Should we show only marked flows?
108 self.show_marked = False
109
110 self.default_order = OrderRequestStart(self)
111 self.orders = dict(
112 time = self.default_order,
113 method = OrderRequestMethod(self),
114 url = OrderRequestURL(self),
115 size = OrderKeySize(self),
116 )
117 self.order_key = self.default_order
118 self.order_reversed = False
119 self.focus_follow = False
120
121 self._view = sortedcontainers.SortedListWithKey(key = self.order_key)
122
123 # The sig_view* signals broadcast events that affect the view. That is,
124 # an update to a flow in the store but not in the view does not trigger
125 # a signal. All signals are called after the view has been updated.
126 self.sig_view_update = blinker.Signal()
127 self.sig_view_add = blinker.Signal()
128 self.sig_view_remove = blinker.Signal()
129 # Signals that the view should be refreshed completely
130 self.sig_view_refresh = blinker.Signal()
131
132 # The sig_store* signals broadcast events that affect the underlying
133 # store. If a flow is removed from just the view, sig_view_remove is
134 # triggered. If it is removed from the store while it is also in the
135 # view, both sig_store_remove and sig_view_remove are triggered.
136 self.sig_store_remove = blinker.Signal()
137 # Signals that the store should be refreshed completely
138 self.sig_store_refresh = blinker.Signal()
139
140 self.focus = Focus(self)
141 self.settings = Settings(self)
142
143 def store_count(self):
144 return len(self._store)
145
146 def inbounds(self, index: int) -> bool:
147 """
148 Is this 0 <= index < len(self)
149 """
150 return 0 <= index < len(self)
151
152 def _rev(self, idx: int) -> int:
153 """
154 Reverses an index, if needed
155 """
156 if self.order_reversed:
157 if idx < 0:
158 idx = -idx - 1
159 else:
160 idx = len(self._view) - idx - 1
161 if idx < 0:
162 raise IndexError
163 return idx
164
165 def __len__(self):
166 return len(self._view)
167
168 def __getitem__(self, offset) -> mitmproxy.flow.Flow:
169 return self._view[self._rev(offset)]
170
171 # Reflect some methods to the efficient underlying implementation
172
173 def _bisect(self, f: mitmproxy.flow.Flow) -> int:
174 v = self._view.bisect_right(f)
175 return self._rev(v - 1) + 1
176
177 def index(self, f: mitmproxy.flow.Flow, start: int = 0, stop: typing.Optional[int] = None) -> int:
178 return self._rev(self._view.index(f, start, stop))
179
180 def __contains__(self, f: mitmproxy.flow.Flow) -> bool:
181 return self._view.__contains__(f)
182
183 def _order_key_name(self):
184 return "_order_%s" % id(self.order_key)
185
186 def _base_add(self, f):
187 self.settings[f][self._order_key_name()] = self.order_key(f)
188 self._view.add(f)
189
190 def _refilter(self):
191 self._view.clear()
192 for i in self._store.values():
193 if self.show_marked and not i.marked:
194 continue
195 if self.filter(i):
196 self._base_add(i)
197 self.sig_view_refresh.send(self)
198
199 # API
200 def toggle_marked(self):
201 self.show_marked = not self.show_marked
202 self._refilter()
203
204 def set_reversed(self, value: bool):
205 self.order_reversed = value
206 self.sig_view_refresh.send(self)
207
208 def set_order(self, order_key: typing.Callable):
209 """
210 Sets the current view order.
211 """
212 self.order_key = order_key
213 newview = sortedcontainers.SortedListWithKey(key=order_key)
214 newview.update(self._view)
215 self._view = newview
216
217 def set_filter(self, flt: typing.Optional[flowfilter.TFilter]):
218 """
219 Sets the current view filter.
220 """
221 self.filter = flt or matchall
222 self._refilter()
223
224 def clear(self):
225 """
226 Clears both the store and view.
227 """
228 self._store.clear()
229 self._view.clear()
230 self.sig_view_refresh.send(self)
231 self.sig_store_refresh.send(self)
232
233 def clear_not_marked(self):
234 """
235 Clears only the unmarked flows.
236 """
237 for flow in self._store.copy().values():
238 if not flow.marked:
239 self._store.pop(flow.id)
240
241 self._refilter()
242 self.sig_store_refresh.send(self)
243
244 def add(self, f: mitmproxy.flow.Flow) -> bool:
245 """
246 Adds a flow to the state. If the flow already exists, it is
247 ignored.
248 """
249 if f.id not in self._store:
250 self._store[f.id] = f
251 if self.filter(f):
252 self._base_add(f)
253 if self.focus_follow:
254 self.focus.flow = f
255 self.sig_view_add.send(self, flow=f)
256
257 def remove(self, f: mitmproxy.flow.Flow):
258 """
259 Removes the flow from the underlying store and the view.
260 """
261 if f.id in self._store:
262 if f in self._view:
263 self._view.remove(f)
264 self.sig_view_remove.send(self, flow=f)
265 del self._store[f.id]
266 self.sig_store_remove.send(self, flow=f)
267
268 def update(self, f: mitmproxy.flow.Flow):
269 """
270 Updates a flow. If the flow is not in the state, it's ignored.
271 """
272 if f.id in self._store:
273 if self.filter(f):
274 if f not in self._view:
275 self._base_add(f)
276 if self.focus_follow:
277 self.focus.flow = f
278 self.sig_view_add.send(self, flow=f)
279 else:
280 # This is a tad complicated. The sortedcontainers
281 # implementation assumes that the order key is stable. If
282 # it changes mid-way Very Bad Things happen. We detect when
283 # this happens, and re-fresh the item.
284 self.order_key.refresh(f)
285 self.sig_view_update.send(self, flow=f)
286 else:
287 try:
288 self._view.remove(f)
289 self.sig_view_remove.send(self, flow=f)
290 except ValueError:
291 # The value was not in the view
292 pass
293
294 def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]:
295 """
296 Get flow with the given id from the store.
297 Returns None if the flow is not found.
298 """
299 return self._store.get(flow_id)
300
301 # Event handlers
302 def configure(self, opts, updated):
303 if "filter" in updated:
304 filt = None
305 if opts.filter:
306 filt = flowfilter.parse(opts.filter)
307 if not filt:
308 raise exceptions.OptionsError(
309 "Invalid interception filter: %s" % opts.filter
310 )
311 self.set_filter(filt)
312 if "console_order" in updated:
313 if opts.console_order is None:
314 self.set_order(self.default_order)
315 else:
316 if opts.console_order not in self.orders:
317 raise exceptions.OptionsError(
318 "Unknown flow order: %s" % opts.console_order
319 )
320 self.set_order(self.orders[opts.console_order])
321 if "console_order_reversed" in updated:
322 self.set_reversed(opts.console_order_reversed)
323 if "console_focus_follow" in updated:
324 self.focus_follow = opts.console_focus_follow
325
326 def request(self, f):
327 self.add(f)
328
329 def error(self, f):
330 self.update(f)
331
332 def response(self, f):
333 self.update(f)
334
335 def intercept(self, f):
336 self.update(f)
337
338 def resume(self, f):
339 self.update(f)
340
341 def kill(self, f):
342 self.update(f)
343
344
345 class Focus:
346 """
347 Tracks a focus element within a View.
348 """
349 def __init__(self, v: View) -> None:
350 self.view = v
351 self._flow = None # type: mitmproxy.flow.Flow
352 self.sig_change = blinker.Signal()
353 if len(self.view):
354 self.flow = self.view[0]
355 v.sig_view_add.connect(self._sig_view_add)
356 v.sig_view_remove.connect(self._sig_view_remove)
357 v.sig_view_refresh.connect(self._sig_view_refresh)
358
359 @property
360 def flow(self) -> typing.Optional[mitmproxy.flow.Flow]:
361 return self._flow
362
363 @flow.setter
364 def flow(self, f: typing.Optional[mitmproxy.flow.Flow]):
365 if f is not None and f not in self.view:
366 raise ValueError("Attempt to set focus to flow not in view")
367 self._flow = f
368 self.sig_change.send(self)
369
370 @property
371 def index(self) -> typing.Optional[int]:
372 if self.flow:
373 return self.view.index(self.flow)
374
375 @index.setter
376 def index(self, idx):
377 if idx < 0 or idx > len(self.view) - 1:
378 raise ValueError("Index out of view bounds")
379 self.flow = self.view[idx]
380
381 def _nearest(self, f, v):
382 return min(v._bisect(f), len(v) - 1)
383
384 def _sig_view_remove(self, view, flow):
385 if len(view) == 0:
386 self.flow = None
387 elif flow is self.flow:
388 self.flow = view[self._nearest(self.flow, view)]
389
390 def _sig_view_refresh(self, view):
391 if len(view) == 0:
392 self.flow = None
393 elif self.flow is None:
394 self.flow = view[0]
395 elif self.flow not in view:
396 self.flow = view[self._nearest(self.flow, view)]
397
398 def _sig_view_add(self, view, flow):
399 # We only have to act if we don't have a focus element
400 if not self.flow:
401 self.flow = flow
402
403
404 class Settings(collections.Mapping):
405 def __init__(self, view: View) -> None:
406 self.view = view
407 self._values = {} # type: typing.MutableMapping[str, mitmproxy.flow.Flow]
408 view.sig_store_remove.connect(self._sig_store_remove)
409 view.sig_store_refresh.connect(self._sig_store_refresh)
410
411 def __iter__(self) -> typing.Iterator:
412 return iter(self._values)
413
414 def __len__(self) -> int:
415 return len(self._values)
416
417 def __getitem__(self, f: mitmproxy.flow.Flow) -> dict:
418 if f.id not in self.view._store:
419 raise KeyError
420 return self._values.setdefault(f.id, {})
421
422 def _sig_store_remove(self, view, flow):
423 if flow.id in self._values:
424 del self._values[flow.id]
425
426 def _sig_store_refresh(self, view):
427 for fid in list(self._values.keys()):
428 if fid not in view._store:
429 del self._values[fid]
430
```
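The stable-sort-key workaround described in the comments near the top of `view.py` (cache the order key per flow, then remove and re-add an entry when its key changes) can be shown in isolation. This is a rough sketch, assuming only that `sortedcontainers` is installed; the `sizes` mapping and the string ids are invented for illustration and are not part of the file above:
```python
# Sketch of the remove/re-add refresh trick for a mutable sort key.
import sortedcontainers

sizes = {"a": 10, "b": 20}  # stands in for a flow attribute that changes over time
view = sortedcontainers.SortedListWithKey(key=lambda fid: sizes[fid])
view.add("a")
view.add("b")

# The list's invariants assume the key is stable, so before the key changes
# the item is removed, the new key is recorded, and the item is re-added.
view.remove("a")
sizes["a"] = 30
view.add("a")

assert list(view) == ["b", "a"]  # now ordered by the updated sizes
```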
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mitmproxy/addons/view.py b/mitmproxy/addons/view.py
--- a/mitmproxy/addons/view.py
+++ b/mitmproxy/addons/view.py
@@ -102,7 +102,7 @@
class View(collections.Sequence):
def __init__(self):
super().__init__()
- self._store = {}
+ self._store = collections.OrderedDict()
self.filter = matchall
# Should we show only marked flows?
self.show_marked = False
| {"golden_diff": "diff --git a/mitmproxy/addons/view.py b/mitmproxy/addons/view.py\n--- a/mitmproxy/addons/view.py\n+++ b/mitmproxy/addons/view.py\n@@ -102,7 +102,7 @@\n class View(collections.Sequence):\n def __init__(self):\n super().__init__()\n- self._store = {}\n+ self._store = collections.OrderedDict()\n self.filter = matchall\n # Should we show only marked flows?\n self.show_marked = False\n", "issue": "Change view._store from dictionary to OrderedDict\n##### Steps to reproduce the problem:\r\n \r\n1. Mark multiple flows in the flow list using `m` key \r\n2. Toggle to show-marked-only view using `M`\r\n3. mitmproxy rearranges flow list when `M` is toggled\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nThis is an issue with the flow list mentioned earlier in #1960. The marked flows can get randomly rearranged whenever M or Z is pressed, as it iterates through `view._store` which is a dictionary, to refresh the view.\r\n\r\n##### System information\r\n\r\nMitmproxy version: 2.0.0 (v1.0.1-45-g0022c81)\r\nPython version: 3.5.3\r\nPlatform: Linux-4.8.2-c9-x86_64-with-debian-jessie-sid\r\nSSL version: OpenSSL 1.0.1f 6 Jan 2014\r\nLinux distro: debian jessie/sid \r\n<!--\r\n Cut and paste the output of \"mitmproxy --version\".\r\n\r\n If you're using an older version if mitmproxy, please specify the version\r\n and OS.\r\n-->\r\n\n", "before_files": [{"content": "\"\"\"\nThe View:\n\n- Keeps track of a store of flows\n- Maintains a filtered, ordered view onto that list of flows\n- Exposes a number of signals so the view can be monitored\n- Tracks focus within the view\n- Exposes a settings store for flows that automatically expires if the flow is\n removed from the store.\n\"\"\"\nimport collections\nimport typing\nimport datetime\n\nimport blinker\nimport sortedcontainers\n\nimport mitmproxy.flow\nfrom mitmproxy import flowfilter\nfrom mitmproxy import exceptions\n\n# The underlying sorted list implementation expects the sort key to be stable\n# for the lifetime of the object. However, if we sort by size, for instance,\n# the sort order changes as the flow progresses through its lifecycle. 
We\n# address this through two means:\n#\n# - Let order keys cache the sort value by flow ID.\n#\n# - Add a facility to refresh items in the list by removing and re-adding them\n# when they are updated.\n\n\nclass _OrderKey:\n def __init__(self, view):\n self.view = view\n\n def generate(self, f: mitmproxy.flow.Flow) -> typing.Any: # pragma: no cover\n pass\n\n def refresh(self, f):\n k = self._key()\n old = self.view.settings[f][k]\n new = self.generate(f)\n if old != new:\n self.view._view.remove(f)\n self.view.settings[f][k] = new\n self.view._view.add(f)\n self.view.sig_view_refresh.send(self.view)\n\n def _key(self):\n return \"_order_%s\" % id(self)\n\n def __call__(self, f):\n if f.id in self.view._store:\n k = self._key()\n s = self.view.settings[f]\n if k in s:\n return s[k]\n val = self.generate(f)\n s[k] = val\n return val\n else:\n return self.generate(f)\n\n\nclass OrderRequestStart(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> datetime.datetime:\n return f.request.timestamp_start or 0\n\n\nclass OrderRequestMethod(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> str:\n return f.request.method\n\n\nclass OrderRequestURL(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> str:\n return f.request.url\n\n\nclass OrderKeySize(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> int:\n s = 0\n if f.request.raw_content:\n s += len(f.request.raw_content)\n if f.response and f.response.raw_content:\n s += len(f.response.raw_content)\n return s\n\n\nmatchall = flowfilter.parse(\".\")\n\n\norders = [\n (\"t\", \"time\"),\n (\"m\", \"method\"),\n (\"u\", \"url\"),\n (\"z\", \"size\"),\n]\n\n\nclass View(collections.Sequence):\n def __init__(self):\n super().__init__()\n self._store = {}\n self.filter = matchall\n # Should we show only marked flows?\n self.show_marked = False\n\n self.default_order = OrderRequestStart(self)\n self.orders = dict(\n time = self.default_order,\n method = OrderRequestMethod(self),\n url = OrderRequestURL(self),\n size = OrderKeySize(self),\n )\n self.order_key = self.default_order\n self.order_reversed = False\n self.focus_follow = False\n\n self._view = sortedcontainers.SortedListWithKey(key = self.order_key)\n\n # The sig_view* signals broadcast events that affect the view. That is,\n # an update to a flow in the store but not in the view does not trigger\n # a signal. All signals are called after the view has been updated.\n self.sig_view_update = blinker.Signal()\n self.sig_view_add = blinker.Signal()\n self.sig_view_remove = blinker.Signal()\n # Signals that the view should be refreshed completely\n self.sig_view_refresh = blinker.Signal()\n\n # The sig_store* signals broadcast events that affect the underlying\n # store. If a flow is removed from just the view, sig_view_remove is\n # triggered. 
If it is removed from the store while it is also in the\n # view, both sig_store_remove and sig_view_remove are triggered.\n self.sig_store_remove = blinker.Signal()\n # Signals that the store should be refreshed completely\n self.sig_store_refresh = blinker.Signal()\n\n self.focus = Focus(self)\n self.settings = Settings(self)\n\n def store_count(self):\n return len(self._store)\n\n def inbounds(self, index: int) -> bool:\n \"\"\"\n Is this 0 <= index < len(self)\n \"\"\"\n return 0 <= index < len(self)\n\n def _rev(self, idx: int) -> int:\n \"\"\"\n Reverses an index, if needed\n \"\"\"\n if self.order_reversed:\n if idx < 0:\n idx = -idx - 1\n else:\n idx = len(self._view) - idx - 1\n if idx < 0:\n raise IndexError\n return idx\n\n def __len__(self):\n return len(self._view)\n\n def __getitem__(self, offset) -> mitmproxy.flow.Flow:\n return self._view[self._rev(offset)]\n\n # Reflect some methods to the efficient underlying implementation\n\n def _bisect(self, f: mitmproxy.flow.Flow) -> int:\n v = self._view.bisect_right(f)\n return self._rev(v - 1) + 1\n\n def index(self, f: mitmproxy.flow.Flow, start: int = 0, stop: typing.Optional[int] = None) -> int:\n return self._rev(self._view.index(f, start, stop))\n\n def __contains__(self, f: mitmproxy.flow.Flow) -> bool:\n return self._view.__contains__(f)\n\n def _order_key_name(self):\n return \"_order_%s\" % id(self.order_key)\n\n def _base_add(self, f):\n self.settings[f][self._order_key_name()] = self.order_key(f)\n self._view.add(f)\n\n def _refilter(self):\n self._view.clear()\n for i in self._store.values():\n if self.show_marked and not i.marked:\n continue\n if self.filter(i):\n self._base_add(i)\n self.sig_view_refresh.send(self)\n\n # API\n def toggle_marked(self):\n self.show_marked = not self.show_marked\n self._refilter()\n\n def set_reversed(self, value: bool):\n self.order_reversed = value\n self.sig_view_refresh.send(self)\n\n def set_order(self, order_key: typing.Callable):\n \"\"\"\n Sets the current view order.\n \"\"\"\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview\n\n def set_filter(self, flt: typing.Optional[flowfilter.TFilter]):\n \"\"\"\n Sets the current view filter.\n \"\"\"\n self.filter = flt or matchall\n self._refilter()\n\n def clear(self):\n \"\"\"\n Clears both the store and view.\n \"\"\"\n self._store.clear()\n self._view.clear()\n self.sig_view_refresh.send(self)\n self.sig_store_refresh.send(self)\n\n def clear_not_marked(self):\n \"\"\"\n Clears only the unmarked flows.\n \"\"\"\n for flow in self._store.copy().values():\n if not flow.marked:\n self._store.pop(flow.id)\n\n self._refilter()\n self.sig_store_refresh.send(self)\n\n def add(self, f: mitmproxy.flow.Flow) -> bool:\n \"\"\"\n Adds a flow to the state. If the flow already exists, it is\n ignored.\n \"\"\"\n if f.id not in self._store:\n self._store[f.id] = f\n if self.filter(f):\n self._base_add(f)\n if self.focus_follow:\n self.focus.flow = f\n self.sig_view_add.send(self, flow=f)\n\n def remove(self, f: mitmproxy.flow.Flow):\n \"\"\"\n Removes the flow from the underlying store and the view.\n \"\"\"\n if f.id in self._store:\n if f in self._view:\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n\n def update(self, f: mitmproxy.flow.Flow):\n \"\"\"\n Updates a flow. 
If the flow is not in the state, it's ignored.\n \"\"\"\n if f.id in self._store:\n if self.filter(f):\n if f not in self._view:\n self._base_add(f)\n if self.focus_follow:\n self.focus.flow = f\n self.sig_view_add.send(self, flow=f)\n else:\n # This is a tad complicated. The sortedcontainers\n # implementation assumes that the order key is stable. If\n # it changes mid-way Very Bad Things happen. We detect when\n # this happens, and re-fresh the item.\n self.order_key.refresh(f)\n self.sig_view_update.send(self, flow=f)\n else:\n try:\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f)\n except ValueError:\n # The value was not in the view\n pass\n\n def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]:\n \"\"\"\n Get flow with the given id from the store.\n Returns None if the flow is not found.\n \"\"\"\n return self._store.get(flow_id)\n\n # Event handlers\n def configure(self, opts, updated):\n if \"filter\" in updated:\n filt = None\n if opts.filter:\n filt = flowfilter.parse(opts.filter)\n if not filt:\n raise exceptions.OptionsError(\n \"Invalid interception filter: %s\" % opts.filter\n )\n self.set_filter(filt)\n if \"console_order\" in updated:\n if opts.console_order is None:\n self.set_order(self.default_order)\n else:\n if opts.console_order not in self.orders:\n raise exceptions.OptionsError(\n \"Unknown flow order: %s\" % opts.console_order\n )\n self.set_order(self.orders[opts.console_order])\n if \"console_order_reversed\" in updated:\n self.set_reversed(opts.console_order_reversed)\n if \"console_focus_follow\" in updated:\n self.focus_follow = opts.console_focus_follow\n\n def request(self, f):\n self.add(f)\n\n def error(self, f):\n self.update(f)\n\n def response(self, f):\n self.update(f)\n\n def intercept(self, f):\n self.update(f)\n\n def resume(self, f):\n self.update(f)\n\n def kill(self, f):\n self.update(f)\n\n\nclass Focus:\n \"\"\"\n Tracks a focus element within a View.\n \"\"\"\n def __init__(self, v: View) -> None:\n self.view = v\n self._flow = None # type: mitmproxy.flow.Flow\n self.sig_change = blinker.Signal()\n if len(self.view):\n self.flow = self.view[0]\n v.sig_view_add.connect(self._sig_view_add)\n v.sig_view_remove.connect(self._sig_view_remove)\n v.sig_view_refresh.connect(self._sig_view_refresh)\n\n @property\n def flow(self) -> typing.Optional[mitmproxy.flow.Flow]:\n return self._flow\n\n @flow.setter\n def flow(self, f: typing.Optional[mitmproxy.flow.Flow]):\n if f is not None and f not in self.view:\n raise ValueError(\"Attempt to set focus to flow not in view\")\n self._flow = f\n self.sig_change.send(self)\n\n @property\n def index(self) -> typing.Optional[int]:\n if self.flow:\n return self.view.index(self.flow)\n\n @index.setter\n def index(self, idx):\n if idx < 0 or idx > len(self.view) - 1:\n raise ValueError(\"Index out of view bounds\")\n self.flow = self.view[idx]\n\n def _nearest(self, f, v):\n return min(v._bisect(f), len(v) - 1)\n\n def _sig_view_remove(self, view, flow):\n if len(view) == 0:\n self.flow = None\n elif flow is self.flow:\n self.flow = view[self._nearest(self.flow, view)]\n\n def _sig_view_refresh(self, view):\n if len(view) == 0:\n self.flow = None\n elif self.flow is None:\n self.flow = view[0]\n elif self.flow not in view:\n self.flow = view[self._nearest(self.flow, view)]\n\n def _sig_view_add(self, view, flow):\n # We only have to act if we don't have a focus element\n if not self.flow:\n self.flow = flow\n\n\nclass Settings(collections.Mapping):\n def __init__(self, view: 
View) -> None:\n self.view = view\n self._values = {} # type: typing.MutableMapping[str, mitmproxy.flow.Flow]\n view.sig_store_remove.connect(self._sig_store_remove)\n view.sig_store_refresh.connect(self._sig_store_refresh)\n\n def __iter__(self) -> typing.Iterator:\n return iter(self._values)\n\n def __len__(self) -> int:\n return len(self._values)\n\n def __getitem__(self, f: mitmproxy.flow.Flow) -> dict:\n if f.id not in self.view._store:\n raise KeyError\n return self._values.setdefault(f.id, {})\n\n def _sig_store_remove(self, view, flow):\n if flow.id in self._values:\n del self._values[flow.id]\n\n def _sig_store_refresh(self, view):\n for fid in list(self._values.keys()):\n if fid not in view._store:\n del self._values[fid]\n", "path": "mitmproxy/addons/view.py"}], "after_files": [{"content": "\"\"\"\nThe View:\n\n- Keeps track of a store of flows\n- Maintains a filtered, ordered view onto that list of flows\n- Exposes a number of signals so the view can be monitored\n- Tracks focus within the view\n- Exposes a settings store for flows that automatically expires if the flow is\n removed from the store.\n\"\"\"\nimport collections\nimport typing\nimport datetime\n\nimport blinker\nimport sortedcontainers\n\nimport mitmproxy.flow\nfrom mitmproxy import flowfilter\nfrom mitmproxy import exceptions\n\n# The underlying sorted list implementation expects the sort key to be stable\n# for the lifetime of the object. However, if we sort by size, for instance,\n# the sort order changes as the flow progresses through its lifecycle. We\n# address this through two means:\n#\n# - Let order keys cache the sort value by flow ID.\n#\n# - Add a facility to refresh items in the list by removing and re-adding them\n# when they are updated.\n\n\nclass _OrderKey:\n def __init__(self, view):\n self.view = view\n\n def generate(self, f: mitmproxy.flow.Flow) -> typing.Any: # pragma: no cover\n pass\n\n def refresh(self, f):\n k = self._key()\n old = self.view.settings[f][k]\n new = self.generate(f)\n if old != new:\n self.view._view.remove(f)\n self.view.settings[f][k] = new\n self.view._view.add(f)\n self.view.sig_view_refresh.send(self.view)\n\n def _key(self):\n return \"_order_%s\" % id(self)\n\n def __call__(self, f):\n if f.id in self.view._store:\n k = self._key()\n s = self.view.settings[f]\n if k in s:\n return s[k]\n val = self.generate(f)\n s[k] = val\n return val\n else:\n return self.generate(f)\n\n\nclass OrderRequestStart(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> datetime.datetime:\n return f.request.timestamp_start or 0\n\n\nclass OrderRequestMethod(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> str:\n return f.request.method\n\n\nclass OrderRequestURL(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> str:\n return f.request.url\n\n\nclass OrderKeySize(_OrderKey):\n def generate(self, f: mitmproxy.flow.Flow) -> int:\n s = 0\n if f.request.raw_content:\n s += len(f.request.raw_content)\n if f.response and f.response.raw_content:\n s += len(f.response.raw_content)\n return s\n\n\nmatchall = flowfilter.parse(\".\")\n\n\norders = [\n (\"t\", \"time\"),\n (\"m\", \"method\"),\n (\"u\", \"url\"),\n (\"z\", \"size\"),\n]\n\n\nclass View(collections.Sequence):\n def __init__(self):\n super().__init__()\n self._store = collections.OrderedDict()\n self.filter = matchall\n # Should we show only marked flows?\n self.show_marked = False\n\n self.default_order = OrderRequestStart(self)\n self.orders = dict(\n time = self.default_order,\n method = 
OrderRequestMethod(self),\n url = OrderRequestURL(self),\n size = OrderKeySize(self),\n )\n self.order_key = self.default_order\n self.order_reversed = False\n self.focus_follow = False\n\n self._view = sortedcontainers.SortedListWithKey(key = self.order_key)\n\n # The sig_view* signals broadcast events that affect the view. That is,\n # an update to a flow in the store but not in the view does not trigger\n # a signal. All signals are called after the view has been updated.\n self.sig_view_update = blinker.Signal()\n self.sig_view_add = blinker.Signal()\n self.sig_view_remove = blinker.Signal()\n # Signals that the view should be refreshed completely\n self.sig_view_refresh = blinker.Signal()\n\n # The sig_store* signals broadcast events that affect the underlying\n # store. If a flow is removed from just the view, sig_view_remove is\n # triggered. If it is removed from the store while it is also in the\n # view, both sig_store_remove and sig_view_remove are triggered.\n self.sig_store_remove = blinker.Signal()\n # Signals that the store should be refreshed completely\n self.sig_store_refresh = blinker.Signal()\n\n self.focus = Focus(self)\n self.settings = Settings(self)\n\n def store_count(self):\n return len(self._store)\n\n def inbounds(self, index: int) -> bool:\n \"\"\"\n Is this 0 <= index < len(self)\n \"\"\"\n return 0 <= index < len(self)\n\n def _rev(self, idx: int) -> int:\n \"\"\"\n Reverses an index, if needed\n \"\"\"\n if self.order_reversed:\n if idx < 0:\n idx = -idx - 1\n else:\n idx = len(self._view) - idx - 1\n if idx < 0:\n raise IndexError\n return idx\n\n def __len__(self):\n return len(self._view)\n\n def __getitem__(self, offset) -> mitmproxy.flow.Flow:\n return self._view[self._rev(offset)]\n\n # Reflect some methods to the efficient underlying implementation\n\n def _bisect(self, f: mitmproxy.flow.Flow) -> int:\n v = self._view.bisect_right(f)\n return self._rev(v - 1) + 1\n\n def index(self, f: mitmproxy.flow.Flow, start: int = 0, stop: typing.Optional[int] = None) -> int:\n return self._rev(self._view.index(f, start, stop))\n\n def __contains__(self, f: mitmproxy.flow.Flow) -> bool:\n return self._view.__contains__(f)\n\n def _order_key_name(self):\n return \"_order_%s\" % id(self.order_key)\n\n def _base_add(self, f):\n self.settings[f][self._order_key_name()] = self.order_key(f)\n self._view.add(f)\n\n def _refilter(self):\n self._view.clear()\n for i in self._store.values():\n if self.show_marked and not i.marked:\n continue\n if self.filter(i):\n self._base_add(i)\n self.sig_view_refresh.send(self)\n\n # API\n def toggle_marked(self):\n self.show_marked = not self.show_marked\n self._refilter()\n\n def set_reversed(self, value: bool):\n self.order_reversed = value\n self.sig_view_refresh.send(self)\n\n def set_order(self, order_key: typing.Callable):\n \"\"\"\n Sets the current view order.\n \"\"\"\n self.order_key = order_key\n newview = sortedcontainers.SortedListWithKey(key=order_key)\n newview.update(self._view)\n self._view = newview\n\n def set_filter(self, flt: typing.Optional[flowfilter.TFilter]):\n \"\"\"\n Sets the current view filter.\n \"\"\"\n self.filter = flt or matchall\n self._refilter()\n\n def clear(self):\n \"\"\"\n Clears both the store and view.\n \"\"\"\n self._store.clear()\n self._view.clear()\n self.sig_view_refresh.send(self)\n self.sig_store_refresh.send(self)\n\n def clear_not_marked(self):\n \"\"\"\n Clears only the unmarked flows.\n \"\"\"\n for flow in self._store.copy().values():\n if not flow.marked:\n 
self._store.pop(flow.id)\n\n self._refilter()\n self.sig_store_refresh.send(self)\n\n def add(self, f: mitmproxy.flow.Flow) -> bool:\n \"\"\"\n Adds a flow to the state. If the flow already exists, it is\n ignored.\n \"\"\"\n if f.id not in self._store:\n self._store[f.id] = f\n if self.filter(f):\n self._base_add(f)\n if self.focus_follow:\n self.focus.flow = f\n self.sig_view_add.send(self, flow=f)\n\n def remove(self, f: mitmproxy.flow.Flow):\n \"\"\"\n Removes the flow from the underlying store and the view.\n \"\"\"\n if f.id in self._store:\n if f in self._view:\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f)\n del self._store[f.id]\n self.sig_store_remove.send(self, flow=f)\n\n def update(self, f: mitmproxy.flow.Flow):\n \"\"\"\n Updates a flow. If the flow is not in the state, it's ignored.\n \"\"\"\n if f.id in self._store:\n if self.filter(f):\n if f not in self._view:\n self._base_add(f)\n if self.focus_follow:\n self.focus.flow = f\n self.sig_view_add.send(self, flow=f)\n else:\n # This is a tad complicated. The sortedcontainers\n # implementation assumes that the order key is stable. If\n # it changes mid-way Very Bad Things happen. We detect when\n # this happens, and re-fresh the item.\n self.order_key.refresh(f)\n self.sig_view_update.send(self, flow=f)\n else:\n try:\n self._view.remove(f)\n self.sig_view_remove.send(self, flow=f)\n except ValueError:\n # The value was not in the view\n pass\n\n def get_by_id(self, flow_id: str) -> typing.Optional[mitmproxy.flow.Flow]:\n \"\"\"\n Get flow with the given id from the store.\n Returns None if the flow is not found.\n \"\"\"\n return self._store.get(flow_id)\n\n # Event handlers\n def configure(self, opts, updated):\n if \"filter\" in updated:\n filt = None\n if opts.filter:\n filt = flowfilter.parse(opts.filter)\n if not filt:\n raise exceptions.OptionsError(\n \"Invalid interception filter: %s\" % opts.filter\n )\n self.set_filter(filt)\n if \"console_order\" in updated:\n if opts.console_order is None:\n self.set_order(self.default_order)\n else:\n if opts.console_order not in self.orders:\n raise exceptions.OptionsError(\n \"Unknown flow order: %s\" % opts.console_order\n )\n self.set_order(self.orders[opts.console_order])\n if \"console_order_reversed\" in updated:\n self.set_reversed(opts.console_order_reversed)\n if \"console_focus_follow\" in updated:\n self.focus_follow = opts.console_focus_follow\n\n def request(self, f):\n self.add(f)\n\n def error(self, f):\n self.update(f)\n\n def response(self, f):\n self.update(f)\n\n def intercept(self, f):\n self.update(f)\n\n def resume(self, f):\n self.update(f)\n\n def kill(self, f):\n self.update(f)\n\n\nclass Focus:\n \"\"\"\n Tracks a focus element within a View.\n \"\"\"\n def __init__(self, v: View) -> None:\n self.view = v\n self._flow = None # type: mitmproxy.flow.Flow\n self.sig_change = blinker.Signal()\n if len(self.view):\n self.flow = self.view[0]\n v.sig_view_add.connect(self._sig_view_add)\n v.sig_view_remove.connect(self._sig_view_remove)\n v.sig_view_refresh.connect(self._sig_view_refresh)\n\n @property\n def flow(self) -> typing.Optional[mitmproxy.flow.Flow]:\n return self._flow\n\n @flow.setter\n def flow(self, f: typing.Optional[mitmproxy.flow.Flow]):\n if f is not None and f not in self.view:\n raise ValueError(\"Attempt to set focus to flow not in view\")\n self._flow = f\n self.sig_change.send(self)\n\n @property\n def index(self) -> typing.Optional[int]:\n if self.flow:\n return self.view.index(self.flow)\n\n @index.setter\n def 
index(self, idx):\n if idx < 0 or idx > len(self.view) - 1:\n raise ValueError(\"Index out of view bounds\")\n self.flow = self.view[idx]\n\n def _nearest(self, f, v):\n return min(v._bisect(f), len(v) - 1)\n\n def _sig_view_remove(self, view, flow):\n if len(view) == 0:\n self.flow = None\n elif flow is self.flow:\n self.flow = view[self._nearest(self.flow, view)]\n\n def _sig_view_refresh(self, view):\n if len(view) == 0:\n self.flow = None\n elif self.flow is None:\n self.flow = view[0]\n elif self.flow not in view:\n self.flow = view[self._nearest(self.flow, view)]\n\n def _sig_view_add(self, view, flow):\n # We only have to act if we don't have a focus element\n if not self.flow:\n self.flow = flow\n\n\nclass Settings(collections.Mapping):\n def __init__(self, view: View) -> None:\n self.view = view\n self._values = {} # type: typing.MutableMapping[str, mitmproxy.flow.Flow]\n view.sig_store_remove.connect(self._sig_store_remove)\n view.sig_store_refresh.connect(self._sig_store_refresh)\n\n def __iter__(self) -> typing.Iterator:\n return iter(self._values)\n\n def __len__(self) -> int:\n return len(self._values)\n\n def __getitem__(self, f: mitmproxy.flow.Flow) -> dict:\n if f.id not in self.view._store:\n raise KeyError\n return self._values.setdefault(f.id, {})\n\n def _sig_store_remove(self, view, flow):\n if flow.id in self._values:\n del self._values[flow.id]\n\n def _sig_store_refresh(self, view):\n for fid in list(self._values.keys()):\n if fid not in view._store:\n del self._values[fid]\n", "path": "mitmproxy/addons/view.py"}]} |
gh_patches_debug_17521 | rasdani/github-patches | git_diff | spotify__luigi-2182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
S3's _get_s3_config() restricts the use of [DEFAULT] ConfigParser section
<!---
Currently, there are no strict procedures or guidelines for submitting issues.
In short, please just use common sense.
Common sense includes this at bare-minimum:
* search for similar issues posted before creating a new issue.
* Use markdown to format all code/logs. Issues which are hard to read
when rendered on GitHub might be closed with a friendly reminder of this.
Also, add steps to reproduce the bug, if applicable. Sample code would be nice too :)
For more information on how to submit valuable contributions,
see https://opensource.guide/how-to-contribute/#how-to-submit-a-contribution
-->
## Problem
The S3Client's handling of config options prevents the use of `ConfigParser`'s `[DEFAULT]` section.
Because `luigi/contrib/s3.py` auto-fills its connection options from every key-value pair found in the `[s3]` section of the luigi configuration file, and because Python's ConfigParser exposes `[DEFAULT]` keys in every section, errors are thrown as soon as a `[DEFAULT]` section is used in the config.
This functionality appears to have been added in PR #500
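For reference, here is a minimal standalone sketch (plain Python 3 `configparser`, made-up values) of why this happens: `items('s3')` returns the keys inherited from `[DEFAULT]` alongside the keys defined in `[s3]` itself, so they all end up in the options dict that is later passed to boto.
```python
from configparser import ConfigParser

cfg = ConfigParser()
cfg.read_string("""
[DEFAULT]
path_base = /Users/example
[s3]
aws_access_key_id = foo
aws_secret_access_key = bar
""")
# items('s3') also yields the key inherited from [DEFAULT]
print(dict(cfg.items('s3')))
# -> {'path_base': '/Users/example', 'aws_access_key_id': 'foo', 'aws_secret_access_key': 'bar'}
```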
### Presence of Bug
From what I can tell by searching the repo for `configuration.get_config().items`, S3 is the only culprit for this issue.
## Question (fbo solution)
I understand the motivation here is to be able to specify s3 boto connection options via the Luigi config, but I'm not aware of the best strategy: either limit the options to a list of approved keys, or find a way for ConfigParser to parse and interpolate only the keys defined explicitly in the `[s3]` section (ignoring those inherited from `[DEFAULT]`).
I'm happy to tackle this, but suggestions are appreciated and welcome.
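One possible approach, sketched here against the `_get_s3_config` shown below (untested): keep the parsing as-is, but compare the result against `configuration.get_config().defaults()` and drop any key that was only inherited from `[DEFAULT]`.
```python
def _get_s3_config(self, key=None):
    # keys/values coming from ConfigParser's [DEFAULT] section
    defaults = dict(configuration.get_config().defaults())
    try:
        config = dict(configuration.get_config().items('s3'))
    except NoSectionError:
        return {}
    # So what ports etc can be read without us having to specify all dtypes
    for k, v in six.iteritems(config):
        try:
            config[k] = int(v)
        except ValueError:
            pass
    if key:
        return config.get(key)
    # keep only keys set in [s3] itself (or overriding the inherited default)
    return {k: v for k, v in config.items() if k not in defaults or v != defaults[k]}
```
The alternative, a whitelist of approved boto connection kwargs, would need updating whenever boto adds options, so filtering against `defaults()` seems the less brittle of the two.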
## Source Code at Fault
```python
class S3Client(FileSystem):
_s3 = None
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
from boto.s3.key import Key
options = self._get_s3_config() # pulls all s3 section vars, including DEFAULT
options.update(kwargs)
if aws_access_key_id:
options['aws_access_key_id'] = aws_access_key_id
if aws_secret_access_key:
options['aws_secret_access_key'] = aws_secret_access_key
self.Key = Key
self._options = options # all s3 and DEFAULT key/value pairs
@property
def s3(self):
# only import boto when needed to allow top-lvl s3 module import
import boto
import boto.s3.connection
options = dict(self._options) # var of interest
if self._s3:
return self._s3
aws_access_key_id = options.get('aws_access_key_id')
aws_secret_access_key = options.get('aws_secret_access_key')
# Removing key args would break backwards compability
role_arn = options.get('aws_role_arn')
role_session_name = options.get('aws_role_session_name')
aws_session_token = None
if role_arn and role_session_name:
from boto import sts
sts_client = sts.STSConnection()
assumed_role = sts_client.assume_role(role_arn, role_session_name)
aws_secret_access_key = assumed_role.credentials.secret_key
aws_access_key_id = assumed_role.credentials.access_key
aws_session_token = assumed_role.credentials.session_token
for key in ['aws_access_key_id', 'aws_secret_access_key', 'aws_role_session_name', 'aws_role_arn']:
if key in options:
options.pop(key)
self._s3 = boto.s3.connection.S3Connection(aws_access_key_id,
aws_secret_access_key,
security_token=aws_session_token,
**options) # passes all key/value pairs (except auth) to boto - may cause error
return self._s3
...
def _get_s3_config(self, key=None):
try:
config = dict(configuration.get_config().items('s3')) # pulls all k/v from s3 and DEFAULT
except NoSectionError:
return {}
# So what ports etc can be read without us having to specify all dtypes
for k, v in six.iteritems(config):
try:
config[k] = int(v)
except ValueError:
pass
if key:
return config.get(key)
return config
...
```
### Erroneous Luigi Code
luigi.cfg
```
[DEFAULT]
path_base: /Users/dstadther
[core]
...
logging_conf_file: %(path_base)s/this_project/logging.conf
[bucket]
load: s3://my.s3.load.bucket/
[path]
sql: %(path_base)s/sql
[s3]
aws_access_key_id: foo
aws_secret_access_key: bar
calling_format: boto.s3.connection.OrdinaryCallingFormat
[scheduler]
state-path: %(path_base)s/out/luigid_state.pkl
...
```
luigi example script (`test_s3_default.py`)
```python
import luigi
from luigi import mock
from luigi.contrib import s3
class bucket(luigi.Config):
load = luigi.Parameter()
class TestDefault(luigi.Task):
client = s3.S3Client()
def output(self):
return mock.MockTarget('')
def run(self):
for i in self.client.listdir(bucket().load):
print(i)
self.output().open('w').close()
if __name__ == '__main__':
luigi.run()
```
shell
```sh
cd /Users/dstadther/this_project/scripts; /Users/dstadther/venv/this_project/bin/python test_s3_default.py TestDefault
```
Error stack trace
```sh
Traceback (most recent call last):
File "/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/worker.py", line 191, in run
new_deps = self._run_get_new_deps()
File "/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/worker.py", line 129, in _run_get_new_deps
task_gen = self.task.run()
File "test_s3_default.py", line 13, in run
for i in self.client.listdir(bucket().load):
File "/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/contrib/s3.py", line 498, in listdir
s3_bucket = self.s3.get_bucket(bucket, validate=True)
File "/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/contrib/s3.py", line 126, in s3
**options)
TypeError: __init__() got an unexpected keyword argument 'path_base'
```
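In the meantime, a possible stop-gap (untested sketch; `path_base` is the `[DEFAULT]` key from the config above) is to subclass `S3Client` and strip the inherited keys before they reach boto:
```python
from luigi.contrib import s3

class DefaultSafeS3Client(s3.S3Client):
    # keys defined under [DEFAULT] in luigi.cfg (adjust to your own config)
    _default_section_keys = ('path_base',)
    def _get_s3_config(self, key=None):
        config = super(DefaultSafeS3Client, self)._get_s3_config(key)
        if key is not None or not isinstance(config, dict):
            return config
        return {k: v for k, v in config.items() if k not in self._default_section_keys}
```
The example task would then use `client = DefaultSafeS3Client()` instead of `s3.S3Client()`.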
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `luigi/contrib/s3.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright 2012-2015 Spotify AB
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16 #
17 """
18 Implementation of Simple Storage Service support.
19 :py:class:`S3Target` is a subclass of the Target class to support S3 file
20 system operations. The `boto` library is required to use S3 targets.
21 """
22
23 from __future__ import division
24
25 import datetime
26 import itertools
27 import logging
28 import os
29 import os.path
30
31 import time
32 from multiprocessing.pool import ThreadPool
33
34 try:
35 from urlparse import urlsplit
36 except ImportError:
37 from urllib.parse import urlsplit
38 import warnings
39
40 try:
41 from ConfigParser import NoSectionError
42 except ImportError:
43 from configparser import NoSectionError
44
45 from luigi import six
46 from luigi.six.moves import range
47
48 from luigi import configuration
49 from luigi.format import get_default_format
50 from luigi.parameter import Parameter
51 from luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory
52 from luigi.task import ExternalTask
53
54 logger = logging.getLogger('luigi-interface')
55
56
57 # two different ways of marking a directory
58 # with a suffix in S3
59 S3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'
60 S3_DIRECTORY_MARKER_SUFFIX_1 = '/'
61
62
63 class InvalidDeleteException(FileSystemException):
64 pass
65
66
67 class FileNotFoundException(FileSystemException):
68 pass
69
70
71 class S3Client(FileSystem):
72 """
73 boto-powered S3 client.
74 """
75
76 _s3 = None
77
78 def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
79 **kwargs):
80 from boto.s3.key import Key
81 options = self._get_s3_config()
82 options.update(kwargs)
83 if aws_access_key_id:
84 options['aws_access_key_id'] = aws_access_key_id
85 if aws_secret_access_key:
86 options['aws_secret_access_key'] = aws_secret_access_key
87
88 self.Key = Key
89 self._options = options
90
91 @property
92 def s3(self):
93 # only import boto when needed to allow top-lvl s3 module import
94 import boto
95 import boto.s3.connection
96
97 options = dict(self._options)
98
99 if self._s3:
100 return self._s3
101
102 aws_access_key_id = options.get('aws_access_key_id')
103 aws_secret_access_key = options.get('aws_secret_access_key')
104
105 # Removing key args would break backwards compability
106 role_arn = options.get('aws_role_arn')
107 role_session_name = options.get('aws_role_session_name')
108
109 aws_session_token = None
110
111 if role_arn and role_session_name:
112 from boto import sts
113
114 sts_client = sts.STSConnection()
115 assumed_role = sts_client.assume_role(role_arn, role_session_name)
116 aws_secret_access_key = assumed_role.credentials.secret_key
117 aws_access_key_id = assumed_role.credentials.access_key
118 aws_session_token = assumed_role.credentials.session_token
119
120 for key in ['aws_access_key_id', 'aws_secret_access_key', 'aws_role_session_name', 'aws_role_arn']:
121 if key in options:
122 options.pop(key)
123 self._s3 = boto.s3.connection.S3Connection(aws_access_key_id,
124 aws_secret_access_key,
125 security_token=aws_session_token,
126 **options)
127 return self._s3
128
129 @s3.setter
130 def s3(self, value):
131 self._s3 = value
132
133 def exists(self, path):
134 """
135 Does provided path exist on S3?
136 """
137 (bucket, key) = self._path_to_bucket_and_key(path)
138
139 # grab and validate the bucket
140 s3_bucket = self.s3.get_bucket(bucket, validate=True)
141
142 # root always exists
143 if self._is_root(key):
144 return True
145
146 # file
147 s3_key = s3_bucket.get_key(key)
148 if s3_key:
149 return True
150
151 if self.isdir(path):
152 return True
153
154 logger.debug('Path %s does not exist', path)
155 return False
156
157 def remove(self, path, recursive=True):
158 """
159 Remove a file or directory from S3.
160 """
161 if not self.exists(path):
162 logger.debug('Could not delete %s; path does not exist', path)
163 return False
164
165 (bucket, key) = self._path_to_bucket_and_key(path)
166
167 # root
168 if self._is_root(key):
169 raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)
170
171 # grab and validate the bucket
172 s3_bucket = self.s3.get_bucket(bucket, validate=True)
173
174 # file
175 s3_key = s3_bucket.get_key(key)
176 if s3_key:
177 s3_bucket.delete_key(s3_key)
178 logger.debug('Deleting %s from bucket %s', key, bucket)
179 return True
180
181 if self.isdir(path) and not recursive:
182 raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)
183
184 delete_key_list = [
185 k for k in s3_bucket.list(self._add_path_delimiter(key))]
186
187 # delete the directory marker file if it exists
188 s3_dir_with_suffix_key = s3_bucket.get_key(key + S3_DIRECTORY_MARKER_SUFFIX_0)
189 if s3_dir_with_suffix_key:
190 delete_key_list.append(s3_dir_with_suffix_key)
191
192 if len(delete_key_list) > 0:
193 for k in delete_key_list:
194 logger.debug('Deleting %s from bucket %s', k, bucket)
195 s3_bucket.delete_keys(delete_key_list)
196 return True
197
198 return False
199
200 def get_key(self, path):
201 """
202 Returns just the key from the path.
203
204 An s3 path is composed of a bucket and a key.
205
206 Suppose we have a path `s3://my_bucket/some/files/my_file`. The key is `some/files/my_file`.
207 """
208 (bucket, key) = self._path_to_bucket_and_key(path)
209
210 s3_bucket = self.s3.get_bucket(bucket, validate=True)
211
212 return s3_bucket.get_key(key)
213
214 def put(self, local_path, destination_s3_path, **kwargs):
215 """
216 Put an object stored locally to an S3 path.
217
218 :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_filename`
219 """
220 (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
221
222 # grab and validate the bucket
223 s3_bucket = self.s3.get_bucket(bucket, validate=True)
224
225 # put the file
226 s3_key = self.Key(s3_bucket)
227 s3_key.key = key
228 s3_key.set_contents_from_filename(local_path, **kwargs)
229
230 def put_string(self, content, destination_s3_path, **kwargs):
231 """
232 Put a string to an S3 path.
233
234 :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_string`
235 """
236 (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
237 # grab and validate the bucket
238 s3_bucket = self.s3.get_bucket(bucket, validate=True)
239
240 # put the content
241 s3_key = self.Key(s3_bucket)
242 s3_key.key = key
243 s3_key.set_contents_from_string(content, **kwargs)
244
245 def put_multipart(self, local_path, destination_s3_path, part_size=67108864, **kwargs):
246 """
247 Put an object stored locally to an S3 path
248 using S3 multi-part upload (for files > 5GB).
249
250 :param local_path: Path to source local file
251 :param destination_s3_path: URL for target S3 location
252 :param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.
253 :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`
254 """
255 # calculate number of parts to upload
256 # based on the size of the file
257 source_size = os.stat(local_path).st_size
258
259 if source_size <= part_size:
260 # fallback to standard, non-multipart strategy
261 return self.put(local_path, destination_s3_path, **kwargs)
262
263 (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
264
265 # grab and validate the bucket
266 s3_bucket = self.s3.get_bucket(bucket, validate=True)
267
268 # calculate the number of parts (int division).
269 # use modulo to avoid float precision issues
270 # for exactly-sized fits
271 num_parts = (source_size + part_size - 1) // part_size
272
273 mp = None
274 try:
275 mp = s3_bucket.initiate_multipart_upload(key, **kwargs)
276
277 for i in range(num_parts):
278 # upload a part at a time to S3
279 offset = part_size * i
280 bytes = min(part_size, source_size - offset)
281 with open(local_path, 'rb') as fp:
282 part_num = i + 1
283 logger.info('Uploading part %s/%s to %s', part_num, num_parts, destination_s3_path)
284 fp.seek(offset)
285 mp.upload_part_from_file(fp, part_num=part_num, size=bytes)
286
287 # finish the upload, making the file available in S3
288 mp.complete_upload()
289 except BaseException:
290 if mp:
291 logger.info('Canceling multipart s3 upload for %s', destination_s3_path)
292 # cancel the upload so we don't get charged for
293 # storage consumed by uploaded parts
294 mp.cancel_upload()
295 raise
296
297 def get(self, s3_path, destination_local_path):
298 """
299 Get an object stored in S3 and write it to a local path.
300 """
301 (bucket, key) = self._path_to_bucket_and_key(s3_path)
302
303 # grab and validate the bucket
304 s3_bucket = self.s3.get_bucket(bucket, validate=True)
305
306 # download the file
307 s3_key = self.Key(s3_bucket)
308 s3_key.key = key
309 s3_key.get_contents_to_filename(destination_local_path)
310
311 def get_as_string(self, s3_path):
312 """
313 Get the contents of an object stored in S3 as a string.
314 """
315 (bucket, key) = self._path_to_bucket_and_key(s3_path)
316
317 # grab and validate the bucket
318 s3_bucket = self.s3.get_bucket(bucket, validate=True)
319
320 # get the content
321 s3_key = self.Key(s3_bucket)
322 s3_key.key = key
323 contents = s3_key.get_contents_as_string()
324
325 return contents
326
327 def copy(self, source_path, destination_path, threads=100, start_time=None, end_time=None, part_size=67108864, **kwargs):
328 """
329 Copy object(s) from one S3 location to another. Works for individual keys or entire directories.
330
331 When files are larger than `part_size`, multipart uploading will be used.
332
333 :param source_path: The `s3://` path of the directory or key to copy from
334 :param destination_path: The `s3://` path of the directory or key to copy to
335 :param threads: Optional argument to define the number of threads to use when copying (min: 3 threads)
336 :param start_time: Optional argument to copy files with modified dates after start_time
337 :param end_time: Optional argument to copy files with modified dates before end_time
338 :param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.
339 :param kwargs: Keyword arguments are passed to the boto function `copy_key`
340
341 :returns tuple (number_of_files_copied, total_size_copied_in_bytes)
342 """
343 start = datetime.datetime.now()
344
345 (src_bucket, src_key) = self._path_to_bucket_and_key(source_path)
346 (dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)
347
348 # As the S3 copy command is completely server side, there is no issue with issuing a lot of threads
349 # to issue a single API call per copy, however, this may in theory cause issues on systems with low ulimits for
350 # number of threads when copying really large files (e.g. with a ~100GB file this will open ~1500
351 # threads), or large directories. Around 100 threads seems to work well.
352
353 threads = 3 if threads < 3 else threads # don't allow threads to be less than 3
354 total_keys = 0
355
356 copy_pool = ThreadPool(processes=threads)
357
358 if self.isdir(source_path):
359 # The management pool is to ensure that there's no deadlock between the s3 copying threads, and the
360 # multipart_copy threads that monitors each group of s3 copy threads and returns a success once the entire file
361 # is copied. Without this, we could potentially fill up the pool with threads waiting to check if the s3 copies
362 # have completed, leaving no available threads to actually perform any copying.
363 copy_jobs = []
364 management_pool = ThreadPool(processes=threads)
365
366 (bucket, key) = self._path_to_bucket_and_key(source_path)
367 key_path = self._add_path_delimiter(key)
368 key_path_len = len(key_path)
369
370 total_size_bytes = 0
371 src_prefix = self._add_path_delimiter(src_key)
372 dst_prefix = self._add_path_delimiter(dst_key)
373 for item in self.list(source_path, start_time=start_time, end_time=end_time, return_key=True):
374 path = item.key[key_path_len:]
375 # prevents copy attempt of empty key in folder
376 if path != '' and path != '/':
377 total_keys += 1
378 total_size_bytes += item.size
379 job = management_pool.apply_async(self.__copy_multipart,
380 args=(copy_pool,
381 src_bucket, src_prefix + path,
382 dst_bucket, dst_prefix + path,
383 part_size),
384 kwds=kwargs)
385 copy_jobs.append(job)
386
387 # Wait for the pools to finish scheduling all the copies
388 management_pool.close()
389 management_pool.join()
390 copy_pool.close()
391 copy_pool.join()
392
393 # Raise any errors encountered in any of the copy processes
394 for result in copy_jobs:
395 result.get()
396
397 end = datetime.datetime.now()
398 duration = end - start
399 logger.info('%s : Complete : %s total keys copied in %s' %
400 (datetime.datetime.now(), total_keys, duration))
401
402 return total_keys, total_size_bytes
403
404 # If the file isn't a directory just perform a simple copy
405 else:
406 self.__copy_multipart(copy_pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs)
407 # Close the pool
408 copy_pool.close()
409 copy_pool.join()
410
411 def __copy_multipart(self, pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs):
412 """
413 Copy a single S3 object to another S3 object, falling back to multipart copy where necessary
414
415 NOTE: This is a private method and should only be called from within the `luigi.s3.copy` method
416
417 :param pool: The threadpool to put the s3 copy processes onto
418 :param src_bucket: source bucket name
419 :param src_key: source key name
420 :param dst_bucket: destination bucket name
421 :param dst_key: destination key name
422 :param key_size: size of the key to copy in bytes
423 :param part_size: Part size in bytes. Must be >= 5MB and <= 5 GB.
424 :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`
425 """
426
427 source_bucket = self.s3.get_bucket(src_bucket, validate=True)
428 dest_bucket = self.s3.get_bucket(dst_bucket, validate=True)
429
430 key_size = source_bucket.lookup(src_key).size
431
432 # We can't do a multipart copy on an empty Key, so handle this specially.
433 # Also, don't bother using the multipart machinery if we're only dealing with a small non-multipart file
434 if key_size == 0 or key_size <= part_size:
435 result = pool.apply_async(dest_bucket.copy_key, args=(dst_key, src_bucket, src_key), kwds=kwargs)
436 # Bubble up any errors we may encounter
437 return result.get()
438
439 mp = None
440
441 try:
442 mp = dest_bucket.initiate_multipart_upload(dst_key, **kwargs)
443 cur_pos = 0
444
445 # Store the results from the apply_async in a list so we can check for failures
446 results = []
447
448 # Calculate the number of chunks the file will be
449 num_parts = (key_size + part_size - 1) // part_size
450
451 for i in range(num_parts):
452 # Issue an S3 copy request, one part at a time, from one S3 object to another
453 part_start = cur_pos
454 cur_pos += part_size
455 part_end = min(cur_pos - 1, key_size - 1)
456 part_num = i + 1
457 results.append(pool.apply_async(mp.copy_part_from_key, args=(src_bucket, src_key, part_num, part_start, part_end)))
458 logger.info('Requesting copy of %s/%s to %s/%s', part_num, num_parts, dst_bucket, dst_key)
459
460 logger.info('Waiting for multipart copy of %s/%s to finish', dst_bucket, dst_key)
461
462 # This will raise any exceptions in any of the copy threads
463 for result in results:
464 result.get()
465
466 # finish the copy, making the file available in S3
467 mp.complete_upload()
468 return mp.key_name
469
470 except:
471 logger.info('Error during multipart s3 copy for %s/%s to %s/%s...', src_bucket, src_key, dst_bucket, dst_key)
472 # cancel the copy so we don't get charged for storage consumed by copied parts
473 if mp:
474 mp.cancel_upload()
475 raise
476
477 def move(self, source_path, destination_path, **kwargs):
478 """
479 Rename/move an object from one S3 location to another.
480
481 :param kwargs: Keyword arguments are passed to the boto function `copy_key`
482 """
483 self.copy(source_path, destination_path, **kwargs)
484 self.remove(source_path)
485
486 def listdir(self, path, start_time=None, end_time=None, return_key=False):
487 """
488 Get an iterable with S3 folder contents.
489 Iterable contains paths relative to queried path.
490
491 :param start_time: Optional argument to list files with modified dates after start_time
492 :param end_time: Optional argument to list files with modified dates before end_time
493 :param return_key: Optional argument, when set to True will return a boto.s3.key.Key (instead of the filename)
494 """
495 (bucket, key) = self._path_to_bucket_and_key(path)
496
497 # grab and validate the bucket
498 s3_bucket = self.s3.get_bucket(bucket, validate=True)
499
500 key_path = self._add_path_delimiter(key)
501 key_path_len = len(key_path)
502 for item in s3_bucket.list(prefix=key_path):
503 last_modified_date = time.strptime(item.last_modified, "%Y-%m-%dT%H:%M:%S.%fZ")
504 if (
505 (not start_time and not end_time) or # neither are defined, list all
506 (start_time and not end_time and start_time < last_modified_date) or # start defined, after start
507 (not start_time and end_time and last_modified_date < end_time) or # end defined, prior to end
508 (start_time and end_time and start_time < last_modified_date < end_time) # both defined, between
509 ):
510 if return_key:
511 yield item
512 else:
513 yield self._add_path_delimiter(path) + item.key[key_path_len:]
514
515 def list(self, path, start_time=None, end_time=None, return_key=False): # backwards compat
516 key_path_len = len(self._add_path_delimiter(path))
517 for item in self.listdir(path, start_time=start_time, end_time=end_time, return_key=return_key):
518 if return_key:
519 yield item
520 else:
521 yield item[key_path_len:]
522
523 def isdir(self, path):
524 """
525 Is the parameter S3 path a directory?
526 """
527 (bucket, key) = self._path_to_bucket_and_key(path)
528
529 # grab and validate the bucket
530 s3_bucket = self.s3.get_bucket(bucket, validate=True)
531
532 # root is a directory
533 if self._is_root(key):
534 return True
535
536 for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0,
537 S3_DIRECTORY_MARKER_SUFFIX_1):
538 s3_dir_with_suffix_key = s3_bucket.get_key(key + suffix)
539 if s3_dir_with_suffix_key:
540 return True
541
542 # files with this prefix
543 key_path = self._add_path_delimiter(key)
544 s3_bucket_list_result = list(itertools.islice(s3_bucket.list(prefix=key_path), 1))
545 if s3_bucket_list_result:
546 return True
547
548 return False
549
550 is_dir = isdir # compatibility with old version.
551
552 def mkdir(self, path, parents=True, raise_if_exists=False):
553 if raise_if_exists and self.isdir(path):
554 raise FileAlreadyExists()
555
556 _, key = self._path_to_bucket_and_key(path)
557 if self._is_root(key):
558 return # isdir raises if the bucket doesn't exist; nothing to do here.
559
560 key = self._add_path_delimiter(key)
561
562 if not parents and not self.isdir(os.path.dirname(key)):
563 raise MissingParentDirectory()
564
565 return self.put_string("", self._add_path_delimiter(path))
566
567 def _get_s3_config(self, key=None):
568 try:
569 config = dict(configuration.get_config().items('s3'))
570 except NoSectionError:
571 return {}
572 # So what ports etc can be read without us having to specify all dtypes
573 for k, v in six.iteritems(config):
574 try:
575 config[k] = int(v)
576 except ValueError:
577 pass
578 if key:
579 return config.get(key)
580 return config
581
582 def _path_to_bucket_and_key(self, path):
583 (scheme, netloc, path, query, fragment) = urlsplit(path)
584 path_without_initial_slash = path[1:]
585 return netloc, path_without_initial_slash
586
587 def _is_root(self, key):
588 return (len(key) == 0) or (key == '/')
589
590 def _add_path_delimiter(self, key):
591 return key if key[-1:] == '/' or key == '' else key + '/'
592
593
594 class AtomicS3File(AtomicLocalFile):
595 """
596 An S3 file that writes to a temp file and puts to S3 on close.
597
598 :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`
599 """
600
601 def __init__(self, path, s3_client, **kwargs):
602 self.s3_client = s3_client
603 super(AtomicS3File, self).__init__(path)
604 self.s3_options = kwargs
605
606 def move_to_final_destination(self):
607 self.s3_client.put_multipart(self.tmp_path, self.path, **self.s3_options)
608
609
610 class ReadableS3File(object):
611 def __init__(self, s3_key):
612 self.s3_key = s3_key
613 self.buffer = []
614 self.closed = False
615 self.finished = False
616
617 def read(self, size=0):
618 f = self.s3_key.read(size=size)
619
620 # boto will loop on the key forever and it's not what is expected by
621 # the python io interface
622 # boto/boto#2805
623 if f == b'':
624 self.finished = True
625 if self.finished:
626 return b''
627
628 return f
629
630 def close(self):
631 self.s3_key.close()
632 self.closed = True
633
634 def __del__(self):
635 self.close()
636
637 def __exit__(self, exc_type, exc, traceback):
638 self.close()
639
640 def __enter__(self):
641 return self
642
643 def _add_to_buffer(self, line):
644 self.buffer.append(line)
645
646 def _flush_buffer(self):
647 output = b''.join(self.buffer)
648 self.buffer = []
649 return output
650
651 def readable(self):
652 return True
653
654 def writable(self):
655 return False
656
657 def seekable(self):
658 return False
659
660 def __iter__(self):
661 key_iter = self.s3_key.__iter__()
662
663 has_next = True
664 while has_next:
665 try:
666 # grab the next chunk
667 chunk = next(key_iter)
668
669 # split on newlines, preserving the newline
670 for line in chunk.splitlines(True):
671
672 if not line.endswith(os.linesep):
673 # no newline, so store in buffer
674 self._add_to_buffer(line)
675 else:
676 # newline found, send it out
677 if self.buffer:
678 self._add_to_buffer(line)
679 yield self._flush_buffer()
680 else:
681 yield line
682 except StopIteration:
683 # send out anything we have left in the buffer
684 output = self._flush_buffer()
685 if output:
686 yield output
687 has_next = False
688 self.close()
689
690
691 class S3Target(FileSystemTarget):
692 """
693 Target S3 file object
694
695 :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`
696 """
697
698 fs = None
699
700 def __init__(self, path, format=None, client=None, **kwargs):
701 super(S3Target, self).__init__(path)
702 if format is None:
703 format = get_default_format()
704
705 self.path = path
706 self.format = format
707 self.fs = client or S3Client()
708 self.s3_options = kwargs
709
710 def open(self, mode='r'):
711 if mode not in ('r', 'w'):
712 raise ValueError("Unsupported open mode '%s'" % mode)
713
714 if mode == 'r':
715 s3_key = self.fs.get_key(self.path)
716 if not s3_key:
717 raise FileNotFoundException("Could not find file at %s" % self.path)
718
719 fileobj = ReadableS3File(s3_key)
720 return self.format.pipe_reader(fileobj)
721 else:
722 return self.format.pipe_writer(AtomicS3File(self.path, self.fs, **self.s3_options))
723
724
725 class S3FlagTarget(S3Target):
726 """
727 Defines a target directory with a flag-file (defaults to `_SUCCESS`) used
728 to signify job success.
729
730 This checks for two things:
731
732 * the path exists (just like the S3Target)
733 * the _SUCCESS file exists within the directory.
734
735 Because Hadoop outputs into a directory and not a single file,
736 the path is assumed to be a directory.
737
738 This is meant to be a handy alternative to AtomicS3File.
739
740 The AtomicFile approach can be burdensome for S3 since there are no directories, per se.
741
742 If we have 1,000,000 output files, then we have to rename 1,000,000 objects.
743 """
744
745 fs = None
746
747 def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
748 """
749 Initializes a S3FlagTarget.
750
751 :param path: the directory where the files are stored.
752 :type path: str
753 :param client:
754 :type client:
755 :param flag:
756 :type flag: str
757 """
758 if format is None:
759 format = get_default_format()
760
761 if path[-1] != "/":
762 raise ValueError("S3FlagTarget requires the path to be to a "
763 "directory. It must end with a slash ( / ).")
764 super(S3FlagTarget, self).__init__(path, format, client)
765 self.flag = flag
766
767 def exists(self):
768 hadoopSemaphore = self.path + self.flag
769 return self.fs.exists(hadoopSemaphore)
770
771
772 class S3EmrTarget(S3FlagTarget):
773 """
774 Deprecated. Use :py:class:`S3FlagTarget`
775 """
776
777 def __init__(self, *args, **kwargs):
778 warnings.warn("S3EmrTarget is deprecated. Please use S3FlagTarget")
779 super(S3EmrTarget, self).__init__(*args, **kwargs)
780
781
782 class S3PathTask(ExternalTask):
783 """
784 A external task that to require existence of a path in S3.
785 """
786 path = Parameter()
787
788 def output(self):
789 return S3Target(self.path)
790
791
792 class S3EmrTask(ExternalTask):
793 """
794 An external task that requires the existence of EMR output in S3.
795 """
796 path = Parameter()
797
798 def output(self):
799 return S3EmrTarget(self.path)
800
801
802 class S3FlagTask(ExternalTask):
803 """
804 An external task that requires the existence of EMR output in S3.
805 """
806 path = Parameter()
807 flag = Parameter(default=None)
808
809 def output(self):
810 return S3FlagTarget(self.path, flag=self.flag)
811
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/luigi/contrib/s3.py b/luigi/contrib/s3.py
--- a/luigi/contrib/s3.py
+++ b/luigi/contrib/s3.py
@@ -565,6 +565,7 @@
return self.put_string("", self._add_path_delimiter(path))
def _get_s3_config(self, key=None):
+ defaults = dict(configuration.get_config().defaults())
try:
config = dict(configuration.get_config().items('s3'))
except NoSectionError:
@@ -577,7 +578,8 @@
pass
if key:
return config.get(key)
- return config
+ section_only = {k: v for k, v in config.items() if k not in defaults or v != defaults[k]}
+ return section_only
def _path_to_bucket_and_key(self, path):
(scheme, netloc, path, query, fragment) = urlsplit(path)
| {"golden_diff": "diff --git a/luigi/contrib/s3.py b/luigi/contrib/s3.py\n--- a/luigi/contrib/s3.py\n+++ b/luigi/contrib/s3.py\n@@ -565,6 +565,7 @@\n return self.put_string(\"\", self._add_path_delimiter(path))\n \n def _get_s3_config(self, key=None):\n+ defaults = dict(configuration.get_config().defaults())\n try:\n config = dict(configuration.get_config().items('s3'))\n except NoSectionError:\n@@ -577,7 +578,8 @@\n pass\n if key:\n return config.get(key)\n- return config\n+ section_only = {k: v for k, v in config.items() if k not in defaults or v != defaults[k]}\n+ return section_only\n \n def _path_to_bucket_and_key(self, path):\n (scheme, netloc, path, query, fragment) = urlsplit(path)\n", "issue": "S3's _get_s3_config() restricts the use of [DEFAULT] ConfigParser section\n<!---\r\nCurrently, there are no strict procedures or guidelines for submitting issues.\r\nIn short, please just use common sense.\r\n\r\nCommon sense includes this at bare-minimum:\r\n\r\n * search for similar issues posted before creating a new issue.\r\n * Use markdown to format all code/logs. Issues which are hard to read\r\n when rendered on GitHub might be closed with a friendly reminder of this.\r\n\r\nAlso, add steps to reproduce the bug, if applicable. Sample code would be nice too :)\r\n\r\nFor more information on how to submit valuable contributions,\r\nsee https://opensource.guide/how-to-contribute/#how-to-submit-a-contribution\r\n-->\r\n## Problem\r\nS3Client implementation of config options prevents the use of `ConfigParser`'s `[DEFAULT]` section.\r\n\r\nDue to the implementation of `luigi/contrib/s3.py`'s auto-fill options using all key-value pairs existing in the `[s3]` section within one's specified luigi configuration file, errors are thrown when attempting to utilize Python's ConfigParser `[DEFAULT]` section.\r\n\r\nFunctionality appears to have been added in PR #500\r\n\r\n### Presence of Bug\r\nFrom what i can tell by searching the repo for `configuration.get_config().items`, S3 is the only culprit for this issue.\r\n\r\n## Question (fbo solution)\r\nI understand the motivation here is to be able to specify s3 boto connection options via the Luigi config, but i'm not aware of a best strategy to limit either a list of approved keys to keep or if there's a way for the ConfigParser to parse and interpolate only those keys within the explicit `[s3]` path (ignoring those in `[DEFAULT]`).\r\n\r\nI'm happy to tackle this, but suggestions are appreciated and welcome.\r\n\r\n## Source Code at Fault\r\n```python\r\nclass S3Client(FileSystem):\r\n _s3 = None \r\n \r\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, **kwargs):\r\n from boto.s3.key import Key\r\n options = self._get_s3_config() # pulls all s3 section vars, including DEFAULT\r\n options.update(kwargs)\r\n if aws_access_key_id:\r\n options['aws_access_key_id'] = aws_access_key_id\r\n if aws_secret_access_key:\r\n options['aws_secret_access_key'] = aws_secret_access_key\r\n \r\n self.Key = Key\r\n self._options = options # all s3 and DEFAULT key/value pairs\r\n\r\n @property\r\n def s3(self):\r\n # only import boto when needed to allow top-lvl s3 module import\r\n import boto\r\n import boto.s3.connection\r\n\r\n options = dict(self._options) # var of interest\r\n\r\n if self._s3:\r\n return self._s3\r\n\r\n aws_access_key_id = options.get('aws_access_key_id')\r\n aws_secret_access_key = options.get('aws_secret_access_key')\r\n\r\n # Removing key args would break backwards compability\r\n role_arn = 
options.get('aws_role_arn')\r\n role_session_name = options.get('aws_role_session_name')\r\n\r\n aws_session_token = None\r\n\r\n if role_arn and role_session_name:\r\n from boto import sts\r\n\r\n sts_client = sts.STSConnection()\r\n assumed_role = sts_client.assume_role(role_arn, role_session_name)\r\n aws_secret_access_key = assumed_role.credentials.secret_key\r\n aws_access_key_id = assumed_role.credentials.access_key\r\n aws_session_token = assumed_role.credentials.session_token\r\n\r\n for key in ['aws_access_key_id', 'aws_secret_access_key', 'aws_role_session_name', 'aws_role_arn']:\r\n if key in options:\r\n options.pop(key)\r\n self._s3 = boto.s3.connection.S3Connection(aws_access_key_id,\r\n aws_secret_access_key,\r\n security_token=aws_session_token,\r\n **options) # passes all key/value pairs (except auth) to boto - may cause error\r\n return self._s3\r\n...\r\n\r\n def _get_s3_config(self, key=None):\r\n try:\r\n config = dict(configuration.get_config().items('s3')) # pulls all k/v from s3 and DEFAULT\r\n except NoSectionError:\r\n return {}\r\n # So what ports etc can be read without us having to specify all dtypes\r\n for k, v in six.iteritems(config):\r\n try:\r\n config[k] = int(v)\r\n except ValueError:\r\n pass\r\n if key:\r\n return config.get(key)\r\n return config\r\n...\r\n```\r\n\r\n### Erroneous Luigi Code\r\nluigi.cfg\r\n```\r\n[DEFAULT]\r\npath_base: /Users/dstadther\r\n\r\n[core]\r\n...\r\nlogging_conf_file: %(path_base)s/this_project/logging.conf\r\n\r\n[bucket]\r\nload: s3://my.s3.load.bucket/\r\n\r\n[path]\r\nsql: %(path_base)s/sql\r\n\r\n[s3]\r\naws_access_key_id: foo\r\naws_secret_access_key: bar\r\ncalling_format: boto.s3.connection.OrdinaryCallingFormat\r\n\r\n[scheduler]\r\nstate-path: %(path_base)s/out/luigid_state.pkl\r\n\r\n...\r\n\r\n```\r\n\r\nluigi example script (`test_s3_default.py`)\r\n```python\r\nimport luigi\r\nfrom luigi import mock\r\nfrom luigi.contrib import s3\r\n\r\n\r\nclass bucket(luigi.Config):\r\n load = luigi.Parameter()\r\n\r\n\r\nclass TestDefault(luigi.Task):\r\n client = s3.S3Client()\r\n\r\n def output(self):\r\n return mock.MockTarget('')\r\n\r\n def run(self):\r\n for i in self.client.listdir(bucket().load):\r\n print(i)\r\n self.output().open('w').close()\r\n\r\n\r\nif __name__ == '__main__':\r\n luigi.run()\r\n\r\n```\r\n\r\nshell\r\n```sh\r\ncd /Users/dstadther/this_project/scripts; /Users/dstadther/venv/this_project/bin/python test_s3_default.py TestDefault\r\n```\r\n\r\nError stack trace\r\n```sh\r\nTraceback (most recent call last):\r\n File \"/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/worker.py\", line 191, in run\r\n new_deps = self._run_get_new_deps()\r\n File \"/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/worker.py\", line 129, in _run_get_new_deps\r\n task_gen = self.task.run()\r\n File \"test_s3_default.py\", line 13, in run\r\n for i in self.client.listdir(bucket().load):\r\n File \"/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/contrib/s3.py\", line 498, in listdir\r\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\r\n File \"/Users/dstadther/venv/this_project/lib/python2.7/site-packages/luigi/contrib/s3.py\", line 126, in s3\r\n **options)\r\nTypeError: __init__() got an unexpected keyword argument 'path_base'\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nImplementation of Simple Storage Service support.\n:py:class:`S3Target` is a subclass of the Target class to support S3 file\nsystem operations. The `boto` library is required to use S3 targets.\n\"\"\"\n\nfrom __future__ import division\n\nimport datetime\nimport itertools\nimport logging\nimport os\nimport os.path\n\nimport time\nfrom multiprocessing.pool import ThreadPool\n\ntry:\n from urlparse import urlsplit\nexcept ImportError:\n from urllib.parse import urlsplit\nimport warnings\n\ntry:\n from ConfigParser import NoSectionError\nexcept ImportError:\n from configparser import NoSectionError\n\nfrom luigi import six\nfrom luigi.six.moves import range\n\nfrom luigi import configuration\nfrom luigi.format import get_default_format\nfrom luigi.parameter import Parameter\nfrom luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory\nfrom luigi.task import ExternalTask\n\nlogger = logging.getLogger('luigi-interface')\n\n\n# two different ways of marking a directory\n# with a suffix in S3\nS3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'\nS3_DIRECTORY_MARKER_SUFFIX_1 = '/'\n\n\nclass InvalidDeleteException(FileSystemException):\n pass\n\n\nclass FileNotFoundException(FileSystemException):\n pass\n\n\nclass S3Client(FileSystem):\n \"\"\"\n boto-powered S3 client.\n \"\"\"\n\n _s3 = None\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n **kwargs):\n from boto.s3.key import Key\n options = self._get_s3_config()\n options.update(kwargs)\n if aws_access_key_id:\n options['aws_access_key_id'] = aws_access_key_id\n if aws_secret_access_key:\n options['aws_secret_access_key'] = aws_secret_access_key\n\n self.Key = Key\n self._options = options\n\n @property\n def s3(self):\n # only import boto when needed to allow top-lvl s3 module import\n import boto\n import boto.s3.connection\n\n options = dict(self._options)\n\n if self._s3:\n return self._s3\n\n aws_access_key_id = options.get('aws_access_key_id')\n aws_secret_access_key = options.get('aws_secret_access_key')\n\n # Removing key args would break backwards compability\n role_arn = options.get('aws_role_arn')\n role_session_name = options.get('aws_role_session_name')\n\n aws_session_token = None\n\n if role_arn and role_session_name:\n from boto import sts\n\n sts_client = sts.STSConnection()\n assumed_role = sts_client.assume_role(role_arn, role_session_name)\n aws_secret_access_key = assumed_role.credentials.secret_key\n aws_access_key_id = assumed_role.credentials.access_key\n aws_session_token = assumed_role.credentials.session_token\n\n for key in ['aws_access_key_id', 'aws_secret_access_key', 'aws_role_session_name', 'aws_role_arn']:\n if key in options:\n options.pop(key)\n self._s3 = boto.s3.connection.S3Connection(aws_access_key_id,\n aws_secret_access_key,\n security_token=aws_session_token,\n **options)\n return self._s3\n\n @s3.setter\n def s3(self, value):\n self._s3 = value\n\n def exists(self, path):\n \"\"\"\n Does provided path exist on S3?\n \"\"\"\n (bucket, key) = 
self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # root always exists\n if self._is_root(key):\n return True\n\n # file\n s3_key = s3_bucket.get_key(key)\n if s3_key:\n return True\n\n if self.isdir(path):\n return True\n\n logger.debug('Path %s does not exist', path)\n return False\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove a file or directory from S3.\n \"\"\"\n if not self.exists(path):\n logger.debug('Could not delete %s; path does not exist', path)\n return False\n\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # root\n if self._is_root(key):\n raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # file\n s3_key = s3_bucket.get_key(key)\n if s3_key:\n s3_bucket.delete_key(s3_key)\n logger.debug('Deleting %s from bucket %s', key, bucket)\n return True\n\n if self.isdir(path) and not recursive:\n raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)\n\n delete_key_list = [\n k for k in s3_bucket.list(self._add_path_delimiter(key))]\n\n # delete the directory marker file if it exists\n s3_dir_with_suffix_key = s3_bucket.get_key(key + S3_DIRECTORY_MARKER_SUFFIX_0)\n if s3_dir_with_suffix_key:\n delete_key_list.append(s3_dir_with_suffix_key)\n\n if len(delete_key_list) > 0:\n for k in delete_key_list:\n logger.debug('Deleting %s from bucket %s', k, bucket)\n s3_bucket.delete_keys(delete_key_list)\n return True\n\n return False\n\n def get_key(self, path):\n \"\"\"\n Returns just the key from the path.\n\n An s3 path is composed of a bucket and a key.\n\n Suppose we have a path `s3://my_bucket/some/files/my_file`. The key is `some/files/my_file`.\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n return s3_bucket.get_key(key)\n\n def put(self, local_path, destination_s3_path, **kwargs):\n \"\"\"\n Put an object stored locally to an S3 path.\n\n :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_filename`\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # put the file\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.set_contents_from_filename(local_path, **kwargs)\n\n def put_string(self, content, destination_s3_path, **kwargs):\n \"\"\"\n Put a string to an S3 path.\n\n :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_string`\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # put the content\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.set_contents_from_string(content, **kwargs)\n\n def put_multipart(self, local_path, destination_s3_path, part_size=67108864, **kwargs):\n \"\"\"\n Put an object stored locally to an S3 path\n using S3 multi-part upload (for files > 5GB).\n\n :param local_path: Path to source local file\n :param destination_s3_path: URL for target S3 location\n :param part_size: Part size in bytes. 
Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n # calculate number of parts to upload\n # based on the size of the file\n source_size = os.stat(local_path).st_size\n\n if source_size <= part_size:\n # fallback to standard, non-multipart strategy\n return self.put(local_path, destination_s3_path, **kwargs)\n\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # calculate the number of parts (int division).\n # use modulo to avoid float precision issues\n # for exactly-sized fits\n num_parts = (source_size + part_size - 1) // part_size\n\n mp = None\n try:\n mp = s3_bucket.initiate_multipart_upload(key, **kwargs)\n\n for i in range(num_parts):\n # upload a part at a time to S3\n offset = part_size * i\n bytes = min(part_size, source_size - offset)\n with open(local_path, 'rb') as fp:\n part_num = i + 1\n logger.info('Uploading part %s/%s to %s', part_num, num_parts, destination_s3_path)\n fp.seek(offset)\n mp.upload_part_from_file(fp, part_num=part_num, size=bytes)\n\n # finish the upload, making the file available in S3\n mp.complete_upload()\n except BaseException:\n if mp:\n logger.info('Canceling multipart s3 upload for %s', destination_s3_path)\n # cancel the upload so we don't get charged for\n # storage consumed by uploaded parts\n mp.cancel_upload()\n raise\n\n def get(self, s3_path, destination_local_path):\n \"\"\"\n Get an object stored in S3 and write it to a local path.\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # download the file\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.get_contents_to_filename(destination_local_path)\n\n def get_as_string(self, s3_path):\n \"\"\"\n Get the contents of an object stored in S3 as a string.\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # get the content\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n contents = s3_key.get_contents_as_string()\n\n return contents\n\n def copy(self, source_path, destination_path, threads=100, start_time=None, end_time=None, part_size=67108864, **kwargs):\n \"\"\"\n Copy object(s) from one S3 location to another. Works for individual keys or entire directories.\n\n When files are larger than `part_size`, multipart uploading will be used.\n\n :param source_path: The `s3://` path of the directory or key to copy from\n :param destination_path: The `s3://` path of the directory or key to copy to\n :param threads: Optional argument to define the number of threads to use when copying (min: 3 threads)\n :param start_time: Optional argument to copy files with modified dates after start_time\n :param end_time: Optional argument to copy files with modified dates before end_time\n :param part_size: Part size in bytes. 
Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `copy_key`\n\n :returns tuple (number_of_files_copied, total_size_copied_in_bytes)\n \"\"\"\n start = datetime.datetime.now()\n\n (src_bucket, src_key) = self._path_to_bucket_and_key(source_path)\n (dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)\n\n # As the S3 copy command is completely server side, there is no issue with issuing a lot of threads\n # to issue a single API call per copy, however, this may in theory cause issues on systems with low ulimits for\n # number of threads when copying really large files (e.g. with a ~100GB file this will open ~1500\n # threads), or large directories. Around 100 threads seems to work well.\n\n threads = 3 if threads < 3 else threads # don't allow threads to be less than 3\n total_keys = 0\n\n copy_pool = ThreadPool(processes=threads)\n\n if self.isdir(source_path):\n # The management pool is to ensure that there's no deadlock between the s3 copying threads, and the\n # multipart_copy threads that monitors each group of s3 copy threads and returns a success once the entire file\n # is copied. Without this, we could potentially fill up the pool with threads waiting to check if the s3 copies\n # have completed, leaving no available threads to actually perform any copying.\n copy_jobs = []\n management_pool = ThreadPool(processes=threads)\n\n (bucket, key) = self._path_to_bucket_and_key(source_path)\n key_path = self._add_path_delimiter(key)\n key_path_len = len(key_path)\n\n total_size_bytes = 0\n src_prefix = self._add_path_delimiter(src_key)\n dst_prefix = self._add_path_delimiter(dst_key)\n for item in self.list(source_path, start_time=start_time, end_time=end_time, return_key=True):\n path = item.key[key_path_len:]\n # prevents copy attempt of empty key in folder\n if path != '' and path != '/':\n total_keys += 1\n total_size_bytes += item.size\n job = management_pool.apply_async(self.__copy_multipart,\n args=(copy_pool,\n src_bucket, src_prefix + path,\n dst_bucket, dst_prefix + path,\n part_size),\n kwds=kwargs)\n copy_jobs.append(job)\n\n # Wait for the pools to finish scheduling all the copies\n management_pool.close()\n management_pool.join()\n copy_pool.close()\n copy_pool.join()\n\n # Raise any errors encountered in any of the copy processes\n for result in copy_jobs:\n result.get()\n\n end = datetime.datetime.now()\n duration = end - start\n logger.info('%s : Complete : %s total keys copied in %s' %\n (datetime.datetime.now(), total_keys, duration))\n\n return total_keys, total_size_bytes\n\n # If the file isn't a directory just perform a simple copy\n else:\n self.__copy_multipart(copy_pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs)\n # Close the pool\n copy_pool.close()\n copy_pool.join()\n\n def __copy_multipart(self, pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs):\n \"\"\"\n Copy a single S3 object to another S3 object, falling back to multipart copy where necessary\n\n NOTE: This is a private method and should only be called from within the `luigi.s3.copy` method\n\n :param pool: The threadpool to put the s3 copy processes onto\n :param src_bucket: source bucket name\n :param src_key: source key name\n :param dst_bucket: destination bucket name\n :param dst_key: destination key name\n :param key_size: size of the key to copy in bytes\n :param part_size: Part size in bytes. 
Must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n source_bucket = self.s3.get_bucket(src_bucket, validate=True)\n dest_bucket = self.s3.get_bucket(dst_bucket, validate=True)\n\n key_size = source_bucket.lookup(src_key).size\n\n # We can't do a multipart copy on an empty Key, so handle this specially.\n # Also, don't bother using the multipart machinery if we're only dealing with a small non-multipart file\n if key_size == 0 or key_size <= part_size:\n result = pool.apply_async(dest_bucket.copy_key, args=(dst_key, src_bucket, src_key), kwds=kwargs)\n # Bubble up any errors we may encounter\n return result.get()\n\n mp = None\n\n try:\n mp = dest_bucket.initiate_multipart_upload(dst_key, **kwargs)\n cur_pos = 0\n\n # Store the results from the apply_async in a list so we can check for failures\n results = []\n\n # Calculate the number of chunks the file will be\n num_parts = (key_size + part_size - 1) // part_size\n\n for i in range(num_parts):\n # Issue an S3 copy request, one part at a time, from one S3 object to another\n part_start = cur_pos\n cur_pos += part_size\n part_end = min(cur_pos - 1, key_size - 1)\n part_num = i + 1\n results.append(pool.apply_async(mp.copy_part_from_key, args=(src_bucket, src_key, part_num, part_start, part_end)))\n logger.info('Requesting copy of %s/%s to %s/%s', part_num, num_parts, dst_bucket, dst_key)\n\n logger.info('Waiting for multipart copy of %s/%s to finish', dst_bucket, dst_key)\n\n # This will raise any exceptions in any of the copy threads\n for result in results:\n result.get()\n\n # finish the copy, making the file available in S3\n mp.complete_upload()\n return mp.key_name\n\n except:\n logger.info('Error during multipart s3 copy for %s/%s to %s/%s...', src_bucket, src_key, dst_bucket, dst_key)\n # cancel the copy so we don't get charged for storage consumed by copied parts\n if mp:\n mp.cancel_upload()\n raise\n\n def move(self, source_path, destination_path, **kwargs):\n \"\"\"\n Rename/move an object from one S3 location to another.\n\n :param kwargs: Keyword arguments are passed to the boto function `copy_key`\n \"\"\"\n self.copy(source_path, destination_path, **kwargs)\n self.remove(source_path)\n\n def listdir(self, path, start_time=None, end_time=None, return_key=False):\n \"\"\"\n Get an iterable with S3 folder contents.\n Iterable contains paths relative to queried path.\n\n :param start_time: Optional argument to list files with modified dates after start_time\n :param end_time: Optional argument to list files with modified dates before end_time\n :param return_key: Optional argument, when set to True will return a boto.s3.key.Key (instead of the filename)\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n key_path = self._add_path_delimiter(key)\n key_path_len = len(key_path)\n for item in s3_bucket.list(prefix=key_path):\n last_modified_date = time.strptime(item.last_modified, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n if (\n (not start_time and not end_time) or # neither are defined, list all\n (start_time and not end_time and start_time < last_modified_date) or # start defined, after start\n (not start_time and end_time and last_modified_date < end_time) or # end defined, prior to end\n (start_time and end_time and start_time < last_modified_date < end_time) # both defined, between\n ):\n if return_key:\n yield item\n else:\n yield 
self._add_path_delimiter(path) + item.key[key_path_len:]\n\n def list(self, path, start_time=None, end_time=None, return_key=False): # backwards compat\n key_path_len = len(self._add_path_delimiter(path))\n for item in self.listdir(path, start_time=start_time, end_time=end_time, return_key=return_key):\n if return_key:\n yield item\n else:\n yield item[key_path_len:]\n\n def isdir(self, path):\n \"\"\"\n Is the parameter S3 path a directory?\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # root is a directory\n if self._is_root(key):\n return True\n\n for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0,\n S3_DIRECTORY_MARKER_SUFFIX_1):\n s3_dir_with_suffix_key = s3_bucket.get_key(key + suffix)\n if s3_dir_with_suffix_key:\n return True\n\n # files with this prefix\n key_path = self._add_path_delimiter(key)\n s3_bucket_list_result = list(itertools.islice(s3_bucket.list(prefix=key_path), 1))\n if s3_bucket_list_result:\n return True\n\n return False\n\n is_dir = isdir # compatibility with old version.\n\n def mkdir(self, path, parents=True, raise_if_exists=False):\n if raise_if_exists and self.isdir(path):\n raise FileAlreadyExists()\n\n _, key = self._path_to_bucket_and_key(path)\n if self._is_root(key):\n return # isdir raises if the bucket doesn't exist; nothing to do here.\n\n key = self._add_path_delimiter(key)\n\n if not parents and not self.isdir(os.path.dirname(key)):\n raise MissingParentDirectory()\n\n return self.put_string(\"\", self._add_path_delimiter(path))\n\n def _get_s3_config(self, key=None):\n try:\n config = dict(configuration.get_config().items('s3'))\n except NoSectionError:\n return {}\n # So what ports etc can be read without us having to specify all dtypes\n for k, v in six.iteritems(config):\n try:\n config[k] = int(v)\n except ValueError:\n pass\n if key:\n return config.get(key)\n return config\n\n def _path_to_bucket_and_key(self, path):\n (scheme, netloc, path, query, fragment) = urlsplit(path)\n path_without_initial_slash = path[1:]\n return netloc, path_without_initial_slash\n\n def _is_root(self, key):\n return (len(key) == 0) or (key == '/')\n\n def _add_path_delimiter(self, key):\n return key if key[-1:] == '/' or key == '' else key + '/'\n\n\nclass AtomicS3File(AtomicLocalFile):\n \"\"\"\n An S3 file that writes to a temp file and puts to S3 on close.\n\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n def __init__(self, path, s3_client, **kwargs):\n self.s3_client = s3_client\n super(AtomicS3File, self).__init__(path)\n self.s3_options = kwargs\n\n def move_to_final_destination(self):\n self.s3_client.put_multipart(self.tmp_path, self.path, **self.s3_options)\n\n\nclass ReadableS3File(object):\n def __init__(self, s3_key):\n self.s3_key = s3_key\n self.buffer = []\n self.closed = False\n self.finished = False\n\n def read(self, size=0):\n f = self.s3_key.read(size=size)\n\n # boto will loop on the key forever and it's not what is expected by\n # the python io interface\n # boto/boto#2805\n if f == b'':\n self.finished = True\n if self.finished:\n return b''\n\n return f\n\n def close(self):\n self.s3_key.close()\n self.closed = True\n\n def __del__(self):\n self.close()\n\n def __exit__(self, exc_type, exc, traceback):\n self.close()\n\n def __enter__(self):\n return self\n\n def _add_to_buffer(self, line):\n self.buffer.append(line)\n\n def _flush_buffer(self):\n output = b''.join(self.buffer)\n 
self.buffer = []\n return output\n\n def readable(self):\n return True\n\n def writable(self):\n return False\n\n def seekable(self):\n return False\n\n def __iter__(self):\n key_iter = self.s3_key.__iter__()\n\n has_next = True\n while has_next:\n try:\n # grab the next chunk\n chunk = next(key_iter)\n\n # split on newlines, preserving the newline\n for line in chunk.splitlines(True):\n\n if not line.endswith(os.linesep):\n # no newline, so store in buffer\n self._add_to_buffer(line)\n else:\n # newline found, send it out\n if self.buffer:\n self._add_to_buffer(line)\n yield self._flush_buffer()\n else:\n yield line\n except StopIteration:\n # send out anything we have left in the buffer\n output = self._flush_buffer()\n if output:\n yield output\n has_next = False\n self.close()\n\n\nclass S3Target(FileSystemTarget):\n \"\"\"\n Target S3 file object\n\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n fs = None\n\n def __init__(self, path, format=None, client=None, **kwargs):\n super(S3Target, self).__init__(path)\n if format is None:\n format = get_default_format()\n\n self.path = path\n self.format = format\n self.fs = client or S3Client()\n self.s3_options = kwargs\n\n def open(self, mode='r'):\n if mode not in ('r', 'w'):\n raise ValueError(\"Unsupported open mode '%s'\" % mode)\n\n if mode == 'r':\n s3_key = self.fs.get_key(self.path)\n if not s3_key:\n raise FileNotFoundException(\"Could not find file at %s\" % self.path)\n\n fileobj = ReadableS3File(s3_key)\n return self.format.pipe_reader(fileobj)\n else:\n return self.format.pipe_writer(AtomicS3File(self.path, self.fs, **self.s3_options))\n\n\nclass S3FlagTarget(S3Target):\n \"\"\"\n Defines a target directory with a flag-file (defaults to `_SUCCESS`) used\n to signify job success.\n\n This checks for two things:\n\n * the path exists (just like the S3Target)\n * the _SUCCESS file exists within the directory.\n\n Because Hadoop outputs into a directory and not a single file,\n the path is assumed to be a directory.\n\n This is meant to be a handy alternative to AtomicS3File.\n\n The AtomicFile approach can be burdensome for S3 since there are no directories, per se.\n\n If we have 1,000,000 output files, then we have to rename 1,000,000 objects.\n \"\"\"\n\n fs = None\n\n def __init__(self, path, format=None, client=None, flag='_SUCCESS'):\n \"\"\"\n Initializes a S3FlagTarget.\n\n :param path: the directory where the files are stored.\n :type path: str\n :param client:\n :type client:\n :param flag:\n :type flag: str\n \"\"\"\n if format is None:\n format = get_default_format()\n\n if path[-1] != \"/\":\n raise ValueError(\"S3FlagTarget requires the path to be to a \"\n \"directory. It must end with a slash ( / ).\")\n super(S3FlagTarget, self).__init__(path, format, client)\n self.flag = flag\n\n def exists(self):\n hadoopSemaphore = self.path + self.flag\n return self.fs.exists(hadoopSemaphore)\n\n\nclass S3EmrTarget(S3FlagTarget):\n \"\"\"\n Deprecated. Use :py:class:`S3FlagTarget`\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\"S3EmrTarget is deprecated. 
Please use S3FlagTarget\")\n super(S3EmrTarget, self).__init__(*args, **kwargs)\n\n\nclass S3PathTask(ExternalTask):\n \"\"\"\n A external task that to require existence of a path in S3.\n \"\"\"\n path = Parameter()\n\n def output(self):\n return S3Target(self.path)\n\n\nclass S3EmrTask(ExternalTask):\n \"\"\"\n An external task that requires the existence of EMR output in S3.\n \"\"\"\n path = Parameter()\n\n def output(self):\n return S3EmrTarget(self.path)\n\n\nclass S3FlagTask(ExternalTask):\n \"\"\"\n An external task that requires the existence of EMR output in S3.\n \"\"\"\n path = Parameter()\n flag = Parameter(default=None)\n\n def output(self):\n return S3FlagTarget(self.path, flag=self.flag)\n", "path": "luigi/contrib/s3.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2012-2015 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nImplementation of Simple Storage Service support.\n:py:class:`S3Target` is a subclass of the Target class to support S3 file\nsystem operations. The `boto` library is required to use S3 targets.\n\"\"\"\n\nfrom __future__ import division\n\nimport datetime\nimport itertools\nimport logging\nimport os\nimport os.path\n\nimport time\nfrom multiprocessing.pool import ThreadPool\n\ntry:\n from urlparse import urlsplit\nexcept ImportError:\n from urllib.parse import urlsplit\nimport warnings\n\ntry:\n from ConfigParser import NoSectionError\nexcept ImportError:\n from configparser import NoSectionError\n\nfrom luigi import six\nfrom luigi.six.moves import range\n\nfrom luigi import configuration\nfrom luigi.format import get_default_format\nfrom luigi.parameter import Parameter\nfrom luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory\nfrom luigi.task import ExternalTask\n\nlogger = logging.getLogger('luigi-interface')\n\n\n# two different ways of marking a directory\n# with a suffix in S3\nS3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'\nS3_DIRECTORY_MARKER_SUFFIX_1 = '/'\n\n\nclass InvalidDeleteException(FileSystemException):\n pass\n\n\nclass FileNotFoundException(FileSystemException):\n pass\n\n\nclass S3Client(FileSystem):\n \"\"\"\n boto-powered S3 client.\n \"\"\"\n\n _s3 = None\n\n def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,\n **kwargs):\n from boto.s3.key import Key\n options = self._get_s3_config()\n options.update(kwargs)\n if aws_access_key_id:\n options['aws_access_key_id'] = aws_access_key_id\n if aws_secret_access_key:\n options['aws_secret_access_key'] = aws_secret_access_key\n\n self.Key = Key\n self._options = options\n\n @property\n def s3(self):\n # only import boto when needed to allow top-lvl s3 module import\n import boto\n import boto.s3.connection\n\n options = dict(self._options)\n\n if self._s3:\n return self._s3\n\n aws_access_key_id = options.get('aws_access_key_id')\n aws_secret_access_key = options.get('aws_secret_access_key')\n\n # Removing key args would break backwards 
compability\n role_arn = options.get('aws_role_arn')\n role_session_name = options.get('aws_role_session_name')\n\n aws_session_token = None\n\n if role_arn and role_session_name:\n from boto import sts\n\n sts_client = sts.STSConnection()\n assumed_role = sts_client.assume_role(role_arn, role_session_name)\n aws_secret_access_key = assumed_role.credentials.secret_key\n aws_access_key_id = assumed_role.credentials.access_key\n aws_session_token = assumed_role.credentials.session_token\n\n for key in ['aws_access_key_id', 'aws_secret_access_key', 'aws_role_session_name', 'aws_role_arn']:\n if key in options:\n options.pop(key)\n self._s3 = boto.s3.connection.S3Connection(aws_access_key_id,\n aws_secret_access_key,\n security_token=aws_session_token,\n **options)\n return self._s3\n\n @s3.setter\n def s3(self, value):\n self._s3 = value\n\n def exists(self, path):\n \"\"\"\n Does provided path exist on S3?\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # root always exists\n if self._is_root(key):\n return True\n\n # file\n s3_key = s3_bucket.get_key(key)\n if s3_key:\n return True\n\n if self.isdir(path):\n return True\n\n logger.debug('Path %s does not exist', path)\n return False\n\n def remove(self, path, recursive=True):\n \"\"\"\n Remove a file or directory from S3.\n \"\"\"\n if not self.exists(path):\n logger.debug('Could not delete %s; path does not exist', path)\n return False\n\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # root\n if self._is_root(key):\n raise InvalidDeleteException('Cannot delete root of bucket at path %s' % path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # file\n s3_key = s3_bucket.get_key(key)\n if s3_key:\n s3_bucket.delete_key(s3_key)\n logger.debug('Deleting %s from bucket %s', key, bucket)\n return True\n\n if self.isdir(path) and not recursive:\n raise InvalidDeleteException('Path %s is a directory. Must use recursive delete' % path)\n\n delete_key_list = [\n k for k in s3_bucket.list(self._add_path_delimiter(key))]\n\n # delete the directory marker file if it exists\n s3_dir_with_suffix_key = s3_bucket.get_key(key + S3_DIRECTORY_MARKER_SUFFIX_0)\n if s3_dir_with_suffix_key:\n delete_key_list.append(s3_dir_with_suffix_key)\n\n if len(delete_key_list) > 0:\n for k in delete_key_list:\n logger.debug('Deleting %s from bucket %s', k, bucket)\n s3_bucket.delete_keys(delete_key_list)\n return True\n\n return False\n\n def get_key(self, path):\n \"\"\"\n Returns just the key from the path.\n\n An s3 path is composed of a bucket and a key.\n\n Suppose we have a path `s3://my_bucket/some/files/my_file`. 
The key is `some/files/my_file`.\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n return s3_bucket.get_key(key)\n\n def put(self, local_path, destination_s3_path, **kwargs):\n \"\"\"\n Put an object stored locally to an S3 path.\n\n :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_filename`\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # put the file\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.set_contents_from_filename(local_path, **kwargs)\n\n def put_string(self, content, destination_s3_path, **kwargs):\n \"\"\"\n Put a string to an S3 path.\n\n :param kwargs: Keyword arguments are passed to the boto function `set_contents_from_string`\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # put the content\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.set_contents_from_string(content, **kwargs)\n\n def put_multipart(self, local_path, destination_s3_path, part_size=67108864, **kwargs):\n \"\"\"\n Put an object stored locally to an S3 path\n using S3 multi-part upload (for files > 5GB).\n\n :param local_path: Path to source local file\n :param destination_s3_path: URL for target S3 location\n :param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n # calculate number of parts to upload\n # based on the size of the file\n source_size = os.stat(local_path).st_size\n\n if source_size <= part_size:\n # fallback to standard, non-multipart strategy\n return self.put(local_path, destination_s3_path, **kwargs)\n\n (bucket, key) = self._path_to_bucket_and_key(destination_s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # calculate the number of parts (int division).\n # use modulo to avoid float precision issues\n # for exactly-sized fits\n num_parts = (source_size + part_size - 1) // part_size\n\n mp = None\n try:\n mp = s3_bucket.initiate_multipart_upload(key, **kwargs)\n\n for i in range(num_parts):\n # upload a part at a time to S3\n offset = part_size * i\n bytes = min(part_size, source_size - offset)\n with open(local_path, 'rb') as fp:\n part_num = i + 1\n logger.info('Uploading part %s/%s to %s', part_num, num_parts, destination_s3_path)\n fp.seek(offset)\n mp.upload_part_from_file(fp, part_num=part_num, size=bytes)\n\n # finish the upload, making the file available in S3\n mp.complete_upload()\n except BaseException:\n if mp:\n logger.info('Canceling multipart s3 upload for %s', destination_s3_path)\n # cancel the upload so we don't get charged for\n # storage consumed by uploaded parts\n mp.cancel_upload()\n raise\n\n def get(self, s3_path, destination_local_path):\n \"\"\"\n Get an object stored in S3 and write it to a local path.\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # download the file\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n s3_key.get_contents_to_filename(destination_local_path)\n\n def get_as_string(self, s3_path):\n \"\"\"\n Get the contents of an object stored in S3 as a string.\n 
\"\"\"\n (bucket, key) = self._path_to_bucket_and_key(s3_path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # get the content\n s3_key = self.Key(s3_bucket)\n s3_key.key = key\n contents = s3_key.get_contents_as_string()\n\n return contents\n\n def copy(self, source_path, destination_path, threads=100, start_time=None, end_time=None, part_size=67108864, **kwargs):\n \"\"\"\n Copy object(s) from one S3 location to another. Works for individual keys or entire directories.\n\n When files are larger than `part_size`, multipart uploading will be used.\n\n :param source_path: The `s3://` path of the directory or key to copy from\n :param destination_path: The `s3://` path of the directory or key to copy to\n :param threads: Optional argument to define the number of threads to use when copying (min: 3 threads)\n :param start_time: Optional argument to copy files with modified dates after start_time\n :param end_time: Optional argument to copy files with modified dates before end_time\n :param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `copy_key`\n\n :returns tuple (number_of_files_copied, total_size_copied_in_bytes)\n \"\"\"\n start = datetime.datetime.now()\n\n (src_bucket, src_key) = self._path_to_bucket_and_key(source_path)\n (dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)\n\n # As the S3 copy command is completely server side, there is no issue with issuing a lot of threads\n # to issue a single API call per copy, however, this may in theory cause issues on systems with low ulimits for\n # number of threads when copying really large files (e.g. with a ~100GB file this will open ~1500\n # threads), or large directories. Around 100 threads seems to work well.\n\n threads = 3 if threads < 3 else threads # don't allow threads to be less than 3\n total_keys = 0\n\n copy_pool = ThreadPool(processes=threads)\n\n if self.isdir(source_path):\n # The management pool is to ensure that there's no deadlock between the s3 copying threads, and the\n # multipart_copy threads that monitors each group of s3 copy threads and returns a success once the entire file\n # is copied. 
Without this, we could potentially fill up the pool with threads waiting to check if the s3 copies\n # have completed, leaving no available threads to actually perform any copying.\n copy_jobs = []\n management_pool = ThreadPool(processes=threads)\n\n (bucket, key) = self._path_to_bucket_and_key(source_path)\n key_path = self._add_path_delimiter(key)\n key_path_len = len(key_path)\n\n total_size_bytes = 0\n src_prefix = self._add_path_delimiter(src_key)\n dst_prefix = self._add_path_delimiter(dst_key)\n for item in self.list(source_path, start_time=start_time, end_time=end_time, return_key=True):\n path = item.key[key_path_len:]\n # prevents copy attempt of empty key in folder\n if path != '' and path != '/':\n total_keys += 1\n total_size_bytes += item.size\n job = management_pool.apply_async(self.__copy_multipart,\n args=(copy_pool,\n src_bucket, src_prefix + path,\n dst_bucket, dst_prefix + path,\n part_size),\n kwds=kwargs)\n copy_jobs.append(job)\n\n # Wait for the pools to finish scheduling all the copies\n management_pool.close()\n management_pool.join()\n copy_pool.close()\n copy_pool.join()\n\n # Raise any errors encountered in any of the copy processes\n for result in copy_jobs:\n result.get()\n\n end = datetime.datetime.now()\n duration = end - start\n logger.info('%s : Complete : %s total keys copied in %s' %\n (datetime.datetime.now(), total_keys, duration))\n\n return total_keys, total_size_bytes\n\n # If the file isn't a directory just perform a simple copy\n else:\n self.__copy_multipart(copy_pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs)\n # Close the pool\n copy_pool.close()\n copy_pool.join()\n\n def __copy_multipart(self, pool, src_bucket, src_key, dst_bucket, dst_key, part_size, **kwargs):\n \"\"\"\n Copy a single S3 object to another S3 object, falling back to multipart copy where necessary\n\n NOTE: This is a private method and should only be called from within the `luigi.s3.copy` method\n\n :param pool: The threadpool to put the s3 copy processes onto\n :param src_bucket: source bucket name\n :param src_key: source key name\n :param dst_bucket: destination bucket name\n :param dst_key: destination key name\n :param key_size: size of the key to copy in bytes\n :param part_size: Part size in bytes. 
Must be >= 5MB and <= 5 GB.\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n source_bucket = self.s3.get_bucket(src_bucket, validate=True)\n dest_bucket = self.s3.get_bucket(dst_bucket, validate=True)\n\n key_size = source_bucket.lookup(src_key).size\n\n # We can't do a multipart copy on an empty Key, so handle this specially.\n # Also, don't bother using the multipart machinery if we're only dealing with a small non-multipart file\n if key_size == 0 or key_size <= part_size:\n result = pool.apply_async(dest_bucket.copy_key, args=(dst_key, src_bucket, src_key), kwds=kwargs)\n # Bubble up any errors we may encounter\n return result.get()\n\n mp = None\n\n try:\n mp = dest_bucket.initiate_multipart_upload(dst_key, **kwargs)\n cur_pos = 0\n\n # Store the results from the apply_async in a list so we can check for failures\n results = []\n\n # Calculate the number of chunks the file will be\n num_parts = (key_size + part_size - 1) // part_size\n\n for i in range(num_parts):\n # Issue an S3 copy request, one part at a time, from one S3 object to another\n part_start = cur_pos\n cur_pos += part_size\n part_end = min(cur_pos - 1, key_size - 1)\n part_num = i + 1\n results.append(pool.apply_async(mp.copy_part_from_key, args=(src_bucket, src_key, part_num, part_start, part_end)))\n logger.info('Requesting copy of %s/%s to %s/%s', part_num, num_parts, dst_bucket, dst_key)\n\n logger.info('Waiting for multipart copy of %s/%s to finish', dst_bucket, dst_key)\n\n # This will raise any exceptions in any of the copy threads\n for result in results:\n result.get()\n\n # finish the copy, making the file available in S3\n mp.complete_upload()\n return mp.key_name\n\n except:\n logger.info('Error during multipart s3 copy for %s/%s to %s/%s...', src_bucket, src_key, dst_bucket, dst_key)\n # cancel the copy so we don't get charged for storage consumed by copied parts\n if mp:\n mp.cancel_upload()\n raise\n\n def move(self, source_path, destination_path, **kwargs):\n \"\"\"\n Rename/move an object from one S3 location to another.\n\n :param kwargs: Keyword arguments are passed to the boto function `copy_key`\n \"\"\"\n self.copy(source_path, destination_path, **kwargs)\n self.remove(source_path)\n\n def listdir(self, path, start_time=None, end_time=None, return_key=False):\n \"\"\"\n Get an iterable with S3 folder contents.\n Iterable contains paths relative to queried path.\n\n :param start_time: Optional argument to list files with modified dates after start_time\n :param end_time: Optional argument to list files with modified dates before end_time\n :param return_key: Optional argument, when set to True will return a boto.s3.key.Key (instead of the filename)\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n key_path = self._add_path_delimiter(key)\n key_path_len = len(key_path)\n for item in s3_bucket.list(prefix=key_path):\n last_modified_date = time.strptime(item.last_modified, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n if (\n (not start_time and not end_time) or # neither are defined, list all\n (start_time and not end_time and start_time < last_modified_date) or # start defined, after start\n (not start_time and end_time and last_modified_date < end_time) or # end defined, prior to end\n (start_time and end_time and start_time < last_modified_date < end_time) # both defined, between\n ):\n if return_key:\n yield item\n else:\n yield 
self._add_path_delimiter(path) + item.key[key_path_len:]\n\n def list(self, path, start_time=None, end_time=None, return_key=False): # backwards compat\n key_path_len = len(self._add_path_delimiter(path))\n for item in self.listdir(path, start_time=start_time, end_time=end_time, return_key=return_key):\n if return_key:\n yield item\n else:\n yield item[key_path_len:]\n\n def isdir(self, path):\n \"\"\"\n Is the parameter S3 path a directory?\n \"\"\"\n (bucket, key) = self._path_to_bucket_and_key(path)\n\n # grab and validate the bucket\n s3_bucket = self.s3.get_bucket(bucket, validate=True)\n\n # root is a directory\n if self._is_root(key):\n return True\n\n for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0,\n S3_DIRECTORY_MARKER_SUFFIX_1):\n s3_dir_with_suffix_key = s3_bucket.get_key(key + suffix)\n if s3_dir_with_suffix_key:\n return True\n\n # files with this prefix\n key_path = self._add_path_delimiter(key)\n s3_bucket_list_result = list(itertools.islice(s3_bucket.list(prefix=key_path), 1))\n if s3_bucket_list_result:\n return True\n\n return False\n\n is_dir = isdir # compatibility with old version.\n\n def mkdir(self, path, parents=True, raise_if_exists=False):\n if raise_if_exists and self.isdir(path):\n raise FileAlreadyExists()\n\n _, key = self._path_to_bucket_and_key(path)\n if self._is_root(key):\n return # isdir raises if the bucket doesn't exist; nothing to do here.\n\n key = self._add_path_delimiter(key)\n\n if not parents and not self.isdir(os.path.dirname(key)):\n raise MissingParentDirectory()\n\n return self.put_string(\"\", self._add_path_delimiter(path))\n\n def _get_s3_config(self, key=None):\n defaults = dict(configuration.get_config().defaults())\n try:\n config = dict(configuration.get_config().items('s3'))\n except NoSectionError:\n return {}\n # So what ports etc can be read without us having to specify all dtypes\n for k, v in six.iteritems(config):\n try:\n config[k] = int(v)\n except ValueError:\n pass\n if key:\n return config.get(key)\n section_only = {k: v for k, v in config.items() if k not in defaults or v != defaults[k]}\n return section_only\n\n def _path_to_bucket_and_key(self, path):\n (scheme, netloc, path, query, fragment) = urlsplit(path)\n path_without_initial_slash = path[1:]\n return netloc, path_without_initial_slash\n\n def _is_root(self, key):\n return (len(key) == 0) or (key == '/')\n\n def _add_path_delimiter(self, key):\n return key if key[-1:] == '/' or key == '' else key + '/'\n\n\nclass AtomicS3File(AtomicLocalFile):\n \"\"\"\n An S3 file that writes to a temp file and puts to S3 on close.\n\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n def __init__(self, path, s3_client, **kwargs):\n self.s3_client = s3_client\n super(AtomicS3File, self).__init__(path)\n self.s3_options = kwargs\n\n def move_to_final_destination(self):\n self.s3_client.put_multipart(self.tmp_path, self.path, **self.s3_options)\n\n\nclass ReadableS3File(object):\n def __init__(self, s3_key):\n self.s3_key = s3_key\n self.buffer = []\n self.closed = False\n self.finished = False\n\n def read(self, size=0):\n f = self.s3_key.read(size=size)\n\n # boto will loop on the key forever and it's not what is expected by\n # the python io interface\n # boto/boto#2805\n if f == b'':\n self.finished = True\n if self.finished:\n return b''\n\n return f\n\n def close(self):\n self.s3_key.close()\n self.closed = True\n\n def __del__(self):\n self.close()\n\n def __exit__(self, exc_type, exc, traceback):\n self.close()\n\n def 
__enter__(self):\n return self\n\n def _add_to_buffer(self, line):\n self.buffer.append(line)\n\n def _flush_buffer(self):\n output = b''.join(self.buffer)\n self.buffer = []\n return output\n\n def readable(self):\n return True\n\n def writable(self):\n return False\n\n def seekable(self):\n return False\n\n def __iter__(self):\n key_iter = self.s3_key.__iter__()\n\n has_next = True\n while has_next:\n try:\n # grab the next chunk\n chunk = next(key_iter)\n\n # split on newlines, preserving the newline\n for line in chunk.splitlines(True):\n\n if not line.endswith(os.linesep):\n # no newline, so store in buffer\n self._add_to_buffer(line)\n else:\n # newline found, send it out\n if self.buffer:\n self._add_to_buffer(line)\n yield self._flush_buffer()\n else:\n yield line\n except StopIteration:\n # send out anything we have left in the buffer\n output = self._flush_buffer()\n if output:\n yield output\n has_next = False\n self.close()\n\n\nclass S3Target(FileSystemTarget):\n \"\"\"\n Target S3 file object\n\n :param kwargs: Keyword arguments are passed to the boto function `initiate_multipart_upload`\n \"\"\"\n\n fs = None\n\n def __init__(self, path, format=None, client=None, **kwargs):\n super(S3Target, self).__init__(path)\n if format is None:\n format = get_default_format()\n\n self.path = path\n self.format = format\n self.fs = client or S3Client()\n self.s3_options = kwargs\n\n def open(self, mode='r'):\n if mode not in ('r', 'w'):\n raise ValueError(\"Unsupported open mode '%s'\" % mode)\n\n if mode == 'r':\n s3_key = self.fs.get_key(self.path)\n if not s3_key:\n raise FileNotFoundException(\"Could not find file at %s\" % self.path)\n\n fileobj = ReadableS3File(s3_key)\n return self.format.pipe_reader(fileobj)\n else:\n return self.format.pipe_writer(AtomicS3File(self.path, self.fs, **self.s3_options))\n\n\nclass S3FlagTarget(S3Target):\n \"\"\"\n Defines a target directory with a flag-file (defaults to `_SUCCESS`) used\n to signify job success.\n\n This checks for two things:\n\n * the path exists (just like the S3Target)\n * the _SUCCESS file exists within the directory.\n\n Because Hadoop outputs into a directory and not a single file,\n the path is assumed to be a directory.\n\n This is meant to be a handy alternative to AtomicS3File.\n\n The AtomicFile approach can be burdensome for S3 since there are no directories, per se.\n\n If we have 1,000,000 output files, then we have to rename 1,000,000 objects.\n \"\"\"\n\n fs = None\n\n def __init__(self, path, format=None, client=None, flag='_SUCCESS'):\n \"\"\"\n Initializes a S3FlagTarget.\n\n :param path: the directory where the files are stored.\n :type path: str\n :param client:\n :type client:\n :param flag:\n :type flag: str\n \"\"\"\n if format is None:\n format = get_default_format()\n\n if path[-1] != \"/\":\n raise ValueError(\"S3FlagTarget requires the path to be to a \"\n \"directory. It must end with a slash ( / ).\")\n super(S3FlagTarget, self).__init__(path, format, client)\n self.flag = flag\n\n def exists(self):\n hadoopSemaphore = self.path + self.flag\n return self.fs.exists(hadoopSemaphore)\n\n\nclass S3EmrTarget(S3FlagTarget):\n \"\"\"\n Deprecated. Use :py:class:`S3FlagTarget`\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n warnings.warn(\"S3EmrTarget is deprecated. 
Please use S3FlagTarget\")\n super(S3EmrTarget, self).__init__(*args, **kwargs)\n\n\nclass S3PathTask(ExternalTask):\n \"\"\"\n A external task that to require existence of a path in S3.\n \"\"\"\n path = Parameter()\n\n def output(self):\n return S3Target(self.path)\n\n\nclass S3EmrTask(ExternalTask):\n \"\"\"\n An external task that requires the existence of EMR output in S3.\n \"\"\"\n path = Parameter()\n\n def output(self):\n return S3EmrTarget(self.path)\n\n\nclass S3FlagTask(ExternalTask):\n \"\"\"\n An external task that requires the existence of EMR output in S3.\n \"\"\"\n path = Parameter()\n flag = Parameter(default=None)\n\n def output(self):\n return S3FlagTarget(self.path, flag=self.flag)\n", "path": "luigi/contrib/s3.py"}]} |
gh_patches_debug_25325 | rasdani/github-patches | git_diff | yt-project__yt-2106 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cut_region can't be used to derive angular momentum quantities
I'm trying to calculate the angular momentum vector of a sphere, and in particular of the region inside the sphere whose density is higher than a certain threshold. However, the resulting cut_region cannot be used to compute angular momentum quantities. Here is the code I ran on one of the yt sample datasets:
```python
import yt
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
sp = ds.sphere((0.5, 0.5, 0.5), (5, "kpc"))
dense_sp = sp.cut_region(['obj["H_p0_number_density"]>= 1e-2'])
dense_sp.quantities.angular_momentum_vector()
```
This produced the following traceback:
```
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-83-3704089812fe> in <module>
5 sp = ds.sphere((0.5, 0.5, 0.5), (5, "kpc"))
6 dense_sp = sp.cut_region(['obj["H_p0_number_density"]>= 1e-2'])
----> 7 dense_sp.quantities.angular_momentum_vector()
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in __call__(self, *args, **kwargs)
67 storage = {}
68 for sto, ds in parallel_objects(chunks, -1, storage = storage):
---> 69 sto.result = self.process_chunk(ds, *args, **kwargs)
70 # Now storage will have everything, and will be done via pickling, so
71 # the units will be preserved. (Credit to Nathan for this
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in process_chunk(self, data, use_gas, use_particles, particle_type)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in <listcomp>(.0)
493 rvals.extend([(data[self.particle_type, "particle_specific_angular_momentum_%s" % axis] *
494 data[self.particle_type, "particle_mass"]).sum(dtype=np.float64) \
--> 495 for axis in "xyz"])
496 rvals.append(data[self.particle_type, "particle_mass"].sum(dtype=np.float64))
497 return rvals
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/data_containers.py in __getitem__(self, key)
253 return self.field_data[f]
254 else:
--> 255 self.get_data(f)
256 # fi.units is the unit expression string. We depend on the registry
257 # hanging off the dataset to define this unit object.
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/selection_data_containers.py in get_data(self, fields)
901 parent = getattr(self, "parent", self.base_object)
902 self.field_data[field] = \
--> 903 parent[field][self._part_ind(field[0])]
904 else:
905 self.field_data[field] = self.base_object[field][ind]
~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/units/yt_array.py in __getitem__(self, item)
1056
1057 def __getitem__(self, item):
-> 1058 ret = super(YTArray, self).__getitem__(item)
1059 if ret.shape == ():
1060 return YTQuantity(ret, self.units, bypass_validation=True)
IndexError: boolean index did not match indexed array along dimension 0; dimension is 44461 but corresponding boolean dimension is 3218
```
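The last frame points to a length mismatch: a boolean mask of length 3218 is being applied to a particle field array of length 44461, so the mask was evidently built from a different selection than the array it is indexing. The failure mode can be reproduced in isolation with plain NumPy, using the two sizes reported in the traceback (a minimal sketch, not yt code):
```python
import numpy as np

n_array = 44461  # length of the particle field array (from the traceback)
n_mask = 3218    # length of the boolean mask (from the traceback)

particle_field = np.zeros(n_array)
mask = np.ones(n_mask, dtype=bool)

try:
    particle_field[mask]  # boolean index with mismatched length
except IndexError as err:
    print(err)  # boolean index did not match indexed array along dimension 0
```
As a stopgap, passing `use_particles=False` to `angular_momentum_vector()` (one of the keywords visible in the `process_chunk` signature above) may skip the failing particle branch, though it only sidesteps the underlying indexing problem rather than fixing it.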
**Version Information**
* Operating System: macOS High Sierra 10.13.6
* Python Version: 3.6.6
* yt version: 3.5.dev0 (installed from source)
* Other Libraries (if applicable):
[bug.txt](https://github.com/yt-project/yt/files/2621997/bug.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt/data_objects/selection_data_containers.py`
Content:
```
1 """
2 Data containers based on geometric selection
3
4
5
6
7 """
8
9 #-----------------------------------------------------------------------------
10 # Copyright (c) 2013, yt Development Team.
11 #
12 # Distributed under the terms of the Modified BSD License.
13 #
14 # The full license is in the file COPYING.txt, distributed with this software.
15 #-----------------------------------------------------------------------------
16
17 import numpy as np
18
19 from yt.data_objects.data_containers import \
20 YTSelectionContainer0D, YTSelectionContainer1D, \
21 YTSelectionContainer2D, YTSelectionContainer3D, YTSelectionContainer
22 from yt.data_objects.static_output import Dataset
23 from yt.extern.six import string_types
24 from yt.funcs import ensure_list, iterable, validate_width_tuple, \
25 fix_length, fix_axis, validate_3d_array, validate_float, \
26 validate_iterable, validate_object, validate_axis, validate_center
27 from yt.units.yt_array import \
28 YTArray, \
29 YTQuantity
30 from yt.utilities.exceptions import \
31 YTSphereTooSmall, \
32 YTIllDefinedCutRegion, \
33 YTEllipsoidOrdering
34 from yt.utilities.minimal_representation import \
35 MinimalSliceData
36 from yt.utilities.math_utils import get_rotation_matrix
37 from yt.utilities.orientation import Orientation
38 from yt.geometry.selection_routines import points_in_cells
39 from yt.utilities.on_demand_imports import _scipy
40
41
42 class YTPoint(YTSelectionContainer0D):
43 """
44 A 0-dimensional object defined by a single point
45
46 Parameters
47 ----------
48 p: array_like
49         A point defined within the domain. If the domain is
50 periodic its position will be corrected to lie inside
51 the range [DLE,DRE) to ensure one and only one cell may
52 match that point
53 ds: ~yt.data_objects.static_output.Dataset, optional
54 An optional dataset to use rather than self.ds
55 field_parameters : dictionary
56 A dictionary of field parameters than can be accessed by derived
57         A dictionary of field parameters that can be accessed by derived
58 data_source: optional
59 Draw the selection from the provided data source rather than
60 all data associated with the data_set
61
62 Examples
63 --------
64
65 >>> import yt
66 >>> ds = yt.load("RedshiftOutput0005")
67 >>> c = [0.5,0.5,0.5]
68 >>> point = ds.point(c)
69 """
70 _type_name = "point"
71 _con_args = ('p',)
72 def __init__(self, p, ds=None, field_parameters=None, data_source=None):
73 validate_3d_array(p)
74 validate_object(ds, Dataset)
75 validate_object(field_parameters, dict)
76 validate_object(data_source, YTSelectionContainer)
77 super(YTPoint, self).__init__(ds, field_parameters, data_source)
78 if isinstance(p, YTArray):
79 # we pass p through ds.arr to ensure code units are attached
80 self.p = self.ds.arr(p)
81 else:
82 self.p = self.ds.arr(p, 'code_length')
83
84 class YTOrthoRay(YTSelectionContainer1D):
85 """
86 This is an orthogonal ray cast through the entire domain, at a specific
87 coordinate.
88
89 This object is typically accessed through the `ortho_ray` object that
90 hangs off of index objects. The resulting arrays have their
91 dimensionality reduced to one, and an ordered list of points at an
92 (x,y) tuple along `axis` are available.
93
94 Parameters
95 ----------
96 axis : int or char
97 The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
98 coords : tuple of floats
99 The (plane_x, plane_y) coordinates at which to cast the ray. Note
100 that this is in the plane coordinates: so if you are casting along
101 x, this will be (y, z). If you are casting along y, this will be
102 (z, x). If you are casting along z, this will be (x, y).
103 ds: ~yt.data_objects.static_output.Dataset, optional
104 An optional dataset to use rather than self.ds
105 field_parameters : dictionary
106             A dictionary of field parameters that can be accessed by derived
107 fields.
108 data_source: optional
109 Draw the selection from the provided data source rather than
110 all data associated with the data_set
111
112 Examples
113 --------
114
115 >>> import yt
116 >>> ds = yt.load("RedshiftOutput0005")
117 >>> oray = ds.ortho_ray(0, (0.2, 0.74))
118 >>> print oray["Density"]
119
120 Note: The low-level data representation for rays are not guaranteed to be
121 spatially ordered. In particular, with AMR datasets, higher resolution
122 data is tagged on to the end of the ray. If you want this data
123 represented in a spatially ordered manner, manually sort it by the "t"
124 field, which is the value of the parametric variable that goes from 0 at
125 the start of the ray to 1 at the end:
126
127 >>> my_ray = ds.ortho_ray(...)
128 >>> ray_sort = np.argsort(my_ray["t"])
129 >>> density = my_ray["density"][ray_sort]
130 """
131 _key_fields = ['x','y','z','dx','dy','dz']
132 _type_name = "ortho_ray"
133 _con_args = ('axis', 'coords')
134 def __init__(self, axis, coords, ds=None,
135 field_parameters=None, data_source=None):
136 validate_axis(ds, axis)
137 validate_iterable(coords)
138 for c in coords:
139 validate_float(c)
140 validate_object(ds, Dataset)
141 validate_object(field_parameters, dict)
142 validate_object(data_source, YTSelectionContainer)
143 super(YTOrthoRay, self).__init__(ds, field_parameters, data_source)
144 self.axis = fix_axis(axis, self.ds)
145 xax = self.ds.coordinates.x_axis[self.axis]
146 yax = self.ds.coordinates.y_axis[self.axis]
147 self.px_ax = xax
148 self.py_ax = yax
149 # Even though we may not be using x,y,z we use them here.
150 self.px_dx = 'd%s'%('xyz'[self.px_ax])
151 self.py_dx = 'd%s'%('xyz'[self.py_ax])
152 # Convert coordinates to code length.
153 if isinstance(coords[0], YTQuantity):
154 self.px = self.ds.quan(coords[0]).to("code_length")
155 else:
156 self.px = self.ds.quan(coords[0], "code_length")
157 if isinstance(coords[1], YTQuantity):
158 self.py = self.ds.quan(coords[1]).to("code_length")
159 else:
160 self.py = self.ds.quan(coords[1], "code_length")
161 self.sort_by = 'xyz'[self.axis]
162
163 @property
164 def coords(self):
165 return (self.px, self.py)
166
167 class YTRay(YTSelectionContainer1D):
168 """
169 This is an arbitrarily-aligned ray cast through the entire domain, at a
170 specific coordinate.
171
172 This object is typically accessed through the `ray` object that hangs
173 off of index objects. The resulting arrays have their
174 dimensionality reduced to one, and an ordered list of points at an
175     (x,y) tuple along `axis` is available, as is the `t` field, which
176 corresponds to a unitless measurement along the ray from start to
177 end.
178
179 Parameters
180 ----------
181 start_point : array-like set of 3 floats
182 The place where the ray starts.
183 end_point : array-like set of 3 floats
184 The place where the ray ends.
185 ds: ~yt.data_objects.static_output.Dataset, optional
186 An optional dataset to use rather than self.ds
187 field_parameters : dictionary
188         A dictionary of field parameters that can be accessed by derived
189 fields.
190 data_source: optional
191 Draw the selection from the provided data source rather than
192 all data associated with the data_set
193
194 Examples
195 --------
196
197 >>> import yt
198 >>> ds = yt.load("RedshiftOutput0005")
199 >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31))
200 >>> print ray["Density"], ray["t"], ray["dts"]
201
202 Note: The low-level data representation for rays are not guaranteed to be
203 spatially ordered. In particular, with AMR datasets, higher resolution
204 data is tagged on to the end of the ray. If you want this data
205 represented in a spatially ordered manner, manually sort it by the "t"
206 field, which is the value of the parametric variable that goes from 0 at
207 the start of the ray to 1 at the end:
208
209 >>> my_ray = ds.ray(...)
210 >>> ray_sort = np.argsort(my_ray["t"])
211 >>> density = my_ray["density"][ray_sort]
212
213 """
214 _type_name = "ray"
215 _con_args = ('start_point', 'end_point')
216 _container_fields = ("t", "dts")
217 def __init__(self, start_point, end_point, ds=None,
218 field_parameters=None, data_source=None):
219 validate_3d_array(start_point)
220 validate_3d_array(end_point)
221 validate_object(ds, Dataset)
222 validate_object(field_parameters, dict)
223 validate_object(data_source, YTSelectionContainer)
224 super(YTRay, self).__init__(ds, field_parameters, data_source)
225 if isinstance(start_point, YTArray):
226 self.start_point = \
227 self.ds.arr(start_point).to("code_length")
228 else:
229 self.start_point = \
230 self.ds.arr(start_point, 'code_length',
231 dtype='float64')
232 if isinstance(end_point, YTArray):
233 self.end_point = \
234 self.ds.arr(end_point).to("code_length")
235 else:
236 self.end_point = \
237 self.ds.arr(end_point, 'code_length',
238 dtype='float64')
239 self.vec = self.end_point - self.start_point
240 self._set_center(self.start_point)
241 self.set_field_parameter('center', self.start_point)
242 self._dts, self._ts = None, None
243
244 def _generate_container_field(self, field):
245 if self._current_chunk is None:
246 self.index._identify_base_chunk(self)
247 if field == "dts":
248 return self._current_chunk.dtcoords
249 elif field == "t":
250 return self._current_chunk.tcoords
251 else:
252 raise KeyError(field)
253
254 class YTSlice(YTSelectionContainer2D):
255 """
256 This is a data object corresponding to a slice through the simulation
257 domain.
258
259 This object is typically accessed through the `slice` object that hangs
260 off of index objects. Slice is an orthogonal slice through the
261 data, taking all the points at the finest resolution available and then
262 indexing them. It is more appropriately thought of as a slice
263 'operator' than an object, however, as its field and coordinate can
264 both change.
265
266 Parameters
267 ----------
268 axis : int or char
269 The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
270 coord : float
271 The coordinate along the axis at which to slice. This is in
272 "domain" coordinates.
273 center : array_like, optional
274 The 'center' supplied to fields that use it. Note that this does
275 not have to have `coord` as one value. optional.
276 ds: ~yt.data_objects.static_output.Dataset, optional
277 An optional dataset to use rather than self.ds
278 field_parameters : dictionary
279         A dictionary of field parameters that can be accessed by derived
280 fields.
281 data_source: optional
282 Draw the selection from the provided data source rather than
283 all data associated with the data_set
284
285 Examples
286 --------
287
288 >>> import yt
289 >>> ds = yt.load("RedshiftOutput0005")
290 >>> slice = ds.slice(0, 0.25)
291 >>> print slice["Density"]
292 """
293 _top_node = "/Slices"
294 _type_name = "slice"
295 _con_args = ('axis', 'coord')
296 _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
297 def __init__(self, axis, coord, center=None, ds=None,
298 field_parameters=None, data_source=None):
299 validate_axis(ds, axis)
300 validate_float(coord)
301 # center is an optional parameter
302 if center is not None:
303 validate_center(center)
304 validate_object(ds, Dataset)
305 validate_object(field_parameters, dict)
306 validate_object(data_source, YTSelectionContainer)
307 YTSelectionContainer2D.__init__(self, axis, ds,
308 field_parameters, data_source)
309 self._set_center(center)
310 self.coord = coord
311
312 def _generate_container_field(self, field):
313 xax = self.ds.coordinates.x_axis[self.axis]
314 yax = self.ds.coordinates.y_axis[self.axis]
315 if self._current_chunk is None:
316 self.index._identify_base_chunk(self)
317 if field == "px":
318 return self._current_chunk.fcoords[:,xax]
319 elif field == "py":
320 return self._current_chunk.fcoords[:,yax]
321 elif field == "pz":
322 return self._current_chunk.fcoords[:,self.axis]
323 elif field == "pdx":
324 return self._current_chunk.fwidth[:,xax] * 0.5
325 elif field == "pdy":
326 return self._current_chunk.fwidth[:,yax] * 0.5
327 elif field == "pdz":
328 return self._current_chunk.fwidth[:,self.axis] * 0.5
329 else:
330 raise KeyError(field)
331
332 @property
333 def _mrep(self):
334 return MinimalSliceData(self)
335
336 def hub_upload(self):
337 self._mrep.upload()
338
339 def to_pw(self, fields=None, center='c', width=None, origin='center-window'):
340 r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
341 object.
342
343 This is a bare-bones mechanism of creating a plot window from this
344 object, which can then be moved around, zoomed, and on and on. All
345 behavior of the plot window is relegated to that routine.
346 """
347 pw = self._get_pw(fields, center, width, origin, 'Slice')
348 return pw
349
350 def plot(self, fields=None):
351 if hasattr(self._data_source, "left_edge") and \
352 hasattr(self._data_source, "right_edge"):
353 left_edge = self._data_source.left_edge
354 right_edge = self._data_source.right_edge
355 center = (left_edge + right_edge)/2.0
356 width = right_edge - left_edge
357 xax = self.ds.coordinates.x_axis[self.axis]
358 yax = self.ds.coordinates.y_axis[self.axis]
359 lx, rx = left_edge[xax], right_edge[xax]
360 ly, ry = left_edge[yax], right_edge[yax]
361 width = (rx-lx), (ry-ly)
362 else:
363 width = self.ds.domain_width
364 center = self.ds.domain_center
365 pw = self._get_pw(fields, center, width, 'native', 'Slice')
366 pw.show()
367 return pw
368
369 class YTCuttingPlane(YTSelectionContainer2D):
370 """
371 This is a data object corresponding to an oblique slice through the
372 simulation domain.
373
374 This object is typically accessed through the `cutting` object
375 that hangs off of index objects. A cutting plane is an oblique
376 plane through the data, defined by a normal vector and a coordinate.
377         It attempts to guess a 'north' vector, which can be overridden, and
378 then it pixelizes the appropriate data onto the plane without
379 interpolation.
380
381 Parameters
382 ----------
383 normal : array_like
384 The vector that defines the desired plane. For instance, the
385 angular momentum of a sphere.
386 center : array_like
387 The center of the cutting plane, where the normal vector is anchored.
388 north_vector: array_like, optional
389 An optional vector to describe the north-facing direction in the resulting
390 plane.
391 ds: ~yt.data_objects.static_output.Dataset, optional
392 An optional dataset to use rather than self.ds
393 field_parameters : dictionary
394         A dictionary of field parameters that can be accessed by derived
395 fields.
396 data_source: optional
397 Draw the selection from the provided data source rather than
398 all data associated with the data_set
399
400 Notes
401 -----
402
403 This data object in particular can be somewhat expensive to create.
404 It's also important to note that unlike the other 2D data objects, this
405 object provides px, py, pz, as some cells may have a height from the
406 plane.
407
408 Examples
409 --------
410
411 >>> import yt
412 >>> ds = yt.load("RedshiftOutput0005")
413 >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
414 >>> print cp["Density"]
415 """
416 _plane = None
417 _top_node = "/CuttingPlanes"
418 _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']
419 _type_name = "cutting"
420 _con_args = ('normal', 'center')
421 _tds_attrs = ("_inv_mat",)
422 _tds_fields = ("x", "y", "z", "dx")
423 _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
424 def __init__(self, normal, center, north_vector=None,
425 ds=None, field_parameters=None, data_source=None):
426 validate_3d_array(normal)
427 validate_center(center)
428 if north_vector is not None:
429 validate_3d_array(north_vector)
430 validate_object(ds, Dataset)
431 validate_object(field_parameters, dict)
432 validate_object(data_source, YTSelectionContainer)
433 YTSelectionContainer2D.__init__(self, 4, ds,
434 field_parameters, data_source)
435 self._set_center(center)
436 self.set_field_parameter('center',center)
437 # Let's set up our plane equation
438 # ax + by + cz + d = 0
439 self.orienter = Orientation(normal, north_vector = north_vector)
440 self._norm_vec = self.orienter.normal_vector
441 self._d = -1.0 * np.dot(self._norm_vec, self.center)
442 self._x_vec = self.orienter.unit_vectors[0]
443 self._y_vec = self.orienter.unit_vectors[1]
444 # First we try all three, see which has the best result:
445 self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
446 self._inv_mat = np.linalg.pinv(self._rot_mat)
447 self.set_field_parameter('cp_x_vec',self._x_vec)
448 self.set_field_parameter('cp_y_vec',self._y_vec)
449 self.set_field_parameter('cp_z_vec',self._norm_vec)
450
451 @property
452 def normal(self):
453 return self._norm_vec
454
455 def _generate_container_field(self, field):
456 if self._current_chunk is None:
457 self.index._identify_base_chunk(self)
458 if field == "px":
459 x = self._current_chunk.fcoords[:,0] - self.center[0]
460 y = self._current_chunk.fcoords[:,1] - self.center[1]
461 z = self._current_chunk.fcoords[:,2] - self.center[2]
462 tr = np.zeros(x.size, dtype='float64')
463 tr = self.ds.arr(tr, "code_length")
464 tr += x * self._x_vec[0]
465 tr += y * self._x_vec[1]
466 tr += z * self._x_vec[2]
467 return tr
468 elif field == "py":
469 x = self._current_chunk.fcoords[:,0] - self.center[0]
470 y = self._current_chunk.fcoords[:,1] - self.center[1]
471 z = self._current_chunk.fcoords[:,2] - self.center[2]
472 tr = np.zeros(x.size, dtype='float64')
473 tr = self.ds.arr(tr, "code_length")
474 tr += x * self._y_vec[0]
475 tr += y * self._y_vec[1]
476 tr += z * self._y_vec[2]
477 return tr
478 elif field == "pz":
479 x = self._current_chunk.fcoords[:,0] - self.center[0]
480 y = self._current_chunk.fcoords[:,1] - self.center[1]
481 z = self._current_chunk.fcoords[:,2] - self.center[2]
482 tr = np.zeros(x.size, dtype='float64')
483 tr = self.ds.arr(tr, "code_length")
484 tr += x * self._norm_vec[0]
485 tr += y * self._norm_vec[1]
486 tr += z * self._norm_vec[2]
487 return tr
488 elif field == "pdx":
489 return self._current_chunk.fwidth[:,0] * 0.5
490 elif field == "pdy":
491 return self._current_chunk.fwidth[:,1] * 0.5
492 elif field == "pdz":
493 return self._current_chunk.fwidth[:,2] * 0.5
494 else:
495 raise KeyError(field)
496
497 def to_pw(self, fields=None, center='c', width=None, axes_unit=None):
498 r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
499 object.
500
501 This is a bare-bones mechanism of creating a plot window from this
502 object, which can then be moved around, zoomed, and on and on. All
503 behavior of the plot window is relegated to that routine.
504 """
505 normal = self.normal
506 center = self.center
507 self.fields = ensure_list(fields) + [k for k in self.field_data.keys()
508 if k not in self._key_fields]
509 from yt.visualization.plot_window import get_oblique_window_parameters, PWViewerMPL
510 from yt.visualization.fixed_resolution import FixedResolutionBuffer
511 (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, self.ds)
512 pw = PWViewerMPL(
513 self, bounds, fields=self.fields, origin='center-window',
514 periodic=False, oblique=True,
515 frb_generator=FixedResolutionBuffer,
516 plot_type='OffAxisSlice')
517 if axes_unit is not None:
518 pw.set_axes_unit(axes_unit)
519 pw._setup_plots()
520 return pw
521
522 def to_frb(self, width, resolution, height=None, periodic=False):
523 r"""This function returns a FixedResolutionBuffer generated from this
524 object.
525
526 An ObliqueFixedResolutionBuffer is an object that accepts a
527 variable-resolution 2D object and transforms it into an NxM bitmap that
528 can be plotted, examined or processed. This is a convenience function
529 to return an FRB directly from an existing 2D data object. Unlike the
530 corresponding to_frb function for other YTSelectionContainer2D objects,
531 this does not accept a 'center' parameter as it is assumed to be
532 centered at the center of the cutting plane.
533
534 Parameters
535 ----------
536 width : width specifier
537 This can either be a floating point value, in the native domain
538 units of the simulation, or a tuple of the (value, unit) style.
539 This will be the width of the FRB.
540 height : height specifier, optional
541 This will be the height of the FRB, by default it is equal to width.
542 resolution : int or tuple of ints
543 The number of pixels on a side of the final FRB.
544 periodic : boolean
545 This can be true or false, and governs whether the pixelization
546 will span the domain boundaries.
547
548 Returns
549 -------
550 frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`
551 A fixed resolution buffer, which can be queried for fields.
552
553 Examples
554 --------
555
556 >>> v, c = ds.find_max("density")
557 >>> sp = ds.sphere(c, (100.0, 'au'))
558 >>> L = sp.quantities.angular_momentum_vector()
559 >>> cutting = ds.cutting(L, c)
560 >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)
561 >>> write_image(np.log10(frb["Density"]), 'density_1pc.png')
562 """
563 if iterable(width):
564 validate_width_tuple(width)
565 width = self.ds.quan(width[0], width[1])
566 if height is None:
567 height = width
568 elif iterable(height):
569 validate_width_tuple(height)
570 height = self.ds.quan(height[0], height[1])
571 if not iterable(resolution):
572 resolution = (resolution, resolution)
573 from yt.visualization.fixed_resolution import FixedResolutionBuffer
574 bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)
575 frb = FixedResolutionBuffer(self, bounds, resolution,
576 periodic=periodic)
577 return frb
578
579 class YTDisk(YTSelectionContainer3D):
580 """
581 By providing a *center*, a *normal*, a *radius* and a *height* we
582 can define a cylinder of any proportion. Only cells whose centers are
583 within the cylinder will be selected.
584
585 Parameters
586 ----------
587 center : array_like
588 coordinate to which the normal, radius, and height all reference
589 normal : array_like
590 the normal vector defining the direction of lengthwise part of the
591 cylinder
592 radius : float
593 the radius of the cylinder
594 height : float
595 the distance from the midplane of the cylinder to the top and
596 bottom planes
597 fields : array of fields, optional
598 any fields to be pre-loaded in the cylinder object
599 ds: ~yt.data_objects.static_output.Dataset, optional
600 An optional dataset to use rather than self.ds
601 field_parameters : dictionary
602         A dictionary of field parameters that can be accessed by derived
603 fields.
604 data_source: optional
605 Draw the selection from the provided data source rather than
606 all data associated with the data_set
607
608 Examples
609 --------
610
611 >>> import yt
612 >>> ds = yt.load("RedshiftOutput0005")
613 >>> c = [0.5,0.5,0.5]
614 >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))
615 """
616 _type_name = "disk"
617 _con_args = ('center', '_norm_vec', 'radius', 'height')
618 def __init__(self, center, normal, radius, height, fields=None,
619 ds=None, field_parameters=None, data_source=None):
620 validate_center(center)
621 validate_3d_array(normal)
622 validate_float(radius)
623 validate_float(height)
624 validate_iterable(fields)
625 validate_object(ds, Dataset)
626 validate_object(field_parameters, dict)
627 validate_object(data_source, YTSelectionContainer)
628 YTSelectionContainer3D.__init__(self, center, ds,
629 field_parameters, data_source)
630 self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))
631 self.set_field_parameter("normal", self._norm_vec)
632 self.set_field_parameter("center", self.center)
633 self.height = fix_length(height, self.ds)
634 self.radius = fix_length(radius, self.ds)
635 self._d = -1.0 * np.dot(self._norm_vec, self.center)
636
637 class YTRegion(YTSelectionContainer3D):
638 """A 3D region of data with an arbitrary center.
639
640 Takes an array of three *left_edge* coordinates, three
641 *right_edge* coordinates, and a *center* that can be anywhere
642 in the domain. If the selected region extends past the edges
643 of the domain, no data will be found there, though the
644 object's `left_edge` or `right_edge` are not modified.
645
646 Parameters
647 ----------
648 center : array_like
649 The center of the region
650 left_edge : array_like
651 The left edge of the region
652 right_edge : array_like
653 The right edge of the region
654 """
655 _type_name = "region"
656 _con_args = ('center', 'left_edge', 'right_edge')
657 def __init__(self, center, left_edge, right_edge, fields=None,
658 ds=None, field_parameters=None, data_source=None):
659 if center is not None:
660 validate_center(center)
661 validate_3d_array(left_edge)
662 validate_3d_array(right_edge)
663 validate_iterable(fields)
664 validate_object(ds, Dataset)
665 validate_object(field_parameters, dict)
666 validate_object(data_source, YTSelectionContainer)
667 YTSelectionContainer3D.__init__(self, center, ds,
668 field_parameters, data_source)
669 if not isinstance(left_edge, YTArray):
670 self.left_edge = self.ds.arr(left_edge, 'code_length')
671 else:
672 # need to assign this dataset's unit registry to the YTArray
673 self.left_edge = self.ds.arr(left_edge.copy())
674 if not isinstance(right_edge, YTArray):
675 self.right_edge = self.ds.arr(right_edge, 'code_length')
676 else:
677 # need to assign this dataset's unit registry to the YTArray
678 self.right_edge = self.ds.arr(right_edge.copy())
679
680 class YTDataCollection(YTSelectionContainer3D):
681 """
682 By selecting an arbitrary *object_list*, we can act on those grids.
683 Child cells are not returned.
684 """
685 _type_name = "data_collection"
686 _con_args = ("_obj_list",)
687 def __init__(self, obj_list, ds=None, field_parameters=None,
688 data_source=None, center=None):
689 validate_iterable(obj_list)
690 validate_object(ds, Dataset)
691 validate_object(field_parameters, dict)
692 validate_object(data_source, YTSelectionContainer)
693 if center is not None:
694 validate_center(center)
695 YTSelectionContainer3D.__init__(self, center, ds,
696 field_parameters, data_source)
697 self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],
698 dtype="int64")
699 self._obj_list = obj_list
700
701 class YTSphere(YTSelectionContainer3D):
702 """
703 A sphere of points defined by a *center* and a *radius*.
704
705 Parameters
706 ----------
707 center : array_like
708 The center of the sphere.
709 radius : float, width specifier, or YTQuantity
710 The radius of the sphere. If passed a float,
711 that will be interpreted in code units. Also
712 accepts a (radius, unit) tuple or YTQuantity
713 instance with units attached.
714
715 Examples
716 --------
717
718 >>> import yt
719 >>> ds = yt.load("RedshiftOutput0005")
720 >>> c = [0.5,0.5,0.5]
721 >>> sphere = ds.sphere(c, (1., "kpc"))
722 """
723 _type_name = "sphere"
724 _con_args = ('center', 'radius')
725 def __init__(self, center, radius, ds=None,
726 field_parameters=None, data_source=None):
727 validate_center(center)
728 validate_float(radius)
729 validate_object(ds, Dataset)
730 validate_object(field_parameters, dict)
731 validate_object(data_source, YTSelectionContainer)
732 super(YTSphere, self).__init__(center, ds,
733 field_parameters, data_source)
734 # Unpack the radius, if necessary
735 radius = fix_length(radius, self.ds)
736 if radius < self.index.get_smallest_dx():
737 raise YTSphereTooSmall(ds, radius.in_units("code_length"),
738 self.index.get_smallest_dx().in_units("code_length"))
739 self.set_field_parameter('radius',radius)
740 self.set_field_parameter("center", self.center)
741 self.radius = radius
742
743 class YTEllipsoid(YTSelectionContainer3D):
744 """
745     By providing a *center*, *A*, *B*, *C*, *e0*, and *tilt*, we
746     can define an ellipsoid of any proportion. Only cells whose
747 centers are within the ellipsoid will be selected.
748
749 Parameters
750 ----------
751 center : array_like
752 The center of the ellipsoid.
753 A : float
754 The magnitude of the largest axis (semi-major) of the ellipsoid.
755 B : float
756 The magnitude of the medium axis (semi-medium) of the ellipsoid.
757 C : float
758 The magnitude of the smallest axis (semi-minor) of the ellipsoid.
759 e0 : array_like (automatically normalized)
760 the direction of the largest semi-major axis of the ellipsoid
761 tilt : float
762         After the rotation about the z-axis to align e0 to x in the x-y
763 plane, and then rotating about the y-axis to align e0 completely
764 to the x-axis, tilt is the angle in radians remaining to
765 rotate about the x-axis to align both e1 to the y-axis and e2 to
766 the z-axis.
767 Examples
768 --------
769
770 >>> import yt
771 >>> ds = yt.load("RedshiftOutput0005")
772 >>> c = [0.5,0.5,0.5]
773 >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
774 """
775 _type_name = "ellipsoid"
776 _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')
777 def __init__(self, center, A, B, C, e0, tilt, fields=None,
778 ds=None, field_parameters=None, data_source=None):
779 validate_center(center)
780 validate_float(A)
781 validate_float(B)
782 validate_float(C)
783 validate_3d_array(e0)
784 validate_float(tilt)
785 validate_iterable(fields)
786 validate_object(ds, Dataset)
787 validate_object(field_parameters, dict)
788 validate_object(data_source, YTSelectionContainer)
789 YTSelectionContainer3D.__init__(self, center, ds,
790 field_parameters, data_source)
791 # make sure the magnitudes of semi-major axes are in order
792 if A<B or B<C:
793 raise YTEllipsoidOrdering(ds, A, B, C)
794 # make sure the smallest side is not smaller than dx
795 self._A = self.ds.quan(A, 'code_length')
796 self._B = self.ds.quan(B, 'code_length')
797 self._C = self.ds.quan(C, 'code_length')
798 if self._C < self.index.get_smallest_dx():
799 raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx())
800 self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
801 self._tilt = tilt
802
803 # find the t1 angle needed to rotate about z axis to align e0 to x
804 t1 = np.arctan(e0[1] / e0[0])
805 # rotate e0 by -t1
806 RZ = get_rotation_matrix(t1, (0,0,1)).transpose()
807 r1 = (e0 * RZ).sum(axis = 1)
808 # find the t2 angle needed to rotate about y axis to align e0 to x
809 t2 = np.arctan(-r1[2] / r1[0])
810 """
811 calculate the original e1
812 given the tilt about the x axis when e0 was aligned
813 to x after t1, t2 rotations about z, y
814 """
815 RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()
816 RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()
817 RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()
818 e1 = ((0, 1, 0) * RX).sum(axis=1)
819 e1 = (e1 * RY).sum(axis=1)
820 e1 = (e1 * RZ).sum(axis=1)
821 e2 = np.cross(e0, e1)
822
823 self._e1 = e1
824 self._e2 = e2
825
826 self.set_field_parameter('A', A)
827 self.set_field_parameter('B', B)
828 self.set_field_parameter('C', C)
829 self.set_field_parameter('e0', e0)
830 self.set_field_parameter('e1', e1)
831 self.set_field_parameter('e2', e2)
832
833 class YTCutRegion(YTSelectionContainer3D):
834 """
835 This is a data object designed to allow individuals to apply logical
836 operations to fields and filter as a result of those cuts.
837
838 Parameters
839 ----------
840 data_source : YTSelectionContainer3D
841 The object to which cuts will be applied.
842 conditionals : list of strings
843 A list of conditionals that will be evaluated. In the namespace
844 available, these conditionals will have access to 'obj' which is a data
845 object of unknown shape, and they must generate a boolean array. For
846 instance, conditionals = ["obj['temperature'] < 1e3"]
847
848 Examples
849 --------
850
851 >>> import yt
852 >>> ds = yt.load("RedshiftOutput0005")
853 >>> sp = ds.sphere("max", (1.0, 'Mpc'))
854 >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
855 """
856 _type_name = "cut_region"
857 _con_args = ("base_object", "conditionals")
858 def __init__(self, data_source, conditionals, ds=None,
859 field_parameters=None, base_object=None):
860 validate_object(data_source, YTSelectionContainer)
861 validate_iterable(conditionals)
862 for condition in conditionals:
863 validate_object(condition, string_types)
864 validate_object(ds, Dataset)
865 validate_object(field_parameters, dict)
866 validate_object(base_object, YTSelectionContainer)
867 if base_object is not None:
868 # passing base_object explicitly has been deprecated,
869 # but we handle it here for backward compatibility
870 if data_source is not None:
871 raise RuntimeError(
872 "Cannot use both base_object and data_source")
873 data_source=base_object
874 super(YTCutRegion, self).__init__(
875 data_source.center, ds, field_parameters, data_source=data_source)
876 self.conditionals = ensure_list(conditionals)
877 self.base_object = data_source
878 self._selector = None
879 self._particle_mask = {}
880 # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
881 # ires and get_data
882
883 def chunks(self, fields, chunking_style, **kwargs):
884 # We actually want to chunk the sub-chunk, not ourselves. We have no
885         # chunks to speak of, as we do not do data IO.
886 for chunk in self.index._chunk(self.base_object,
887 chunking_style,
888 **kwargs):
889 with self.base_object._chunked_read(chunk):
890 with self._chunked_read(chunk):
891 self.get_data(fields)
892 yield self
893
894 def get_data(self, fields = None):
895 fields = ensure_list(fields)
896 self.base_object.get_data(fields)
897 ind = self._cond_ind
898 for field in fields:
899 f = self.base_object[field]
900 if f.shape != ind.shape:
901 parent = getattr(self, "parent", self.base_object)
902 self.field_data[field] = \
903 parent[field][self._part_ind(field[0])]
904 else:
905 self.field_data[field] = self.base_object[field][ind]
906
907 @property
908 def blocks(self):
909 # We have to take a slightly different approach here. Note that all
910 # that .blocks has to yield is a 3D array and a mask.
911 for obj, m in self.base_object.blocks:
912 m = m.copy()
913 with obj._field_parameter_state(self.field_parameters):
914 for cond in self.conditionals:
915 ss = eval(cond)
916 m = np.logical_and(m, ss, m)
917 if not np.any(m): continue
918 yield obj, m
919
920 @property
921 def _cond_ind(self):
922 ind = None
923 obj = self.base_object
924 with obj._field_parameter_state(self.field_parameters):
925 for cond in self.conditionals:
926 res = eval(cond)
927 if ind is None: ind = res
928 if ind.shape != res.shape:
929 raise YTIllDefinedCutRegion(self.conditionals)
930 np.logical_and(res, ind, ind)
931 return ind
932
933 def _part_ind_KDTree(self, ptype):
934 '''Find the particles in cells using a KDTree approach.'''
935 parent = getattr(self, "parent", self.base_object)
936 units = "code_length"
937
938 pos = np.stack([self[("index", 'x')].to(units),
939 self[("index", 'y')].to(units),
940 self[("index", 'z')].to(units)], axis=1).value
941 dx = np.stack([self[("index", "dx")].to(units),
942 self[("index", "dy")].to(units),
943 self[("index", "dz")].to(units)], axis=1).value
944 ppos = np.stack([parent[(ptype, "particle_position_x")],
945 parent[(ptype, "particle_position_y")],
946 parent[(ptype, "particle_position_z")]], axis=1).value
947 levels = self[("index", "grid_level")].astype('int32').value
948 levelmin = levels.min()
949 levelmax = levels.max()
950
951 mask = np.zeros(ppos.shape[0], dtype=bool)
952
953 for lvl in range(levelmax, levelmin-1, -1):
954 # Filter out cells not in the current level
955 lvl_mask = (levels == lvl)
956 dx_loc = dx[lvl_mask]
957 pos_loc = pos[lvl_mask]
958
959 grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)
960
961 # Compute closest cell for all remaining particles
962 dist, icell = grid_tree.query(ppos[~mask], distance_upper_bound=dx_loc.max(),
963 p=np.inf)
964 mask_loc = np.isfinite(dist[:])
965
966 # Check that particles within dx of a cell are in it
967 i = icell[mask_loc]
968 dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i])
969 tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1)
970
971 mask_loc[mask_loc] = tmp_mask
972
973 # Update the particle mask with particles found at this level
974 mask[~mask] |= mask_loc
975
976 return mask
977
978 def _part_ind_brute_force(self, ptype):
979 parent = getattr(self, "parent", self.base_object)
980 units = "code_length"
981 mask = points_in_cells(
982 self[("index", "x")].to(units),
983 self[("index", "y")].to(units),
984 self[("index", "z")].to(units),
985 self[("index", "dx")].to(units),
986 self[("index", "dy")].to(units),
987 self[("index", "dz")].to(units),
988 parent[(ptype, "particle_position_x")].to(units),
989 parent[(ptype, "particle_position_y")].to(units),
990 parent[(ptype, "particle_position_z")].to(units))
991
992 return mask
993
994 def _part_ind(self, ptype):
995 if self._particle_mask.get(ptype) is None:
996 # If scipy is installed, use the fast KD tree
997 # implementation. Else, fall back onto the direct
998 # brute-force algorithm.
999 try:
1000 _scipy.spatial.KDTree
1001 mask = self._part_ind_KDTree(ptype)
1002 except ImportError:
1003 mask = self._part_ind_brute_force(ptype)
1004
1005 self._particle_mask[ptype] = mask
1006 return self._particle_mask[ptype]
1007
1008 @property
1009 def icoords(self):
1010 return self.base_object.icoords[self._cond_ind,:]
1011
1012 @property
1013 def fcoords(self):
1014 return self.base_object.fcoords[self._cond_ind,:]
1015
1016 @property
1017 def ires(self):
1018 return self.base_object.ires[self._cond_ind]
1019
1020 @property
1021 def fwidth(self):
1022 return self.base_object.fwidth[self._cond_ind,:]
1023
1024 class YTIntersectionContainer3D(YTSelectionContainer3D):
1025 """
1026 This is a more efficient method of selecting the intersection of multiple
1027 data selection objects.
1028
1029 Creating one of these objects returns the intersection of all of the
1030 sub-objects; it is designed to be a faster method than chaining & ("and")
1031 operations to create a single, large intersection.
1032
1033 Parameters
1034 ----------
1035 data_objects : Iterable of YTSelectionContainer
1036 The data objects to intersect
1037
1038 Examples
1039 --------
1040
1041 >>> import yt
1042 >>> ds = yt.load("RedshiftOutput0005")
1043 >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15)
1044 >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1)
1045 >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15)
1046 >>> new_obj = ds.intersection((sp1, sp2, sp3))
1047 >>> print(new_obj.sum("cell_volume"))
1048 """
1049 _type_name = "intersection"
1050 _con_args = ("data_objects",)
1051 def __init__(self, data_objects, ds = None, field_parameters = None,
1052 data_source = None):
1053 validate_iterable(data_objects)
1054 for obj in data_objects:
1055 validate_object(obj, YTSelectionContainer)
1056 validate_object(ds, Dataset)
1057 validate_object(field_parameters, dict)
1058 validate_object(data_source, YTSelectionContainer)
1059 YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
1060 data_source)
1061 # ensure_list doesn't check for tuples
1062 if isinstance(data_objects, tuple):
1063 data_objects = list(data_objects)
1064 self.data_objects = ensure_list(data_objects)
1065
1066 class YTDataObjectUnion(YTSelectionContainer3D):
1067 """
1068 This is a more efficient method of selecting the union of multiple
1069 data selection objects.
1070
1071 Creating one of these objects returns the union of all of the sub-objects;
1072 it is designed to be a faster method than chaining | (or) operations to
1073 create a single, large union.
1074
1075 Parameters
1076 ----------
1077 data_objects : Iterable of YTSelectionContainer
1078 The data objects to union
1079
1080 Examples
1081 --------
1082
1083 >>> import yt
1084 >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
1085 >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)
1086 >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)
1087 >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1)
1088 >>> new_obj = ds.union((sp1, sp2, sp3))
1089 >>> print(new_obj.sum("cell_volume"))
1090 """
1091 _type_name = "union"
1092 _con_args = ("data_objects",)
1093 def __init__(self, data_objects, ds = None, field_parameters = None,
1094 data_source = None):
1095 validate_iterable(data_objects)
1096 for obj in data_objects:
1097 validate_object(obj, YTSelectionContainer)
1098 validate_object(ds, Dataset)
1099 validate_object(field_parameters, dict)
1100 validate_object(data_source, YTSelectionContainer)
1101 YTSelectionContainer3D.__init__(self, None, ds, field_parameters,
1102 data_source)
1103 # ensure_list doesn't check for tuples
1104 if isinstance(data_objects, tuple):
1105 data_objects = list(data_objects)
1106 self.data_objects = ensure_list(data_objects)
1107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -876,7 +876,6 @@
self.conditionals = ensure_list(conditionals)
self.base_object = data_source
self._selector = None
- self._particle_mask = {}
# Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
# ires and get_data
@@ -992,18 +991,14 @@
return mask
def _part_ind(self, ptype):
- if self._particle_mask.get(ptype) is None:
- # If scipy is installed, use the fast KD tree
- # implementation. Else, fall back onto the direct
- # brute-force algorithm.
- try:
- _scipy.spatial.KDTree
- mask = self._part_ind_KDTree(ptype)
- except ImportError:
- mask = self._part_ind_brute_force(ptype)
-
- self._particle_mask[ptype] = mask
- return self._particle_mask[ptype]
+ # If scipy is installed, use the fast KD tree
+ # implementation. Else, fall back onto the direct
+ # brute-force algorithm.
+ try:
+ _scipy.spatial.KDTree
+ return self._part_ind_KDTree(ptype)
+ except ImportError:
+ return self._part_ind_brute_force(ptype)
@property
def icoords(self):
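
A note on why this patch addresses the reported `IndexError`: `_part_ind` builds a boolean mask over the parent container's particle positions, and the old code cached that mask per particle type. The parent's particle arrays can change length between reads (for example across the chunked iteration used by derived quantities such as `angular_momentum_vector`), so a cached mask can go stale and no longer match the array it is used to index, which is the shape mismatch shown in the traceback. Removing the cache means the mask is recomputed from the current selection on every call. The sketch below is a minimal way to exercise that code path; it reuses the reproduction script from the issue and assumes the `IsolatedGalaxy` sample dataset is available locally.
```python
# Minimal check of the cut_region particle-mask code path (reproduction
# script taken from the issue; assumes the IsolatedGalaxy sample data has
# been downloaded so that yt.load can find it).
import yt

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

sp = ds.sphere((0.5, 0.5, 0.5), (5, "kpc"))
dense_sp = sp.cut_region(['obj["H_p0_number_density"]>= 1e-2'])

# Before the patch this call could raise
#   IndexError: boolean index did not match indexed array along dimension 0
# because a cached particle mask from one read was applied to a parent
# particle array of a different length; with the cache removed the mask is
# rebuilt for the current selection and the quantity should evaluate.
print(dense_sp.quantities.angular_momentum_vector())
```
An alternative would have been to invalidate the cache whenever the underlying selection changes, but simply recomputing the mask, as the patch does, keeps `_part_ind` stateless and is the smaller change.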
| {"golden_diff": "diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py\n--- a/yt/data_objects/selection_data_containers.py\n+++ b/yt/data_objects/selection_data_containers.py\n@@ -876,7 +876,6 @@\n self.conditionals = ensure_list(conditionals)\n self.base_object = data_source\n self._selector = None\n- self._particle_mask = {}\n # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,\n # ires and get_data\n \n@@ -992,18 +991,14 @@\n return mask\n \n def _part_ind(self, ptype):\n- if self._particle_mask.get(ptype) is None:\n- # If scipy is installed, use the fast KD tree\n- # implementation. Else, fall back onto the direct\n- # brute-force algorithm.\n- try:\n- _scipy.spatial.KDTree\n- mask = self._part_ind_KDTree(ptype)\n- except ImportError:\n- mask = self._part_ind_brute_force(ptype)\n-\n- self._particle_mask[ptype] = mask\n- return self._particle_mask[ptype]\n+ # If scipy is installed, use the fast KD tree\n+ # implementation. Else, fall back onto the direct\n+ # brute-force algorithm.\n+ try:\n+ _scipy.spatial.KDTree\n+ return self._part_ind_KDTree(ptype)\n+ except ImportError:\n+ return self._part_ind_brute_force(ptype)\n \n @property\n def icoords(self):\n", "issue": "cut_region can't be used to derive angular momentum quantities \n<!--To help us understand and resolve your issue, please fill out the form to\r\nthe best of your ability.-->\r\n<!--You can feel free to delete the sections that do not apply.-->\r\n\r\nI'm trying to calculate the angular momentum vector of a sphere, and in particular, for the region inside the sphere that has density higher than a certain level. But the result from the cut_region cannot be used to calculate the angular momentum quantity. So here is the code I used for one of the test datasets on yt. \r\n\r\n```python\r\nimport yt\r\n\r\nds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\r\n\r\nsp = ds.sphere((0.5, 0.5, 0.5), (5, \"kpc\"))\r\ndense_sp = sp.cut_region(['obj[\"H_p0_number_density\"]>= 1e-2'])\r\ndense_sp.quantities.angular_momentum_vector()\r\n```\r\nThis produced the following traceback:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nIndexError Traceback (most recent call last)\r\n<ipython-input-83-3704089812fe> in <module>\r\n 5 sp = ds.sphere((0.5, 0.5, 0.5), (5, \"kpc\"))\r\n 6 dense_sp = sp.cut_region(['obj[\"H_p0_number_density\"]>= 1e-2'])\r\n----> 7 dense_sp.quantities.angular_momentum_vector()\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in __call__(self, *args, **kwargs)\r\n 67 storage = {}\r\n 68 for sto, ds in parallel_objects(chunks, -1, storage = storage):\r\n---> 69 sto.result = self.process_chunk(ds, *args, **kwargs)\r\n 70 # Now storage will have everything, and will be done via pickling, so\r\n 71 # the units will be preserved. 
(Credit to Nathan for this\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in process_chunk(self, data, use_gas, use_particles, particle_type)\r\n 493 rvals.extend([(data[self.particle_type, \"particle_specific_angular_momentum_%s\" % axis] *\r\n 494 data[self.particle_type, \"particle_mass\"]).sum(dtype=np.float64) \\\r\n--> 495 for axis in \"xyz\"])\r\n 496 rvals.append(data[self.particle_type, \"particle_mass\"].sum(dtype=np.float64))\r\n 497 return rvals\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/derived_quantities.py in <listcomp>(.0)\r\n 493 rvals.extend([(data[self.particle_type, \"particle_specific_angular_momentum_%s\" % axis] *\r\n 494 data[self.particle_type, \"particle_mass\"]).sum(dtype=np.float64) \\\r\n--> 495 for axis in \"xyz\"])\r\n 496 rvals.append(data[self.particle_type, \"particle_mass\"].sum(dtype=np.float64))\r\n 497 return rvals\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/data_containers.py in __getitem__(self, key)\r\n 253 return self.field_data[f]\r\n 254 else:\r\n--> 255 self.get_data(f)\r\n 256 # fi.units is the unit expression string. We depend on the registry\r\n 257 # hanging off the dataset to define this unit object.\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/data_objects/selection_data_containers.py in get_data(self, fields)\r\n 901 parent = getattr(self, \"parent\", self.base_object)\r\n 902 self.field_data[field] = \\\r\n--> 903 parent[field][self._part_ind(field[0])]\r\n 904 else:\r\n 905 self.field_data[field] = self.base_object[field][ind]\r\n\r\n~/.local/lib/python3.6/site-packages/yt-3.6.dev0-py3.6-macosx-10.9-x86_64.egg/yt/units/yt_array.py in __getitem__(self, item)\r\n 1056 \r\n 1057 def __getitem__(self, item):\r\n-> 1058 ret = super(YTArray, self).__getitem__(item)\r\n 1059 if ret.shape == ():\r\n 1060 return YTQuantity(ret, self.units, bypass_validation=True)\r\n\r\nIndexError: boolean index did not match indexed array along dimension 0; dimension is 44461 but corresponding boolean dimension is 3218\r\n```\r\n\r\n**Version Information**\r\n<!--Please specify your platform and versions of the relevant libraries you are\r\nusing:-->\r\n * Operating System: Mac Os High Sierra 10.13.6\r\n * Python Version: 3.6.6\r\n * yt version: 3.5.dev0 (installed from source)\r\n * Other Libraries (if applicable): \r\n\r\n[bug.txt](https://github.com/yt-project/yt/files/2621997/bug.txt)\r\n\n", "before_files": [{"content": "\"\"\"\nData containers based on geometric selection\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.data_objects.data_containers import \\\n YTSelectionContainer0D, YTSelectionContainer1D, \\\n YTSelectionContainer2D, YTSelectionContainer3D, YTSelectionContainer\nfrom yt.data_objects.static_output import Dataset\nfrom yt.extern.six import string_types\nfrom yt.funcs import ensure_list, iterable, validate_width_tuple, \\\n fix_length, fix_axis, validate_3d_array, validate_float, \\\n validate_iterable, validate_object, validate_axis, validate_center\nfrom 
yt.units.yt_array import \\\n YTArray, \\\n YTQuantity\nfrom yt.utilities.exceptions import \\\n YTSphereTooSmall, \\\n YTIllDefinedCutRegion, \\\n YTEllipsoidOrdering\nfrom yt.utilities.minimal_representation import \\\n MinimalSliceData\nfrom yt.utilities.math_utils import get_rotation_matrix\nfrom yt.utilities.orientation import Orientation\nfrom yt.geometry.selection_routines import points_in_cells\nfrom yt.utilities.on_demand_imports import _scipy\n\n\nclass YTPoint(YTSelectionContainer0D):\n \"\"\"\n A 0-dimensional object defined by a single point\n\n Parameters\n ----------\n p: array_like\n A points defined within the domain. If the domain is\n periodic its position will be corrected to lie inside\n the range [DLE,DRE) to ensure one and only one cell may\n match that point\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> point = ds.point(c)\n \"\"\"\n _type_name = \"point\"\n _con_args = ('p',)\n def __init__(self, p, ds=None, field_parameters=None, data_source=None):\n validate_3d_array(p)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTPoint, self).__init__(ds, field_parameters, data_source)\n if isinstance(p, YTArray):\n # we pass p through ds.arr to ensure code units are attached\n self.p = self.ds.arr(p)\n else:\n self.p = self.ds.arr(p, 'code_length')\n\nclass YTOrthoRay(YTSelectionContainer1D):\n \"\"\"\n This is an orthogonal ray cast through the entire domain, at a specific\n coordinate.\n\n This object is typically accessed through the `ortho_ray` object that\n hangs off of index objects. The resulting arrays have their\n dimensionality reduced to one, and an ordered list of points at an\n (x,y) tuple along `axis` are available.\n\n Parameters\n ----------\n axis : int or char\n The axis along which to slice. Can be 0, 1, or 2 for x, y, z.\n coords : tuple of floats\n The (plane_x, plane_y) coordinates at which to cast the ray. Note\n that this is in the plane coordinates: so if you are casting along\n x, this will be (y, z). If you are casting along y, this will be\n (z, x). If you are casting along z, this will be (x, y).\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> oray = ds.ortho_ray(0, (0.2, 0.74))\n >>> print oray[\"Density\"]\n\n Note: The low-level data representation for rays are not guaranteed to be \n spatially ordered. In particular, with AMR datasets, higher resolution \n data is tagged on to the end of the ray. 
If you want this data \n represented in a spatially ordered manner, manually sort it by the \"t\" \n field, which is the value of the parametric variable that goes from 0 at \n the start of the ray to 1 at the end:\n\n >>> my_ray = ds.ortho_ray(...)\n >>> ray_sort = np.argsort(my_ray[\"t\"])\n >>> density = my_ray[\"density\"][ray_sort]\n \"\"\"\n _key_fields = ['x','y','z','dx','dy','dz']\n _type_name = \"ortho_ray\"\n _con_args = ('axis', 'coords')\n def __init__(self, axis, coords, ds=None, \n field_parameters=None, data_source=None):\n validate_axis(ds, axis)\n validate_iterable(coords)\n for c in coords:\n validate_float(c)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTOrthoRay, self).__init__(ds, field_parameters, data_source)\n self.axis = fix_axis(axis, self.ds)\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n self.px_ax = xax\n self.py_ax = yax\n # Even though we may not be using x,y,z we use them here.\n self.px_dx = 'd%s'%('xyz'[self.px_ax])\n self.py_dx = 'd%s'%('xyz'[self.py_ax])\n # Convert coordinates to code length.\n if isinstance(coords[0], YTQuantity):\n self.px = self.ds.quan(coords[0]).to(\"code_length\")\n else:\n self.px = self.ds.quan(coords[0], \"code_length\")\n if isinstance(coords[1], YTQuantity):\n self.py = self.ds.quan(coords[1]).to(\"code_length\")\n else:\n self.py = self.ds.quan(coords[1], \"code_length\")\n self.sort_by = 'xyz'[self.axis]\n\n @property\n def coords(self):\n return (self.px, self.py)\n\nclass YTRay(YTSelectionContainer1D):\n \"\"\"\n This is an arbitrarily-aligned ray cast through the entire domain, at a\n specific coordinate.\n\n This object is typically accessed through the `ray` object that hangs\n off of index objects. The resulting arrays have their\n dimensionality reduced to one, and an ordered list of points at an\n (x,y) tuple along `axis` are available, as is the `t` field, which\n corresponds to a unitless measurement along the ray from start to\n end.\n\n Parameters\n ----------\n start_point : array-like set of 3 floats\n The place where the ray starts.\n end_point : array-like set of 3 floats\n The place where the ray ends.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31))\n >>> print ray[\"Density\"], ray[\"t\"], ray[\"dts\"]\n\n Note: The low-level data representation for rays are not guaranteed to be \n spatially ordered. In particular, with AMR datasets, higher resolution \n data is tagged on to the end of the ray. 
If you want this data \n represented in a spatially ordered manner, manually sort it by the \"t\" \n field, which is the value of the parametric variable that goes from 0 at \n the start of the ray to 1 at the end:\n\n >>> my_ray = ds.ray(...)\n >>> ray_sort = np.argsort(my_ray[\"t\"])\n >>> density = my_ray[\"density\"][ray_sort]\n\n\"\"\"\n _type_name = \"ray\"\n _con_args = ('start_point', 'end_point')\n _container_fields = (\"t\", \"dts\")\n def __init__(self, start_point, end_point, ds=None,\n field_parameters=None, data_source=None):\n validate_3d_array(start_point)\n validate_3d_array(end_point)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTRay, self).__init__(ds, field_parameters, data_source)\n if isinstance(start_point, YTArray):\n self.start_point = \\\n self.ds.arr(start_point).to(\"code_length\")\n else:\n self.start_point = \\\n self.ds.arr(start_point, 'code_length',\n dtype='float64')\n if isinstance(end_point, YTArray):\n self.end_point = \\\n self.ds.arr(end_point).to(\"code_length\")\n else:\n self.end_point = \\\n self.ds.arr(end_point, 'code_length',\n dtype='float64')\n self.vec = self.end_point - self.start_point\n self._set_center(self.start_point)\n self.set_field_parameter('center', self.start_point)\n self._dts, self._ts = None, None\n\n def _generate_container_field(self, field):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"dts\":\n return self._current_chunk.dtcoords\n elif field == \"t\":\n return self._current_chunk.tcoords\n else:\n raise KeyError(field)\n\nclass YTSlice(YTSelectionContainer2D):\n \"\"\"\n This is a data object corresponding to a slice through the simulation\n domain.\n\n This object is typically accessed through the `slice` object that hangs\n off of index objects. Slice is an orthogonal slice through the\n data, taking all the points at the finest resolution available and then\n indexing them. It is more appropriately thought of as a slice\n 'operator' than an object, however, as its field and coordinate can\n both change.\n\n Parameters\n ----------\n axis : int or char\n The axis along which to slice. Can be 0, 1, or 2 for x, y, z.\n coord : float\n The coordinate along the axis at which to slice. This is in\n \"domain\" coordinates.\n center : array_like, optional\n The 'center' supplied to fields that use it. Note that this does\n not have to have `coord` as one value. 
optional.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> slice = ds.slice(0, 0.25)\n >>> print slice[\"Density\"]\n \"\"\"\n _top_node = \"/Slices\"\n _type_name = \"slice\"\n _con_args = ('axis', 'coord')\n _container_fields = (\"px\", \"py\", \"pz\", \"pdx\", \"pdy\", \"pdz\")\n def __init__(self, axis, coord, center=None, ds=None,\n field_parameters=None, data_source=None):\n validate_axis(ds, axis)\n validate_float(coord)\n # center is an optional parameter\n if center is not None:\n validate_center(center)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer2D.__init__(self, axis, ds,\n field_parameters, data_source)\n self._set_center(center)\n self.coord = coord\n\n def _generate_container_field(self, field):\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"px\":\n return self._current_chunk.fcoords[:,xax]\n elif field == \"py\":\n return self._current_chunk.fcoords[:,yax]\n elif field == \"pz\":\n return self._current_chunk.fcoords[:,self.axis]\n elif field == \"pdx\":\n return self._current_chunk.fwidth[:,xax] * 0.5\n elif field == \"pdy\":\n return self._current_chunk.fwidth[:,yax] * 0.5\n elif field == \"pdz\":\n return self._current_chunk.fwidth[:,self.axis] * 0.5 \n else:\n raise KeyError(field)\n\n @property\n def _mrep(self):\n return MinimalSliceData(self)\n\n def hub_upload(self):\n self._mrep.upload()\n\n def to_pw(self, fields=None, center='c', width=None, origin='center-window'):\n r\"\"\"Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this\n object.\n\n This is a bare-bones mechanism of creating a plot window from this\n object, which can then be moved around, zoomed, and on and on. All\n behavior of the plot window is relegated to that routine.\n \"\"\"\n pw = self._get_pw(fields, center, width, origin, 'Slice')\n return pw\n\n def plot(self, fields=None):\n if hasattr(self._data_source, \"left_edge\") and \\\n hasattr(self._data_source, \"right_edge\"):\n left_edge = self._data_source.left_edge\n right_edge = self._data_source.right_edge\n center = (left_edge + right_edge)/2.0\n width = right_edge - left_edge\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n lx, rx = left_edge[xax], right_edge[xax]\n ly, ry = left_edge[yax], right_edge[yax]\n width = (rx-lx), (ry-ly)\n else:\n width = self.ds.domain_width\n center = self.ds.domain_center\n pw = self._get_pw(fields, center, width, 'native', 'Slice')\n pw.show()\n return pw\n\nclass YTCuttingPlane(YTSelectionContainer2D):\n \"\"\"\n This is a data object corresponding to an oblique slice through the\n simulation domain.\n\n This object is typically accessed through the `cutting` object\n that hangs off of index objects. 
A cutting plane is an oblique\n plane through the data, defined by a normal vector and a coordinate.\n It attempts to guess an 'north' vector, which can be overridden, and\n then it pixelizes the appropriate data onto the plane without\n interpolation.\n\n Parameters\n ----------\n normal : array_like\n The vector that defines the desired plane. For instance, the\n angular momentum of a sphere.\n center : array_like\n The center of the cutting plane, where the normal vector is anchored.\n north_vector: array_like, optional\n An optional vector to describe the north-facing direction in the resulting\n plane.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Notes\n -----\n\n This data object in particular can be somewhat expensive to create.\n It's also important to note that unlike the other 2D data objects, this\n object provides px, py, pz, as some cells may have a height from the\n plane.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])\n >>> print cp[\"Density\"]\n \"\"\"\n _plane = None\n _top_node = \"/CuttingPlanes\"\n _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']\n _type_name = \"cutting\"\n _con_args = ('normal', 'center')\n _tds_attrs = (\"_inv_mat\",)\n _tds_fields = (\"x\", \"y\", \"z\", \"dx\")\n _container_fields = (\"px\", \"py\", \"pz\", \"pdx\", \"pdy\", \"pdz\")\n def __init__(self, normal, center, north_vector=None,\n ds=None, field_parameters=None, data_source=None):\n validate_3d_array(normal)\n validate_center(center)\n if north_vector is not None:\n validate_3d_array(north_vector)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer2D.__init__(self, 4, ds,\n field_parameters, data_source)\n self._set_center(center)\n self.set_field_parameter('center',center)\n # Let's set up our plane equation\n # ax + by + cz + d = 0\n self.orienter = Orientation(normal, north_vector = north_vector)\n self._norm_vec = self.orienter.normal_vector\n self._d = -1.0 * np.dot(self._norm_vec, self.center)\n self._x_vec = self.orienter.unit_vectors[0]\n self._y_vec = self.orienter.unit_vectors[1]\n # First we try all three, see which has the best result:\n self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])\n self._inv_mat = np.linalg.pinv(self._rot_mat)\n self.set_field_parameter('cp_x_vec',self._x_vec)\n self.set_field_parameter('cp_y_vec',self._y_vec)\n self.set_field_parameter('cp_z_vec',self._norm_vec)\n\n @property\n def normal(self):\n return self._norm_vec\n\n def _generate_container_field(self, field):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"px\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._x_vec[0]\n tr += y * self._x_vec[1]\n tr += z * self._x_vec[2]\n return tr\n elif field == \"py\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = 
self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._y_vec[0]\n tr += y * self._y_vec[1]\n tr += z * self._y_vec[2]\n return tr\n elif field == \"pz\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._norm_vec[0]\n tr += y * self._norm_vec[1]\n tr += z * self._norm_vec[2]\n return tr\n elif field == \"pdx\":\n return self._current_chunk.fwidth[:,0] * 0.5\n elif field == \"pdy\":\n return self._current_chunk.fwidth[:,1] * 0.5\n elif field == \"pdz\":\n return self._current_chunk.fwidth[:,2] * 0.5\n else:\n raise KeyError(field)\n\n def to_pw(self, fields=None, center='c', width=None, axes_unit=None):\n r\"\"\"Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this\n object.\n\n This is a bare-bones mechanism of creating a plot window from this\n object, which can then be moved around, zoomed, and on and on. All\n behavior of the plot window is relegated to that routine.\n \"\"\"\n normal = self.normal\n center = self.center\n self.fields = ensure_list(fields) + [k for k in self.field_data.keys()\n if k not in self._key_fields]\n from yt.visualization.plot_window import get_oblique_window_parameters, PWViewerMPL\n from yt.visualization.fixed_resolution import FixedResolutionBuffer\n (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, self.ds)\n pw = PWViewerMPL(\n self, bounds, fields=self.fields, origin='center-window', \n periodic=False, oblique=True,\n frb_generator=FixedResolutionBuffer, \n plot_type='OffAxisSlice')\n if axes_unit is not None:\n pw.set_axes_unit(axes_unit)\n pw._setup_plots()\n return pw\n\n def to_frb(self, width, resolution, height=None, periodic=False):\n r\"\"\"This function returns a FixedResolutionBuffer generated from this\n object.\n\n An ObliqueFixedResolutionBuffer is an object that accepts a\n variable-resolution 2D object and transforms it into an NxM bitmap that\n can be plotted, examined or processed. This is a convenience function\n to return an FRB directly from an existing 2D data object. 
Unlike the\n corresponding to_frb function for other YTSelectionContainer2D objects, \n this does not accept a 'center' parameter as it is assumed to be \n centered at the center of the cutting plane.\n\n Parameters\n ----------\n width : width specifier\n This can either be a floating point value, in the native domain\n units of the simulation, or a tuple of the (value, unit) style.\n This will be the width of the FRB.\n height : height specifier, optional\n This will be the height of the FRB, by default it is equal to width.\n resolution : int or tuple of ints\n The number of pixels on a side of the final FRB.\n periodic : boolean\n This can be true or false, and governs whether the pixelization\n will span the domain boundaries.\n\n Returns\n -------\n frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`\n A fixed resolution buffer, which can be queried for fields.\n\n Examples\n --------\n\n >>> v, c = ds.find_max(\"density\")\n >>> sp = ds.sphere(c, (100.0, 'au'))\n >>> L = sp.quantities.angular_momentum_vector()\n >>> cutting = ds.cutting(L, c)\n >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)\n >>> write_image(np.log10(frb[\"Density\"]), 'density_1pc.png')\n \"\"\"\n if iterable(width):\n validate_width_tuple(width)\n width = self.ds.quan(width[0], width[1])\n if height is None:\n height = width\n elif iterable(height):\n validate_width_tuple(height)\n height = self.ds.quan(height[0], height[1])\n if not iterable(resolution):\n resolution = (resolution, resolution)\n from yt.visualization.fixed_resolution import FixedResolutionBuffer\n bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)\n frb = FixedResolutionBuffer(self, bounds, resolution,\n periodic=periodic)\n return frb\n\nclass YTDisk(YTSelectionContainer3D):\n \"\"\"\n By providing a *center*, a *normal*, a *radius* and a *height* we\n can define a cylinder of any proportion. 
Only cells whose centers are\n within the cylinder will be selected.\n\n Parameters\n ----------\n center : array_like\n coordinate to which the normal, radius, and height all reference\n normal : array_like\n the normal vector defining the direction of lengthwise part of the \n cylinder\n radius : float\n the radius of the cylinder\n height : float\n the distance from the midplane of the cylinder to the top and \n bottom planes\n fields : array of fields, optional\n any fields to be pre-loaded in the cylinder object\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))\n \"\"\"\n _type_name = \"disk\"\n _con_args = ('center', '_norm_vec', 'radius', 'height')\n def __init__(self, center, normal, radius, height, fields=None,\n ds=None, field_parameters=None, data_source=None):\n validate_center(center)\n validate_3d_array(normal)\n validate_float(radius)\n validate_float(height)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))\n self.set_field_parameter(\"normal\", self._norm_vec)\n self.set_field_parameter(\"center\", self.center)\n self.height = fix_length(height, self.ds)\n self.radius = fix_length(radius, self.ds)\n self._d = -1.0 * np.dot(self._norm_vec, self.center)\n\nclass YTRegion(YTSelectionContainer3D):\n \"\"\"A 3D region of data with an arbitrary center.\n\n Takes an array of three *left_edge* coordinates, three\n *right_edge* coordinates, and a *center* that can be anywhere\n in the domain. 
If the selected region extends past the edges\n of the domain, no data will be found there, though the\n object's `left_edge` or `right_edge` are not modified.\n\n Parameters\n ----------\n center : array_like\n The center of the region\n left_edge : array_like\n The left edge of the region\n right_edge : array_like\n The right edge of the region\n \"\"\"\n _type_name = \"region\"\n _con_args = ('center', 'left_edge', 'right_edge')\n def __init__(self, center, left_edge, right_edge, fields=None,\n ds=None, field_parameters=None, data_source=None):\n if center is not None:\n validate_center(center)\n validate_3d_array(left_edge)\n validate_3d_array(right_edge)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n if not isinstance(left_edge, YTArray):\n self.left_edge = self.ds.arr(left_edge, 'code_length')\n else:\n # need to assign this dataset's unit registry to the YTArray\n self.left_edge = self.ds.arr(left_edge.copy())\n if not isinstance(right_edge, YTArray):\n self.right_edge = self.ds.arr(right_edge, 'code_length')\n else:\n # need to assign this dataset's unit registry to the YTArray\n self.right_edge = self.ds.arr(right_edge.copy())\n\nclass YTDataCollection(YTSelectionContainer3D):\n \"\"\"\n By selecting an arbitrary *object_list*, we can act on those grids.\n Child cells are not returned.\n \"\"\"\n _type_name = \"data_collection\"\n _con_args = (\"_obj_list\",)\n def __init__(self, obj_list, ds=None, field_parameters=None,\n data_source=None, center=None):\n validate_iterable(obj_list)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n if center is not None:\n validate_center(center)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],\n dtype=\"int64\")\n self._obj_list = obj_list\n\nclass YTSphere(YTSelectionContainer3D):\n \"\"\"\n A sphere of points defined by a *center* and a *radius*.\n\n Parameters\n ----------\n center : array_like\n The center of the sphere.\n radius : float, width specifier, or YTQuantity\n The radius of the sphere. If passed a float,\n that will be interpreted in code units. 
Also\n accepts a (radius, unit) tuple or YTQuantity\n instance with units attached.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> sphere = ds.sphere(c, (1., \"kpc\"))\n \"\"\"\n _type_name = \"sphere\"\n _con_args = ('center', 'radius')\n def __init__(self, center, radius, ds=None,\n field_parameters=None, data_source=None):\n validate_center(center)\n validate_float(radius)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTSphere, self).__init__(center, ds,\n field_parameters, data_source)\n # Unpack the radius, if necessary\n radius = fix_length(radius, self.ds)\n if radius < self.index.get_smallest_dx():\n raise YTSphereTooSmall(ds, radius.in_units(\"code_length\"),\n self.index.get_smallest_dx().in_units(\"code_length\"))\n self.set_field_parameter('radius',radius)\n self.set_field_parameter(\"center\", self.center)\n self.radius = radius\n\nclass YTEllipsoid(YTSelectionContainer3D):\n \"\"\"\n By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we\n can define a ellipsoid of any proportion. Only cells whose\n centers are within the ellipsoid will be selected.\n\n Parameters\n ----------\n center : array_like\n The center of the ellipsoid.\n A : float\n The magnitude of the largest axis (semi-major) of the ellipsoid.\n B : float\n The magnitude of the medium axis (semi-medium) of the ellipsoid.\n C : float\n The magnitude of the smallest axis (semi-minor) of the ellipsoid.\n e0 : array_like (automatically normalized)\n the direction of the largest semi-major axis of the ellipsoid\n tilt : float\n After the rotation about the z-axis to allign e0 to x in the x-y\n plane, and then rotating about the y-axis to align e0 completely\n to the x-axis, tilt is the angle in radians remaining to\n rotate about the x-axis to align both e1 to the y-axis and e2 to\n the z-axis.\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)\n \"\"\"\n _type_name = \"ellipsoid\"\n _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')\n def __init__(self, center, A, B, C, e0, tilt, fields=None,\n ds=None, field_parameters=None, data_source=None):\n validate_center(center)\n validate_float(A)\n validate_float(B)\n validate_float(C)\n validate_3d_array(e0)\n validate_float(tilt)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n # make sure the magnitudes of semi-major axes are in order\n if A<B or B<C:\n raise YTEllipsoidOrdering(ds, A, B, C)\n # make sure the smallest side is not smaller than dx\n self._A = self.ds.quan(A, 'code_length')\n self._B = self.ds.quan(B, 'code_length')\n self._C = self.ds.quan(C, 'code_length')\n if self._C < self.index.get_smallest_dx():\n raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx())\n self._e0 = e0 = e0 / (e0**2.0).sum()**0.5\n self._tilt = tilt\n \n # find the t1 angle needed to rotate about z axis to align e0 to x\n t1 = np.arctan(e0[1] / e0[0])\n # rotate e0 by -t1\n RZ = get_rotation_matrix(t1, (0,0,1)).transpose()\n r1 = (e0 * RZ).sum(axis = 1)\n # find the t2 angle needed to rotate about y axis to align e0 to x\n t2 = np.arctan(-r1[2] / r1[0])\n \"\"\"\n calculate the original e1\n given 
the tilt about the x axis when e0 was aligned \n to x after t1, t2 rotations about z, y\n \"\"\"\n RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()\n RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()\n RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()\n e1 = ((0, 1, 0) * RX).sum(axis=1)\n e1 = (e1 * RY).sum(axis=1)\n e1 = (e1 * RZ).sum(axis=1)\n e2 = np.cross(e0, e1)\n\n self._e1 = e1\n self._e2 = e2\n\n self.set_field_parameter('A', A)\n self.set_field_parameter('B', B)\n self.set_field_parameter('C', C)\n self.set_field_parameter('e0', e0)\n self.set_field_parameter('e1', e1)\n self.set_field_parameter('e2', e2)\n\nclass YTCutRegion(YTSelectionContainer3D):\n \"\"\"\n This is a data object designed to allow individuals to apply logical\n operations to fields and filter as a result of those cuts.\n\n Parameters\n ----------\n data_source : YTSelectionContainer3D\n The object to which cuts will be applied.\n conditionals : list of strings\n A list of conditionals that will be evaluated. In the namespace\n available, these conditionals will have access to 'obj' which is a data\n object of unknown shape, and they must generate a boolean array. For\n instance, conditionals = [\"obj['temperature'] < 1e3\"]\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> sp = ds.sphere(\"max\", (1.0, 'Mpc'))\n >>> cr = ds.cut_region(sp, [\"obj['temperature'] < 1e3\"])\n \"\"\"\n _type_name = \"cut_region\"\n _con_args = (\"base_object\", \"conditionals\")\n def __init__(self, data_source, conditionals, ds=None,\n field_parameters=None, base_object=None):\n validate_object(data_source, YTSelectionContainer)\n validate_iterable(conditionals)\n for condition in conditionals:\n validate_object(condition, string_types)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(base_object, YTSelectionContainer)\n if base_object is not None:\n # passing base_object explicitly has been deprecated,\n # but we handle it here for backward compatibility\n if data_source is not None:\n raise RuntimeError(\n \"Cannot use both base_object and data_source\")\n data_source=base_object\n super(YTCutRegion, self).__init__(\n data_source.center, ds, field_parameters, data_source=data_source)\n self.conditionals = ensure_list(conditionals)\n self.base_object = data_source\n self._selector = None\n self._particle_mask = {}\n # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,\n # ires and get_data\n\n def chunks(self, fields, chunking_style, **kwargs):\n # We actually want to chunk the sub-chunk, not ourselves. We have no\n # chunks to speak of, as we do not data IO.\n for chunk in self.index._chunk(self.base_object,\n chunking_style,\n **kwargs):\n with self.base_object._chunked_read(chunk):\n with self._chunked_read(chunk):\n self.get_data(fields)\n yield self\n\n def get_data(self, fields = None):\n fields = ensure_list(fields)\n self.base_object.get_data(fields)\n ind = self._cond_ind\n for field in fields:\n f = self.base_object[field]\n if f.shape != ind.shape:\n parent = getattr(self, \"parent\", self.base_object)\n self.field_data[field] = \\\n parent[field][self._part_ind(field[0])]\n else:\n self.field_data[field] = self.base_object[field][ind]\n\n @property\n def blocks(self):\n # We have to take a slightly different approach here. 
Note that all\n # that .blocks has to yield is a 3D array and a mask.\n for obj, m in self.base_object.blocks:\n m = m.copy()\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n ss = eval(cond)\n m = np.logical_and(m, ss, m)\n if not np.any(m): continue\n yield obj, m\n\n @property\n def _cond_ind(self):\n ind = None\n obj = self.base_object\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n res = eval(cond)\n if ind is None: ind = res\n if ind.shape != res.shape:\n raise YTIllDefinedCutRegion(self.conditionals)\n np.logical_and(res, ind, ind)\n return ind\n\n def _part_ind_KDTree(self, ptype):\n '''Find the particles in cells using a KDTree approach.'''\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n\n pos = np.stack([self[(\"index\", 'x')].to(units),\n self[(\"index\", 'y')].to(units),\n self[(\"index\", 'z')].to(units)], axis=1).value\n dx = np.stack([self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units)], axis=1).value\n ppos = np.stack([parent[(ptype, \"particle_position_x\")],\n parent[(ptype, \"particle_position_y\")],\n parent[(ptype, \"particle_position_z\")]], axis=1).value\n levels = self[(\"index\", \"grid_level\")].astype('int32').value\n levelmin = levels.min()\n levelmax = levels.max()\n\n mask = np.zeros(ppos.shape[0], dtype=bool)\n\n for lvl in range(levelmax, levelmin-1, -1):\n # Filter out cells not in the current level\n lvl_mask = (levels == lvl)\n dx_loc = dx[lvl_mask]\n pos_loc = pos[lvl_mask]\n\n grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)\n\n # Compute closest cell for all remaining particles\n dist, icell = grid_tree.query(ppos[~mask], distance_upper_bound=dx_loc.max(),\n p=np.inf)\n mask_loc = np.isfinite(dist[:])\n\n # Check that particles within dx of a cell are in it\n i = icell[mask_loc]\n dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i])\n tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1)\n\n mask_loc[mask_loc] = tmp_mask\n\n # Update the particle mask with particles found at this level\n mask[~mask] |= mask_loc\n\n return mask\n\n def _part_ind_brute_force(self, ptype):\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n mask = points_in_cells(\n self[(\"index\", \"x\")].to(units),\n self[(\"index\", \"y\")].to(units),\n self[(\"index\", \"z\")].to(units),\n self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units),\n parent[(ptype, \"particle_position_x\")].to(units),\n parent[(ptype, \"particle_position_y\")].to(units),\n parent[(ptype, \"particle_position_z\")].to(units))\n\n return mask\n\n def _part_ind(self, ptype):\n if self._particle_mask.get(ptype) is None:\n # If scipy is installed, use the fast KD tree\n # implementation. 
Else, fall back onto the direct\n # brute-force algorithm.\n try:\n _scipy.spatial.KDTree\n mask = self._part_ind_KDTree(ptype)\n except ImportError:\n mask = self._part_ind_brute_force(ptype)\n\n self._particle_mask[ptype] = mask\n return self._particle_mask[ptype]\n\n @property\n def icoords(self):\n return self.base_object.icoords[self._cond_ind,:]\n\n @property\n def fcoords(self):\n return self.base_object.fcoords[self._cond_ind,:]\n\n @property\n def ires(self):\n return self.base_object.ires[self._cond_ind]\n\n @property\n def fwidth(self):\n return self.base_object.fwidth[self._cond_ind,:]\n\nclass YTIntersectionContainer3D(YTSelectionContainer3D):\n \"\"\"\n This is a more efficient method of selecting the intersection of multiple\n data selection objects.\n\n Creating one of these objects returns the intersection of all of the\n sub-objects; it is designed to be a faster method than chaining & (\"and\")\n operations to create a single, large intersection.\n\n Parameters\n ----------\n data_objects : Iterable of YTSelectionContainer\n The data objects to intersect\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15)\n >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1)\n >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15)\n >>> new_obj = ds.intersection((sp1, sp2, sp3))\n >>> print(new_obj.sum(\"cell_volume\"))\n \"\"\"\n _type_name = \"intersection\"\n _con_args = (\"data_objects\",)\n def __init__(self, data_objects, ds = None, field_parameters = None,\n data_source = None):\n validate_iterable(data_objects)\n for obj in data_objects:\n validate_object(obj, YTSelectionContainer)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, None, ds, field_parameters,\n data_source)\n # ensure_list doesn't check for tuples\n if isinstance(data_objects, tuple):\n data_objects = list(data_objects)\n self.data_objects = ensure_list(data_objects)\n\nclass YTDataObjectUnion(YTSelectionContainer3D):\n \"\"\"\n This is a more efficient method of selecting the union of multiple\n data selection objects.\n\n Creating one of these objects returns the union of all of the sub-objects;\n it is designed to be a faster method than chaining | (or) operations to\n create a single, large union.\n\n Parameters\n ----------\n data_objects : Iterable of YTSelectionContainer\n The data objects to union\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)\n >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)\n >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1)\n >>> new_obj = ds.union((sp1, sp2, sp3))\n >>> print(new_obj.sum(\"cell_volume\"))\n \"\"\"\n _type_name = \"union\"\n _con_args = (\"data_objects\",)\n def __init__(self, data_objects, ds = None, field_parameters = None,\n data_source = None):\n validate_iterable(data_objects)\n for obj in data_objects:\n validate_object(obj, YTSelectionContainer)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, None, ds, field_parameters,\n data_source)\n # ensure_list doesn't check for tuples\n if isinstance(data_objects, tuple):\n data_objects = list(data_objects)\n self.data_objects = ensure_list(data_objects)\n", "path": "yt/data_objects/selection_data_containers.py"}], "after_files": [{"content": 
"\"\"\"\nData containers based on geometric selection\n\n\n\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, yt Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom yt.data_objects.data_containers import \\\n YTSelectionContainer0D, YTSelectionContainer1D, \\\n YTSelectionContainer2D, YTSelectionContainer3D, YTSelectionContainer\nfrom yt.data_objects.static_output import Dataset\nfrom yt.extern.six import string_types\nfrom yt.funcs import ensure_list, iterable, validate_width_tuple, \\\n fix_length, fix_axis, validate_3d_array, validate_float, \\\n validate_iterable, validate_object, validate_axis, validate_center\nfrom yt.units.yt_array import \\\n YTArray, \\\n YTQuantity\nfrom yt.utilities.exceptions import \\\n YTSphereTooSmall, \\\n YTIllDefinedCutRegion, \\\n YTEllipsoidOrdering\nfrom yt.utilities.minimal_representation import \\\n MinimalSliceData\nfrom yt.utilities.math_utils import get_rotation_matrix\nfrom yt.utilities.orientation import Orientation\nfrom yt.geometry.selection_routines import points_in_cells\nfrom yt.utilities.on_demand_imports import _scipy\n\n\nclass YTPoint(YTSelectionContainer0D):\n \"\"\"\n A 0-dimensional object defined by a single point\n\n Parameters\n ----------\n p: array_like\n A points defined within the domain. If the domain is\n periodic its position will be corrected to lie inside\n the range [DLE,DRE) to ensure one and only one cell may\n match that point\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> point = ds.point(c)\n \"\"\"\n _type_name = \"point\"\n _con_args = ('p',)\n def __init__(self, p, ds=None, field_parameters=None, data_source=None):\n validate_3d_array(p)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTPoint, self).__init__(ds, field_parameters, data_source)\n if isinstance(p, YTArray):\n # we pass p through ds.arr to ensure code units are attached\n self.p = self.ds.arr(p)\n else:\n self.p = self.ds.arr(p, 'code_length')\n\nclass YTOrthoRay(YTSelectionContainer1D):\n \"\"\"\n This is an orthogonal ray cast through the entire domain, at a specific\n coordinate.\n\n This object is typically accessed through the `ortho_ray` object that\n hangs off of index objects. The resulting arrays have their\n dimensionality reduced to one, and an ordered list of points at an\n (x,y) tuple along `axis` are available.\n\n Parameters\n ----------\n axis : int or char\n The axis along which to slice. Can be 0, 1, or 2 for x, y, z.\n coords : tuple of floats\n The (plane_x, plane_y) coordinates at which to cast the ray. Note\n that this is in the plane coordinates: so if you are casting along\n x, this will be (y, z). If you are casting along y, this will be\n (z, x). 
If you are casting along z, this will be (x, y).\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> oray = ds.ortho_ray(0, (0.2, 0.74))\n >>> print oray[\"Density\"]\n\n Note: The low-level data representation for rays are not guaranteed to be \n spatially ordered. In particular, with AMR datasets, higher resolution \n data is tagged on to the end of the ray. If you want this data \n represented in a spatially ordered manner, manually sort it by the \"t\" \n field, which is the value of the parametric variable that goes from 0 at \n the start of the ray to 1 at the end:\n\n >>> my_ray = ds.ortho_ray(...)\n >>> ray_sort = np.argsort(my_ray[\"t\"])\n >>> density = my_ray[\"density\"][ray_sort]\n \"\"\"\n _key_fields = ['x','y','z','dx','dy','dz']\n _type_name = \"ortho_ray\"\n _con_args = ('axis', 'coords')\n def __init__(self, axis, coords, ds=None, \n field_parameters=None, data_source=None):\n validate_axis(ds, axis)\n validate_iterable(coords)\n for c in coords:\n validate_float(c)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTOrthoRay, self).__init__(ds, field_parameters, data_source)\n self.axis = fix_axis(axis, self.ds)\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n self.px_ax = xax\n self.py_ax = yax\n # Even though we may not be using x,y,z we use them here.\n self.px_dx = 'd%s'%('xyz'[self.px_ax])\n self.py_dx = 'd%s'%('xyz'[self.py_ax])\n # Convert coordinates to code length.\n if isinstance(coords[0], YTQuantity):\n self.px = self.ds.quan(coords[0]).to(\"code_length\")\n else:\n self.px = self.ds.quan(coords[0], \"code_length\")\n if isinstance(coords[1], YTQuantity):\n self.py = self.ds.quan(coords[1]).to(\"code_length\")\n else:\n self.py = self.ds.quan(coords[1], \"code_length\")\n self.sort_by = 'xyz'[self.axis]\n\n @property\n def coords(self):\n return (self.px, self.py)\n\nclass YTRay(YTSelectionContainer1D):\n \"\"\"\n This is an arbitrarily-aligned ray cast through the entire domain, at a\n specific coordinate.\n\n This object is typically accessed through the `ray` object that hangs\n off of index objects. 
The resulting arrays have their\n dimensionality reduced to one, and an ordered list of points at an\n (x,y) tuple along `axis` are available, as is the `t` field, which\n corresponds to a unitless measurement along the ray from start to\n end.\n\n Parameters\n ----------\n start_point : array-like set of 3 floats\n The place where the ray starts.\n end_point : array-like set of 3 floats\n The place where the ray ends.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31))\n >>> print ray[\"Density\"], ray[\"t\"], ray[\"dts\"]\n\n Note: The low-level data representation for rays are not guaranteed to be \n spatially ordered. In particular, with AMR datasets, higher resolution \n data is tagged on to the end of the ray. If you want this data \n represented in a spatially ordered manner, manually sort it by the \"t\" \n field, which is the value of the parametric variable that goes from 0 at \n the start of the ray to 1 at the end:\n\n >>> my_ray = ds.ray(...)\n >>> ray_sort = np.argsort(my_ray[\"t\"])\n >>> density = my_ray[\"density\"][ray_sort]\n\n\"\"\"\n _type_name = \"ray\"\n _con_args = ('start_point', 'end_point')\n _container_fields = (\"t\", \"dts\")\n def __init__(self, start_point, end_point, ds=None,\n field_parameters=None, data_source=None):\n validate_3d_array(start_point)\n validate_3d_array(end_point)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTRay, self).__init__(ds, field_parameters, data_source)\n if isinstance(start_point, YTArray):\n self.start_point = \\\n self.ds.arr(start_point).to(\"code_length\")\n else:\n self.start_point = \\\n self.ds.arr(start_point, 'code_length',\n dtype='float64')\n if isinstance(end_point, YTArray):\n self.end_point = \\\n self.ds.arr(end_point).to(\"code_length\")\n else:\n self.end_point = \\\n self.ds.arr(end_point, 'code_length',\n dtype='float64')\n self.vec = self.end_point - self.start_point\n self._set_center(self.start_point)\n self.set_field_parameter('center', self.start_point)\n self._dts, self._ts = None, None\n\n def _generate_container_field(self, field):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"dts\":\n return self._current_chunk.dtcoords\n elif field == \"t\":\n return self._current_chunk.tcoords\n else:\n raise KeyError(field)\n\nclass YTSlice(YTSelectionContainer2D):\n \"\"\"\n This is a data object corresponding to a slice through the simulation\n domain.\n\n This object is typically accessed through the `slice` object that hangs\n off of index objects. Slice is an orthogonal slice through the\n data, taking all the points at the finest resolution available and then\n indexing them. It is more appropriately thought of as a slice\n 'operator' than an object, however, as its field and coordinate can\n both change.\n\n Parameters\n ----------\n axis : int or char\n The axis along which to slice. Can be 0, 1, or 2 for x, y, z.\n coord : float\n The coordinate along the axis at which to slice. 
This is in\n \"domain\" coordinates.\n center : array_like, optional\n The 'center' supplied to fields that use it. Note that this does\n not have to have `coord` as one value. optional.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> slice = ds.slice(0, 0.25)\n >>> print slice[\"Density\"]\n \"\"\"\n _top_node = \"/Slices\"\n _type_name = \"slice\"\n _con_args = ('axis', 'coord')\n _container_fields = (\"px\", \"py\", \"pz\", \"pdx\", \"pdy\", \"pdz\")\n def __init__(self, axis, coord, center=None, ds=None,\n field_parameters=None, data_source=None):\n validate_axis(ds, axis)\n validate_float(coord)\n # center is an optional parameter\n if center is not None:\n validate_center(center)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer2D.__init__(self, axis, ds,\n field_parameters, data_source)\n self._set_center(center)\n self.coord = coord\n\n def _generate_container_field(self, field):\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"px\":\n return self._current_chunk.fcoords[:,xax]\n elif field == \"py\":\n return self._current_chunk.fcoords[:,yax]\n elif field == \"pz\":\n return self._current_chunk.fcoords[:,self.axis]\n elif field == \"pdx\":\n return self._current_chunk.fwidth[:,xax] * 0.5\n elif field == \"pdy\":\n return self._current_chunk.fwidth[:,yax] * 0.5\n elif field == \"pdz\":\n return self._current_chunk.fwidth[:,self.axis] * 0.5 \n else:\n raise KeyError(field)\n\n @property\n def _mrep(self):\n return MinimalSliceData(self)\n\n def hub_upload(self):\n self._mrep.upload()\n\n def to_pw(self, fields=None, center='c', width=None, origin='center-window'):\n r\"\"\"Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this\n object.\n\n This is a bare-bones mechanism of creating a plot window from this\n object, which can then be moved around, zoomed, and on and on. All\n behavior of the plot window is relegated to that routine.\n \"\"\"\n pw = self._get_pw(fields, center, width, origin, 'Slice')\n return pw\n\n def plot(self, fields=None):\n if hasattr(self._data_source, \"left_edge\") and \\\n hasattr(self._data_source, \"right_edge\"):\n left_edge = self._data_source.left_edge\n right_edge = self._data_source.right_edge\n center = (left_edge + right_edge)/2.0\n width = right_edge - left_edge\n xax = self.ds.coordinates.x_axis[self.axis]\n yax = self.ds.coordinates.y_axis[self.axis]\n lx, rx = left_edge[xax], right_edge[xax]\n ly, ry = left_edge[yax], right_edge[yax]\n width = (rx-lx), (ry-ly)\n else:\n width = self.ds.domain_width\n center = self.ds.domain_center\n pw = self._get_pw(fields, center, width, 'native', 'Slice')\n pw.show()\n return pw\n\nclass YTCuttingPlane(YTSelectionContainer2D):\n \"\"\"\n This is a data object corresponding to an oblique slice through the\n simulation domain.\n\n This object is typically accessed through the `cutting` object\n that hangs off of index objects. 
A cutting plane is an oblique\n plane through the data, defined by a normal vector and a coordinate.\n It attempts to guess an 'north' vector, which can be overridden, and\n then it pixelizes the appropriate data onto the plane without\n interpolation.\n\n Parameters\n ----------\n normal : array_like\n The vector that defines the desired plane. For instance, the\n angular momentum of a sphere.\n center : array_like\n The center of the cutting plane, where the normal vector is anchored.\n north_vector: array_like, optional\n An optional vector to describe the north-facing direction in the resulting\n plane.\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Notes\n -----\n\n This data object in particular can be somewhat expensive to create.\n It's also important to note that unlike the other 2D data objects, this\n object provides px, py, pz, as some cells may have a height from the\n plane.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])\n >>> print cp[\"Density\"]\n \"\"\"\n _plane = None\n _top_node = \"/CuttingPlanes\"\n _key_fields = YTSelectionContainer2D._key_fields + ['pz','pdz']\n _type_name = \"cutting\"\n _con_args = ('normal', 'center')\n _tds_attrs = (\"_inv_mat\",)\n _tds_fields = (\"x\", \"y\", \"z\", \"dx\")\n _container_fields = (\"px\", \"py\", \"pz\", \"pdx\", \"pdy\", \"pdz\")\n def __init__(self, normal, center, north_vector=None,\n ds=None, field_parameters=None, data_source=None):\n validate_3d_array(normal)\n validate_center(center)\n if north_vector is not None:\n validate_3d_array(north_vector)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer2D.__init__(self, 4, ds,\n field_parameters, data_source)\n self._set_center(center)\n self.set_field_parameter('center',center)\n # Let's set up our plane equation\n # ax + by + cz + d = 0\n self.orienter = Orientation(normal, north_vector = north_vector)\n self._norm_vec = self.orienter.normal_vector\n self._d = -1.0 * np.dot(self._norm_vec, self.center)\n self._x_vec = self.orienter.unit_vectors[0]\n self._y_vec = self.orienter.unit_vectors[1]\n # First we try all three, see which has the best result:\n self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])\n self._inv_mat = np.linalg.pinv(self._rot_mat)\n self.set_field_parameter('cp_x_vec',self._x_vec)\n self.set_field_parameter('cp_y_vec',self._y_vec)\n self.set_field_parameter('cp_z_vec',self._norm_vec)\n\n @property\n def normal(self):\n return self._norm_vec\n\n def _generate_container_field(self, field):\n if self._current_chunk is None:\n self.index._identify_base_chunk(self)\n if field == \"px\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._x_vec[0]\n tr += y * self._x_vec[1]\n tr += z * self._x_vec[2]\n return tr\n elif field == \"py\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = 
self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._y_vec[0]\n tr += y * self._y_vec[1]\n tr += z * self._y_vec[2]\n return tr\n elif field == \"pz\":\n x = self._current_chunk.fcoords[:,0] - self.center[0]\n y = self._current_chunk.fcoords[:,1] - self.center[1]\n z = self._current_chunk.fcoords[:,2] - self.center[2]\n tr = np.zeros(x.size, dtype='float64')\n tr = self.ds.arr(tr, \"code_length\")\n tr += x * self._norm_vec[0]\n tr += y * self._norm_vec[1]\n tr += z * self._norm_vec[2]\n return tr\n elif field == \"pdx\":\n return self._current_chunk.fwidth[:,0] * 0.5\n elif field == \"pdy\":\n return self._current_chunk.fwidth[:,1] * 0.5\n elif field == \"pdz\":\n return self._current_chunk.fwidth[:,2] * 0.5\n else:\n raise KeyError(field)\n\n def to_pw(self, fields=None, center='c', width=None, axes_unit=None):\n r\"\"\"Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this\n object.\n\n This is a bare-bones mechanism of creating a plot window from this\n object, which can then be moved around, zoomed, and on and on. All\n behavior of the plot window is relegated to that routine.\n \"\"\"\n normal = self.normal\n center = self.center\n self.fields = ensure_list(fields) + [k for k in self.field_data.keys()\n if k not in self._key_fields]\n from yt.visualization.plot_window import get_oblique_window_parameters, PWViewerMPL\n from yt.visualization.fixed_resolution import FixedResolutionBuffer\n (bounds, center_rot) = get_oblique_window_parameters(normal, center, width, self.ds)\n pw = PWViewerMPL(\n self, bounds, fields=self.fields, origin='center-window', \n periodic=False, oblique=True,\n frb_generator=FixedResolutionBuffer, \n plot_type='OffAxisSlice')\n if axes_unit is not None:\n pw.set_axes_unit(axes_unit)\n pw._setup_plots()\n return pw\n\n def to_frb(self, width, resolution, height=None, periodic=False):\n r\"\"\"This function returns a FixedResolutionBuffer generated from this\n object.\n\n An ObliqueFixedResolutionBuffer is an object that accepts a\n variable-resolution 2D object and transforms it into an NxM bitmap that\n can be plotted, examined or processed. This is a convenience function\n to return an FRB directly from an existing 2D data object. 
Unlike the\n corresponding to_frb function for other YTSelectionContainer2D objects, \n this does not accept a 'center' parameter as it is assumed to be \n centered at the center of the cutting plane.\n\n Parameters\n ----------\n width : width specifier\n This can either be a floating point value, in the native domain\n units of the simulation, or a tuple of the (value, unit) style.\n This will be the width of the FRB.\n height : height specifier, optional\n This will be the height of the FRB, by default it is equal to width.\n resolution : int or tuple of ints\n The number of pixels on a side of the final FRB.\n periodic : boolean\n This can be true or false, and governs whether the pixelization\n will span the domain boundaries.\n\n Returns\n -------\n frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer`\n A fixed resolution buffer, which can be queried for fields.\n\n Examples\n --------\n\n >>> v, c = ds.find_max(\"density\")\n >>> sp = ds.sphere(c, (100.0, 'au'))\n >>> L = sp.quantities.angular_momentum_vector()\n >>> cutting = ds.cutting(L, c)\n >>> frb = cutting.to_frb( (1.0, 'pc'), 1024)\n >>> write_image(np.log10(frb[\"Density\"]), 'density_1pc.png')\n \"\"\"\n if iterable(width):\n validate_width_tuple(width)\n width = self.ds.quan(width[0], width[1])\n if height is None:\n height = width\n elif iterable(height):\n validate_width_tuple(height)\n height = self.ds.quan(height[0], height[1])\n if not iterable(resolution):\n resolution = (resolution, resolution)\n from yt.visualization.fixed_resolution import FixedResolutionBuffer\n bounds = (-width/2.0, width/2.0, -height/2.0, height/2.0)\n frb = FixedResolutionBuffer(self, bounds, resolution,\n periodic=periodic)\n return frb\n\nclass YTDisk(YTSelectionContainer3D):\n \"\"\"\n By providing a *center*, a *normal*, a *radius* and a *height* we\n can define a cylinder of any proportion. 
Only cells whose centers are\n within the cylinder will be selected.\n\n Parameters\n ----------\n center : array_like\n coordinate to which the normal, radius, and height all reference\n normal : array_like\n the normal vector defining the direction of lengthwise part of the \n cylinder\n radius : float\n the radius of the cylinder\n height : float\n the distance from the midplane of the cylinder to the top and \n bottom planes\n fields : array of fields, optional\n any fields to be pre-loaded in the cylinder object\n ds: ~yt.data_objects.static_output.Dataset, optional\n An optional dataset to use rather than self.ds\n field_parameters : dictionary\n A dictionary of field parameters than can be accessed by derived\n fields.\n data_source: optional\n Draw the selection from the provided data source rather than\n all data associated with the data_set\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))\n \"\"\"\n _type_name = \"disk\"\n _con_args = ('center', '_norm_vec', 'radius', 'height')\n def __init__(self, center, normal, radius, height, fields=None,\n ds=None, field_parameters=None, data_source=None):\n validate_center(center)\n validate_3d_array(normal)\n validate_float(radius)\n validate_float(height)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n self._norm_vec = np.array(normal)/np.sqrt(np.dot(normal,normal))\n self.set_field_parameter(\"normal\", self._norm_vec)\n self.set_field_parameter(\"center\", self.center)\n self.height = fix_length(height, self.ds)\n self.radius = fix_length(radius, self.ds)\n self._d = -1.0 * np.dot(self._norm_vec, self.center)\n\nclass YTRegion(YTSelectionContainer3D):\n \"\"\"A 3D region of data with an arbitrary center.\n\n Takes an array of three *left_edge* coordinates, three\n *right_edge* coordinates, and a *center* that can be anywhere\n in the domain. 
If the selected region extends past the edges\n of the domain, no data will be found there, though the\n object's `left_edge` or `right_edge` are not modified.\n\n Parameters\n ----------\n center : array_like\n The center of the region\n left_edge : array_like\n The left edge of the region\n right_edge : array_like\n The right edge of the region\n \"\"\"\n _type_name = \"region\"\n _con_args = ('center', 'left_edge', 'right_edge')\n def __init__(self, center, left_edge, right_edge, fields=None,\n ds=None, field_parameters=None, data_source=None):\n if center is not None:\n validate_center(center)\n validate_3d_array(left_edge)\n validate_3d_array(right_edge)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n if not isinstance(left_edge, YTArray):\n self.left_edge = self.ds.arr(left_edge, 'code_length')\n else:\n # need to assign this dataset's unit registry to the YTArray\n self.left_edge = self.ds.arr(left_edge.copy())\n if not isinstance(right_edge, YTArray):\n self.right_edge = self.ds.arr(right_edge, 'code_length')\n else:\n # need to assign this dataset's unit registry to the YTArray\n self.right_edge = self.ds.arr(right_edge.copy())\n\nclass YTDataCollection(YTSelectionContainer3D):\n \"\"\"\n By selecting an arbitrary *object_list*, we can act on those grids.\n Child cells are not returned.\n \"\"\"\n _type_name = \"data_collection\"\n _con_args = (\"_obj_list\",)\n def __init__(self, obj_list, ds=None, field_parameters=None,\n data_source=None, center=None):\n validate_iterable(obj_list)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n if center is not None:\n validate_center(center)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n self._obj_ids = np.array([o.id - o._id_offset for o in obj_list],\n dtype=\"int64\")\n self._obj_list = obj_list\n\nclass YTSphere(YTSelectionContainer3D):\n \"\"\"\n A sphere of points defined by a *center* and a *radius*.\n\n Parameters\n ----------\n center : array_like\n The center of the sphere.\n radius : float, width specifier, or YTQuantity\n The radius of the sphere. If passed a float,\n that will be interpreted in code units. 
Also\n accepts a (radius, unit) tuple or YTQuantity\n instance with units attached.\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> sphere = ds.sphere(c, (1., \"kpc\"))\n \"\"\"\n _type_name = \"sphere\"\n _con_args = ('center', 'radius')\n def __init__(self, center, radius, ds=None,\n field_parameters=None, data_source=None):\n validate_center(center)\n validate_float(radius)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n super(YTSphere, self).__init__(center, ds,\n field_parameters, data_source)\n # Unpack the radius, if necessary\n radius = fix_length(radius, self.ds)\n if radius < self.index.get_smallest_dx():\n raise YTSphereTooSmall(ds, radius.in_units(\"code_length\"),\n self.index.get_smallest_dx().in_units(\"code_length\"))\n self.set_field_parameter('radius',radius)\n self.set_field_parameter(\"center\", self.center)\n self.radius = radius\n\nclass YTEllipsoid(YTSelectionContainer3D):\n \"\"\"\n By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we\n can define a ellipsoid of any proportion. Only cells whose\n centers are within the ellipsoid will be selected.\n\n Parameters\n ----------\n center : array_like\n The center of the ellipsoid.\n A : float\n The magnitude of the largest axis (semi-major) of the ellipsoid.\n B : float\n The magnitude of the medium axis (semi-medium) of the ellipsoid.\n C : float\n The magnitude of the smallest axis (semi-minor) of the ellipsoid.\n e0 : array_like (automatically normalized)\n the direction of the largest semi-major axis of the ellipsoid\n tilt : float\n After the rotation about the z-axis to allign e0 to x in the x-y\n plane, and then rotating about the y-axis to align e0 completely\n to the x-axis, tilt is the angle in radians remaining to\n rotate about the x-axis to align both e1 to the y-axis and e2 to\n the z-axis.\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> c = [0.5,0.5,0.5]\n >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)\n \"\"\"\n _type_name = \"ellipsoid\"\n _con_args = ('center', '_A', '_B', '_C', '_e0', '_tilt')\n def __init__(self, center, A, B, C, e0, tilt, fields=None,\n ds=None, field_parameters=None, data_source=None):\n validate_center(center)\n validate_float(A)\n validate_float(B)\n validate_float(C)\n validate_3d_array(e0)\n validate_float(tilt)\n validate_iterable(fields)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, center, ds,\n field_parameters, data_source)\n # make sure the magnitudes of semi-major axes are in order\n if A<B or B<C:\n raise YTEllipsoidOrdering(ds, A, B, C)\n # make sure the smallest side is not smaller than dx\n self._A = self.ds.quan(A, 'code_length')\n self._B = self.ds.quan(B, 'code_length')\n self._C = self.ds.quan(C, 'code_length')\n if self._C < self.index.get_smallest_dx():\n raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx())\n self._e0 = e0 = e0 / (e0**2.0).sum()**0.5\n self._tilt = tilt\n \n # find the t1 angle needed to rotate about z axis to align e0 to x\n t1 = np.arctan(e0[1] / e0[0])\n # rotate e0 by -t1\n RZ = get_rotation_matrix(t1, (0,0,1)).transpose()\n r1 = (e0 * RZ).sum(axis = 1)\n # find the t2 angle needed to rotate about y axis to align e0 to x\n t2 = np.arctan(-r1[2] / r1[0])\n \"\"\"\n calculate the original e1\n given 
the tilt about the x axis when e0 was aligned \n to x after t1, t2 rotations about z, y\n \"\"\"\n RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose()\n RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose()\n RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose()\n e1 = ((0, 1, 0) * RX).sum(axis=1)\n e1 = (e1 * RY).sum(axis=1)\n e1 = (e1 * RZ).sum(axis=1)\n e2 = np.cross(e0, e1)\n\n self._e1 = e1\n self._e2 = e2\n\n self.set_field_parameter('A', A)\n self.set_field_parameter('B', B)\n self.set_field_parameter('C', C)\n self.set_field_parameter('e0', e0)\n self.set_field_parameter('e1', e1)\n self.set_field_parameter('e2', e2)\n\nclass YTCutRegion(YTSelectionContainer3D):\n \"\"\"\n This is a data object designed to allow individuals to apply logical\n operations to fields and filter as a result of those cuts.\n\n Parameters\n ----------\n data_source : YTSelectionContainer3D\n The object to which cuts will be applied.\n conditionals : list of strings\n A list of conditionals that will be evaluated. In the namespace\n available, these conditionals will have access to 'obj' which is a data\n object of unknown shape, and they must generate a boolean array. For\n instance, conditionals = [\"obj['temperature'] < 1e3\"]\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> sp = ds.sphere(\"max\", (1.0, 'Mpc'))\n >>> cr = ds.cut_region(sp, [\"obj['temperature'] < 1e3\"])\n \"\"\"\n _type_name = \"cut_region\"\n _con_args = (\"base_object\", \"conditionals\")\n def __init__(self, data_source, conditionals, ds=None,\n field_parameters=None, base_object=None):\n validate_object(data_source, YTSelectionContainer)\n validate_iterable(conditionals)\n for condition in conditionals:\n validate_object(condition, string_types)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(base_object, YTSelectionContainer)\n if base_object is not None:\n # passing base_object explicitly has been deprecated,\n # but we handle it here for backward compatibility\n if data_source is not None:\n raise RuntimeError(\n \"Cannot use both base_object and data_source\")\n data_source=base_object\n super(YTCutRegion, self).__init__(\n data_source.center, ds, field_parameters, data_source=data_source)\n self.conditionals = ensure_list(conditionals)\n self.base_object = data_source\n self._selector = None\n # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,\n # ires and get_data\n\n def chunks(self, fields, chunking_style, **kwargs):\n # We actually want to chunk the sub-chunk, not ourselves. We have no\n # chunks to speak of, as we do not data IO.\n for chunk in self.index._chunk(self.base_object,\n chunking_style,\n **kwargs):\n with self.base_object._chunked_read(chunk):\n with self._chunked_read(chunk):\n self.get_data(fields)\n yield self\n\n def get_data(self, fields = None):\n fields = ensure_list(fields)\n self.base_object.get_data(fields)\n ind = self._cond_ind\n for field in fields:\n f = self.base_object[field]\n if f.shape != ind.shape:\n parent = getattr(self, \"parent\", self.base_object)\n self.field_data[field] = \\\n parent[field][self._part_ind(field[0])]\n else:\n self.field_data[field] = self.base_object[field][ind]\n\n @property\n def blocks(self):\n # We have to take a slightly different approach here. 
Note that all\n # that .blocks has to yield is a 3D array and a mask.\n for obj, m in self.base_object.blocks:\n m = m.copy()\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n ss = eval(cond)\n m = np.logical_and(m, ss, m)\n if not np.any(m): continue\n yield obj, m\n\n @property\n def _cond_ind(self):\n ind = None\n obj = self.base_object\n with obj._field_parameter_state(self.field_parameters):\n for cond in self.conditionals:\n res = eval(cond)\n if ind is None: ind = res\n if ind.shape != res.shape:\n raise YTIllDefinedCutRegion(self.conditionals)\n np.logical_and(res, ind, ind)\n return ind\n\n def _part_ind_KDTree(self, ptype):\n '''Find the particles in cells using a KDTree approach.'''\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n\n pos = np.stack([self[(\"index\", 'x')].to(units),\n self[(\"index\", 'y')].to(units),\n self[(\"index\", 'z')].to(units)], axis=1).value\n dx = np.stack([self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units)], axis=1).value\n ppos = np.stack([parent[(ptype, \"particle_position_x\")],\n parent[(ptype, \"particle_position_y\")],\n parent[(ptype, \"particle_position_z\")]], axis=1).value\n levels = self[(\"index\", \"grid_level\")].astype('int32').value\n levelmin = levels.min()\n levelmax = levels.max()\n\n mask = np.zeros(ppos.shape[0], dtype=bool)\n\n for lvl in range(levelmax, levelmin-1, -1):\n # Filter out cells not in the current level\n lvl_mask = (levels == lvl)\n dx_loc = dx[lvl_mask]\n pos_loc = pos[lvl_mask]\n\n grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1)\n\n # Compute closest cell for all remaining particles\n dist, icell = grid_tree.query(ppos[~mask], distance_upper_bound=dx_loc.max(),\n p=np.inf)\n mask_loc = np.isfinite(dist[:])\n\n # Check that particles within dx of a cell are in it\n i = icell[mask_loc]\n dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i])\n tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1)\n\n mask_loc[mask_loc] = tmp_mask\n\n # Update the particle mask with particles found at this level\n mask[~mask] |= mask_loc\n\n return mask\n\n def _part_ind_brute_force(self, ptype):\n parent = getattr(self, \"parent\", self.base_object)\n units = \"code_length\"\n mask = points_in_cells(\n self[(\"index\", \"x\")].to(units),\n self[(\"index\", \"y\")].to(units),\n self[(\"index\", \"z\")].to(units),\n self[(\"index\", \"dx\")].to(units),\n self[(\"index\", \"dy\")].to(units),\n self[(\"index\", \"dz\")].to(units),\n parent[(ptype, \"particle_position_x\")].to(units),\n parent[(ptype, \"particle_position_y\")].to(units),\n parent[(ptype, \"particle_position_z\")].to(units))\n\n return mask\n\n def _part_ind(self, ptype):\n # If scipy is installed, use the fast KD tree\n # implementation. 
Else, fall back onto the direct\n # brute-force algorithm.\n try:\n _scipy.spatial.KDTree\n return self._part_ind_KDTree(ptype)\n except ImportError:\n return self._part_ind_brute_force(ptype)\n\n @property\n def icoords(self):\n return self.base_object.icoords[self._cond_ind,:]\n\n @property\n def fcoords(self):\n return self.base_object.fcoords[self._cond_ind,:]\n\n @property\n def ires(self):\n return self.base_object.ires[self._cond_ind]\n\n @property\n def fwidth(self):\n return self.base_object.fwidth[self._cond_ind,:]\n\nclass YTIntersectionContainer3D(YTSelectionContainer3D):\n \"\"\"\n This is a more efficient method of selecting the intersection of multiple\n data selection objects.\n\n Creating one of these objects returns the intersection of all of the\n sub-objects; it is designed to be a faster method than chaining & (\"and\")\n operations to create a single, large intersection.\n\n Parameters\n ----------\n data_objects : Iterable of YTSelectionContainer\n The data objects to intersect\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"RedshiftOutput0005\")\n >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15)\n >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1)\n >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15)\n >>> new_obj = ds.intersection((sp1, sp2, sp3))\n >>> print(new_obj.sum(\"cell_volume\"))\n \"\"\"\n _type_name = \"intersection\"\n _con_args = (\"data_objects\",)\n def __init__(self, data_objects, ds = None, field_parameters = None,\n data_source = None):\n validate_iterable(data_objects)\n for obj in data_objects:\n validate_object(obj, YTSelectionContainer)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, None, ds, field_parameters,\n data_source)\n # ensure_list doesn't check for tuples\n if isinstance(data_objects, tuple):\n data_objects = list(data_objects)\n self.data_objects = ensure_list(data_objects)\n\nclass YTDataObjectUnion(YTSelectionContainer3D):\n \"\"\"\n This is a more efficient method of selecting the union of multiple\n data selection objects.\n\n Creating one of these objects returns the union of all of the sub-objects;\n it is designed to be a faster method than chaining | (or) operations to\n create a single, large union.\n\n Parameters\n ----------\n data_objects : Iterable of YTSelectionContainer\n The data objects to union\n\n Examples\n --------\n\n >>> import yt\n >>> ds = yt.load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1)\n >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1)\n >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1)\n >>> new_obj = ds.union((sp1, sp2, sp3))\n >>> print(new_obj.sum(\"cell_volume\"))\n \"\"\"\n _type_name = \"union\"\n _con_args = (\"data_objects\",)\n def __init__(self, data_objects, ds = None, field_parameters = None,\n data_source = None):\n validate_iterable(data_objects)\n for obj in data_objects:\n validate_object(obj, YTSelectionContainer)\n validate_object(ds, Dataset)\n validate_object(field_parameters, dict)\n validate_object(data_source, YTSelectionContainer)\n YTSelectionContainer3D.__init__(self, None, ds, field_parameters,\n data_source)\n # ensure_list doesn't check for tuples\n if isinstance(data_objects, tuple):\n data_objects = list(data_objects)\n self.data_objects = ensure_list(data_objects)\n", "path": "yt/data_objects/selection_data_containers.py"}]} |
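The escaped record above closes the yt selection-container example (spheres, ellipsoids, cut regions). As rough orientation only, here is a minimal Python sketch of how those objects combine, pieced together from the docstring examples embedded in that record; the dataset name is the sample used in those docstrings and is assumed to be available locally.

```python
import yt

# Sample dataset name taken from the docstrings quoted in the record above;
# assumed to be downloaded and discoverable by yt.load().
ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")

# A sphere centred on the density maximum, 1 Mpc in radius.
sp = ds.sphere("max", (1.0, "Mpc"))

# Restrict the sphere to cool gas with a cut region; each conditional string
# is evaluated against the selected cells, exposed as 'obj'.
cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])

# Field access on the cut region only returns the cells that pass the filter,
# so the maximum temperature here should stay below the 1e3 cutoff.
print(cr["temperature"].max())
```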
gh_patches_debug_437 | rasdani/github-patches | git_diff | pypa__setuptools-2584 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add mechanism for side-by-side comparison of setup.py and its equivalent setup.cfg
We have many documentation examples that are purely declarative and are either documented as `setup.py` or `setup.cfg`. It would be really awesome if, for each of these, we had the option to have either both versions side-by-side or, even better, in a sort of "tabbed container", like the one in the [code sample at the bottom of this example](https://leetcode.com/articles/median-of-two-sorted-arrays/).
Requirements for this:
1. Cannot *link to* any third-party javascript dependencies. Ideally we wouldn't use any at all, but if you do they must be vendored in the documentation.
2. If javascript is disabled, it has to fall back to something intelligible.
Ideally it would be implemented in pure CSS / HTML if that's at all possible.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']
2
3 master_doc = "index"
4
5 link_files = {
6 '../CHANGES.rst': dict(
7 using=dict(
8 BB='https://bitbucket.org',
9 GH='https://github.com',
10 ),
11 replace=[
12 dict(
13 pattern=r'(Issue )?#(?P<issue>\d+)',
14 url='{package_url}/issues/{issue}',
15 ),
16 dict(
17 pattern=r'BB Pull Request ?#(?P<bb_pull_request>\d+)',
18 url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',
19 ),
20 dict(
21 pattern=r'Distribute #(?P<distribute>\d+)',
22 url='{BB}/tarek/distribute/issue/{distribute}',
23 ),
24 dict(
25 pattern=r'Buildout #(?P<buildout>\d+)',
26 url='{GH}/buildout/buildout/issues/{buildout}',
27 ),
28 dict(
29 pattern=r'Old Setuptools #(?P<old_setuptools>\d+)',
30 url='http://bugs.python.org/setuptools/issue{old_setuptools}',
31 ),
32 dict(
33 pattern=r'Jython #(?P<jython>\d+)',
34 url='http://bugs.jython.org/issue{jython}',
35 ),
36 dict(
37 pattern=r'(Python #|bpo-)(?P<python>\d+)',
38 url='http://bugs.python.org/issue{python}',
39 ),
40 dict(
41 pattern=r'Interop #(?P<interop>\d+)',
42 url='{GH}/pypa/interoperability-peps/issues/{interop}',
43 ),
44 dict(
45 pattern=r'Pip #(?P<pip>\d+)',
46 url='{GH}/pypa/pip/issues/{pip}',
47 ),
48 dict(
49 pattern=r'Packaging #(?P<packaging>\d+)',
50 url='{GH}/pypa/packaging/issues/{packaging}',
51 ),
52 dict(
53 pattern=r'[Pp]ackaging (?P<packaging_ver>\d+(\.\d+)+)',
54 url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',
55 ),
56 dict(
57 pattern=r'PEP[- ](?P<pep_number>\d+)',
58 url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',
59 ),
60 dict(
61 pattern=r'setuptools_svn #(?P<setuptools_svn>\d+)',
62 url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',
63 ),
64 dict(
65 pattern=r'pypa/distutils#(?P<distutils>\d+)',
66 url='{GH}/pypa/distutils/issues/{distutils}',
67 ),
68 dict(
69 pattern=r'^(?m)((?P<scm_version>v?\d+(\.\d+){1,2}))\n[-=]+\n',
70 with_scm='{text}\n{rev[timestamp]:%d %b %Y}\n',
71 ),
72 ],
73 ),
74 }
75
76 intersphinx_mapping = {
77 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)
78 }
79
80 # Add support for linking usernames
81 github_url = 'https://github.com'
82 github_sponsors_url = f'{github_url}/sponsors'
83 extlinks = {
84 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323
85 }
86 extensions += ['sphinx.ext.extlinks', 'sphinx.ext.intersphinx']
87
88 # Be strict about any broken references:
89 nitpicky = True
90
91 # Ref: https://github.com/python-attrs/attrs/pull/571/files\
92 # #diff-85987f48f1258d9ee486e3191495582dR82
93 default_role = 'any'
94
95 # Custom sidebar templates, maps document names to template names.
96 html_theme = 'alabaster'
97 templates_path = ['_templates']
98 html_sidebars = {'index': ['tidelift-sidebar.html']}
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -93,3 +93,6 @@
html_theme = 'alabaster'
templates_path = ['_templates']
html_sidebars = {'index': ['tidelift-sidebar.html']}
+
+# Add support for inline tabs
+extensions += ['sphinx_inline_tabs']
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -93,3 +93,6 @@\n html_theme = 'alabaster'\n templates_path = ['_templates']\n html_sidebars = {'index': ['tidelift-sidebar.html']}\n+\n+# Add support for inline tabs\n+extensions += ['sphinx_inline_tabs']\n", "issue": "Add mechanism for side-by-side comparison of setup.py and its equivalent setup.cfg\nWe have many documentation examples that are purely declarative and are either documented as `setup.py` or `setup.cfg`. It would be really awesome if, for each of these, we had the option to have either both versions side-by-side or, even better, in a sort of \"tabbed container\", like the one in the [code sample at the bottom of this example](https://leetcode.com/articles/median-of-two-sorted-arrays/).\r\n\r\nRequirements for this:\r\n\r\n1. Cannot *link to* any third-party javascript dependencies. Ideally we wouldn't use any at all, but if you do they must be vendored in the documentation.\r\n2. If javascript is disabled, it has to fall back to something intelligible.\r\n\r\nIdeally it would be implemented in pure CSS / HTML if that's at all possible.\n", "before_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\nintersphinx_mapping = {\n 'pypa-build': ('https://pypa-build.readthedocs.io/en/latest/', None)\n}\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks', 'sphinx.ext.intersphinx']\n\n# Be strict about any broken 
references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n", "path": "docs/conf.py"}], "after_files": [{"content": "extensions = ['sphinx.ext.autodoc', 'jaraco.packaging.sphinx', 'rst.linker']\n\nmaster_doc = \"index\"\n\nlink_files = {\n '../CHANGES.rst': dict(\n using=dict(\n BB='https://bitbucket.org',\n GH='https://github.com',\n ),\n replace=[\n dict(\n pattern=r'(Issue )?#(?P<issue>\\d+)',\n url='{package_url}/issues/{issue}',\n ),\n dict(\n pattern=r'BB Pull Request ?#(?P<bb_pull_request>\\d+)',\n url='{BB}/pypa/setuptools/pull-request/{bb_pull_request}',\n ),\n dict(\n pattern=r'Distribute #(?P<distribute>\\d+)',\n url='{BB}/tarek/distribute/issue/{distribute}',\n ),\n dict(\n pattern=r'Buildout #(?P<buildout>\\d+)',\n url='{GH}/buildout/buildout/issues/{buildout}',\n ),\n dict(\n pattern=r'Old Setuptools #(?P<old_setuptools>\\d+)',\n url='http://bugs.python.org/setuptools/issue{old_setuptools}',\n ),\n dict(\n pattern=r'Jython #(?P<jython>\\d+)',\n url='http://bugs.jython.org/issue{jython}',\n ),\n dict(\n pattern=r'(Python #|bpo-)(?P<python>\\d+)',\n url='http://bugs.python.org/issue{python}',\n ),\n dict(\n pattern=r'Interop #(?P<interop>\\d+)',\n url='{GH}/pypa/interoperability-peps/issues/{interop}',\n ),\n dict(\n pattern=r'Pip #(?P<pip>\\d+)',\n url='{GH}/pypa/pip/issues/{pip}',\n ),\n dict(\n pattern=r'Packaging #(?P<packaging>\\d+)',\n url='{GH}/pypa/packaging/issues/{packaging}',\n ),\n dict(\n pattern=r'[Pp]ackaging (?P<packaging_ver>\\d+(\\.\\d+)+)',\n url='{GH}/pypa/packaging/blob/{packaging_ver}/CHANGELOG.rst',\n ),\n dict(\n pattern=r'PEP[- ](?P<pep_number>\\d+)',\n url='https://www.python.org/dev/peps/pep-{pep_number:0>4}/',\n ),\n dict(\n pattern=r'setuptools_svn #(?P<setuptools_svn>\\d+)',\n url='{GH}/jaraco/setuptools_svn/issues/{setuptools_svn}',\n ),\n dict(\n pattern=r'pypa/distutils#(?P<distutils>\\d+)',\n url='{GH}/pypa/distutils/issues/{distutils}',\n ),\n dict(\n pattern=r'^(?m)((?P<scm_version>v?\\d+(\\.\\d+){1,2}))\\n[-=]+\\n',\n with_scm='{text}\\n{rev[timestamp]:%d %b %Y}\\n',\n ),\n ],\n ),\n}\n\n\n# Add support for linking usernames\ngithub_url = 'https://github.com'\ngithub_sponsors_url = f'{github_url}/sponsors'\nextlinks = {\n 'user': (f'{github_sponsors_url}/%s', '@'), # noqa: WPS323\n}\nextensions += ['sphinx.ext.extlinks']\n\n# Be strict about any broken references:\nnitpicky = True\n\n# Ref: https://github.com/python-attrs/attrs/pull/571/files\\\n# #diff-85987f48f1258d9ee486e3191495582dR82\ndefault_role = 'any'\n\n# Custom sidebar templates, maps document names to template names.\nhtml_theme = 'alabaster'\ntemplates_path = ['_templates']\nhtml_sidebars = {'index': ['tidelift-sidebar.html']}\n\n# Add support for inline tabs\nextensions += ['sphinx_inline_tabs']\n", "path": "docs/conf.py"}]} |
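The setuptools record above asks for side-by-side setup.py / setup.cfg examples, and its patch enables the sphinx-inline-tabs extension to render them. As a rough illustration of the kind of declarative pair the issue has in mind, here is a hypothetical package described both ways; the package name and dependencies are invented for the example.

```python
# setup.py -- imperative form of a hypothetical package
from setuptools import setup, find_packages

setup(
    name="example-pkg",
    version="0.1.0",
    packages=find_packages(),
    install_requires=["requests"],
)

# The equivalent declarative setup.cfg (INI syntax, shown as a comment so the
# example stays a single runnable Python file):
#
#   [metadata]
#   name = example-pkg
#   version = 0.1.0
#
#   [options]
#   packages = find:
#   install_requires =
#       requests
#
# with setup.py then reduced to:
#
#   from setuptools import setup
#   setup()
```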
gh_patches_debug_46606 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1744 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for multipage tiff files with the freeimage plugin
Currently, our Freeimage TIFF reader just reads the first plane of a multipage tiff and calls it a day. It would be good to figure out how to support multipage tiff properly. (Though tifffile handles it admirably, users might set freeimage for whatever reason, and be stuck with buggy code.)
See #1440 for a related issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/io/_plugins/pil_plugin.py`
Content:
```
1 __all__ = ['imread', 'imsave']
2
3 import numpy as np
4 from six import string_types
5 from PIL import Image
6
7 from ...util import img_as_ubyte, img_as_uint
8 from .tifffile_plugin import imread as tif_imread, imsave as tif_imsave
9
10
11 def imread(fname, dtype=None, img_num=None, **kwargs):
12 """Load an image from file.
13
14 Parameters
15 ----------
16 fname : str or file
17 File name or file-like-object.
18 dtype : numpy dtype object or string specifier
19 Specifies data type of array elements.
20 img_num : int, optional
21 Specifies which image to read in a file with multiple images
22 (zero-indexed).
23 kwargs : keyword pairs, optional
24 Addition keyword arguments to pass through (only applicable to Tiff
25 files for now, see `tifffile`'s `imread` function).
26
27 Notes
28 -----
29 Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support
30 many advanced image types including multi-page and floating point.
31
32 All other files are read using the Python Imaging Libary.
33 See PIL docs [2]_ for a list of supported formats.
34
35 References
36 ----------
37 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
38 .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
39
40 """
41 if hasattr(fname, 'lower') and dtype is None:
42 kwargs.setdefault('key', img_num)
43 if fname.lower().endswith(('.tiff', '.tif')):
44 return tif_imread(fname, **kwargs)
45
46 if isinstance(fname, string_types):
47 with open(fname, 'rb') as f:
48 im = Image.open(f)
49 return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
50 else:
51 im = Image.open(fname)
52 return pil_to_ndarray(im, dtype=dtype, img_num=img_num)
53
54
55 def pil_to_ndarray(im, dtype=None, img_num=None):
56 """Import a PIL Image object to an ndarray, in memory.
57
58 Parameters
59 ----------
60 Refer to ``imread``.
61
62 """
63 try:
64 # this will raise an IOError if the file is not readable
65 im.getdata()[0]
66 except IOError as e:
67 site = "http://pillow.readthedocs.org/en/latest/installation.html#external-libraries"
68 pillow_error_message = str(e)
69 error_message = ('Could not load "%s" \n'
70 'Reason: "%s"\n'
71 'Please see documentation at: %s'
72 % (im.filename, pillow_error_message, site))
73 raise ValueError(error_message)
74 frames = []
75 grayscale = None
76 i = 0
77 while 1:
78 try:
79 im.seek(i)
80 except EOFError:
81 break
82
83 frame = im
84
85 if img_num is not None and img_num != i:
86 im.getdata()[0]
87 i += 1
88 continue
89
90 if im.format == 'PNG' and im.mode == 'I' and dtype is None:
91 dtype = 'uint16'
92
93 if im.mode == 'P':
94 if grayscale is None:
95 grayscale = _palette_is_grayscale(im)
96
97 if grayscale:
98 frame = im.convert('L')
99 else:
100 frame = im.convert('RGB')
101
102 elif im.mode == '1':
103 frame = im.convert('L')
104
105 elif 'A' in im.mode:
106 frame = im.convert('RGBA')
107
108 elif im.mode == 'CMYK':
109 frame = im.convert('RGB')
110
111 if im.mode.startswith('I;16'):
112 shape = im.size
113 dtype = '>u2' if im.mode.endswith('B') else '<u2'
114 if 'S' in im.mode:
115 dtype = dtype.replace('u', 'i')
116 frame = np.fromstring(frame.tobytes(), dtype)
117 frame.shape = shape[::-1]
118
119 else:
120 frame = np.array(frame, dtype=dtype)
121
122 frames.append(frame)
123 i += 1
124
125 if img_num is not None:
126 break
127
128 if hasattr(im, 'fp') and im.fp:
129 im.fp.close()
130
131 if img_num is None and len(frames) > 1:
132 return np.array(frames)
133 elif frames:
134 return frames[0]
135 elif img_num:
136 raise IndexError('Could not find image #%s' % img_num)
137
138
139 def _palette_is_grayscale(pil_image):
140 """Return True if PIL image in palette mode is grayscale.
141
142 Parameters
143 ----------
144 pil_image : PIL image
145 PIL Image that is in Palette mode.
146
147 Returns
148 -------
149 is_grayscale : bool
150 True if all colors in image palette are gray.
151 """
152 assert pil_image.mode == 'P'
153 # get palette as an array with R, G, B columns
154 palette = np.asarray(pil_image.getpalette()).reshape((256, 3))
155 # Not all palette colors are used; unused colors have junk values.
156 start, stop = pil_image.getextrema()
157 valid_palette = palette[start:stop]
158 # Image is grayscale if channel differences (R - G and G - B)
159 # are all zero.
160 return np.allclose(np.diff(valid_palette), 0)
161
162
163 def ndarray_to_pil(arr, format_str=None):
164 """Export an ndarray to a PIL object.
165
166 Parameters
167 ----------
168 Refer to ``imsave``.
169
170 """
171 if arr.ndim == 3:
172 arr = img_as_ubyte(arr)
173 mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]
174
175 elif format_str in ['png', 'PNG']:
176 mode = 'I;16'
177 mode_base = 'I'
178
179 if arr.dtype.kind == 'f':
180 arr = img_as_uint(arr)
181
182 elif arr.max() < 256 and arr.min() >= 0:
183 arr = arr.astype(np.uint8)
184 mode = mode_base = 'L'
185
186 else:
187 arr = img_as_uint(arr)
188
189 else:
190 arr = img_as_ubyte(arr)
191 mode = 'L'
192 mode_base = 'L'
193
194 try:
195 array_buffer = arr.tobytes()
196 except AttributeError:
197 array_buffer = arr.tostring() # Numpy < 1.9
198
199 if arr.ndim == 2:
200 im = Image.new(mode_base, arr.T.shape)
201 try:
202 im.frombytes(array_buffer, 'raw', mode)
203 except AttributeError:
204 im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7
205 else:
206 image_shape = (arr.shape[1], arr.shape[0])
207 try:
208 im = Image.frombytes(mode, image_shape, array_buffer)
209 except AttributeError:
210 im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7
211 return im
212
213
214 def imsave(fname, arr, format_str=None, **kwargs):
215 """Save an image to disk.
216
217 Parameters
218 ----------
219 fname : str or file-like object
220 Name of destination file.
221 arr : ndarray of uint8 or float
222 Array (image) to save. Arrays of data-type uint8 should have
223 values in [0, 255], whereas floating-point arrays must be
224 in [0, 1].
225 format_str: str
226 Format to save as, this is defaulted to PNG if using a file-like
227 object; this will be derived from the extension if fname is a string
228 kwargs: dict
229 Keyword arguments to the Pillow save function (or tifffile save
230 function, for Tiff files). These are format dependent. For example,
231 Pillow's JPEG save function supports an integer ``quality`` argument
232 with values in [1, 95], while TIFFFile supports a ``compress``
233 integer argument with values in [0, 9].
234
235 Notes
236 -----
237 Tiff files are handled by Christophe Golhke's tifffile.py [1]_,
238 and support many advanced image types including multi-page and
239 floating point.
240
241 All other image formats use the Python Imaging Libary.
242 See PIL docs [2]_ for a list of other supported formats.
243 All images besides single channel PNGs are converted using `img_as_uint8`.
244 Single Channel PNGs have the following behavior:
245 - Integer values in [0, 255] and Boolean types -> img_as_uint8
246 - Floating point and other integers -> img_as_uint16
247
248 References
249 ----------
250 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
251 .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
252 """
253 # default to PNG if file-like object
254 if not isinstance(fname, string_types) and format_str is None:
255 format_str = "PNG"
256 # Check for png in filename
257 if (isinstance(fname, string_types)
258 and fname.lower().endswith(".png")):
259 format_str = "PNG"
260
261 arr = np.asanyarray(arr)
262
263 if arr.dtype.kind == 'b':
264 arr = arr.astype(np.uint8)
265
266 use_tif = False
267 if hasattr(fname, 'lower'):
268 if fname.lower().endswith(('.tiff', '.tif')):
269 use_tif = True
270 if format_str is not None:
271 if format_str.lower() in ['tiff', 'tif']:
272 use_tif = True
273
274 if use_tif:
275 tif_imsave(fname, arr, **kwargs)
276 return
277
278 if arr.ndim not in (2, 3):
279 raise ValueError("Invalid shape for image array: %s" % arr.shape)
280
281 if arr.ndim == 3:
282 if arr.shape[2] not in (3, 4):
283 raise ValueError("Invalid number of channels in image array.")
284
285 img = ndarray_to_pil(arr, format_str=format_str)
286 img.save(fname, format=format_str, **kwargs)
287
```
Path: `skimage/io/_io.py`
Content:
```
1 from io import BytesIO
2 import warnings
3
4 import numpy as np
5 import six
6
7 from ..io.manage_plugins import call_plugin
8 from ..color import rgb2grey
9 from .util import file_or_url_context
10 from ..exposure import is_low_contrast
11 from .._shared._warnings import all_warnings
12
13
14 __all__ = ['imread', 'imread_collection', 'imsave', 'imshow', 'show']
15
16
17 def imread(fname, as_grey=False, plugin=None, flatten=None,
18 **plugin_args):
19 """Load an image from file.
20
21 Parameters
22 ----------
23 fname : string
24 Image file name, e.g. ``test.jpg`` or URL.
25 as_grey : bool
26 If True, convert color images to grey-scale (32-bit floats).
27 Images that are already in grey-scale format are not converted.
28 plugin : str
29 Name of plugin to use (Python Imaging Library by default).
30
31 Other Parameters
32 ----------------
33 flatten : bool
34 Backward compatible keyword, superseded by `as_grey`.
35
36 Returns
37 -------
38 img_array : ndarray
39 The different colour bands/channels are stored in the
40 third dimension, such that a grey-image is MxN, an
41 RGB-image MxNx3 and an RGBA-image MxNx4.
42
43 Other parameters
44 ----------------
45 plugin_args : keywords
46 Passed to the given plugin.
47
48 """
49 # Backward compatibility
50 if flatten is not None:
51 as_grey = flatten
52
53 with file_or_url_context(fname) as fname:
54 img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
55
56 if not hasattr(img, 'ndim'):
57 return img
58
59 if img.ndim > 2:
60 if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):
61 img = np.swapaxes(img, -1, -3)
62 img = np.swapaxes(img, -2, -3)
63
64 if as_grey:
65 img = rgb2grey(img)
66
67 return img
68
69
70 def imread_collection(load_pattern, conserve_memory=True,
71 plugin=None, **plugin_args):
72 """
73 Load a collection of images.
74
75 Parameters
76 ----------
77 load_pattern : str or list
78 List of objects to load. These are usually filenames, but may
79 vary depending on the currently active plugin. See the docstring
80 for ``ImageCollection`` for the default behaviour of this parameter.
81 conserve_memory : bool, optional
82 If True, never keep more than one in memory at a specific
83 time. Otherwise, images will be cached once they are loaded.
84
85 Returns
86 -------
87 ic : ImageCollection
88 Collection of images.
89
90 Other parameters
91 ----------------
92 plugin_args : keywords
93 Passed to the given plugin.
94
95 """
96 return call_plugin('imread_collection', load_pattern, conserve_memory,
97 plugin=plugin, **plugin_args)
98
99
100 def imsave(fname, arr, plugin=None, **plugin_args):
101 """Save an image to file.
102
103 Parameters
104 ----------
105 fname : str
106 Target filename.
107 arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)
108 Image data.
109 plugin : str
110 Name of plugin to use. By default, the different plugins are
111 tried (starting with the Python Imaging Library) until a suitable
112 candidate is found.
113
114 Other parameters
115 ----------------
116 plugin_args : keywords
117 Passed to the given plugin.
118
119 """
120 if is_low_contrast(arr):
121 warnings.warn('%s is a low contrast image' % fname)
122 return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
123
124
125 def imshow(arr, plugin=None, **plugin_args):
126 """Display an image.
127
128 Parameters
129 ----------
130 arr : ndarray or str
131 Image data or name of image file.
132 plugin : str
133 Name of plugin to use. By default, the different plugins are
134 tried (starting with the Python Imaging Library) until a suitable
135 candidate is found.
136
137 Other parameters
138 ----------------
139 plugin_args : keywords
140 Passed to the given plugin.
141
142 """
143 if isinstance(arr, six.string_types):
144 arr = call_plugin('imread', arr, plugin=plugin)
145 return call_plugin('imshow', arr, plugin=plugin, **plugin_args)
146
147
148 def show():
149 '''Display pending images.
150
151 Launch the event loop of the current gui plugin, and display all
152 pending images, queued via `imshow`. This is required when using
153 `imshow` from non-interactive scripts.
154
155 A call to `show` will block execution of code until all windows
156 have been closed.
157
158 Examples
159 --------
160 >>> import skimage.io as io
161
162 >>> for i in range(4):
163 ... ax_im = io.imshow(np.random.rand(50, 50))
164 >>> io.show() # doctest: +SKIP
165
166 '''
167 return call_plugin('_app_show')
168
```
Path: `skimage/io/_plugins/tifffile_plugin.py`
Content:
```
1 from ...external.tifffile import TiffFile, imsave
2
3
4 def imread(fname, dtype=None, **kwargs):
5 """Load a tiff image from file.
6
7 Parameters
8 ----------
9 fname : str or file
10 File name or file-like-object.
11 dtype : numpy dtype object or string specifier
12 Specifies data type of array elements (Not currently used).
13 kwargs : keyword pairs, optional
14 Additional keyword arguments to pass through (see ``tifffile``'s
15 ``imread`` function).
16
17 Notes
18 -----
19 Provided by Christophe Golhke's tifffile.py [1]_, and supports many
20 advanced image types including multi-page and floating point.
21
22 References
23 ----------
24 .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py
25
26 """
27 with open(fname, 'rb') as f:
28 tif = TiffFile(f)
29 return tif.asarray(**kwargs)
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/io/_io.py b/skimage/io/_io.py
--- a/skimage/io/_io.py
+++ b/skimage/io/_io.py
@@ -26,7 +26,10 @@
If True, convert color images to grey-scale (32-bit floats).
Images that are already in grey-scale format are not converted.
plugin : str
- Name of plugin to use (Python Imaging Library by default).
+ Name of plugin to use. By default, the different plugins are
+ tried (starting with the Python Imaging Library) until a suitable
+ candidate is found. If not given and fname is a tiff file, the
+ tifffile plugin will be used.
Other Parameters
----------------
@@ -50,6 +53,10 @@
if flatten is not None:
as_grey = flatten
+ if plugin is None and hasattr(fname, 'lower'):
+ if fname.lower().endswith(('.tiff', '.tif')):
+ plugin = 'tifffile'
+
with file_or_url_context(fname) as fname:
img = call_plugin('imread', fname, plugin=plugin, **plugin_args)
@@ -109,7 +116,8 @@
plugin : str
Name of plugin to use. By default, the different plugins are
tried (starting with the Python Imaging Library) until a suitable
- candidate is found.
+ candidate is found. If not given and fname is a tiff file, the
+ tifffile plugin will be used.
Other parameters
----------------
@@ -117,6 +125,9 @@
Passed to the given plugin.
"""
+ if plugin is None and hasattr(fname, 'lower'):
+ if fname.lower().endswith(('.tiff', '.tif')):
+ plugin = 'tifffile'
if is_low_contrast(arr):
warnings.warn('%s is a low contrast image' % fname)
return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)
diff --git a/skimage/io/_plugins/pil_plugin.py b/skimage/io/_plugins/pil_plugin.py
--- a/skimage/io/_plugins/pil_plugin.py
+++ b/skimage/io/_plugins/pil_plugin.py
@@ -5,7 +5,6 @@
from PIL import Image
from ...util import img_as_ubyte, img_as_uint
-from .tifffile_plugin import imread as tif_imread, imsave as tif_imsave
def imread(fname, dtype=None, img_num=None, **kwargs):
@@ -21,28 +20,17 @@
Specifies which image to read in a file with multiple images
(zero-indexed).
kwargs : keyword pairs, optional
- Addition keyword arguments to pass through (only applicable to Tiff
- files for now, see `tifffile`'s `imread` function).
+ Addition keyword arguments to pass through.
Notes
-----
- Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support
- many advanced image types including multi-page and floating point.
-
- All other files are read using the Python Imaging Libary.
- See PIL docs [2]_ for a list of supported formats.
+ Files are read using the Python Imaging Libary.
+ See PIL docs [1]_ for a list of supported formats.
References
----------
- .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
- .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
-
+ .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
- if hasattr(fname, 'lower') and dtype is None:
- kwargs.setdefault('key', img_num)
- if fname.lower().endswith(('.tiff', '.tif')):
- return tif_imread(fname, **kwargs)
-
if isinstance(fname, string_types):
with open(fname, 'rb') as f:
im = Image.open(f)
@@ -234,12 +222,8 @@
Notes
-----
- Tiff files are handled by Christophe Golhke's tifffile.py [1]_,
- and support many advanced image types including multi-page and
- floating point.
-
- All other image formats use the Python Imaging Libary.
- See PIL docs [2]_ for a list of other supported formats.
+ Use the Python Imaging Libary.
+ See PIL docs [1]_ for a list of other supported formats.
All images besides single channel PNGs are converted using `img_as_uint8`.
Single Channel PNGs have the following behavior:
- Integer values in [0, 255] and Boolean types -> img_as_uint8
@@ -247,8 +231,7 @@
References
----------
- .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html
- .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
+ .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html
"""
# default to PNG if file-like object
if not isinstance(fname, string_types) and format_str is None:
@@ -263,18 +246,6 @@
if arr.dtype.kind == 'b':
arr = arr.astype(np.uint8)
- use_tif = False
- if hasattr(fname, 'lower'):
- if fname.lower().endswith(('.tiff', '.tif')):
- use_tif = True
- if format_str is not None:
- if format_str.lower() in ['tiff', 'tif']:
- use_tif = True
-
- if use_tif:
- tif_imsave(fname, arr, **kwargs)
- return
-
if arr.ndim not in (2, 3):
raise ValueError("Invalid shape for image array: %s" % arr.shape)
diff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py
--- a/skimage/io/_plugins/tifffile_plugin.py
+++ b/skimage/io/_plugins/tifffile_plugin.py
@@ -24,6 +24,8 @@
.. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py
"""
+ if 'img_num' in kwargs:
+ kwargs['key'] = kwargs.pop('img_num')
with open(fname, 'rb') as f:
tif = TiffFile(f)
return tif.asarray(**kwargs)
| {"golden_diff": "diff --git a/skimage/io/_io.py b/skimage/io/_io.py\n--- a/skimage/io/_io.py\n+++ b/skimage/io/_io.py\n@@ -26,7 +26,10 @@\n If True, convert color images to grey-scale (32-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n- Name of plugin to use (Python Imaging Library by default).\n+ Name of plugin to use. By default, the different plugins are\n+ tried (starting with the Python Imaging Library) until a suitable\n+ candidate is found. If not given and fname is a tiff file, the\n+ tifffile plugin will be used.\n \n Other Parameters\n ----------------\n@@ -50,6 +53,10 @@\n if flatten is not None:\n as_grey = flatten\n \n+ if plugin is None and hasattr(fname, 'lower'):\n+ if fname.lower().endswith(('.tiff', '.tif')):\n+ plugin = 'tifffile'\n+\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n \n@@ -109,7 +116,8 @@\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n- candidate is found.\n+ candidate is found. If not given and fname is a tiff file, the\n+ tifffile plugin will be used.\n \n Other parameters\n ----------------\n@@ -117,6 +125,9 @@\n Passed to the given plugin.\n \n \"\"\"\n+ if plugin is None and hasattr(fname, 'lower'):\n+ if fname.lower().endswith(('.tiff', '.tif')):\n+ plugin = 'tifffile'\n if is_low_contrast(arr):\n warnings.warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\ndiff --git a/skimage/io/_plugins/pil_plugin.py b/skimage/io/_plugins/pil_plugin.py\n--- a/skimage/io/_plugins/pil_plugin.py\n+++ b/skimage/io/_plugins/pil_plugin.py\n@@ -5,7 +5,6 @@\n from PIL import Image\n \n from ...util import img_as_ubyte, img_as_uint\n-from .tifffile_plugin import imread as tif_imread, imsave as tif_imsave\n \n \n def imread(fname, dtype=None, img_num=None, **kwargs):\n@@ -21,28 +20,17 @@\n Specifies which image to read in a file with multiple images\n (zero-indexed).\n kwargs : keyword pairs, optional\n- Addition keyword arguments to pass through (only applicable to Tiff\n- files for now, see `tifffile`'s `imread` function).\n+ Addition keyword arguments to pass through.\n \n Notes\n -----\n- Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support\n- many advanced image types including multi-page and floating point.\n-\n- All other files are read using the Python Imaging Libary.\n- See PIL docs [2]_ for a list of supported formats.\n+ Files are read using the Python Imaging Libary.\n+ See PIL docs [1]_ for a list of supported formats.\n \n References\n ----------\n- .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n- .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n-\n+ .. 
[1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n- if hasattr(fname, 'lower') and dtype is None:\n- kwargs.setdefault('key', img_num)\n- if fname.lower().endswith(('.tiff', '.tif')):\n- return tif_imread(fname, **kwargs)\n-\n if isinstance(fname, string_types):\n with open(fname, 'rb') as f:\n im = Image.open(f)\n@@ -234,12 +222,8 @@\n \n Notes\n -----\n- Tiff files are handled by Christophe Golhke's tifffile.py [1]_,\n- and support many advanced image types including multi-page and\n- floating point.\n-\n- All other image formats use the Python Imaging Libary.\n- See PIL docs [2]_ for a list of other supported formats.\n+ Use the Python Imaging Libary.\n+ See PIL docs [1]_ for a list of other supported formats.\n All images besides single channel PNGs are converted using `img_as_uint8`.\n Single Channel PNGs have the following behavior:\n - Integer values in [0, 255] and Boolean types -> img_as_uint8\n@@ -247,8 +231,7 @@\n \n References\n ----------\n- .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n- .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n+ .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n # default to PNG if file-like object\n if not isinstance(fname, string_types) and format_str is None:\n@@ -263,18 +246,6 @@\n if arr.dtype.kind == 'b':\n arr = arr.astype(np.uint8)\n \n- use_tif = False\n- if hasattr(fname, 'lower'):\n- if fname.lower().endswith(('.tiff', '.tif')):\n- use_tif = True\n- if format_str is not None:\n- if format_str.lower() in ['tiff', 'tif']:\n- use_tif = True\n-\n- if use_tif:\n- tif_imsave(fname, arr, **kwargs)\n- return\n-\n if arr.ndim not in (2, 3):\n raise ValueError(\"Invalid shape for image array: %s\" % arr.shape)\n \ndiff --git a/skimage/io/_plugins/tifffile_plugin.py b/skimage/io/_plugins/tifffile_plugin.py\n--- a/skimage/io/_plugins/tifffile_plugin.py\n+++ b/skimage/io/_plugins/tifffile_plugin.py\n@@ -24,6 +24,8 @@\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n \n \"\"\"\n+ if 'img_num' in kwargs:\n+ kwargs['key'] = kwargs.pop('img_num')\n with open(fname, 'rb') as f:\n tif = TiffFile(f)\n return tif.asarray(**kwargs)\n", "issue": "Add support for multipage tiff files with the freeimage plugin\nCurrently, our Freeimage TIFF reader just reads the first plane of a multipage tiff and calls it a day. It would be good to figure out how to support multipage tiff properly. 
(Though tifffile handles it admirably, users might set freeimage for whatever reason, and be stuck with buggy code.)\n\nSee #1440 for a related issue.\n\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nimport numpy as np\nfrom six import string_types\nfrom PIL import Image\n\nfrom ...util import img_as_ubyte, img_as_uint\nfrom .tifffile_plugin import imread as tif_imread, imsave as tif_imsave\n\n\ndef imread(fname, dtype=None, img_num=None, **kwargs):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements.\n img_num : int, optional\n Specifies which image to read in a file with multiple images\n (zero-indexed).\n kwargs : keyword pairs, optional\n Addition keyword arguments to pass through (only applicable to Tiff\n files for now, see `tifffile`'s `imread` function).\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_, and support\n many advanced image types including multi-page and floating point.\n\n All other files are read using the Python Imaging Libary.\n See PIL docs [2]_ for a list of supported formats.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. [2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n\n \"\"\"\n if hasattr(fname, 'lower') and dtype is None:\n kwargs.setdefault('key', img_num)\n if fname.lower().endswith(('.tiff', '.tif')):\n return tif_imread(fname, **kwargs)\n\n if isinstance(fname, string_types):\n with open(fname, 'rb') as f:\n im = Image.open(f)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n else:\n im = Image.open(fname)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n\n\ndef pil_to_ndarray(im, dtype=None, img_num=None):\n \"\"\"Import a PIL Image object to an ndarray, in memory.\n\n Parameters\n ----------\n Refer to ``imread``.\n\n \"\"\"\n try:\n # this will raise an IOError if the file is not readable\n im.getdata()[0]\n except IOError as e:\n site = \"http://pillow.readthedocs.org/en/latest/installation.html#external-libraries\"\n pillow_error_message = str(e)\n error_message = ('Could not load \"%s\" \\n'\n 'Reason: \"%s\"\\n'\n 'Please see documentation at: %s'\n % (im.filename, pillow_error_message, site))\n raise ValueError(error_message)\n frames = []\n grayscale = None\n i = 0\n while 1:\n try:\n im.seek(i)\n except EOFError:\n break\n\n frame = im\n\n if img_num is not None and img_num != i:\n im.getdata()[0]\n i += 1\n continue\n\n if im.format == 'PNG' and im.mode == 'I' and dtype is None:\n dtype = 'uint16'\n\n if im.mode == 'P':\n if grayscale is None:\n grayscale = _palette_is_grayscale(im)\n\n if grayscale:\n frame = im.convert('L')\n else:\n frame = im.convert('RGB')\n\n elif im.mode == '1':\n frame = im.convert('L')\n\n elif 'A' in im.mode:\n frame = im.convert('RGBA')\n\n elif im.mode == 'CMYK':\n frame = im.convert('RGB')\n\n if im.mode.startswith('I;16'):\n shape = im.size\n dtype = '>u2' if im.mode.endswith('B') else '<u2'\n if 'S' in im.mode:\n dtype = dtype.replace('u', 'i')\n frame = np.fromstring(frame.tobytes(), dtype)\n frame.shape = shape[::-1]\n\n else:\n frame = np.array(frame, dtype=dtype)\n\n frames.append(frame)\n i += 1\n\n if img_num is not None:\n break\n\n if hasattr(im, 'fp') and im.fp:\n im.fp.close()\n\n if img_num is None and len(frames) > 1:\n return np.array(frames)\n elif frames:\n return frames[0]\n elif img_num:\n raise 
IndexError('Could not find image #%s' % img_num)\n\n\ndef _palette_is_grayscale(pil_image):\n \"\"\"Return True if PIL image in palette mode is grayscale.\n\n Parameters\n ----------\n pil_image : PIL image\n PIL Image that is in Palette mode.\n\n Returns\n -------\n is_grayscale : bool\n True if all colors in image palette are gray.\n \"\"\"\n assert pil_image.mode == 'P'\n # get palette as an array with R, G, B columns\n palette = np.asarray(pil_image.getpalette()).reshape((256, 3))\n # Not all palette colors are used; unused colors have junk values.\n start, stop = pil_image.getextrema()\n valid_palette = palette[start:stop]\n # Image is grayscale if channel differences (R - G and G - B)\n # are all zero.\n return np.allclose(np.diff(valid_palette), 0)\n\n\ndef ndarray_to_pil(arr, format_str=None):\n \"\"\"Export an ndarray to a PIL object.\n\n Parameters\n ----------\n Refer to ``imsave``.\n\n \"\"\"\n if arr.ndim == 3:\n arr = img_as_ubyte(arr)\n mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]\n\n elif format_str in ['png', 'PNG']:\n mode = 'I;16'\n mode_base = 'I'\n\n if arr.dtype.kind == 'f':\n arr = img_as_uint(arr)\n\n elif arr.max() < 256 and arr.min() >= 0:\n arr = arr.astype(np.uint8)\n mode = mode_base = 'L'\n\n else:\n arr = img_as_uint(arr)\n\n else:\n arr = img_as_ubyte(arr)\n mode = 'L'\n mode_base = 'L'\n\n try:\n array_buffer = arr.tobytes()\n except AttributeError:\n array_buffer = arr.tostring() # Numpy < 1.9\n\n if arr.ndim == 2:\n im = Image.new(mode_base, arr.T.shape)\n try:\n im.frombytes(array_buffer, 'raw', mode)\n except AttributeError:\n im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7\n else:\n image_shape = (arr.shape[1], arr.shape[0])\n try:\n im = Image.frombytes(mode, image_shape, array_buffer)\n except AttributeError:\n im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7\n return im\n\n\ndef imsave(fname, arr, format_str=None, **kwargs):\n \"\"\"Save an image to disk.\n\n Parameters\n ----------\n fname : str or file-like object\n Name of destination file.\n arr : ndarray of uint8 or float\n Array (image) to save. Arrays of data-type uint8 should have\n values in [0, 255], whereas floating-point arrays must be\n in [0, 1].\n format_str: str\n Format to save as, this is defaulted to PNG if using a file-like\n object; this will be derived from the extension if fname is a string\n kwargs: dict\n Keyword arguments to the Pillow save function (or tifffile save\n function, for Tiff files). These are format dependent. For example,\n Pillow's JPEG save function supports an integer ``quality`` argument\n with values in [1, 95], while TIFFFile supports a ``compress``\n integer argument with values in [0, 9].\n\n Notes\n -----\n Tiff files are handled by Christophe Golhke's tifffile.py [1]_,\n and support many advanced image types including multi-page and\n floating point.\n\n All other image formats use the Python Imaging Libary.\n See PIL docs [2]_ for a list of other supported formats.\n All images besides single channel PNGs are converted using `img_as_uint8`.\n Single Channel PNGs have the following behavior:\n - Integer values in [0, 255] and Boolean types -> img_as_uint8\n - Floating point and other integers -> img_as_uint16\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py.html\n .. 
[2] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n # default to PNG if file-like object\n if not isinstance(fname, string_types) and format_str is None:\n format_str = \"PNG\"\n # Check for png in filename\n if (isinstance(fname, string_types)\n and fname.lower().endswith(\".png\")):\n format_str = \"PNG\"\n\n arr = np.asanyarray(arr)\n\n if arr.dtype.kind == 'b':\n arr = arr.astype(np.uint8)\n\n use_tif = False\n if hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n use_tif = True\n if format_str is not None:\n if format_str.lower() in ['tiff', 'tif']:\n use_tif = True\n\n if use_tif:\n tif_imsave(fname, arr, **kwargs)\n return\n\n if arr.ndim not in (2, 3):\n raise ValueError(\"Invalid shape for image array: %s\" % arr.shape)\n\n if arr.ndim == 3:\n if arr.shape[2] not in (3, 4):\n raise ValueError(\"Invalid number of channels in image array.\")\n\n img = ndarray_to_pil(arr, format_str=format_str)\n img.save(fname, format=format_str, **kwargs)\n", "path": "skimage/io/_plugins/pil_plugin.py"}, {"content": "from io import BytesIO\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom ..io.manage_plugins import call_plugin\nfrom ..color import rgb2grey\nfrom .util import file_or_url_context\nfrom ..exposure import is_low_contrast\nfrom .._shared._warnings import all_warnings\n\n\n__all__ = ['imread', 'imread_collection', 'imsave', 'imshow', 'show']\n\n\ndef imread(fname, as_grey=False, plugin=None, flatten=None,\n **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_grey : bool\n If True, convert color images to grey-scale (32-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n Name of plugin to use (Python Imaging Library by default).\n\n Other Parameters\n ----------------\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n\n Returns\n -------\n img_array : ndarray\n The different colour bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n as_grey = flatten\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if not hasattr(img, 'ndim'):\n return img\n\n if img.ndim > 2:\n if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n img = np.swapaxes(img, -1, -3)\n img = np.swapaxes(img, -2, -3)\n\n if as_grey:\n img = rgb2grey(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if is_low_contrast(arr):\n warnings.warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, six.string_types):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> for i in range(4):\n ... ax_im = io.imshow(np.random.rand(50, 50))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}, {"content": "from ...external.tifffile import TiffFile, imsave\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. 
[1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n with open(fname, 'rb') as f:\n tif = TiffFile(f)\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}], "after_files": [{"content": "__all__ = ['imread', 'imsave']\n\nimport numpy as np\nfrom six import string_types\nfrom PIL import Image\n\nfrom ...util import img_as_ubyte, img_as_uint\n\n\ndef imread(fname, dtype=None, img_num=None, **kwargs):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements.\n img_num : int, optional\n Specifies which image to read in a file with multiple images\n (zero-indexed).\n kwargs : keyword pairs, optional\n Addition keyword arguments to pass through.\n\n Notes\n -----\n Files are read using the Python Imaging Libary.\n See PIL docs [1]_ for a list of supported formats.\n\n References\n ----------\n .. [1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n if isinstance(fname, string_types):\n with open(fname, 'rb') as f:\n im = Image.open(f)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n else:\n im = Image.open(fname)\n return pil_to_ndarray(im, dtype=dtype, img_num=img_num)\n\n\ndef pil_to_ndarray(im, dtype=None, img_num=None):\n \"\"\"Import a PIL Image object to an ndarray, in memory.\n\n Parameters\n ----------\n Refer to ``imread``.\n\n \"\"\"\n try:\n # this will raise an IOError if the file is not readable\n im.getdata()[0]\n except IOError as e:\n site = \"http://pillow.readthedocs.org/en/latest/installation.html#external-libraries\"\n pillow_error_message = str(e)\n error_message = ('Could not load \"%s\" \\n'\n 'Reason: \"%s\"\\n'\n 'Please see documentation at: %s'\n % (im.filename, pillow_error_message, site))\n raise ValueError(error_message)\n frames = []\n grayscale = None\n i = 0\n while 1:\n try:\n im.seek(i)\n except EOFError:\n break\n\n frame = im\n\n if img_num is not None and img_num != i:\n im.getdata()[0]\n i += 1\n continue\n\n if im.format == 'PNG' and im.mode == 'I' and dtype is None:\n dtype = 'uint16'\n\n if im.mode == 'P':\n if grayscale is None:\n grayscale = _palette_is_grayscale(im)\n\n if grayscale:\n frame = im.convert('L')\n else:\n frame = im.convert('RGB')\n\n elif im.mode == '1':\n frame = im.convert('L')\n\n elif 'A' in im.mode:\n frame = im.convert('RGBA')\n\n elif im.mode == 'CMYK':\n frame = im.convert('RGB')\n\n if im.mode.startswith('I;16'):\n shape = im.size\n dtype = '>u2' if im.mode.endswith('B') else '<u2'\n if 'S' in im.mode:\n dtype = dtype.replace('u', 'i')\n frame = np.fromstring(frame.tobytes(), dtype)\n frame.shape = shape[::-1]\n\n else:\n frame = np.array(frame, dtype=dtype)\n\n frames.append(frame)\n i += 1\n\n if img_num is not None:\n break\n\n if hasattr(im, 'fp') and im.fp:\n im.fp.close()\n\n if img_num is None and len(frames) > 1:\n return np.array(frames)\n elif frames:\n return frames[0]\n elif img_num:\n raise IndexError('Could not find image #%s' % img_num)\n\n\ndef _palette_is_grayscale(pil_image):\n \"\"\"Return True if PIL image in palette mode is grayscale.\n\n Parameters\n ----------\n pil_image : PIL image\n PIL Image that is in Palette mode.\n\n Returns\n -------\n is_grayscale : bool\n True if all colors in image palette are gray.\n \"\"\"\n assert pil_image.mode == 'P'\n # get palette as an array with R, G, B columns\n palette = np.asarray(pil_image.getpalette()).reshape((256, 3))\n # Not all 
palette colors are used; unused colors have junk values.\n start, stop = pil_image.getextrema()\n valid_palette = palette[start:stop]\n # Image is grayscale if channel differences (R - G and G - B)\n # are all zero.\n return np.allclose(np.diff(valid_palette), 0)\n\n\ndef ndarray_to_pil(arr, format_str=None):\n \"\"\"Export an ndarray to a PIL object.\n\n Parameters\n ----------\n Refer to ``imsave``.\n\n \"\"\"\n if arr.ndim == 3:\n arr = img_as_ubyte(arr)\n mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]\n\n elif format_str in ['png', 'PNG']:\n mode = 'I;16'\n mode_base = 'I'\n\n if arr.dtype.kind == 'f':\n arr = img_as_uint(arr)\n\n elif arr.max() < 256 and arr.min() >= 0:\n arr = arr.astype(np.uint8)\n mode = mode_base = 'L'\n\n else:\n arr = img_as_uint(arr)\n\n else:\n arr = img_as_ubyte(arr)\n mode = 'L'\n mode_base = 'L'\n\n try:\n array_buffer = arr.tobytes()\n except AttributeError:\n array_buffer = arr.tostring() # Numpy < 1.9\n\n if arr.ndim == 2:\n im = Image.new(mode_base, arr.T.shape)\n try:\n im.frombytes(array_buffer, 'raw', mode)\n except AttributeError:\n im.fromstring(array_buffer, 'raw', mode) # PIL 1.1.7\n else:\n image_shape = (arr.shape[1], arr.shape[0])\n try:\n im = Image.frombytes(mode, image_shape, array_buffer)\n except AttributeError:\n im = Image.fromstring(mode, image_shape, array_buffer) # PIL 1.1.7\n return im\n\n\ndef imsave(fname, arr, format_str=None, **kwargs):\n \"\"\"Save an image to disk.\n\n Parameters\n ----------\n fname : str or file-like object\n Name of destination file.\n arr : ndarray of uint8 or float\n Array (image) to save. Arrays of data-type uint8 should have\n values in [0, 255], whereas floating-point arrays must be\n in [0, 1].\n format_str: str\n Format to save as, this is defaulted to PNG if using a file-like\n object; this will be derived from the extension if fname is a string\n kwargs: dict\n Keyword arguments to the Pillow save function (or tifffile save\n function, for Tiff files). These are format dependent. For example,\n Pillow's JPEG save function supports an integer ``quality`` argument\n with values in [1, 95], while TIFFFile supports a ``compress``\n integer argument with values in [0, 9].\n\n Notes\n -----\n Use the Python Imaging Libary.\n See PIL docs [1]_ for a list of other supported formats.\n All images besides single channel PNGs are converted using `img_as_uint8`.\n Single Channel PNGs have the following behavior:\n - Integer values in [0, 255] and Boolean types -> img_as_uint8\n - Floating point and other integers -> img_as_uint16\n\n References\n ----------\n .. 
[1] http://pillow.readthedocs.org/en/latest/handbook/image-file-formats.html\n \"\"\"\n # default to PNG if file-like object\n if not isinstance(fname, string_types) and format_str is None:\n format_str = \"PNG\"\n # Check for png in filename\n if (isinstance(fname, string_types)\n and fname.lower().endswith(\".png\")):\n format_str = \"PNG\"\n\n arr = np.asanyarray(arr)\n\n if arr.dtype.kind == 'b':\n arr = arr.astype(np.uint8)\n\n if arr.ndim not in (2, 3):\n raise ValueError(\"Invalid shape for image array: %s\" % arr.shape)\n\n if arr.ndim == 3:\n if arr.shape[2] not in (3, 4):\n raise ValueError(\"Invalid number of channels in image array.\")\n\n img = ndarray_to_pil(arr, format_str=format_str)\n img.save(fname, format=format_str, **kwargs)\n", "path": "skimage/io/_plugins/pil_plugin.py"}, {"content": "from io import BytesIO\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom ..io.manage_plugins import call_plugin\nfrom ..color import rgb2grey\nfrom .util import file_or_url_context\nfrom ..exposure import is_low_contrast\nfrom .._shared._warnings import all_warnings\n\n\n__all__ = ['imread', 'imread_collection', 'imsave', 'imshow', 'show']\n\n\ndef imread(fname, as_grey=False, plugin=None, flatten=None,\n **plugin_args):\n \"\"\"Load an image from file.\n\n Parameters\n ----------\n fname : string\n Image file name, e.g. ``test.jpg`` or URL.\n as_grey : bool\n If True, convert color images to grey-scale (32-bit floats).\n Images that are already in grey-scale format are not converted.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other Parameters\n ----------------\n flatten : bool\n Backward compatible keyword, superseded by `as_grey`.\n\n Returns\n -------\n img_array : ndarray\n The different colour bands/channels are stored in the\n third dimension, such that a grey-image is MxN, an\n RGB-image MxNx3 and an RGBA-image MxNx4.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n # Backward compatibility\n if flatten is not None:\n as_grey = flatten\n\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n\n with file_or_url_context(fname) as fname:\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\n\n if not hasattr(img, 'ndim'):\n return img\n\n if img.ndim > 2:\n if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):\n img = np.swapaxes(img, -1, -3)\n img = np.swapaxes(img, -2, -3)\n\n if as_grey:\n img = rgb2grey(img)\n\n return img\n\n\ndef imread_collection(load_pattern, conserve_memory=True,\n plugin=None, **plugin_args):\n \"\"\"\n Load a collection of images.\n\n Parameters\n ----------\n load_pattern : str or list\n List of objects to load. These are usually filenames, but may\n vary depending on the currently active plugin. See the docstring\n for ``ImageCollection`` for the default behaviour of this parameter.\n conserve_memory : bool, optional\n If True, never keep more than one in memory at a specific\n time. 
Otherwise, images will be cached once they are loaded.\n\n Returns\n -------\n ic : ImageCollection\n Collection of images.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n return call_plugin('imread_collection', load_pattern, conserve_memory,\n plugin=plugin, **plugin_args)\n\n\ndef imsave(fname, arr, plugin=None, **plugin_args):\n \"\"\"Save an image to file.\n\n Parameters\n ----------\n fname : str\n Target filename.\n arr : ndarray of shape (M,N) or (M,N,3) or (M,N,4)\n Image data.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found. If not given and fname is a tiff file, the\n tifffile plugin will be used.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if plugin is None and hasattr(fname, 'lower'):\n if fname.lower().endswith(('.tiff', '.tif')):\n plugin = 'tifffile'\n if is_low_contrast(arr):\n warnings.warn('%s is a low contrast image' % fname)\n return call_plugin('imsave', fname, arr, plugin=plugin, **plugin_args)\n\n\ndef imshow(arr, plugin=None, **plugin_args):\n \"\"\"Display an image.\n\n Parameters\n ----------\n arr : ndarray or str\n Image data or name of image file.\n plugin : str\n Name of plugin to use. By default, the different plugins are\n tried (starting with the Python Imaging Library) until a suitable\n candidate is found.\n\n Other parameters\n ----------------\n plugin_args : keywords\n Passed to the given plugin.\n\n \"\"\"\n if isinstance(arr, six.string_types):\n arr = call_plugin('imread', arr, plugin=plugin)\n return call_plugin('imshow', arr, plugin=plugin, **plugin_args)\n\n\ndef show():\n '''Display pending images.\n\n Launch the event loop of the current gui plugin, and display all\n pending images, queued via `imshow`. This is required when using\n `imshow` from non-interactive scripts.\n\n A call to `show` will block execution of code until all windows\n have been closed.\n\n Examples\n --------\n >>> import skimage.io as io\n\n >>> for i in range(4):\n ... ax_im = io.imshow(np.random.rand(50, 50))\n >>> io.show() # doctest: +SKIP\n\n '''\n return call_plugin('_app_show')\n", "path": "skimage/io/_io.py"}, {"content": "from ...external.tifffile import TiffFile, imsave\n\n\ndef imread(fname, dtype=None, **kwargs):\n \"\"\"Load a tiff image from file.\n\n Parameters\n ----------\n fname : str or file\n File name or file-like-object.\n dtype : numpy dtype object or string specifier\n Specifies data type of array elements (Not currently used).\n kwargs : keyword pairs, optional\n Additional keyword arguments to pass through (see ``tifffile``'s\n ``imread`` function).\n\n Notes\n -----\n Provided by Christophe Golhke's tifffile.py [1]_, and supports many\n advanced image types including multi-page and floating point.\n\n References\n ----------\n .. [1] http://www.lfd.uci.edu/~gohlke/code/tifffile.py\n\n \"\"\"\n if 'img_num' in kwargs:\n kwargs['key'] = kwargs.pop('img_num')\n with open(fname, 'rb') as f:\n tif = TiffFile(f)\n return tif.asarray(**kwargs)\n", "path": "skimage/io/_plugins/tifffile_plugin.py"}]} |
gh_patches_debug_34957 | rasdani/github-patches | git_diff | ckan__ckan-894 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
datastore plugin pgsql check on datastore DB, not main DB
I ran into a confusing situation here, getting "We detected that you do not use a PostgreSQL database. The DataStore will NOT work and DataStore tests will be skipped." I had the datastore URL as postgres://, so I tried changing it to postgresql:// and got the same error. I googled, found PR 359, which says it's looking for 'postgresql', got more confused, and looked through the code ...
I finally realized the code is checking the sqlalchemy.url, _not_ the datastore URL. This doesn't seem to be the right behavior, nor does it match the error message, so I made a simple patch that applies the same drivername startswith check to the datastore write_url directly, which seems like the least invasive way to make the behavior/error more understandable.
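For illustration, this is roughly the kind of check I mean — a sketch only, not the actual patch; the helper name is mine and I'm assuming SQLAlchemy's `make_url` is available for parsing the URL:

```python
# Sketch: look at the DataStore write URL's driver instead of sqlalchemy.url.
from sqlalchemy.engine.url import make_url

def datastore_write_url_is_pg(config):
    # 'ckan.datastore.write_url' is the same config key the plugin already reads.
    write_url = config['ckan.datastore.write_url']
    # All PostgreSQL driver names start with 'postgresql'.
    return make_url(write_url).drivername.startswith('postgresql')
```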
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/model/meta.py`
Content:
```
1 import datetime
2
3 from paste.deploy.converters import asbool
4 from pylons import config
5 """SQLAlchemy Metadata and Session object"""
6 from sqlalchemy import MetaData, and_
7 import sqlalchemy.orm as orm
8 from sqlalchemy.orm.session import SessionExtension
9
10 import extension
11 import ckan.lib.activity_streams_session_extension as activity
12
13 __all__ = ['Session', 'engine_is_sqlite', 'engine_is_pg']
14
15
16 class CkanCacheExtension(SessionExtension):
17 ''' This extension checks what tables have been affected by
18 database access and allows us to act on them. Currently this is
19 used by the page cache to flush the cache when data in the database
20 is altered. '''
21
22 def __init__(self, *args, **kw):
23 super(CkanCacheExtension, self).__init__(*args, **kw)
24 # Setup Redis support if needed.
25 self.use_redis = asbool(config.get('ckan.page_cache_enabled'))
26 if self.use_redis:
27 import redis
28 self.redis = redis
29 self.redis_connection is None
30 self.redis_exception = redis.exceptions.ConnectionError
31
32 def after_commit(self, session):
33 if hasattr(session, '_object_cache'):
34 oc = session._object_cache
35 oc_list = oc['new']
36 oc_list.update(oc['changed'])
37 oc_list.update(oc['deleted'])
38 objs = set()
39 for item in oc_list:
40 objs.add(item.__class__.__name__)
41
42 # Flush Redis
43 if self.use_redis:
44 if self.redis_connection is None:
45 try:
46 self.redis_connection = self.redis.StrictRedis()
47 except self.redis_exception:
48 pass
49 try:
50 self.redis_connection.flushdb()
51 except self.redis_exception:
52 pass
53
54 class CkanSessionExtension(SessionExtension):
55
56 def before_flush(self, session, flush_context, instances):
57 if not hasattr(session, '_object_cache'):
58 session._object_cache= {'new': set(),
59 'deleted': set(),
60 'changed': set()}
61
62 changed = [obj for obj in session.dirty if
63 session.is_modified(obj, include_collections=False, passive=True)]
64
65 session._object_cache['new'].update(session.new)
66 session._object_cache['deleted'].update(session.deleted)
67 session._object_cache['changed'].update(changed)
68
69
70 def before_commit(self, session):
71 session.flush()
72 try:
73 obj_cache = session._object_cache
74 revision = session.revision
75 except AttributeError:
76 return
77 if getattr(session, 'revisioning_disabled', False):
78 return
79 new = obj_cache['new']
80 changed = obj_cache['changed']
81 deleted = obj_cache['deleted']
82 for obj in new | changed | deleted:
83 if not hasattr(obj, '__revision_class__'):
84 continue
85 revision_cls = obj.__revision_class__
86 revision_table = orm.class_mapper(revision_cls).mapped_table
87 ## when a normal active transaction happens
88 if 'pending' not in obj.state:
89 ### this is asql statement as we do not want it in object cache
90 session.execute(
91 revision_table.update().where(
92 and_(revision_table.c.id == obj.id,
93 revision_table.c.current == '1')
94 ).values(current='0')
95 )
96
97 q = session.query(revision_cls)
98 q = q.filter_by(expired_timestamp=datetime.datetime(9999, 12, 31), id=obj.id)
99 results = q.all()
100 for rev_obj in results:
101 values = {}
102 if rev_obj.revision_id == revision.id:
103 values['revision_timestamp'] = revision.timestamp
104 if 'pending' not in obj.state:
105 values['current'] = '1'
106 else:
107 values['expired_id'] = revision.id
108 values['expired_timestamp'] = revision.timestamp
109 session.execute(
110 revision_table.update().where(
111 and_(revision_table.c.id == rev_obj.id,
112 revision_table.c.revision_id == rev_obj.revision_id)
113 ).values(**values)
114 )
115
116 def after_commit(self, session):
117 if hasattr(session, '_object_cache'):
118 del session._object_cache
119
120 def after_rollback(self, session):
121 if hasattr(session, '_object_cache'):
122 del session._object_cache
123
124 # __all__ = ['Session', 'engine', 'metadata', 'mapper']
125
126 # SQLAlchemy database engine. Updated by model.init_model()
127 engine = None
128
129 Session = orm.scoped_session(orm.sessionmaker(
130 autoflush=False,
131 autocommit=False,
132 expire_on_commit=False,
133 extension=[CkanCacheExtension(),
134 CkanSessionExtension(),
135 extension.PluginSessionExtension(),
136 activity.DatasetActivitySessionExtension()],
137 ))
138
139 create_local_session = orm.sessionmaker(
140 autoflush=False,
141 autocommit=False,
142 expire_on_commit=False,
143 extension=[CkanCacheExtension(),
144 CkanSessionExtension(),
145 extension.PluginSessionExtension(),
146 activity.DatasetActivitySessionExtension()],
147 )
148
149 #mapper = Session.mapper
150 mapper = orm.mapper
151
152 # Global metadata. If you have multiple databases with overlapping table
153 # names, you'll need a metadata for each database
154 metadata = MetaData()
155
156
157 def engine_is_sqlite():
158 # Returns true iff the engine is connected to a sqlite database.
159 return engine.url.drivername == 'sqlite'
160
161
162 def engine_is_pg():
163 # Returns true iff the engine is connected to a postgresql database.
164 # According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
165 # all Postgres driver names start with `postgresql`
166 return engine.url.drivername.startswith('postgresql')
167
```
Path: `ckanext/datastore/plugin.py`
Content:
```
1 import logging
2 import pylons
3
4 import ckan.plugins as p
5 import ckanext.datastore.logic.action as action
6 import ckanext.datastore.logic.auth as auth
7 import ckanext.datastore.db as db
8 import ckan.logic as logic
9 import ckan.model as model
10
11 log = logging.getLogger(__name__)
12 _get_or_bust = logic.get_or_bust
13
14
15 class DatastoreException(Exception):
16 pass
17
18
19 class DatastorePlugin(p.SingletonPlugin):
20 p.implements(p.IConfigurable, inherit=True)
21 p.implements(p.IActions)
22 p.implements(p.IAuthFunctions)
23
24 legacy_mode = False
25
26 def configure(self, config):
27 self.config = config
28 # check for ckan.datastore.write_url and ckan.datastore.read_url
29 if (not 'ckan.datastore.write_url' in config):
30 error_msg = 'ckan.datastore.write_url not found in config'
31 raise DatastoreException(error_msg)
32
33 # Legacy mode means that we have no read url. Consequently sql search is not
34 # available and permissions do not have to be changed. In legacy mode, the
35 # datastore runs on PG prior to 9.0 (for example 8.4).
36 self.legacy_mode = 'ckan.datastore.read_url' not in self.config
37
38 # Check whether we are running one of the paster commands which means
39 # that we should ignore the following tests.
40 import sys
41 if sys.argv[0].split('/')[-1] == 'paster' and 'datastore' in sys.argv[1:]:
42 log.warn('Omitting permission checks because you are '
43 'running paster commands.')
44 return
45
46 self.ckan_url = self.config['sqlalchemy.url']
47 self.write_url = self.config['ckan.datastore.write_url']
48 if self.legacy_mode:
49 self.read_url = self.write_url
50 log.warn('Legacy mode active. '
51 'The sql search will not be available.')
52 else:
53 self.read_url = self.config['ckan.datastore.read_url']
54
55 if not model.engine_is_pg():
56 log.warn('We detected that you do not use a PostgreSQL '
57 'database. The DataStore will NOT work and DataStore '
58 'tests will be skipped.')
59 return
60
61 if self._is_read_only_database():
62 log.warn('We detected that CKAN is running on a read '
63 'only database. Permission checks and the creation '
64 'of _table_metadata are skipped.')
65 else:
66 self._check_urls_and_permissions()
67
68 self._create_alias_table()
69
70 ## Do light wrapping around action function to add datastore_active
71 ## to resource dict. Not using IAction extension as this prevents
72 ## other plugins from having a custom resource_read.
73
74 # Make sure actions are cached
75 resource_show = p.toolkit.get_action('resource_show')
76
77 def new_resource_show(context, data_dict):
78 engine = db._get_engine(
79 context,
80 {'connection_url': self.read_url}
81 )
82 new_data_dict = resource_show(context, data_dict)
83 try:
84 connection = engine.connect()
85 result = connection.execute(
86 'SELECT 1 FROM "_table_metadata" WHERE name = %s AND alias_of IS NULL',
87 new_data_dict['id']
88 ).fetchone()
89 if result:
90 new_data_dict['datastore_active'] = True
91 else:
92 new_data_dict['datastore_active'] = False
93 finally:
94 connection.close()
95 return new_data_dict
96
97 ## Make sure do not run many times if configure is called repeatedly
98 ## as in tests.
99 if not hasattr(resource_show, '_datastore_wrapped'):
100 new_resource_show._datastore_wrapped = True
101 logic._actions['resource_show'] = new_resource_show
102
103 def _log_or_raise(self, message):
104 if self.config.get('debug'):
105 log.critical(message)
106 else:
107 raise DatastoreException(message)
108
109 def _check_urls_and_permissions(self):
110 # Make sure that the right permissions are set
111 # so that no harmful queries can be made
112
113 if self._same_ckan_and_datastore_db():
114 self._log_or_raise('CKAN and DataStore database '
115 'cannot be the same.')
116
117 # in legacy mode, the read and write url are ths same (both write url)
118 # consequently the same url check and and write privilege check
119 # don't make sense
120 if not self.legacy_mode:
121 if self._same_read_and_write_url():
122 self._log_or_raise('The write and read-only database '
123 'connection urls are the same.')
124
125 if not self._read_connection_has_correct_privileges():
126 self._log_or_raise('The read-only user has write privileges.')
127
128 def _is_read_only_database(self):
129 ''' Returns True if no connection has CREATE privileges on the public
130 schema. This is the case if replication is enabled.'''
131 for url in [self.ckan_url, self.write_url, self.read_url]:
132 connection = db._get_engine(None,
133 {'connection_url': url}).connect()
134 sql = u"SELECT has_schema_privilege('public', 'CREATE')"
135 is_writable = connection.execute(sql).first()[0]
136 if is_writable:
137 return False
138 return True
139
140 def _same_ckan_and_datastore_db(self):
141 '''Returns True if the CKAN and DataStore db are the same'''
142 return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)
143
144 def _get_db_from_url(self, url):
145 return url[url.rindex("@"):]
146
147 def _same_read_and_write_url(self):
148 return self.write_url == self.read_url
149
150 def _read_connection_has_correct_privileges(self):
151 ''' Returns True if the right permissions are set for the read only user.
152 A table is created by the write user to test the read only user.
153 '''
154 write_connection = db._get_engine(None,
155 {'connection_url': self.write_url}).connect()
156 read_connection = db._get_engine(None,
157 {'connection_url': self.read_url}).connect()
158
159 drop_foo_sql = u'DROP TABLE IF EXISTS _foo'
160
161 write_connection.execute(drop_foo_sql)
162
163 try:
164 write_connection.execute(u'CREATE TABLE _foo ()')
165 for privilege in ['INSERT', 'UPDATE', 'DELETE']:
166 test_privilege_sql = u"SELECT has_table_privilege('_foo', '{privilege}')"
167 sql = test_privilege_sql.format(privilege=privilege)
168 have_privilege = read_connection.execute(sql).first()[0]
169 if have_privilege:
170 return False
171 finally:
172 write_connection.execute(drop_foo_sql)
173 return True
174
175 def _create_alias_table(self):
176 mapping_sql = '''
177 SELECT DISTINCT
178 substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS "_id",
179 dependee.relname AS name,
180 dependee.oid AS oid,
181 dependent.relname AS alias_of
182 -- dependent.oid AS oid
183 FROM
184 pg_class AS dependee
185 LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid
186 LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid
187 LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid
188 WHERE
189 (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND
190 (dependee.relname IN (SELECT tablename FROM pg_catalog.pg_tables)
191 OR dependee.relname IN (SELECT viewname FROM pg_catalog.pg_views)) AND
192 dependee.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname='public')
193 ORDER BY dependee.oid DESC;
194 '''
195 create_alias_table_sql = u'CREATE OR REPLACE VIEW "_table_metadata" AS {0}'.format(mapping_sql)
196 connection = db._get_engine(None,
197 {'connection_url': pylons.config['ckan.datastore.write_url']}).connect()
198 connection.execute(create_alias_table_sql)
199
200 def get_actions(self):
201 actions = {'datastore_create': action.datastore_create,
202 'datastore_upsert': action.datastore_upsert,
203 'datastore_delete': action.datastore_delete,
204 'datastore_search': action.datastore_search}
205 if not self.legacy_mode:
206 actions['datastore_search_sql'] = action.datastore_search_sql
207 return actions
208
209 def get_auth_functions(self):
210 return {'datastore_create': auth.datastore_create,
211 'datastore_upsert': auth.datastore_upsert,
212 'datastore_delete': auth.datastore_delete,
213 'datastore_search': auth.datastore_search}
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckan/model/meta.py b/ckan/model/meta.py
--- a/ckan/model/meta.py
+++ b/ckan/model/meta.py
@@ -154,13 +154,13 @@
metadata = MetaData()
-def engine_is_sqlite():
+def engine_is_sqlite(sa_engine=None):
# Returns true iff the engine is connected to a sqlite database.
- return engine.url.drivername == 'sqlite'
+ return (sa_engine or engine).url.drivername == 'sqlite'
-def engine_is_pg():
+def engine_is_pg(sa_engine=None):
# Returns true iff the engine is connected to a postgresql database.
# According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql
# all Postgres driver names start with `postgresql`
- return engine.url.drivername.startswith('postgresql')
+ return (sa_engine or engine).url.drivername.startswith('postgresql')
diff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py
--- a/ckanext/datastore/plugin.py
+++ b/ckanext/datastore/plugin.py
@@ -52,7 +52,9 @@
else:
self.read_url = self.config['ckan.datastore.read_url']
- if not model.engine_is_pg():
+ read_engine = db._get_engine(
+ None, {'connection_url': self.read_url})
+ if not model.engine_is_pg(read_engine):
log.warn('We detected that you do not use a PostgreSQL '
'database. The DataStore will NOT work and DataStore '
'tests will be skipped.')
@@ -75,13 +77,9 @@
resource_show = p.toolkit.get_action('resource_show')
def new_resource_show(context, data_dict):
- engine = db._get_engine(
- context,
- {'connection_url': self.read_url}
- )
new_data_dict = resource_show(context, data_dict)
try:
- connection = engine.connect()
+ connection = read_engine.connect()
result = connection.execute(
'SELECT 1 FROM "_table_metadata" WHERE name = %s AND alias_of IS NULL',
new_data_dict['id']
| {"golden_diff": "diff --git a/ckan/model/meta.py b/ckan/model/meta.py\n--- a/ckan/model/meta.py\n+++ b/ckan/model/meta.py\n@@ -154,13 +154,13 @@\n metadata = MetaData()\n \n \n-def engine_is_sqlite():\n+def engine_is_sqlite(sa_engine=None):\n # Returns true iff the engine is connected to a sqlite database.\n- return engine.url.drivername == 'sqlite'\n+ return (sa_engine or engine).url.drivername == 'sqlite'\n \n \n-def engine_is_pg():\n+def engine_is_pg(sa_engine=None):\n # Returns true iff the engine is connected to a postgresql database.\n # According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql\n # all Postgres driver names start with `postgresql`\n- return engine.url.drivername.startswith('postgresql')\n+ return (sa_engine or engine).url.drivername.startswith('postgresql')\ndiff --git a/ckanext/datastore/plugin.py b/ckanext/datastore/plugin.py\n--- a/ckanext/datastore/plugin.py\n+++ b/ckanext/datastore/plugin.py\n@@ -52,7 +52,9 @@\n else:\n self.read_url = self.config['ckan.datastore.read_url']\n \n- if not model.engine_is_pg():\n+ read_engine = db._get_engine(\n+ None, {'connection_url': self.read_url})\n+ if not model.engine_is_pg(read_engine):\n log.warn('We detected that you do not use a PostgreSQL '\n 'database. The DataStore will NOT work and DataStore '\n 'tests will be skipped.')\n@@ -75,13 +77,9 @@\n resource_show = p.toolkit.get_action('resource_show')\n \n def new_resource_show(context, data_dict):\n- engine = db._get_engine(\n- context,\n- {'connection_url': self.read_url}\n- )\n new_data_dict = resource_show(context, data_dict)\n try:\n- connection = engine.connect()\n+ connection = read_engine.connect()\n result = connection.execute(\n 'SELECT 1 FROM \"_table_metadata\" WHERE name = %s AND alias_of IS NULL',\n new_data_dict['id']\n", "issue": "datastore plugin pgsql check on datastore DB, not main DB\nI ran into a confusing situation here, getting \"We detected that you do not use a PostgreSQL database. The DataStore will NOT work and DataStore tests will be skipped.\" I had the datastore as postgres://, so I tried changing to postgresql:// and got the same error. I googled, found PR 359 which says it's looking for 'postgresql', got more confused, looked through the code ...\n\nFinally realized the code is checking the sqlalchemy.url, _not_ the datastore URL. This doesn't seem to be the right behavior or match the error message, so I made a simple patch to apply the same drivername startswith check to the datastore write_url directly, which seems the least-invasive way to make the behavior/error more understandable.\n\n", "before_files": [{"content": "import datetime\n\nfrom paste.deploy.converters import asbool\nfrom pylons import config\n\"\"\"SQLAlchemy Metadata and Session object\"\"\"\nfrom sqlalchemy import MetaData, and_\nimport sqlalchemy.orm as orm\nfrom sqlalchemy.orm.session import SessionExtension\n\nimport extension\nimport ckan.lib.activity_streams_session_extension as activity\n\n__all__ = ['Session', 'engine_is_sqlite', 'engine_is_pg']\n\n\nclass CkanCacheExtension(SessionExtension):\n ''' This extension checks what tables have been affected by\n database access and allows us to act on them. Currently this is\n used by the page cache to flush the cache when data in the database\n is altered. 
'''\n\n def __init__(self, *args, **kw):\n super(CkanCacheExtension, self).__init__(*args, **kw)\n # Setup Redis support if needed.\n self.use_redis = asbool(config.get('ckan.page_cache_enabled'))\n if self.use_redis:\n import redis\n self.redis = redis\n self.redis_connection is None\n self.redis_exception = redis.exceptions.ConnectionError\n\n def after_commit(self, session):\n if hasattr(session, '_object_cache'):\n oc = session._object_cache\n oc_list = oc['new']\n oc_list.update(oc['changed'])\n oc_list.update(oc['deleted'])\n objs = set()\n for item in oc_list:\n objs.add(item.__class__.__name__)\n\n # Flush Redis\n if self.use_redis:\n if self.redis_connection is None:\n try:\n self.redis_connection = self.redis.StrictRedis()\n except self.redis_exception:\n pass\n try:\n self.redis_connection.flushdb()\n except self.redis_exception:\n pass\n\nclass CkanSessionExtension(SessionExtension):\n\n def before_flush(self, session, flush_context, instances):\n if not hasattr(session, '_object_cache'):\n session._object_cache= {'new': set(),\n 'deleted': set(),\n 'changed': set()}\n\n changed = [obj for obj in session.dirty if \n session.is_modified(obj, include_collections=False, passive=True)]\n\n session._object_cache['new'].update(session.new)\n session._object_cache['deleted'].update(session.deleted)\n session._object_cache['changed'].update(changed)\n\n\n def before_commit(self, session):\n session.flush()\n try:\n obj_cache = session._object_cache\n revision = session.revision\n except AttributeError:\n return\n if getattr(session, 'revisioning_disabled', False):\n return\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n for obj in new | changed | deleted:\n if not hasattr(obj, '__revision_class__'):\n continue\n revision_cls = obj.__revision_class__\n revision_table = orm.class_mapper(revision_cls).mapped_table\n ## when a normal active transaction happens\n if 'pending' not in obj.state:\n ### this is asql statement as we do not want it in object cache\n session.execute(\n revision_table.update().where(\n and_(revision_table.c.id == obj.id,\n revision_table.c.current == '1')\n ).values(current='0')\n )\n\n q = session.query(revision_cls)\n q = q.filter_by(expired_timestamp=datetime.datetime(9999, 12, 31), id=obj.id)\n results = q.all()\n for rev_obj in results:\n values = {}\n if rev_obj.revision_id == revision.id:\n values['revision_timestamp'] = revision.timestamp\n if 'pending' not in obj.state:\n values['current'] = '1'\n else:\n values['expired_id'] = revision.id\n values['expired_timestamp'] = revision.timestamp\n session.execute(\n revision_table.update().where(\n and_(revision_table.c.id == rev_obj.id,\n revision_table.c.revision_id == rev_obj.revision_id)\n ).values(**values)\n )\n\n def after_commit(self, session):\n if hasattr(session, '_object_cache'):\n del session._object_cache\n\n def after_rollback(self, session):\n if hasattr(session, '_object_cache'):\n del session._object_cache\n\n# __all__ = ['Session', 'engine', 'metadata', 'mapper']\n\n# SQLAlchemy database engine. 
Updated by model.init_model()\nengine = None\n\nSession = orm.scoped_session(orm.sessionmaker(\n autoflush=False,\n autocommit=False,\n expire_on_commit=False,\n extension=[CkanCacheExtension(),\n CkanSessionExtension(),\n extension.PluginSessionExtension(),\n activity.DatasetActivitySessionExtension()],\n))\n\ncreate_local_session = orm.sessionmaker(\n autoflush=False,\n autocommit=False,\n expire_on_commit=False,\n extension=[CkanCacheExtension(),\n CkanSessionExtension(),\n extension.PluginSessionExtension(),\n activity.DatasetActivitySessionExtension()],\n)\n\n#mapper = Session.mapper\nmapper = orm.mapper\n\n# Global metadata. If you have multiple databases with overlapping table\n# names, you'll need a metadata for each database\nmetadata = MetaData()\n\n\ndef engine_is_sqlite():\n # Returns true iff the engine is connected to a sqlite database.\n return engine.url.drivername == 'sqlite'\n\n\ndef engine_is_pg():\n # Returns true iff the engine is connected to a postgresql database.\n # According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql\n # all Postgres driver names start with `postgresql`\n return engine.url.drivername.startswith('postgresql')\n", "path": "ckan/model/meta.py"}, {"content": "import logging\nimport pylons\n\nimport ckan.plugins as p\nimport ckanext.datastore.logic.action as action\nimport ckanext.datastore.logic.auth as auth\nimport ckanext.datastore.db as db\nimport ckan.logic as logic\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n_get_or_bust = logic.get_or_bust\n\n\nclass DatastoreException(Exception):\n pass\n\n\nclass DatastorePlugin(p.SingletonPlugin):\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IActions)\n p.implements(p.IAuthFunctions)\n\n legacy_mode = False\n\n def configure(self, config):\n self.config = config\n # check for ckan.datastore.write_url and ckan.datastore.read_url\n if (not 'ckan.datastore.write_url' in config):\n error_msg = 'ckan.datastore.write_url not found in config'\n raise DatastoreException(error_msg)\n\n # Legacy mode means that we have no read url. Consequently sql search is not\n # available and permissions do not have to be changed. In legacy mode, the\n # datastore runs on PG prior to 9.0 (for example 8.4).\n self.legacy_mode = 'ckan.datastore.read_url' not in self.config\n\n # Check whether we are running one of the paster commands which means\n # that we should ignore the following tests.\n import sys\n if sys.argv[0].split('/')[-1] == 'paster' and 'datastore' in sys.argv[1:]:\n log.warn('Omitting permission checks because you are '\n 'running paster commands.')\n return\n\n self.ckan_url = self.config['sqlalchemy.url']\n self.write_url = self.config['ckan.datastore.write_url']\n if self.legacy_mode:\n self.read_url = self.write_url\n log.warn('Legacy mode active. '\n 'The sql search will not be available.')\n else:\n self.read_url = self.config['ckan.datastore.read_url']\n\n if not model.engine_is_pg():\n log.warn('We detected that you do not use a PostgreSQL '\n 'database. The DataStore will NOT work and DataStore '\n 'tests will be skipped.')\n return\n\n if self._is_read_only_database():\n log.warn('We detected that CKAN is running on a read '\n 'only database. Permission checks and the creation '\n 'of _table_metadata are skipped.')\n else:\n self._check_urls_and_permissions()\n\n self._create_alias_table()\n\n ## Do light wrapping around action function to add datastore_active\n ## to resource dict. 
Not using IAction extension as this prevents\n ## other plugins from having a custom resource_read.\n\n # Make sure actions are cached\n resource_show = p.toolkit.get_action('resource_show')\n\n def new_resource_show(context, data_dict):\n engine = db._get_engine(\n context,\n {'connection_url': self.read_url}\n )\n new_data_dict = resource_show(context, data_dict)\n try:\n connection = engine.connect()\n result = connection.execute(\n 'SELECT 1 FROM \"_table_metadata\" WHERE name = %s AND alias_of IS NULL',\n new_data_dict['id']\n ).fetchone()\n if result:\n new_data_dict['datastore_active'] = True\n else:\n new_data_dict['datastore_active'] = False\n finally:\n connection.close()\n return new_data_dict\n\n ## Make sure do not run many times if configure is called repeatedly\n ## as in tests.\n if not hasattr(resource_show, '_datastore_wrapped'):\n new_resource_show._datastore_wrapped = True\n logic._actions['resource_show'] = new_resource_show\n\n def _log_or_raise(self, message):\n if self.config.get('debug'):\n log.critical(message)\n else:\n raise DatastoreException(message)\n\n def _check_urls_and_permissions(self):\n # Make sure that the right permissions are set\n # so that no harmful queries can be made\n\n if self._same_ckan_and_datastore_db():\n self._log_or_raise('CKAN and DataStore database '\n 'cannot be the same.')\n\n # in legacy mode, the read and write url are ths same (both write url)\n # consequently the same url check and and write privilege check\n # don't make sense\n if not self.legacy_mode:\n if self._same_read_and_write_url():\n self._log_or_raise('The write and read-only database '\n 'connection urls are the same.')\n\n if not self._read_connection_has_correct_privileges():\n self._log_or_raise('The read-only user has write privileges.')\n\n def _is_read_only_database(self):\n ''' Returns True if no connection has CREATE privileges on the public\n schema. 
This is the case if replication is enabled.'''\n for url in [self.ckan_url, self.write_url, self.read_url]:\n connection = db._get_engine(None,\n {'connection_url': url}).connect()\n sql = u\"SELECT has_schema_privilege('public', 'CREATE')\"\n is_writable = connection.execute(sql).first()[0]\n if is_writable:\n return False\n return True\n\n def _same_ckan_and_datastore_db(self):\n '''Returns True if the CKAN and DataStore db are the same'''\n return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)\n\n def _get_db_from_url(self, url):\n return url[url.rindex(\"@\"):]\n\n def _same_read_and_write_url(self):\n return self.write_url == self.read_url\n\n def _read_connection_has_correct_privileges(self):\n ''' Returns True if the right permissions are set for the read only user.\n A table is created by the write user to test the read only user.\n '''\n write_connection = db._get_engine(None,\n {'connection_url': self.write_url}).connect()\n read_connection = db._get_engine(None,\n {'connection_url': self.read_url}).connect()\n\n drop_foo_sql = u'DROP TABLE IF EXISTS _foo'\n\n write_connection.execute(drop_foo_sql)\n\n try:\n write_connection.execute(u'CREATE TABLE _foo ()')\n for privilege in ['INSERT', 'UPDATE', 'DELETE']:\n test_privilege_sql = u\"SELECT has_table_privilege('_foo', '{privilege}')\"\n sql = test_privilege_sql.format(privilege=privilege)\n have_privilege = read_connection.execute(sql).first()[0]\n if have_privilege:\n return False\n finally:\n write_connection.execute(drop_foo_sql)\n return True\n\n def _create_alias_table(self):\n mapping_sql = '''\n SELECT DISTINCT\n substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS \"_id\",\n dependee.relname AS name,\n dependee.oid AS oid,\n dependent.relname AS alias_of\n -- dependent.oid AS oid\n FROM\n pg_class AS dependee\n LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid\n LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid\n LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid\n WHERE\n (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND\n (dependee.relname IN (SELECT tablename FROM pg_catalog.pg_tables)\n OR dependee.relname IN (SELECT viewname FROM pg_catalog.pg_views)) AND\n dependee.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname='public')\n ORDER BY dependee.oid DESC;\n '''\n create_alias_table_sql = u'CREATE OR REPLACE VIEW \"_table_metadata\" AS {0}'.format(mapping_sql)\n connection = db._get_engine(None,\n {'connection_url': pylons.config['ckan.datastore.write_url']}).connect()\n connection.execute(create_alias_table_sql)\n\n def get_actions(self):\n actions = {'datastore_create': action.datastore_create,\n 'datastore_upsert': action.datastore_upsert,\n 'datastore_delete': action.datastore_delete,\n 'datastore_search': action.datastore_search}\n if not self.legacy_mode:\n actions['datastore_search_sql'] = action.datastore_search_sql\n return actions\n\n def get_auth_functions(self):\n return {'datastore_create': auth.datastore_create,\n 'datastore_upsert': auth.datastore_upsert,\n 'datastore_delete': auth.datastore_delete,\n 'datastore_search': auth.datastore_search}\n", "path": "ckanext/datastore/plugin.py"}], "after_files": [{"content": "import datetime\n\nfrom paste.deploy.converters import asbool\nfrom pylons import config\n\"\"\"SQLAlchemy Metadata and Session object\"\"\"\nfrom sqlalchemy import MetaData, and_\nimport sqlalchemy.orm as orm\nfrom sqlalchemy.orm.session import SessionExtension\n\nimport extension\nimport 
ckan.lib.activity_streams_session_extension as activity\n\n__all__ = ['Session', 'engine_is_sqlite', 'engine_is_pg']\n\n\nclass CkanCacheExtension(SessionExtension):\n ''' This extension checks what tables have been affected by\n database access and allows us to act on them. Currently this is\n used by the page cache to flush the cache when data in the database\n is altered. '''\n\n def __init__(self, *args, **kw):\n super(CkanCacheExtension, self).__init__(*args, **kw)\n # Setup Redis support if needed.\n self.use_redis = asbool(config.get('ckan.page_cache_enabled'))\n if self.use_redis:\n import redis\n self.redis = redis\n self.redis_connection is None\n self.redis_exception = redis.exceptions.ConnectionError\n\n def after_commit(self, session):\n if hasattr(session, '_object_cache'):\n oc = session._object_cache\n oc_list = oc['new']\n oc_list.update(oc['changed'])\n oc_list.update(oc['deleted'])\n objs = set()\n for item in oc_list:\n objs.add(item.__class__.__name__)\n\n # Flush Redis\n if self.use_redis:\n if self.redis_connection is None:\n try:\n self.redis_connection = self.redis.StrictRedis()\n except self.redis_exception:\n pass\n try:\n self.redis_connection.flushdb()\n except self.redis_exception:\n pass\n\nclass CkanSessionExtension(SessionExtension):\n\n def before_flush(self, session, flush_context, instances):\n if not hasattr(session, '_object_cache'):\n session._object_cache= {'new': set(),\n 'deleted': set(),\n 'changed': set()}\n\n changed = [obj for obj in session.dirty if \n session.is_modified(obj, include_collections=False, passive=True)]\n\n session._object_cache['new'].update(session.new)\n session._object_cache['deleted'].update(session.deleted)\n session._object_cache['changed'].update(changed)\n\n\n def before_commit(self, session):\n session.flush()\n try:\n obj_cache = session._object_cache\n revision = session.revision\n except AttributeError:\n return\n if getattr(session, 'revisioning_disabled', False):\n return\n new = obj_cache['new']\n changed = obj_cache['changed']\n deleted = obj_cache['deleted']\n for obj in new | changed | deleted:\n if not hasattr(obj, '__revision_class__'):\n continue\n revision_cls = obj.__revision_class__\n revision_table = orm.class_mapper(revision_cls).mapped_table\n ## when a normal active transaction happens\n if 'pending' not in obj.state:\n ### this is asql statement as we do not want it in object cache\n session.execute(\n revision_table.update().where(\n and_(revision_table.c.id == obj.id,\n revision_table.c.current == '1')\n ).values(current='0')\n )\n\n q = session.query(revision_cls)\n q = q.filter_by(expired_timestamp=datetime.datetime(9999, 12, 31), id=obj.id)\n results = q.all()\n for rev_obj in results:\n values = {}\n if rev_obj.revision_id == revision.id:\n values['revision_timestamp'] = revision.timestamp\n if 'pending' not in obj.state:\n values['current'] = '1'\n else:\n values['expired_id'] = revision.id\n values['expired_timestamp'] = revision.timestamp\n session.execute(\n revision_table.update().where(\n and_(revision_table.c.id == rev_obj.id,\n revision_table.c.revision_id == rev_obj.revision_id)\n ).values(**values)\n )\n\n def after_commit(self, session):\n if hasattr(session, '_object_cache'):\n del session._object_cache\n\n def after_rollback(self, session):\n if hasattr(session, '_object_cache'):\n del session._object_cache\n\n# __all__ = ['Session', 'engine', 'metadata', 'mapper']\n\n# SQLAlchemy database engine. 
Updated by model.init_model()\nengine = None\n\nSession = orm.scoped_session(orm.sessionmaker(\n autoflush=False,\n autocommit=False,\n expire_on_commit=False,\n extension=[CkanCacheExtension(),\n CkanSessionExtension(),\n extension.PluginSessionExtension(),\n activity.DatasetActivitySessionExtension()],\n))\n\ncreate_local_session = orm.sessionmaker(\n autoflush=False,\n autocommit=False,\n expire_on_commit=False,\n extension=[CkanCacheExtension(),\n CkanSessionExtension(),\n extension.PluginSessionExtension(),\n activity.DatasetActivitySessionExtension()],\n)\n\n#mapper = Session.mapper\nmapper = orm.mapper\n\n# Global metadata. If you have multiple databases with overlapping table\n# names, you'll need a metadata for each database\nmetadata = MetaData()\n\n\ndef engine_is_sqlite(sa_engine=None):\n # Returns true iff the engine is connected to a sqlite database.\n return (sa_engine or engine).url.drivername == 'sqlite'\n\n\ndef engine_is_pg(sa_engine=None):\n # Returns true iff the engine is connected to a postgresql database.\n # According to http://docs.sqlalchemy.org/en/latest/core/engines.html#postgresql\n # all Postgres driver names start with `postgresql`\n return (sa_engine or engine).url.drivername.startswith('postgresql')\n", "path": "ckan/model/meta.py"}, {"content": "import logging\nimport pylons\n\nimport ckan.plugins as p\nimport ckanext.datastore.logic.action as action\nimport ckanext.datastore.logic.auth as auth\nimport ckanext.datastore.db as db\nimport ckan.logic as logic\nimport ckan.model as model\n\nlog = logging.getLogger(__name__)\n_get_or_bust = logic.get_or_bust\n\n\nclass DatastoreException(Exception):\n pass\n\n\nclass DatastorePlugin(p.SingletonPlugin):\n p.implements(p.IConfigurable, inherit=True)\n p.implements(p.IActions)\n p.implements(p.IAuthFunctions)\n\n legacy_mode = False\n\n def configure(self, config):\n self.config = config\n # check for ckan.datastore.write_url and ckan.datastore.read_url\n if (not 'ckan.datastore.write_url' in config):\n error_msg = 'ckan.datastore.write_url not found in config'\n raise DatastoreException(error_msg)\n\n # Legacy mode means that we have no read url. Consequently sql search is not\n # available and permissions do not have to be changed. In legacy mode, the\n # datastore runs on PG prior to 9.0 (for example 8.4).\n self.legacy_mode = 'ckan.datastore.read_url' not in self.config\n\n # Check whether we are running one of the paster commands which means\n # that we should ignore the following tests.\n import sys\n if sys.argv[0].split('/')[-1] == 'paster' and 'datastore' in sys.argv[1:]:\n log.warn('Omitting permission checks because you are '\n 'running paster commands.')\n return\n\n self.ckan_url = self.config['sqlalchemy.url']\n self.write_url = self.config['ckan.datastore.write_url']\n if self.legacy_mode:\n self.read_url = self.write_url\n log.warn('Legacy mode active. '\n 'The sql search will not be available.')\n else:\n self.read_url = self.config['ckan.datastore.read_url']\n\n read_engine = db._get_engine(\n None, {'connection_url': self.read_url})\n if not model.engine_is_pg(read_engine):\n log.warn('We detected that you do not use a PostgreSQL '\n 'database. The DataStore will NOT work and DataStore '\n 'tests will be skipped.')\n return\n\n if self._is_read_only_database():\n log.warn('We detected that CKAN is running on a read '\n 'only database. 
Permission checks and the creation '\n 'of _table_metadata are skipped.')\n else:\n self._check_urls_and_permissions()\n\n self._create_alias_table()\n\n ## Do light wrapping around action function to add datastore_active\n ## to resource dict. Not using IAction extension as this prevents\n ## other plugins from having a custom resource_read.\n\n # Make sure actions are cached\n resource_show = p.toolkit.get_action('resource_show')\n\n def new_resource_show(context, data_dict):\n new_data_dict = resource_show(context, data_dict)\n try:\n connection = read_engine.connect()\n result = connection.execute(\n 'SELECT 1 FROM \"_table_metadata\" WHERE name = %s AND alias_of IS NULL',\n new_data_dict['id']\n ).fetchone()\n if result:\n new_data_dict['datastore_active'] = True\n else:\n new_data_dict['datastore_active'] = False\n finally:\n connection.close()\n return new_data_dict\n\n ## Make sure do not run many times if configure is called repeatedly\n ## as in tests.\n if not hasattr(resource_show, '_datastore_wrapped'):\n new_resource_show._datastore_wrapped = True\n logic._actions['resource_show'] = new_resource_show\n\n def _log_or_raise(self, message):\n if self.config.get('debug'):\n log.critical(message)\n else:\n raise DatastoreException(message)\n\n def _check_urls_and_permissions(self):\n # Make sure that the right permissions are set\n # so that no harmful queries can be made\n\n if self._same_ckan_and_datastore_db():\n self._log_or_raise('CKAN and DataStore database '\n 'cannot be the same.')\n\n # in legacy mode, the read and write url are ths same (both write url)\n # consequently the same url check and and write privilege check\n # don't make sense\n if not self.legacy_mode:\n if self._same_read_and_write_url():\n self._log_or_raise('The write and read-only database '\n 'connection urls are the same.')\n\n if not self._read_connection_has_correct_privileges():\n self._log_or_raise('The read-only user has write privileges.')\n\n def _is_read_only_database(self):\n ''' Returns True if no connection has CREATE privileges on the public\n schema. 
This is the case if replication is enabled.'''\n for url in [self.ckan_url, self.write_url, self.read_url]:\n connection = db._get_engine(None,\n {'connection_url': url}).connect()\n sql = u\"SELECT has_schema_privilege('public', 'CREATE')\"\n is_writable = connection.execute(sql).first()[0]\n if is_writable:\n return False\n return True\n\n def _same_ckan_and_datastore_db(self):\n '''Returns True if the CKAN and DataStore db are the same'''\n return self._get_db_from_url(self.ckan_url) == self._get_db_from_url(self.read_url)\n\n def _get_db_from_url(self, url):\n return url[url.rindex(\"@\"):]\n\n def _same_read_and_write_url(self):\n return self.write_url == self.read_url\n\n def _read_connection_has_correct_privileges(self):\n ''' Returns True if the right permissions are set for the read only user.\n A table is created by the write user to test the read only user.\n '''\n write_connection = db._get_engine(None,\n {'connection_url': self.write_url}).connect()\n read_connection = db._get_engine(None,\n {'connection_url': self.read_url}).connect()\n\n drop_foo_sql = u'DROP TABLE IF EXISTS _foo'\n\n write_connection.execute(drop_foo_sql)\n\n try:\n write_connection.execute(u'CREATE TABLE _foo ()')\n for privilege in ['INSERT', 'UPDATE', 'DELETE']:\n test_privilege_sql = u\"SELECT has_table_privilege('_foo', '{privilege}')\"\n sql = test_privilege_sql.format(privilege=privilege)\n have_privilege = read_connection.execute(sql).first()[0]\n if have_privilege:\n return False\n finally:\n write_connection.execute(drop_foo_sql)\n return True\n\n def _create_alias_table(self):\n mapping_sql = '''\n SELECT DISTINCT\n substr(md5(dependee.relname || COALESCE(dependent.relname, '')), 0, 17) AS \"_id\",\n dependee.relname AS name,\n dependee.oid AS oid,\n dependent.relname AS alias_of\n -- dependent.oid AS oid\n FROM\n pg_class AS dependee\n LEFT OUTER JOIN pg_rewrite AS r ON r.ev_class = dependee.oid\n LEFT OUTER JOIN pg_depend AS d ON d.objid = r.oid\n LEFT OUTER JOIN pg_class AS dependent ON d.refobjid = dependent.oid\n WHERE\n (dependee.oid != dependent.oid OR dependent.oid IS NULL) AND\n (dependee.relname IN (SELECT tablename FROM pg_catalog.pg_tables)\n OR dependee.relname IN (SELECT viewname FROM pg_catalog.pg_views)) AND\n dependee.relnamespace = (SELECT oid FROM pg_namespace WHERE nspname='public')\n ORDER BY dependee.oid DESC;\n '''\n create_alias_table_sql = u'CREATE OR REPLACE VIEW \"_table_metadata\" AS {0}'.format(mapping_sql)\n connection = db._get_engine(None,\n {'connection_url': pylons.config['ckan.datastore.write_url']}).connect()\n connection.execute(create_alias_table_sql)\n\n def get_actions(self):\n actions = {'datastore_create': action.datastore_create,\n 'datastore_upsert': action.datastore_upsert,\n 'datastore_delete': action.datastore_delete,\n 'datastore_search': action.datastore_search}\n if not self.legacy_mode:\n actions['datastore_search_sql'] = action.datastore_search_sql\n return actions\n\n def get_auth_functions(self):\n return {'datastore_create': auth.datastore_create,\n 'datastore_upsert': auth.datastore_upsert,\n 'datastore_delete': auth.datastore_delete,\n 'datastore_search': auth.datastore_search}\n", "path": "ckanext/datastore/plugin.py"}]} |
gh_patches_debug_5668 | rasdani/github-patches | git_diff | pex-tool__pex-757 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
version 1.6.9 fails with null module __file__ attribute
On my Arch Linux system, after the Pants upgrade to pex version 1.6.9, a number of pieces of Pants functionality that make use of pex started failing with the following error message:
```
Traceback (most recent call last):
File "/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py", line 356, in execute
self.patch_sys(pex_inherit_path)
File "/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py", line 285, in patch_sys
new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)
File "/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py", line 259, in minimum_sys
sys_modules = cls.minimum_sys_modules(site_libs)
File "/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py", line 179, in minimum_sys_modules
if cls._tainted_path(module_file, site_libs):
File "/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py", line 157, in _tainted_path
paths = frozenset([path, os.path.realpath(path)])
File "/home/gregs/code/pants/build-support/pants_dev_deps.py37.venv/lib/python3.7/posixpath.py", line 394, in realpath
filename = os.fspath(filename)
TypeError: expected str, bytes or os.PathLike object, not NoneType
```
It looks like what's going on here is that https://github.com/pantsbuild/pex/blob/master/pex/pex.py#L178 is iterating over every module and extracting the `__file__` attribute of that module. However, some module on my system seems to have a `None` value for that attribute, which causes the code downstream of that line to fail. It looks like the commit https://github.com/pantsbuild/pex/pull/752/commits/b5f7e41780c8f05a07640dc09fdeed67e1b7a6a6 might be relevant.
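For illustration, this is the kind of guard I would expect around that loop — a sketch only, with names of my own invention; the real code lives in `PEX.minimum_sys_modules` and `PEX._tainted_path`:

```python
import os

# Sketch: skip modules whose __file__ is missing or None instead of passing
# None into os.path.realpath(), which is what blows up in the traceback above.
def module_is_tainted(module, site_libs):
    module_file = getattr(module, '__file__', None)
    if not module_file:
        # Some modules (e.g. builtins or namespace packages) have no usable __file__.
        return False
    paths = frozenset([module_file, os.path.realpath(module_file)])
    return any(path.startswith(site_lib) for path in paths for site_lib in site_libs)
```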
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/pex.py`
Content:
```
1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import absolute_import, print_function
5
6 import os
7 import sys
8 from distutils import sysconfig
9 from site import USER_SITE
10
11 import pex.third_party.pkg_resources as pkg_resources
12 from pex import third_party
13 from pex.bootstrap import Bootstrap
14 from pex.common import die
15 from pex.environment import PEXEnvironment
16 from pex.executor import Executor
17 from pex.finders import (
18 get_entry_point_from_console_script,
19 get_script_from_distributions,
20 unregister_finders
21 )
22 from pex.interpreter import PythonInterpreter
23 from pex.orderedset import OrderedSet
24 from pex.pex_info import PexInfo
25 from pex.third_party.pkg_resources import EntryPoint, WorkingSet, find_distributions
26 from pex.tracer import TRACER
27 from pex.util import iter_pth_paths, named_temporary_file
28 from pex.variables import ENV
29
30
31 class DevNull(object):
32 def __init__(self):
33 pass
34
35 def write(self, *args, **kw):
36 pass
37
38 def flush(self):
39 pass
40
41
42 class PEX(object): # noqa: T000
43 """PEX, n. A self-contained python environment."""
44
45 class Error(Exception): pass
46 class NotFound(Error): pass
47 class InvalidEntryPoint(Error): pass
48
49 @classmethod
50 def clean_environment(cls):
51 try:
52 del os.environ['MACOSX_DEPLOYMENT_TARGET']
53 except KeyError:
54 pass
55 # Cannot change dictionary size during __iter__
56 filter_keys = [key for key in os.environ if key.startswith('PEX_')]
57 for key in filter_keys:
58 del os.environ[key]
59
60 def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV, verify_entry_point=False):
61 self._pex = pex
62 self._interpreter = interpreter or PythonInterpreter.get()
63 self._pex_info = PexInfo.from_pex(self._pex)
64 self._pex_info_overrides = PexInfo.from_env(env=env)
65 self._vars = env
66 self._envs = []
67 self._working_set = None
68 if verify_entry_point:
69 self._do_entry_point_verification()
70
71 def _activate(self):
72 if not self._working_set:
73 working_set = WorkingSet([])
74
75 # set up the local .pex environment
76 pex_info = self._pex_info.copy()
77 pex_info.update(self._pex_info_overrides)
78 pex_info.merge_pex_path(self._vars.PEX_PATH)
79 self._envs.append(PEXEnvironment(self._pex, pex_info, interpreter=self._interpreter))
80 # N.B. by this point, `pex_info.pex_path` will contain a single pex path
81 # merged from pex_path in `PEX-INFO` and `PEX_PATH` set in the environment.
82 # `PEX_PATH` entries written into `PEX-INFO` take precedence over those set
83 # in the environment.
84 if pex_info.pex_path:
85 # set up other environments as specified in pex_path
86 for pex_path in filter(None, pex_info.pex_path.split(os.pathsep)):
87 pex_info = PexInfo.from_pex(pex_path)
88 pex_info.update(self._pex_info_overrides)
89 self._envs.append(PEXEnvironment(pex_path, pex_info, interpreter=self._interpreter))
90
91 # activate all of them
92 for env in self._envs:
93 for dist in env.activate():
94 working_set.add(dist)
95
96 # Ensure that pkg_resources is not imported until at least every pex environment
97 # (i.e. PEX_PATH) has been merged into the environment
98 PEXEnvironment.declare_namespace_packages(working_set)
99 self._working_set = working_set
100
101 return self._working_set
102
103 @classmethod
104 def _extras_paths(cls):
105 standard_lib = sysconfig.get_python_lib(standard_lib=True)
106
107 try:
108 makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())
109 except (AttributeError, IOError):
110 # This is not available by default in PyPy's distutils.sysconfig or it simply is
111 # no longer available on the system (IOError ENOENT)
112 makefile = {}
113
114 extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))
115 for path in extras_paths:
116 yield os.path.join(standard_lib, path)
117
118 # Handle .pth injected paths as extras.
119 sitedirs = cls._get_site_packages()
120 for pth_path in cls._scan_pth_files(sitedirs):
121 TRACER.log('Found .pth file: %s' % pth_path, V=3)
122 for extras_path in iter_pth_paths(pth_path):
123 yield extras_path
124
125 @staticmethod
126 def _scan_pth_files(dir_paths):
127 """Given an iterable of directory paths, yield paths to all .pth files within."""
128 for dir_path in dir_paths:
129 if not os.path.exists(dir_path):
130 continue
131
132 pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))
133 for pth_filename in pth_filenames:
134 yield os.path.join(dir_path, pth_filename)
135
136 @staticmethod
137 def _get_site_packages():
138 try:
139 from site import getsitepackages
140 return set(getsitepackages())
141 except ImportError:
142 return set()
143
144 @classmethod
145 def site_libs(cls):
146 site_libs = cls._get_site_packages()
147 site_libs.update([sysconfig.get_python_lib(plat_specific=False),
148 sysconfig.get_python_lib(plat_specific=True)])
149 # On windows getsitepackages() returns the python stdlib too.
150 if sys.prefix in site_libs:
151 site_libs.remove(sys.prefix)
152 real_site_libs = set(os.path.realpath(path) for path in site_libs)
153 return site_libs | real_site_libs
154
155 @classmethod
156 def _tainted_path(cls, path, site_libs):
157 paths = frozenset([path, os.path.realpath(path)])
158 return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)
159
160 @classmethod
161 def minimum_sys_modules(cls, site_libs, modules=None):
162 """Given a set of site-packages paths, return a "clean" sys.modules.
163
164 When importing site, modules within sys.modules have their __path__'s populated with
165 additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution
166 metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace
167 packages to leak into imports despite being scrubbed from sys.path.
168
169 NOTE: This method mutates modules' __path__ attributes in sys.modules, so this is currently an
170 irreversible operation.
171 """
172
173 modules = modules or sys.modules
174 new_modules = {}
175
176 for module_name, module in modules.items():
177 # Tainted modules should be dropped.
178 module_file = getattr(module, '__file__', os.devnull)
179 if cls._tainted_path(module_file, site_libs):
180 TRACER.log('Dropping %s' % (module_name,), V=3)
181 continue
182
183 # Untainted non-packages (builtin modules) need no further special handling and can stay.
184 if not hasattr(module, '__path__'):
185 new_modules[module_name] = module
186 continue
187
188 # Unexpected objects, e.g. PEP 420 namespace packages, should just be dropped.
189 if not isinstance(module.__path__, list):
190 TRACER.log('Dropping %s' % (module_name,), V=3)
191 continue
192
193 # Drop tainted package paths.
194 for k in reversed(range(len(module.__path__))):
195 if cls._tainted_path(module.__path__[k], site_libs):
196 TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)
197 module.__path__.pop(k)
198
199 # The package still contains untainted path elements, so it can stay.
200 if module.__path__:
201 new_modules[module_name] = module
202
203 return new_modules
204
205 @classmethod
206 def minimum_sys_path(cls, site_libs, inherit_path):
207 scrub_paths = OrderedSet()
208 site_distributions = OrderedSet()
209 user_site_distributions = OrderedSet()
210
211 def all_distribution_paths(path):
212 locations = set(dist.location for dist in find_distributions(path))
213 return set([path]) | locations | set(os.path.realpath(path) for path in locations)
214
215 for path_element in sys.path:
216 if cls._tainted_path(path_element, site_libs):
217 TRACER.log('Tainted path element: %s' % path_element)
218 site_distributions.update(all_distribution_paths(path_element))
219 else:
220 TRACER.log('Not a tainted path element: %s' % path_element, V=2)
221
222 user_site_distributions.update(all_distribution_paths(USER_SITE))
223
224 if inherit_path == 'false':
225 scrub_paths = site_distributions | user_site_distributions
226 for path in user_site_distributions:
227 TRACER.log('Scrubbing from user site: %s' % path)
228 for path in site_distributions:
229 TRACER.log('Scrubbing from site-packages: %s' % path)
230
231 scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)
232 scrub_from_importer_cache = filter(
233 lambda key: any(key.startswith(path) for path in scrub_paths),
234 sys.path_importer_cache.keys())
235 scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()
236 if key not in scrub_from_importer_cache)
237
238 for importer_cache_entry in scrub_from_importer_cache:
239 TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)
240
241 return scrubbed_sys_path, scrubbed_importer_cache
242
243 @classmethod
244 def minimum_sys(cls, inherit_path):
245 """Return the minimum sys necessary to run this interpreter, a la python -S.
246
247 :returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a
248 bare python installation.
249 """
250 site_libs = set(cls.site_libs())
251 for site_lib in site_libs:
252 TRACER.log('Found site-library: %s' % site_lib)
253 for extras_path in cls._extras_paths():
254 TRACER.log('Found site extra: %s' % extras_path)
255 site_libs.add(extras_path)
256 site_libs = set(os.path.normpath(path) for path in site_libs)
257
258 sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)
259 sys_modules = cls.minimum_sys_modules(site_libs)
260
261 return sys_path, sys_path_importer_cache, sys_modules
262
263 @classmethod
264 def patch_pkg_resources(cls, working_set):
265 """Patch pkg_resources given a new working set."""
266 pkg_resources.working_set = working_set
267 pkg_resources.require = working_set.require
268 pkg_resources.iter_entry_points = working_set.iter_entry_points
269 pkg_resources.run_script = pkg_resources.run_main = working_set.run_script
270 pkg_resources.add_activation_listener = working_set.subscribe
271
272 # Thar be dragons -- when this function exits, the interpreter is potentially in a wonky state
273 # since the patches here (minimum_sys_modules for example) actually mutate global state.
274 def patch_sys(self, inherit_path):
275 """Patch sys with all site scrubbed."""
276 def patch_dict(old_value, new_value):
277 old_value.clear()
278 old_value.update(new_value)
279
280 def patch_all(path, path_importer_cache, modules):
281 sys.path[:] = path
282 patch_dict(sys.path_importer_cache, path_importer_cache)
283 patch_dict(sys.modules, modules)
284
285 new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)
286
287 patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)
288
289 def _wrap_coverage(self, runner, *args):
290 if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:
291 return runner(*args)
292
293 try:
294 import coverage
295 except ImportError:
296 die('Could not bootstrap coverage module, aborting.')
297
298 pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME
299 if pex_coverage_filename is not None:
300 cov = coverage.coverage(data_file=pex_coverage_filename)
301 else:
302 cov = coverage.coverage(data_suffix=True)
303
304 TRACER.log('Starting coverage.')
305 cov.start()
306
307 try:
308 return runner(*args)
309 finally:
310 TRACER.log('Stopping coverage')
311 cov.stop()
312
313 # TODO(wickman) Post-process coverage to elide $PEX_ROOT and make
314 # the report more useful/less noisy. #89
315 if pex_coverage_filename:
316 cov.save()
317 else:
318 cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)
319
320 def _wrap_profiling(self, runner, *args):
321 if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:
322 return runner(*args)
323
324 pex_profile_filename = self._vars.PEX_PROFILE_FILENAME
325 pex_profile_sort = self._vars.PEX_PROFILE_SORT
326 try:
327 import cProfile as profile
328 except ImportError:
329 import profile
330
331 profiler = profile.Profile()
332
333 try:
334 return profiler.runcall(runner, *args)
335 finally:
336 if pex_profile_filename is not None:
337 profiler.dump_stats(pex_profile_filename)
338 else:
339 profiler.print_stats(sort=pex_profile_sort)
340
341 def path(self):
342 """Return the path this PEX was built at."""
343 return self._pex
344
345 def execute(self):
346 """Execute the PEX.
347
348 This function makes assumptions that it is the last function called by
349 the interpreter.
350 """
351 teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE
352 try:
353 pex_inherit_path = self._vars.PEX_INHERIT_PATH
354 if pex_inherit_path == "false":
355 pex_inherit_path = self._pex_info.inherit_path
356 self.patch_sys(pex_inherit_path)
357 working_set = self._activate()
358 self.patch_pkg_resources(working_set)
359 exit_code = self._wrap_coverage(self._wrap_profiling, self._execute)
360 if exit_code:
361 sys.exit(exit_code)
362 except Exception:
363 # Allow the current sys.excepthook to handle this app exception before we tear things down in
364 # finally, then reraise so that the exit status is reflected correctly.
365 sys.excepthook(*sys.exc_info())
366 raise
367 except SystemExit as se:
368 # Print a SystemExit error message, avoiding a traceback in python3.
369 # This must happen here, as sys.stderr is about to be torn down
370 if not isinstance(se.code, int) and se.code is not None:
371 print(se.code, file=sys.stderr)
372 raise
373 finally:
374 # squash all exceptions on interpreter teardown -- the primary type here are
375 # atexit handlers failing to run because of things such as:
376 # http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
377 if not teardown_verbosity:
378 sys.stderr.flush()
379 sys.stderr = DevNull()
380 sys.excepthook = lambda *a, **kw: None
381
382 def _execute(self):
383 force_interpreter = self._vars.PEX_INTERPRETER
384
385 self.clean_environment()
386
387 if force_interpreter:
388 TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')
389 return self.execute_interpreter()
390
391 if self._pex_info_overrides.script and self._pex_info_overrides.entry_point:
392 die('Cannot specify both script and entry_point for a PEX!')
393
394 if self._pex_info.script and self._pex_info.entry_point:
395 die('Cannot specify both script and entry_point for a PEX!')
396
397 if self._pex_info_overrides.script:
398 return self.execute_script(self._pex_info_overrides.script)
399 elif self._pex_info_overrides.entry_point:
400 return self.execute_entry(self._pex_info_overrides.entry_point)
401 elif self._pex_info.script:
402 return self.execute_script(self._pex_info.script)
403 elif self._pex_info.entry_point:
404 return self.execute_entry(self._pex_info.entry_point)
405 else:
406 TRACER.log('No entry point specified, dropping into interpreter')
407 return self.execute_interpreter()
408
409 @classmethod
410 def demote_bootstrap(cls):
411 TRACER.log('Bootstrap complete, performing final sys.path modifications...')
412
413 should_log = {level: TRACER.should_log(V=level) for level in range(1, 10)}
414
415 def log(msg, V=1):
416 if should_log.get(V, False):
417 print('pex: {}'.format(msg), file=sys.stderr)
418
419 # Remove the third party resources pex uses and demote pex bootstrap code to the end of
420 # sys.path for the duration of the run to allow conflicting versions supplied by user
421 # dependencies to win during the course of the execution of user code.
422 unregister_finders()
423 third_party.uninstall()
424
425 bootstrap = Bootstrap.locate()
426 log('Demoting code from %s' % bootstrap, V=2)
427 for module in bootstrap.demote():
428 log('un-imported {}'.format(module), V=9)
429
430 import pex
431 log('Re-imported pex from {}'.format(pex.__path__), V=3)
432
433 log('PYTHONPATH contains:')
434 for element in sys.path:
435 log(' %c %s' % (' ' if os.path.exists(element) else '*', element))
436 log(' * - paths that do not exist or will be imported via zipimport')
437
438 def execute_interpreter(self):
439 args = sys.argv[1:]
440 if args:
441 # NB: We take care here to setup sys.argv to match how CPython does it for each case.
442 arg = args[0]
443 if arg == '-c':
444 content = args[1]
445 sys.argv = ['-c'] + args[2:]
446 self.execute_content('-c <cmd>', content, argv0='-c')
447 elif arg == '-m':
448 module = args[1]
449 sys.argv = args[1:]
450 self.execute_module(module)
451 else:
452 try:
453 if arg == '-':
454 content = sys.stdin.read()
455 else:
456 with open(arg) as fp:
457 content = fp.read()
458 except IOError as e:
459 die("Could not open %s in the environment [%s]: %s" % (arg, sys.argv[0], e))
460 sys.argv = args
461 self.execute_content(arg, content)
462 else:
463 self.demote_bootstrap()
464
465 import code
466 code.interact()
467
468 def execute_script(self, script_name):
469 dists = list(self._activate())
470
471 dist, entry_point = get_entry_point_from_console_script(script_name, dists)
472 if entry_point:
473 TRACER.log('Found console_script %r in %r' % (entry_point, dist))
474 sys.exit(self.execute_entry(entry_point))
475
476 dist, script_path, script_content = get_script_from_distributions(script_name, dists)
477 if not dist:
478 raise self.NotFound('Could not find script %r in pex!' % script_name)
479 TRACER.log('Found script %r in %r' % (script_name, dist))
480 return self.execute_content(script_path, script_content, argv0=script_name)
481
482 @classmethod
483 def execute_content(cls, name, content, argv0=None):
484 argv0 = argv0 or name
485 try:
486 ast = compile(content, name, 'exec', flags=0, dont_inherit=1)
487 except SyntaxError:
488 die('Unable to parse %s. PEX script support only supports Python scripts.' % name)
489
490 cls.demote_bootstrap()
491
492 from pex.compatibility import exec_function
493 sys.argv[0] = argv0
494 globals_map = globals().copy()
495 globals_map['__name__'] = '__main__'
496 globals_map['__file__'] = name
497 exec_function(ast, globals_map)
498
499 @classmethod
500 def execute_entry(cls, entry_point):
501 runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module
502 return runner(entry_point)
503
504 @classmethod
505 def execute_module(cls, module_name):
506 cls.demote_bootstrap()
507
508 import runpy
509 runpy.run_module(module_name, run_name='__main__')
510
511 @classmethod
512 def execute_pkg_resources(cls, spec):
513 entry = EntryPoint.parse("run = {}".format(spec))
514 cls.demote_bootstrap()
515
516 runner = entry.resolve()
517 return runner()
518
519 def cmdline(self, args=()):
520 """The commandline to run this environment.
521
522 :keyword args: Additional arguments to be passed to the application being invoked by the
523 environment.
524 """
525 cmds = [self._interpreter.binary]
526 cmds.append(self._pex)
527 cmds.extend(args)
528 return cmds
529
530 def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kwargs):
531 """Run the PythonEnvironment in an interpreter in a subprocess.
532
533 :keyword args: Additional arguments to be passed to the application being invoked by the
534 environment.
535 :keyword with_chroot: Run with cwd set to the environment's working directory.
536 :keyword blocking: If true, return the return code of the subprocess.
537 If false, return the Popen object of the invoked subprocess.
538 :keyword setsid: If true, run the PEX in a separate operating system session.
539
540 Remaining keyword arguments are passed directly to subprocess.Popen.
541 """
542 self.clean_environment()
543
544 cmdline = self.cmdline(args)
545 TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))
546 process = Executor.open_process(cmdline,
547 cwd=self._pex if with_chroot else os.getcwd(),
548 preexec_fn=os.setsid if setsid else None,
549 stdin=kwargs.pop('stdin', None),
550 stdout=kwargs.pop('stdout', None),
551 stderr=kwargs.pop('stderr', None),
552 **kwargs)
553 return process.wait() if blocking else process
554
555 def _do_entry_point_verification(self):
556
557 entry_point = self._pex_info.entry_point
558 ep_split = entry_point.split(':')
559
560 # a.b.c:m ->
561 # ep_module = 'a.b.c'
562 # ep_method = 'm'
563
564 # Only module is specified
565 if len(ep_split) == 1:
566 ep_module = ep_split[0]
567 import_statement = 'import {}'.format(ep_module)
568 elif len(ep_split) == 2:
569 ep_module = ep_split[0]
570 ep_method = ep_split[1]
571 import_statement = 'from {} import {}'.format(ep_module, ep_method)
572 else:
573 raise self.InvalidEntryPoint("Failed to parse: `{}`".format(entry_point))
574
575 with named_temporary_file() as fp:
576 fp.write(import_statement.encode('utf-8'))
577 fp.close()
578 retcode = self.run([fp.name], env={'PEX_INTERPRETER': '1'})
579 if retcode != 0:
580 raise self.InvalidEntryPoint('Invalid entry point: `{}`\n'
581 'Entry point verification failed: `{}`'
582 .format(entry_point, import_statement))
583
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/pex.py b/pex/pex.py
--- a/pex/pex.py
+++ b/pex/pex.py
@@ -175,8 +175,8 @@
for module_name, module in modules.items():
# Tainted modules should be dropped.
- module_file = getattr(module, '__file__', os.devnull)
- if cls._tainted_path(module_file, site_libs):
+ module_file = getattr(module, '__file__', None)
+ if module_file and cls._tainted_path(module_file, site_libs):
TRACER.log('Dropping %s' % (module_name,), V=3)
continue
| {"golden_diff": "diff --git a/pex/pex.py b/pex/pex.py\n--- a/pex/pex.py\n+++ b/pex/pex.py\n@@ -175,8 +175,8 @@\n \n for module_name, module in modules.items():\n # Tainted modules should be dropped.\n- module_file = getattr(module, '__file__', os.devnull)\n- if cls._tainted_path(module_file, site_libs):\n+ module_file = getattr(module, '__file__', None)\n+ if module_file and cls._tainted_path(module_file, site_libs):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n", "issue": "version 1.6.9 fails with null module __file__ attribute\nOn my arch linux system, after the pants upgrade to version 1.6.9, a number of pieces of pants functionality that make use of pex started failing with the following error message:\r\n\r\n```\r\n Traceback (most recent call last):\r\n File \"/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py\", line 356, in execute\r\n self.patch_sys(pex_inherit_path)\r\n File \"/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py\", line 285, in patch_sys\r\n new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)\r\n File \"/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py\", line 259, in minimum_sys\r\n sys_modules = cls.minimum_sys_modules(site_libs)\r\n File \"/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py\", line 179, in minimum_sys_modules\r\n if cls._tainted_path(module_file, site_libs):\r\n File \"/home/gregs/code/pants/.pants.d/test/pytest-prep/CPython-3.7.3/7c81bb6ec656c4064b548c392d519f0695716e81/.bootstrap/pex/pex.py\", line 157, in _tainted_path\r\n paths = frozenset([path, os.path.realpath(path)])\r\n File \"/home/gregs/code/pants/build-support/pants_dev_deps.py37.venv/lib/python3.7/posixpath.py\", line 394, in realpath\r\n filename = os.fspath(filename)\r\n TypeError: expected str, bytes or os.PathLike object, not NoneType\r\n```\r\nIt looks like what's going on here is that https://github.com/pantsbuild/pex/blob/master/pex/pex.py#L178 is iterating over every module and extracting the `__file__` attribute of that mdoule. However, some module on my system seems to have a `None` value for that attribute, which is causing code downstream of that line to fail. 
It looks like the commit https://github.com/pantsbuild/pex/pull/752/commits/b5f7e41780c8f05a07640dc09fdeed67e1b7a6a6 might be relevant.\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\nfrom distutils import sysconfig\nfrom site import USER_SITE\n\nimport pex.third_party.pkg_resources as pkg_resources\nfrom pex import third_party\nfrom pex.bootstrap import Bootstrap\nfrom pex.common import die\nfrom pex.environment import PEXEnvironment\nfrom pex.executor import Executor\nfrom pex.finders import (\n get_entry_point_from_console_script,\n get_script_from_distributions,\n unregister_finders\n)\nfrom pex.interpreter import PythonInterpreter\nfrom pex.orderedset import OrderedSet\nfrom pex.pex_info import PexInfo\nfrom pex.third_party.pkg_resources import EntryPoint, WorkingSet, find_distributions\nfrom pex.tracer import TRACER\nfrom pex.util import iter_pth_paths, named_temporary_file\nfrom pex.variables import ENV\n\n\nclass DevNull(object):\n def __init__(self):\n pass\n\n def write(self, *args, **kw):\n pass\n\n def flush(self):\n pass\n\n\nclass PEX(object): # noqa: T000\n \"\"\"PEX, n. A self-contained python environment.\"\"\"\n\n class Error(Exception): pass\n class NotFound(Error): pass\n class InvalidEntryPoint(Error): pass\n\n @classmethod\n def clean_environment(cls):\n try:\n del os.environ['MACOSX_DEPLOYMENT_TARGET']\n except KeyError:\n pass\n # Cannot change dictionary size during __iter__\n filter_keys = [key for key in os.environ if key.startswith('PEX_')]\n for key in filter_keys:\n del os.environ[key]\n\n def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV, verify_entry_point=False):\n self._pex = pex\n self._interpreter = interpreter or PythonInterpreter.get()\n self._pex_info = PexInfo.from_pex(self._pex)\n self._pex_info_overrides = PexInfo.from_env(env=env)\n self._vars = env\n self._envs = []\n self._working_set = None\n if verify_entry_point:\n self._do_entry_point_verification()\n\n def _activate(self):\n if not self._working_set:\n working_set = WorkingSet([])\n\n # set up the local .pex environment\n pex_info = self._pex_info.copy()\n pex_info.update(self._pex_info_overrides)\n pex_info.merge_pex_path(self._vars.PEX_PATH)\n self._envs.append(PEXEnvironment(self._pex, pex_info, interpreter=self._interpreter))\n # N.B. by this point, `pex_info.pex_path` will contain a single pex path\n # merged from pex_path in `PEX-INFO` and `PEX_PATH` set in the environment.\n # `PEX_PATH` entries written into `PEX-INFO` take precedence over those set\n # in the environment.\n if pex_info.pex_path:\n # set up other environments as specified in pex_path\n for pex_path in filter(None, pex_info.pex_path.split(os.pathsep)):\n pex_info = PexInfo.from_pex(pex_path)\n pex_info.update(self._pex_info_overrides)\n self._envs.append(PEXEnvironment(pex_path, pex_info, interpreter=self._interpreter))\n\n # activate all of them\n for env in self._envs:\n for dist in env.activate():\n working_set.add(dist)\n\n # Ensure that pkg_resources is not imported until at least every pex environment\n # (i.e. 
PEX_PATH) has been merged into the environment\n PEXEnvironment.declare_namespace_packages(working_set)\n self._working_set = working_set\n\n return self._working_set\n\n @classmethod\n def _extras_paths(cls):\n standard_lib = sysconfig.get_python_lib(standard_lib=True)\n\n try:\n makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())\n except (AttributeError, IOError):\n # This is not available by default in PyPy's distutils.sysconfig or it simply is\n # no longer available on the system (IOError ENOENT)\n makefile = {}\n\n extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))\n for path in extras_paths:\n yield os.path.join(standard_lib, path)\n\n # Handle .pth injected paths as extras.\n sitedirs = cls._get_site_packages()\n for pth_path in cls._scan_pth_files(sitedirs):\n TRACER.log('Found .pth file: %s' % pth_path, V=3)\n for extras_path in iter_pth_paths(pth_path):\n yield extras_path\n\n @staticmethod\n def _scan_pth_files(dir_paths):\n \"\"\"Given an iterable of directory paths, yield paths to all .pth files within.\"\"\"\n for dir_path in dir_paths:\n if not os.path.exists(dir_path):\n continue\n\n pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))\n for pth_filename in pth_filenames:\n yield os.path.join(dir_path, pth_filename)\n\n @staticmethod\n def _get_site_packages():\n try:\n from site import getsitepackages\n return set(getsitepackages())\n except ImportError:\n return set()\n\n @classmethod\n def site_libs(cls):\n site_libs = cls._get_site_packages()\n site_libs.update([sysconfig.get_python_lib(plat_specific=False),\n sysconfig.get_python_lib(plat_specific=True)])\n # On windows getsitepackages() returns the python stdlib too.\n if sys.prefix in site_libs:\n site_libs.remove(sys.prefix)\n real_site_libs = set(os.path.realpath(path) for path in site_libs)\n return site_libs | real_site_libs\n\n @classmethod\n def _tainted_path(cls, path, site_libs):\n paths = frozenset([path, os.path.realpath(path)])\n return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)\n\n @classmethod\n def minimum_sys_modules(cls, site_libs, modules=None):\n \"\"\"Given a set of site-packages paths, return a \"clean\" sys.modules.\n\n When importing site, modules within sys.modules have their __path__'s populated with\n additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution\n metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace\n packages to leak into imports despite being scrubbed from sys.path.\n\n NOTE: This method mutates modules' __path__ attributes in sys.modules, so this is currently an\n irreversible operation.\n \"\"\"\n\n modules = modules or sys.modules\n new_modules = {}\n\n for module_name, module in modules.items():\n # Tainted modules should be dropped.\n module_file = getattr(module, '__file__', os.devnull)\n if cls._tainted_path(module_file, site_libs):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n\n # Untainted non-packages (builtin modules) need no further special handling and can stay.\n if not hasattr(module, '__path__'):\n new_modules[module_name] = module\n continue\n\n # Unexpected objects, e.g. 
PEP 420 namespace packages, should just be dropped.\n if not isinstance(module.__path__, list):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n\n # Drop tainted package paths.\n for k in reversed(range(len(module.__path__))):\n if cls._tainted_path(module.__path__[k], site_libs):\n TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)\n module.__path__.pop(k)\n\n # The package still contains untainted path elements, so it can stay.\n if module.__path__:\n new_modules[module_name] = module\n\n return new_modules\n\n @classmethod\n def minimum_sys_path(cls, site_libs, inherit_path):\n scrub_paths = OrderedSet()\n site_distributions = OrderedSet()\n user_site_distributions = OrderedSet()\n\n def all_distribution_paths(path):\n locations = set(dist.location for dist in find_distributions(path))\n return set([path]) | locations | set(os.path.realpath(path) for path in locations)\n\n for path_element in sys.path:\n if cls._tainted_path(path_element, site_libs):\n TRACER.log('Tainted path element: %s' % path_element)\n site_distributions.update(all_distribution_paths(path_element))\n else:\n TRACER.log('Not a tainted path element: %s' % path_element, V=2)\n\n user_site_distributions.update(all_distribution_paths(USER_SITE))\n\n if inherit_path == 'false':\n scrub_paths = site_distributions | user_site_distributions\n for path in user_site_distributions:\n TRACER.log('Scrubbing from user site: %s' % path)\n for path in site_distributions:\n TRACER.log('Scrubbing from site-packages: %s' % path)\n\n scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)\n scrub_from_importer_cache = filter(\n lambda key: any(key.startswith(path) for path in scrub_paths),\n sys.path_importer_cache.keys())\n scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()\n if key not in scrub_from_importer_cache)\n\n for importer_cache_entry in scrub_from_importer_cache:\n TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)\n\n return scrubbed_sys_path, scrubbed_importer_cache\n\n @classmethod\n def minimum_sys(cls, inherit_path):\n \"\"\"Return the minimum sys necessary to run this interpreter, a la python -S.\n\n :returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a\n bare python installation.\n \"\"\"\n site_libs = set(cls.site_libs())\n for site_lib in site_libs:\n TRACER.log('Found site-library: %s' % site_lib)\n for extras_path in cls._extras_paths():\n TRACER.log('Found site extra: %s' % extras_path)\n site_libs.add(extras_path)\n site_libs = set(os.path.normpath(path) for path in site_libs)\n\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)\n sys_modules = cls.minimum_sys_modules(site_libs)\n\n return sys_path, sys_path_importer_cache, sys_modules\n\n @classmethod\n def patch_pkg_resources(cls, working_set):\n \"\"\"Patch pkg_resources given a new working set.\"\"\"\n pkg_resources.working_set = working_set\n pkg_resources.require = working_set.require\n pkg_resources.iter_entry_points = working_set.iter_entry_points\n pkg_resources.run_script = pkg_resources.run_main = working_set.run_script\n pkg_resources.add_activation_listener = working_set.subscribe\n\n # Thar be dragons -- when this function exits, the interpreter is potentially in a wonky state\n # since the patches here (minimum_sys_modules for example) actually mutate global state.\n def patch_sys(self, inherit_path):\n \"\"\"Patch sys with all site scrubbed.\"\"\"\n def patch_dict(old_value, 
new_value):\n old_value.clear()\n old_value.update(new_value)\n\n def patch_all(path, path_importer_cache, modules):\n sys.path[:] = path\n patch_dict(sys.path_importer_cache, path_importer_cache)\n patch_dict(sys.modules, modules)\n\n new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)\n\n patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)\n\n def _wrap_coverage(self, runner, *args):\n if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:\n return runner(*args)\n\n try:\n import coverage\n except ImportError:\n die('Could not bootstrap coverage module, aborting.')\n\n pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME\n if pex_coverage_filename is not None:\n cov = coverage.coverage(data_file=pex_coverage_filename)\n else:\n cov = coverage.coverage(data_suffix=True)\n\n TRACER.log('Starting coverage.')\n cov.start()\n\n try:\n return runner(*args)\n finally:\n TRACER.log('Stopping coverage')\n cov.stop()\n\n # TODO(wickman) Post-process coverage to elide $PEX_ROOT and make\n # the report more useful/less noisy. #89\n if pex_coverage_filename:\n cov.save()\n else:\n cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)\n\n def _wrap_profiling(self, runner, *args):\n if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:\n return runner(*args)\n\n pex_profile_filename = self._vars.PEX_PROFILE_FILENAME\n pex_profile_sort = self._vars.PEX_PROFILE_SORT\n try:\n import cProfile as profile\n except ImportError:\n import profile\n\n profiler = profile.Profile()\n\n try:\n return profiler.runcall(runner, *args)\n finally:\n if pex_profile_filename is not None:\n profiler.dump_stats(pex_profile_filename)\n else:\n profiler.print_stats(sort=pex_profile_sort)\n\n def path(self):\n \"\"\"Return the path this PEX was built at.\"\"\"\n return self._pex\n\n def execute(self):\n \"\"\"Execute the PEX.\n\n This function makes assumptions that it is the last function called by\n the interpreter.\n \"\"\"\n teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE\n try:\n pex_inherit_path = self._vars.PEX_INHERIT_PATH\n if pex_inherit_path == \"false\":\n pex_inherit_path = self._pex_info.inherit_path\n self.patch_sys(pex_inherit_path)\n working_set = self._activate()\n self.patch_pkg_resources(working_set)\n exit_code = self._wrap_coverage(self._wrap_profiling, self._execute)\n if exit_code:\n sys.exit(exit_code)\n except Exception:\n # Allow the current sys.excepthook to handle this app exception before we tear things down in\n # finally, then reraise so that the exit status is reflected correctly.\n sys.excepthook(*sys.exc_info())\n raise\n except SystemExit as se:\n # Print a SystemExit error message, avoiding a traceback in python3.\n # This must happen here, as sys.stderr is about to be torn down\n if not isinstance(se.code, int) and se.code is not None:\n print(se.code, file=sys.stderr)\n raise\n finally:\n # squash all exceptions on interpreter teardown -- the primary type here are\n # atexit handlers failing to run because of things such as:\n # http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit\n if not teardown_verbosity:\n sys.stderr.flush()\n sys.stderr = DevNull()\n sys.excepthook = lambda *a, **kw: None\n\n def _execute(self):\n force_interpreter = self._vars.PEX_INTERPRETER\n\n self.clean_environment()\n\n if force_interpreter:\n TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')\n return self.execute_interpreter()\n\n if 
self._pex_info_overrides.script and self._pex_info_overrides.entry_point:\n die('Cannot specify both script and entry_point for a PEX!')\n\n if self._pex_info.script and self._pex_info.entry_point:\n die('Cannot specify both script and entry_point for a PEX!')\n\n if self._pex_info_overrides.script:\n return self.execute_script(self._pex_info_overrides.script)\n elif self._pex_info_overrides.entry_point:\n return self.execute_entry(self._pex_info_overrides.entry_point)\n elif self._pex_info.script:\n return self.execute_script(self._pex_info.script)\n elif self._pex_info.entry_point:\n return self.execute_entry(self._pex_info.entry_point)\n else:\n TRACER.log('No entry point specified, dropping into interpreter')\n return self.execute_interpreter()\n\n @classmethod\n def demote_bootstrap(cls):\n TRACER.log('Bootstrap complete, performing final sys.path modifications...')\n\n should_log = {level: TRACER.should_log(V=level) for level in range(1, 10)}\n\n def log(msg, V=1):\n if should_log.get(V, False):\n print('pex: {}'.format(msg), file=sys.stderr)\n\n # Remove the third party resources pex uses and demote pex bootstrap code to the end of\n # sys.path for the duration of the run to allow conflicting versions supplied by user\n # dependencies to win during the course of the execution of user code.\n unregister_finders()\n third_party.uninstall()\n\n bootstrap = Bootstrap.locate()\n log('Demoting code from %s' % bootstrap, V=2)\n for module in bootstrap.demote():\n log('un-imported {}'.format(module), V=9)\n\n import pex\n log('Re-imported pex from {}'.format(pex.__path__), V=3)\n\n log('PYTHONPATH contains:')\n for element in sys.path:\n log(' %c %s' % (' ' if os.path.exists(element) else '*', element))\n log(' * - paths that do not exist or will be imported via zipimport')\n\n def execute_interpreter(self):\n args = sys.argv[1:]\n if args:\n # NB: We take care here to setup sys.argv to match how CPython does it for each case.\n arg = args[0]\n if arg == '-c':\n content = args[1]\n sys.argv = ['-c'] + args[2:]\n self.execute_content('-c <cmd>', content, argv0='-c')\n elif arg == '-m':\n module = args[1]\n sys.argv = args[1:]\n self.execute_module(module)\n else:\n try:\n if arg == '-':\n content = sys.stdin.read()\n else:\n with open(arg) as fp:\n content = fp.read()\n except IOError as e:\n die(\"Could not open %s in the environment [%s]: %s\" % (arg, sys.argv[0], e))\n sys.argv = args\n self.execute_content(arg, content)\n else:\n self.demote_bootstrap()\n\n import code\n code.interact()\n\n def execute_script(self, script_name):\n dists = list(self._activate())\n\n dist, entry_point = get_entry_point_from_console_script(script_name, dists)\n if entry_point:\n TRACER.log('Found console_script %r in %r' % (entry_point, dist))\n sys.exit(self.execute_entry(entry_point))\n\n dist, script_path, script_content = get_script_from_distributions(script_name, dists)\n if not dist:\n raise self.NotFound('Could not find script %r in pex!' % script_name)\n TRACER.log('Found script %r in %r' % (script_name, dist))\n return self.execute_content(script_path, script_content, argv0=script_name)\n\n @classmethod\n def execute_content(cls, name, content, argv0=None):\n argv0 = argv0 or name\n try:\n ast = compile(content, name, 'exec', flags=0, dont_inherit=1)\n except SyntaxError:\n die('Unable to parse %s. PEX script support only supports Python scripts.' 
% name)\n\n cls.demote_bootstrap()\n\n from pex.compatibility import exec_function\n sys.argv[0] = argv0\n globals_map = globals().copy()\n globals_map['__name__'] = '__main__'\n globals_map['__file__'] = name\n exec_function(ast, globals_map)\n\n @classmethod\n def execute_entry(cls, entry_point):\n runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module\n return runner(entry_point)\n\n @classmethod\n def execute_module(cls, module_name):\n cls.demote_bootstrap()\n\n import runpy\n runpy.run_module(module_name, run_name='__main__')\n\n @classmethod\n def execute_pkg_resources(cls, spec):\n entry = EntryPoint.parse(\"run = {}\".format(spec))\n cls.demote_bootstrap()\n\n runner = entry.resolve()\n return runner()\n\n def cmdline(self, args=()):\n \"\"\"The commandline to run this environment.\n\n :keyword args: Additional arguments to be passed to the application being invoked by the\n environment.\n \"\"\"\n cmds = [self._interpreter.binary]\n cmds.append(self._pex)\n cmds.extend(args)\n return cmds\n\n def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kwargs):\n \"\"\"Run the PythonEnvironment in an interpreter in a subprocess.\n\n :keyword args: Additional arguments to be passed to the application being invoked by the\n environment.\n :keyword with_chroot: Run with cwd set to the environment's working directory.\n :keyword blocking: If true, return the return code of the subprocess.\n If false, return the Popen object of the invoked subprocess.\n :keyword setsid: If true, run the PEX in a separate operating system session.\n\n Remaining keyword arguments are passed directly to subprocess.Popen.\n \"\"\"\n self.clean_environment()\n\n cmdline = self.cmdline(args)\n TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))\n process = Executor.open_process(cmdline,\n cwd=self._pex if with_chroot else os.getcwd(),\n preexec_fn=os.setsid if setsid else None,\n stdin=kwargs.pop('stdin', None),\n stdout=kwargs.pop('stdout', None),\n stderr=kwargs.pop('stderr', None),\n **kwargs)\n return process.wait() if blocking else process\n\n def _do_entry_point_verification(self):\n\n entry_point = self._pex_info.entry_point\n ep_split = entry_point.split(':')\n\n # a.b.c:m ->\n # ep_module = 'a.b.c'\n # ep_method = 'm'\n\n # Only module is specified\n if len(ep_split) == 1:\n ep_module = ep_split[0]\n import_statement = 'import {}'.format(ep_module)\n elif len(ep_split) == 2:\n ep_module = ep_split[0]\n ep_method = ep_split[1]\n import_statement = 'from {} import {}'.format(ep_module, ep_method)\n else:\n raise self.InvalidEntryPoint(\"Failed to parse: `{}`\".format(entry_point))\n\n with named_temporary_file() as fp:\n fp.write(import_statement.encode('utf-8'))\n fp.close()\n retcode = self.run([fp.name], env={'PEX_INTERPRETER': '1'})\n if retcode != 0:\n raise self.InvalidEntryPoint('Invalid entry point: `{}`\\n'\n 'Entry point verification failed: `{}`'\n .format(entry_point, import_statement))\n", "path": "pex/pex.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport sys\nfrom distutils import sysconfig\nfrom site import USER_SITE\n\nimport pex.third_party.pkg_resources as pkg_resources\nfrom pex import third_party\nfrom pex.bootstrap import Bootstrap\nfrom pex.common import die\nfrom pex.environment import PEXEnvironment\nfrom pex.executor import Executor\nfrom 
pex.finders import (\n get_entry_point_from_console_script,\n get_script_from_distributions,\n unregister_finders\n)\nfrom pex.interpreter import PythonInterpreter\nfrom pex.orderedset import OrderedSet\nfrom pex.pex_info import PexInfo\nfrom pex.third_party.pkg_resources import EntryPoint, WorkingSet, find_distributions\nfrom pex.tracer import TRACER\nfrom pex.util import iter_pth_paths, named_temporary_file\nfrom pex.variables import ENV\n\n\nclass DevNull(object):\n def __init__(self):\n pass\n\n def write(self, *args, **kw):\n pass\n\n def flush(self):\n pass\n\n\nclass PEX(object): # noqa: T000\n \"\"\"PEX, n. A self-contained python environment.\"\"\"\n\n class Error(Exception): pass\n class NotFound(Error): pass\n class InvalidEntryPoint(Error): pass\n\n @classmethod\n def clean_environment(cls):\n try:\n del os.environ['MACOSX_DEPLOYMENT_TARGET']\n except KeyError:\n pass\n # Cannot change dictionary size during __iter__\n filter_keys = [key for key in os.environ if key.startswith('PEX_')]\n for key in filter_keys:\n del os.environ[key]\n\n def __init__(self, pex=sys.argv[0], interpreter=None, env=ENV, verify_entry_point=False):\n self._pex = pex\n self._interpreter = interpreter or PythonInterpreter.get()\n self._pex_info = PexInfo.from_pex(self._pex)\n self._pex_info_overrides = PexInfo.from_env(env=env)\n self._vars = env\n self._envs = []\n self._working_set = None\n if verify_entry_point:\n self._do_entry_point_verification()\n\n def _activate(self):\n if not self._working_set:\n working_set = WorkingSet([])\n\n # set up the local .pex environment\n pex_info = self._pex_info.copy()\n pex_info.update(self._pex_info_overrides)\n pex_info.merge_pex_path(self._vars.PEX_PATH)\n self._envs.append(PEXEnvironment(self._pex, pex_info, interpreter=self._interpreter))\n # N.B. by this point, `pex_info.pex_path` will contain a single pex path\n # merged from pex_path in `PEX-INFO` and `PEX_PATH` set in the environment.\n # `PEX_PATH` entries written into `PEX-INFO` take precedence over those set\n # in the environment.\n if pex_info.pex_path:\n # set up other environments as specified in pex_path\n for pex_path in filter(None, pex_info.pex_path.split(os.pathsep)):\n pex_info = PexInfo.from_pex(pex_path)\n pex_info.update(self._pex_info_overrides)\n self._envs.append(PEXEnvironment(pex_path, pex_info, interpreter=self._interpreter))\n\n # activate all of them\n for env in self._envs:\n for dist in env.activate():\n working_set.add(dist)\n\n # Ensure that pkg_resources is not imported until at least every pex environment\n # (i.e. 
PEX_PATH) has been merged into the environment\n PEXEnvironment.declare_namespace_packages(working_set)\n self._working_set = working_set\n\n return self._working_set\n\n @classmethod\n def _extras_paths(cls):\n standard_lib = sysconfig.get_python_lib(standard_lib=True)\n\n try:\n makefile = sysconfig.parse_makefile(sysconfig.get_makefile_filename())\n except (AttributeError, IOError):\n # This is not available by default in PyPy's distutils.sysconfig or it simply is\n # no longer available on the system (IOError ENOENT)\n makefile = {}\n\n extras_paths = filter(None, makefile.get('EXTRASPATH', '').split(':'))\n for path in extras_paths:\n yield os.path.join(standard_lib, path)\n\n # Handle .pth injected paths as extras.\n sitedirs = cls._get_site_packages()\n for pth_path in cls._scan_pth_files(sitedirs):\n TRACER.log('Found .pth file: %s' % pth_path, V=3)\n for extras_path in iter_pth_paths(pth_path):\n yield extras_path\n\n @staticmethod\n def _scan_pth_files(dir_paths):\n \"\"\"Given an iterable of directory paths, yield paths to all .pth files within.\"\"\"\n for dir_path in dir_paths:\n if not os.path.exists(dir_path):\n continue\n\n pth_filenames = (f for f in os.listdir(dir_path) if f.endswith('.pth'))\n for pth_filename in pth_filenames:\n yield os.path.join(dir_path, pth_filename)\n\n @staticmethod\n def _get_site_packages():\n try:\n from site import getsitepackages\n return set(getsitepackages())\n except ImportError:\n return set()\n\n @classmethod\n def site_libs(cls):\n site_libs = cls._get_site_packages()\n site_libs.update([sysconfig.get_python_lib(plat_specific=False),\n sysconfig.get_python_lib(plat_specific=True)])\n # On windows getsitepackages() returns the python stdlib too.\n if sys.prefix in site_libs:\n site_libs.remove(sys.prefix)\n real_site_libs = set(os.path.realpath(path) for path in site_libs)\n return site_libs | real_site_libs\n\n @classmethod\n def _tainted_path(cls, path, site_libs):\n paths = frozenset([path, os.path.realpath(path)])\n return any(path.startswith(site_lib) for site_lib in site_libs for path in paths)\n\n @classmethod\n def minimum_sys_modules(cls, site_libs, modules=None):\n \"\"\"Given a set of site-packages paths, return a \"clean\" sys.modules.\n\n When importing site, modules within sys.modules have their __path__'s populated with\n additional paths as defined by *-nspkg.pth in site-packages, or alternately by distribution\n metadata such as *.dist-info/namespace_packages.txt. This can possibly cause namespace\n packages to leak into imports despite being scrubbed from sys.path.\n\n NOTE: This method mutates modules' __path__ attributes in sys.modules, so this is currently an\n irreversible operation.\n \"\"\"\n\n modules = modules or sys.modules\n new_modules = {}\n\n for module_name, module in modules.items():\n # Tainted modules should be dropped.\n module_file = getattr(module, '__file__', None)\n if module_file and cls._tainted_path(module_file, site_libs):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n\n # Untainted non-packages (builtin modules) need no further special handling and can stay.\n if not hasattr(module, '__path__'):\n new_modules[module_name] = module\n continue\n\n # Unexpected objects, e.g. 
PEP 420 namespace packages, should just be dropped.\n if not isinstance(module.__path__, list):\n TRACER.log('Dropping %s' % (module_name,), V=3)\n continue\n\n # Drop tainted package paths.\n for k in reversed(range(len(module.__path__))):\n if cls._tainted_path(module.__path__[k], site_libs):\n TRACER.log('Scrubbing %s.__path__: %s' % (module_name, module.__path__[k]), V=3)\n module.__path__.pop(k)\n\n # The package still contains untainted path elements, so it can stay.\n if module.__path__:\n new_modules[module_name] = module\n\n return new_modules\n\n @classmethod\n def minimum_sys_path(cls, site_libs, inherit_path):\n scrub_paths = OrderedSet()\n site_distributions = OrderedSet()\n user_site_distributions = OrderedSet()\n\n def all_distribution_paths(path):\n locations = set(dist.location for dist in find_distributions(path))\n return set([path]) | locations | set(os.path.realpath(path) for path in locations)\n\n for path_element in sys.path:\n if cls._tainted_path(path_element, site_libs):\n TRACER.log('Tainted path element: %s' % path_element)\n site_distributions.update(all_distribution_paths(path_element))\n else:\n TRACER.log('Not a tainted path element: %s' % path_element, V=2)\n\n user_site_distributions.update(all_distribution_paths(USER_SITE))\n\n if inherit_path == 'false':\n scrub_paths = site_distributions | user_site_distributions\n for path in user_site_distributions:\n TRACER.log('Scrubbing from user site: %s' % path)\n for path in site_distributions:\n TRACER.log('Scrubbing from site-packages: %s' % path)\n\n scrubbed_sys_path = list(OrderedSet(sys.path) - scrub_paths)\n scrub_from_importer_cache = filter(\n lambda key: any(key.startswith(path) for path in scrub_paths),\n sys.path_importer_cache.keys())\n scrubbed_importer_cache = dict((key, value) for (key, value) in sys.path_importer_cache.items()\n if key not in scrub_from_importer_cache)\n\n for importer_cache_entry in scrub_from_importer_cache:\n TRACER.log('Scrubbing from path_importer_cache: %s' % importer_cache_entry, V=2)\n\n return scrubbed_sys_path, scrubbed_importer_cache\n\n @classmethod\n def minimum_sys(cls, inherit_path):\n \"\"\"Return the minimum sys necessary to run this interpreter, a la python -S.\n\n :returns: (sys.path, sys.path_importer_cache, sys.modules) tuple of a\n bare python installation.\n \"\"\"\n site_libs = set(cls.site_libs())\n for site_lib in site_libs:\n TRACER.log('Found site-library: %s' % site_lib)\n for extras_path in cls._extras_paths():\n TRACER.log('Found site extra: %s' % extras_path)\n site_libs.add(extras_path)\n site_libs = set(os.path.normpath(path) for path in site_libs)\n\n sys_path, sys_path_importer_cache = cls.minimum_sys_path(site_libs, inherit_path)\n sys_modules = cls.minimum_sys_modules(site_libs)\n\n return sys_path, sys_path_importer_cache, sys_modules\n\n @classmethod\n def patch_pkg_resources(cls, working_set):\n \"\"\"Patch pkg_resources given a new working set.\"\"\"\n pkg_resources.working_set = working_set\n pkg_resources.require = working_set.require\n pkg_resources.iter_entry_points = working_set.iter_entry_points\n pkg_resources.run_script = pkg_resources.run_main = working_set.run_script\n pkg_resources.add_activation_listener = working_set.subscribe\n\n # Thar be dragons -- when this function exits, the interpreter is potentially in a wonky state\n # since the patches here (minimum_sys_modules for example) actually mutate global state.\n def patch_sys(self, inherit_path):\n \"\"\"Patch sys with all site scrubbed.\"\"\"\n def patch_dict(old_value, 
new_value):\n old_value.clear()\n old_value.update(new_value)\n\n def patch_all(path, path_importer_cache, modules):\n sys.path[:] = path\n patch_dict(sys.path_importer_cache, path_importer_cache)\n patch_dict(sys.modules, modules)\n\n new_sys_path, new_sys_path_importer_cache, new_sys_modules = self.minimum_sys(inherit_path)\n\n patch_all(new_sys_path, new_sys_path_importer_cache, new_sys_modules)\n\n def _wrap_coverage(self, runner, *args):\n if not self._vars.PEX_COVERAGE and self._vars.PEX_COVERAGE_FILENAME is None:\n return runner(*args)\n\n try:\n import coverage\n except ImportError:\n die('Could not bootstrap coverage module, aborting.')\n\n pex_coverage_filename = self._vars.PEX_COVERAGE_FILENAME\n if pex_coverage_filename is not None:\n cov = coverage.coverage(data_file=pex_coverage_filename)\n else:\n cov = coverage.coverage(data_suffix=True)\n\n TRACER.log('Starting coverage.')\n cov.start()\n\n try:\n return runner(*args)\n finally:\n TRACER.log('Stopping coverage')\n cov.stop()\n\n # TODO(wickman) Post-process coverage to elide $PEX_ROOT and make\n # the report more useful/less noisy. #89\n if pex_coverage_filename:\n cov.save()\n else:\n cov.report(show_missing=False, ignore_errors=True, file=sys.stdout)\n\n def _wrap_profiling(self, runner, *args):\n if not self._vars.PEX_PROFILE and self._vars.PEX_PROFILE_FILENAME is None:\n return runner(*args)\n\n pex_profile_filename = self._vars.PEX_PROFILE_FILENAME\n pex_profile_sort = self._vars.PEX_PROFILE_SORT\n try:\n import cProfile as profile\n except ImportError:\n import profile\n\n profiler = profile.Profile()\n\n try:\n return profiler.runcall(runner, *args)\n finally:\n if pex_profile_filename is not None:\n profiler.dump_stats(pex_profile_filename)\n else:\n profiler.print_stats(sort=pex_profile_sort)\n\n def path(self):\n \"\"\"Return the path this PEX was built at.\"\"\"\n return self._pex\n\n def execute(self):\n \"\"\"Execute the PEX.\n\n This function makes assumptions that it is the last function called by\n the interpreter.\n \"\"\"\n teardown_verbosity = self._vars.PEX_TEARDOWN_VERBOSE\n try:\n pex_inherit_path = self._vars.PEX_INHERIT_PATH\n if pex_inherit_path == \"false\":\n pex_inherit_path = self._pex_info.inherit_path\n self.patch_sys(pex_inherit_path)\n working_set = self._activate()\n self.patch_pkg_resources(working_set)\n exit_code = self._wrap_coverage(self._wrap_profiling, self._execute)\n if exit_code:\n sys.exit(exit_code)\n except Exception:\n # Allow the current sys.excepthook to handle this app exception before we tear things down in\n # finally, then reraise so that the exit status is reflected correctly.\n sys.excepthook(*sys.exc_info())\n raise\n except SystemExit as se:\n # Print a SystemExit error message, avoiding a traceback in python3.\n # This must happen here, as sys.stderr is about to be torn down\n if not isinstance(se.code, int) and se.code is not None:\n print(se.code, file=sys.stderr)\n raise\n finally:\n # squash all exceptions on interpreter teardown -- the primary type here are\n # atexit handlers failing to run because of things such as:\n # http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit\n if not teardown_verbosity:\n sys.stderr.flush()\n sys.stderr = DevNull()\n sys.excepthook = lambda *a, **kw: None\n\n def _execute(self):\n force_interpreter = self._vars.PEX_INTERPRETER\n\n self.clean_environment()\n\n if force_interpreter:\n TRACER.log('PEX_INTERPRETER specified, dropping into interpreter')\n return self.execute_interpreter()\n\n if 
self._pex_info_overrides.script and self._pex_info_overrides.entry_point:\n die('Cannot specify both script and entry_point for a PEX!')\n\n if self._pex_info.script and self._pex_info.entry_point:\n die('Cannot specify both script and entry_point for a PEX!')\n\n if self._pex_info_overrides.script:\n return self.execute_script(self._pex_info_overrides.script)\n elif self._pex_info_overrides.entry_point:\n return self.execute_entry(self._pex_info_overrides.entry_point)\n elif self._pex_info.script:\n return self.execute_script(self._pex_info.script)\n elif self._pex_info.entry_point:\n return self.execute_entry(self._pex_info.entry_point)\n else:\n TRACER.log('No entry point specified, dropping into interpreter')\n return self.execute_interpreter()\n\n @classmethod\n def demote_bootstrap(cls):\n TRACER.log('Bootstrap complete, performing final sys.path modifications...')\n\n should_log = {level: TRACER.should_log(V=level) for level in range(1, 10)}\n\n def log(msg, V=1):\n if should_log.get(V, False):\n print('pex: {}'.format(msg), file=sys.stderr)\n\n # Remove the third party resources pex uses and demote pex bootstrap code to the end of\n # sys.path for the duration of the run to allow conflicting versions supplied by user\n # dependencies to win during the course of the execution of user code.\n unregister_finders()\n third_party.uninstall()\n\n bootstrap = Bootstrap.locate()\n log('Demoting code from %s' % bootstrap, V=2)\n for module in bootstrap.demote():\n log('un-imported {}'.format(module), V=9)\n\n import pex\n log('Re-imported pex from {}'.format(pex.__path__), V=3)\n\n log('PYTHONPATH contains:')\n for element in sys.path:\n log(' %c %s' % (' ' if os.path.exists(element) else '*', element))\n log(' * - paths that do not exist or will be imported via zipimport')\n\n def execute_interpreter(self):\n args = sys.argv[1:]\n if args:\n # NB: We take care here to setup sys.argv to match how CPython does it for each case.\n arg = args[0]\n if arg == '-c':\n content = args[1]\n sys.argv = ['-c'] + args[2:]\n self.execute_content('-c <cmd>', content, argv0='-c')\n elif arg == '-m':\n module = args[1]\n sys.argv = args[1:]\n self.execute_module(module)\n else:\n try:\n if arg == '-':\n content = sys.stdin.read()\n else:\n with open(arg) as fp:\n content = fp.read()\n except IOError as e:\n die(\"Could not open %s in the environment [%s]: %s\" % (arg, sys.argv[0], e))\n sys.argv = args\n self.execute_content(arg, content)\n else:\n self.demote_bootstrap()\n\n import code\n code.interact()\n\n def execute_script(self, script_name):\n dists = list(self._activate())\n\n dist, entry_point = get_entry_point_from_console_script(script_name, dists)\n if entry_point:\n TRACER.log('Found console_script %r in %r' % (entry_point, dist))\n sys.exit(self.execute_entry(entry_point))\n\n dist, script_path, script_content = get_script_from_distributions(script_name, dists)\n if not dist:\n raise self.NotFound('Could not find script %r in pex!' % script_name)\n TRACER.log('Found script %r in %r' % (script_name, dist))\n return self.execute_content(script_path, script_content, argv0=script_name)\n\n @classmethod\n def execute_content(cls, name, content, argv0=None):\n argv0 = argv0 or name\n try:\n ast = compile(content, name, 'exec', flags=0, dont_inherit=1)\n except SyntaxError:\n die('Unable to parse %s. PEX script support only supports Python scripts.' 
% name)\n\n cls.demote_bootstrap()\n\n from pex.compatibility import exec_function\n sys.argv[0] = argv0\n globals_map = globals().copy()\n globals_map['__name__'] = '__main__'\n globals_map['__file__'] = name\n exec_function(ast, globals_map)\n\n @classmethod\n def execute_entry(cls, entry_point):\n runner = cls.execute_pkg_resources if ':' in entry_point else cls.execute_module\n return runner(entry_point)\n\n @classmethod\n def execute_module(cls, module_name):\n cls.demote_bootstrap()\n\n import runpy\n runpy.run_module(module_name, run_name='__main__')\n\n @classmethod\n def execute_pkg_resources(cls, spec):\n entry = EntryPoint.parse(\"run = {}\".format(spec))\n cls.demote_bootstrap()\n\n runner = entry.resolve()\n return runner()\n\n def cmdline(self, args=()):\n \"\"\"The commandline to run this environment.\n\n :keyword args: Additional arguments to be passed to the application being invoked by the\n environment.\n \"\"\"\n cmds = [self._interpreter.binary]\n cmds.append(self._pex)\n cmds.extend(args)\n return cmds\n\n def run(self, args=(), with_chroot=False, blocking=True, setsid=False, **kwargs):\n \"\"\"Run the PythonEnvironment in an interpreter in a subprocess.\n\n :keyword args: Additional arguments to be passed to the application being invoked by the\n environment.\n :keyword with_chroot: Run with cwd set to the environment's working directory.\n :keyword blocking: If true, return the return code of the subprocess.\n If false, return the Popen object of the invoked subprocess.\n :keyword setsid: If true, run the PEX in a separate operating system session.\n\n Remaining keyword arguments are passed directly to subprocess.Popen.\n \"\"\"\n self.clean_environment()\n\n cmdline = self.cmdline(args)\n TRACER.log('PEX.run invoking %s' % ' '.join(cmdline))\n process = Executor.open_process(cmdline,\n cwd=self._pex if with_chroot else os.getcwd(),\n preexec_fn=os.setsid if setsid else None,\n stdin=kwargs.pop('stdin', None),\n stdout=kwargs.pop('stdout', None),\n stderr=kwargs.pop('stderr', None),\n **kwargs)\n return process.wait() if blocking else process\n\n def _do_entry_point_verification(self):\n\n entry_point = self._pex_info.entry_point\n ep_split = entry_point.split(':')\n\n # a.b.c:m ->\n # ep_module = 'a.b.c'\n # ep_method = 'm'\n\n # Only module is specified\n if len(ep_split) == 1:\n ep_module = ep_split[0]\n import_statement = 'import {}'.format(ep_module)\n elif len(ep_split) == 2:\n ep_module = ep_split[0]\n ep_method = ep_split[1]\n import_statement = 'from {} import {}'.format(ep_module, ep_method)\n else:\n raise self.InvalidEntryPoint(\"Failed to parse: `{}`\".format(entry_point))\n\n with named_temporary_file() as fp:\n fp.write(import_statement.encode('utf-8'))\n fp.close()\n retcode = self.run([fp.name], env={'PEX_INTERPRETER': '1'})\n if retcode != 0:\n raise self.InvalidEntryPoint('Invalid entry point: `{}`\\n'\n 'Entry point verification failed: `{}`'\n .format(entry_point, import_statement))\n", "path": "pex/pex.py"}]} |
gh_patches_debug_22113 | rasdani/github-patches | git_diff | rlworkgroup__garage-1879 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TF 2.3.0 incompatibility
TF 2.3.0 was released yesterday, and seems to be incompatible with TFP <0.11.0 and breaks imports (https://travis-ci.com/github/rlworkgroup/garage/jobs/365922927#L3061). We pin TFP to <=0.10.0 in the first place to resolve cloudpickle version mismatch (https://github.com/rlworkgroup/garage/issues/1758). Since TFP 0.11.0 still pins cloudpickle to 1.3.0 while gym needs 1.2.x, unpinning TFP won't work. So for now, TF needs to be pinned to <2.3.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """setuptools based setup module."""
2 import os
3
4 from setuptools import find_packages, setup
5
6 GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'
7 GYM_VERSION = '0.15.4'
8
9 # Required dependencies
10 REQUIRED = [
11 # Please keep alphabetized
12 'akro',
13 'click>=2.0',
14 'cloudpickle<1.5',
15 'cma==2.7.0',
16 'dowel==0.0.3',
17 f'gym[atari,box2d,classic_control]=={GYM_VERSION}',
18 'numpy>=1.14.5',
19 'psutil',
20 # Pyglet 1.4.0 introduces some api change which breaks some
21 # gym environments
22 # See: https://github.com/openai/gym/issues/1588
23 'pyglet<1.4.0,>=1.3.0',
24 'python-dateutil',
25 'ray',
26 'scikit-image',
27 'scipy',
28 'setproctitle>=1.0',
29 'tensorflow>=1.14,<2.3.0',
30 'tensorflow-probability<=0.10.0',
31 'torch>=1.0.0,!=1.5.0,<1.6.0',
32 'torchvision>=0.2.1,<0.7.0',
33 ]
34
35 # Dependencies for optional features
36 EXTRAS = {}
37
38 EXTRAS['mujoco'] = [
39 'mujoco-py<2.1,>=2.0',
40 f'gym[all]=={GYM_VERSION}',
41 ]
42
43 EXTRAS['dm_control'] = [
44 # dm_control throws an error during install about not being able to
45 # find a build dependency (absl-py). Later pip executes the `install`
46 # command again and the install succeeds because absl-py has been
47 # installed. This is stupid, but harmless.
48 'dm_control',
49 ]
50
51 EXTRAS['bullet'] = ['mpi4py', 'pybullet']
52
53 EXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))
54
55 # Development dependencies (*not* included in 'all')
56 EXTRAS['dev'] = [
57 # Please keep alphabetized
58 'flake8',
59 'flake8-docstrings>=1.5.0',
60 'flake8-import-order',
61 f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501
62 'isort>=4.3.21,<5.0.0',
63 'pep8-naming==0.7.0',
64 'pre-commit',
65 'pycodestyle>=2.5.0',
66 'pydocstyle>=4.0.0',
67 'pylint>=2.5.3',
68 'pytest>=4.5.0', # Required for strict-markers
69 'pytest-cov',
70 'pytest-timeout',
71 'pytest-xdist',
72 'recommonmark',
73 'sphinx',
74 'sphinx-autoapi>=1.4.0',
75 'sphinx_rtd_theme',
76 'sphinxcontrib-bibtex',
77 'yapf==0.30.0',
78 ] # yapf: disable
79
80 with open('README.md') as f:
81 README = f.read()
82
83 # Get the package version dynamically
84 with open('VERSION') as v:
85 VERSION = v.read().strip()
86
87 setup(
88 name='garage',
89 version=VERSION,
90 author='Reinforcement Learning Working Group',
91 description='A toolkit for reproducible reinforcement learning research',
92 url='https://github.com/rlworkgroup/garage',
93 packages=find_packages(where='src'),
94 package_dir={'': 'src'},
95 scripts=['scripts/garage'],
96 python_requires='>=3.6',
97 install_requires=REQUIRED,
98 extras_require=EXTRAS,
99 license='MIT',
100 long_description=README,
101 long_description_content_type='text/markdown',
102 classifiers=[
103 'Development Status :: 4 - Beta',
104 'Intended Audience :: Developers',
105 'Intended Audience :: Education',
106 'Intended Audience :: Science/Research',
107 'License :: OSI Approved :: MIT License',
108 'Programming Language :: Python :: 3.6',
109 'Programming Language :: Python :: 3.7',
110 'Programming Language :: Python :: 3 :: Only',
111 'Topic :: Scientific/Engineering :: Artificial Intelligence',
112 'Topic :: Scientific/Engineering :: Mathematics',
113 'Topic :: Software Development :: Libraries',
114 ],
115 )
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,30 +4,26 @@
from setuptools import find_packages, setup
GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'
-GYM_VERSION = '0.15.4'
+GYM_VERSION = '0.17.2'
# Required dependencies
REQUIRED = [
# Please keep alphabetized
'akro',
'click>=2.0',
- 'cloudpickle<1.5',
+ 'cloudpickle==1.3',
'cma==2.7.0',
'dowel==0.0.3',
f'gym[atari,box2d,classic_control]=={GYM_VERSION}',
'numpy>=1.14.5',
'psutil',
- # Pyglet 1.4.0 introduces some api change which breaks some
- # gym environments
- # See: https://github.com/openai/gym/issues/1588
- 'pyglet<1.4.0,>=1.3.0',
'python-dateutil',
'ray',
'scikit-image',
'scipy',
'setproctitle>=1.0',
- 'tensorflow>=1.14,<2.3.0',
- 'tensorflow-probability<=0.10.0',
+ 'tensorflow>=1.14',
+ 'tensorflow-probability>=0.11.0',
'torch>=1.0.0,!=1.5.0,<1.6.0',
'torchvision>=0.2.1,<0.7.0',
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,30 +4,26 @@\n from setuptools import find_packages, setup\n \n GARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\n-GYM_VERSION = '0.15.4'\n+GYM_VERSION = '0.17.2'\n \n # Required dependencies\n REQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n- 'cloudpickle<1.5',\n+ 'cloudpickle==1.3',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n- # Pyglet 1.4.0 introduces some api change which breaks some\n- # gym environments\n- # See: https://github.com/openai/gym/issues/1588\n- 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n- 'tensorflow>=1.14,<2.3.0',\n- 'tensorflow-probability<=0.10.0',\n+ 'tensorflow>=1.14',\n+ 'tensorflow-probability>=0.11.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n ]\n", "issue": "TF 2.3.0 incompatibility\nTF 2.3.0 was released yesterday, and seems to be incompatible with TFP <0.11.0 and breaks imports (https://travis-ci.com/github/rlworkgroup/garage/jobs/365922927#L3061). We pin TFP to <=0.10.0 in the first place to resolve cloudpickle version mismatch (https://github.com/rlworkgroup/garage/issues/1758). Since TFP 0.11.0 still pins cloudpickle to 1.3.0 while gym needs 1.2.x, unpinning TFP won't work. So for now, TF needs to be pinned to <2.3.0\n", "before_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.15.4'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle<1.5',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n # Pyglet 1.4.0 introduces some api change which breaks some\n # gym environments\n # See: https://github.com/openai/gym/issues/1588\n 'pyglet<1.4.0,>=1.3.0',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14,<2.3.0',\n 'tensorflow-probability<=0.10.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'sphinxcontrib-bibtex',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"setuptools based setup module.\"\"\"\nimport os\n\nfrom setuptools import find_packages, setup\n\nGARAGE_GH_TOKEN = os.environ.get('GARAGE_GH_TOKEN') or 'git'\nGYM_VERSION = '0.17.2'\n\n# Required dependencies\nREQUIRED = [\n # Please keep alphabetized\n 'akro',\n 'click>=2.0',\n 'cloudpickle==1.3',\n 'cma==2.7.0',\n 'dowel==0.0.3',\n f'gym[atari,box2d,classic_control]=={GYM_VERSION}',\n 'numpy>=1.14.5',\n 'psutil',\n 'python-dateutil',\n 'ray',\n 'scikit-image',\n 'scipy',\n 'setproctitle>=1.0',\n 'tensorflow>=1.14',\n 'tensorflow-probability>=0.11.0',\n 'torch>=1.0.0,!=1.5.0,<1.6.0',\n 'torchvision>=0.2.1,<0.7.0',\n]\n\n# Dependencies for optional features\nEXTRAS = {}\n\nEXTRAS['mujoco'] = [\n 'mujoco-py<2.1,>=2.0',\n f'gym[all]=={GYM_VERSION}',\n]\n\nEXTRAS['dm_control'] = [\n # dm_control throws an error during install about not being able to\n # find a build dependency (absl-py). Later pip executes the `install`\n # command again and the install succeeds because absl-py has been\n # installed. 
This is stupid, but harmless.\n 'dm_control',\n]\n\nEXTRAS['bullet'] = ['mpi4py', 'pybullet']\n\nEXTRAS['all'] = list(set(sum(EXTRAS.values(), [])))\n\n# Development dependencies (*not* included in 'all')\nEXTRAS['dev'] = [\n # Please keep alphabetized\n 'flake8',\n 'flake8-docstrings>=1.5.0',\n 'flake8-import-order',\n f'metaworld @ https://{GARAGE_GH_TOKEN}@api.github.com/repos/rlworkgroup/metaworld/tarball/861ae8d8c4bef80a7ed86f47f47acaa494d4ab77', # noqa: E501\n 'isort>=4.3.21,<5.0.0',\n 'pep8-naming==0.7.0',\n 'pre-commit',\n 'pycodestyle>=2.5.0',\n 'pydocstyle>=4.0.0',\n 'pylint>=2.5.3',\n 'pytest>=4.5.0', # Required for strict-markers\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-xdist',\n 'recommonmark',\n 'sphinx',\n 'sphinx-autoapi>=1.4.0',\n 'sphinx_rtd_theme',\n 'sphinxcontrib-bibtex',\n 'yapf==0.30.0',\n] # yapf: disable\n\nwith open('README.md') as f:\n README = f.read()\n\n# Get the package version dynamically\nwith open('VERSION') as v:\n VERSION = v.read().strip()\n\nsetup(\n name='garage',\n version=VERSION,\n author='Reinforcement Learning Working Group',\n description='A toolkit for reproducible reinforcement learning research',\n url='https://github.com/rlworkgroup/garage',\n packages=find_packages(where='src'),\n package_dir={'': 'src'},\n scripts=['scripts/garage'],\n python_requires='>=3.6',\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n license='MIT',\n long_description=README,\n long_description_content_type='text/markdown',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development :: Libraries',\n ],\n)\n", "path": "setup.py"}]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.