response | instruction
---|---|
Update a particular user.
Args:
user_id (int): ID of the user.
user (UserBase): Updated user data.
Returns:
User: The updated user details.
Raises:
HTTPException (status_code=404): If the user with the specified ID is not found.
|
def update_user(user_id: int,
user: UserBase,
Authorize: AuthJWT = Depends(check_auth)):
"""
Update a particular user.
Args:
user_id (int): ID of the user.
user (UserBase): Updated user data.
Returns:
User: The updated user details.
Raises:
HTTPException (status_code=404): If the user with the specified ID is not found.
"""
db_user = db.session.query(User).filter(User.id == user_id).first()
if not db_user:
raise HTTPException(status_code=404, detail="User not found")
db_user.name = user.name
db_user.email = user.email
db_user.password = user.password
db.session.commit()
return db_user
|
Update first login source of the user
|
def update_first_login_source(source: str, Authorize: AuthJWT = Depends(check_auth)):
""" Update first login source of the user """
user = get_current_user(Authorize)
# valid_sources = ['google', 'github', 'email']
if user.first_login_source is None or user.first_login_source == '':
user.first_login_source = source
db.session.commit()
db.session.flush()
logger.info(f"User: {user}")
return user
|
Creates a new webhook for the authenticated user's organisation.
Args:
webhook (WebHookIn): The webhook data to create.
Returns:
Webhooks: The newly created webhook record.
|
def create_webhook(webhook: WebHookIn, Authorize: AuthJWT = Depends(check_auth),
organisation=Depends(get_user_organisation)):
"""
Creates a new webhook for the authenticated user's organisation.
Args:
webhook (WebHookIn): The webhook data to create.
Returns:
Webhooks: The newly created webhook record.
"""
db_webhook = Webhooks(name=webhook.name, url=webhook.url, headers=webhook.headers, org_id=organisation.id,
is_deleted=False, filters=webhook.filters)
db.session.add(db_webhook)
db.session.commit()
db.session.flush()
return db_webhook
|
Retrieves the first active (non-deleted) webhook for the authenticated user's organisation.
Returns:
Webhooks: The retrieved webhook, or None if the organisation has no active webhook.
|
def get_all_webhooks(
Authorize: AuthJWT = Depends(check_auth),
organisation=Depends(get_user_organisation),
):
"""
Retrieves the first active (non-deleted) webhook for the authenticated user's organisation.
Returns:
Webhooks: The retrieved webhook, or None if the organisation has no active webhook.
"""
webhook = db.session.query(Webhooks).filter(Webhooks.org_id == organisation.id, Webhooks.is_deleted == False).first()
return webhook
|
Updates an existing webhook's URL and filters.
Args:
updated_webhook (WebHookEdit): The new webhook data.
webhook_id (int): The ID of the webhook to update.
Returns:
WebHookOut: The updated webhook.
Raises:
HTTPException (Status Code=404): If the webhook is not found.
|
def edit_webhook(
updated_webhook: WebHookEdit,
webhook_id: int,
Authorize: AuthJWT = Depends(check_auth),
organisation=Depends(get_user_organisation),
):
"""
Updates an existing webhook's URL and filters.
Args:
updated_webhook (WebHookEdit): The new webhook data.
webhook_id (int): The ID of the webhook to update.
Returns:
WebHookOut: The updated webhook.
Raises:
HTTPException (Status Code=404): If the webhook is not found.
"""
webhook = db.session.query(Webhooks).filter(Webhooks.org_id == organisation.id, Webhooks.id == webhook_id, Webhooks.is_deleted == False).first()
if webhook is None:
raise HTTPException(status_code=404, detail="Webhook not found")
webhook.url = updated_webhook.url
webhook.filters = updated_webhook.filters
db.session.commit()
return webhook
|
Function to check if the user is authenticated or not based on the environment.
Args:
Authorize (AuthJWT, optional): Instance of AuthJWT class to authorize the user. Defaults to Depends().
Returns:
AuthJWT: Instance of AuthJWT class if the user is authenticated.
|
def check_auth(Authorize: AuthJWT = Depends()):
"""
Function to check if the user is authenticated or not based on the environment.
Args:
Authorize (AuthJWT, optional): Instance of AuthJWT class to authorize the user. Defaults to Depends().
Returns:
AuthJWT: Instance of AuthJWT class if the user is authenticated.
"""
env = get_config("ENV", "DEV")
if env == "PROD":
Authorize.jwt_required()
return Authorize
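A minimal usage sketch of this dependency in a route (assumptions: `AuthJWT` comes from `fastapi_jwt_auth`, `check_auth` and `get_current_user` are importable from this module, and the route path is purely illustrative):
from fastapi import APIRouter, Depends
from fastapi_jwt_auth import AuthJWT  # assumed to be the same AuthJWT used above

router = APIRouter()

@router.get("/users/me")  # hypothetical route, for illustration only
def read_current_user(Authorize: AuthJWT = Depends(check_auth)):
    # In PROD the dependency enforces a valid JWT; in DEV it is a pass-through.
    return {"user_id": get_current_user(Authorize).id}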
|
Function to get the organisation of the authenticated user based on the environment.
Args:
Authorize (AuthJWT, optional): Instance of AuthJWT class to authorize the user. Defaults to Depends(check_auth).
Returns:
Organisation: Instance of Organisation class to which the authenticated user belongs.
|
def get_user_organisation(Authorize: AuthJWT = Depends(check_auth)):
"""
Function to get the organisation of the authenticated user based on the environment.
Args:
Authorize (AuthJWT, optional): Instance of AuthJWT class to authorize the user. Defaults to Depends(check_auth).
Returns:
Organisation: Instance of Organisation class to which the authenticated user belongs.
"""
user = get_current_user(Authorize)
if user is None:
raise HTTPException(status_code=401, detail="Unauthenticated")
organisation = db.session.query(Organisation).filter(Organisation.id == user.organisation_id).first()
return organisation
|
Encrypts the given data using the Fernet cipher suite.
Args:
data (str): The data to be encrypted.
Returns:
str: The encrypted data, decoded as a string.
|
def encrypt_data(data):
"""
Encrypts the given data using the Fernet cipher suite.
Args:
data (str): The data to be encrypted.
Returns:
str: The encrypted data, decoded as a string.
"""
encrypted_data = cipher_suite.encrypt(data.encode())
return encrypted_data.decode()
|
Decrypts the given encrypted data using the Fernet cipher suite.
Args:
encrypted_data (str): The encrypted data to be decrypted.
Returns:
str: The decrypted data, decoded as a string.
|
def decrypt_data(encrypted_data):
"""
Decrypts the given encrypted data using the Fernet cipher suite.
Args:
encrypted_data (str): The encrypted data to be decrypted.
Returns:
str: The decrypted data, decoded as a string.
"""
decrypted_data = cipher_suite.decrypt(encrypted_data.encode())
return decrypted_data.decode()
|
Helper function to parse the feed.
Args:
feed (AgentExecutionFeed): The feed to be parsed.
Returns:
dict: Parsed feed information with role, feed content, and updated timestamp.
If parsing fails, the original feed is returned.
|
def parse_feed(feed):
"""
Helper function to parse the feed.
Args:
feed (AgentExecutionFeed): The feed to be parsed.
Returns:
dict: Parsed feed information with role, feed content, and updated timestamp.
If parsing fails, the original feed is returned.
"""
# Get the current time
feed.time_difference = get_time_difference(feed.updated_at, str(datetime.now()))
# Check if the feed belongs to an assistant role
if feed.role == "assistant":
try:
# Parse the feed as JSON
parsed = json.loads(feed.feed, strict=False)
final_output = ""
if "reasoning" in parsed["thoughts"]:
final_output = "Thoughts: " + parsed["thoughts"]["reasoning"] + "\n"
if "plan" in parsed["thoughts"]:
final_output += "Plan: " + str(parsed["thoughts"]["plan"]) + "\n"
if "criticism" in parsed["thoughts"]:
final_output += "Criticism: " + parsed["thoughts"]["criticism"] + "\n"
if "tool" in parsed:
final_output += "Tool: " + parsed["tool"]["name"] + "\n"
if "command" in parsed:
final_output += "Tool: " + parsed["command"]["name"] + "\n"
return {"role": "assistant", "feed": final_output, "updated_at": feed.updated_at,
"time_difference": feed.time_difference}
except Exception:
return {"role": "assistant", "feed": feed.feed, "updated_at": feed.updated_at,
"time_difference": feed.time_difference}
if feed.role == "system":
final_output = feed.feed
if "json-schema.org" in feed.feed:
final_output = feed.feed.split("TOOLS:")[0]
return {"role": "system", "feed": final_output, "updated_at": feed.updated_at,
"time_difference": feed.time_difference}
if feed.role == "user":
return {"role": "user", "feed": feed.feed, "updated_at": feed.updated_at,
"time_difference": feed.time_difference}
return feed
|
Get tool information from an object.
|
def get_tool_info(class_dict, classes, obj):
"""
Get tool information from an object.
"""
class_dict['tool_name'] = obj.name
class_dict['tool_description'] = obj.description
classes.append(class_dict)
|
Get toolkit information from an object.
|
def get_toolkit_info(class_dict, classes, obj):
"""
Get toolkit information from an object.
"""
class_dict['toolkit_name'] = obj.name
class_dict['toolkit_description'] = obj.description
class_dict['toolkit_tools'] = obj.get_tools()
class_dict['toolkit_keys'] = obj.get_env_keys()
classes.append(class_dict)
|
Connects to the PostgreSQL database using SQLAlchemy.
Returns:
engine: The SQLAlchemy engine object representing the database connection.
|
def connect_db():
"""
Connects to the PostgreSQL database using SQLAlchemy.
Returns:
engine: The SQLAlchemy engine object representing the database connection.
"""
global engine
if engine is not None:
return engine
# Create the connection URL
db_host = get_config('DB_HOST', 'super__postgres')
db_username = get_config('DB_USERNAME')
db_password = get_config('DB_PASSWORD')
db_name = get_config('DB_NAME')
db_url = get_config('DB_URL', None)
if db_url is None:
if db_username is None:
db_url = f'postgresql://{db_host}/{db_name}'
else:
db_url = f'postgresql://{db_username}:{db_password}@{db_host}/{db_name}'
else:
db_url = urlparse(db_url)
db_url = db_url.scheme + "://" + db_url.netloc + db_url.path
# Create the SQLAlchemy engine
engine = create_engine(db_url,
pool_size=20, # Maximum number of database connections in the pool
max_overflow=50, # Maximum number of connections that can be created beyond the pool_size
pool_timeout=30, # Timeout value in seconds for acquiring a connection from the pool
pool_recycle=1800, # Recycle connections after this number of seconds (optional)
pool_pre_ping=False, # Enable connection health checks (optional)
)
# Test the connection
try:
connection = engine.connect()
logger.info("Connected to the database! @ " + db_url)
connection.close()
except Exception as e:
logger.error(f"Unable to connect to the database:{e}")
return engine
|
Get the arguments from a function's signature.
|
def extract_valid_parameters(
inferred_type: Type[BaseModel],
function: Callable,
) -> dict:
"""Get the arguments from a function's signature."""
schema = inferred_type.schema()["properties"]
valid_params = signature(function).parameters
return {param: schema[param] for param in valid_params if param != "run_manager"}
|
Create a pydantic model with only a subset of model's fields.
|
def _construct_model_subset(
model_name: str, original_model: BaseModel, required_fields: list
) -> Type[BaseModel]:
"""Create a pydantic model with only a subset of model's fields."""
fields = {
field: (
original_model.__fields__[field].type_,
original_model.__fields__[field].default,
)
for field in required_fields
if field in original_model.__fields__
}
return create_model(model_name, **fields)
|
Create a pydantic schema from a function's signature.
|
def create_function_schema(
schema_name: str,
function: Callable,
) -> Type[BaseModel]:
"""Create a pydantic schema from a function's signature."""
validated = validate_arguments(function, config=SchemaSettings) # type: ignore
inferred_type = validated.model # type: ignore
if "run_manager" in inferred_type.__fields__:
del inferred_type.__fields__["run_manager"]
valid_parameters = extract_valid_parameters(inferred_type, function)
return _construct_model_subset(
f"{schema_name}Schema", inferred_type, list(valid_parameters)
)
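A small usage sketch, assuming pydantic v1 semantics for `validate_arguments`/`create_model` and that `SchemaSettings` (not shown here) is the usual config class allowing arbitrary types:
def multiply(a: int, b: int = 2) -> int:
    # Multiply two integers.
    return a * b

# Hypothetical usage of the helper defined above.
MultiplySchema = create_function_schema("Multiply", multiply)
print(sorted(MultiplySchema.__fields__))   # ['a', 'b']
print(MultiplySchema(a=3).dict())          # {'a': 3, 'b': 2}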
|
Gets the raw HTML of a searx search result page
Args:
query : The query to search for.
|
def search(query):
"""
Gets the raw HTML of a searx search result page
Args:
query : The query to search for.
"""
# TODO: use a better strategy for choosing hosts. Could use this list: https://searx.space/data/instances.json
searx_url = random.choice(searx_hosts)
res = httpx.get(
searx_url + "/search", params={"q": query}, headers={"User-Agent": "Mozilla/5.0 (X11; Linux i686; rv:109.0) Gecko/20100101 Firefox/114.0"}
)
if res.status_code != 200:
logger.info(f"Searx host {searx_url} returned status code {res.status_code}")
raise Exception(f"Searx returned {res.status_code} status code")
return res.text
|
Cleans up whitespace in a string
Args:
s : The string to clean up.
Returns:
The cleaned up string.
|
def clean_whitespace(s: str):
"""
Cleans up whitespace in a string
Args:
s : The string to clean up.
Returns:
The cleaned up string.
"""
return " ".join(s.split())
|
Converts raw HTML into a list of SearchResult objects
Args:
html : The raw HTML to convert.
Returns:
A list of SearchResult objects.
|
def scrape_results(html):
"""
Converts raw HTML into a list of SearchResult objects
Args:
html : The raw HTML to convert.
Returns:
A list of SearchResult objects.
"""
soup = BeautifulSoup(html, "html.parser")
result_divs = soup.find_all(attrs={"class": "result"})
result_list = []
n = 1
for result_div in result_divs:
if result_div is None:
continue
# Needed to work on multiple versions of Searx
header = result_div.find(["h4", "h3"])
if header is None:
continue
link = header.find("a")["href"]
title = header.text.strip()
description = clean_whitespace(result_div.find("p").text)
# Needed to work on multiple versions of Searx
sources_container = result_div.find(
attrs={"class": "pull-right"}
) or result_div.find(attrs={"class": "engines"})
source_spans = sources_container.find_all("span")
sources = []
for s in source_spans:
sources.append(s.text.strip())
result = SearchResult(
id=n, title=title, link=link, description=description, sources=sources
)
result_list.append(result)
n += 1
return result_list
|
Returns a text summary of the search results via the SearchResult.__str__ method
|
def search_results(query):
'''Returns a text summary of the search results via the SearchResult.__str__ method'''
return "\n\n".join(list(map(lambda x: str(x), scrape_results(search(query)))))
|
Creates a Weaviate client instance.
Args:
url: The URL of the Weaviate instance to connect to. Required.
api_key: The API key to use for authentication if using Weaviate Cloud Services. Optional.
Returns:
A Weaviate client instance.
Raises:
ValueError: If no URL is provided.
|
def create_weaviate_client(
url: Optional[str] = None,
api_key: Optional[str] = None,
) -> weaviate.Client:
"""
Creates a Weaviate client instance.
Args:
url: The URL of the Weaviate instance to connect to. Required.
api_key: The API key to use for authentication if using Weaviate Cloud Services. Optional.
Returns:
A Weaviate client instance.
Raises:
ValueError: If no URL is provided.
"""
if url:
if api_key:
auth_config = weaviate.AuthApiKey(api_key=api_key)
else:
auth_config = None
client = weaviate.Client(url=url, auth_client_secret=auth_config)
else:
raise ValueError("Invalid arguments passed to create_weaviate_client")
return client
|
init.
Initialize DDP using the given rendezvous file.
|
def init(args):
"""init.
Initialize DDP using the given rendezvous file.
"""
global rank, world_size
if args.ddp:
assert args.rank is not None and args.world_size is not None
rank = args.rank
world_size = args.world_size
if world_size == 1:
return
torch.cuda.set_device(rank)
torch.distributed.init_process_group(
backend=args.ddp_backend,
init_method='file://' + os.path.abspath(args.rendezvous_file),
world_size=world_size,
rank=rank)
logger.debug("Distributed rendezvous went well, rank %d/%d", rank, world_size)
|
average.
Average all the relevant metrics across processes.
`metrics` should be a 1D float32 vector. Returns the average of `metrics`
over all hosts. You can use `count` to control the weight of each worker.
|
def average(metrics, count=1.):
"""average.
Average all the relevant metrics across processes.
`metrics` should be a 1D float32 vector. Returns the average of `metrics`
over all hosts. You can use `count` to control the weight of each worker.
"""
if world_size == 1:
return metrics
tensor = torch.tensor(list(metrics) + [1], device='cuda', dtype=torch.float32)
tensor *= count
torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM)
return (tensor[:-1] / tensor[-1]).cpu().numpy().tolist()
|
wrap.
Wrap a model with DDP if distributed training is enabled.
|
def wrap(model):
"""wrap.
Wrap a model with DDP if distributed training is enabled.
"""
if world_size == 1:
return model
else:
return DistributedDataParallel(
model,
device_ids=[torch.cuda.current_device()],
output_device=torch.cuda.current_device())
|
loader.
Create a dataloader properly in case of distributed training.
If a gradient is going to be computed you must set `shuffle=True`.
:param dataset: the dataset to be parallelized
:param args: relevant args for the loader
:param shuffle: shuffle examples
:param klass: loader class
:param kwargs: relevant args
|
def loader(dataset, *args, shuffle=False, klass=DataLoader, **kwargs):
"""loader.
Create a dataloader properly in case of distributed training.
If a gradient is going to be computed you must set `shuffle=True`.
:param dataset: the dataset to be parallelized
:param args: relevant args for the loader
:param shuffle: shuffle examples
:param klass: loader class
:param kwargs: relevant args
"""
if world_size == 1:
return klass(dataset, *args, shuffle=shuffle, **kwargs)
if shuffle:
# train means we will compute backward, we use DistributedSampler
sampler = DistributedSampler(dataset)
# We ignore shuffle, DistributedSampler already shuffles
return klass(dataset, *args, **kwargs, sampler=sampler)
else:
# We make a manual shard, as DistributedSampler otherwise replicate some examples
dataset = Subset(dataset, list(range(rank, len(dataset), world_size)))
return klass(dataset, *args, shuffle=shuffle, **kwargs)
|
Calculate Scale-Invariant Source-to-Noise Ratio (SI-SNR)
Args:
ref_sig: numpy.ndarray, [B, T]
out_sig: numpy.ndarray, [B, T]
Returns:
SISNR
|
def cal_SISNR(ref_sig, out_sig, eps=1e-8):
"""Calcuate Scale-Invariant Source-to-Noise Ratio (SI-SNR)
Args:
ref_sig: numpy.ndarray, [B, T]
out_sig: numpy.ndarray, [B, T]
Returns:
SISNR
"""
assert len(ref_sig) == len(out_sig)
B, T = ref_sig.shape
ref_sig = ref_sig - np.mean(ref_sig, axis=1).reshape(B, 1)
out_sig = out_sig - np.mean(out_sig, axis=1).reshape(B, 1)
ref_energy = (np.sum(ref_sig ** 2, axis=1) + eps).reshape(B, 1)
proj = (np.sum(ref_sig * out_sig, axis=1).reshape(B, 1)) * \
ref_sig / ref_energy
noise = out_sig - proj
ratio = np.sum(proj ** 2, axis=1) / (np.sum(noise ** 2, axis=1) + eps)
sisnr = 10 * np.log(ratio + eps) / np.log(10.0)
return sisnr.mean()
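A quick sanity check, assuming the function above is in scope: an estimate with 10% added noise should land near 20 dB, while an exact copy scores far higher.
import numpy as np

rng = np.random.default_rng(0)
ref = rng.standard_normal((2, 16000))                 # [B, T]
noisy = ref + 0.1 * rng.standard_normal((2, 16000))

print(cal_SISNR(ref, ref.copy()))   # very large (near-perfect estimate)
print(cal_SISNR(ref, noisy))        # roughly 20 dB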
|
Calculate PESQ.
Args:
ref_sig: numpy.ndarray, [B, C, T]
out_sig: numpy.ndarray, [B, C, T]
Returns:
PESQ
|
def cal_PESQ(ref_sig, out_sig, sr):
"""Calculate PESQ.
Args:
ref_sig: numpy.ndarray, [B, C, T]
out_sig: numpy.ndarray, [B, C, T]
Returns:
PESQ
"""
B, C, T = ref_sig.shape
ref_sig = ref_sig.reshape(B*C, T)
out_sig = out_sig.reshape(B*C, T)
pesq_val = 0
for i in range(len(ref_sig)):
pesq_val += pesq(sr, ref_sig[i], out_sig[i], 'nb')
return pesq_val / (B*C)
|
Calculate STOI.
Args:
ref_sig: numpy.ndarray, [B, C, T]
out_sig: numpy.ndarray, [B, C, T]
Returns:
STOI
|
def cal_STOI(ref_sig, out_sig, sr):
"""Calculate STOI.
Args:
ref_sig: numpy.ndarray, [B, C, T]
out_sig: numpy.ndarray, [B, C, T]
Returns:
STOI
"""
B, C, T = ref_sig.shape
ref_sig = ref_sig.reshape(B*C, T)
out_sig = out_sig.reshape(B*C, T)
try:
stoi_val = 0
for i in range(len(ref_sig)):
stoi_val += stoi(ref_sig[i], out_sig[i], sr, extended=False)
return stoi_val / (B*C)
except Exception:
return 0
|
Calculate Scale-Invariant Source-to-Noise Ratio improvement (SI-SNRi)
Args:
src_ref: numpy.ndarray, [B, C, T]
src_est: numpy.ndarray, [B, C, T], reordered by best PIT permutation
mix: numpy.ndarray, [B, T]
Returns:
average_SISNRi
|
def cal_SISNRi(src_ref, src_est, mix):
"""Calculate Scale-Invariant Source-to-Noise Ratio improvement (SI-SNRi)
Args:
src_ref: numpy.ndarray, [B, C, T]
src_est: numpy.ndarray, [B, C, T], reordered by best PIT permutation
mix: numpy.ndarray, [B, T]
Returns:
average_SISNRi
"""
avg_SISNRi = 0.0
B, C, T = src_ref.shape
for c in range(C):
sisnr = cal_SISNR(src_ref[:, c], src_est[:, c])
sisnrb = cal_SISNR(src_ref[:, c], mix)
avg_SISNRi += (sisnr - sisnrb)
avg_SISNRi /= C
return avg_SISNRi
|
Decorate `__init__` with this, and you can then
recover the *args and **kwargs passed to it in `self._init_args_kwargs`
|
def capture_init(init):
"""
Decorate `__init__` with this, and you can then
recover the *args and **kwargs passed to it in `self._init_args_kwargs`
"""
@functools.wraps(init)
def __init__(self, *args, **kwargs):
self._init_args_kwargs = (args, kwargs)
init(self, *args, **kwargs)
return __init__
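A minimal usage sketch of the decorator above:
class Demo:
    @capture_init
    def __init__(self, hidden=64, depth=4):
        self.hidden = hidden
        self.depth = depth

d = Demo(hidden=128)
print(d._init_args_kwargs)   # ((), {'hidden': 128}) -- handy for re-building the model later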
|
Args:
inputs: torch.Tensor, [B, C, T] or [B, T], B is batch size
inputs_lengths: torch.Tensor, [B]
Returns:
results: a list containing B items, each item is [C, T], T varies
|
def remove_pad(inputs, inputs_lengths):
"""
Args:
inputs: torch.Tensor, [B, C, T] or [B, T], B is batch size
inputs_lengths: torch.Tensor, [B]
Returns:
results: a list containing B items, each item is [C, T], T varies
"""
results = []
dim = inputs.dim()
if dim == 3:
C = inputs.size(1)
for input, length in zip(inputs, inputs_lengths):
if dim == 3: # [B, C, T]
results.append(input[:, :length].view(C, -1).cpu().numpy())
elif dim == 2: # [B, T]
results.append(input[:length].view(-1).cpu().numpy())
return results
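For example (assuming `remove_pad` above is in scope), padding beyond each item's length is stripped per batch element:
import torch

x = torch.arange(2 * 3 * 5, dtype=torch.float32).view(2, 3, 5)  # [B=2, C=3, T=5]
lengths = torch.tensor([5, 3])
outs = remove_pad(x, lengths)
print([o.shape for o in outs])   # [(3, 5), (3, 3)]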
|
Reconstructs a signal from a framed representation.
Adds potentially overlapping frames of a signal with shape
`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
The resulting tensor has shape `[..., output_size]` where
output_size = (frames - 1) * frame_step + frame_length
Args:
signal: A [..., frames, frame_length] Tensor. All dimensions may be unknown, and rank must be at least 2.
frame_step: An integer denoting overlap offsets. Must be less than or equal to frame_length.
Returns:
A Tensor with shape [..., output_size] containing the overlap-added frames of signal's inner-most two dimensions.
output_size = (frames - 1) * frame_step + frame_length
Based on https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py
|
def overlap_and_add(signal, frame_step):
"""Reconstructs a signal from a framed representation.
Adds potentially overlapping frames of a signal with shape
`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
The resulting tensor has shape `[..., output_size]` where
output_size = (frames - 1) * frame_step + frame_length
Args:
signal: A [..., frames, frame_length] Tensor. All dimensions may be unknown, and rank must be at least 2.
frame_step: An integer denoting overlap offsets. Must be less than or equal to frame_length.
Returns:
A Tensor with shape [..., output_size] containing the overlap-added frames of signal's inner-most two dimensions.
output_size = (frames - 1) * frame_step + frame_length
Based on https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py
"""
outer_dimensions = signal.size()[:-2]
frames, frame_length = signal.size()[-2:]
# gcd=Greatest Common Divisor
subframe_length = math.gcd(frame_length, frame_step)
subframe_step = frame_step // subframe_length
subframes_per_frame = frame_length // subframe_length
output_size = frame_step * (frames - 1) + frame_length
output_subframes = output_size // subframe_length
subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)
frame = torch.arange(0, output_subframes).unfold(
0, subframes_per_frame, subframe_step)
frame = frame.clone().detach().long().to(signal.device)
# frame = signal.new_tensor(frame).clone().long() # signal may in GPU or CPU
frame = frame.contiguous().view(-1)
result = signal.new_zeros(
*outer_dimensions, output_subframes, subframe_length)
result.index_add_(-2, frame, subframe_signal)
result = result.view(*outer_dimensions, -1)
return result
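A small worked example, assuming the function above is in scope: three frames of length 4 with hop 2 reconstruct a signal of length (3 - 1) * 2 + 4 = 8, with overlapping samples summed.
import torch

frames = torch.ones(3, 4)          # [frames=3, frame_length=4]
out = overlap_and_add(frames, 2)   # frame_step=2
print(out.shape)                   # torch.Size([8])
print(out)                         # tensor([1., 1., 2., 2., 2., 2., 1., 1.])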
|
Args:
batch: list, len(batch) = 1. See AudioDataset.__getitem__()
Returns:
mixtures_pad: B x T, torch.Tensor
ilens : B, torch.Tensor
filenames: a list containing B strings
|
def _collate_fn_eval(batch):
"""
Args:
batch: list, len(batch) = 1. See AudioDataset.__getitem__()
Returns:
mixtures_pad: B x T, torch.Tensor
ilens : B, torch.Tensor
filenames: a list containing B strings
"""
# batch should be located in list
assert len(batch) == 1
mixtures, filenames = load_mixtures(batch[0])
# get batch of lengths of input sequences
ilens = np.array([mix.shape[0] for mix in mixtures])
# perform padding and convert to tensor
pad_value = 0
mixtures_pad = pad_list([torch.from_numpy(mix).float()
for mix in mixtures], pad_value)
ilens = torch.from_numpy(ilens)
return mixtures_pad, ilens, filenames
|
Returns:
mixtures: a list containing B items, each item is T np.ndarray
filenames: a list containing B strings
T varies from item to item.
|
def load_mixtures(batch):
"""
Returns:
mixtures: a list containing B items, each item is T np.ndarray
filenames: a list containing B strings
T varies from item to item.
"""
mixtures, filenames = [], []
mix_infos, sample_rate = batch
# for each utterance
for mix_info in mix_infos:
mix_path = mix_info[0]
# read wav file
mix, _ = librosa.load(mix_path, sr=sample_rate)
mixtures.append(mix)
filenames.append(mix_path)
return mixtures, filenames
|
Args:
source: [B, C, T], B is batch size
estimate_source: [B, C, T]
source_lengths: [B]
|
def cal_loss(source, estimate_source, source_lengths):
"""
Args:
source: [B, C, T], B is batch size
estimate_source: [B, C, T]
source_lengths: [B]
"""
max_snr, perms, max_snr_idx, snr_set = cal_si_snr_with_pit(source,
estimate_source,
source_lengths)
B, C, T = estimate_source.shape
loss = 0 - torch.mean(max_snr)
reorder_estimate_source = reorder_source(
estimate_source, perms, max_snr_idx)
return loss, max_snr, estimate_source, reorder_estimate_source
|
Calculate SI-SNR with PIT training.
Args:
source: [B, C, T], B is batch size
estimate_source: [B, C, T]
source_lengths: [B], each item is between [0, T]
|
def cal_si_snr_with_pit(source, estimate_source, source_lengths):
"""Calculate SI-SNR with PIT training.
Args:
source: [B, C, T], B is batch size
estimate_source: [B, C, T]
source_lengths: [B], each item is between [0, T]
"""
assert source.size() == estimate_source.size()
B, C, T = source.size()
# mask padding position along T
mask = get_mask(source, source_lengths)
estimate_source *= mask
# Step 1. Zero-mean norm
num_samples = source_lengths.view(-1, 1, 1).float() # [B, 1, 1]
mean_target = torch.sum(source, dim=2, keepdim=True) / num_samples
mean_estimate = torch.sum(estimate_source, dim=2,
keepdim=True) / num_samples
zero_mean_target = source - mean_target
zero_mean_estimate = estimate_source - mean_estimate
# mask padding position along T
zero_mean_target *= mask
zero_mean_estimate *= mask
# Step 2. SI-SNR with PIT
# reshape to use broadcast
s_target = torch.unsqueeze(zero_mean_target, dim=1) # [B, 1, C, T]
s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2) # [B, C, 1, T]
# s_target = <s', s>s / ||s||^2
pair_wise_dot = torch.sum(s_estimate * s_target,
dim=3, keepdim=True) # [B, C, C, 1]
s_target_energy = torch.sum(
s_target ** 2, dim=3, keepdim=True) + EPS # [B, 1, C, 1]
pair_wise_proj = pair_wise_dot * s_target / s_target_energy # [B, C, C, T]
# e_noise = s' - s_target
e_noise = s_estimate - pair_wise_proj # [B, C, C, T]
# SI-SNR = 10 * log_10(||s_target||^2 / ||e_noise||^2)
pair_wise_si_snr = torch.sum(
pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + EPS)
pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + EPS) # [B, C, C]
pair_wise_si_snr = torch.transpose(pair_wise_si_snr, 1, 2)
# Get max_snr of each utterance
# permutations, [C!, C]
perms = source.new_tensor(list(permutations(range(C))), dtype=torch.long)
# one-hot, [C!, C, C]
index = torch.unsqueeze(perms, 2)
perms_one_hot = source.new_zeros((*perms.size(), C)).scatter_(2, index, 1)
# [B, C!] <- [B, C, C] einsum [C!, C, C], SI-SNR sum of each permutation
snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])
max_snr_idx = torch.argmax(snr_set, dim=1) # [B]
# max_snr = torch.gather(snr_set, 1, max_snr_idx.view(-1, 1)) # [B, 1]
max_snr, _ = torch.max(snr_set, dim=1, keepdim=True)
max_snr /= C
return max_snr, perms, max_snr_idx, snr_set / C
|
Args:
source: [B, C, T]
perms: [C!, C], permutations
max_snr_idx: [B], each item is between [0, C!)
Returns:
reorder_source: [B, C, T]
|
def reorder_source(source, perms, max_snr_idx):
"""
Args:
source: [B, C, T]
perms: [C!, C], permutations
max_snr_idx: [B], each item is between [0, C!)
Returns:
reorder_source: [B, C, T]
"""
B, C, *_ = source.size()
# [B, C], permutation whose SI-SNR is max of each utterance
# for each utterance, reorder estimate source according this permutation
max_snr_perm = torch.index_select(perms, dim=0, index=max_snr_idx)
# print('max_snr_perm', max_snr_perm)
# maybe use torch.gather()/index_select()/scatter() to impl this?
reorder_source = torch.zeros_like(source)
for b in range(B):
for c in range(C):
reorder_source[b, c] = source[b, max_snr_perm[b][c]]
return reorder_source
|
Args:
source: [B, C, T]
source_lengths: [B]
Returns:
mask: [B, 1, T]
|
def get_mask(source, source_lengths):
"""
Args:
source: [B, C, T]
source_lengths: [B]
Returns:
mask: [B, 1, T]
"""
B, _, T = source.size()
mask = source.new_ones((B, 1, T))
for i in range(B):
mask[i, :, source_lengths[i]:] = 0
return mask
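For instance (assuming `get_mask` above is in scope), positions past each item's length are zeroed:
import torch

src = torch.ones(2, 2, 5)       # [B=2, C=2, T=5]
lengths = torch.tensor([5, 3])
print(get_mask(src, lengths).squeeze(1))
# tensor([[1., 1., 1., 1., 1.],
#         [1., 1., 1., 0., 0.]])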
|
Parse command line arguments and return a ScriptArguments object.
Args:
args: Optional list of arguments to parse. If not provided, uses sys.argv.
|
def get_args(args=None) -> ScriptArguments:
"""Parse command line arguments and return a ScriptArguments object.
Args:
args: Optional list of arguments to parse. If not provided, uses sys.argv.
"""
defaults = ScriptArguments(
suffix="",
environment=EnvironmentArguments(
image_name="sweagent/swe-agent:latest",
data_path="princeton-nlp/SWE-bench_Lite",
split="dev",
verbose=True,
install_environment=True,
),
skip_existing=True,
agent=AgentArguments(
model=ModelArguments(
model_name="gpt4",
total_cost_limit=0.0,
per_instance_cost_limit=3.0,
temperature=0.0,
top_p=0.95,
),
config_file=Path("config/default.yaml"),
),
actions=ActionsArguments(open_pr=False, skip_if_commits_reference_issue=True),
)
# Nicer yaml dumping of multiline strings
def multiline_representer(dumper, data):
"""configures yaml for dumping multiline strings
Ref: https://stackoverflow.com/questions/8640959/how-can-i-control-what-scalar-form-pyyaml-uses-for-my-data
"""
if data.count("\n") > 0: # check for multiline string
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return dumper.represent_scalar("tag:yaml.org,2002:str", data)
yaml.add_representer(str, multiline_representer)
return parse(ScriptArguments, default=defaults, add_config_path_arg=False, args=args, formatter_class=RichHelpFormatter, description=Markdown(__doc__))
|
Args:
traj_path (str): _description_
config_file (str): _description_
data_path (str): _description_
suffix (str): _description_
forward_args (List[str]): Passed to run.py
Raises:
ValueError: Incorrect paths or other config issue
Returns:
None
|
def process_single_traj(traj_path: str, config_file: str, data_path: str, suffix: str, *, forward_args: List[str]):
"""
Args:
traj_path (str): _description_
config_file (str): _description_
data_path (str): _description_
suffix (str): _description_
forward_args (List[str]): Passed to run.py
Raises:
ValueError: Incorrect paths or other config issue
Returns:
None
"""
replay_action_trajs_path = "temp_replay.jsonl"
# Open trajectory file, extract responses as actions
if traj_path.endswith(".yaml"):
traj_data = dict()
with open(traj_path, "r") as f:
traj_data["history"] = yaml.safe_load(f)
else:
traj_data = json.load(open(traj_path, "r"))
actions = [x["content"] for x in traj_data["history"] if x["role"] == "assistant"]
instance_id = traj_path.split("/")[-1].split(".")[0]
with open(replay_action_trajs_path, "w") as f:
print(
json.dumps({instance_id: actions}),
file=f,
end="\n",
flush=True
)
# Get data_path from args.yaml
if data_path is None:
args_path = os.path.join(
os.path.dirname(traj_path),
"args.yaml"
)
args = yaml.safe_load(open(args_path))
data_path = args['environment']['data_path']
# Identify the relevant task instance and create it
def create_task_instances_tmp_file(data: List[Dict[str, Any]]) -> str:
"""Helper function to create a temporary file to write task instances to.
Returns path to the temporary file.
"""
data = [d for d in data if d["instance_id"] == instance_id]
tmp_path = instance_id + ".jsonl"
with open(tmp_path, "w") as f:
for d in data:
print(json.dumps(d), file=f, end="\n", flush=True)
return tmp_path
is_other = False
if data_path.endswith(".jsonl"):
replay_task_instances_path = create_task_instances_tmp_file([json.loads(x) for x in open(data_path, "r").readlines()])
elif data_path.endswith(".json"):
replay_task_instances_path = create_task_instances_tmp_file(json.load(open(data_path)))
else:
# Assume data_path is a github url or local url
is_other = True
replay_task_instances_path = data_path
# Call run.py via subprocess
run_args = [
"--config_file", config_file,
"--data_path", replay_task_instances_path,
"--install_environment", "True",
"--model_name", "replay",
"--replay_path", replay_action_trajs_path,
*forward_args,
]
if is_other:
# Not sure if this only applies to github urls for data_path
run_args.extend(["--skip_existing", "False"])
if suffix is not None:
run_args.extend(["--suffix", suffix])
script_args = runscript.get_args(run_args)
runscript.main(script_args)
os.remove(replay_action_trajs_path)
if not is_other:
os.remove(replay_task_instances_path)
|
Parse the folder name to get the different parts
|
def parse_folder_name(folder_name):
"""
Parse the folder name to get the different parts
"""
parsed_folder = folder_name.split("__")
if len(parsed_folder) == 7:
parsed_folder.append("")
return parsed_folder
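Judging from how `convert_experiments_to_rows` below consumes the result, a run folder name is eight `__`-separated segments (model, dataset, setup, temperature, top_p, cost, install flag, run); the concrete name here is illustrative only:
parts = parse_folder_name(
    "gpt4__swe-bench-dev-easy__default__t-0.00__p-0.95__c-3.00__install-1__run-1"
)
print(parts)
# ['gpt4', 'swe-bench-dev-easy', 'default', 't-0.00', 'p-0.95', 'c-3.00', 'install-1', 'run-1']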
|
Convert each experiment to a row in the csv
|
def convert_experiments_to_rows(folder_name, runs_max):
"""
Convert each experiment to a row in the csv
"""
rows = []
directories = get_folders(folder_name)
for directory in directories:
folders = get_folders(directory)
for folder in folders:
# Skip debug folders
if "debug" in folder.name:
continue
# Skip fine tuned models
if "ft_gpt-3.5" in folder.name:
continue
# Skip folders without a results.json file
json_file = folder / "results.json"
if not json_file.exists():
# print(f"No json file in {folder}")
continue
# Extract run attributes
folder_data = parse_folder_name(folder.name)
model = folder_data[0]
dataset = folder_data[1]
if dataset.startswith("swe-bench-dev-easy-"):
dataset = dataset[len("swe-bench-dev-easy-") :]
elif dataset.startswith("swe-bench-dev-"):
dataset = dataset[len("swe-bench-dev-") :]
setup = folder_data[2]
if len(folder_data) != 8:
# TODO: This might be too strict?
continue
temperature = float(folder_data[3][len("t-"):].strip())
top_p = float(folder_data[4][len("p-"):].strip())
cost = float(folder_data[5][len("c-"):].strip())
install = "Y" if folder_data[6].strip() == "install-1" else "N"
# Parse out run number
run = folder_data[-1]
if "run" not in run:
continue
try:
if "run-" in run:
run = int(run.split("run-")[-1].split("-")[0].replace("_", "").strip())
else:
run = int(run.split("run")[-1].split("-")[0].replace("_", "").strip())
except Exception as e:
print(run)
raise e
if runs_max is not None and run > runs_max:
continue
# Load results.json file
with json_file.open() as file:
results_data = json.load(file)
report = results_data.get("report", {})
# Extract resolved ids (to calculate pass@k)
resolved_ids = []
if "resolved" in results_data and isinstance(results_data["resolved"], list):
resolved_ids = results_data["resolved"]
elif "counts" in results_data and isinstance(results_data["counts"]["resolved"], list):
resolved_ids = results_data["counts"]["resolved"]
# Extract instance costs from trajectories
costs_overall = []
costs_success = []
costs_failure = []
for x in glob.glob(os.path.join(str(folder), "*.traj")):
traj_data = json.load(open(x))
if "model_stats" not in traj_data["info"]:
continue
run_cost = traj_data["info"]["model_stats"]["instance_cost"]
inst_id = x.split("/")[-1].split(".")[0]
costs_overall.append(run_cost)
if inst_id in resolved_ids:
costs_success.append(run_cost)
else:
costs_failure.append(run_cost)
# Create run row, write to csv
rows.append(
[
model,
dataset,
setup,
temperature,
top_p,
cost,
install,
run,
report.get("# Not Generated", 0),
report.get("# Generated", 0),
report.get("# Applied", 0),
report.get("# Resolved", 0),
resolved_ids,
costs_success,
costs_failure,
costs_overall,
]
)
return rows
|
Convert any multi-line strings to LiteralScalarString
|
def convert_to_literal_string(d):
"""
Convert any multi-line strings to LiteralScalarString
"""
if isinstance(d, dict):
for key, value in d.items():
if isinstance(value, str) and '\n' in value:
d[key] = LSS(value.replace('\r\n', '\n').replace('\r', '\n'))
elif isinstance(value, dict):
convert_to_literal_string(value)
elif isinstance(d, list):
for i, item in enumerate(d):
if isinstance(item, str) and '\n' in item:
d[i] = LSS(item.replace('\r\n', '\n').replace('\r', '\n'))
elif isinstance(item, dict):
convert_to_literal_string(item)
elif isinstance(d, str) and '\n' in d:
d = LSS(d.replace('\r\n', '\n').replace('\r', '\n'))
else:
raise ValueError(f"Unsupported type: {type(d)}")
return d
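Assuming `LSS` is `ruamel.yaml.scalarstring.LiteralScalarString` (consistent with the `ruamel.yaml` usage in `save_demo` below), multi-line values are then emitted as YAML block scalars:
import sys
from ruamel.yaml import YAML
from ruamel.yaml.scalarstring import LiteralScalarString as LSS  # assumed import

data = {"title": "one line", "body": "line 1\nline 2\n"}
data = convert_to_literal_string(data)
YAML().dump(data, sys.stdout)
# title: one line
# body: |
#   line 1
#   line 2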
|
Save a single task instance as a yaml file
|
def save_demo(data, file, traj_path):
"""
Save a single task instance as a yaml file
"""
data = convert_to_literal_string(data)
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
buffer = io.StringIO()
yaml.dump(data, buffer)
content = buffer.getvalue()
header = DEMO_COMMENT.format(traj_path=traj_path)
with open(file, "w") as f:
f.write(f"{header}\n{content}")
|
Create `prompt` by filtering out all keys except for role/content per `history` turn
Reference: https://docs.anthropic.com/claude/reference/complete_post
|
def anthropic_history_to_messages(
model: Union[AnthropicModel, BedrockModel], history: list[dict[str, str]], is_demonstration: bool = False
) -> Union[str, list[dict[str, str]]]:
"""
Create `prompt` by filtering out all keys except for role/content per `history` turn
Reference: https://docs.anthropic.com/claude/reference/complete_post
"""
# Preserve behavior for older models
if model.api_model in ["claude-instant", "claude-2.0"] or \
(isinstance(model, BedrockModel) and model.api_model in ["anthropic.claude-instant-v1", "anthropic.claude-v2"]):
# Remove system messages if it is a demonstration
if is_demonstration:
history = [entry for entry in history if entry["role"] != "system"]
# Map history to Claude format
prompt = "\n\n"
for entry in history:
if entry["role"] in {"user", "system"}:
prompt += f'{HUMAN_PROMPT} {entry["content"]}\n\n'
elif entry["role"] == "assistant":
prompt += f'{AI_PROMPT} {entry["content"]}\n\n'
prompt += AI_PROMPT
return prompt
# Remove system messages if it is a demonstration
if is_demonstration:
history = [entry for entry in history if entry["role"] != "system"]
return '\n'.join([entry["content"] for entry in history])
# Return history components with just role, content fields (no system message)
messages = [
{
k: v for k, v in entry.items()
if k in ["role", "content"]
}
for entry in history if entry["role"] != "system"
]
compiled_messages = [] # Combine messages from the same role
last_role = None
for message in reversed(messages):
if last_role == message["role"]:
compiled_messages[-1]["content"] = message["content"] + "\n" + compiled_messages[-1]["content"]
else:
compiled_messages.append(message)
last_role = message["role"]
compiled_messages = list(reversed(compiled_messages))
# Replace any empty content values with a "(No output)"
for message in compiled_messages:
if message["content"].strip() == "":
message["content"] = "(No output)"
return compiled_messages
|
Query the Anthropic API with the given `history` and return the response.
|
def anthropic_query(model: Union[AnthropicModel, BedrockModel], history: list[dict[str, str]]) -> str:
"""
Query the Anthropic API with the given `history` and return the response.
"""
# Preserve behavior for older models
if model.api_model in ["claude-instant", "claude-2.0", "claude-2.1"] or \
(isinstance(model, BedrockModel) and model.api_model in ["anthropic.claude-instant-v1", "anthropic.claude-v2"]):
# Perform Anthropic API call
prompt = anthropic_history_to_messages(model, history)
if isinstance(model, BedrockModel):
# Use a dummy Anthropic client since count_tokens
# is not available in AnthropicBedrock
# https://github.com/anthropics/anthropic-sdk-python/issues/353
input_tokens = Anthropic().count_tokens(prompt)
else:
input_tokens = model.api.count_tokens(prompt)
completion = model.api.completions.create(
model=model.api_model,
prompt=prompt,
max_tokens_to_sample=model.model_metadata["max_context"] - input_tokens if isinstance(model, Anthropic) else model.model_metadata["max_tokens_to_sample"],
temperature=model.args.temperature,
top_p=model.args.top_p,
)
# Calculate + update costs, return response
response = completion.completion
if isinstance(model, BedrockModel):
output_tokens = Anthropic().count_tokens(response)
else:
output_tokens = model.api.count_tokens(response)
model.update_stats(input_tokens, output_tokens)
return response
# Get system message(s)
system_message = "\n".join([
entry["content"] for entry in history if entry["role"] == "system"
])
messages = anthropic_history_to_messages(model, history)
# Perform Anthropic API call
response = model.api.messages.create(
messages=messages,
max_tokens=model.model_metadata["max_tokens"],
model=model.api_model,
temperature=model.args.temperature,
top_p=model.args.top_p,
system=system_message,
)
# Calculate + update costs, return response
model.update_stats(
response.usage.input_tokens,
response.usage.output_tokens
)
response = "\n".join([x.text for x in response.content])
return response
|
Returns correct model object given arguments and commands
|
def get_model(args: ModelArguments, commands: Optional[list[Command]] = None):
"""
Returns correct model object given arguments and commands
"""
if commands is None:
commands = []
if args.model_name == "instant_empty_submit":
return InstantEmptySubmitTestModel(args, commands)
if args.model_name == "human":
return HumanModel(args, commands)
if args.model_name == "human_thought":
return HumanThoughtModel(args, commands)
if args.model_name == "replay":
return ReplayModel(args, commands)
elif args.model_name.startswith("gpt") or args.model_name.startswith("ft:gpt") or args.model_name.startswith("azure:gpt"):
return OpenAIModel(args, commands)
elif args.model_name.startswith("claude"):
return AnthropicModel(args, commands)
elif args.model_name.startswith("bedrock"):
return BedrockModel(args, commands)
elif args.model_name.startswith("ollama"):
return OllamaModel(args, commands)
elif args.model_name in TogetherModel.SHORTCUTS:
return TogetherModel(args, commands)
else:
raise ValueError(f"Invalid model name: {args.model_name}")
|
Given a format string, returns a set of all the keys in the format string.
|
def extract_keys(format_string):
"""
Given a format string, returns a set of all the keys in the format string.
"""
formatter = string.Formatter()
keys = set()
for _, field_name, _, _ in formatter.parse(format_string):
if field_name is not None:
keys.add(field_name)
return keys
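For example, with the helper above in scope:
print(extract_keys("Fix {issue} in {repo} (attempt {attempt})"))
# {'issue', 'repo', 'attempt'}  (a set, so order may vary)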
|
Returns True if the value should be quoted, False otherwise.
|
def should_quote(value, command):
"""
Returns True if the value should be quoted, False otherwise.
"""
return (isinstance(value, str) and command.end_name is None)
|
if data_path is a file, return the file stem
elif it's a github url, return the owner__repo_name
|
def get_data_path_name(data_path: str):
""" if data_path is a file, return the file stem
elif it's a github url, return the owner__repo_name
"""
match = GITHUB_ISSUE_URL_PATTERN.search(data_path)
if match:
owner, repo, _ = match.groups()
return f"{owner}__{repo}"
return Path(data_path).stem
|
Check if data_path is a URL pointing to a GitHub issue
|
def is_github_issue_url(data_path: str) -> bool:
"""Check if data_path is an URL pointing to a github issue"""
return GITHUB_ISSUE_URL_PATTERN.search(data_path) is not None
|
Check if data_path is a URL pointing to a GitHub repository.
Paths to issues or PRs will also match this pattern.
|
def is_github_repo_url(data_path: str) -> bool:
"""Check if data_path is an URL pointing to a github repository.
Paths to issues or PRs will also match this pattern.
"""
return GITHUB_REPO_URL_PATTERN.search(data_path) is not None
|
Copies a given string into a Docker container at a specified path.
Args:
- container: Docker SDK container object.
- contents: The string to copy into the container.
- container_path: The path inside the container where the string should be copied to.
Returns:
- None
|
def copy_file_to_container(container, contents, container_path):
"""
Copies a given string into a Docker container at a specified path.
Args:
- container: Docker SDK container object.
- contents: The string to copy into the container.
- container_path: The path inside the container where the string should be copied to.
Returns:
- None
"""
temp_file_name = None
try:
# Create a temporary file
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
temp_file_name = temp_file.name
# Write the string to the temporary file and ensure it's written to disk
temp_file.write(contents.encode('utf-8'))
temp_file.flush()
os.fsync(temp_file.fileno())
# Create a TAR archive in memory containing the temporary file
with tempfile.NamedTemporaryFile():
with open(temp_file_name, 'rb') as temp_file:
# Prepare the TAR archive
with BytesIO() as tar_stream:
with tarfile.open(fileobj=tar_stream, mode='w') as tar:
tar_info = tarfile.TarInfo(name=os.path.basename(container_path))
tar_info.size = os.path.getsize(temp_file_name)
tar.addfile(tarinfo=tar_info, fileobj=temp_file)
tar_stream.seek(0)
# Copy the TAR stream to the container
container.put_archive(path=os.path.dirname(container_path), data=tar_stream.read())
except Exception as e:
logger.error(f"An error occurred: {e}")
logger.error(traceback.format_exc())
finally:
# Cleanup: Remove the temporary file if it was created
if temp_file_name and os.path.exists(temp_file_name):
os.remove(temp_file_name)
|
Copy files or directories from host to container
Note: Will need to set ownership on the copied files in the container.
|
def copy_anything_to_container(container, host_path: str, container_path: str) -> None:
"""Copy files or directories from host to container
Note: Will need to set ownership on the copied files in the container.
"""
if not Path(host_path).exists():
msg = f"Path {host_path} does not exist, cannot copy it to container."
raise FileNotFoundError(msg)
cmd = ["docker", "cp", host_path, f"{container.id}:{container_path}"]
logger.debug(f"Copying {host_path} to container at {container_path} with command: {shlex.join(cmd)}")
try:
subprocess.run(cmd, check=True)
except subprocess.CalledProcessError as e:
msg = f"Error copying {host_path} to container at {container_path}: {e}"
raise RuntimeError(msg) from e
|
Read data from a subprocess with a timeout.
This function uses a file descriptor to read data from the subprocess in a non-blocking way.
Args:
container (subprocess.Popen): The subprocess container.
pid_func (function): A function that returns a list of process IDs (except the PID of the main process).
timeout_duration (int): The timeout duration in seconds.
Returns:
str: The data read from the subprocess, stripped of trailing newline characters.
Raises:
TimeoutError: If the timeout duration is reached while reading from the subprocess.
|
def read_with_timeout(container, pid_func, timeout_duration):
"""
Read data from a subprocess with a timeout.
This function uses a file descriptor to read data from the subprocess in a non-blocking way.
Args:
container (subprocess.Popen): The subprocess container.
pid_func (function): A function that returns a list of process IDs (except the PID of the main process).
timeout_duration (int): The timeout duration in seconds.
Returns:
str: The data read from the subprocess, stripped of trailing newline characters.
Raises:
TimeoutError: If the timeout duration is reached while reading from the subprocess.
"""
buffer = b""
fd = container.stdout.fileno()
end_time = time.time() + timeout_duration
while time.time() < end_time:
pids = pid_func()
if len(pids) > 0:
# There are still PIDs running
time.sleep(0.05)
continue
ready_to_read, _, _ = select.select([fd], [], [], 0.1)
if ready_to_read:
data = os.read(fd, 4096)
if data:
buffer += data
else:
# No more data to read
break
time.sleep(0.05) # Prevents CPU hogging
if container.poll() is not None:
raise RuntimeError("Subprocess exited unexpectedly.\nCurrent buffer: {}".format(buffer.decode()))
if time.time() >= end_time:
raise TimeoutError("Timeout reached while reading from subprocess.\nCurrent buffer: {}\nRunning PIDs: {}".format(buffer.decode(), pids))
return buffer.decode()
|
Read data from a subprocess with a timeout.
This function uses a file descriptor to read data from the subprocess in a non-blocking way.
NOTE: This is an experimental implementation that is faster than `read_with_timeout`, but
has not been thoroughly tested.
Args:
container (subprocess.Popen): The subprocess container.
timeout_duration (int): The timeout duration in seconds.
Returns:
Tuple[str, str]: The data read from the subprocess (with the process-done marker line removed) and the exit code reported on that marker line.
Raises:
TimeoutError: If the timeout duration is reached while reading from the subprocess.
|
def read_with_timeout_experimental(container, timeout_duration):
"""
Read data from a subprocess with a timeout.
This function uses a file descriptor to read data from the subprocess in a non-blocking way.
NOTE: This is an experimental implementation that is faster than `read_with_timeout`, but
has not been thoroughly tested.
Args:
container (subprocess.Popen): The subprocess container.
timeout_duration (int): The timeout duration in seconds.
Returns:
Tuple[str, str]: The data read from the subprocess (with the process-done marker line removed) and the exit code reported on that marker line.
Raises:
TimeoutError: If the timeout duration is reached while reading from the subprocess.
"""
buffer = b""
fd = container.stdout.fileno()
end_time = time.time() + timeout_duration
while time.time() < end_time:
ready_to_read, _, _ = select.select([fd], [], [], 0.01)
if ready_to_read:
data = os.read(fd, 4096)
if data:
buffer += data
if PROCESS_DONE_MARKER_START in buffer.decode():
break
time.sleep(0.01) # Prevents CPU hogging
if container.poll() is not None:
raise RuntimeError("Subprocess exited unexpectedly.\nCurrent buffer: {}".format(buffer.decode()))
if time.time() >= end_time:
raise TimeoutError("Timeout reached while reading from subprocess.\nCurrent buffer: {}".format(buffer.decode()))
decoded = buffer.decode()
body = "\n".join(line for line in decoded.splitlines() if not line.startswith(PROCESS_DONE_MARKER_START))
last_line = decoded.splitlines()[-1]
_results = PROCESS_DONE_REGEX.search(last_line)
if _results is None:
raise ValueError(f"Could not find process done marker in last line: {last_line=}, {body=}")
exit_code = _results.group(1)
return body, exit_code
|
Get a container object for a given container name and image name
Arguments:
ctr_name (str): Name of container
image_name (str): Name of image
persistent (bool): Whether to use a persistent container or not
Returns:
Container object
|
def get_container(ctr_name: str, image_name: str, persistent: bool = False) -> Tuple[subprocess.Popen, Set]:
"""
Get a container object for a given container name and image name
Arguments:
ctr_name (str): Name of container
image_name (str): Name of image
persistent (bool): Whether to use a persistent container or not
Returns:
Container object
"""
# Let's first check that the image exists and give some better error messages
try:
client = docker.from_env()
except docker.errors.DockerException as e:
docker_not_running = any((
"connection aborted" in str(e).lower(),
"connection refused" in str(e).lower(),
"error while fetching server api version" in str(e).lower(),
))
if docker_not_running:
msg = (
"Probably the Docker daemon is not running. Please start the Docker daemon and try again. "
"You might need to allow the use of the docker socket "
"(https://github.com/princeton-nlp/SWE-agent/issues/159) or symlink the socket "
"if it's at a non-standard location "
"(https://github.com/princeton-nlp/SWE-agent/issues/20#issuecomment-2047506005)."
)
raise RuntimeError(msg) from e
raise
filtered_images = client.images.list(filters={'reference': image_name})
if len(filtered_images) == 0:
msg = (
f"Image {image_name} not found. Please ensure it is built and available. "
"Please double-check that you followed all installation/setup instructions from the "
"readme."
)
raise RuntimeError(msg)
elif len(filtered_images) > 1:
logger.warning(f"Multiple images found for {image_name}, that's weird.")
attrs = filtered_images[0].attrs
if attrs is not None:
logger.info(
f"Found image {image_name} with tags: {attrs['RepoTags']}, created: {attrs['Created']} "
f"for {attrs['Os']} {attrs['Architecture']}."
)
if persistent:
return _get_persistent_container(ctr_name, image_name)
else:
return _get_non_persistent_container(ctr_name, image_name)
|
Return owner, repo, issue number from issue url
|
def parse_gh_issue_url(issue_url: str) -> Tuple[str, str, str]:
"""Return owner, repo, issue number from issue url"""
match = GITHUB_ISSUE_URL_PATTERN.search(issue_url)
if not match:
raise InvalidGithubURL(f"Invalid GitHub issue URL: {issue_url}")
res = match.groups()
assert len(res) == 3
return tuple(res)
|
Return owner, repo from repo url
|
def parse_gh_repo_url(repo_url: str) -> Tuple[str, str]:
"""Return owner, repo from repo url"""
match = GITHUB_REPO_URL_PATTERN.search(repo_url)
if not match:
raise InvalidGithubURL(f"Invalid GitHub repository URL: {repo_url}")
res = match.groups()
assert len(res) == 2
return tuple(res)
|
Returns github issue data in the form of a dictionary.
See https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#get-an-issue
for return format
|
def get_gh_issue_data(issue_url: str, *, token: str = ""):
"""Returns github issue data in the form of a dictionary.
See https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#get-an-issue
for return format
"""
owner, repo, issue_number = parse_gh_issue_url(issue_url)
api = GhApi(token=token)
return api.issues.get(owner, repo, issue_number)
|
Return problem statement from github issue
|
def get_problem_statement_from_github_issue(owner: str, repo: str, issue_number: str, *, token: Optional[str] = "") -> str:
"""Return problem statement from github issue"""
api = GhApi(token=token)
issue = api.issues.get(owner, repo, issue_number)
title = issue.title if issue.title else ""
body = issue.body if issue.body else ""
return f"{title}\n{body}\n"
|
Getter function for handling json, jsonl files
Args:
file_path (str): Path to file
Returns:
List of instances as dictionaries
|
def get_instances(
file_path: str,
base_commit: Optional[str] = None,
split: Optional[str] = None,
token: Optional[str] = None,
*,
repo_path: str = "",
) -> List[Dict[str, Any]]:
"""
Getter function for handling json, jsonl files
Args:
file_path (str): Path to file
Returns:
List of instances as dictionaries
"""
def instance_from_dict(instances):
ib = InstanceBuilder(token=token)
ib.set_from_dict(instances)
return ib.build()
def postproc_instance_list(instances):
if isinstance(instances, dict):
msg = "Expected a list of instances, got a dictionary."
raise ValueError(msg)
return [instance_from_dict(x) for x in instances]
# If file_path is a directory, attempt load from disk
if os.path.isdir(file_path):
try:
dataset_or_dict = load_from_disk(file_path)
if isinstance(dataset_or_dict, dict):
return postproc_instance_list(dataset_or_dict[split])
return postproc_instance_list(dataset_or_dict)
except FileNotFoundError:
# Raised by load_from_disk if the directory is not a dataset directory
pass
# The next if statement is very brittle logic to determine if we're processing a single instance
if (Path(file_path).is_file() and Path(file_path).suffix in ['.md', '.txt']) or is_github_issue_url(file_path):
ib = InstanceBuilder(token=token)
ib.set_problem_statement(file_path)
if repo_path:
ib.set_repo_info(repo_path, base_commit=base_commit)
elif is_github_repo_url(file_path):
ib.set_repo_info_from_gh_url(file_path)
else:
raise ValueError(f"Could not determine repo path from {file_path=}, {repo_path=}")
return [ib.build()]
if base_commit is not None:
raise ValueError("base_commit must be None if data_path is not a github issue url")
# If file_path is a file, load the file
if file_path.endswith(".json"):
return postproc_instance_list(json.load(open(file_path)))
if file_path.endswith(".jsonl"):
return postproc_instance_list([json.loads(x) for x in open(file_path, 'r').readlines()])
if repo_path:
msg = "repo_path must be empty if data_path is not a github url or local repo url"
raise ValueError(msg)
# Attempt load from HF datasets as a last resort
try:
return postproc_instance_list(load_dataset(file_path, split=split))
    except Exception as e:
        raise ValueError(
            f"Could not load instances from {file_path}. "
            "Please ensure --data_path is a GitHub URL, a SWE-bench HuggingFace dataset, or a JSON/JSONL file."
        ) from e
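# Illustrative calls (paths, URLs, and dataset names are examples only):
#   get_instances("data/instances.jsonl")                       # local JSONL file
#   get_instances("https://github.com/owner/repo/issues/1")     # single GitHub issue
#   get_instances("princeton-nlp/SWE-bench_Lite", split="dev")  # HuggingFace dataset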
|
Return the URLs of commits that would close an issue.
|
def get_associated_commit_urls(org: str, repo: str, issue_number: str, *, token: str = "") -> list[str]:
"""Return the URLs of commits that would close an issue."""
api = GhApi(token=token)
# Strangely the "pull_request" field of api.issues.get is often not set
# so we have to go through the events to check if there's a commit
events = api.issues.list_events(org, repo, issue_number)
commit_urls = []
for event in events:
        if event.event != "referenced":
            continue
        if not event.commit_id:
            continue
commit = api.repos.get_commit(org, repo, event.commit_id)
message = commit.commit.message
if f"fixes #{issue_number}" in message.lower() or f"closes #{issue_number}" in message.lower():
commit_urls.append(commit.html_url)
return commit_urls
|
Format a trajectory as a markdown string for use in gh PR description.
|
def format_trajectory_markdown(trajectory: List[Dict[str, str]]):
"""Format a trajectory as a markdown string for use in gh PR description."""
prefix = [
"<details>",
"<summary>Thought process ('trajectory') of SWE-agent (click to expand)</summary>",
"",
"",
]
steps = []
for i, step in enumerate(trajectory):
step_strs = []
for key, value in step.items():
emoji = _MARKDOWN_TRAJECTORY_EMOJI_MAPPING.get(key, "")
if emoji:
emoji += " "
step_strs.append(f"**{emoji}{key.capitalize()} ({i})**:")
if key in ["observation", "state", "action"]:
step_strs.append("```")
step_strs.append(remove_triple_backticks(value).strip())
step_strs.append("```")
else:
step_strs.append(value.strip())
steps.append("\n".join(step_strs))
suffix = [
"",
"</details>",
]
return "\n".join(prefix) + "\n\n---\n\n".join(steps) + "\n".join(suffix)
|
This will use a persistent container
|
def test_env_args(tmpdir_factory):
"""This will use a persistent container"""
local_repo_path = tmpdir_factory.getbasetemp() / "swe-agent-test-repo"
clone_cmd = ["git", "clone", "https://github.com/klieret/swe-agent-test-repo", local_repo_path]
subprocess.run(clone_cmd, check=True)
data_path = local_repo_path / "problem_statements" / "1.md"
test_env_args = EnvironmentArguments(
data_path=str(data_path),
repo_path=str(local_repo_path),
image_name="sweagent/swe-agent:latest",
container_name="test-container-134245890345098",
)
yield test_env_args
# Cleanup (after session ends)
client = docker.from_env()
container = client.containers.get(test_env_args.container_name)
container.remove(force=True)
|
Context manager to make sure we close the shell on the container
so that we can reuse it.
|
def swe_env_context(env_args):
"""Context manager to make sure we close the shell on the container
so that we can reuse it.
"""
env = SWEEnv(env_args)
try:
yield env
finally:
env.close()
|
Test that swe-agent refuses to work if the local repo is dirty
|
def test_exception_replay_local_dirty(swe_agent_test_repo_clone, swe_agent_test_repo_traj):
"""Test that swe-agent refuses to work if the local repo is dirty"""
problem_statement_path = swe_agent_test_repo_clone / "problem_statements" / "1.md"
test_file = swe_agent_test_repo_clone / "tests" / "missing_colon.py"
assert test_file.is_file()
test_file.write_text(test_file.read_text().replace("division", "division_function"))
run_cmd = [
"--traj_path",
str(swe_agent_test_repo_traj),
"--repo_path",
str(swe_agent_test_repo_clone),
"--config_file",
"config/default_from_url.yaml",
"--data_path",
str(problem_statement_path),
"--apply_patch",
"--raise_exceptions",
]
    args, remaining_args = get_args(run_cmd)
    with pytest.raises(ValueError, match=".*dirty.*"):
        main(**vars(args), forward_args=remaining_args)
|
Parse command line arguments and set default values.
|
def parse_args(args):
"""Parse command line arguments and set default values."""
parser = argparse.ArgumentParser(description="Compare Performance tests.")
parser.add_argument(
"--old-file", help="Baseline performance test suite (csv file)", required=True
)
parser.add_argument(
"--new-file", help="New performance test suite (csv file)", required=True
)
parser.add_argument(
"--format",
choices=["markdown", "git", "html"],
help="Output format. Default is markdown.",
default="markdown",
)
parser.add_argument("--output", help="Output file name")
parser.add_argument(
"--changes-only", help="Output only affected tests", action="store_true"
)
parser.add_argument(
"--single-table",
help="Combine data in a single table in git and markdown formats",
action="store_true",
)
parser.add_argument(
"--delta-threshold",
help="Delta threshold. Default 0.05.",
type=float,
default=0.05,
)
return parser.parse_args(args)
|
Compare benchmarks for changes in a formatted report.
|
def main():
"""Compare benchmarks for changes in a formatted report."""
args = parse_args(sys.argv[1:])
report = create_report(
LogParser.results_from_file(args.old_file),
LogParser.results_from_file(args.new_file),
args.delta_threshold,
args.format,
args.changes_only,
args.single_table,
)
print(report)
if args.output:
with open(args.output, "w") as f:
f.write(report)
|
Adds a new entry to the `CMakeLists.txt` file with the given
benchmark name.
|
def update_cmakelists(name):
"""Adds a new entry to the `CMakeLists.txt` file with the given
benchmark name.
"""
relative_path = create_relative_path("../CMakeLists.txt")
file_contents = []
with open(relative_path, "r") as f:
file_contents = f.readlines()
file_new_contents = insert_line_alphabetically(
name,
" single-source/" + name + "\n",
file_contents,
r" single-source\/([a-zA-Z]+)",
)
with open(relative_path, "w") as f:
for line in file_new_contents:
f.write(line)
|
Creates a new Swift file with the given name based on the template
and places it in the `single-source` directory.
|
def create_benchmark_file(name):
"""Creates a new Swift file with the given name based on the template
and places it in the `single-source` directory.
"""
file_text = ""
template_path = create_relative_path("Template.swift")
with open(template_path, "r") as f:
file_text = "".join(f.readlines())
# fill in missing template details
file_text = file_text.format(
name=name,
padding="-" * (56 - len(name)),
year=datetime.date.today().year
)
file_path_prefix = create_relative_path("../single-source/")
file_path = os.path.join(file_path_prefix, name + ".swift")
with open(file_path, "w") as f:
f.write(file_text)
|
Adds an `import` statement to the `main.swift` file for the new
benchmark.
|
def add_import_benchmark(name):
"""Adds an `import` statement to the `main.swift` file for the new
benchmark.
"""
relative_path = create_relative_path("../utils/main.swift")
# read current contents into an array
file_contents = []
with open(relative_path, "r") as f:
file_contents = f.readlines()
# the test dependencies are placed before all benchmarks, so we have to
# insert the benchmark in the right alphabetical order after we have seen
# all test dependencies.
read_test_dependencies = False
previous_benchmark_name = None
file_new_contents = []
for line in file_contents:
        # check if this line is an import of a benchmark and get its name
match = re.search(r"import ([a-zA-Z]+)", line)
if match and match.group(1):
benchmark_name = match.group(1)
# find where to insert the new benchmark in the right alphabetical
# order.
if (
name < benchmark_name
and previous_benchmark_name is None
or name < benchmark_name
and name > previous_benchmark_name
):
if read_test_dependencies:
file_new_contents.append("import " + name + "\n" + line)
else:
# all test dependencies are first specified, so from now
# on we can look where to insert the new benchmark.
read_test_dependencies = True
file_new_contents.append(line)
else:
file_new_contents.append(line)
previous_benchmark_name = benchmark_name
else:
file_new_contents.append(line)
with open(relative_path, "w") as f:
for line in file_new_contents:
f.write(line)
|
Adds a `register` call to the `main.swift` file for the new
benchmark.
|
def add_register_benchmark(name):
"""Adds an `import` statement to the `main.swift` file for the new
benchmark.
"""
relative_path = create_relative_path("../utils/main.swift")
file_contents = []
with open(relative_path, "r") as f:
file_contents = f.readlines()
file_new_contents = insert_line_alphabetically(
name,
"register(" + name + ".benchmarks)\n",
file_contents,
r"register\(([a-zA-Z]+)\.benchmarks\)",
)
with open(relative_path, "w") as f:
for line in file_new_contents:
f.write(line)
|
Iterates through the given lines and executes the regex on each line to
find where the new benchmark should be inserted with the given `new_line`.
|
def insert_line_alphabetically(name, new_line, lines, regex):
"""Iterates through the given lines and executes the regex on each line to
find where the new benchmark should be inserted with the given `new_line`.
"""
# the name of the previous seen benchmark in order to insert the new
# one at the correct position
previous_benchmark_name = None
# the new contents of the file
updated_lines = []
for line in lines:
# apply regex and get name of benchmark on this line
match = re.search(regex, line)
if match and match.group(1):
benchmark_name = match.group(1)
# check if we're at the line where we have to insert the new
# benchmark in the correct alphabetical order
if (
name < benchmark_name
and previous_benchmark_name is None
or name < benchmark_name
and name > previous_benchmark_name
):
updated_lines.append(new_line + line)
else:
updated_lines.append(line)
previous_benchmark_name = benchmark_name
else:
updated_lines.append(line)
return updated_lines
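# Illustrative usage with the register() pattern used above:
#   lines = ["register(Ackermann.benchmarks)\n", "register(Zip.benchmarks)\n"]
#   insert_line_alphabetically("Fibonacci", "register(Fibonacci.benchmarks)\n",
#                              lines, r"register\(([a-zA-Z]+)\.benchmarks\)")
#   # the new line is placed immediately before the "Zip" entry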
|
Create PerformanceTestResult Stub.
|
def _PTR(min_value=700, mem_pages=1000, setup=None):
"""Create PerformanceTestResult Stub."""
return Stub(min_value=min_value, mem_pages=mem_pages, setup=setup)
|
Helper function that constructs tuple with arguments for run method.
|
def _run(test, num_samples=None, num_iters=None, verbose=None, measure_memory=False):
"""Helper function that constructs tuple with arguments for run method."""
return (test, num_samples, num_iters, verbose, measure_memory)
|
Capture stdout and stderr and return their output as string buffers.
|
def captured_output():
"""Capture stdout and stderr and return their output as string buffers."""
new_out, new_err = StringIO(), StringIO()
old_out, old_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = new_out, new_err
yield sys.stdout, sys.stderr
finally:
sys.stdout, sys.stderr = old_out, old_err
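# Illustrative usage (assumes this helper is decorated with
# contextlib.contextmanager in its source module, as the yield suggests):
#   with captured_output() as (out, err):
#       print("hello")
#   assert out.getvalue() == "hello\n"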
|
Split and validate path for an account.
:param req: a swob request
:returns: a tuple of path parts as strings
|
def get_account_name_and_placement(req):
"""
Split and validate path for an account.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account = split_and_validate_path(req, 3)
validate_internal_account(account)
return drive, part, account
|
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings
|
def get_container_name_and_placement(req):
"""
Split and validate path for a container.
:param req: a swob request
:returns: a tuple of path parts as strings
"""
drive, part, account, container = split_and_validate_path(req, 3, 4)
validate_internal_container(account, container)
return drive, part, account, container
|
paste.deploy app factory for creating WSGI account server apps
|
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI account server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return AccountController(conf)
|
Create a list of async-delete jobs
:param account: (native or unicode string) account to delete from
:param container: (native or unicode string) container to delete from
:param objects: (list of native or unicode strings) objects to delete
:param timestamp: (Timestamp) time at which objects should be marked
deleted
:returns: list of dicts appropriate for an UPDATE request to an
expiring-object queue
|
def make_delete_jobs(account, container, objects, timestamp):
'''
Create a list of async-delete jobs
:param account: (native or unicode string) account to delete from
:param container: (native or unicode string) container to delete from
:param objects: (list of native or unicode strings) objects to delete
:param timestamp: (Timestamp) time at which objects should be marked
deleted
:returns: list of dicts appropriate for an UPDATE request to an
expiring-object queue
'''
if six.PY2:
if isinstance(account, str):
account = account.decode('utf8')
if isinstance(container, str):
container = container.decode('utf8')
return [
{
'name': build_task_obj(
timestamp, account, container,
obj.decode('utf8') if six.PY2 and isinstance(obj, str)
else obj, high_precision=True),
'deleted': 0,
'created_at': timestamp.internal,
'etag': MD5_OF_EMPTY_STRING,
'size': 0,
'storage_policy_index': 0,
'content_type': ASYNC_DELETE_TYPE,
} for obj in objects]
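# Illustrative usage (account/container/object names are made up):
#   jobs = make_delete_jobs("AUTH_test", "photos", ["a.jpg", "b.jpg"],
#                           Timestamp.now())
#   # each dict is ready for an UPDATE request to the expiring-object queue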
|
Enqueue jobs to async-delete some portion of a container's namespace
:param swift: InternalClient to use
:param account: account to delete from
:param container: container to delete from
:param marker: only delete objects after this name
:param end_marker: only delete objects before this name. Use ``None`` or
empty string to delete to the end of the namespace.
:param prefix: only delete objects starting with this prefix
:param timestamp: delete all objects as of this time. If ``None``, the
current time will be used.
:param yield_time: approximate period with which intermediate results
should be returned. If ``None``, disable intermediate
results.
:returns: If ``yield_time`` is ``None``, the number of objects marked for
deletion. Otherwise, a generator that will yield out tuples of
``(number of marked objects, last object name)`` approximately
every ``yield_time`` seconds. The final tuple will have ``None``
as the second element. This form allows you to retry when an
error occurs partway through while minimizing duplicate work.
|
def mark_for_deletion(swift, account, container, marker, end_marker,
prefix, timestamp=None, yield_time=10):
'''
Enqueue jobs to async-delete some portion of a container's namespace
:param swift: InternalClient to use
:param account: account to delete from
:param container: container to delete from
:param marker: only delete objects after this name
:param end_marker: only delete objects before this name. Use ``None`` or
empty string to delete to the end of the namespace.
:param prefix: only delete objects starting with this prefix
:param timestamp: delete all objects as of this time. If ``None``, the
current time will be used.
:param yield_time: approximate period with which intermediate results
should be returned. If ``None``, disable intermediate
results.
:returns: If ``yield_time`` is ``None``, the number of objects marked for
deletion. Otherwise, a generator that will yield out tuples of
``(number of marked objects, last object name)`` approximately
every ``yield_time`` seconds. The final tuple will have ``None``
as the second element. This form allows you to retry when an
error occurs partway through while minimizing duplicate work.
'''
if timestamp is None:
timestamp = Timestamp.now()
def enqueue_deletes():
deleted = 0
obj_iter = swift.iter_objects(
account, container,
marker=marker, end_marker=end_marker, prefix=prefix)
time_marker = time.time()
while True:
to_delete = [obj['name'] for obj in itertools.islice(
obj_iter, OBJECTS_PER_UPDATE)]
if not to_delete:
break
delete_jobs = make_delete_jobs(
account, container, to_delete, timestamp)
swift.make_request(
'UPDATE',
swift.make_path('.expiring_objects', str(int(timestamp))),
headers={'X-Backend-Allow-Private-Methods': 'True',
'X-Backend-Storage-Policy-Index': '0',
'X-Timestamp': timestamp.internal},
acceptable_statuses=(2,),
body_file=io.BytesIO(json.dumps(delete_jobs).encode('ascii')))
deleted += len(delete_jobs)
if yield_time is not None and \
time.time() - time_marker > yield_time:
yield deleted, to_delete[-1]
time_marker = time.time()
yield deleted, None
if yield_time is None:
for deleted, marker in enqueue_deletes():
if marker is None:
return deleted
else:
return enqueue_deletes()
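# Illustrative usage (names are made up; `internal_client` stands in for a
# configured InternalClient). With the default yield_time a generator is
# returned so progress can be reported:
#   for marked, last_name in mark_for_deletion(internal_client, "AUTH_test",
#                                              "photos", "", None, "thumb/"):
#       print(marked, last_name)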
|
Parse the get_nodes commandline args
:returns: a tuple, (ring_path, args)
|
def parse_get_node_args(options, args):
"""
Parse the get_nodes commandline args
:returns: a tuple, (ring_path, args)
"""
ring_path = None
if options.policy_name:
if POLICIES.get_by_name(options.policy_name) is None:
raise InfoSystemExit('No policy named %r' % options.policy_name)
elif args and args[0].endswith('.ring.gz'):
if os.path.exists(args[0]):
ring_path = args.pop(0)
else:
raise InfoSystemExit('Ring file does not exist')
if options.quoted:
args = [urllib.parse.unquote(arg) for arg in args]
if len(args) == 1:
args = args[0].strip('/').split('/', 2)
if not ring_path and not options.policy_name:
raise InfoSystemExit('Need to specify policy_name or <ring.gz>')
if not (args or options.partition):
raise InfoSystemExit('No target specified')
if len(args) > 3:
raise InfoSystemExit('Invalid arguments')
return ring_path, args
|
Provide a string that is a well formatted curl command to HEAD an object
on a storage node.
:param ip: the ip of the node
:param port: the port of the node
:param device: the device of the node
:param part: the partition of the target resource
:param target: the path of the target resource
:param policy_index: the policy_index of the target resource (can be None)
:returns: a string, a well formatted curl command
|
def curl_head_command(ip, port, device, part, target, policy_index):
"""
Provide a string that is a well formatted curl command to HEAD an object
on a storage node.
:param ip: the ip of the node
:param port: the port of the node
    :param device: the device of the node
    :param part: the partition of the target resource
:param target: the path of the target resource
:param policy_index: the policy_index of the target resource (can be None)
:returns: a string, a well formatted curl command
"""
if is_valid_ipv6(ip):
formatted_ip = '[%s]' % ip
else:
formatted_ip = ip
cmd = 'curl -g -I -XHEAD "http://%s:%s/%s/%s/%s"' % (
formatted_ip, port, device, part, urllib.parse.quote(target))
if policy_index is not None:
cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index',
policy_index)
cmd += ' --path-as-is'
return cmd
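# Illustrative output (values are made up):
#   curl_head_command("::1", 6200, "sdb1", 123, "AUTH_test/c/o", 0)
#   # -> 'curl -g -I -XHEAD "http://[::1]:6200/sdb1/123/AUTH_test/c/o"
#   #     -H "X-Backend-Storage-Policy-Index: 0" --path-as-is'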
|
print out ring locations of specified type
:param ring: ring instance
:param datadir: name of directory where things are stored. Usually one of
"accounts", "containers", "objects", or "objects-N".
:param account: account name
:param container: container name
:param obj: object name
:param tpart: target partition in ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
:param policy_index: include policy_index in curl headers
|
def print_ring_locations(ring, datadir, account, container=None, obj=None,
tpart=None, all_nodes=False, policy_index=None):
"""
print out ring locations of specified type
:param ring: ring instance
:param datadir: name of directory where things are stored. Usually one of
"accounts", "containers", "objects", or "objects-N".
:param account: account name
:param container: container name
:param obj: object name
:param tpart: target partition in ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
:param policy_index: include policy_index in curl headers
"""
if not ring:
raise ValueError("No ring specified")
if not datadir:
raise ValueError("No datadir specified")
if tpart is None and not account:
raise ValueError("No partition or account/container/object specified")
if not account and (container or obj):
raise ValueError("Container/object specified without account")
if obj and not container:
raise ValueError('Object specified without container')
if obj:
target = '%s/%s/%s' % (account, container, obj)
elif container:
target = '%s/%s' % (account, container)
else:
target = '%s' % (account)
if tpart:
part = int(tpart)
else:
part = ring.get_part(account, container, obj)
primary_nodes = ring.get_part_nodes(part)
handoff_nodes = ring.get_more_nodes(part)
if not all_nodes:
handoff_nodes = itertools.islice(handoff_nodes, len(primary_nodes))
handoff_nodes = list(handoff_nodes)
if account and not tpart:
path_hash = hash_path(account, container, obj)
else:
path_hash = None
print('Partition\t%s' % part)
print('Hash \t%s\n' % path_hash)
for node in primary_nodes:
print('Server:Port Device\t%s:%s %s' % (node['ip'], node['port'],
node['device']))
for node in handoff_nodes:
print('Server:Port Device\t%s:%s %s\t [Handoff]' % (
node['ip'], node['port'], node['device']))
print("\n")
for node in primary_nodes:
cmd = curl_head_command(node['ip'], node['port'], node['device'],
part, target, policy_index)
print(cmd)
for node in handoff_nodes:
cmd = curl_head_command(node['ip'], node['port'], node['device'],
part, target, policy_index)
cmd += ' # [Handoff]'
print(cmd)
print("\n\nUse your own device location of servers:")
print("such as \"export DEVICE=/srv/node\"")
if path_hash:
for node in primary_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' %
(node['ip'], node['device'],
storage_directory(datadir, part, path_hash)))
for node in handoff_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]' %
(node['ip'], node['device'],
storage_directory(datadir, part, path_hash)))
else:
for node in primary_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' %
(node['ip'], node['device'], datadir, part))
for node in handoff_nodes:
print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"'
' # [Handoff]' %
(node['ip'], node['device'], datadir, part))
print('\nnote: `/srv/node*` is used as default value of `devices`, the '
'real value is set in the config file on each storage node.')
|
print out database info/metadata based on its type
:param db_type: database type, account or container
:param info: dict of database info
:param metadata: dict of database metadata
:param drop_prefixes: if True, strip "X-Account-Meta-",
"X-Container-Meta-", "X-Account-Sysmeta-", and
"X-Container-Sysmeta-" when displaying
                      User Metadata and System Metadata dicts
:param verbose: if True, print details for each shard range
|
def print_db_info_metadata(db_type, info, metadata, drop_prefixes=False,
verbose=False):
"""
    print out database info/metadata based on its type
    :param db_type: database type, account or container
    :param info: dict of database info
    :param metadata: dict of database metadata
:param drop_prefixes: if True, strip "X-Account-Meta-",
"X-Container-Meta-", "X-Account-Sysmeta-", and
"X-Container-Sysmeta-" when displaying
                          User Metadata and System Metadata dicts
    :param verbose: if True, print details for each shard range
"""
if info is None:
raise ValueError('DB info is None')
if db_type not in ['container', 'account']:
raise ValueError('Wrong DB type')
try:
account = info['account']
container = None
if db_type == 'container':
container = info['container']
path = '/%s/%s' % (account, container)
else:
path = '/%s' % account
print('Path: %s' % path)
print(' Account: %s' % account)
if db_type == 'container':
print(' Container: %s' % container)
print(' Deleted: %s' % info['is_deleted'])
path_hash = hash_path(account, container)
if db_type == 'container':
print(' Container Hash: %s' % path_hash)
else:
print(' Account Hash: %s' % path_hash)
print('Metadata:')
print(' Created at: %s (%s)' %
(Timestamp(info['created_at']).isoformat,
info['created_at']))
print(' Put Timestamp: %s (%s)' %
(Timestamp(info['put_timestamp']).isoformat,
info['put_timestamp']))
print(' Delete Timestamp: %s (%s)' %
(Timestamp(info['delete_timestamp']).isoformat,
info['delete_timestamp']))
print(' Status Timestamp: %s (%s)' %
(Timestamp(info['status_changed_at']).isoformat,
info['status_changed_at']))
if db_type == 'account':
print(' Container Count: %s' % info['container_count'])
print(' Object Count: %s' % info['object_count'])
print(' Bytes Used: %s' % info['bytes_used'])
if db_type == 'container':
try:
policy_name = POLICIES[info['storage_policy_index']].name
except KeyError:
policy_name = 'Unknown'
print(' Storage Policy: %s (%s)' % (
policy_name, info['storage_policy_index']))
print(' Reported Put Timestamp: %s (%s)' %
(Timestamp(info['reported_put_timestamp']).isoformat,
info['reported_put_timestamp']))
print(' Reported Delete Timestamp: %s (%s)' %
(Timestamp(info['reported_delete_timestamp']).isoformat,
info['reported_delete_timestamp']))
print(' Reported Object Count: %s' %
info['reported_object_count'])
print(' Reported Bytes Used: %s' % info['reported_bytes_used'])
print(' Chexor: %s' % info['hash'])
print(' UUID: %s' % info['id'])
except KeyError as e:
raise ValueError('Info is incomplete: %s' % e)
meta_prefix = 'x_' + db_type + '_'
for key, value in info.items():
if key.lower().startswith(meta_prefix):
title = key.replace('_', '-').title()
print(' %s: %s' % (title, value))
user_metadata = {}
sys_metadata = {}
for key, (value, timestamp) in metadata.items():
if is_user_meta(db_type, key):
if drop_prefixes:
key = strip_user_meta_prefix(db_type, key)
user_metadata[key] = value
elif is_sys_meta(db_type, key):
if drop_prefixes:
key = strip_sys_meta_prefix(db_type, key)
sys_metadata[key] = value
else:
title = key.replace('_', '-').title()
print(' %s: %s' % (title, value))
if sys_metadata:
print(' System Metadata:')
for key, value in sys_metadata.items():
print(' %s: %s' % (key, value))
else:
print('No system metadata found in db file')
if user_metadata:
print(' User Metadata:')
for key, value in user_metadata.items():
print(' %s: %s' % (key, value))
else:
print('No user metadata found in db file')
if db_type == 'container':
print('Sharding Metadata:')
shard_type = 'root' if info['is_root'] else 'shard'
print(' Type: %s' % shard_type)
print(' State: %s' % info['db_state'])
if info.get('shard_ranges'):
num_shards = len(info['shard_ranges'])
print('Shard Ranges (%d):' % num_shards)
count_by_state = defaultdict(int)
for srange in info['shard_ranges']:
count_by_state[(srange.state, srange.state_text)] += 1
print(' States:')
for key_state, count in sorted(count_by_state.items()):
key, state = key_state
print(' %9s: %s' % (state, count))
if verbose:
for srange in info['shard_ranges']:
srange = dict(srange, state_text=srange.state_text)
print(' Name: %(name)s' % srange)
print(' lower: %(lower)r, upper: %(upper)r' % srange)
print(' Object Count: %(object_count)d, Bytes Used: '
'%(bytes_used)d, State: %(state_text)s (%(state)d)'
% srange)
print(' Created at: %s (%s)'
% (Timestamp(srange['timestamp']).isoformat,
srange['timestamp']))
print(' Meta Timestamp: %s (%s)'
% (Timestamp(srange['meta_timestamp']).isoformat,
srange['meta_timestamp']))
else:
print('(Use -v/--verbose to show more Shard Ranges details)')
|
Print out basic info and metadata from object, as returned from
:func:`swift.obj.diskfile.read_metadata`.
Metadata should include the keys: name, Content-Type, and
X-Timestamp.
Additional metadata is displayed unmodified.
:param metadata: dict of object metadata
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
:raises ValueError: if the metadata is empty or contains an invalid path
|
def print_obj_metadata(metadata, drop_prefixes=False):
"""
Print out basic info and metadata from object, as returned from
:func:`swift.obj.diskfile.read_metadata`.
Metadata should include the keys: name, Content-Type, and
X-Timestamp.
Additional metadata is displayed unmodified.
:param metadata: dict of object metadata
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
    :raises ValueError: if the metadata is empty or contains an invalid path
"""
user_metadata = {}
sys_metadata = {}
transient_sys_metadata = {}
other_metadata = {}
if not metadata:
raise ValueError('Metadata is None')
path = metadata.pop('name', '')
content_type = metadata.pop('Content-Type', '')
ts = Timestamp(metadata.pop('X-Timestamp', 0))
account = container = obj = obj_hash = None
if path:
try:
account, container, obj = path.split('/', 3)[1:]
except ValueError:
raise ValueError('Path is invalid for object %r' % path)
else:
obj_hash = hash_path(account, container, obj)
print('Path: %s' % path)
print(' Account: %s' % account)
print(' Container: %s' % container)
print(' Object: %s' % obj)
print(' Object hash: %s' % obj_hash)
else:
print('Path: Not found in metadata')
if content_type:
print('Content-Type: %s' % content_type)
else:
print('Content-Type: Not found in metadata')
if ts:
print('Timestamp: %s (%s)' % (ts.isoformat, ts.internal))
else:
print('Timestamp: Not found in metadata')
for key, value in metadata.items():
if is_user_meta('Object', key):
if drop_prefixes:
key = strip_user_meta_prefix('Object', key)
user_metadata[key] = value
elif is_sys_meta('Object', key):
if drop_prefixes:
key = strip_sys_meta_prefix('Object', key)
sys_metadata[key] = value
elif is_object_transient_sysmeta(key):
if drop_prefixes:
key = strip_object_transient_sysmeta_prefix(key)
transient_sys_metadata[key] = value
else:
other_metadata[key] = value
def print_metadata(title, items):
print(title)
if items:
for key, value in sorted(items.items()):
print(' %s: %s' % (key, value))
else:
print(' No metadata found')
print_metadata('System Metadata:', sys_metadata)
print_metadata('Transient System Metadata:', transient_sys_metadata)
print_metadata('User Metadata:', user_metadata)
print_metadata('Other Metadata:', other_metadata)
for label, meta in [
('Data crypto details',
sys_metadata.get('X-Object-Sysmeta-Crypto-Body-Meta')),
('Metadata crypto details',
transient_sys_metadata.get('X-Object-Transient-Sysmeta-Crypto-Meta')),
]:
if meta is None:
continue
print('%s: %s' % (
label,
json.dumps(load_crypto_meta(meta, b64decode=False), indent=2,
sort_keys=True, separators=(',', ': '))))
|
Display information about an object read from the datafile.
Optionally verify the datafile content matches the ETag metadata.
:param datafile: path on disk to object file
:param check_etag: boolean, will read datafile content and verify
computed checksum matches value stored in
metadata.
:param swift_dir: the path on disk to rings
:param policy_name: optionally the name to use when finding the ring
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
|
def print_obj(datafile, check_etag=True, swift_dir='/etc/swift',
policy_name='', drop_prefixes=False):
"""
Display information about an object read from the datafile.
Optionally verify the datafile content matches the ETag metadata.
:param datafile: path on disk to object file
:param check_etag: boolean, will read datafile content and verify
computed checksum matches value stored in
metadata.
:param swift_dir: the path on disk to rings
:param policy_name: optionally the name to use when finding the ring
:param drop_prefixes: if True, strip "X-Object-Meta-", "X-Object-Sysmeta-",
and "X-Object-Transient-Sysmeta-" when displaying
User Metadata, System Metadata, and Transient
System Metadata entries
"""
if not os.path.exists(datafile):
print("Data file doesn't exist")
raise InfoSystemExit()
if not datafile.startswith(('/', './')):
datafile = './' + datafile
policy_index = None
ring = None
datadir = DATADIR_BASE
# try to extract policy index from datafile disk path
fullpath = os.path.abspath(datafile)
policy_index = int(extract_policy(fullpath) or POLICIES.legacy)
try:
if policy_index:
datadir += '-' + str(policy_index)
ring = Ring(swift_dir, ring_name='object-' + str(policy_index))
elif policy_index == 0:
ring = Ring(swift_dir, ring_name='object')
except IOError:
# no such ring
pass
if policy_name:
policy = POLICIES.get_by_name(policy_name)
if policy:
policy_index_for_name = policy.idx
if (policy_index is not None and
policy_index_for_name is not None and
policy_index != policy_index_for_name):
print('Warning: Ring does not match policy!')
print('Double check your policy name!')
if not ring and policy_index_for_name:
ring = POLICIES.get_object_ring(policy_index_for_name,
swift_dir)
datadir = get_data_dir(policy_index_for_name)
with open(datafile, 'rb') as fp:
try:
metadata = read_metadata(fp)
except EOFError:
print("Invalid metadata")
raise InfoSystemExit()
metadata = {wsgi_to_str(k): v if k == 'name' else wsgi_to_str(v)
for k, v in metadata.items()}
etag = metadata.pop('ETag', '')
length = metadata.pop('Content-Length', '')
path = metadata.get('name', '')
print_obj_metadata(metadata, drop_prefixes)
# Optional integrity check; it's useful, but slow.
file_len = None
if check_etag:
h = md5(usedforsecurity=False)
file_len = 0
while True:
data = fp.read(64 * 1024)
if not data:
break
h.update(data)
file_len += len(data)
h = h.hexdigest()
if etag:
if h == etag:
print('ETag: %s (valid)' % etag)
else:
print("ETag: %s doesn't match file hash of %s!" %
(etag, h))
else:
print('ETag: Not found in metadata')
else:
print('ETag: %s (not checked)' % etag)
file_len = os.fstat(fp.fileno()).st_size
if length:
if file_len == int(length):
print('Content-Length: %s (valid)' % length)
else:
print("Content-Length: %s doesn't match file length of %s"
% (length, file_len))
else:
print('Content-Length: Not found in metadata')
account, container, obj = path.split('/', 3)[1:]
if ring:
print_ring_locations(ring, datadir, account, container, obj,
policy_index=policy_index)
|
Display placement information for an item based on ring lookup.
If a ring is provided it always takes precedence, but warnings will be
emitted if it doesn't match other optional arguments like the policy_name
or ring_name.
If no ring is provided the ring_name and/or policy_name will be used to
lookup the ring.
:param ring: a ring instance
:param ring_name: server type, or storage policy ring name if object ring
:param account: account name
:param container: container name
:param obj: object name
:param partition: part number for non path lookups
:param policy_name: name of storage policy to use to lookup the ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
|
def print_item_locations(ring, ring_name=None, account=None, container=None,
obj=None, **kwargs):
"""
Display placement information for an item based on ring lookup.
If a ring is provided it always takes precedence, but warnings will be
emitted if it doesn't match other optional arguments like the policy_name
or ring_name.
If no ring is provided the ring_name and/or policy_name will be used to
lookup the ring.
:param ring: a ring instance
:param ring_name: server type, or storage policy ring name if object ring
:param account: account name
:param container: container name
:param obj: object name
:param partition: part number for non path lookups
:param policy_name: name of storage policy to use to lookup the ring
:param all_nodes: include all handoff nodes. If false, only the N primary
nodes and first N handoffs will be printed.
"""
policy_name = kwargs.get('policy_name', None)
part = kwargs.get('partition', None)
all_nodes = kwargs.get('all', False)
swift_dir = kwargs.get('swift_dir', '/etc/swift')
if ring and policy_name:
policy = POLICIES.get_by_name(policy_name)
if policy:
if ring_name != policy.ring_name:
print('Warning: mismatch between ring and policy name!')
else:
print('Warning: Policy %s is not valid' % policy_name)
policy_index = None
if ring is None and (obj or part):
if not policy_name:
print('Need a ring or policy')
raise InfoSystemExit()
policy = POLICIES.get_by_name(policy_name)
if not policy:
print('No policy named %r' % policy_name)
raise InfoSystemExit()
policy_index = int(policy)
ring = POLICIES.get_object_ring(policy_index, swift_dir)
ring_name = (POLICIES.get_by_name(policy_name)).ring_name
if (container or obj) and not account:
print('No account specified')
raise InfoSystemExit()
if obj and not container:
print('No container specified')
raise InfoSystemExit()
if not account and not part:
print('No target specified')
raise InfoSystemExit()
loc = '<type>'
if part and ring_name:
if '-' in ring_name and ring_name.startswith('object'):
loc = 'objects-' + ring_name.split('-', 1)[1]
else:
loc = ring_name + 's'
if account and container and obj:
loc = 'objects'
if '-' in ring_name and ring_name.startswith('object'):
policy_index = int(ring_name.rsplit('-', 1)[1])
loc = 'objects-%d' % policy_index
if account and container and not obj:
loc = 'containers'
if not any([ring, ring_name]):
ring = Ring(swift_dir, ring_name='container')
else:
if ring_name != 'container':
print('Warning: account/container specified ' +
'but ring not named "container"')
if account and not container and not obj:
loc = 'accounts'
if not any([ring, ring_name]):
ring = Ring(swift_dir, ring_name='account')
else:
if ring_name != 'account':
print('Warning: account specified ' +
'but ring not named "account"')
if account:
print('\nAccount \t%s' % urllib.parse.quote(account))
if container:
print('Container\t%s' % urllib.parse.quote(container))
if obj:
print('Object \t%s\n\n' % urllib.parse.quote(obj))
print_ring_locations(ring, loc, account, container, obj, part, all_nodes,
policy_index=policy_index)
|
Wrap the given ``func`` to catch any ``ValueError`` and raise an
``argparse.ArgumentTypeError`` instead.
:param func: a function.
:param msg: an optional message to use with any exception that is used; if
not given then the string representation of the ValueError will be
used.
:return: a function wrapper.
|
def wrap_for_argparse(func, msg=None):
"""
Wrap the given ``func`` to catch any ``ValueError`` and raise an
``argparse.ArgumentTypeError`` instead.
:param func: a function.
:param msg: an optional message to use with any exception that is used; if
not given then the string representation of the ValueError will be
used.
:return: a function wrapper.
"""
def wrapped_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except ValueError as err:
raise argparse.ArgumentTypeError(str(err) if msg is None else msg)
return wrapped_func
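# Illustrative usage: surface a clean argparse error instead of a traceback
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--replicas",
#                       type=wrap_for_argparse(float, "replicas must be a number"))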
|
Format a device for display.
|
def format_device(dev):
"""
Format a device for display.
"""
copy_dev = dev.copy()
for key in ('ip', 'replication_ip'):
if ':' in copy_dev[key]:
copy_dev[key] = '[' + copy_dev[key] + ']'
return ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)sR'
'%(replication_ip)s:%(replication_port)s/%(device)s_'
'"%(meta)s"' % copy_dev)
|
Parse devices to add as specified on the command line.
Will exit on error and spew warnings.
:returns: array of device dicts
|
def _parse_add_values(argvish):
"""
Parse devices to add as specified on the command line.
Will exit on error and spew warnings.
:returns: array of device dicts
"""
new_cmd_format, opts, args = validate_args(argvish)
# We'll either parse the all-in-one-string format or the
# --options format,
# but not both. If both are specified, raise an error.
parsed_devs = []
if len(args) > 0:
if new_cmd_format or len(args) % 2 != 0:
print(Commands.add.__doc__.strip())
exit(EXIT_ERROR)
devs_and_weights = izip(islice(args, 0, len(args), 2),
islice(args, 1, len(args), 2))
for devstr, weightstr in devs_and_weights:
dev_dict = parse_add_value(devstr)
if dev_dict['region'] is None:
stderr.write('WARNING: No region specified for %s. '
'Defaulting to region 1.\n' % devstr)
dev_dict['region'] = 1
if dev_dict['replication_ip'] is None:
dev_dict['replication_ip'] = dev_dict['ip']
if dev_dict['replication_port'] is None:
dev_dict['replication_port'] = dev_dict['port']
weight = float(weightstr)
if weight < 0:
raise ValueError('Invalid weight value: %s' % devstr)
dev_dict['weight'] = weight
parsed_devs.append(dev_dict)
else:
parsed_devs.append(build_dev_from_opts(opts))
return parsed_devs
|
Takes a serialized scenario and turns it into a data structure suitable
for feeding to run_scenario().
:returns: scenario
:raises ValueError: on invalid scenario
|
def parse_scenario(scenario_data):
"""
Takes a serialized scenario and turns it into a data structure suitable
for feeding to run_scenario().
:returns: scenario
:raises ValueError: on invalid scenario
"""
parsed_scenario = {}
try:
raw_scenario = json.loads(scenario_data)
except ValueError as err:
raise ValueError("Invalid JSON in scenario file: %s" % err)
if not isinstance(raw_scenario, dict):
raise ValueError("Scenario must be a JSON object, not array or string")
if 'part_power' not in raw_scenario:
raise ValueError("part_power missing")
try:
parsed_scenario['part_power'] = int(raw_scenario['part_power'])
except ValueError as err:
raise ValueError("part_power not an integer: %s" % err)
if not 1 <= parsed_scenario['part_power'] <= 32:
raise ValueError("part_power must be between 1 and 32, but was %d"
% raw_scenario['part_power'])
if 'replicas' not in raw_scenario:
raise ValueError("replicas missing")
try:
parsed_scenario['replicas'] = float(raw_scenario['replicas'])
except ValueError as err:
raise ValueError("replicas not a float: %s" % err)
if parsed_scenario['replicas'] < 1:
raise ValueError("replicas must be at least 1, but is %f"
% parsed_scenario['replicas'])
if 'overload' not in raw_scenario:
raise ValueError("overload missing")
try:
parsed_scenario['overload'] = float(raw_scenario['overload'])
except ValueError as err:
raise ValueError("overload not a float: %s" % err)
if parsed_scenario['overload'] < 0:
raise ValueError("overload must be non-negative, but is %f"
% parsed_scenario['overload'])
if 'random_seed' not in raw_scenario:
raise ValueError("random_seed missing")
try:
parsed_scenario['random_seed'] = int(raw_scenario['random_seed'])
except ValueError as err:
raise ValueError("replicas not an integer: %s" % err)
if 'rounds' not in raw_scenario:
raise ValueError("rounds missing")
if not isinstance(raw_scenario['rounds'], list):
raise ValueError("rounds must be an array")
parser_for_command = {
'add': _parse_add_command,
'remove': _parse_remove_command,
'set_weight': _parse_set_weight_command,
'save': _parse_save_command,
}
parsed_scenario['rounds'] = []
for round_index, raw_round in enumerate(raw_scenario['rounds']):
if not isinstance(raw_round, list):
raise ValueError("round %d not an array" % round_index)
parsed_round = []
for command_index, command in enumerate(raw_round):
if command[0] not in parser_for_command:
raise ValueError(
"Unknown command (round %d, command %d): "
"'%s' should be one of %s" %
(round_index, command_index, command[0],
parser_for_command.keys()))
parsed_round.append(
parser_for_command[command[0]](
round_index, command_index, command))
parsed_scenario['rounds'].append(parsed_round)
return parsed_scenario
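# Illustrative minimal input (the per-round command payloads are handled by the
# _parse_*_command helpers, which are not shown here):
#   parse_scenario(json.dumps({
#       "part_power": 10, "replicas": 3.0, "overload": 0.1,
#       "random_seed": 42, "rounds": [],
#   }))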
|
Takes a parsed scenario (like from parse_scenario()) and runs it.
|
def run_scenario(scenario):
"""
Takes a parsed scenario (like from parse_scenario()) and runs it.
"""
seed = scenario['random_seed']
rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1)
rb.set_overload(scenario['overload'])
command_map = {
'add': rb.add_dev,
'remove': rb.remove_dev,
'set_weight': rb.set_dev_weight,
'save': rb.save,
}
for round_index, commands in enumerate(scenario['rounds']):
print("Round %d" % (round_index + 1))
for command in commands:
key = command.pop(0)
try:
command_f = command_map[key]
except KeyError:
raise ValueError("unknown command %r" % key)
command_f(*command)
rebalance_number = 1
parts_moved, old_balance, removed_devs = rb.rebalance(seed=seed)
rb.pretend_min_part_hours_passed()
print("\tRebalance 1: moved %d parts, balance is %.6f, %d removed "
"devs" % (parts_moved, old_balance, removed_devs))
while True:
rebalance_number += 1
parts_moved, new_balance, removed_devs = rb.rebalance(seed=seed)
rb.pretend_min_part_hours_passed()
print("\tRebalance %d: moved %d parts, balance is %.6f, "
"%d removed devs" % (rebalance_number, parts_moved,
new_balance, removed_devs))
if parts_moved == 0 and removed_devs == 0:
break
if abs(new_balance - old_balance) < 1 and not (
old_balance == builder.MAX_BALANCE and
new_balance == builder.MAX_BALANCE):
break
old_balance = new_balance
|
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param device: device of the node to query
:param partition: partition on the device
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
|
def http_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None, ssl=False):
"""
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param device: device of the node to query
:param partition: partition on the device
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if isinstance(path, six.text_type):
path = path.encode("utf-8")
if isinstance(device, six.text_type):
device = device.encode("utf-8")
if isinstance(partition, six.text_type):
partition = partition.encode('utf-8')
elif isinstance(partition, six.integer_types):
partition = str(partition).encode('ascii')
path = quote(b'/' + device + b'/' + partition + path)
return http_connect_raw(
ipaddr, port, method, path, headers, query_string, ssl)
|
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
|
def http_connect_raw(ipaddr, port, method, path, headers=None,
query_string=None, ssl=False):
"""
Helper function to create an HTTPConnection object. If ssl is set True,
HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection
will be used, which is buffered for backend Swift services.
:param ipaddr: IPv4 address to connect to
:param port: port to connect to
:param method: HTTP method to request ('GET', 'PUT', 'POST', etc.)
:param path: request path
:param headers: dictionary of headers
:param query_string: request query string
:param ssl: set True if SSL should be used (default: False)
:returns: HTTPConnection object
"""
if not port:
port = 443 if ssl else 80
if ssl:
conn = HTTPSConnection('%s:%s' % (ipaddr, port))
else:
conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port))
if query_string:
# Round trip to ensure proper quoting
if six.PY2:
query_string = urlencode(parse_qsl(
query_string, keep_blank_values=True))
else:
query_string = urlencode(
parse_qsl(query_string, keep_blank_values=True,
encoding='latin1'),
encoding='latin1')
path += '?' + query_string
conn.path = path
conn.putrequest(method, path, skip_host=(headers and 'Host' in headers))
if headers:
for header, value in headers.items():
conn.putheader(header, str(value))
conn.endheaders()
return conn
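# Illustrative usage (address and path are made up):
#   conn = http_connect_raw("127.0.0.1", 6200, "GET", "/recon/version")
#   resp = conn.getresponse()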
|
Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
populating OVERRIDE_CONSTRAINTS and EFFECTIVE_CONSTRAINTS along the way.
|
def reload_constraints():
"""
Parse SWIFT_CONF_FILE and reset module level global constraint attrs,
    populating OVERRIDE_CONSTRAINTS and EFFECTIVE_CONSTRAINTS along the way.
"""
global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS
SWIFT_CONSTRAINTS_LOADED = False
OVERRIDE_CONSTRAINTS = {}
constraints_conf = ConfigParser()
if constraints_conf.read(utils.SWIFT_CONF_FILE):
SWIFT_CONSTRAINTS_LOADED = True
for name, default in DEFAULT_CONSTRAINTS.items():
try:
value = constraints_conf.get('swift-constraints', name)
except NoOptionError:
pass
except NoSectionError:
# We are never going to find the section for another option
break
else:
if isinstance(default, int):
value = int(value) # Go ahead and let it error
elif isinstance(default, str):
pass # No translation needed, I guess
else:
# Hope we want a list!
value = utils.list_from_csv(value)
OVERRIDE_CONSTRAINTS[name] = value
for name, default in DEFAULT_CONSTRAINTS.items():
value = OVERRIDE_CONSTRAINTS.get(name, default)
EFFECTIVE_CONSTRAINTS[name] = value
# "globals" in this context is module level globals, always.
globals()[name.upper()] = value
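# Illustrative usage: after editing the [swift-constraints] section of
# swift.conf, re-read it without restarting the process:
#   reload_constraints()
#   # module-level names such as MAX_META_COUNT now reflect the file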
|
Check metadata sent in the request headers. This should only check
that the metadata in the request given is valid. Checks against
account/container overall metadata should be forwarded on to its
respective server to be checked.
:param req: request object
:param target_type: str: one of: object, container, or account: indicates
which type the target storage for the metadata is
:returns: HTTPBadRequest with bad metadata otherwise None
|
def check_metadata(req, target_type):
"""
Check metadata sent in the request headers. This should only check
that the metadata in the request given is valid. Checks against
account/container overall metadata should be forwarded on to its
respective server to be checked.
:param req: request object
:param target_type: str: one of: object, container, or account: indicates
which type the target storage for the metadata is
:returns: HTTPBadRequest with bad metadata otherwise None
"""
target_type = target_type.lower()
prefix = 'x-%s-meta-' % target_type
meta_count = 0
meta_size = 0
for key, value in req.headers.items():
if (isinstance(value, six.string_types)
and len(value) > MAX_HEADER_SIZE):
return HTTPBadRequest(body=b'Header value too long: %s' %
wsgi_to_bytes(key[:MAX_META_NAME_LENGTH]),
request=req, content_type='text/plain')
if not key.lower().startswith(prefix):
continue
key = key[len(prefix):]
if not key:
return HTTPBadRequest(body='Metadata name cannot be empty',
request=req, content_type='text/plain')
bad_key = not check_utf8(wsgi_to_str(key))
bad_value = value and not check_utf8(wsgi_to_str(value))
if target_type in ('account', 'container') and (bad_key or bad_value):
return HTTPBadRequest(body='Metadata must be valid UTF-8',
request=req, content_type='text/plain')
meta_count += 1
meta_size += len(key) + len(value)
if len(key) > MAX_META_NAME_LENGTH:
return HTTPBadRequest(
body=wsgi_to_bytes('Metadata name too long: %s%s' % (
prefix, key)),
request=req, content_type='text/plain')
if len(value) > MAX_META_VALUE_LENGTH:
return HTTPBadRequest(
body=wsgi_to_bytes('Metadata value longer than %d: %s%s' % (
MAX_META_VALUE_LENGTH, prefix, key)),
request=req, content_type='text/plain')
if meta_count > MAX_META_COUNT:
return HTTPBadRequest(
body='Too many metadata items; max %d' % MAX_META_COUNT,
request=req, content_type='text/plain')
if meta_size > MAX_META_OVERALL_SIZE:
return HTTPBadRequest(
body='Total metadata too large; max %d'
% MAX_META_OVERALL_SIZE,
request=req, content_type='text/plain')
return None
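# Illustrative usage (assumes swift.common.swob.Request; the header is made up):
#   req = Request.blank("/v1/a/c", headers={"X-Container-Meta-Color": "blue"})
#   assert check_metadata(req, "container") is None   # valid metadata -> None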
|
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns: HTTPRequestEntityTooLarge -- the object is too large
:returns: HTTPLengthRequired -- missing content-length header and not
a chunked request
:returns: HTTPBadRequest -- missing or bad content-type header, or
bad metadata
:returns: HTTPNotImplemented -- unsupported transfer-encoding header value
|
def check_object_creation(req, object_name):
"""
Check to ensure that everything is alright about an object to be created.
:param req: HTTP request object
:param object_name: name of object to be created
:returns: HTTPRequestEntityTooLarge -- the object is too large
:returns: HTTPLengthRequired -- missing content-length header and not
a chunked request
:returns: HTTPBadRequest -- missing or bad content-type header, or
bad metadata
:returns: HTTPNotImplemented -- unsupported transfer-encoding header value
"""
try:
ml = req.message_length()
except ValueError as e:
return HTTPBadRequest(request=req, content_type='text/plain',
body=str(e))
except AttributeError as e:
return HTTPNotImplemented(request=req, content_type='text/plain',
body=str(e))
if ml is not None and ml > MAX_FILE_SIZE:
return HTTPRequestEntityTooLarge(body='Your request is too large.',
request=req,
content_type='text/plain')
if req.content_length is None and \
req.headers.get('transfer-encoding') != 'chunked':
return HTTPLengthRequired(body='Missing Content-Length header.',
request=req,
content_type='text/plain')
if len(object_name) > MAX_OBJECT_NAME_LENGTH:
return HTTPBadRequest(body='Object name length of %d longer than %d' %
(len(object_name), MAX_OBJECT_NAME_LENGTH),
request=req, content_type='text/plain')
if 'Content-Type' not in req.headers:
return HTTPBadRequest(request=req, content_type='text/plain',
body=b'No content type')
try:
req = check_delete_headers(req)
except HTTPException as e:
return HTTPBadRequest(request=req, body=e.body,
content_type='text/plain')
if not check_utf8(wsgi_to_str(req.headers['Content-Type'])):
return HTTPBadRequest(request=req, body='Invalid Content-Type',
content_type='text/plain')
return check_metadata(req, 'object')
|
Verify that the path to the device is a directory. This is a lesser
constraint than a full mount_check, enforced when a full mount_check
isn't possible, for instance on a VM using loopback or partitions.
:param root: base path where the dir is
:param drive: drive name to be checked
:returns: full path to the device
:raises ValueError: if drive fails to validate
|
def check_dir(root, drive):
"""
    Verify that the path to the device is a directory. This is a lesser
    constraint than a full mount_check, enforced when a full mount_check
    isn't possible, for instance on a VM using loopback or partitions.
:param root: base path where the dir is
:param drive: drive name to be checked
:returns: full path to the device
:raises ValueError: if drive fails to validate
"""
return check_drive(root, drive, False)
|