repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
GNNDelete
|
GNNDelete-main/framework/models/graph_classification/gcn_delete.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder
from torch_geometric.nn import GCNConv, MessagePassing, global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.utils import degree
from torch_geometric.nn.inits import uniform
from torch_scatter import scatter_mean
from ..deletion import DeletionLayer
def remove_edges(edge_index, edge_attr=None, ratio=0.025):
row, col = edge_index
mask = row < col
row, col = row[mask], col[mask]
if edge_attr is not None:
edge_attr = edge_attr[mask]
num_edges = len(row)
num_remove = max(1, int(num_edges * ratio))
selected = torch.randperm(num_edges)[:num_edges - num_remove]
row = row[selected]
col = col[selected]
if edge_attr is not None: edge_attr = edge_attr[selected]
return torch.stack([row, col], dim=0), edge_attr
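# Illustrative example (added for clarity; not part of the original file): with the
# default ratio=0.025, remove_edges first keeps only the canonical row < col copy of
# each undirected edge, then drops at least one of those copies at random, e.g.
#   edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])   # 2 undirected edges
#   edge_attr = torch.randn(4, 3)                             # hypothetical bond features
#   pruned_index, pruned_attr = remove_edges(edge_index, edge_attr)
#   # pruned_index has shape [2, num_kept]; here num_kept == 1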
'''
Source: OGB github
https://github.com/snap-stanford/ogb/blob/master/examples/graphproppred/mol/main_pyg.py
'''
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
def __init__(self, emb_dim):
super().__init__(aggr='add')
self.linear = nn.Linear(emb_dim, emb_dim)
self.root_emb = nn.Embedding(1, emb_dim)
self.bond_encoder = BondEncoder(emb_dim=emb_dim)
def forward(self, x, edge_index, edge_attr):
x = self.linear(x)
edge_embedding = self.bond_encoder(edge_attr)
row, col = edge_index
#edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
deg = degree(row, x.size(0), dtype=x.dtype) + 1
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * F.relu(x_j + edge_attr)
def update(self, aggr_out):
return aggr_out
### GNN to generate node embedding
class GNN_node_delete(nn.Module):
def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last", residual=False, mask_1hop=None, mask_2hop=None):
super().__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
self.atom_encoder = AtomEncoder(emb_dim)
###List of GNNs
self.deletes = nn.ModuleList([
DeletionLayer(emb_dim, None),
DeletionLayer(emb_dim, None)
])
self.convs = nn.ModuleList()
self.batch_norms = nn.ModuleList()
for layer in range(num_layer):
self.convs.append(GCNConv(emb_dim))
self.batch_norms.append(nn.BatchNorm1d(emb_dim))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
edge_index, edge_attr = remove_edges(edge_index, edge_attr)
### computing input node embedding
h_list = [self.atom_encoder(x)]
for layer in range(self.num_layer):
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.deletes[layer](h)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training=self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
if self.residual:
h += h_list[layer]
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer + 1):
node_representation += h_list[layer]
return node_representation
class GNN(torch.nn.Module):
def __init__(self, num_tasks, num_layer=2, emb_dim=300, virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"):
super().__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
self.emb_dim = emb_dim
self.num_tasks = num_tasks
self.graph_pooling = graph_pooling
### GNN to generate node embeddings
self.gnn_node = GNN_node_delete(num_layer, emb_dim, JK=JK, drop_ratio=drop_ratio, residual=residual)
### Pooling function to generate whole-graph embeddings
if self.graph_pooling == "sum":
self.pool = global_add_pool
elif self.graph_pooling == "mean":
self.pool = global_mean_pool
elif self.graph_pooling == "max":
self.pool = global_max_pool
elif self.graph_pooling == "attention":
self.pool = GlobalAttention(gate_nn=nn.Sequential(nn.Linear(emb_dim, 2*emb_dim), nn.BatchNorm1d(2*emb_dim), nn.ReLU(), nn.Linear(2*emb_dim, 1)))
elif self.graph_pooling == "set2set":
self.pool = Set2Set(emb_dim, processing_steps = 2)
else:
raise ValueError("Invalid graph pooling type.")
if graph_pooling == "set2set":
self.graph_pred_linear = nn.Linear(2*self.emb_dim, self.num_tasks)
else:
self.graph_pred_linear = nn.Linear(self.emb_dim, self.num_tasks)
def forward(self, batched_data):
h_node = self.gnn_node(batched_data)
h_graph = self.pool(h_node, batched_data.batch)
return self.graph_pred_linear(h_graph)
| 5,857 | 34.719512 | 153 |
py
|
GNNDelete
|
GNNDelete-main/framework/models/graph_classification/gcn.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ogb.graphproppred.mol_encoder import AtomEncoder
from torch_geometric.nn import GCNConv, MessagePassing, global_add_pool, global_mean_pool, global_max_pool, GlobalAttention, Set2Set
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
from torch_geometric.utils import degree
from torch_geometric.nn.inits import uniform
from torch_scatter import scatter_mean
'''
Source: OGB github
https://github.com/snap-stanford/ogb/blob/master/examples/graphproppred/mol/main_pyg.py
'''
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
def __init__(self, emb_dim):
super().__init__(aggr='add')
self.linear = nn.Linear(emb_dim, emb_dim)
self.root_emb = nn.Embedding(1, emb_dim)
self.bond_encoder = BondEncoder(emb_dim=emb_dim)
def forward(self, x, edge_index, edge_attr):
x = self.linear(x)
edge_embedding = self.bond_encoder(edge_attr)
row, col = edge_index
#edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
deg = degree(row, x.size(0), dtype=x.dtype) + 1
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=x, edge_attr=edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * F.relu(x_j + edge_attr)
def update(self, aggr_out):
return aggr_out
### GNN to generate node embedding
class GNN_node(nn.Module):
def __init__(self, num_layer, emb_dim, drop_ratio=0.5, JK="last", residual=False):
super().__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
### add residual connection or not
self.residual = residual
self.atom_encoder = AtomEncoder(emb_dim)
###List of GNNs
self.convs = nn.ModuleList()
self.batch_norms = nn.ModuleList()
for layer in range(num_layer):
self.convs.append(GCNConv(emb_dim))
self.batch_norms.append(nn.BatchNorm1d(emb_dim))
def forward(self, batched_data):
x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
### computing input node embedding
h_list = [self.atom_encoder(x)]
for layer in range(self.num_layer):
h = self.convs[layer](h_list[layer], edge_index, edge_attr)
h = self.batch_norms[layer](h)
if layer == self.num_layer - 1:
#remove relu for the last layer
h = F.dropout(h, self.drop_ratio, training=self.training)
else:
h = F.dropout(F.relu(h), self.drop_ratio, training=self.training)
if self.residual:
h += h_list[layer]
h_list.append(h)
### Different implementations of Jk-concat
if self.JK == "last":
node_representation = h_list[-1]
elif self.JK == "sum":
node_representation = 0
for layer in range(self.num_layer + 1):
node_representation += h_list[layer]
return node_representation
class GNN(torch.nn.Module):
def __init__(self, num_tasks, num_layer=2, emb_dim=300, virtual_node=True, residual=False, drop_ratio=0.5, JK="last", graph_pooling="mean"):
super().__init__()
self.num_layer = num_layer
self.drop_ratio = drop_ratio
self.JK = JK
self.emb_dim = emb_dim
self.num_tasks = num_tasks
self.graph_pooling = graph_pooling
### GNN to generate node embeddings
self.gnn_node = GNN_node(num_layer, emb_dim, JK=JK, drop_ratio=drop_ratio, residual=residual)
### Pooling function to generate whole-graph embeddings
if self.graph_pooling == "sum":
self.pool = global_add_pool
elif self.graph_pooling == "mean":
self.pool = global_mean_pool
elif self.graph_pooling == "max":
self.pool = global_max_pool
elif self.graph_pooling == "attention":
self.pool = GlobalAttention(gate_nn=nn.Sequential(nn.Linear(emb_dim, 2*emb_dim), nn.BatchNorm1d(2*emb_dim), nn.ReLU(), nn.Linear(2*emb_dim, 1)))
elif self.graph_pooling == "set2set":
self.pool = Set2Set(emb_dim, processing_steps = 2)
else:
raise ValueError("Invalid graph pooling type.")
if graph_pooling == "set2set":
self.graph_pred_linear = nn.Linear(2*self.emb_dim, self.num_tasks)
else:
self.graph_pred_linear = nn.Linear(self.emb_dim, self.num_tasks)
def forward(self, batched_data):
h_node = self.gnn_node(batched_data)
h_graph = self.pool(h_node, batched_data.batch)
return self.graph_pred_linear(h_graph)
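# --- Usage sketch (added for illustration; not part of the original repository) ---
# A minimal sketch of driving the GNN class above, assuming the ogb package is
# installed and the "ogbg-molhiv" dataset can be downloaded; the dataset name and
# batch size are arbitrary example choices.
if __name__ == "__main__":
    from ogb.graphproppred import PygGraphPropPredDataset
    from torch_geometric.loader import DataLoader

    dataset = PygGraphPropPredDataset(name="ogbg-molhiv")
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    model = GNN(num_tasks=dataset.num_tasks, num_layer=2, emb_dim=300)
    model.eval()
    with torch.no_grad():
        batch = next(iter(loader))
        out = model(batch)  # logits of shape [num_graphs_in_batch, num_tasks]
    print(out.shape)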
| 5,019 | 35.642336 | 153 |
py
|
Researchers-Migrations
|
Researchers-Migrations-main/Researchers' migrations study/2 - Data ORCID/ORCID extraction/ORCiD_data_user_remover.py
|
# names of the input .csv file and of the cleaned output file
filename = 'ORCID_2021_10_activities_2_extract'
new_filename = 'ORCID_2021_10_activities_2_extract_clean'
ids_to_remove = ['0000-0002-2103-7692']
import os, sys
import pandas as pd
#check we are in the right place
print(os.getcwd())
df = pd.read_csv(filename)
for id in ids_to_remove:
df = df[df.ORCiD != str(id)]
df.to_csv(new_filename)
| 378 | 18.947368 | 57 |
py
|
Researchers-Migrations
|
Researchers-Migrations-main/Researchers' migrations study/2 - Data ORCID/ORCID extraction/ORCiD_data_file_adder.py
|
# input dataset name, XML files to append with their ORCID iDs and flags, and name of the output .csv file
dataset_name = 'ORCID_2021_10_activities_2_extract'
file_names = ['0000-0002-2103-7692_employments_3086073.xml']
ids = ['0000-0002-2103-7692']
flags = ['EMP']
new_dataset_name = 'ORCID_2021_10_activities_2_extract_final'
import os, sys
import pandas as pd
import copy
import xml.etree.ElementTree as ET
def get_affiliation_data(filepath, flag, id, has_published, phd):
tree = ET.parse(filepath)
root = tree.getroot()
profile_data = []
source = None
org = None
role = None
startdate = None
enddate = None
for child in root:
if child.tag == '{http://www.orcid.org/ns/common}source':
source = child
elif child.tag == '{http://www.orcid.org/ns/common}organization':
org = child
elif child.tag == '{http://www.orcid.org/ns/common}role-title':
role = child
elif child.tag == '{http://www.orcid.org/ns/common}start-date':
startdate = child
elif child.tag == '{http://www.orcid.org/ns/common}end-date':
enddate = child
#ORCiD
#try:
# orcid_id = str(source.find('{http://www.orcid.org/ns/common}source-orcid').find('{http://www.orcid.org/ns/common}path').text)
# row = [orcid_id]
#except:
# row=[None]
row = [str(id)]
#POSITION: PhD, Bcs, Associate Professor, Member of Technical Staff...
try:
row.append(str(role.text))
except:
row.append(None)
if not(org is None):
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}name').text))
except:
row.append(None)
#country of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}country').text))
except:
row.append(None)
#region of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}region').text))
except:
row.append(None)
#city of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}city').text))
except:
row.append(None)
else:
for _ in range(4):
row.append(None)
if not(startdate is None):
#starting year
try:
row.append(str(startdate.find('{http://www.orcid.org/ns/common}year').text))
except:
row.append(None)
#starting month
try:
row.append(str(startdate.find('{http://www.orcid.org/ns/common}month').text))
except:
row.append(None)
else:
row.append(None)
row.append(None)
if not(enddate is None):
#ending year
try:
row.append(str(enddate.find('{http://www.orcid.org/ns/common}year').text))
except:
row.append(None)
#ending month
try:
row.append(str(enddate.find('{http://www.orcid.org/ns/common}month').text))
except:
row.append(None)
else:
row.append(None)
row.append(None)
#affiliation_type
row.append(str(flag))
row.append(str(has_published))
row.append(phd)
return row
#check we are in the right place
print(os.getcwd())
df = pd.read_csv(dataset_name)
header = ['ORCiD', 'role_title', 'org_name', 'country', 'region', 'city', 'start_year', 'start_month',
'end_year', 'end_month', 'aff_type', 'has_published', 'is_phd']
for i in range(len(ids)):
for index in range(df.shape[0]):
sys.stdout.write('\r' + str((index + 1) * 100 / df.shape[0]) + '%')
if ids[i] == df.ORCiD[index]:
newrow = get_affiliation_data(file_names[i], flags[i], ids[i], df.has_published[index], df.is_phd[index])
new_row = pd.DataFrame(columns=header)
new_row.loc[0] = newrow
df = pd.concat([df.loc[0:index], copy.deepcopy(new_row), df.loc[index:]], ignore_index=True)
break
df.to_csv(new_dataset_name)
| 4,166 | 28.553191 | 134 |
py
|
Researchers-Migrations
|
Researchers-Migrations-main/Researchers' migrations study/2 - Data ORCID/ORCID extraction/ORCiD_data_extractor_final.py
|
#folder which contains all folders for groups of IDs
data_dir = 'ORCID_2021_10_activities_2'
#name of the output .csv file
filename = 'ORCID_2021_10_activities_2_extract'
import os, sys
import pandas as pd
import xml.etree.ElementTree as ET
#check we are in the right place
print(os.getcwd())
useful_directories = ['educations', 'employments', 'services']
def get_person_data(data, orcid_code_dir, id):
has_published = False
for _, pub_finder in enumerate(os.listdir(orcid_code_dir)):
if pub_finder == 'works':
has_published = True
for n_dir, i in enumerate(os.listdir(orcid_code_dir)):
if i in useful_directories:
orcid_indir_dir = os.path.join(orcid_code_dir, i)
if i == 'educations':
flag = 'EDU'
elif i == 'employments':
flag = 'EMP'
else:# i == 'services'
flag = 'SER'
for n, j in enumerate(os.listdir(orcid_indir_dir)):
filepath = os.path.join(orcid_indir_dir, j)
if filepath.endswith(".xml"):
try:
tree = ET.parse(filepath)
except:
print('\n' + "####")
print("Check " + str(filepath) + " manually")
print("####")
else:
root = tree.getroot()
data.append(get_affiliation_data(root, flag, id, has_published))
#row: ORCiD, role_title, org_name, country, region, city, start year, start month, end year, end mont
def get_affiliation_data(root, flag, id, has_published):
profile_data = []
source = None
org = None
role = None
startdate = None
enddate = None
for child in root:
if child.tag == '{http://www.orcid.org/ns/common}source':
source = child
elif child.tag == '{http://www.orcid.org/ns/common}organization':
org = child
elif child.tag == '{http://www.orcid.org/ns/common}role-title':
role = child
elif child.tag == '{http://www.orcid.org/ns/common}start-date':
startdate = child
elif child.tag == '{http://www.orcid.org/ns/common}end-date':
enddate = child
#ORCiD
#try:
# orcid_id = str(source.find('{http://www.orcid.org/ns/common}source-orcid').find('{http://www.orcid.org/ns/common}path').text)
# row = [orcid_id]
#except:
# row=[None]
row = [str(id)]
#POSITION: PhD, Bcs, Associate Professor, Member of Technical Staff...
try:
row.append(str(role.text))
except:
row.append(None)
if not(org is None):
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}name').text))
except:
row.append(None)
#country of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}country').text))
except:
row.append(None)
#region of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}region').text))
except:
row.append(None)
#city of affiliation
try:
row.append(str(org.find('{http://www.orcid.org/ns/common}address').find('{http://www.orcid.org/ns/common}city').text))
except:
row.append(None)
else:
for _ in range(4):
row.append(None)
if not(startdate is None):
#starting year
try:
row.append(str(startdate.find('{http://www.orcid.org/ns/common}year').text))
except:
row.append(None)
#starting month
try:
row.append(str(startdate.find('{http://www.orcid.org/ns/common}month').text))
except:
row.append(None)
else:
row.append(None)
row.append(None)
if not(enddate is None):
#ending year
try:
row.append(str(enddate.find('{http://www.orcid.org/ns/common}year').text))
except:
row.append(None)
#ending month
try:
row.append(str(enddate.find('{http://www.orcid.org/ns/common}month').text))
except:
row.append(None)
else:
row.append(None)
row.append(None)
#affiliation_type
row.append(str(flag))
row.append(str(has_published))
return row
def run_through_people(data, dataset_directory):
n_tot = 0
for _, _ in enumerate(os.listdir(dataset_directory)):
n_tot+=1
for n, i in enumerate(os.listdir(dataset_directory)):
block_directory = os.path.join(dataset_directory, i)
for _, id_dir in enumerate(os.listdir(block_directory)):
orcid_code_dir = os.path.join(block_directory, id_dir)
get_person_data(data, orcid_code_dir, id_dir)
sys.stdout.flush()
sys.stdout.write('\r' + str((n+1)*100/n_tot) + '%')
data = []
run_through_people(data, data_dir)
header = ['ORCiD', 'role_title', 'org_name', 'country', 'region', 'city', 'start_year', 'start_month', 'end_year', 'end_month', 'aff_type', 'has_published']
df = pd.DataFrame(data, columns = header)
def is_phd(role):
''' After lowercasing the affiliation_role string, look for
terms that indicate it is a Ph.D. degree.
Yes, regular expressions are tidier, but also unreadable
to most people and harder to debug. '''
# These are international synonyms for the Ph.D. degree
synonyms = ("phd", "ph.d", "dphil", "d.phil", "rer. nat",
"rer, nat", "doctor rerum", "doktor rerum")
# This catches things like "Doctorate en Chimie" but
# excludes "Postdoctoral Fellow" and "Medical Doctorate"
special_cases_allowed = ("doctor", "doktor")
special_cases_disallowed = ("pre", "post", "med")
if type(role) == str:
# lowercase the string
role = role.lower()
# Look for Ph.D. synonyms
if any([(i in role) for i in synonyms]):
return True
# Look for special cases
if any([(i in role) for i in special_cases_allowed]) and \
not any([(i in role) for i in special_cases_disallowed]):
return True
# Otherwise call it False
return False
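# Illustrative classifications (added for clarity; not part of the original script):
#   is_phd("PhD student")              -> True   (matches the "phd" synonym)
#   is_phd("Doctor rerum naturalium")  -> True   (matches "doctor rerum")
#   is_phd("Postdoctoral researcher")  -> False  (contains the disallowed prefix "post")
#   is_phd(None)                       -> False  (non-string roles are never Ph.D.s)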
df["is_phd"] = df.role_title.apply(is_phd)
df.to_csv(filename)
| 6,472 | 31.527638 | 156 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/inputmethods/fcitx5/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p nix-update nix-prefetch-github python3Packages.requests
from nix_prefetch_github import *
import requests
import subprocess
REPOS = [
"libime",
"xcb-imdkit",
"fcitx5",
"fcitx5-anthy",
"fcitx5-chewing",
"fcitx5-chinese-addons",
"fcitx5-configtool",
"fcitx5-gtk",
"fcitx5-hangul",
"fcitx5-lua",
"fcitx5-m17n",
"fcitx5-qt",
"fcitx5-rime",
"fcitx5-skk",
"fcitx5-table-extra",
"fcitx5-table-other",
"fcitx5-unikey"
]
OWNER = "fcitx"
def get_latest_tag(repo, owner=OWNER):
r = requests.get('https://api.github.com/repos/{}/{}/tags'.format(owner,repo))
return r.json()[0].get("name")
def main():
for repo in REPOS:
rev = get_latest_tag(repo)
if repo == "fcitx5-qt":
subprocess.run(["nix-update", "--commit", "--version", rev, "libsForQt5.{}".format(repo)])
else:
subprocess.run(["nix-update", "--commit", "--version", rev, repo])
if __name__ == "__main__":
main()
| 1,124 | 24 | 102 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/security/enpass/update_script.py
|
from __future__ import print_function
import argparse
import bz2
import email
import json
import logging
from itertools import product
from operator import itemgetter
import attr
import pkg_resources
from pathlib2 import Path
from requests import Session
from six.moves.urllib_parse import urljoin
@attr.s
class ReleaseElement(object):
sha256 = attr.ib(repr=False)
size = attr.ib(convert=int)
path = attr.ib()
log = logging.getLogger('enpass.updater')
parser = argparse.ArgumentParser()
parser.add_argument('--repo')
parser.add_argument('--target', type=Path)
session = Session()
def parse_bz2_msg(msg):
msg = bz2.decompress(msg)
if '\n\n' in msg:
parts = msg.split('\n\n')
return list(map(email.message_from_string, parts))
return email.message_from_string(msg)
def fetch_meta(repo, name, parse=email.message_from_string, split=False):
url = urljoin(repo, 'dists/stable', name)
response = session.get("{repo}/dists/stable/{name}".format(**locals()))
return parse(response.content)
def fetch_filehashes(repo, path):
meta = fetch_meta(repo, path, parse=parse_bz2_msg)
for item in meta:
yield {
'version': pkg_resources.parse_version(str(item['Version'])),
'path': item['Filename'],
'sha256': item['sha256'],
}
def fetch_archs(repo):
m = fetch_meta(repo, 'Release')
architectures = m['Architectures'].split()
elements = [ReleaseElement(*x.split()) for x in m['SHA256'].splitlines()]
elements = [x for x in elements if x.path.endswith('bz2')]
for arch, elem in product(architectures, elements):
if arch in elem.path:
yield arch, max(fetch_filehashes(repo, elem.path),
key=itemgetter('version'))
class OurVersionEncoder(json.JSONEncoder):
def default(self, obj):
# the other way around to avoid issues with
# newer setuptools having strict/legacy versions
if not isinstance(obj, (dict, str)):
return str(obj)
return json.JSONEncoder.default(self, obj)
def main(repo, target):
logging.basicConfig(level=logging.DEBUG)
with target.open(mode='wb') as fp:
json.dump(
dict(fetch_archs(repo)), fp,
cls=OurVersionEncoder,
indent=2,
sort_keys=True)
opts = parser.parse_args()
main(opts.repo, opts.target)
| 2,413 | 24.145833 | 77 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/X11/xborders/setup.py
|
from setuptools import setup
setup(
name='@pname@',
version='@version@',
author='deter0',
description='@desc@',
install_requires=['pycairo', 'requests', 'PyGObject'],
scripts=[
'xborders',
],
)
| 231 | 16.846154 | 58 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/X11/xkeysnail/browser-emacs-bindings.py
|
# -*- coding: utf-8 -*-
import re
from xkeysnail.transform import *
aa = False
def aa_setvar(v):
def _aa_setvar():
transform._mark_set = False
global aa; aa = v
return _aa_setvar
def aa_ifvar():
def _aa_ifvar():
transform._mark_set = False
global aa
if aa: aa = False; return K("esc")
return K("enter")
return _aa_ifvar
def aa_flipmark():
def _aa_flipmark():
transform._mark_set = not transform._mark_set;
return _aa_flipmark
define_keymap(re.compile("Google-chrome|Chromium-browser|firefox"), {
K("C-b"): with_mark(K("left")),
K("C-f"): with_mark(K("right")),
K("C-p"): with_mark(K("up")),
K("C-n"): with_mark(K("down")),
K("M-b"): with_mark(K("C-left")),
K("M-f"): with_mark(K("C-right")),
K("C-a"): with_mark(K("home")),
K("C-e"): with_mark(K("end")),
K("C-w"): [K("C-x"), set_mark(False)],
K("M-w"): [K("C-c"), K("right"), set_mark(False)],
K("C-y"): [K("C-v"), set_mark(False)],
K("C-k"): [K("Shift-end"), K("C-x"), set_mark(False)],
K("C-d"): [K("delete"), set_mark(False)],
K("M-d"): [K("C-delete"), set_mark(False)],
K("M-backspace"): [K("C-backspace"), set_mark(False)],
K("C-slash"): [K("C-z"), set_mark(False)],
K("C-space"): aa_flipmark(),
K("C-M-space"): with_or_set_mark(K("C-right")),
# K("C-s"): K("F3"),
# K("C-r"): K("Shift-F3"),
# K("C-g"): [K("esc"), set_mark(False)]
K("C-s"): [K("F3"), aa_setvar(True)],
K("C-r"): [K("Shift-F3"), aa_setvar(True)],
K("C-g"): [K("esc"), aa_setvar(False)],
K("enter"): aa_ifvar()
})
| 1,622 | 29.055556 | 69 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/misc/btdu/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python39Packages.requests
import requests
import subprocess
pkgbuild = requests.get('https://aur.archlinux.org/cgit/aur.git/plain/PKGBUILD?h=btdu').text
def grabDepVersions(depDict, pkgbuild=pkgbuild):
for line in pkgbuild.split('\n'):
if depDict["string"] in line:
start = len(depDict["string"]) + 1
depDict["version"] = line[start:]
break
def grabDepHashes(key,pkgbuild=pkgbuild):
start = pkgbuild.find(key) + len(key)
end = start+64
hashes = []
for i in range(5):
hashes.append(pkgbuild[start:end])
start = pkgbuild.find("'",end+1) + 1
end = start+64
return hashes
def findLine(key,derivation):
count = 0
lines = []
for line in derivation:
if key in line:
lines.append(count)
count += 1
return lines
def updateVersions(btdu,ae,btrfs,ncurses,containers,derivation):
key = "let"
line = findLine(key,derivation)[0] + 1
derivation[line+0] = f' _d_ae_ver = "{ae["version"]}";\n'
derivation[line+1] = f' _d_btrfs_ver = "{btrfs["version"]}";\n'
derivation[line+2] = f' _d_ncurses_ver = "{ncurses["version"]}";\n'
derivation[line+3] = f' _d_emsi_containers_ver = "{containers["version"]}";\n'
key = "version = "
line = findLine(key,derivation)[0]
derivation[line] = f' version = "{btdu["version"]}";\n'
return derivation
def updateHashes(btdu,ae,btrfs,ncurses,containers,derivation):
key = "sha256 = "
hashLines = findLine(key,derivation)
for i in range(len(hashes)):
derivation[hashLines[i]] = f' sha256 = "{hashes[i]}";\n'
return derivation
if __name__ == "__main__":
btdu = {"string": "pkgver"}
ae = {"string": "_d_ae_ver"}
btrfs = {"string": "_d_btrfs_ver"}
ncurses = {"string": "_d_ncurses_ver"}
containers = {"string": "_d_emsi_containers_ver"}
grabDepVersions(btdu)
grabDepVersions(ae)
grabDepVersions(btrfs)
grabDepVersions(ncurses)
grabDepVersions(containers)
hashes = grabDepHashes("sha256sums=('")
nixpkgs = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode("utf-8").strip('\n')
btduFolder = "/pkgs/tools/misc/btdu/"
with open(nixpkgs + btduFolder + "default.nix", 'r') as arq:
derivation = arq.readlines()
derivation = updateVersions(btdu,ae,btrfs,ncurses,containers,derivation)
derivation = updateHashes(btdu,ae,btrfs,ncurses,containers,derivation)
with open(nixpkgs + btduFolder + "default.nix", 'w') as arq:
arq.writelines(derivation)
| 2,703 | 31.578313 | 106 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/admin/google-cloud-sdk/beta__init__.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth for the Google Cloud SDK.
"""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Beta(base.Group):
"""Beta versions of gcloud commands."""
| 788 | 31.875 | 74 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/admin/google-cloud-sdk/alpha__init__.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auth for the Google Cloud SDK.
"""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Alpha(base.Group):
"""Alpha versions of gcloud commands."""
| 791 | 32 | 74 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_html.py
|
import nixos_render_docs as nrd
import pytest
import textwrap
from sample_md import sample1
class Renderer(nrd.html.HTMLRenderer):
def _pull_image(self, src: str) -> str:
return src
class Converter(nrd.md.Converter[nrd.html.HTMLRenderer]):
def __init__(self, manpage_urls: dict[str, str], xrefs: dict[str, nrd.manual_structure.XrefTarget]):
super().__init__()
self._renderer = Renderer(manpage_urls, xrefs)
def unpretty(s: str) -> str:
return "".join(map(str.strip, s.splitlines())).replace('␣', ' ').replace('↵', '\n')
def test_lists_styles() -> None:
# nested lists rotate through a number of list style
c = Converter({}, {})
assert c._render("- - - - foo") == unpretty("""
<div class="itemizedlist"><ul class="itemizedlist compact" style="list-style-type: disc;">
<li class="listitem">
<div class="itemizedlist"><ul class="itemizedlist compact" style="list-style-type: circle;">
<li class="listitem">
<div class="itemizedlist"><ul class="itemizedlist compact" style="list-style-type: square;">
<li class="listitem">
<div class="itemizedlist"><ul class="itemizedlist compact" style="list-style-type: disc;">
<li class="listitem"><p>foo</p></li>
</ul></div>
</li>
</ul></div>
</li>
</ul></div>
</li>
</ul></div>
""")
assert c._render("1. 1. 1. 1. 1. 1. foo") == unpretty("""
<div class="orderedlist"><ol class="orderedlist compact" type="1">
<li class="listitem">
<div class="orderedlist"><ol class="orderedlist compact" type="a">
<li class="listitem">
<div class="orderedlist"><ol class="orderedlist compact" type="i">
<li class="listitem">
<div class="orderedlist"><ol class="orderedlist compact" type="A">
<li class="listitem">
<div class="orderedlist"><ol class="orderedlist compact" type="I">
<li class="listitem">
<div class="orderedlist"><ol class="orderedlist compact" type="1">
<li class="listitem"><p>foo</p></li>
</ol></div>
</li>
</ol></div>
</li>
</ol></div>
</li>
</ol></div>
</li>
</ol></div>
</li>
</ol></div>
""")
def test_xrefs() -> None:
# nested lists rotate through a number of list style
c = Converter({}, {
'foo': nrd.manual_structure.XrefTarget('foo', '<hr/>', 'toc1', 'title1', 'index.html'),
'bar': nrd.manual_structure.XrefTarget('bar', '<br/>', 'toc2', 'title2', 'index.html', True),
})
assert c._render("[](#foo)") == '<p><a class="xref" href="index.html#foo" title="title1" ><hr/></a></p>'
assert c._render("[](#bar)") == '<p><a class="xref" href="index.html" title="title2" ><br/></a></p>'
with pytest.raises(nrd.html.UnresolvedXrefError) as exc:
c._render("[](#baz)")
assert exc.value.args[0] == 'bad local reference, id #baz not known'
def test_images() -> None:
c = Converter({}, {})
assert c._render("") == unpretty("""
<p>
<div class="mediaobject">
<img src="foo" alt="*alt text*" title="title text" />
</div>
</p>
""")
def test_tables() -> None:
c = Converter({}, {})
assert c._render(textwrap.dedent("""
| d | l | m | r |
|---|:--|:-:|--:|
| a | b | c | d |
""")) == unpretty("""
<div class="informaltable">
<table class="informaltable" border="1">
<colgroup>
<col align="left" />
<col align="left" />
<col align="center" />
<col align="right" />
</colgroup>
<thead>
<tr>
<th align="left">d</th>
<th align="left">l</th>
<th align="center">m</th>
<th align="right">r</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">a</td>
<td align="left">b</td>
<td align="center">c</td>
<td align="right">d</td>
</tr>
</tbody>
</table>
</div>
""")
def test_footnotes() -> None:
c = Converter({}, {
"bar": nrd.manual_structure.XrefTarget("bar", "", None, None, ""),
"bar.__back.0": nrd.manual_structure.XrefTarget("bar.__back.0", "", None, None, ""),
"bar.__back.1": nrd.manual_structure.XrefTarget("bar.__back.1", "", None, None, ""),
})
assert c._render(textwrap.dedent("""
foo [^bar] baz [^bar]
[^bar]: note
""")) == unpretty("""
<p>
foo <a href="#bar" class="footnote" id="bar.__back.0"><sup class="footnote">[1]</sup></a>␣
baz <a href="#bar" class="footnote" id="bar.__back.1"><sup class="footnote">[1]</sup></a>
</p>
<div class="footnotes">
<br />
<hr style="width:100; text-align:left;margin-left: 0" />
<div id="bar" class="footnote">
<p>
note<a href="#bar.__back.0" class="para"><sup class="para">[1]</sup></a>
<a href="#bar.__back.1" class="para"><sup class="para">[1]</sup></a>
</p>
</div>
</div>
""")
def test_full() -> None:
c = Converter({ 'man(1)': 'http://example.org' }, {})
assert c._render(sample1) == unpretty("""
<div class="warning">
<h3 class="title">Warning</h3>
<p>foo</p>
<div class="note">
<h3 class="title">Note</h3>
<p>nested</p>
</div>
</div>
<p>
<a class="link" href="link" target="_top">↵
multiline↵
</a>
</p>
<p>
<a class="link" href="http://example.org" target="_top">
<span class="citerefentry"><span class="refentrytitle">man</span>(1)</span>
</a> reference
</p>
<p><a id="b" />some <a id="a" />nested anchors</p>
<p>
<span class="emphasis"><em>emph</em></span>␣
<span class="strong"><strong>strong</strong></span>␣
<span class="emphasis"><em>nesting emph <span class="strong"><strong>and strong</strong></span>␣
and <code class="literal">code</code></em></span>
</p>
<div class="itemizedlist">
<ul class="itemizedlist " style="list-style-type: disc;">
<li class="listitem"><p>wide bullet</p></li>
<li class="listitem"><p>list</p></li>
</ul>
</div>
<div class="orderedlist">
<ol class="orderedlist " type="1">
<li class="listitem"><p>wide ordered</p></li>
<li class="listitem"><p>list</p></li>
</ol>
</div>
<div class="itemizedlist">
<ul class="itemizedlist compact" style="list-style-type: disc;">
<li class="listitem"><p>narrow bullet</p></li>
<li class="listitem"><p>list</p></li>
</ul>
</div>
<div class="orderedlist">
<ol class="orderedlist compact" type="1">
<li class="listitem"><p>narrow ordered</p></li>
<li class="listitem"><p>list</p></li>
</ol>
</div>
<div class="blockquote">
<blockquote class="blockquote">
<p>quotes</p>
<div class="blockquote">
<blockquote class="blockquote">
<p>with <span class="emphasis"><em>nesting</em></span></p>
<pre class="programlisting">↵
nested code block↵
</pre>
</blockquote>
</div>
<div class="itemizedlist">
<ul class="itemizedlist compact" style="list-style-type: disc;">
<li class="listitem"><p>and lists</p></li>
<li class="listitem">
<pre class="programlisting">↵
containing code↵
</pre>
</li>
</ul>
</div>
<p>and more quote</p>
</blockquote>
</div>
<div class="orderedlist">
<ol class="orderedlist compact" start="100" type="1">
<li class="listitem"><p>list starting at 100</p></li>
<li class="listitem"><p>goes on</p></li>
</ol>
</div>
<div class="variablelist">
<dl class="variablelist">
<dt><span class="term">deflist</span></dt>
<dd>
<div class="blockquote">
<blockquote class="blockquote">
<p>
with a quote↵
and stuff
</p>
</blockquote>
</div>
<pre class="programlisting">↵
code block↵
</pre>
<pre class="programlisting">↵
fenced block↵
</pre>
<p>text</p>
</dd>
<dt><span class="term">more stuff in same deflist</span></dt>
<dd>
<p>foo</p>
</dd>
</dl>
</div>""")
| 8,956 | 33.85214 | 108 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_lists.py
|
import nixos_render_docs as nrd
import pytest
from markdown_it.token import Token
class Converter(nrd.md.Converter[nrd.docbook.DocBookRenderer]):
# actual renderer doesn't matter, we're just parsing.
def __init__(self, manpage_urls: dict[str, str]) -> None:
super().__init__()
self._renderer = nrd.docbook.DocBookRenderer(manpage_urls)
@pytest.mark.parametrize("ordered", [True, False])
def test_list_wide(ordered: bool) -> None:
t, tag, m, e1, e2, i1, i2 = (
("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "")
)
c = Converter({})
meta = { 'end': int(e2[:-1]) } if ordered else {}
meta['compact'] = False
assert c._parse(f"{e1} a\n\n{e2} b") == [
Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 3], level=0,
children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 2], level=1, children=None,
content='', markup=m, info=i1, meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3,
content='a', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[2, 3], level=1, children=None,
content='', markup=m, info=i2, meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[2, 3], level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=3,
content='b', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='b', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False)
]
@pytest.mark.parametrize("ordered", [True, False])
def test_list_narrow(ordered: bool) -> None:
t, tag, m, e1, e2, i1, i2 = (
("ordered", "ol", ".", "1.", "2.", "1", "2") if ordered else ("bullet", "ul", "-", "-", "-", "", "")
)
c = Converter({})
meta = { 'end': int(e2[:-1]) } if ordered else {}
meta['compact'] = True
assert c._parse(f"{e1} a\n{e2} b") == [
Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0,
children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None,
content='', markup=m, info=i1, meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=3,
content='a', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None,
content='', markup=m, info=i2, meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3,
content='b', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='b', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False)
]
assert c._parse(f"{e1} - a\n{e2} b") == [
Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0,
children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None,
content='', markup=m, info=i1, meta={}, block=True, hidden=False),
Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[0, 1], level=2,
children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5,
content='a', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None,
content='', markup=m, info=i2, meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=3,
content='b', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='b', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False)
]
assert c._parse(f"{e1} - a\n{e2} - b") == [
Token(type=f'{t}_list_open', tag=tag, nesting=1, attrs={}, map=[0, 2], level=0,
children=None, content='', markup=m, info='', meta=meta, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=1, children=None,
content='', markup=m, info=i1, meta={}, block=True, hidden=False),
Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[0, 1], level=2,
children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[0, 1], level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=5,
content='a', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=1, children=None,
content='', markup=m, info=i2, meta={}, block=True, hidden=False),
Token(type='bullet_list_open', tag='ul', nesting=1, attrs={}, map=[1, 2], level=2,
children=None, content='', markup='-', info='', meta={'compact': True}, block=True, hidden=False),
Token(type='list_item_open', tag='li', nesting=1, attrs={}, map=[1, 2], level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[1, 2], level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=5,
content='b', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='b', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=4, children=None,
content='', markup='', info='', meta={}, block=True, hidden=True),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=3, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='bullet_list_close', tag='ul', nesting=-1, attrs={}, map=None, level=2, children=None,
content='', markup='-', info='', meta={}, block=True, hidden=False),
Token(type='list_item_close', tag='li', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False),
Token(type=f'{t}_list_close', tag=tag, nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup=m, info='', meta={}, block=True, hidden=False)
]
| 14,128 | 73.756614 | 112 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_manpage.py
|
import nixos_render_docs as nrd
from sample_md import sample1
from typing import Mapping
class Converter(nrd.md.Converter[nrd.manpage.ManpageRenderer]):
def __init__(self, manpage_urls: Mapping[str, str], options_by_id: dict[str, str] = {}):
super().__init__()
self._renderer = nrd.manpage.ManpageRenderer(manpage_urls, options_by_id)
def test_inline_code() -> None:
c = Converter({})
assert c._render("1 `x a x` 2") == "1 \\fR\\(oqx a x\\(cq\\fP 2"
def test_fonts() -> None:
c = Converter({})
assert c._render("*a **b** c*") == "\\fIa \\fBb\\fI c\\fR"
assert c._render("*a [1 `2`](3) c*") == "\\fIa \\fB1 \\fR\\(oq2\\(cq\\fP\\fI c\\fR"
def test_expand_link_targets() -> None:
c = Converter({}, { '#foo1': "bar", "#foo2": "bar" })
assert (c._render("[a](#foo1) [](#foo2) [b](#bar1) [](#bar2)") ==
"\\fBa\\fR \\fBbar\\fR \\fBb\\fR \\fB\\fR")
def test_collect_links() -> None:
c = Converter({}, { '#foo': "bar" })
c._renderer.link_footnotes = []
assert c._render("[a](link1) [b](link2)") == "\\fBa\\fR[1]\\fR \\fBb\\fR[2]\\fR"
assert c._renderer.link_footnotes == ['link1', 'link2']
def test_dedup_links() -> None:
c = Converter({}, { '#foo': "bar" })
c._renderer.link_footnotes = []
assert c._render("[a](link) [b](link)") == "\\fBa\\fR[1]\\fR \\fBb\\fR[1]\\fR"
assert c._renderer.link_footnotes == ['link']
def test_full() -> None:
c = Converter({ 'man(1)': 'http://example.org' })
assert c._render(sample1) == """\
.sp
.RS 4
\\fBWarning\\fP
.br
foo
.sp
.RS 4
\\fBNote\\fP
.br
nested
.RE
.RE
.sp
\\fBmultiline\\fR
.sp
\\fBman\\fP\\fR(1)\\fP reference
.sp
some nested anchors
.sp
\\fIemph\\fR \\fBstrong\\fR \\fInesting emph \\fBand strong\\fI and \\fR\\(oqcode\\(cq\\fP\\fR
.sp
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
wide bullet
.RE
.sp
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
list
.RE
.sp
.RS 4
\\h'-3'\\fB1\\&.\\fP\\h'1'\\c
wide ordered
.RE
.sp
.RS 4
\\h'-3'\\fB2\\&.\\fP\\h'1'\\c
list
.RE
.sp
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
narrow bullet
.RE
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
list
.RE
.sp
.RS 4
\\h'-3'\\fB1\\&.\\fP\\h'1'\\c
narrow ordered
.RE
.RS 4
\\h'-3'\\fB2\\&.\\fP\\h'1'\\c
list
.RE
.sp
.RS 4
\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c
quotes
.sp
.RS 4
\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c
with \\fInesting\\fR
.sp
.RS 4
.nf
nested code block
.fi
.RE
.RE
.sp
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
and lists
.RE
.RS 4
\\h'-2'\\fB\\[u2022]\\fP\\h'1'\\c
.sp
.RS 4
.nf
containing code
.fi
.RE
.RE
.sp
and more quote
.RE
.sp
.RS 6
\\h'-5'\\fB100\\&.\\fP\\h'1'\\c
list starting at 100
.RE
.RS 6
\\h'-5'\\fB101\\&.\\fP\\h'1'\\c
goes on
.RE
.RS 4
.PP
deflist
.RS 4
.RS 4
\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c
with a quote and stuff
.RE
.sp
.RS 4
.nf
code block
.fi
.RE
.sp
.RS 4
.nf
fenced block
.fi
.RE
.sp
text
.RE
.PP
more stuff in same deflist
.RS 4
foo
.RE
.RE"""
| 2,891 | 16.011765 | 94 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_commonmark.py
|
import nixos_render_docs as nrd
from sample_md import sample1
from typing import Mapping
class Converter(nrd.md.Converter[nrd.commonmark.CommonMarkRenderer]):
def __init__(self, manpage_urls: Mapping[str, str]):
super().__init__()
self._renderer = nrd.commonmark.CommonMarkRenderer(manpage_urls)
# NOTE: in these tests we represent trailing spaces by ` ` and replace them with real space later,
# since a number of editors will strip trailing whitespace on save and that would break the tests.
def test_indented_fence() -> None:
c = Converter({})
s = """\
> - ```foo
> thing
>
> rest
> ```\
""".replace(' ', ' ')
assert c._render(s) == s
def test_full() -> None:
c = Converter({ 'man(1)': 'http://example.org' })
assert c._render(sample1) == """\
**Warning:** foo
**Note:** nested
[
multiline
](link)
[` man(1) `](http://example.org) reference
some nested anchors
*emph* **strong** *nesting emph **and strong** and ` code `*
- wide bullet
- list
1. wide ordered
2. list
- narrow bullet
- list
1. narrow ordered
2. list
> quotes
>
> > with *nesting*
> >
> > ```
> > nested code block
> > ```
>
> - and lists
> - ```
> containing code
> ```
>
> and more quote
100. list starting at 100
101. goes on
- *deflist*
> with a quote
> and stuff
```
code block
```
```
fenced block
```
text
- *more stuff in same deflist*
foo""".replace(' ', ' ')
def test_images() -> None:
c = Converter({})
assert c._render("") == (
""
)
| 1,686 | 15.87 | 98 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/sample_md.py
|
sample1 = """\
:::: {.warning}
foo
::: {.note}
nested
:::
::::
[
multiline
](link)
{manpage}`man(1)` reference
[some [nested]{#a} anchors]{#b}
*emph* **strong** *nesting emph **and strong** and `code`*
- wide bullet
- list
1. wide ordered
2. list
- narrow bullet
- list
1. narrow ordered
2. list
> quotes
>> with *nesting*
>>
>> nested code block
>
> - and lists
> - ```
> containing code
> ```
>
> and more quote
100. list starting at 100
1. goes on
deflist
: > with a quote
> and stuff
code block
```
fenced block
```
text
more stuff in same deflist
: foo
"""
| 605 | 8.619048 | 58 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_asciidoc.py
|
import nixos_render_docs as nrd
from sample_md import sample1
class Converter(nrd.md.Converter[nrd.asciidoc.AsciiDocRenderer]):
def __init__(self, manpage_urls: dict[str, str]):
super().__init__()
self._renderer = nrd.asciidoc.AsciiDocRenderer(manpage_urls)
def test_lists() -> None:
c = Converter({})
# attaching to the nth ancestor list requires n newlines before the +
assert c._render("""\
- a
b
- c
- d
- e
1
f
""") == """\
[]
* {empty}a
+
b
* {empty}c
+
[options="compact"]
** {empty}d
+
[]
** {empty}e
+
1
+
f
"""
def test_full() -> None:
c = Converter({ 'man(1)': 'http://example.org' })
assert c._render(sample1) == """\
[WARNING]
====
foo
[NOTE]
=====
nested
=====
====
link:link[ multiline ]
link:http://example.org[man(1)] reference
[[b]]some [[a]]nested anchors
__emph__ **strong** __nesting emph **and strong** and ``code``__
[]
* {empty}wide bullet
* {empty}list
[]
. {empty}wide ordered
. {empty}list
[options="compact"]
* {empty}narrow bullet
* {empty}list
[options="compact"]
. {empty}narrow ordered
. {empty}list
[quote]
====
quotes
[quote]
=====
with __nesting__
----
nested code block
----
=====
[options="compact"]
* {empty}and lists
* {empty}
+
----
containing code
----
and more quote
====
[start=100,options="compact"]
. {empty}list starting at 100
. {empty}goes on
[]
deflist:: {empty}
+
[quote]
=====
with a quote and stuff
=====
+
----
code block
----
+
----
fenced block
----
+
text
more stuff in same deflist:: {empty}foo
"""
| 1,555 | 9.657534 | 73 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_plugins.py
|
import nixos_render_docs as nrd
import pytest
from markdown_it.token import Token
class Converter(nrd.md.Converter[nrd.docbook.DocBookRenderer]):
# actual renderer doesn't matter, we're just parsing.
def __init__(self, manpage_urls: dict[str, str]) -> None:
super().__init__()
self._renderer = nrd.docbook.DocBookRenderer(manpage_urls)
def test_attr_span_parsing() -> None:
c = Converter({})
assert c._parse("[]{#test}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[]{#test}',
markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'test'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("[]{.test}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[]{.test}',
markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'class': 'test'}, map=None,
level=0, children=None, content='', markup='', info='', meta={}, block=False,
hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("[]{.test1 .test2 #foo .test3 .test4}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='[]{.test1 .test2 #foo .test3 .test4}',
markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='attr_span_begin', tag='span', nesting=1,
attrs={'class': 'test1 test2 test3 test4', 'id': 'foo'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("[]{#a #a}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='[]{#a #a}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='[]{#a #a}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("[]{foo}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='[]{foo}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='[]{foo}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_attr_span_formatted() -> None:
c = Converter({})
assert c._parse("a[b c `d` ***e***]{#test}f") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='a[b c `d` ***e***]{#test}f', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0,
children=None, content='a', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'test'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content='b c ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=1,
children=None, content='d', markup='`', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content=' ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=1, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='strong_open', tag='strong', nesting=1, attrs={}, map=None, level=2,
children=None, content='', markup='**', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=3, children=None,
content='e', markup='', info='', meta={}, block=False, hidden=False),
Token(type='strong_close', tag='strong', nesting=-1, attrs={}, map=None, level=2,
children=None, content='', markup='**', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None,
content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='f', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_attr_span_in_heading() -> None:
c = Converter({})
# inline anchors in headers are allowed, but header attributes should be preferred
assert c._parse("# foo []{#bar} baz") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo []{#bar} baz', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content=' baz', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
def test_attr_span_on_links() -> None:
c = Converter({})
assert c._parse("[ [a](#bar) ]{#foo}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1, content='[ [a](#bar) ]{#foo}',
markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'foo'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content=' ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='link_open', tag='a', nesting=1, attrs={'href': '#bar'}, map=None, level=1,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False),
Token(type='link_close', tag='a', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content=' ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_attr_span_nested() -> None:
# inline anchors may contain more anchors (even though this is a bit pointless)
c = Converter({})
assert c._parse("[ [a]{#bar} ]{#foo}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='[ [a]{#bar} ]{#foo}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'foo'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content=' ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=1,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=2, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=1,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content=' ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_attr_span_escaping() -> None:
c = Converter({})
assert c._parse("\\[a]{#bar}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='[a]{#bar}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("\\\\[a]{#bar}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='\\', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_begin', tag='span', nesting=1, attrs={'id': 'bar'}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False),
Token(type='attr_span_end', tag='span', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("\\\\\\[a]{#bar}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='\\[a]{#bar}', markup='', info='', meta={}, block=False, hidden=False)
],
content='\\\\\\[a]{#bar}', markup='', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_inline_comment_basic() -> None:
c = Converter({})
assert c._parse("a <!-- foo --><!----> b") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='a <!-- foo --><!----> b', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a b', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("a<!-- b -->") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='a<!-- b -->', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_inline_comment_does_not_nest_in_code() -> None:
c = Converter({})
assert c._parse("`a<!-- b -->c`") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='`a<!-- b -->c`', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='code_inline', tag='code', nesting=0, attrs={}, map=None, level=0, children=None,
content='a<!-- b -->c', markup='`', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_inline_comment_does_not_nest_elsewhere() -> None:
c = Converter({})
assert c._parse("*a<!-- b -->c*") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='*a<!-- b -->c*', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content='ac', markup='', info='', meta={}, block=False, hidden=False),
Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_inline_comment_can_be_escaped() -> None:
c = Converter({})
assert c._parse("a\\<!-- b -->c") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='a\\<!-- b -->c', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a<!-- b -->c', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("a\\\\<!-- b -->c") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a\\c', markup='', info='', meta={}, block=False, hidden=False)
],
content='a\\\\<!-- b -->c', markup='', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("a\\\\\\<!-- b -->c") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='a\\<!-- b -->c', markup='', info='', meta={}, block=False, hidden=False)
],
content='a\\\\\\<!-- b -->c', markup='', info='', meta={}, block=True, hidden=False),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
def test_block_comment() -> None:
c = Converter({})
assert c._parse("<!-- a -->") == []
assert c._parse("<!-- a\n-->") == []
assert c._parse("<!--\na\n-->") == []
assert c._parse("<!--\n\na\n\n-->") == []
assert c._parse("<!--\n\n```\n\n\n```\n\n-->") == []
def test_heading_attributes() -> None:
c = Converter({})
assert c._parse("# foo *bar* {#hid}") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'hid'}, map=[0, 1], level=0,
children=None, content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo *bar* {#hid}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo ', markup='', info='', meta={}, block=False, hidden=False),
Token(type='em_open', tag='em', nesting=1, attrs={}, map=None, level=0, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=1, children=None,
content='bar', markup='', info='', meta={}, block=False, hidden=False),
Token(type='em_close', tag='em', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='*', info='', meta={}, block=False, hidden=False),
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
assert c._parse("# foo--bar {#id-with--double-dashes}") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'id-with--double-dashes'}, map=[0, 1],
level=0, children=None, content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo--bar {#id-with--double-dashes}', markup='', info='', meta={}, block=True,
hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo–bar', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
def test_admonitions() -> None:
c = Converter({})
assert c._parse("::: {.note}") == [
Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup=':::', info=' {.note}', meta={'kind': 'note'}, block=True,
hidden=False),
Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.caution}") == [
Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup=':::', info=' {.caution}', meta={'kind': 'caution'},
block=True, hidden=False),
Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.tip}") == [
Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup=':::', info=' {.tip}', meta={'kind': 'tip'}, block=True,
hidden=False),
Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.important}") == [
Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup=':::', info=' {.important}', meta={'kind': 'important'},
block=True, hidden=False),
Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.warning}") == [
Token(type='admonition_open', tag='div', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup=':::', info=' {.warning}', meta={'kind': 'warning'},
block=True, hidden=False),
Token(type='admonition_close', tag='div', nesting=-1, attrs={}, map=None, level=0,
children=None, content='', markup=':::', info='', meta={}, block=True, hidden=False)
]
def test_example() -> None:
c = Converter({})
assert c._parse("::: {.example}\n# foo") == [
Token(type='example_open', tag='div', nesting=1, attrs={}, map=[0, 2], level=0, children=None,
content='', markup=':::', info=' {.example}', meta={}, block=True, hidden=False),
Token(type='example_title_open', tag='h1', nesting=1, attrs={}, map=[1, 2], level=1, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=2,
content='foo', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='example_title_close', tag='h1', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='example_close', tag='div', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {#eid .example}\n# foo") == [
Token(type='example_open', tag='div', nesting=1, attrs={'id': 'eid'}, map=[0, 2], level=0,
children=None, content='', markup=':::', info=' {#eid .example}', meta={}, block=True,
hidden=False),
Token(type='example_title_open', tag='h1', nesting=1, attrs={}, map=[1, 2], level=1, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=2,
content='foo', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='example_title_close', tag='h1', nesting=-1, attrs={}, map=None, level=1, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='example_close', tag='div', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.example .note}") == [
Token(type='paragraph_open', tag='p', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='::: {.example .note}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='::: {.example .note}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='paragraph_close', tag='p', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='', info='', meta={}, block=True, hidden=False)
]
assert c._parse("::: {.example}\n### foo: `code`\nbar\n:::\nbaz") == [
Token(type='example_open', tag='div', nesting=1, map=[0, 3], markup=':::', info=' {.example}',
block=True),
Token(type='example_title_open', tag='h3', nesting=1, map=[1, 2], level=1, markup='###', block=True),
Token(type='inline', tag='', nesting=0, map=[1, 2], level=2, content='foo: `code`', block=True,
children=[
Token(type='text', tag='', nesting=0, content='foo: '),
Token(type='code_inline', tag='code', nesting=0, content='code', markup='`')
]),
Token(type='example_title_close', tag='h3', nesting=-1, level=1, markup='###', block=True),
Token(type='paragraph_open', tag='p', nesting=1, map=[2, 3], level=1, block=True),
Token(type='inline', tag='', nesting=0, map=[2, 3], level=2, content='bar', block=True,
children=[
Token(type='text', tag='', nesting=0, content='bar')
]),
Token(type='paragraph_close', tag='p', nesting=-1, level=1, block=True),
Token(type='example_close', tag='div', nesting=-1, markup=':::', block=True),
Token(type='paragraph_open', tag='p', nesting=1, map=[4, 5], block=True),
Token(type='inline', tag='', nesting=0, map=[4, 5], level=1, content='baz', block=True,
children=[
Token(type='text', tag='', nesting=0, content='baz')
]),
Token(type='paragraph_close', tag='p', nesting=-1, block=True)
]
with pytest.raises(RuntimeError) as exc:
c._parse("::: {.example}\n### foo\n### bar\n:::")
assert exc.value.args[0] == 'unexpected non-title heading in example in line 3'
def test_footnotes() -> None:
c = Converter({})
assert c._parse("text [^foo]\n\n[^foo]: bar") == [
Token(type='paragraph_open', tag='p', nesting=1, map=[0, 1], block=True),
Token(type='inline', tag='', nesting=0, map=[0, 1], level=1, content='text [^foo]', block=True,
children=[
Token(type='text', tag='', nesting=0, content='text '),
Token(type='footnote_ref', tag='', nesting=0, attrs={'id': 'foo.__back.0'},
meta={'id': 0, 'subId': 0, 'label': 'foo', 'target': 'foo'})
]),
Token(type='paragraph_close', tag='p', nesting=-1, block=True),
Token(type='footnote_block_open', tag='', nesting=1),
Token(type='footnote_open', tag='', nesting=1, attrs={'id': 'foo'}, meta={'id': 0, 'label': 'foo'}),
Token(type='paragraph_open', tag='p', nesting=1, map=[2, 3], level=1, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, map=[2, 3], level=2, content='bar', block=True,
children=[
Token(type='text', tag='', nesting=0, content='bar')
]),
Token(type='footnote_anchor', tag='', nesting=0,
meta={'id': 0, 'label': 'foo', 'subId': 0, 'target': 'foo.__back.0'}),
Token(type='paragraph_close', tag='p', nesting=-1, level=1, block=True),
Token(type='footnote_close', tag='', nesting=-1),
Token(type='footnote_block_close', tag='', nesting=-1),
]
| 36,568 | 68.128544 | 111 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_headings.py
|
import nixos_render_docs as nrd
from markdown_it.token import Token
class Converter(nrd.md.Converter[nrd.docbook.DocBookRenderer]):
# actual renderer doesn't matter, we're just parsing.
def __init__(self, manpage_urls: dict[str, str]) -> None:
super().__init__()
self._renderer = nrd.docbook.DocBookRenderer(manpage_urls)
def test_heading_id_absent() -> None:
c = Converter({})
assert c._parse("# foo") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo', markup='', info='', meta={}, block=False, hidden=False)
],
content='foo', markup='', info='', meta={}, block=True, hidden=False),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
def test_heading_id_present() -> None:
c = Converter({})
assert c._parse("# foo {#foo}\n## bar { #bar}\n### bal { #bal} ") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'foo'}, map=[0, 1], level=0,
children=None, content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo {#foo}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='heading_open', tag='h2', nesting=1, attrs={'id': 'bar'}, map=[1, 2], level=0,
children=None, content='', markup='##', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[1, 2], level=1,
content='bar { #bar}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='bar', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h2', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='##', info='', meta={}, block=True, hidden=False),
Token(type='heading_open', tag='h3', nesting=1, attrs={'id': 'bal'}, map=[2, 3], level=0,
children=None, content='', markup='###', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[2, 3], level=1,
content='bal { #bal}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='bal', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h3', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='###', info='', meta={}, block=True, hidden=False)
]
def test_heading_id_incomplete() -> None:
c = Converter({})
assert c._parse("# foo {#}") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo {#}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo {#}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
def test_heading_id_double() -> None:
c = Converter({})
assert c._parse("# foo {#a} {#b}") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={'id': 'b'}, map=[0, 1], level=0,
children=None, content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo {#a} {#b}', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo {#a}', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
def test_heading_id_suffixed() -> None:
c = Converter({})
assert c._parse("# foo {#a} s") == [
Token(type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0,
children=None, content='', markup='#', info='', meta={}, block=True, hidden=False),
Token(type='inline', tag='', nesting=0, attrs={}, map=[0, 1], level=1,
content='foo {#a} s', markup='', info='', meta={}, block=True, hidden=False,
children=[
Token(type='text', tag='', nesting=0, attrs={}, map=None, level=0, children=None,
content='foo {#a} s', markup='', info='', meta={}, block=False, hidden=False)
]),
Token(type='heading_close', tag='h1', nesting=-1, attrs={}, map=None, level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False)
]
| 6,423 | 60.180952 | 101 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/tests/test_options.py
|
import nixos_render_docs
from markdown_it.token import Token
import pytest
def test_option_headings() -> None:
c = nixos_render_docs.options.DocBookConverter({}, 'local', 'none', 'vars', 'opt-')
with pytest.raises(RuntimeError) as exc:
c._render("# foo")
assert exc.value.args[0] == 'md token not supported in options doc'
assert exc.value.args[1] == Token(
type='heading_open', tag='h1', nesting=1, attrs={}, map=[0, 1], level=0, children=None,
content='', markup='#', info='', meta={}, block=True, hidden=False
)
| 561 | 36.466667 | 95 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/asciidoc.py
|
from collections.abc import Mapping, Sequence
from dataclasses import dataclass
from typing import cast
from urllib.parse import quote
from .md import Renderer
from markdown_it.token import Token
_asciidoc_escapes = {
# escape all dots, just in case one is pasted at SOL
ord('.'): "{zwsp}.",
# may be replaced by typographic variants
ord("'"): "{apos}",
ord('"'): "{quot}",
# passthrough character
ord('+'): "{plus}",
# table marker
ord('|'): "{vbar}",
# xml entity reference
ord('&'): "{amp}",
# crossrefs. < needs extra escaping because links break in odd ways if they start with it
ord('<'): "{zwsp}+<+{zwsp}",
ord('>'): "{gt}",
# anchors, links, block attributes
ord('['): "{startsb}",
ord(']'): "{endsb}",
# superscript, subscript
ord('^'): "{caret}",
ord('~'): "{tilde}",
# bold
ord('*'): "{asterisk}",
# backslash
ord('\\'): "{backslash}",
# inline code
ord('`'): "{backtick}",
}
def asciidoc_escape(s: str) -> str:
s = s.translate(_asciidoc_escapes)
    # :: is deflist item, ;; has a replacement but no idea why
return s.replace("::", "{two-colons}").replace(";;", "{two-semicolons}")
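# A minimal sanity sketch (hypothetical helper, not part of the original module):
# what the escape table above produces for a sample string.
def _demo_asciidoc_escape() -> None:
    sample = "a *b* <c> [d]::e"
    expected = ("a {asterisk}b{asterisk} {zwsp}+<+{zwsp}c{gt} "
                "{startsb}d{endsb}{two-colons}e")
    assert asciidoc_escape(sample) == expected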
@dataclass(kw_only=True)
class List:
head: str
@dataclass()
class Par:
sep: str
block_delim: str
continuing: bool = False
class AsciiDocRenderer(Renderer):
__output__ = "asciidoc"
_parstack: list[Par]
_list_stack: list[List]
_attrspans: list[str]
def __init__(self, manpage_urls: Mapping[str, str]):
super().__init__(manpage_urls)
self._parstack = [ Par("\n\n", "====") ]
self._list_stack = []
self._attrspans = []
def _enter_block(self, is_list: bool) -> None:
self._parstack.append(Par("\n+\n" if is_list else "\n\n", self._parstack[-1].block_delim + "="))
def _leave_block(self) -> None:
self._parstack.pop()
def _break(self, force: bool = False) -> str:
result = self._parstack[-1].sep if force or self._parstack[-1].continuing else ""
self._parstack[-1].continuing = True
return result
def _admonition_open(self, kind: str) -> str:
pbreak = self._break()
self._enter_block(False)
return f"{pbreak}[{kind}]\n{self._parstack[-2].block_delim}\n"
def _admonition_close(self) -> str:
self._leave_block()
return f"\n{self._parstack[-1].block_delim}\n"
def _list_open(self, token: Token, head: str) -> str:
attrs = []
if (idx := token.attrs.get('start')) is not None:
attrs.append(f"start={idx}")
if token.meta['compact']:
attrs.append('options="compact"')
if self._list_stack:
            # nested lists lengthen the marker by one character per level
            head *= len(self._list_stack[-1].head) + 1
self._list_stack.append(List(head=head))
return f"{self._break()}[{','.join(attrs)}]"
def _list_close(self) -> str:
self._list_stack.pop()
return ""
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return asciidoc_escape(token.content)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._break()
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return " +\n"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return " "
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return f"``{asciidoc_escape(token.content)}``"
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self.fence(token, tokens, i)
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return f"link:{quote(cast(str, token.attrs['href']), safe='/:')}["
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "]"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._enter_block(True)
# allow the next token to be a block or an inline.
return f'\n{self._list_stack[-1].head} {{empty}}'
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return "\n"
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._list_open(token, '*')
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._list_close()
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "__"
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "__"
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "**"
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "**"
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
attrs = f"[source,{token.info}]\n" if token.info else ""
code = token.content
if code.endswith('\n'):
code = code[:-1]
return f"{self._break(True)}{attrs}----\n{code}\n----"
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
pbreak = self._break(True)
self._enter_block(False)
return f"{pbreak}[quote]\n{self._parstack[-2].block_delim}\n"
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return f"\n{self._parstack[-1].block_delim}"
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("NOTE")
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("CAUTION")
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("IMPORTANT")
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("TIP")
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("WARNING")
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"{self._break()}[]"
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._break()
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._enter_block(True)
return ":: {empty}"
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return "\n"
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
content = asciidoc_escape(token.content)
if token.meta['name'] == 'manpage' and (url := self._manpage_urls.get(token.content)):
return f"link:{quote(url, safe='/:')}[{content}]"
return f"[.{token.meta['name']}]``{asciidoc_escape(token.content)}``"
def inline_anchor(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return f"[[{token.attrs['id']}]]"
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
(id_part, class_part) = ("", "")
if id := token.attrs.get('id'):
id_part = f"[[{id}]]"
if s := token.attrs.get('class'):
if s == 'keycap':
class_part = "kbd:["
self._attrspans.append("]")
else:
return super().attr_span_begin(token, tokens, i)
else:
self._attrspans.append("")
return id_part + class_part
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._attrspans.pop()
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return token.markup.replace("#", "=") + " "
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "\n"
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._list_open(token, '.')
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._list_close()
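# A hypothetical sketch, not part of the original module: how block nesting grows
# the AsciiDoc delimiter. _parstack starts with Par("\n\n", "===="), and each
# _enter_block appends block_delim + "=", so a quote nested inside another block
# is fenced with "=====" rather than "====".
def _demo_nested_block_delimiters() -> None:
    r = AsciiDocRenderer({})
    assert r._parstack[-1].block_delim == "===="
    r._enter_block(False)
    assert r._parstack[-1].block_delim == "====="
    r._leave_block()
    assert r._parstack[-1].block_delim == "===="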
| 9,525 | 42.898618 | 104 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manual.py
|
import argparse
import hashlib
import html
import json
import re
import xml.sax.saxutils as xml
from abc import abstractmethod
from collections.abc import Mapping, Sequence
from pathlib import Path
from typing import Any, cast, ClassVar, Generic, get_args, NamedTuple
from markdown_it.token import Token
from . import md, options
from .docbook import DocBookRenderer, Heading, make_xml_id
from .html import HTMLRenderer, UnresolvedXrefError
from .manual_structure import check_structure, FragmentType, is_include, TocEntry, TocEntryType, XrefTarget
from .md import Converter, Renderer
class BaseConverter(Converter[md.TR], Generic[md.TR]):
# per-converter configuration for ns:arg=value arguments to include blocks, following
# the include type. html converters need something like this to support chunking, or
# another external method like the chunktocs docbook uses (but block options seem like
    # a much nicer way of doing this).
INCLUDE_ARGS_NS: ClassVar[str]
INCLUDE_FRAGMENT_ALLOWED_ARGS: ClassVar[set[str]] = set()
INCLUDE_OPTIONS_ALLOWED_ARGS: ClassVar[set[str]] = set()
_base_paths: list[Path]
_current_type: list[TocEntryType]
def convert(self, infile: Path, outfile: Path) -> None:
self._base_paths = [ infile ]
self._current_type = ['book']
try:
tokens = self._parse(infile.read_text())
self._postprocess(infile, outfile, tokens)
converted = self._renderer.render(tokens)
outfile.write_text(converted)
except Exception as e:
raise RuntimeError(f"failed to render manual {infile}") from e
def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None:
pass
def _parse(self, src: str) -> list[Token]:
tokens = super()._parse(src)
check_structure(self._current_type[-1], tokens)
for token in tokens:
if not is_include(token):
continue
directive = token.info[12:].split()
if not directive:
continue
args = { k: v for k, _sep, v in map(lambda s: s.partition('='), directive[1:]) }
typ = directive[0]
if typ == 'options':
token.type = 'included_options'
self._process_include_args(token, args, self.INCLUDE_OPTIONS_ALLOWED_ARGS)
self._parse_options(token, args)
else:
fragment_type = typ.removesuffix('s')
if fragment_type not in get_args(FragmentType):
raise RuntimeError(f"unsupported structural include type '{typ}'")
self._current_type.append(cast(FragmentType, fragment_type))
token.type = 'included_' + typ
self._process_include_args(token, args, self.INCLUDE_FRAGMENT_ALLOWED_ARGS)
self._parse_included_blocks(token, args)
self._current_type.pop()
return tokens
def _process_include_args(self, token: Token, args: dict[str, str], allowed: set[str]) -> None:
ns = self.INCLUDE_ARGS_NS + ":"
args = { k[len(ns):]: v for k, v in args.items() if k.startswith(ns) }
if unknown := set(args.keys()) - allowed:
assert token.map
raise RuntimeError(f"unrecognized include argument in line {token.map[0] + 1}", unknown)
token.meta['include-args'] = args
def _parse_included_blocks(self, token: Token, block_args: dict[str, str]) -> None:
assert token.map
included = token.meta['included'] = []
for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 2):
line = line.strip()
path = self._base_paths[-1].parent / line
if path in self._base_paths:
raise RuntimeError(f"circular include found in line {lnum}")
try:
self._base_paths.append(path)
with open(path, 'r') as f:
tokens = self._parse(f.read())
included.append((tokens, path))
self._base_paths.pop()
except Exception as e:
raise RuntimeError(f"processing included file {path} from line {lnum}") from e
def _parse_options(self, token: Token, block_args: dict[str, str]) -> None:
assert token.map
items = {}
for (lnum, line) in enumerate(token.content.splitlines(), token.map[0] + 2):
if len(args := line.split(":", 1)) != 2:
raise RuntimeError(f"options directive with no argument in line {lnum}")
(k, v) = (args[0].strip(), args[1].strip())
if k in items:
raise RuntimeError(f"duplicate options directive {k} in line {lnum}")
items[k] = v
try:
id_prefix = items.pop('id-prefix')
varlist_id = items.pop('list-id')
source = items.pop('source')
except KeyError as e:
raise RuntimeError(f"options directive {e} missing in block at line {token.map[0] + 1}")
if items.keys():
raise RuntimeError(
f"unsupported options directives in block at line {token.map[0] + 1}",
" ".join(items.keys()))
try:
with open(self._base_paths[-1].parent / source, 'r') as f:
token.meta['id-prefix'] = id_prefix
token.meta['list-id'] = varlist_id
token.meta['source'] = json.load(f)
except Exception as e:
raise RuntimeError(f"processing options block in line {token.map[0] + 1}") from e
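# Illustrative sketch of the include syntax consumed by _parse and _parse_options
# above (the concrete file names and values are assumptions, not taken from this
# file). A structural include lists one fragment path per line:
#
#   ```{=include=} chapters
#   installation.md
#   configuration.md
#   ```
#
# and an options include supplies key: value directives, of which id-prefix,
# list-id and source are required:
#
#   ```{=include=} options
#   id-prefix: opt-
#   list-id: configuration-variable-list
#   source: options.json
#   ```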
class RendererMixin(Renderer):
_toplevel_tag: str
_revision: str
def __init__(self, toplevel_tag: str, revision: str, *args: Any, **kwargs: Any):
super().__init__(*args, **kwargs)
self._toplevel_tag = toplevel_tag
self._revision = revision
self.rules |= {
'included_sections': lambda *args: self._included_thing("section", *args),
'included_chapters': lambda *args: self._included_thing("chapter", *args),
'included_preface': lambda *args: self._included_thing("preface", *args),
'included_parts': lambda *args: self._included_thing("part", *args),
'included_appendix': lambda *args: self._included_thing("appendix", *args),
'included_options': self.included_options,
}
def render(self, tokens: Sequence[Token]) -> str:
# books get special handling because they have *two* title tags. doing this with
# generic code is more complicated than it's worth. the checks above have verified
# that both titles actually exist.
if self._toplevel_tag == 'book':
return self._render_book(tokens)
return super().render(tokens)
@abstractmethod
def _render_book(self, tokens: Sequence[Token]) -> str:
raise NotImplementedError()
@abstractmethod
def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str:
raise NotImplementedError()
@abstractmethod
def included_options(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise NotImplementedError()
class ManualDocBookRenderer(RendererMixin, DocBookRenderer):
def __init__(self, toplevel_tag: str, revision: str, manpage_urls: Mapping[str, str]):
super().__init__(toplevel_tag, revision, manpage_urls)
def _render_book(self, tokens: Sequence[Token]) -> str:
assert tokens[1].children
assert tokens[4].children
if (maybe_id := cast(str, tokens[0].attrs.get('id', ""))):
maybe_id = "xml:id=" + xml.quoteattr(maybe_id)
return (f'<book xmlns="http://docbook.org/ns/docbook"'
f' xmlns:xlink="http://www.w3.org/1999/xlink"'
f' {maybe_id} version="5.0">'
f' <title>{self.renderInline(tokens[1].children)}</title>'
f' <subtitle>{self.renderInline(tokens[4].children)}</subtitle>'
f' {super(DocBookRenderer, self).render(tokens[6:])}'
f'</book>')
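    # Illustrative note (an assumption inferred from the indices used above, not a
    # statement from this file): a book source is expected to open with a title
    # heading and a subtitle paragraph, so the token stream starts as
    #   tokens[0..2] -> heading_open / inline (title) / heading_close
    #   tokens[3..5] -> paragraph_open / inline (subtitle) / paragraph_close
    # which is why tokens[1] and tokens[4] are rendered above and the remaining
    # body starts at tokens[6].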
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> tuple[str, dict[str, str]]:
(tag, attrs) = super()._heading_tag(token, tokens, i)
# render() has already verified that we don't have supernumerary headings and since the
# book tag is handled specially we can leave the check this simple
if token.tag != 'h1':
return (tag, attrs)
return (self._toplevel_tag, attrs | {
'xmlns': "http://docbook.org/ns/docbook",
'xmlns:xlink': "http://www.w3.org/1999/xlink",
})
def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str:
result = []
# close existing partintro. the generic render doesn't really need this because
# it doesn't have a concept of structure in the way the manual does.
if self._headings and self._headings[-1] == Heading('part', 1):
result.append("</partintro>")
self._headings[-1] = self._headings[-1]._replace(partintro_closed=True)
# must nest properly for structural includes. this requires saving at least
# the headings stack, but creating new renderers is cheap and much easier.
r = ManualDocBookRenderer(tag, self._revision, self._manpage_urls)
for (included, path) in token.meta['included']:
try:
result.append(r.render(included))
except Exception as e:
raise RuntimeError(f"rendering {path}") from e
return "".join(result)
def included_options(self, token: Token, tokens: Sequence[Token], i: int) -> str:
conv = options.DocBookConverter(self._manpage_urls, self._revision, 'fragment',
token.meta['list-id'], token.meta['id-prefix'])
conv.add_options(token.meta['source'])
return conv.finalize(fragment=True)
# TODO minimize docbook diffs with existing conversions. remove soon.
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return super().paragraph_open(token, tokens, i) + "\n "
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "\n" + super().paragraph_close(token, tokens, i)
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"<programlisting>\n{xml.escape(token.content)}</programlisting>"
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
info = f" language={xml.quoteattr(token.info)}" if token.info != "" else ""
return f"<programlisting{info}>\n{xml.escape(token.content)}</programlisting>"
class DocBookConverter(BaseConverter[ManualDocBookRenderer]):
INCLUDE_ARGS_NS = "docbook"
def __init__(self, manpage_urls: Mapping[str, str], revision: str):
super().__init__()
self._renderer = ManualDocBookRenderer('book', revision, manpage_urls)
class HTMLParameters(NamedTuple):
generator: str
stylesheets: Sequence[str]
scripts: Sequence[str]
# number of levels in the rendered table of contents. tables are prepended to
# the content they apply to (entire document / document chunk / top-level section
# of a chapter), setting a depth of 0 omits the respective table.
toc_depth: int
chunk_toc_depth: int
section_toc_depth: int
media_dir: Path
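# Illustrative sketch (all values are hypothetical, not taken from this file): a
# parameter set for a chunked HTML build might look like
#
#   HTMLParameters(generator="nixos-render-docs", stylesheets=["style.css"],
#                  scripts=["highlight.js"], toc_depth=1, chunk_toc_depth=1,
#                  section_toc_depth=0, media_dir=Path("media"))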
class ManualHTMLRenderer(RendererMixin, HTMLRenderer):
_base_path: Path
_in_dir: Path
_html_params: HTMLParameters
def __init__(self, toplevel_tag: str, revision: str, html_params: HTMLParameters,
manpage_urls: Mapping[str, str], xref_targets: dict[str, XrefTarget],
in_dir: Path, base_path: Path):
super().__init__(toplevel_tag, revision, manpage_urls, xref_targets)
self._in_dir = in_dir
self._base_path = base_path.absolute()
self._html_params = html_params
def _pull_image(self, src: str) -> str:
src_path = Path(src)
content = (self._in_dir / src_path).read_bytes()
# images may be used more than once, but we want to store them only once and
# in an easily accessible (ie, not input-file-path-dependent) location without
# having to maintain a mapping structure. hashing the file and using the hash
        # as the name of the final image provides both.
content_hash = hashlib.sha3_256(content).hexdigest()
target_name = f"{content_hash}{src_path.suffix}"
target_path = self._base_path / self._html_params.media_dir / target_name
target_path.write_bytes(content)
return f"./{self._html_params.media_dir}/{target_name}"
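    # Illustrative note (hypothetical values): an image referenced as
    # "figures/example.png" is copied to
    # "<base_path>/<media_dir>/<sha3-256 of its bytes>.png" and the returned src
    # becomes "./<media_dir>/<sha3-256 of its bytes>.png".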
def _push(self, tag: str, hlevel_offset: int) -> Any:
result = (self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir)
self._hlevel_offset += hlevel_offset
self._toplevel_tag, self._headings, self._attrspans = tag, [], []
return result
def _pop(self, state: Any) -> None:
(self._toplevel_tag, self._headings, self._attrspans, self._hlevel_offset, self._in_dir) = state
def _render_book(self, tokens: Sequence[Token]) -> str:
assert tokens[4].children
title_id = cast(str, tokens[0].attrs.get('id', ""))
title = self._xref_targets[title_id].title
# subtitles don't have IDs, so we can't use xrefs to get them
subtitle = self.renderInline(tokens[4].children)
toc = TocEntry.of(tokens[0])
return "\n".join([
self._file_header(toc),
' <div class="book">',
' <div class="titlepage">',
' <div>',
f' <div><h1 class="title"><a id="{html.escape(title_id, True)}"></a>{title}</h1></div>',
f' <div><h2 class="subtitle">{subtitle}</h2></div>',
' </div>',
" <hr />",
' </div>',
self._build_toc(tokens, 0),
super(HTMLRenderer, self).render(tokens[6:]),
' </div>',
self._file_footer(toc),
])
def _file_header(self, toc: TocEntry) -> str:
prev_link, up_link, next_link = "", "", ""
prev_a, next_a, parent_title = "", "", " "
nav_html = ""
home = toc.root
if toc.prev:
prev_link = f'<link rel="prev" href="{toc.prev.target.href()}" title="{toc.prev.target.title}" />'
prev_a = f'<a accesskey="p" href="{toc.prev.target.href()}">Prev</a>'
if toc.parent:
up_link = (
f'<link rel="up" href="{toc.parent.target.href()}" '
f'title="{toc.parent.target.title}" />'
)
if (part := toc.parent) and part.kind != 'book':
assert part.target.title
parent_title = part.target.title
if toc.next:
next_link = f'<link rel="next" href="{toc.next.target.href()}" title="{toc.next.target.title}" />'
next_a = f'<a accesskey="n" href="{toc.next.target.href()}">Next</a>'
if toc.prev or toc.parent or toc.next:
nav_html = "\n".join([
' <div class="navheader">',
' <table width="100%" summary="Navigation header">',
' <tr>',
f' <th colspan="3" align="center">{toc.target.title}</th>',
' </tr>',
' <tr>',
f' <td width="20%" align="left">{prev_a} </td>',
f' <th width="60%" align="center">{parent_title}</th>',
f' <td width="20%" align="right"> {next_a}</td>',
' </tr>',
' </table>',
' <hr />',
' </div>',
])
return "\n".join([
'<?xml version="1.0" encoding="utf-8" standalone="no"?>',
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"',
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
' <head>',
' <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />',
f' <title>{toc.target.title}</title>',
"".join((f'<link rel="stylesheet" type="text/css" href="{html.escape(style, True)}" />'
for style in self._html_params.stylesheets)),
"".join((f'<script src="{html.escape(script, True)}" type="text/javascript"></script>'
for script in self._html_params.scripts)),
f' <meta name="generator" content="{html.escape(self._html_params.generator, True)}" />',
f' <link rel="home" href="{home.target.href()}" title="{home.target.title}" />' if home.target.href() else "",
f' {up_link}{prev_link}{next_link}',
' </head>',
' <body>',
nav_html,
])
def _file_footer(self, toc: TocEntry) -> str:
# prev, next = self._get_prev_and_next()
prev_a, up_a, home_a, next_a = "", " ", " ", ""
prev_text, up_text, next_text = "", "", ""
nav_html = ""
home = toc.root
if toc.prev:
prev_a = f'<a accesskey="p" href="{toc.prev.target.href()}">Prev</a>'
assert toc.prev.target.title
prev_text = toc.prev.target.title
if toc.parent:
home_a = f'<a accesskey="h" href="{home.target.href()}">Home</a>'
if toc.parent != home:
up_a = f'<a accesskey="u" href="{toc.parent.target.href()}">Up</a>'
if toc.next:
next_a = f'<a accesskey="n" href="{toc.next.target.href()}">Next</a>'
assert toc.next.target.title
next_text = toc.next.target.title
if toc.prev or toc.parent or toc.next:
nav_html = "\n".join([
' <div class="navfooter">',
' <hr />',
' <table width="100%" summary="Navigation footer">',
' <tr>',
f' <td width="40%" align="left">{prev_a} </td>',
f' <td width="20%" align="center">{up_a}</td>',
f' <td width="40%" align="right"> {next_a}</td>',
' </tr>',
' <tr>',
f' <td width="40%" align="left" valign="top">{prev_text} </td>',
f' <td width="20%" align="center">{home_a}</td>',
f' <td width="40%" align="right" valign="top"> {next_text}</td>',
' </tr>',
' </table>',
' </div>',
])
return "\n".join([
nav_html,
' </body>',
'</html>',
])
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if token.tag == 'h1':
return self._toplevel_tag
return super()._heading_tag(token, tokens, i)
def _build_toc(self, tokens: Sequence[Token], i: int) -> str:
toc = TocEntry.of(tokens[i])
if toc.kind == 'section' and self._html_params.section_toc_depth < 1:
return ""
def walk_and_emit(toc: TocEntry, depth: int) -> list[str]:
if depth <= 0:
return []
result = []
for child in toc.children:
result.append(
f'<dt>'
f' <span class="{html.escape(child.kind, True)}">'
f' <a href="{child.target.href()}">{child.target.toc_html}</a>'
f' </span>'
f'</dt>'
)
# we want to look straight through parts because docbook-xsl does too, but it
                # also makes for more useful top-level tocs.
next_level = walk_and_emit(child, depth - (0 if child.kind == 'part' else 1))
if next_level:
result.append(f'<dd><dl>{"".join(next_level)}</dl></dd>')
return result
def build_list(kind: str, id: str, lst: Sequence[TocEntry]) -> str:
if not lst:
return ""
entries = [
f'<dt>{i}. <a href="{e.target.href()}">{e.target.toc_html}</a></dt>'
for i, e in enumerate(lst, start=1)
]
return (
f'<div class="{id}">'
f'<p><strong>List of {kind}</strong></p>'
f'<dl>{"".join(entries)}</dl>'
'</div>'
)
        # we don't want to generate the "Table of Contents" header for sections,
# docbook doesn't and it's only distracting clutter unless it's the main table.
# we also want to generate tocs only for a top-level section (ie, one that is
# not itself contained in another section)
print_title = toc.kind != 'section'
if toc.kind == 'section':
if toc.parent and toc.parent.kind == 'section':
toc_depth = 0
else:
toc_depth = self._html_params.section_toc_depth
elif toc.starts_new_chunk and toc.kind != 'book':
toc_depth = self._html_params.chunk_toc_depth
else:
toc_depth = self._html_params.toc_depth
if not (items := walk_and_emit(toc, toc_depth)):
return ""
figures = build_list("Figures", "list-of-figures", toc.figures)
examples = build_list("Examples", "list-of-examples", toc.examples)
return "".join([
f'<div class="toc">',
' <p><strong>Table of Contents</strong></p>' if print_title else "",
f' <dl class="toc">'
f' {"".join(items)}'
f' </dl>'
f'</div>'
f'{figures}'
f'{examples}'
])
def _make_hN(self, level: int) -> tuple[str, str]:
# for some reason chapters don't increase the hN nesting count in docbook xslts. duplicate
# this for consistency.
if self._toplevel_tag == 'chapter':
level -= 1
# TODO docbook compat. these are never useful for us, but not having them breaks manual
# compare workflows while docbook is still allowed.
style = ""
if level + self._hlevel_offset < 3 \
and (self._toplevel_tag == 'section' or (self._toplevel_tag == 'chapter' and level > 0)):
style = "clear: both"
tag, hstyle = super()._make_hN(max(1, level))
return tag, style
def _included_thing(self, tag: str, token: Token, tokens: Sequence[Token], i: int) -> str:
outer, inner = [], []
# since books have no non-include content the toplevel book wrapper will not count
# towards nesting depth. other types will have at least a title+id heading which
# *does* count towards the nesting depth. chapters give a -1 to included sections
        # mirroring the special handling in _make_hN. sigh.
hoffset = (
0 if not self._headings
else self._headings[-1].level - 1 if self._toplevel_tag == 'chapter'
else self._headings[-1].level
)
outer.append(self._maybe_close_partintro())
into = token.meta['include-args'].get('into-file')
fragments = token.meta['included']
state = self._push(tag, hoffset)
if into:
toc = TocEntry.of(fragments[0][0][0])
inner.append(self._file_header(toc))
# we do not set _hlevel_offset=0 because docbook doesn't either.
else:
inner = outer
in_dir = self._in_dir
for included, path in fragments:
try:
self._in_dir = (in_dir / path).parent
inner.append(self.render(included))
except Exception as e:
raise RuntimeError(f"rendering {path}") from e
if into:
inner.append(self._file_footer(toc))
(self._base_path / into).write_text("".join(inner))
self._pop(state)
return "".join(outer)
def included_options(self, token: Token, tokens: Sequence[Token], i: int) -> str:
conv = options.HTMLConverter(self._manpage_urls, self._revision,
token.meta['list-id'], token.meta['id-prefix'],
self._xref_targets)
conv.add_options(token.meta['source'])
return conv.finalize()
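# map a 0-based index to an uppercase letter label (0 -> "A", 1 -> "B", ..., 25 -> "Z");
# used by HTMLConverter below for appendix labels such as "Appendix A".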
def _to_base26(n: int) -> str:
return (_to_base26(n // 26) if n > 26 else "") + chr(ord("A") + n % 26)
class HTMLConverter(BaseConverter[ManualHTMLRenderer]):
INCLUDE_ARGS_NS = "html"
INCLUDE_FRAGMENT_ALLOWED_ARGS = { 'into-file' }
_revision: str
_html_params: HTMLParameters
_manpage_urls: Mapping[str, str]
_xref_targets: dict[str, XrefTarget]
_redirection_targets: set[str]
_appendix_count: int = 0
def _next_appendix_id(self) -> str:
self._appendix_count += 1
return _to_base26(self._appendix_count - 1)
def __init__(self, revision: str, html_params: HTMLParameters, manpage_urls: Mapping[str, str]):
super().__init__()
self._revision, self._html_params, self._manpage_urls = revision, html_params, manpage_urls
self._xref_targets = {}
self._redirection_targets = set()
# renderer not set on purpose since it has a dependency on the output path!
def convert(self, infile: Path, outfile: Path) -> None:
self._renderer = ManualHTMLRenderer(
'book', self._revision, self._html_params, self._manpage_urls, self._xref_targets,
infile.parent, outfile.parent)
super().convert(infile, outfile)
def _parse(self, src: str) -> list[Token]:
tokens = super()._parse(src)
for token in tokens:
if not token.type.startswith('included_') \
or not (into := token.meta['include-args'].get('into-file')):
continue
assert token.map
if len(token.meta['included']) == 0:
raise RuntimeError(f"redirection target {into} in line {token.map[0] + 1} is empty!")
# we use blender-style //path to denote paths relative to the origin file
# (usually index.html). this makes everything a lot easier and clearer.
if not into.startswith("//") or '/' in into[2:]:
raise RuntimeError("html:into-file must be a relative-to-origin //filename", into)
into = token.meta['include-args']['into-file'] = into[2:]
if into in self._redirection_targets:
raise RuntimeError(f"redirection target {into} in line {token.map[0] + 1} is already in use")
self._redirection_targets.add(into)
return tokens
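    # prefix every `{block}_title` inline (e.g. example or figure titles) with
    # "{prefix} {n}. ", recursing into included files, and return the next unused number.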
def _number_block(self, block: str, prefix: str, tokens: Sequence[Token], start: int = 1) -> int:
title_open, title_close = f'{block}_title_open', f'{block}_title_close'
for (i, token) in enumerate(tokens):
if token.type == title_open:
title = tokens[i + 1]
assert title.type == 'inline' and title.children
# the prefix is split into two tokens because the xref title_html will want
# only the first of the two, but both must be rendered into the example itself.
title.children = (
[
Token('text', '', 0, content=f'{prefix} {start}'),
Token('text', '', 0, content='. ')
] + title.children
)
start += 1
elif token.type.startswith('included_') and token.type != 'included_options':
for sub, _path in token.meta['included']:
start = self._number_block(block, prefix, sub, start)
return start
# xref | (id, type, heading inlines, file, starts new file)
def _collect_ids(self, tokens: Sequence[Token], target_file: str, typ: str, file_changed: bool
) -> list[XrefTarget | tuple[str, str, Token, str, bool]]:
result: list[XrefTarget | tuple[str, str, Token, str, bool]] = []
# collect all IDs and their xref substitutions. headings are deferred until everything
# has been parsed so we can resolve links in headings. if that's even used anywhere.
for (i, bt) in enumerate(tokens):
if bt.type == 'heading_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, typ if bt.tag == 'h1' else 'section', tokens[i + 1], target_file,
i == 0 and file_changed))
elif bt.type == 'included_options':
id_prefix = bt.meta['id-prefix']
for opt in bt.meta['source'].keys():
id = make_xml_id(f"{id_prefix}{opt}")
name = html.escape(opt)
result.append(XrefTarget(id, f'<code class="option">{name}</code>', name, None, target_file))
elif bt.type.startswith('included_'):
sub_file = bt.meta['include-args'].get('into-file', target_file)
subtyp = bt.type.removeprefix('included_').removesuffix('s')
for si, (sub, _path) in enumerate(bt.meta['included']):
result += self._collect_ids(sub, sub_file, subtyp, si == 0 and sub_file != target_file)
elif bt.type == 'example_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'example', tokens[i + 2], target_file, False))
elif bt.type == 'figure_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append((id, 'figure', tokens[i + 2], target_file, False))
elif bt.type == 'footnote_open' and (id := cast(str, bt.attrs.get('id', ''))):
result.append(XrefTarget(id, "???", None, None, target_file))
elif bt.type == 'footnote_ref' and (id := cast(str, bt.attrs.get('id', ''))):
result.append(XrefTarget(id, "???", None, None, target_file))
elif bt.type == 'inline':
assert bt.children
result += self._collect_ids(bt.children, target_file, typ, False)
elif id := cast(str, bt.attrs.get('id', '')):
# anchors and examples have no titles we could use, but we'll have to put
# *something* here to communicate that there's no title.
result.append(XrefTarget(id, "???", None, None, target_file))
return result
def _render_xref(self, id: str, typ: str, inlines: Token, path: str, drop_fragment: bool) -> XrefTarget:
assert inlines.children
title_html = self._renderer.renderInline(inlines.children)
if typ == 'appendix':
# NOTE the docbook compat is strong here
n = self._next_appendix_id()
prefix = f"Appendix\u00A0{n}.\u00A0"
# HACK for docbook compat: prefix the title inlines with appendix id if
# necessary. the alternative is to mess with titlepage rendering in headings,
# which seems just a lot worse than this
prefix_tokens = [Token(type='text', tag='', nesting=0, content=prefix)]
inlines.children = prefix_tokens + list(inlines.children)
title = prefix + title_html
toc_html = f"{n}. {title_html}"
title_html = f"Appendix {n}"
elif typ in ['example', 'figure']:
# skip the prepended `{Example,Figure} N. ` from numbering
toc_html, title = self._renderer.renderInline(inlines.children[2:]), title_html
# xref title wants only the prepended text, sans the trailing colon and space
title_html = self._renderer.renderInline(inlines.children[0:1])
else:
toc_html, title = title_html, title_html
title_html = (
f"<em>{title_html}</em>"
if typ == 'chapter'
else title_html if typ in [ 'book', 'part' ]
else f'the section called “{title_html}”'
)
return XrefTarget(id, title_html, toc_html, re.sub('<.*?>', '', title), path, drop_fragment)
def _postprocess(self, infile: Path, outfile: Path, tokens: Sequence[Token]) -> None:
self._number_block('example', "Example", tokens)
self._number_block('figure', "Figure", tokens)
xref_queue = self._collect_ids(tokens, outfile.name, 'book', True)
failed = False
deferred = []
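        # resolve xrefs in rounds: items whose targets cannot be rendered yet are deferred
        # to the next round; once a round makes no progress, `failed` is set so that the
        # next UnresolvedXrefError propagates instead of looping forever.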
while xref_queue:
for item in xref_queue:
try:
target = item if isinstance(item, XrefTarget) else self._render_xref(*item)
except UnresolvedXrefError:
if failed:
raise
deferred.append(item)
continue
if target.id in self._xref_targets:
raise RuntimeError(f"found duplicate id #{target.id}")
self._xref_targets[target.id] = target
if len(deferred) == len(xref_queue):
failed = True # do another round and report the first error
xref_queue = deferred
paths_seen = set()
for t in self._xref_targets.values():
paths_seen.add(t.path)
if len(paths_seen) == 1:
for (k, t) in self._xref_targets.items():
self._xref_targets[k] = XrefTarget(
t.id,
t.title_html,
t.toc_html,
t.title,
t.path,
t.drop_fragment,
drop_target=True
)
TocEntry.collect_and_link(self._xref_targets, tokens)
def _build_cli_db(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument('infile', type=Path)
p.add_argument('outfile', type=Path)
def _build_cli_html(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument('--generator', default='nixos-render-docs')
p.add_argument('--stylesheet', default=[], action='append')
p.add_argument('--script', default=[], action='append')
p.add_argument('--toc-depth', default=1, type=int)
p.add_argument('--chunk-toc-depth', default=1, type=int)
p.add_argument('--section-toc-depth', default=0, type=int)
p.add_argument('--media-dir', default="media", type=Path)
p.add_argument('infile', type=Path)
p.add_argument('outfile', type=Path)
def _run_cli_db(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = DocBookConverter(json.load(manpage_urls), args.revision)
md.convert(args.infile, args.outfile)
def _run_cli_html(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = HTMLConverter(
args.revision,
HTMLParameters(args.generator, args.stylesheet, args.script, args.toc_depth,
args.chunk_toc_depth, args.section_toc_depth, args.media_dir),
json.load(manpage_urls))
md.convert(args.infile, args.outfile)
def build_cli(p: argparse.ArgumentParser) -> None:
formats = p.add_subparsers(dest='format', required=True)
_build_cli_db(formats.add_parser('docbook'))
_build_cli_html(formats.add_parser('html'))
def run_cli(args: argparse.Namespace) -> None:
if args.format == 'docbook':
_run_cli_db(args)
elif args.format == 'html':
_run_cli_html(args)
else:
raise RuntimeError('format not hooked up', args)
| 35,938 | 46.727756 | 122 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/commonmark.py
|
from collections.abc import Mapping, Sequence
from dataclasses import dataclass
from typing import cast, Optional
from .md import md_escape, md_make_code, Renderer
from markdown_it.token import Token
@dataclass(kw_only=True)
class List:
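    # next_idx is None for bullet lists and holds the next item number for ordered lists;
    # compact lists separate items with a single line break instead of a blank line.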
next_idx: Optional[int] = None
compact: bool
first_item_seen: bool = False
@dataclass
class Par:
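    # `continuing` records that the current block has already emitted output, so the next
    # paragraph-level construct knows it must emit a separating blank line first.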
indent: str
continuing: bool = False
class CommonMarkRenderer(Renderer):
__output__ = "commonmark"
_parstack: list[Par]
_link_stack: list[str]
_list_stack: list[List]
def __init__(self, manpage_urls: Mapping[str, str]):
super().__init__(manpage_urls)
self._parstack = [ Par("") ]
self._link_stack = []
self._list_stack = []
def _enter_block(self, extra_indent: str) -> None:
self._parstack.append(Par(self._parstack[-1].indent + extra_indent))
def _leave_block(self) -> None:
self._parstack.pop()
self._parstack[-1].continuing = True
def _break(self) -> str:
self._parstack[-1].continuing = True
return f"\n{self._parstack[-1].indent}"
def _maybe_parbreak(self) -> str:
result = f"\n{self._parstack[-1].indent}" * 2 if self._parstack[-1].continuing else ""
self._parstack[-1].continuing = True
return result
def _admonition_open(self, kind: str) -> str:
pbreak = self._maybe_parbreak()
self._enter_block("")
return f"{pbreak}**{kind}:** "
def _admonition_close(self) -> str:
self._leave_block()
return ""
def _indent_raw(self, s: str) -> str:
if '\n' not in s:
return s
return f"\n{self._parstack[-1].indent}".join(s.splitlines())
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return self._indent_raw(md_escape(token.content))
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._maybe_parbreak()
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f" {self._break()}"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._break()
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return md_make_code(token.content)
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self.fence(token, tokens, i)
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
self._link_stack.append(cast(str, token.attrs['href']))
return "["
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"]({md_escape(self._link_stack.pop())})"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
lst = self._list_stack[-1]
lbreak = "" if not lst.first_item_seen else self._break() * (1 if lst.compact else 2)
lst.first_item_seen = True
head = " -"
if lst.next_idx is not None:
head = f" {lst.next_idx}."
lst.next_idx += 1
self._enter_block(" " * (len(head) + 1))
return f'{lbreak}{head} '
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ""
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.append(List(compact=bool(token.meta['compact'])))
return self._maybe_parbreak()
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.pop()
return ""
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "*"
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "*"
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "**"
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "**"
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
code = token.content
if code.endswith('\n'):
code = code[:-1]
pbreak = self._maybe_parbreak()
return pbreak + self._indent_raw(md_make_code(code, info=token.info, multiline=True))
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
pbreak = self._maybe_parbreak()
self._enter_block("> ")
return pbreak + "> "
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ""
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Note")
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Caution")
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Important")
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Tip")
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Warning")
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.append(List(compact=False))
return ""
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.pop()
return ""
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
pbreak = self._maybe_parbreak()
self._enter_block(" ")
# add an opening zero-width non-joiner to separate *our* emphasis from possible
# emphasis in the provided term
return f'{pbreak} - *{chr(0x200C)}'
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"{chr(0x200C)}*"
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
return ""
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ""
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._parstack[-1].continuing = True
content = md_make_code(token.content)
if token.meta['name'] == 'manpage' and (url := self._manpage_urls.get(token.content)):
return f"[{content}]({url})"
return content # no roles in regular commonmark
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# there's no way we can emit attrspans correctly in all cases. we could use inline
# html for ids, but that would not round-trip. same holds for classes. since this
# renderer is only used for approximate options export and all of these things are
# not allowed in options we can ignore them for now.
return ""
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return token.markup + " "
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "\n"
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.append(
List(next_idx = cast(int, token.attrs.get('start', 1)),
compact = bool(token.meta['compact'])))
return self._maybe_parbreak()
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.pop()
return ""
def image(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if title := cast(str, token.attrs.get('title', '')):
title = ' "' + title.replace('"', '\\"') + '"'
        return f''
| 8,928 | 45.748691 | 94 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/docbook.py
|
from collections.abc import Mapping, Sequence
from typing import cast, Optional, NamedTuple
from markdown_it.token import Token
from xml.sax.saxutils import escape, quoteattr
from .md import Renderer
_xml_id_translate_table = {
ord('*'): ord('_'),
ord('<'): ord('_'),
ord(' '): ord('_'),
ord('>'): ord('_'),
ord('['): ord('_'),
ord(']'): ord('_'),
ord(':'): ord('_'),
ord('"'): ord('_'),
}
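# replace characters that are not allowed in xml ids with underscores,
# e.g. make_xml_id("foo bar") -> "foo_bar" and make_xml_id("a:b") -> "a_b".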
def make_xml_id(s: str) -> str:
return s.translate(_xml_id_translate_table)
class Deflist:
has_dd = False
class Heading(NamedTuple):
container_tag: str
level: int
    # special handling for <part> titles: whether partintro was already closed from elsewhere
# or still needs closing.
partintro_closed: bool = False
class DocBookRenderer(Renderer):
_link_tags: list[str]
_deflists: list[Deflist]
_headings: list[Heading]
_attrspans: list[str]
def __init__(self, manpage_urls: Mapping[str, str]):
super().__init__(manpage_urls)
self._link_tags = []
self._deflists = []
self._headings = []
self._attrspans = []
def render(self, tokens: Sequence[Token]) -> str:
result = super().render(tokens)
result += self._close_headings(None)
return result
def renderInline(self, tokens: Sequence[Token]) -> str:
# HACK to support docbook links and xrefs. link handling is only necessary because the docbook
# manpage stylesheet converts - in urls to a mathematical minus, which may be somewhat incorrect.
for i, token in enumerate(tokens):
if token.type != 'link_open':
continue
token.tag = 'link'
# turn [](#foo) into xrefs
if token.attrs['href'][0:1] == '#' and tokens[i + 1].type == 'link_close': # type: ignore[index]
token.tag = "xref"
# turn <x> into links without contents
if tokens[i + 1].type == 'text' and tokens[i + 1].content == token.attrs['href']:
tokens[i + 1].content = ''
return super().renderInline(tokens)
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return escape(token.content)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para>"
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</para>"
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<literallayout>\n</literallayout>"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# should check options.breaks() and emit hard break if so
return "\n"
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"<literal>{escape(token.content)}</literal>"
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"<programlisting>{escape(token.content)}</programlisting>"
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._link_tags.append(token.tag)
href = cast(str, token.attrs['href'])
(attr, start) = ('linkend', 1) if href[0] == '#' else ('xlink:href', 0)
return f"<{token.tag} {attr}={quoteattr(href[start:])}>"
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f"</{self._link_tags.pop()}>"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<listitem>"
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</listitem>\n"
# HACK open and close para for docbook change size. remove soon.
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
spacing = ' spacing="compact"' if token.meta.get('compact', False) else ''
return f"<para><itemizedlist{spacing}>\n"
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "\n</itemizedlist></para>"
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<emphasis>"
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</emphasis>"
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<emphasis role=\"strong\">"
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</emphasis>"
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
info = f" language={quoteattr(token.info)}" if token.info != "" else ""
return f"<programlisting{info}>{escape(token.content)}</programlisting>"
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><blockquote>"
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</blockquote></para>"
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><note>"
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</note></para>"
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><caution>"
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</caution></para>"
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><important>"
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</important></para>"
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><tip>"
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</tip></para>"
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<para><warning>"
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</warning></para>"
# markdown-it emits tokens based on the html syntax tree, but docbook is
# slightly different. html has <dl>{<dt/>{<dd/>}}</dl>,
# docbook has <variablelist>{<varlistentry><term/><listitem/></varlistentry>}<variablelist>
    # we have to reject multiple definitions for the same term for the time being.
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._deflists.append(Deflist())
return "<para><variablelist>"
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._deflists.pop()
return "</variablelist></para>"
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._deflists[-1].has_dd = False
return "<varlistentry><term>"
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</term>"
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if self._deflists[-1].has_dd:
raise Exception("multiple definitions per term not supported")
self._deflists[-1].has_dd = True
return "<listitem>"
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</listitem></varlistentry>"
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if token.meta['name'] == 'command':
return f"<command>{escape(token.content)}</command>"
if token.meta['name'] == 'file':
return f"<filename>{escape(token.content)}</filename>"
if token.meta['name'] == 'var':
return f"<varname>{escape(token.content)}</varname>"
if token.meta['name'] == 'env':
return f"<envar>{escape(token.content)}</envar>"
if token.meta['name'] == 'option':
return f"<option>{escape(token.content)}</option>"
if token.meta['name'] == 'manpage':
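            # split e.g. "mount(8)" into page "mount" and section "8"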
[page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ]
section = section[:-1]
man = f"{page}({section})"
title = f"<refentrytitle>{escape(page)}</refentrytitle>"
vol = f"<manvolnum>{escape(section)}</manvolnum>"
ref = f"<citerefentry>{title}{vol}</citerefentry>"
if man in self._manpage_urls:
return f"<link xlink:href={quoteattr(self._manpage_urls[man])}>{ref}</link>"
else:
return ref
raise NotImplementedError("md node not supported yet", token)
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# we currently support *only* inline anchors and the special .keycap class to produce
# <keycap> docbook elements.
(id_part, class_part) = ("", "")
if s := token.attrs.get('id'):
id_part = f'<anchor xml:id={quoteattr(cast(str, s))} />'
if s := token.attrs.get('class'):
if s == 'keycap':
class_part = "<keycap>"
self._attrspans.append("</keycap>")
else:
return super().attr_span_begin(token, tokens, i)
else:
self._attrspans.append("")
return id_part + class_part
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._attrspans.pop()
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
start = f' startingnumber="{token.attrs["start"]}"' if 'start' in token.attrs else ""
spacing = ' spacing="compact"' if token.meta.get('compact', False) else ''
return f"<orderedlist{start}{spacing}>"
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</orderedlist>"
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
hlevel = int(token.tag[1:])
result = self._close_headings(hlevel)
(tag, attrs) = self._heading_tag(token, tokens, i)
self._headings.append(Heading(tag, hlevel))
attrs_str = "".join([ f" {k}={quoteattr(v)}" for k, v in attrs.items() ])
return result + f'<{tag}{attrs_str}>\n<title>'
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
heading = self._headings[-1]
result = '</title>'
if heading.container_tag == 'part':
# generate the same ids as were previously assigned manually. if this collides we
# rely on outside schema validation to catch it!
maybe_id = ""
assert tokens[i - 2].type == 'heading_open'
if id := cast(str, tokens[i - 2].attrs.get('id', "")):
maybe_id = " xml:id=" + quoteattr(id + "-intro")
result += f"<partintro{maybe_id}>"
return result
def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if id := cast(str, token.attrs.get('id', '')):
id = f'xml:id={quoteattr(id)}' if id else ''
return f'<example {id}>'
def example_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</example>"
def example_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<title>"
def example_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</title>"
def _close_headings(self, level: Optional[int]) -> str:
# we rely on markdown-it producing h{1..6} tags in token.tag for this to work
result = []
while len(self._headings):
if level is None or self._headings[-1].level >= level:
heading = self._headings.pop()
if heading.container_tag == 'part' and not heading.partintro_closed:
result.append("</partintro>")
result.append(f"</{heading.container_tag}>")
else:
break
return "\n".join(result)
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> tuple[str, dict[str, str]]:
attrs = {}
if id := token.attrs.get('id'):
attrs['xml:id'] = cast(str, id)
return ("section", attrs)
| 12,382 | 48.931452 | 108 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/html.py
|
from collections.abc import Mapping, Sequence
from typing import cast, Optional, NamedTuple
from html import escape
from markdown_it.token import Token
from .manual_structure import XrefTarget
from .md import Renderer
class UnresolvedXrefError(Exception):
pass
class Heading(NamedTuple):
container_tag: str
level: int
html_tag: str
    # special handling for part content: whether the partintro div was already closed from
# elsewhere or still needs closing.
partintro_closed: bool
# tocs are generated when the heading opens, but have to be emitted into the file
# after the heading titlepage (and maybe partinfo) has been closed.
toc_fragment: str
_bullet_list_styles = [ 'disc', 'circle', 'square' ]
_ordered_list_styles = [ '1', 'a', 'i', 'A', 'I' ]
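# list markers cycle through these styles by nesting depth, presumably to match the
# output of the docbook-xsl stylesheets.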
class HTMLRenderer(Renderer):
_xref_targets: Mapping[str, XrefTarget]
_headings: list[Heading]
_attrspans: list[str]
_hlevel_offset: int = 0
_bullet_list_nesting: int = 0
_ordered_list_nesting: int = 0
def __init__(self, manpage_urls: Mapping[str, str], xref_targets: Mapping[str, XrefTarget]):
super().__init__(manpage_urls)
self._headings = []
self._attrspans = []
self._xref_targets = xref_targets
def render(self, tokens: Sequence[Token]) -> str:
result = super().render(tokens)
result += self._close_headings(None)
return result
def _pull_image(self, path: str) -> str:
raise NotImplementedError()
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return escape(token.content)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<p>"
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</p>"
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<br />"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "\n"
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f'<code class="literal">{escape(token.content)}</code>'
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self.fence(token, tokens, i)
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
href = escape(cast(str, token.attrs['href']), True)
tag, title, target, text = "link", "", 'target="_top"', ""
if href.startswith('#'):
if not (xref := self._xref_targets.get(href[1:])):
raise UnresolvedXrefError(f"bad local reference, id {href} not known")
if tokens[i + 1].type == 'link_close':
tag, text = "xref", xref.title_html
if xref.title:
# titles are not attribute-safe on their own, so we need to replace quotes.
                    title = 'title="{}"'.format(xref.title.replace('"', '&quot;'))
target, href = "", xref.href()
return f'<a class="{tag}" href="{href}" {title} {target}>{text}'
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</a>"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<li class="listitem">'
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</li>"
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
extra = 'compact' if token.meta.get('compact', False) else ''
style = _bullet_list_styles[self._bullet_list_nesting % len(_bullet_list_styles)]
self._bullet_list_nesting += 1
return f'<div class="itemizedlist"><ul class="itemizedlist {extra}" style="list-style-type: {style};">'
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._bullet_list_nesting -= 1
return "</ul></div>"
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<span class="emphasis"><em>'
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</em></span>"
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<span class="strong"><strong>'
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</strong></span>"
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# TODO use token.info. docbook doesn't so we can't yet.
return f'<pre class="programlisting">\n{escape(token.content)}</pre>'
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="blockquote"><blockquote class="blockquote">'
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</blockquote></div>"
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="note"><h3 class="title">Note</h3>'
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="caution"><h3 class="title">Caution</h3>'
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="important"><h3 class="title">Important</h3>'
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="tip"><h3 class="title">Tip</h3>'
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="warning"><h3 class="title">Warning</h3>'
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<div class="variablelist"><dl class="variablelist">'
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</dl></div>"
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<dt><span class="term">'
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</span></dt>"
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<dd>"
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</dd>"
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if token.meta['name'] == 'command':
return f'<span class="command"><strong>{escape(token.content)}</strong></span>'
if token.meta['name'] == 'file':
return f'<code class="filename">{escape(token.content)}</code>'
if token.meta['name'] == 'var':
return f'<code class="varname">{escape(token.content)}</code>'
if token.meta['name'] == 'env':
return f'<code class="envar">{escape(token.content)}</code>'
if token.meta['name'] == 'option':
return f'<code class="option">{escape(token.content)}</code>'
if token.meta['name'] == 'manpage':
[page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ]
section = section[:-1]
man = f"{page}({section})"
title = f'<span class="refentrytitle">{escape(page)}</span>'
vol = f"({escape(section)})"
ref = f'<span class="citerefentry">{title}{vol}</span>'
if man in self._manpage_urls:
return f'<a class="link" href="{escape(self._manpage_urls[man], True)}" target="_top">{ref}</a>'
else:
return ref
return super().myst_role(token, tokens, i)
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# we currently support *only* inline anchors and the special .keycap class to produce
# keycap-styled spans.
(id_part, class_part) = ("", "")
if s := token.attrs.get('id'):
id_part = f'<a id="{escape(cast(str, s), True)}" />'
if s := token.attrs.get('class'):
if s == 'keycap':
class_part = '<span class="keycap"><strong>'
self._attrspans.append("</strong></span>")
else:
return super().attr_span_begin(token, tokens, i)
else:
self._attrspans.append("")
return id_part + class_part
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._attrspans.pop()
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
hlevel = int(token.tag[1:])
htag, hstyle = self._make_hN(hlevel)
if hstyle:
hstyle = f'style="{escape(hstyle, True)}"'
if anchor := cast(str, token.attrs.get('id', '')):
anchor = f'<a id="{escape(anchor, True)}"></a>'
result = self._close_headings(hlevel)
tag = self._heading_tag(token, tokens, i)
toc_fragment = self._build_toc(tokens, i)
self._headings.append(Heading(tag, hlevel, htag, tag != 'part', toc_fragment))
return (
f'{result}'
f'<div class="{tag}">'
f' <div class="titlepage">'
f' <div>'
f' <div>'
f' <{htag} class="title" {hstyle}>'
f' {anchor}'
)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
heading = self._headings[-1]
result = (
f' </{heading.html_tag}>'
f' </div>'
f' </div>'
f'</div>'
)
if heading.container_tag == 'part':
result += '<div class="partintro">'
else:
result += heading.toc_fragment
return result
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
extra = 'compact' if token.meta.get('compact', False) else ''
start = f'start="{token.attrs["start"]}"' if 'start' in token.attrs else ""
style = _ordered_list_styles[self._ordered_list_nesting % len(_ordered_list_styles)]
self._ordered_list_nesting += 1
return f'<div class="orderedlist"><ol class="orderedlist {extra}" {start} type="{style}">'
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._ordered_list_nesting -= 1
return "</ol></div>"
def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if id := cast(str, token.attrs.get('id', '')):
id = f'id="{escape(id, True)}"' if id else ''
return f'<div class="example"><a {id} />'
def example_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '</div></div><br class="example-break" />'
def example_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '<p class="title"><strong>'
def example_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return '</strong></p><div class="example-contents">'
def image(self, token: Token, tokens: Sequence[Token], i: int) -> str:
src = self._pull_image(cast(str, token.attrs['src']))
alt = f'alt="{escape(token.content, True)}"' if token.content else ""
if title := cast(str, token.attrs.get('title', '')):
title = f'title="{escape(title, True)}"'
return (
'<div class="mediaobject">'
f'<img src="{escape(src, True)}" {alt} {title} />'
'</div>'
)
def figure_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if anchor := cast(str, token.attrs.get('id', '')):
anchor = f'<a id="{escape(anchor, True)}"></a>'
return f'<div class="figure">{anchor}'
def figure_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
' </div>'
'</div><br class="figure-break" />'
)
def figure_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
'<p class="title">'
' <strong>'
)
def figure_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
' </strong>'
'</p>'
'<div class="figure-contents">'
)
def table_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
'<div class="informaltable">'
'<table class="informaltable" border="1">'
)
def table_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
'</table>'
'</div>'
)
def thead_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
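        # peek ahead at the header cells to emit a <colgroup> with per-column alignment
        # before opening the actual <thead>.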
cols = []
for j in range(i + 1, len(tokens)):
if tokens[j].type == 'thead_close':
break
elif tokens[j].type == 'th_open':
cols.append(cast(str, tokens[j].attrs.get('style', 'left')).removeprefix('text-align:'))
return "".join([
"<colgroup>",
"".join([ f'<col align="{col}" />' for col in cols ]),
"</colgroup>",
"<thead>",
])
def thead_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</thead>"
def tr_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<tr>"
def tr_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</tr>"
def th_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f'<th align="{cast(str, token.attrs.get("style", "left")).removeprefix("text-align:")}">'
def th_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</th>"
def tbody_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "<tbody>"
def tbody_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</tbody>"
def td_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return f'<td align="{cast(str, token.attrs.get("style", "left")).removeprefix("text-align:")}">'
def td_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</td>"
def footnote_ref(self, token: Token, tokens: Sequence[Token], i: int) -> str:
href = self._xref_targets[token.meta['target']].href()
id = escape(cast(str, token.attrs["id"]), True)
return (
f'<a href="{href}" class="footnote" id="{id}">'
f'<sup class="footnote">[{token.meta["id"] + 1}]</sup>'
'</a>'
)
def footnote_block_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return (
'<div class="footnotes">'
'<br />'
'<hr style="width:100; text-align:left;margin-left: 0" />'
)
def footnote_block_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def footnote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# meta id,label
id = escape(self._xref_targets[token.meta["label"]].id, True)
return f'<div id="{id}" class="footnote">'
def footnote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "</div>"
def footnote_anchor(self, token: Token, tokens: Sequence[Token], i: int) -> str:
href = self._xref_targets[token.meta['target']].href()
return (
f'<a href="{href}" class="para">'
f'<sup class="para">[{token.meta["id"] + 1}]</sup>'
'</a>'
)
def _make_hN(self, level: int) -> tuple[str, str]:
return f"h{min(6, max(1, level + self._hlevel_offset))}", ""
def _maybe_close_partintro(self) -> str:
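        # parts keep their partintro <div> open after the heading; close it (and emit the
        # pending toc fragment) at most once, either here or when the part itself closes.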
if self._headings:
heading = self._headings[-1]
if heading.container_tag == 'part' and not heading.partintro_closed:
self._headings[-1] = heading._replace(partintro_closed=True)
return heading.toc_fragment + "</div>"
return ""
def _close_headings(self, level: Optional[int]) -> str:
result = []
while len(self._headings) and (level is None or self._headings[-1].level >= level):
result.append(self._maybe_close_partintro())
result.append("</div>")
self._headings.pop()
return "\n".join(result)
def _heading_tag(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return "section"
def _build_toc(self, tokens: Sequence[Token], i: int) -> str:
return ""
| 17,260 | 47.759887 | 112 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/md.py
|
from abc import ABC
from collections.abc import Mapping, MutableMapping, Sequence
from typing import Any, Callable, cast, Generic, get_args, Iterable, Literal, NoReturn, Optional, TypeVar
import dataclasses
import re
from .types import RenderFn
import markdown_it
from markdown_it.token import Token
from markdown_it.utils import OptionsDict
from mdit_py_plugins.container import container_plugin # type: ignore[attr-defined]
from mdit_py_plugins.deflist import deflist_plugin # type: ignore[attr-defined]
from mdit_py_plugins.footnote import footnote_plugin # type: ignore[attr-defined]
from mdit_py_plugins.myst_role import myst_role_plugin # type: ignore[attr-defined]
_md_escape_table = {
ord('*'): '\\*',
ord('<'): '\\<',
ord('['): '\\[',
ord('`'): '\\`',
ord('.'): '\\.',
ord('#'): '\\#',
ord('&'): '\\&',
ord('\\'): '\\\\',
}
def md_escape(s: str) -> str:
return s.translate(_md_escape_table)
def md_make_code(code: str, info: str = "", multiline: Optional[bool] = None) -> str:
# for multi-line code blocks we only have to count ` runs at the beginning
# of a line, but this is much easier.
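    # e.g. md_make_code("a`b") -> "`` a`b ``" and
    # md_make_code("true", info="nix") -> "```nix\ntrue\n```"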
multiline = multiline or info != "" or '\n' in code
longest, current = (0, 0)
for c in code:
current = current + 1 if c == '`' else 0
longest = max(current, longest)
# inline literals need a space to separate ticks from content, code blocks
# need newlines. inline literals need one extra tick, code blocks need three.
ticks, sep = ('`' * (longest + (3 if multiline else 1)), '\n' if multiline else ' ')
return f"{ticks}{info}{sep}{code}{sep}{ticks}"
AttrBlockKind = Literal['admonition', 'example', 'figure']
AdmonitionKind = Literal["note", "caution", "tip", "important", "warning"]
class Renderer:
_admonitions: dict[AdmonitionKind, tuple[RenderFn, RenderFn]]
_admonition_stack: list[AdmonitionKind]
def __init__(self, manpage_urls: Mapping[str, str]):
self._manpage_urls = manpage_urls
self.rules = {
'text': self.text,
'paragraph_open': self.paragraph_open,
'paragraph_close': self.paragraph_close,
'hardbreak': self.hardbreak,
'softbreak': self.softbreak,
'code_inline': self.code_inline,
'code_block': self.code_block,
'link_open': self.link_open,
'link_close': self.link_close,
'list_item_open': self.list_item_open,
'list_item_close': self.list_item_close,
'bullet_list_open': self.bullet_list_open,
'bullet_list_close': self.bullet_list_close,
'em_open': self.em_open,
'em_close': self.em_close,
'strong_open': self.strong_open,
'strong_close': self.strong_close,
'fence': self.fence,
'blockquote_open': self.blockquote_open,
'blockquote_close': self.blockquote_close,
'dl_open': self.dl_open,
'dl_close': self.dl_close,
'dt_open': self.dt_open,
'dt_close': self.dt_close,
'dd_open': self.dd_open,
'dd_close': self.dd_close,
'myst_role': self.myst_role,
"admonition_open": self.admonition_open,
"admonition_close": self.admonition_close,
"attr_span_begin": self.attr_span_begin,
"attr_span_end": self.attr_span_end,
"heading_open": self.heading_open,
"heading_close": self.heading_close,
"ordered_list_open": self.ordered_list_open,
"ordered_list_close": self.ordered_list_close,
"example_open": self.example_open,
"example_close": self.example_close,
"example_title_open": self.example_title_open,
"example_title_close": self.example_title_close,
"image": self.image,
"figure_open": self.figure_open,
"figure_close": self.figure_close,
"figure_title_open": self.figure_title_open,
"figure_title_close": self.figure_title_close,
"table_open": self.table_open,
"table_close": self.table_close,
"thead_open": self.thead_open,
"thead_close": self.thead_close,
"tr_open": self.tr_open,
"tr_close": self.tr_close,
"th_open": self.th_open,
"th_close": self.th_close,
"tbody_open": self.tbody_open,
"tbody_close": self.tbody_close,
"td_open": self.td_open,
"td_close": self.td_close,
"footnote_ref": self.footnote_ref,
"footnote_block_open": self.footnote_block_open,
"footnote_block_close": self.footnote_block_close,
"footnote_open": self.footnote_open,
"footnote_close": self.footnote_close,
"footnote_anchor": self.footnote_anchor,
}
self._admonitions = {
"note": (self.note_open, self.note_close),
"caution": (self.caution_open,self.caution_close),
"tip": (self.tip_open, self.tip_close),
"important": (self.important_open, self.important_close),
"warning": (self.warning_open, self.warning_close),
}
self._admonition_stack = []
def _join_block(self, ls: Iterable[str]) -> str:
return "".join(ls)
def _join_inline(self, ls: Iterable[str]) -> str:
return "".join(ls)
def admonition_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
tag = token.meta['kind']
self._admonition_stack.append(tag)
return self._admonitions[tag][0](token, tokens, i)
def admonition_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonitions[self._admonition_stack.pop()][1](token, tokens, i)
def render(self, tokens: Sequence[Token]) -> str:
def do_one(i: int, token: Token) -> str:
if token.type == "inline":
assert token.children is not None
return self.renderInline(token.children)
elif token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i)
else:
raise NotImplementedError("md token not supported yet", token)
return self._join_block(map(lambda arg: do_one(*arg), enumerate(tokens)))
def renderInline(self, tokens: Sequence[Token]) -> str:
def do_one(i: int, token: Token) -> str:
if token.type in self.rules:
return self.rules[token.type](tokens[i], tokens, i)
else:
raise NotImplementedError("md token not supported yet", token)
return self._join_inline(map(lambda arg: do_one(*arg), enumerate(tokens)))
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def example_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def example_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def example_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def image(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_title_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def figure_title_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def table_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def table_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def thead_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def thead_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tr_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tr_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def th_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def th_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tbody_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def tbody_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def td_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def td_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_ref(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_block_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_block_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def footnote_anchor(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported", token)
def _is_escaped(src: str, pos: int) -> bool:
found = 0
while pos >= 0 and src[pos] == '\\':
found += 1
pos -= 1
return found % 2 == 1
# the contents won't be split apart in the regex because spacing rules get messy here
_ATTR_SPAN_PATTERN = re.compile(r"\{([^}]*)\}")
# this one is for blocks with attrs. we want to use it with fullmatch() to deconstruct an info.
_ATTR_BLOCK_PATTERN = re.compile(r"\s*\{([^}]*)\}\s*")
def _parse_attrs(s: str) -> Optional[tuple[Optional[str], list[str]]]:
(id, classes) = (None, [])
for part in s.split():
if part.startswith('#'):
if id is not None:
return None # just bail on multiple ids instead of trying to recover
id = part[1:]
elif part.startswith('.'):
classes.append(part[1:])
else:
return None # no support for key=value attrs like in pandoc
return (id, classes)
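# A few illustrative inputs/outputs for _parse_attrs (not from the original source),
# assuming it receives the text between the braces of a block like "{#foo .bar}":
#   _parse_attrs("#foo .bar .baz")  ->  ('foo', ['bar', 'baz'])
#   _parse_attrs(".example")        ->  (None, ['example'])
#   _parse_attrs("#a #b")           ->  None   (multiple ids bail out)
#   _parse_attrs("key=value")       ->  None   (key=value attrs are unsupported)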
def _parse_blockattrs(info: str) -> Optional[tuple[AttrBlockKind, Optional[str], list[str]]]:
if (m := _ATTR_BLOCK_PATTERN.fullmatch(info)) is None:
return None
if (parsed_attrs := _parse_attrs(m[1])) is None:
return None
id, classes = parsed_attrs
    # check that we actually support this kind of block, and that it adheres to
    # whatever restrictions we want to enforce for that kind of block.
if len(classes) == 1 and classes[0] in get_args(AdmonitionKind):
# don't want to support ids for admonitions just yet
if id is not None:
return None
return ('admonition', id, classes)
if classes == ['example']:
return ('example', id, classes)
elif classes == ['figure']:
return ('figure', id, classes)
return None
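# Sketch of how block info strings map to kinds, assuming AdmonitionKind (defined
# earlier in this module) includes 'note'; the ids and classes shown are illustrative:
#   _parse_blockattrs("{.note}")             ->  ('admonition', None, ['note'])
#   _parse_blockattrs("{#ex-foo .example}")  ->  ('example', 'ex-foo', ['example'])
#   _parse_blockattrs("{#fig-bar .figure}")  ->  ('figure', 'fig-bar', ['figure'])
#   _parse_blockattrs("{#n .note}")          ->  None  (ids on admonitions are rejected)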
def _attr_span_plugin(md: markdown_it.MarkdownIt) -> None:
def attr_span(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool:
if state.src[state.pos] != '[':
return False
if _is_escaped(state.src, state.pos - 1):
return False
# treat the inline span like a link label for simplicity.
label_begin = state.pos + 1
label_end = markdown_it.helpers.parseLinkLabel(state, state.pos)
input_end = state.posMax
if label_end < 0:
return False
# match id and classes in any combination
match = _ATTR_SPAN_PATTERN.match(state.src[label_end + 1 : ])
if not match:
return False
if not silent:
if (parsed_attrs := _parse_attrs(match[1])) is None:
return False
id, classes = parsed_attrs
token = state.push("attr_span_begin", "span", 1) # type: ignore[no-untyped-call]
if id:
token.attrs['id'] = id
if classes:
token.attrs['class'] = " ".join(classes)
state.pos = label_begin
state.posMax = label_end
state.md.inline.tokenize(state)
state.push("attr_span_end", "span", -1) # type: ignore[no-untyped-call]
state.pos = label_end + match.end() + 1
state.posMax = input_end
return True
md.inline.ruler.before("link", "attr_span", attr_span)
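# Illustration of the inline syntax handled above (example text is made up): an input
# like "[some text]{#anchor-id .highlight}" produces an attr_span_begin token with
# attrs {'id': 'anchor-id', 'class': 'highlight'}, the tokenized inner text, and an
# attr_span_end token; an escaped "\[" or a span without a trailing "{...}" is left
# to the regular inline rules.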
def _inline_comment_plugin(md: markdown_it.MarkdownIt) -> None:
def inline_comment(state: markdown_it.rules_inline.StateInline, silent: bool) -> bool:
if state.src[state.pos : state.pos + 4] != '<!--':
return False
if _is_escaped(state.src, state.pos - 1):
return False
for i in range(state.pos + 4, state.posMax - 2):
if state.src[i : i + 3] == '-->': # -->
state.pos = i + 3
return True
return False
md.inline.ruler.after("autolink", "inline_comment", inline_comment)
def _block_comment_plugin(md: markdown_it.MarkdownIt) -> None:
def block_comment(state: markdown_it.rules_block.StateBlock, startLine: int, endLine: int,
silent: bool) -> bool:
pos = state.bMarks[startLine] + state.tShift[startLine]
posMax = state.eMarks[startLine]
if state.src[pos : pos + 4] != '<!--':
return False
nextLine = startLine
while nextLine < endLine:
pos = state.bMarks[nextLine] + state.tShift[nextLine]
posMax = state.eMarks[nextLine]
if state.src[posMax - 3 : posMax] == '-->':
state.line = nextLine + 1
return True
nextLine += 1
return False
md.block.ruler.after("code", "block_comment", block_comment)
_HEADER_ID_RE = re.compile(r"\s*\{\s*\#([\w.-]+)\s*\}\s*$")
def _heading_ids(md: markdown_it.MarkdownIt) -> None:
def heading_ids(state: markdown_it.rules_core.StateCore) -> None:
tokens = state.tokens
# this is purposely simple and doesn't support classes or other kinds of attributes.
for (i, token) in enumerate(tokens):
if token.type == 'heading_open':
children = tokens[i + 1].children
assert children is not None
if len(children) == 0 or children[-1].type != 'text':
continue
if m := _HEADER_ID_RE.search(children[-1].content):
tokens[i].attrs['id'] = m[1]
children[-1].content = children[-1].content[:-len(m[0])].rstrip()
md.core.ruler.before("replacements", "heading_ids", heading_ids)
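# Illustration (made-up heading): "## Installing NixOS {#sec-installation}" yields a
# heading_open token with attrs['id'] == 'sec-installation' and the trailing
# "{#sec-installation}" stripped from the heading text.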
def _footnote_ids(md: markdown_it.MarkdownIt) -> None:
"""generate ids for footnotes, their refs, and their backlinks. the ids we
generate here are derived from the footnote label, making numeric footnote
labels invalid.
"""
def generate_ids(tokens: Sequence[Token]) -> None:
for token in tokens:
if token.type == 'footnote_open':
if token.meta["label"][:1].isdigit():
assert token.map
raise RuntimeError(f"invalid footnote label in line {token.map[0] + 1}")
token.attrs['id'] = token.meta["label"]
elif token.type == 'footnote_anchor':
token.meta['target'] = f'{token.meta["label"]}.__back.{token.meta["subId"]}'
elif token.type == 'footnote_ref':
token.attrs['id'] = f'{token.meta["label"]}.__back.{token.meta["subId"]}'
token.meta['target'] = token.meta["label"]
elif token.type == 'inline':
assert token.children
generate_ids(token.children)
def footnote_ids(state: markdown_it.rules_core.StateCore) -> None:
generate_ids(state.tokens)
md.core.ruler.after("footnote_tail", "footnote_ids", footnote_ids)
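# Sketch of the resulting id scheme for a footnote written as "[^mynote]" with a
# matching "[^mynote]: ..." definition, assuming markdown-it gives the first
# reference subId 0: the definition gets id "mynote", the reference gets id
# "mynote.__back.0" targeting "mynote", and the backlink anchor targets
# "mynote.__back.0". A label starting with a digit, such as "[^1]", raises an error.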
def _compact_list_attr(md: markdown_it.MarkdownIt) -> None:
@dataclasses.dataclass
class Entry:
head: Token
end: int
compact: bool = True
def compact_list_attr(state: markdown_it.rules_core.StateCore) -> None:
        # markdown-it signifies compact (tight) lists by setting the wrapper paragraphs
        # of each item to hidden, and leaves them visible for wide lists. this is not
        # useful for our stylesheets, which signify compactness with a special css class
        # on list elements instead.
stack = []
for token in state.tokens:
if token.type in [ 'bullet_list_open', 'ordered_list_open' ]:
stack.append(Entry(token, cast(int, token.attrs.get('start', 1))))
elif token.type in [ 'bullet_list_close', 'ordered_list_close' ]:
lst = stack.pop()
lst.head.meta['compact'] = lst.compact
if token.type == 'ordered_list_close':
lst.head.meta['end'] = lst.end - 1
elif len(stack) > 0 and token.type == 'paragraph_open' and not token.hidden:
stack[-1].compact = False
elif token.type == 'list_item_open':
stack[-1].end += 1
md.core.ruler.push("compact_list_attr", compact_list_attr)
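# Net effect, sketched: a tight list such as "- a" / "- b" ends up with
# meta['compact'] == True on its bullet_list_open token, a loose list (blank lines
# between items, so its paragraphs are not hidden) gets meta['compact'] == False,
# and ordered lists additionally get meta['end'] set to the number of their last item.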
def _block_attr(md: markdown_it.MarkdownIt) -> None:
def assert_never(value: NoReturn) -> NoReturn:
assert False
def block_attr(state: markdown_it.rules_core.StateCore) -> None:
stack = []
for token in state.tokens:
if token.type == 'container_blockattr_open':
if (parsed_attrs := _parse_blockattrs(token.info)) is None:
# if we get here we've missed a possible case in the plugin validate function
raise RuntimeError("this should be unreachable")
kind, id, classes = parsed_attrs
if kind == 'admonition':
token.type = 'admonition_open'
token.meta['kind'] = classes[0]
stack.append('admonition_close')
elif kind == 'example':
token.type = 'example_open'
if id is not None:
token.attrs['id'] = id
stack.append('example_close')
elif kind == 'figure':
token.type = 'figure_open'
if id is not None:
token.attrs['id'] = id
stack.append('figure_close')
else:
assert_never(kind)
elif token.type == 'container_blockattr_close':
token.type = stack.pop()
md.core.ruler.push("block_attr", block_attr)
def _block_titles(block: str) -> Callable[[markdown_it.MarkdownIt], None]:
    """
    find title headings of blocks and stick them into meta for renderers, then
    remove them from the token stream. also checks whether any block contains a
    non-title heading since those would make toc generation extremely complicated.
    """
    open, close = f'{block}_open', f'{block}_close'
    title_open, title_close = f'{block}_title_open', f'{block}_title_close'
def block_titles(state: markdown_it.rules_core.StateCore) -> None:
in_example = [False]
for i, token in enumerate(state.tokens):
if token.type == open:
if state.tokens[i + 1].type == 'heading_open':
assert state.tokens[i + 3].type == 'heading_close'
state.tokens[i + 1].type = title_open
state.tokens[i + 3].type = title_close
else:
assert token.map
raise RuntimeError(f"found {block} without title in line {token.map[0] + 1}")
in_example.append(True)
elif token.type == close:
in_example.pop()
elif token.type == 'heading_open' and in_example[-1]:
assert token.map
raise RuntimeError(f"unexpected non-title heading in {block} in line {token.map[0] + 1}")
def do_add(md: markdown_it.MarkdownIt) -> None:
md.core.ruler.push(f"{block}_titles", block_titles)
return do_add
TR = TypeVar('TR', bound='Renderer')
class Converter(ABC, Generic[TR]):
# we explicitly disable markdown-it rendering support and use our own entirely.
# rendering is well separated from parsing and our renderers carry much more state than
# markdown-it easily acknowledges as 'good' (unless we used the untyped env args to
# shuttle that state around, which is very fragile)
class ForbiddenRenderer(markdown_it.renderer.RendererProtocol):
__output__ = "none"
def __init__(self, parser: Optional[markdown_it.MarkdownIt]):
pass
def render(self, tokens: Sequence[Token], options: OptionsDict,
env: MutableMapping[str, Any]) -> str:
raise NotImplementedError("do not use Converter._md.renderer. 'tis a silly place")
_renderer: TR
def __init__(self) -> None:
self._md = markdown_it.MarkdownIt(
"commonmark",
{
'maxNesting': 100, # default is 20
'html': False, # not useful since we target many formats
'typographer': True, # required for smartquotes
},
renderer_cls=self.ForbiddenRenderer
)
self._md.enable('table')
self._md.use(
container_plugin,
name="blockattr",
validate=lambda name, *args: _parse_blockattrs(name),
)
self._md.use(deflist_plugin)
self._md.use(footnote_plugin)
self._md.use(myst_role_plugin)
self._md.use(_attr_span_plugin)
self._md.use(_inline_comment_plugin)
self._md.use(_block_comment_plugin)
self._md.use(_heading_ids)
self._md.use(_footnote_ids)
self._md.use(_compact_list_attr)
self._md.use(_block_attr)
self._md.use(_block_titles("example"))
self._md.use(_block_titles("figure"))
self._md.enable(["smartquotes", "replacements"])
def _parse(self, src: str) -> list[Token]:
return self._md.parse(src, {})
def _render(self, src: str) -> str:
tokens = self._parse(src)
return self._renderer.render(tokens)
| 29,769 | 47.327922 | 105 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/utils.py
|
from typing import Any
_frozen_classes: dict[type, type] = {}
# make a derived class freezable (ie, disallow modifications).
# we do this by changing the class of an instance at runtime when freeze()
# is called, providing a derived class that is exactly the same except
# for a __setattr__ that raises an error when called. this beats having
# a field for frozenness and an unconditional __setattr__ that checks this
# field because it does not insert anything into the class dict.
class Freezeable:
def freeze(self) -> None:
cls = type(self)
if not (frozen := _frozen_classes.get(cls)):
def __setattr__(instance: Any, n: str, v: Any) -> None:
raise TypeError(f'{cls.__name__} is frozen')
frozen = type(cls.__name__, (cls,), {
'__setattr__': __setattr__,
})
_frozen_classes[cls] = frozen
self.__class__ = frozen
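# Minimal usage sketch (the Point class is illustrative, not part of nixpkgs):
#
#   import dataclasses
#
#   @dataclasses.dataclass
#   class Point(Freezeable):
#       x: int = 0
#
#   p = Point()
#   p.x = 1      # allowed before freezing
#   p.freeze()
#   p.x = 2      # raises TypeError: Point is frozen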
| 923 | 41 | 74 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/types.py
|
from collections.abc import Sequence
from typing import Callable, Optional, NamedTuple
from markdown_it.token import Token
OptionLoc = str | dict[str, str]
Option = dict[str, str | dict[str, str] | list[OptionLoc]]
class RenderedOption(NamedTuple):
loc: list[str]
lines: list[str]
links: Optional[list[str]] = None
RenderFn = Callable[[Token, Sequence[Token], int], str]
| 387 | 24.866667 | 58 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manual_structure.py
|
from __future__ import annotations
import dataclasses as dc
import html
import itertools
from typing import cast, get_args, Iterable, Literal, Sequence
from markdown_it.token import Token
from .utils import Freezeable
# FragmentType is used to restrict structural include blocks.
FragmentType = Literal['preface', 'part', 'chapter', 'section', 'appendix']
# in the TOC all fragments are allowed, plus the all-encompassing book.
TocEntryType = Literal['book', 'preface', 'part', 'chapter', 'section', 'appendix', 'example', 'figure']
def is_include(token: Token) -> bool:
return token.type == "fence" and token.info.startswith("{=include=} ")
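# Such a block is written in the manual sources roughly as a fenced code block whose
# info string is "{=include=}" followed by a fragment type, e.g. (file list made up):
#
#   ```{=include=} chapters
#   installation.md
#   configuration.md
#   ```
#
# with one included file per line.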
# toplevel file must contain only the title headings and includes, anything else
# would cause strange rendering.
def _check_book_structure(tokens: Sequence[Token]) -> None:
for token in tokens[6:]:
if not is_include(token):
assert token.map
raise RuntimeError(f"unexpected content in line {token.map[0] + 1}, "
"expected structural include")
# much like books, parts may not contain headings other than their title heading.
# this is a limitation of the current renderers and TOC generators that do not handle
# this case well even though it is supported in docbook (and probably supportable
# anywhere else).
def _check_part_structure(tokens: Sequence[Token]) -> None:
_check_fragment_structure(tokens)
for token in tokens[3:]:
if token.type == 'heading_open':
assert token.map
raise RuntimeError(f"unexpected heading in line {token.map[0] + 1}")
# two include blocks must either be adjacent or separated by a heading, otherwise
# we cannot generate a correct TOC (since there'd be nothing to link to between
# the two includes).
def _check_fragment_structure(tokens: Sequence[Token]) -> None:
for i, token in enumerate(tokens):
if is_include(token) \
and i + 1 < len(tokens) \
and not (is_include(tokens[i + 1]) or tokens[i + 1].type == 'heading_open'):
assert token.map
raise RuntimeError(f"unexpected content in line {token.map[0] + 1}, "
"expected heading or structural include")
def check_structure(kind: TocEntryType, tokens: Sequence[Token]) -> None:
wanted = { 'h1': 'title' }
wanted |= { 'h2': 'subtitle' } if kind == 'book' else {}
for (i, (tag, role)) in enumerate(wanted.items()):
if len(tokens) < 3 * (i + 1):
raise RuntimeError(f"missing {role} ({tag}) heading")
token = tokens[3 * i]
if token.type != 'heading_open' or token.tag != tag:
assert token.map
raise RuntimeError(f"expected {role} ({tag}) heading in line {token.map[0] + 1}", token)
for t in tokens[3 * len(wanted):]:
if t.type != 'heading_open' or not (role := wanted.get(t.tag, '')):
continue
assert t.map
raise RuntimeError(
f"only one {role} heading ({t.markup} [text...]) allowed per "
f"{kind}, but found a second in line {t.map[0] + 1}. "
"please remove all such headings except the first or demote the subsequent headings.",
t)
last_heading_level = 0
for token in tokens:
if token.type != 'heading_open':
continue
# book subtitle headings do not need an id, only book title headings do.
        # every other heading needs one too. we need this to build a TOC and to
# provide stable links if the manual changes shape.
if 'id' not in token.attrs and (kind != 'book' or token.tag != 'h2'):
assert token.map
raise RuntimeError(f"heading in line {token.map[0] + 1} does not have an id")
level = int(token.tag[1:]) # because tag = h1..h6
if level > last_heading_level + 1:
assert token.map
raise RuntimeError(f"heading in line {token.map[0] + 1} skips one or more heading levels, "
"which is currently not allowed")
last_heading_level = level
if kind == 'book':
_check_book_structure(tokens)
elif kind == 'part':
_check_part_structure(tokens)
else:
_check_fragment_structure(tokens)
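# Sketch of a fragment that passes these checks for a chapter (headings are made up):
# a single title heading with an explicit id, e.g. "# Networking {#ch-networking}",
# optionally followed by lower-level headings such as "## Firewall {#sec-firewall}".
# A second h1, a heading without an id, or jumping from h1 straight to h3 raises a
# RuntimeError above.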
@dc.dataclass(frozen=True)
class XrefTarget:
id: str
"""link label for `[](#local-references)`"""
title_html: str
"""toc label"""
toc_html: str | None
"""text for `<title>` tags and `title="..."` attributes"""
title: str | None
"""path to file that contains the anchor"""
path: str
"""whether to drop the `#anchor` from links when expanding xrefs"""
drop_fragment: bool = False
"""whether to drop the `path.html` from links when expanding xrefs.
mostly useful for docbook compatibility"""
drop_target: bool = False
def href(self) -> str:
path = "" if self.drop_target else html.escape(self.path, True)
return path if self.drop_fragment else f"{path}#{html.escape(self.id, True)}"
@dc.dataclass
class TocEntry(Freezeable):
kind: TocEntryType
target: XrefTarget
parent: TocEntry | None = None
prev: TocEntry | None = None
next: TocEntry | None = None
children: list[TocEntry] = dc.field(default_factory=list)
starts_new_chunk: bool = False
examples: list[TocEntry] = dc.field(default_factory=list)
figures: list[TocEntry] = dc.field(default_factory=list)
@property
def root(self) -> TocEntry:
return self.parent.root if self.parent else self
@classmethod
def of(cls, token: Token) -> TocEntry:
entry = token.meta.get('TocEntry')
if not isinstance(entry, TocEntry):
raise RuntimeError('requested toc entry, none found', token)
return entry
@classmethod
def collect_and_link(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token]) -> TocEntry:
entries, examples, figures = cls._collect_entries(xrefs, tokens, 'book')
def flatten_with_parent(this: TocEntry, parent: TocEntry | None) -> Iterable[TocEntry]:
this.parent = parent
return itertools.chain([this], *[ flatten_with_parent(c, this) for c in this.children ])
flat = list(flatten_with_parent(entries, None))
prev = flat[0]
prev.starts_new_chunk = True
paths_seen = set([prev.target.path])
for c in flat[1:]:
if prev.target.path != c.target.path and c.target.path not in paths_seen:
c.starts_new_chunk = True
c.prev, prev.next = prev, c
prev = c
paths_seen.add(c.target.path)
flat[0].examples = examples
flat[0].figures = figures
for c in flat:
c.freeze()
return entries
@classmethod
def _collect_entries(cls, xrefs: dict[str, XrefTarget], tokens: Sequence[Token],
kind: TocEntryType) -> tuple[TocEntry, list[TocEntry], list[TocEntry]]:
# we assume that check_structure has been run recursively over the entire input.
# list contains (tag, entry) pairs that will collapse to a single entry for
# the full sequence.
entries: list[tuple[str, TocEntry]] = []
examples: list[TocEntry] = []
figures: list[TocEntry] = []
for token in tokens:
if token.type.startswith('included_') and (included := token.meta.get('included')):
fragment_type_str = token.type[9:].removesuffix('s')
assert fragment_type_str in get_args(TocEntryType)
fragment_type = cast(TocEntryType, fragment_type_str)
for fragment, _path in included:
subentries, subexamples, subfigures = cls._collect_entries(xrefs, fragment, fragment_type)
entries[-1][1].children.append(subentries)
examples += subexamples
figures += subfigures
elif token.type == 'heading_open' and (id := cast(str, token.attrs.get('id', ''))):
while len(entries) > 1 and entries[-1][0] >= token.tag:
entries[-2][1].children.append(entries.pop()[1])
entries.append((token.tag,
TocEntry(kind if token.tag == 'h1' else 'section', xrefs[id])))
token.meta['TocEntry'] = entries[-1][1]
elif token.type == 'example_open' and (id := cast(str, token.attrs.get('id', ''))):
examples.append(TocEntry('example', xrefs[id]))
elif token.type == 'figure_open' and (id := cast(str, token.attrs.get('id', ''))):
figures.append(TocEntry('figure', xrefs[id]))
while len(entries) > 1:
entries[-2][1].children.append(entries.pop()[1])
return (entries[0][1], examples, figures)
| 8,847 | 42.372549 | 110 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/options.py
|
from __future__ import annotations
import argparse
import html
import json
import xml.sax.saxutils as xml
from abc import abstractmethod
from collections.abc import Mapping, Sequence
from markdown_it.token import Token
from typing import Any, Generic, Optional
from urllib.parse import quote
from . import md
from . import parallel
from .asciidoc import AsciiDocRenderer, asciidoc_escape
from .commonmark import CommonMarkRenderer
from .docbook import DocBookRenderer, make_xml_id
from .html import HTMLRenderer
from .manpage import ManpageRenderer, man_escape
from .manual_structure import XrefTarget
from .md import Converter, md_escape, md_make_code
from .types import OptionLoc, Option, RenderedOption
def option_is(option: Option, key: str, typ: str) -> Optional[dict[str, str]]:
if key not in option:
return None
if type(option[key]) != dict:
return None
if option[key].get('_type') != typ: # type: ignore[union-attr]
return None
return option[key] # type: ignore[return-value]
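# Illustration (option values are made up): for an option with
# default = {'_type': 'literalExpression', 'text': '"/var/lib/foo"'},
# option_is(option, 'default', 'literalExpression') returns that dict, while a plain
# string default (or a missing key) yields None.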
class BaseConverter(Converter[md.TR], Generic[md.TR]):
__option_block_separator__: str
_options: dict[str, RenderedOption]
def __init__(self, revision: str):
super().__init__()
self._options = {}
self._revision = revision
def _sorted_options(self) -> list[tuple[str, RenderedOption]]:
keys = list(self._options.keys())
keys.sort(key=lambda opt: [ (0 if p.startswith("enable") else 1 if p.startswith("package") else 2, p)
for p in self._options[opt].loc ])
return [ (k, self._options[k]) for k in keys ]
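    # Net effect of the sort key, sketched with made-up option names: within the same
    # parent path, components starting with "enable" sort first, then components
    # starting with "package", then everything else alphabetically, e.g.
    # services.foo.enable < services.foo.package < services.foo.settings.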
def _format_decl_def_loc(self, loc: OptionLoc) -> tuple[Optional[str], str]:
# locations can be either plain strings (specific to nixpkgs), or attrsets
# { name = "foo/bar.nix"; url = "https://github.com/....."; }
if isinstance(loc, str):
# Hyperlink the filename either to the NixOS github
# repository (if it’s a module and we have a revision number),
# or to the local filesystem.
if not loc.startswith('/'):
if self._revision == 'local':
href = f"https://github.com/NixOS/nixpkgs/blob/master/{loc}"
else:
href = f"https://github.com/NixOS/nixpkgs/blob/{self._revision}/{loc}"
else:
href = f"file://{loc}"
# Print the filename and make it user-friendly by replacing the
# /nix/store/<hash> prefix by the default location of nixos
# sources.
if not loc.startswith('/'):
name = f"<nixpkgs/{loc}>"
elif 'nixops' in loc and '/nix/' in loc:
name = f"<nixops/{loc[loc.find('/nix/') + 5:]}>"
else:
name = loc
return (href, name)
else:
return (loc['url'] if 'url' in loc else None, loc['name'])
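    # Illustrative mappings (paths and revision are made up): a relative location
    # "nixos/modules/misc/locate.nix" with revision "abcd123" becomes
    # ("https://github.com/NixOS/nixpkgs/blob/abcd123/nixos/modules/misc/locate.nix",
    # "<nixpkgs/nixos/modules/misc/locate.nix>"), while an absolute path such as
    # "/etc/nixos/custom.nix" becomes ("file:///etc/nixos/custom.nix", "/etc/nixos/custom.nix").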
@abstractmethod
def _decl_def_header(self, header: str) -> list[str]: raise NotImplementedError()
@abstractmethod
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]: raise NotImplementedError()
@abstractmethod
def _decl_def_footer(self) -> list[str]: raise NotImplementedError()
def _render_decl_def(self, header: str, locs: list[OptionLoc]) -> list[str]:
result = []
result += self._decl_def_header(header)
for loc in locs:
href, name = self._format_decl_def_loc(loc)
result += self._decl_def_entry(href, name)
result += self._decl_def_footer()
return result
def _render_code(self, option: Option, key: str) -> list[str]:
if lit := option_is(option, key, 'literalMD'):
return [ self._render(f"*{key.capitalize()}:*\n{lit['text']}") ]
elif lit := option_is(option, key, 'literalExpression'):
code = md_make_code(lit['text'])
return [ self._render(f"*{key.capitalize()}:*\n{code}") ]
elif key in option:
raise Exception(f"{key} has unrecognized type", option[key])
else:
return []
def _render_description(self, desc: str | dict[str, str]) -> list[str]:
if isinstance(desc, str):
return [ self._render(desc) ] if desc else []
elif isinstance(desc, dict) and desc.get('_type') == 'mdDoc':
return [ self._render(desc['text']) ] if desc['text'] else []
else:
raise Exception("description has unrecognized type", desc)
@abstractmethod
def _related_packages_header(self) -> list[str]: raise NotImplementedError()
def _convert_one(self, option: dict[str, Any]) -> list[str]:
blocks: list[list[str]] = []
if desc := option.get('description'):
blocks.append(self._render_description(desc))
if typ := option.get('type'):
ro = " *(read only)*" if option.get('readOnly', False) else ""
blocks.append([ self._render(f"*Type:*\n{md_escape(typ)}{ro}") ])
if option.get('default'):
blocks.append(self._render_code(option, 'default'))
if option.get('example'):
blocks.append(self._render_code(option, 'example'))
if related := option.get('relatedPackages'):
blocks.append(self._related_packages_header())
blocks[-1].append(self._render(related))
if decl := option.get('declarations'):
blocks.append(self._render_decl_def("Declared by", decl))
if defs := option.get('definitions'):
blocks.append(self._render_decl_def("Defined by", defs))
for part in [ p for p in blocks[0:-1] if p ]:
part.append(self.__option_block_separator__)
return [ l for part in blocks for l in part ]
# this could return a TState parameter, but that does not allow dependent types and
# will cause headaches when using BaseConverter as a type bound anywhere. Any is the
# next best thing we can use, and since this is internal it will be mostly safe.
@abstractmethod
def _parallel_render_prepare(self) -> Any: raise NotImplementedError()
# this should return python 3.11's Self instead to ensure that a prepare+finish
# round-trip ends up with an object of the same type. for now we'll use BaseConverter
# since it's good enough so far.
@classmethod
@abstractmethod
def _parallel_render_init_worker(cls, a: Any) -> BaseConverter[md.TR]: raise NotImplementedError()
def _render_option(self, name: str, option: dict[str, Any]) -> RenderedOption:
try:
return RenderedOption(option['loc'], self._convert_one(option))
except Exception as e:
raise Exception(f"Failed to render option {name}") from e
@classmethod
def _parallel_render_step(cls, s: BaseConverter[md.TR], a: Any) -> RenderedOption:
return s._render_option(*a)
def add_options(self, options: dict[str, Any]) -> None:
mapped = parallel.map(self._parallel_render_step, options.items(), 100,
self._parallel_render_init_worker, self._parallel_render_prepare())
for (name, option) in zip(options.keys(), mapped):
self._options[name] = option
@abstractmethod
def finalize(self) -> str: raise NotImplementedError()
class OptionDocsRestrictions:
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in options doc", token)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in options doc", token)
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in options doc", token)
def example_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in options doc", token)
class OptionsDocBookRenderer(OptionDocsRestrictions, DocBookRenderer):
# TODO keep optionsDocBook diff small. remove soon if rendering is still good.
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
token.meta['compact'] = False
return super().ordered_list_open(token, tokens, i)
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
token.meta['compact'] = False
return super().bullet_list_open(token, tokens, i)
class DocBookConverter(BaseConverter[OptionsDocBookRenderer]):
__option_block_separator__ = ""
def __init__(self, manpage_urls: Mapping[str, str],
revision: str,
document_type: str,
varlist_id: str,
id_prefix: str):
super().__init__(revision)
self._renderer = OptionsDocBookRenderer(manpage_urls)
self._document_type = document_type
self._varlist_id = varlist_id
self._id_prefix = id_prefix
def _parallel_render_prepare(self) -> Any:
return (self._renderer._manpage_urls, self._revision, self._document_type,
self._varlist_id, self._id_prefix)
@classmethod
def _parallel_render_init_worker(cls, a: Any) -> DocBookConverter:
return cls(*a)
def _related_packages_header(self) -> list[str]:
return [
"<para>",
" <emphasis>Related packages:</emphasis>",
"</para>",
]
def _decl_def_header(self, header: str) -> list[str]:
return [
f"<para><emphasis>{header}:</emphasis></para>",
"<simplelist>"
]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
if href is not None:
href = " xlink:href=" + xml.quoteattr(href)
return [
f"<member><filename{href}>",
xml.escape(name),
"</filename></member>"
]
def _decl_def_footer(self) -> list[str]:
return [ "</simplelist>" ]
def finalize(self, *, fragment: bool = False) -> str:
result = []
if not fragment:
result.append('<?xml version="1.0" encoding="UTF-8"?>')
if self._document_type == 'appendix':
result += [
'<appendix xmlns="http://docbook.org/ns/docbook"',
' xml:id="appendix-configuration-options">',
' <title>Configuration Options</title>',
]
result += [
'<variablelist xmlns:xlink="http://www.w3.org/1999/xlink"',
' xmlns:nixos="tag:nixos.org"',
' xmlns="http://docbook.org/ns/docbook"',
f' xml:id="{self._varlist_id}">',
]
for (name, opt) in self._sorted_options():
id = make_xml_id(self._id_prefix + name)
result += [
"<varlistentry>",
# NOTE adding extra spaces here introduces spaces into xref link expansions
(f"<term xlink:href={xml.quoteattr('#' + id)} xml:id={xml.quoteattr(id)}>" +
f"<option>{xml.escape(name)}</option></term>"),
"<listitem>"
]
result += opt.lines
result += [
"</listitem>",
"</varlistentry>"
]
result.append("</variablelist>")
if self._document_type == 'appendix':
result.append("</appendix>")
return "\n".join(result)
class OptionsManpageRenderer(OptionDocsRestrictions, ManpageRenderer):
pass
class ManpageConverter(BaseConverter[OptionsManpageRenderer]):
__option_block_separator__ = ".sp"
_options_by_id: dict[str, str]
_links_in_last_description: Optional[list[str]] = None
def __init__(self, revision: str,
*,
# only for parallel rendering
_options_by_id: Optional[dict[str, str]] = None):
super().__init__(revision)
self._options_by_id = _options_by_id or {}
self._renderer = OptionsManpageRenderer({}, self._options_by_id)
def _parallel_render_prepare(self) -> Any:
return (self._revision, { '_options_by_id': self._options_by_id })
@classmethod
def _parallel_render_init_worker(cls, a: Any) -> ManpageConverter:
return cls(a[0], **a[1])
def _render_option(self, name: str, option: dict[str, Any]) -> RenderedOption:
links = self._renderer.link_footnotes = []
result = super()._render_option(name, option)
self._renderer.link_footnotes = None
return result._replace(links=links)
def add_options(self, options: dict[str, Any]) -> None:
for (k, v) in options.items():
self._options_by_id[f'#{make_xml_id(f"opt-{k}")}'] = k
return super().add_options(options)
def _render_code(self, option: dict[str, Any], key: str) -> list[str]:
try:
self._renderer.inline_code_is_quoted = False
return super()._render_code(option, key)
finally:
self._renderer.inline_code_is_quoted = True
def _related_packages_header(self) -> list[str]:
return [
'\\fIRelated packages:\\fP',
'.sp',
]
def _decl_def_header(self, header: str) -> list[str]:
return [
f'\\fI{man_escape(header)}:\\fP',
]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
return [
'.RS 4',
f'\\fB{man_escape(name)}\\fP',
'.RE'
]
def _decl_def_footer(self) -> list[str]:
return []
def finalize(self) -> str:
result = []
result += [
r'''.TH "CONFIGURATION\&.NIX" "5" "01/01/1980" "NixOS" "NixOS Reference Pages"''',
r'''.\" disable hyphenation''',
r'''.nh''',
r'''.\" disable justification (adjust text to left margin only)''',
r'''.ad l''',
r'''.\" enable line breaks after slashes''',
r'''.cflags 4 /''',
r'''.SH "NAME"''',
self._render('{file}`configuration.nix` - NixOS system configuration specification'),
r'''.SH "DESCRIPTION"''',
r'''.PP''',
self._render('The file {file}`/etc/nixos/configuration.nix` contains the '
'declarative specification of your NixOS system configuration. '
'The command {command}`nixos-rebuild` takes this file and '
'realises the system configuration specified therein.'),
r'''.SH "OPTIONS"''',
r'''.PP''',
self._render('You can use the following options in {file}`configuration.nix`.'),
]
for (name, opt) in self._sorted_options():
result += [
".PP",
f"\\fB{man_escape(name)}\\fR",
".RS 4",
]
result += opt.lines
if links := opt.links:
result.append(self.__option_block_separator__)
md_links = ""
for i in range(0, len(links)):
md_links += "\n" if i > 0 else ""
if links[i].startswith('#opt-'):
md_links += f"{i+1}. see the {{option}}`{self._options_by_id[links[i]]}` option"
else:
md_links += f"{i+1}. " + md_escape(links[i])
result.append(self._render(md_links))
result.append(".RE")
result += [
r'''.SH "AUTHORS"''',
r'''.PP''',
r'''Eelco Dolstra and the Nixpkgs/NixOS contributors''',
]
return "\n".join(result)
class OptionsCommonMarkRenderer(OptionDocsRestrictions, CommonMarkRenderer):
pass
class CommonMarkConverter(BaseConverter[OptionsCommonMarkRenderer]):
__option_block_separator__ = ""
def __init__(self, manpage_urls: Mapping[str, str], revision: str):
super().__init__(revision)
self._renderer = OptionsCommonMarkRenderer(manpage_urls)
def _parallel_render_prepare(self) -> Any:
return (self._renderer._manpage_urls, self._revision)
@classmethod
def _parallel_render_init_worker(cls, a: Any) -> CommonMarkConverter:
return cls(*a)
def _related_packages_header(self) -> list[str]:
return [ "*Related packages:*" ]
def _decl_def_header(self, header: str) -> list[str]:
return [ f"*{header}:*" ]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
if href is not None:
return [ f" - [{md_escape(name)}]({href})" ]
return [ f" - {md_escape(name)}" ]
def _decl_def_footer(self) -> list[str]:
return []
def finalize(self) -> str:
result = []
for (name, opt) in self._sorted_options():
result.append(f"## {md_escape(name)}\n")
result += opt.lines
result.append("\n\n")
return "\n".join(result)
class OptionsAsciiDocRenderer(OptionDocsRestrictions, AsciiDocRenderer):
pass
class AsciiDocConverter(BaseConverter[OptionsAsciiDocRenderer]):
__option_block_separator__ = ""
def __init__(self, manpage_urls: Mapping[str, str], revision: str):
super().__init__(revision)
self._renderer = OptionsAsciiDocRenderer(manpage_urls)
def _parallel_render_prepare(self) -> Any:
return (self._renderer._manpage_urls, self._revision)
@classmethod
def _parallel_render_init_worker(cls, a: Any) -> AsciiDocConverter:
return cls(*a)
def _related_packages_header(self) -> list[str]:
return [ "__Related packages:__" ]
def _decl_def_header(self, header: str) -> list[str]:
return [ f"__{header}:__\n" ]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
if href is not None:
return [ f"* link:{quote(href, safe='/:')}[{asciidoc_escape(name)}]" ]
return [ f"* {asciidoc_escape(name)}" ]
def _decl_def_footer(self) -> list[str]:
return []
def finalize(self) -> str:
result = []
for (name, opt) in self._sorted_options():
result.append(f"== {asciidoc_escape(name)}\n")
result += opt.lines
result.append("\n\n")
return "\n".join(result)
class OptionsHTMLRenderer(OptionDocsRestrictions, HTMLRenderer):
# TODO docbook compat. must be removed together with the matching docbook handlers.
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
token.meta['compact'] = False
return super().ordered_list_open(token, tokens, i)
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
token.meta['compact'] = False
return super().bullet_list_open(token, tokens, i)
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# TODO use token.info. docbook doesn't so we can't yet.
return f'<pre class="programlisting">{html.escape(token.content)}</pre>'
class HTMLConverter(BaseConverter[OptionsHTMLRenderer]):
__option_block_separator__ = ""
def __init__(self, manpage_urls: Mapping[str, str], revision: str,
varlist_id: str, id_prefix: str, xref_targets: Mapping[str, XrefTarget]):
super().__init__(revision)
self._xref_targets = xref_targets
self._varlist_id = varlist_id
self._id_prefix = id_prefix
self._renderer = OptionsHTMLRenderer(manpage_urls, self._xref_targets)
def _parallel_render_prepare(self) -> Any:
return (self._renderer._manpage_urls, self._revision,
self._varlist_id, self._id_prefix, self._xref_targets)
@classmethod
def _parallel_render_init_worker(cls, a: Any) -> HTMLConverter:
return cls(*a)
def _related_packages_header(self) -> list[str]:
return [
'<p><span class="emphasis"><em>Related packages:</em></span></p>',
]
def _decl_def_header(self, header: str) -> list[str]:
return [
f'<p><span class="emphasis"><em>{header}:</em></span></p>',
'<table border="0" summary="Simple list" class="simplelist">'
]
def _decl_def_entry(self, href: Optional[str], name: str) -> list[str]:
if href is not None:
href = f' href="{html.escape(href, True)}"'
return [
"<tr><td>",
f'<code class="filename"><a class="filename" {href} target="_top">',
f'{html.escape(name)}',
'</a></code>',
"</td></tr>"
]
def _decl_def_footer(self) -> list[str]:
return [ "</table>" ]
def finalize(self) -> str:
result = []
result += [
'<div class="variablelist">',
f'<a id="{html.escape(self._varlist_id, True)}"></a>',
' <dl class="variablelist">',
]
for (name, opt) in self._sorted_options():
id = make_xml_id(self._id_prefix + name)
target = self._xref_targets[id]
result += [
'<dt>',
' <span class="term">',
# docbook compat, these could be one tag
f' <a id="{html.escape(id, True)}"></a><a class="term" href="{target.href()}">'
# no spaces here (and string merging) for docbook output compat
f'<code class="option">{html.escape(name)}</code>',
' </a>',
' </span>',
'</dt>',
'<dd>',
]
result += opt.lines
result += [
"</dd>",
]
result += [
" </dl>",
"</div>"
]
return "\n".join(result)
def _build_cli_db(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument('--document-type', required=True)
p.add_argument('--varlist-id', required=True)
p.add_argument('--id-prefix', required=True)
p.add_argument("infile")
p.add_argument("outfile")
def _build_cli_manpage(p: argparse.ArgumentParser) -> None:
p.add_argument('--revision', required=True)
p.add_argument("infile")
p.add_argument("outfile")
def _build_cli_commonmark(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument("infile")
p.add_argument("outfile")
def _build_cli_asciidoc(p: argparse.ArgumentParser) -> None:
p.add_argument('--manpage-urls', required=True)
p.add_argument('--revision', required=True)
p.add_argument("infile")
p.add_argument("outfile")
def _run_cli_db(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = DocBookConverter(
json.load(manpage_urls),
revision = args.revision,
document_type = args.document_type,
varlist_id = args.varlist_id,
id_prefix = args.id_prefix)
with open(args.infile, 'r') as f:
md.add_options(json.load(f))
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def _run_cli_manpage(args: argparse.Namespace) -> None:
md = ManpageConverter(revision = args.revision)
with open(args.infile, 'r') as f:
md.add_options(json.load(f))
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def _run_cli_commonmark(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = CommonMarkConverter(json.load(manpage_urls), revision = args.revision)
with open(args.infile, 'r') as f:
md.add_options(json.load(f))
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def _run_cli_asciidoc(args: argparse.Namespace) -> None:
with open(args.manpage_urls, 'r') as manpage_urls:
md = AsciiDocConverter(json.load(manpage_urls), revision = args.revision)
with open(args.infile, 'r') as f:
md.add_options(json.load(f))
with open(args.outfile, 'w') as f:
f.write(md.finalize())
def build_cli(p: argparse.ArgumentParser) -> None:
formats = p.add_subparsers(dest='format', required=True)
_build_cli_db(formats.add_parser('docbook'))
_build_cli_manpage(formats.add_parser('manpage'))
_build_cli_commonmark(formats.add_parser('commonmark'))
_build_cli_asciidoc(formats.add_parser('asciidoc'))
def run_cli(args: argparse.Namespace) -> None:
if args.format == 'docbook':
_run_cli_db(args)
elif args.format == 'manpage':
_run_cli_manpage(args)
elif args.format == 'commonmark':
_run_cli_commonmark(args)
elif args.format == 'asciidoc':
_run_cli_asciidoc(args)
else:
raise RuntimeError('format not hooked up', args)
| 25,072 | 37.633282 | 109 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/manpage.py
|
from collections.abc import Mapping, Sequence
from dataclasses import dataclass
from typing import cast, Iterable, Optional
import re
from markdown_it.token import Token
from .md import Renderer
# roff(7) says:
#
# > roff documents may contain only graphable 7-bit ASCII characters, the space character,
# > and, in certain circumstances, the tab character. The backslash character ‘\’ indicates
# > the start of an escape sequence […]
#
# mandoc_char(7) says about the `'~^ characters:
#
# > In prose, this automatic substitution is often desirable; but when these characters have
# > to be displayed as plain ASCII characters, for example in source code samples, they require
# > escaping to render as follows:
#
# since we don't want these to be touched anywhere (because markdown will do all substitutions
# we want to have) we'll escape those as well. we also escape " (macro metacharacter), - (might
# turn into a typographic hyphen), and . (roff request marker at SOL, changes spacing semantics
# at EOL). groff additionally does not allow unicode escapes for codepoints below U+0080, so
# those need "proper" roff escapes/replacements instead.
_roff_unicode = re.compile(r'''[^\n !#$%&()*+,\-./0-9:;<=>?@A-Z[\\\]_a-z{|}]''', re.ASCII)
_roff_escapes = {
ord('"'): "\\(dq",
ord("'"): "\\(aq",
ord('-'): "\\-",
ord('.'): "\\&.",
ord('\\'): "\\e",
ord('^'): "\\(ha",
ord('`'): "\\(ga",
ord('~'): "\\(ti",
}
def man_escape(s: str) -> str:
s = s.translate(_roff_escapes)
return _roff_unicode.sub(lambda m: f"\\[u{ord(m[0]):04X}]", s)
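# A few illustrative escapes (inputs are made up), written as the resulting roff text:
#   man_escape("a-b.c")   produces  a\-b\&.c
#   man_escape("it's")    produces  it\(aqs
#   man_escape("héllo")   produces  h\[u00E9]llo   (non-ASCII becomes a unicode escape)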
# remove leading and trailing spaces from links and condense multiple consecutive spaces
# into a single space for presentation parity with html. this is currently easiest with
# regex postprocessing and some marker characters. since we don't want to drop spaces
# from code blocks we will have to specially protect *inline* code (luckily not block code)
# so normalization can turn the spaces inside it into regular spaces again.
_normalize_space_re = re.compile(r'''\u0000 < *| *>\u0000 |(?<= ) +''')
def _normalize_space(s: str) -> str:
return _normalize_space_re.sub("", s).replace("\0p", " ")
def _protect_spaces(s: str) -> str:
return s.replace(" ", "\0p")
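# Sketch of the normalization: link_open/link_close below wrap their output in the
# markers "\0 <" and ">\0 ", so a rendered fragment like "\fBfoo\0 < bar >\0 \fR"
# normalizes to "\fBfoobar\fR"; runs of plain spaces collapse to one, while spaces
# inside inline code survive because they were turned into "\0p" by _protect_spaces.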
@dataclass(kw_only=True)
class List:
width: int
next_idx: Optional[int] = None
compact: bool
first_item_seen: bool = False
# this renderer assumes that it produces a set of lines as output, and that those lines will
# be pasted as-is into a larger output. no prefixing or suffixing is allowed for correctness.
#
# NOTE that we output exclusively physical markup. this is because we have to use the older
# mandoc(7) format instead of the newer mdoc(7) format due to limitations in groff: while
# using mdoc in groff works fine it is not a native format and thus very slow to render on
# manpages as large as configuration.nix.5. mandoc(1) renders both really quickly, but with
# groff being our predominant manpage viewer we have to optimize for groff instead.
#
# while we do use only physical markup (adjusting indentation with .RS and .RE, adding
# vertical spacing with .sp, \f[BIRP] escapes for bold/italic/roman/$previous font, \h for
# horizontal motion in a line) we do attempt to copy the style of mdoc(7) semantic requests
# as appropriate for each markup element.
class ManpageRenderer(Renderer):
# whether to emit mdoc .Ql equivalents for inline code or just the contents. this is
# mainly used by the options manpage converter to not emit extra quotes in defaults
# and examples where it's already clear from context that the following text is code.
inline_code_is_quoted: bool = True
link_footnotes: Optional[list[str]] = None
_href_targets: dict[str, str]
_link_stack: list[str]
_do_parbreak_stack: list[bool]
_list_stack: list[List]
_font_stack: list[str]
def __init__(self, manpage_urls: Mapping[str, str], href_targets: dict[str, str]):
super().__init__(manpage_urls)
self._href_targets = href_targets
self._link_stack = []
self._do_parbreak_stack = []
self._list_stack = []
self._font_stack = []
def _join_block(self, ls: Iterable[str]) -> str:
return "\n".join([ l for l in ls if len(l) ])
def _join_inline(self, ls: Iterable[str]) -> str:
return _normalize_space(super()._join_inline(ls))
def _enter_block(self) -> None:
self._do_parbreak_stack.append(False)
def _leave_block(self) -> None:
self._do_parbreak_stack.pop()
self._do_parbreak_stack[-1] = True
def _maybe_parbreak(self, suffix: str = "") -> str:
result = f".sp{suffix}" if self._do_parbreak_stack[-1] else ""
self._do_parbreak_stack[-1] = True
return result
def _admonition_open(self, kind: str) -> str:
self._enter_block()
return (
'.sp\n'
'.RS 4\n'
f'\\fB{kind}\\fP\n'
'.br'
)
def _admonition_close(self) -> str:
self._leave_block()
return ".RE"
def render(self, tokens: Sequence[Token]) -> str:
self._do_parbreak_stack = [ False ]
self._font_stack = [ "\\fR" ]
return super().render(tokens)
def text(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return man_escape(token.content)
def paragraph_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._maybe_parbreak()
def paragraph_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def hardbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ".br"
def softbreak(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return " "
def code_inline(self, token: Token, tokens: Sequence[Token], i: int) -> str:
s = _protect_spaces(man_escape(token.content))
return f"\\fR\\(oq{s}\\(cq\\fP" if self.inline_code_is_quoted else s
def code_block(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self.fence(token, tokens, i)
def link_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
href = cast(str, token.attrs['href'])
self._link_stack.append(href)
text = ""
if tokens[i + 1].type == 'link_close' and href in self._href_targets:
# TODO error or warning if the target can't be resolved
text = self._href_targets[href]
self._font_stack.append("\\fB")
return f"\\fB{text}\0 <"
def link_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
href = self._link_stack.pop()
text = ""
if self.link_footnotes is not None:
try:
idx = self.link_footnotes.index(href) + 1
except ValueError:
self.link_footnotes.append(href)
idx = len(self.link_footnotes)
text = "\\fR" + man_escape(f"[{idx}]")
self._font_stack.pop()
return f">\0 {text}{self._font_stack[-1]}"
def list_item_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._enter_block()
lst = self._list_stack[-1]
maybe_space = '' if lst.compact or not lst.first_item_seen else '.sp\n'
lst.first_item_seen = True
head = "•"
if lst.next_idx is not None:
head = f"{lst.next_idx}."
lst.next_idx += 1
return (
f'{maybe_space}'
f'.RS {lst.width}\n'
f"\\h'-{len(head) + 1}'\\fB{man_escape(head)}\\fP\\h'1'\\c"
)
def list_item_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ".RE"
def bullet_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.append(List(width=4, compact=bool(token.meta['compact'])))
return self._maybe_parbreak()
def bullet_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.pop()
return ""
def em_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._font_stack.append("\\fI")
return "\\fI"
def em_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._font_stack.pop()
return self._font_stack[-1]
def strong_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._font_stack.append("\\fB")
return "\\fB"
def strong_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._font_stack.pop()
return self._font_stack[-1]
def fence(self, token: Token, tokens: Sequence[Token], i: int) -> str:
s = man_escape(token.content).rstrip('\n')
return (
'.sp\n'
'.RS 4\n'
'.nf\n'
f'{s}\n'
'.fi\n'
'.RE'
)
def blockquote_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
maybe_par = self._maybe_parbreak("\n")
self._enter_block()
return (
f"{maybe_par}"
".RS 4\n"
f"\\h'-3'\\fI\\(lq\\(rq\\fP\\h'1'\\c"
)
def blockquote_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ".RE"
def note_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open("Note")
def note_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def caution_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open( "Caution")
def caution_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def important_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open( "Important")
def important_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def tip_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open( "Tip")
def tip_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def warning_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_open( "Warning")
def warning_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return self._admonition_close()
def dl_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ".RS 4"
def dl_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ".RE"
def dt_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ".PP"
def dt_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def dd_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._enter_block()
return ".RS 4"
def dd_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._leave_block()
return ".RE"
def myst_role(self, token: Token, tokens: Sequence[Token], i: int) -> str:
if token.meta['name'] in [ 'command', 'env', 'option' ]:
return f'\\fB{man_escape(token.content)}\\fP'
elif token.meta['name'] in [ 'file', 'var' ]:
return f'\\fI{man_escape(token.content)}\\fP'
elif token.meta['name'] == 'manpage':
[page, section] = [ s.strip() for s in token.content.rsplit('(', 1) ]
section = section[:-1]
return f'\\fB{man_escape(page)}\\fP\\fR({man_escape(section)})\\fP'
else:
raise NotImplementedError("md node not supported yet", token)
def attr_span_begin(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# mdoc knows no anchors so we can drop those, but classes must be rejected.
if 'class' in token.attrs:
return super().attr_span_begin(token, tokens, i)
return ""
def attr_span_end(self, token: Token, tokens: Sequence[Token], i: int) -> str:
return ""
def heading_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in manpages", token)
def heading_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
raise RuntimeError("md token not supported in manpages", token)
def ordered_list_open(self, token: Token, tokens: Sequence[Token], i: int) -> str:
# max item head width for a number, a dot, and one leading space and one trailing space
width = 3 + len(str(cast(int, token.meta['end'])))
self._list_stack.append(
List(width = width,
next_idx = cast(int, token.attrs.get('start', 1)),
compact = bool(token.meta['compact'])))
return self._maybe_parbreak()
def ordered_list_close(self, token: Token, tokens: Sequence[Token], i: int) -> str:
self._list_stack.pop()
return ""
| 13,238 | 44.96875 | 95 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/__init__.py
|
import argparse
import sys
import textwrap
import traceback
from io import StringIO
from pprint import pprint
from . import manual
from . import options
from . import parallel
def pretty_print_exc(e: BaseException, *, _desc_text: str = "error") -> None:
print(f"\x1b[1;31m{_desc_text}:\x1b[0m", file=sys.stderr)
# destructure Exception and RuntimeError specifically so we can show nice
# messages for errors that weren't given their own exception type with
# a good pretty-printer.
if type(e) is Exception or type(e) is RuntimeError:
args = e.args
if len(args) and isinstance(args[0], str):
print("\t", args[0], file=sys.stderr, sep="")
args = args[1:]
buf = StringIO()
for arg in args:
pprint(arg, stream=buf)
if extra_info := buf.getvalue():
print("\x1b[1;34mextra info:\x1b[0m", file=sys.stderr)
print(textwrap.indent(extra_info, "\t"), file=sys.stderr, end="")
else:
print(e)
if e.__cause__ is not None:
print("", file=sys.stderr)
pretty_print_exc(e.__cause__, _desc_text="caused by")
def main() -> None:
parser = argparse.ArgumentParser(description='render nixos manual bits')
parser.add_argument('-j', '--jobs', type=int, default=None)
commands = parser.add_subparsers(dest='command', required=True)
options.build_cli(commands.add_parser('options'))
manual.build_cli(commands.add_parser('manual'))
args = parser.parse_args()
try:
parallel.pool_processes = args.jobs
if args.command == 'options':
options.run_cli(args)
elif args.command == 'manual':
manual.run_cli(args)
else:
raise RuntimeError('command not hooked up', args)
except Exception as e:
traceback.print_exc()
pretty_print_exc(e)
sys.exit(1)
| 1,891 | 32.785714 | 77 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/nix/nixos-render-docs/src/nixos_render_docs/parallel.py
|
# this module only has to exist because cpython has a global interpreter lock
# and markdown-it is pure python code. ideally we'd just use thread pools, but
# the GIL prohibits this.
import multiprocessing
from typing import Any, Callable, Iterable, Optional, TypeVar
R = TypeVar('R')
S = TypeVar('S')
T = TypeVar('T')
A = TypeVar('A')
pool_processes: Optional[int] = None
# this thing is impossible to type because there's so much global state involved.
# wrapping in a class to get access to Generic[] parameters is not sufficient
# because mypy is too weak, and unnecessarily obscures how much global state is
# needed in each worker to make this whole brouhaha work.
_map_worker_fn: Any = None
_map_worker_state_fn: Any = None
_map_worker_state_arg: Any = None
def _map_worker_init(*args: Any) -> None:
global _map_worker_fn, _map_worker_state_fn, _map_worker_state_arg
(_map_worker_fn, _map_worker_state_fn, _map_worker_state_arg) = args
# NOTE: the state argument is never passed by any caller, we only use it as a localized
# cache for the created state in lieu of another global. it is effectively a global though.
def _map_worker_step(arg: Any, state: Any = []) -> Any:
global _map_worker_fn, _map_worker_state_fn, _map_worker_state_arg
# if a Pool initializer throws it'll just be retried, leading to endless loops.
# doing the proper initialization only on first use avoids this.
if not state:
state.append(_map_worker_state_fn(_map_worker_state_arg))
return _map_worker_fn(state[0], arg)
def map(fn: Callable[[S, T], R], d: Iterable[T], chunk_size: int,
state_fn: Callable[[A], S], state_arg: A) -> list[R]:
"""
`[ fn(state, i) for i in d ]` where `state = state_fn(state_arg)`, but using multiprocessing
    if `pool_processes` is not `None`. when multiprocessing is used the state function will
    be run once in every worker process and `multiprocessing.Pool.imap` will be used.
**NOTE:** neither `state_fn` nor `fn` are allowed to mutate global state! doing so will cause
discrepancies if `pool_processes` is not None, since each worker will have its own copy.
**NOTE**: all data types that potentially cross a process boundary (so, all of them) must be
pickle-able. this excludes lambdas, bound functions, local functions, and a number of other
types depending on their exact internal structure. *theoretically* the pool constructor
can transfer non-pickleable data to worker processes, but this only works when using the
`fork` spawn method (and is thus not available on darwin or windows).
"""
if pool_processes is None:
state = state_fn(state_arg)
return [ fn(state, i) for i in d ]
with multiprocessing.Pool(pool_processes, _map_worker_init, (fn, state_fn, state_arg)) as p:
return list(p.imap(_map_worker_step, d, chunk_size))
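# Illustrative usage sketch (not part of the original module; `Renderer`,
# `sources` and `options` are hypothetical names). Note that `fn` and
# `state_fn` must be picklable, so top-level functions are required:
#
#   def make_renderer(options):           # state_fn: runs once per worker
#       return Renderer(options)
#
#   def render_one(renderer, source):     # fn: runs once per item
#       return renderer.render(source)
#
#   pool_processes = 4                    # None keeps everything in-process
#   results = map(render_one, sources, chunk_size=1,
#                 state_fn=make_renderer, state_arg=options)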
| 2,882 | 47.864407 | 97 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/tools/games/minecraft/optifine/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=./. -i python3 -p python3.pkgs.requests python3.pkgs.lxml nix
from lxml import html
import json
import os.path
import re
import requests
import subprocess
def nix_prefetch_sha256(name):
return subprocess.run(['nix-prefetch-url', '--type', 'sha256', 'https://optifine.net/download?f=' + name], capture_output=True, text=True).stdout.strip()
# fetch download page
sess = requests.session()
page = sess.get('https://optifine.net/downloads')
tree = html.fromstring(page.content)
# parse and extract main jar file names
href = tree.xpath('//tr[@class="downloadLine downloadLineMain"]/td[@class="colMirror"]/a/@href')
expr = re.compile(r'(OptiFine_)([0-9.]*)(.*)\.jar')
result = [ expr.search(x) for x in href ]
# format name, version and hash for each file
catalogue = {}
for i, r in enumerate(result):
index = r.group(1).lower() + r.group(2).replace('.', '_')
version = r.group(2) + r.group(3)
catalogue[index] = {
"version": version,
"sha256": nix_prefetch_sha256(r.group(0))
}
# latest version should be the first entry
if len(catalogue) > 0:
catalogue['optifine-latest'] = list(catalogue.values())[0]
# read previous versions
d = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(d, 'versions.json'), 'r') as f:
prev = json.load(f)
# `maintainers/scripts/update.py` will extract stdout to write commit message
# embed the commit message in json and print it
changes = [ { 'commitMessage': 'optifinePackages: update versions\n\n' } ]
# build a longest common subsequence, natural sorted by keys
for key, value in sorted({**prev, **catalogue}.items(), key=lambda item: [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', item[0])]):
if key not in prev:
changes[0]['commitMessage'] += 'optifinePackages.{}: init at {}\n'.format(key, value['version'])
elif value['version'] != prev[key]['version']:
changes[0]['commitMessage'] += 'optifinePackages.{}: {} -> {}\n'.format(key, prev[key]['version'], value['version'])
# print the changes in stdout
print(json.dumps(changes))
# write catalogue to file
with open(os.path.join(d, 'versions.json'), 'w') as f:
json.dump(catalogue, f, indent=4)
f.write('\n')
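# Illustrative shape of the resulting versions.json (version string and hash
# are hypothetical):
#
# {
#     "optifine_1_20_1": {
#         "version": "1.20.1_HD_U_I6",
#         "sha256": "<output of nix-prefetch-url>"
#     },
#     "optifine-latest": { ...copy of the newest entry... }
# }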
| 2,253 | 35.95082 | 157 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/docker/stream_layered_image.py
|
"""
This script generates a Docker image from a set of store paths. Uses
Docker Image Specification v1.2 as reference [1].
It expects a JSON file with the following properties and writes the
image as an uncompressed tarball to stdout:
* "architecture", "config", "os", "created", "repo_tag" correspond to
the fields with the same name on the image spec [2].
* "created" can be "now".
* "created" is also used as mtime for files added to the image.
* "store_layers" is a list of layers in ascending order, where each
layer is the list of store paths to include in that layer.
The main challenge for this script is to create the final image in a
streaming fashion, without dumping any intermediate data to disk,
for performance.
A docker image has each layer contents archived as separate tarballs,
and they later all get enveloped into a single big tarball in a
content-addressed fashion. However, because of how the "tar" format works,
we have to know about the name (which includes the checksum in our
case) and the size of the tarball before we can start adding it to the
outer tarball. We achieve that by creating the layer tarballs twice;
on the first iteration we calculate the file size and the checksum,
and on the second one we actually stream the contents. 'add_layer_dir'
function does all this.
[1]: https://github.com/moby/moby/blob/master/image/spec/v1.2.md
[2]: https://github.com/moby/moby/blob/4fb59c20a4fb54f944fe170d0ff1d00eb4a24d6f/image/spec/v1.2.md#image-json-field-descriptions
""" # noqa: E501
import io
import os
import re
import sys
import json
import hashlib
import pathlib
import tarfile
import itertools
import threading
from datetime import datetime, timezone
from collections import namedtuple
def archive_paths_to(obj, paths, mtime):
"""
Writes the given store paths as a tar file to the given stream.
obj: Stream to write to. Should have a 'write' method.
    paths: List of store paths.
    mtime: 'mtime' of the files added to the archive. Should be an integer
           representing a POSIX time.
"""
# gettarinfo makes the paths relative, this makes them
# absolute again
def append_root(ti):
ti.name = "/" + ti.name
return ti
def apply_filters(ti):
ti.mtime = mtime
ti.uid = 0
ti.gid = 0
ti.uname = "root"
ti.gname = "root"
return ti
def nix_root(ti):
ti.mode = 0o0555 # r-xr-xr-x
return ti
def dir(path):
ti = tarfile.TarInfo(path)
ti.type = tarfile.DIRTYPE
return ti
with tarfile.open(fileobj=obj, mode="w|") as tar:
# To be consistent with the docker utilities, we need to have
# these directories first when building layer tarballs.
tar.addfile(apply_filters(nix_root(dir("/nix"))))
tar.addfile(apply_filters(nix_root(dir("/nix/store"))))
for path in paths:
path = pathlib.Path(path)
if path.is_symlink():
files = [path]
else:
files = itertools.chain([path], path.rglob("*"))
for filename in sorted(files):
ti = append_root(tar.gettarinfo(filename))
# copy hardlinks as regular files
if ti.islnk():
ti.type = tarfile.REGTYPE
ti.linkname = ""
ti.size = filename.stat().st_size
ti = apply_filters(ti)
if ti.isfile():
with open(filename, "rb") as f:
tar.addfile(ti, f)
else:
tar.addfile(ti)
class ExtractChecksum:
"""
A writable stream which only calculates the final file size and
sha256sum, while discarding the actual contents.
"""
def __init__(self):
self._digest = hashlib.sha256()
self._size = 0
def write(self, data):
self._digest.update(data)
self._size += len(data)
def extract(self):
"""
Returns: Hex-encoded sha256sum and size as a tuple.
"""
return (self._digest.hexdigest(), self._size)
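# Illustrative usage sketch (hypothetical store path): this is how
# add_layer_dir below sizes and names a layer tarball without writing it to
# disk first.
#
#   ec = ExtractChecksum()
#   archive_paths_to(ec, ["/nix/store/...-hello"], mtime=0)
#   checksum, size = ec.extract()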
FromImage = namedtuple("FromImage", ["tar", "manifest_json", "image_json"])
# Some metadata for a layer
LayerInfo = namedtuple("LayerInfo", ["size", "checksum", "path", "paths"])
def load_from_image(from_image_str):
"""
Loads the given base image, if any.
from_image_str: Path to the base image archive.
Returns: A 'FromImage' object with references to the loaded base image,
or 'None' if no base image was provided.
"""
if from_image_str is None:
return None
base_tar = tarfile.open(from_image_str)
manifest_json_tarinfo = base_tar.getmember("manifest.json")
with base_tar.extractfile(manifest_json_tarinfo) as f:
manifest_json = json.load(f)
image_json_tarinfo = base_tar.getmember(manifest_json[0]["Config"])
with base_tar.extractfile(image_json_tarinfo) as f:
image_json = json.load(f)
return FromImage(base_tar, manifest_json, image_json)
def add_base_layers(tar, from_image):
"""
Adds the layers from the given base image to the final image.
tar: 'tarfile.TarFile' object for new layers to be added to.
from_image: 'FromImage' object with references to the loaded base image.
"""
if from_image is None:
print("No 'fromImage' provided", file=sys.stderr)
return []
layers = from_image.manifest_json[0]["Layers"]
checksums = from_image.image_json["rootfs"]["diff_ids"]
layers_checksums = zip(layers, checksums)
for num, (layer, checksum) in enumerate(layers_checksums, start=1):
layer_tarinfo = from_image.tar.getmember(layer)
checksum = re.sub(r"^sha256:", "", checksum)
tar.addfile(layer_tarinfo, from_image.tar.extractfile(layer_tarinfo))
path = layer_tarinfo.path
size = layer_tarinfo.size
print("Adding base layer", num, "from", path, file=sys.stderr)
yield LayerInfo(size=size, checksum=checksum, path=path, paths=[path])
from_image.tar.close()
def overlay_base_config(from_image, final_config):
"""
Overlays the final image 'config' JSON on top of selected defaults from the
base image 'config' JSON.
from_image: 'FromImage' object with references to the loaded base image.
final_config: 'dict' object of the final image 'config' JSON.
"""
if from_image is None:
return final_config
base_config = from_image.image_json["config"]
# Preserve environment from base image
final_env = base_config.get("Env", []) + final_config.get("Env", [])
if final_env:
# Resolve duplicates (last one wins) and format back as list
resolved_env = {entry.split("=", 1)[0]: entry for entry in final_env}
final_config["Env"] = list(resolved_env.values())
return final_config
def add_layer_dir(tar, paths, store_dir, mtime):
"""
Appends given store paths to a TarFile object as a new layer.
tar: 'tarfile.TarFile' object for the new layer to be added to.
paths: List of store paths.
store_dir: the root directory of the nix store
mtime: 'mtime' of the added files and the layer tarball.
Should be an integer representing a POSIX time.
Returns: A 'LayerInfo' object containing some metadata of
the layer added.
"""
invalid_paths = [i for i in paths if not i.startswith(store_dir)]
assert len(invalid_paths) == 0, \
f"Expecting absolute paths from {store_dir}, but got: {invalid_paths}"
# First, calculate the tarball checksum and the size.
extract_checksum = ExtractChecksum()
archive_paths_to(
extract_checksum,
paths,
mtime=mtime,
)
(checksum, size) = extract_checksum.extract()
path = f"{checksum}/layer.tar"
layer_tarinfo = tarfile.TarInfo(path)
layer_tarinfo.size = size
layer_tarinfo.mtime = mtime
# Then actually stream the contents to the outer tarball.
read_fd, write_fd = os.pipe()
with open(read_fd, "rb") as read, open(write_fd, "wb") as write:
def producer():
archive_paths_to(
write,
paths,
mtime=mtime,
)
write.close()
# Closing the write end of the fifo also closes the read end,
# so we don't need to wait until this thread is finished.
#
# Any exception from the thread will get printed by the default
# exception handler, and the 'addfile' call will fail since it
# won't be able to read required amount of bytes.
threading.Thread(target=producer).start()
tar.addfile(layer_tarinfo, read)
return LayerInfo(size=size, checksum=checksum, path=path, paths=paths)
def add_customisation_layer(target_tar, customisation_layer, mtime):
"""
    Adds the customisation layer as a new layer. This layer is structured
    differently; the given store path already contains the 'layer.tar' and the
    corresponding sha256sum.
    target_tar: 'tarfile.TarFile' object for the new layer to be added to.
customisation_layer: Path containing the layer archive.
mtime: 'mtime' of the added layer tarball.
"""
checksum_path = os.path.join(customisation_layer, "checksum")
with open(checksum_path) as f:
checksum = f.read().strip()
assert len(checksum) == 64, f"Invalid sha256 at ${checksum_path}."
layer_path = os.path.join(customisation_layer, "layer.tar")
path = f"{checksum}/layer.tar"
tarinfo = target_tar.gettarinfo(layer_path)
tarinfo.name = path
tarinfo.mtime = mtime
with open(layer_path, "rb") as f:
target_tar.addfile(tarinfo, f)
return LayerInfo(
size=None,
checksum=checksum,
path=path,
paths=[customisation_layer]
)
def add_bytes(tar, path, content, mtime):
"""
Adds a file to the tarball with given path and contents.
tar: 'tarfile.TarFile' object.
path: Path of the file as a string.
content: Contents of the file.
mtime: 'mtime' of the file. Should be an integer representing a POSIX time.
"""
assert type(content) is bytes
ti = tarfile.TarInfo(path)
ti.size = len(content)
ti.mtime = mtime
tar.addfile(ti, io.BytesIO(content))
def main():
with open(sys.argv[1], "r") as f:
conf = json.load(f)
created = (
datetime.now(tz=timezone.utc)
if conf["created"] == "now"
else datetime.fromisoformat(conf["created"])
)
mtime = int(created.timestamp())
store_dir = conf["store_dir"]
from_image = load_from_image(conf["from_image"])
with tarfile.open(mode="w|", fileobj=sys.stdout.buffer) as tar:
layers = []
layers.extend(add_base_layers(tar, from_image))
start = len(layers) + 1
for num, store_layer in enumerate(conf["store_layers"], start=start):
print("Creating layer", num, "from paths:", store_layer,
file=sys.stderr)
info = add_layer_dir(tar, store_layer, store_dir, mtime=mtime)
layers.append(info)
print("Creating layer", len(layers) + 1, "with customisation...",
file=sys.stderr)
layers.append(
add_customisation_layer(
tar,
conf["customisation_layer"],
mtime=mtime
)
)
print("Adding manifests...", file=sys.stderr)
image_json = {
"created": datetime.isoformat(created),
"architecture": conf["architecture"],
"os": "linux",
"config": overlay_base_config(from_image, conf["config"]),
"rootfs": {
"diff_ids": [f"sha256:{layer.checksum}" for layer in layers],
"type": "layers",
},
"history": [
{
"created": datetime.isoformat(created),
"comment": f"store paths: {layer.paths}"
}
for layer in layers
],
}
image_json = json.dumps(image_json, indent=4).encode("utf-8")
image_json_checksum = hashlib.sha256(image_json).hexdigest()
image_json_path = f"{image_json_checksum}.json"
add_bytes(tar, image_json_path, image_json, mtime=mtime)
manifest_json = [
{
"Config": image_json_path,
"RepoTags": [conf["repo_tag"]],
"Layers": [layer.path for layer in layers],
}
]
manifest_json = json.dumps(manifest_json, indent=4).encode("utf-8")
add_bytes(tar, "manifest.json", manifest_json, mtime=mtime)
print("Done.", file=sys.stderr)
if __name__ == "__main__":
main()
| 12,671 | 31.326531 | 128 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/docker/detjson.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488
import sys
import json
# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
"Size": 0,
"config": {
"ExposedPorts": None,
"MacAddress": "",
"NetworkDisabled": False,
"PortSpecs": None,
"VolumeDriver": ""
}
}
SAFEDELS["container_config"] = SAFEDELS["config"]
def makedet(j, safedels):
for k,v in safedels.items():
if k not in j:
continue
if type(v) == dict:
makedet(j[k], v)
elif j[k] == v:
del j[k]
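# Illustrative example (hypothetical layer JSON): the input
#   {"Size": 0, "config": {"MacAddress": "", "Cmd": ["sh"]}}
# becomes
#   {"config": {"Cmd": ["sh"]}}
# because "Size" and "MacAddress" match their defaults in SAFEDELS.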
def main():
j = json.load(sys.stdin)
makedet(j, SAFEDELS)
json.dump(j, sys.stdout, sort_keys=True)
if __name__ == '__main__':
main()
| 907 | 21.146341 | 77 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/dotnet/make-nuget-source/extract-licenses-from-nupkgs.py
|
#!/usr/bin/env python3
"""
Opens each .nupkg file in a directory, and extracts the SPDX license identifiers
from them if they exist. The SPDX license identifier is stored in the
'<license type="expression">...</license>' tag in the .nuspec file.
All found license identifiers will be printed to stdout.
"""
from glob import glob
from pathlib import Path
import sys
import xml.etree.ElementTree as ET
import zipfile
all_licenses = set()
if len(sys.argv) < 2:
print(f"Usage: {sys.argv[0]} DIRECTORY")
sys.exit(1)
nupkg_dir = Path(sys.argv[1])
for nupkg_name in glob("*.nupkg", root_dir=nupkg_dir):
with zipfile.ZipFile(nupkg_dir / nupkg_name) as nupkg:
for nuspec_name in [name for name in nupkg.namelist() if name.endswith(".nuspec")]:
with nupkg.open(nuspec_name) as nuspec_stream:
nuspec = ET.parse(nuspec_stream)
licenses = nuspec.findall(".//{*}license[@type='expression']")
all_licenses.update([license.text for license in licenses])
print("\n".join(sorted(all_licenses)))
| 1,062 | 33.290323 | 91 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/templaterpm/nix-template-rpm.py
|
#!/bin/env python
import sys
import os
import subprocess
import argparse
import re
import shutil
import rpm
import urlparse
import traceback
import toposort
class SPECTemplate(object):
def __init__(self, specFilename, outputDir, inputDir=None, buildRootInclude=None, translateTable=None, repositoryDir=None, allPackagesDir=None, maintainer="MAINTAINER"):
rpm.addMacro("buildroot","$out")
rpm.addMacro("_libdir","lib")
rpm.addMacro("_libexecdir","libexec")
rpm.addMacro("_sbindir","sbin")
rpm.addMacro("_sysconfdir","etc")
rpm.addMacro("_topdir","SPACER_DIR_FOR_REMOVAL")
rpm.addMacro("_sourcedir","SOURCE_DIR_SPACER")
self.packageGroups = [ "ocaml", "python" ]
ts = rpm.TransactionSet()
self.specFilename = specFilename
self.spec = ts.parseSpec(specFilename)
self.inputDir = inputDir
self.buildRootInclude = buildRootInclude
self.repositoryDir = repositoryDir
self.allPackagesDir = allPackagesDir
self.maintainer = maintainer
self.translateTable = translateTable
self.facts = self.getFacts()
self.key = self.getSelfKey()
tmpDir = os.path.join(outputDir, self.rewriteName(self.spec.sourceHeader['name']))
if self.translateTable is not None:
self.relOutputDir = self.translateTable.path(self.key,tmpDir)
else:
self.relOutputDir = tmpDir
self.final_output_dir = os.path.normpath( self.relOutputDir )
if self.repositoryDir is not None:
self.potential_repository_dir = os.path.normpath( os.path.join(self.repositoryDir,self.relOutputDir) )
def rewriteCommands(self, string):
string = string.replace('SPACER_DIR_FOR_REMOVAL/','')
string = string.replace('SPACER_DIR_FOR_REMOVAL','')
string = '\n'.join(map(lambda line: ' '.join(map(lambda x: x.replace('SOURCE_DIR_SPACER/',('${./' if (self.buildRootInclude is None) else '${buildRoot}/usr/share/buildroot/SOURCES/'))+('}' if (self.buildRootInclude is None) else '') if x.startswith('SOURCE_DIR_SPACER/') else x, line.split(' '))), string.split('\n')))
string = string.replace('\n','\n ')
string = string.rstrip()
return string
def rewriteName(self, string):
parts = string.split('-')
parts = filter(lambda x: not x == "devel", parts)
parts = filter(lambda x: not x == "doc", parts)
if len(parts) > 1 and parts[0] in self.packageGroups:
return parts[0] + '-' + ''.join(parts[1:2] + map(lambda x: x.capitalize(), parts[2:]))
else:
return ''.join(parts[:1] + map(lambda x: x.capitalize(), parts[1:]))
def rewriteInputs(self,target,inputs):
camelcase = lambda l: l[:1] + map(lambda x: x.capitalize(), l[1:])
filterDevel = lambda l: filter(lambda x: not x == "devel", l)
filterDoc = lambda l: filter(lambda x: not x == "doc", l)
rewrite = lambda l: ''.join(camelcase(filterDoc(filterDevel(l))))
def filterPackageGroup(target):
if target is None:
return [ rewrite(x.split('-')) for x in inputs if (not x.split('-')[0] in self.packageGroups) or (len(x.split('-')) == 1) ]
elif target in self.packageGroups:
return [ target + '_' + rewrite(x.split('-')[1:]) for x in inputs if (x.split('-')[0] == target) and (len(x.split('-')) > 1)]
else:
raise Exception("Unknown target")
return []
if target is None:
packages = filterPackageGroup(None)
packages.sort()
elif target in self.packageGroups:
packages = filterPackageGroup(target)
packages.sort()
elif target == "ALL":
packages = []
for t in [None] + self.packageGroups:
tmp = filterPackageGroup(t)
tmp.sort()
packages += tmp
else:
raise Exception("Unknown target")
packages = []
return packages
def getBuildInputs(self,target=None):
inputs = self.rewriteInputs(target,self.spec.sourceHeader['requires'])
if self.translateTable is not None:
return map(lambda x: self.translateTable.name(x), inputs)
else:
return inputs
def getSelfKey(self):
name = self.spec.sourceHeader['name']
if len(name.split('-')) > 1 and name.split('-')[0] in self.packageGroups:
key = self.rewriteInputs(name.split('-')[0], [self.spec.sourceHeader['name']])[0]
else:
key = self.rewriteInputs(None, [self.spec.sourceHeader['name']])[0]
return key
def getSelf(self):
if self.translateTable is not None:
return self.translateTable.name(self.key)
else:
return self.key
def copyPatches(self, input_dir, output_dir):
patches = [source for (source, _, flag) in self.spec.sources if flag==2]
for filename in patches:
shutil.copyfile(os.path.join(input_dir, filename), os.path.join(output_dir, filename))
def copySources(self, input_dir, output_dir):
filenames = [source for (source, _, flag) in self.spec.sources if flag==1 if not urlparse.urlparse(source).scheme in ["http", "https"] ]
for filename in filenames:
shutil.copyfile(os.path.join(input_dir, filename), os.path.join(output_dir, filename))
def getFacts(self):
facts = {}
facts["name"] = self.rewriteName(self.spec.sourceHeader['name'])
facts["version"] = self.spec.sourceHeader['version']
facts["url"] = []
facts["sha256"] = []
sources = [source for (source, _, flag) in self.spec.sources if flag==1 if urlparse.urlparse(source).scheme in ["http", "https"] ]
for url in sources:
p = subprocess.Popen(['nix-prefetch-url', url], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
sha256 = output[:-1] #remove new line
facts["url"].append(url)
facts["sha256"].append(sha256)
patches = [source for (source, _, flag) in self.spec.sources if flag==2]
if self.buildRootInclude is None:
facts["patches"] = map(lambda x: './'+x, patches)
else:
facts["patches"] = map(lambda x: '"${buildRoot}/usr/share/buildroot/SOURCES/'+x+'"', reversed(patches))
return facts
@property
def name(self):
out = ' name = "' + self.facts["name"] + '-' + self.facts["version"] + '";\n'
out += ' version = "' + self.facts['version'] + '";\n'
return out
@property
def src(self):
sources = [source for (source, _, flag) in self.spec.sources if flag==1 if urlparse.urlparse(source).scheme in ["http", "https"] ]
out = ''
for (url,sha256) in zip(self.facts['url'],self.facts['sha256']):
out += ' src = fetchurl {\n'
out += ' url = "' + url + '";\n'
out += ' sha256 = "' + sha256 + '";\n'
out += ' };\n'
return out
@property
def patch(self):
out = ' patches = [ ' + ' '.join(self.facts['patches']) + ' ];\n'
return out
@property
def buildInputs(self):
out = ' buildInputs = [ '
out += ' '.join(self.getBuildInputs("ALL"))
out += ' ];\n'
return out
@property
def configure(self):
out = ' configurePhase = \'\'\n ' + self.rewriteCommands(self.spec.prep) + '\n \'\';\n';
return out
@property
def build(self):
out = ' buildPhase = \'\'\n ' + self.rewriteCommands(self.spec.build) + '\n \'\';\n';
return out
@property
def install(self):
out = ' installPhase = \'\'\n ' + self.rewriteCommands(self.spec.install) + '\n \'\';\n';
return out
@property
def ocamlExtra(self):
if "ocaml" in self.getBuildInputs("ALL"):
return ' createFindlibDestdir = true;\n'
else:
return ''
@property
def meta(self):
out = ' meta = with lib; {\n'
out += ' homepage = ' + self.spec.sourceHeader['url'] + ';\n'
out += ' description = "' + self.spec.sourceHeader['summary'] + '";\n'
out += ' license = lib.licenses.' + self.spec.sourceHeader['license'] + ';\n'
out += ' platforms = [ "i686-linux" "x86_64-linux" ];\n'
out += ' maintainers = with lib.maintainers; [ ' + self.maintainer + ' ];\n'
out += ' };\n'
out += '}\n'
return out
def __str__(self):
head = '{lib, stdenv, fetchurl, ' + ', '.join(self.getBuildInputs("ALL")) + '}:\n\n'
head += 'stdenv.mkDerivation {\n'
body = [ self.name, self.src, self.patch, self.buildInputs, self.configure, self.build, self.ocamlExtra, self.install, self.meta ]
return head + '\n'.join(body)
def getTemplate(self):
head = '{lib, stdenv, buildRoot, fetchurl, ' + ', '.join(self.getBuildInputs("ALL")) + '}:\n\n'
head += 'let\n'
head += ' buildRootInput = (import "${buildRoot}/usr/share/buildroot/buildRootInput.nix") { fetchurl=fetchurl; buildRoot=buildRoot; };\n'
head += 'in\n\n'
head += 'stdenv.mkDerivation {\n'
head += ' inherit (buildRootInput.'+self.rewriteName(self.spec.sourceHeader['name'])+') name version src;\n'
head += ' patches = buildRootInput.'+self.rewriteName(self.spec.sourceHeader['name'])+'.patches ++ [];\n\n'
body = [ self.buildInputs, self.configure, self.build, self.ocamlExtra, self.install, self.meta ]
return head + '\n'.join(body)
def getInclude(self):
head = self.rewriteName(self.spec.sourceHeader['name']) + ' = {\n'
body = [ self.name, self.src, self.patch ]
return head + '\n'.join(body) + '};\n'
def __cmp__(self,other):
if self.getSelf() in other.getBuildInputs("ALL"):
return 1
else:
return -1
def callPackage(self):
callPackage = ' ' + self.getSelf() + ' = callPackage ' + os.path.relpath(self.final_output_dir, self.allPackagesDir) + ' {'
newline = False;
for target in self.packageGroups:
tmp = self.getBuildInputs(target)
if len(tmp) > 0:
newline = True;
callPackage += '\n ' + 'inherit (' + target + 'Packages) ' + ' '.join(tmp) + ';'
if newline:
callPackage += '\n };'
else:
callPackage += ' };'
return callPackage
def generateCombined(self):
if not os.path.exists(self.final_output_dir):
os.makedirs(self.final_output_dir)
if self.inputDir is not None:
self.copySources(self.inputDir, self.final_output_dir)
self.copyPatches(self.inputDir, self.final_output_dir)
nixfile = open(os.path.join(self.final_output_dir,'default.nix'), 'w')
nixfile.write(str(self))
nixfile.close()
shutil.copyfile(self.specFilename, os.path.join(self.final_output_dir, os.path.basename(self.specFilename)))
def generateSplit(self):
if not os.path.exists(self.final_output_dir):
os.makedirs(self.final_output_dir)
nixfile = open(os.path.join(self.final_output_dir,'default.nix'), 'w')
nixfile.write(self.getTemplate())
nixfile.close()
return self.getInclude()
class NixTemplate(object):
def __init__(self, nixfile):
self.nixfile = nixfile
self.original = { "name":None, "version":None, "url":None, "sha256":None, "patches":None }
self.update = { "name":None, "version":None, "url":None, "sha256":None, "patches":None }
self.matchedLines = {}
if os.path.isfile(nixfile):
with file(nixfile, 'r') as infile:
for (n,line) in enumerate(infile):
name = re.match(r'^\s*name\s*=\s*"(.*?)"\s*;\s*$', line)
version = re.match(r'^\s*version\s*=\s*"(.*?)"\s*;\s*$', line)
url = re.match(r'^\s*url\s*=\s*"?(.*?)"?\s*;\s*$', line)
sha256 = re.match(r'^\s*sha256\s*=\s*"(.*?)"\s*;\s*$', line)
patches = re.match(r'^\s*patches\s*=\s*(\[.*?\])\s*;\s*$', line)
if name is not None and self.original["name"] is None:
self.original["name"] = name.group(1)
self.matchedLines[n] = "name"
if version is not None and self.original["version"] is None:
self.original["version"] = version.group(1)
self.matchedLines[n] = "version"
if url is not None and self.original["url"] is None:
self.original["url"] = url.group(1)
self.matchedLines[n] = "url"
if sha256 is not None and self.original["sha256"] is None:
self.original["sha256"] = sha256.group(1)
self.matchedLines[n] = "sha256"
if patches is not None and self.original["patches"] is None:
self.original["patches"] = patches.group(1)
self.matchedLines[n] = "patches"
def generateUpdated(self, nixOut):
nixTemplateFile = open(os.path.normpath(self.nixfile),'r')
nixOutFile = open(os.path.normpath(nixOut),'w')
for (n,line) in enumerate(nixTemplateFile):
if self.matchedLines.has_key(n) and self.update[self.matchedLines[n]] is not None:
nixOutFile.write(line.replace(self.original[self.matchedLines[n]], self.update[self.matchedLines[n]], 1))
else:
nixOutFile.write(line)
nixTemplateFile.close()
nixOutFile.close()
def loadUpdate(self,orig):
if orig.has_key("name") and orig.has_key("version"):
self.update["name"] = orig["name"] + '-' + orig["version"]
self.update["version"] = orig["version"]
if orig.has_key("url") and orig.has_key("sha256") and len(orig["url"])>0:
self.update["url"] = orig["url"][0]
self.update["sha256"] = orig["sha256"][0]
for url in orig["url"][1:-1]:
sys.stderr.write("WARNING: URL has been dropped: %s\n" % url)
if orig.has_key("patches"):
self.update["patches"] = '[ ' + ' '.join(orig['patches']) + ' ]'
class TranslationTable(object):
def __init__(self):
self.tablePath = {}
self.tableName = {}
def update(self, key, path, name=None):
self.tablePath[key] = path
if name is not None:
self.tableName[key] = name
def readTable(self, tableFile):
with file(tableFile, 'r') as infile:
for line in infile:
match = re.match(r'^(.+?)\s+(.+?)\s+(.+?)\s*$', line)
if match is not None:
if not self.tablePath.has_key(match.group(1)):
self.tablePath[match.group(1)] = match.group(2)
if not self.tableName.has_key(match.group(1)):
self.tableName[match.group(1)] = match.group(3)
else:
match = re.match(r'^(.+?)\s+(.+?)\s*$', line)
if not self.tablePath.has_key(match.group(1)):
self.tablePath[match.group(1)] = match.group(2)
def writeTable(self, tableFile):
outFile = open(os.path.normpath(tableFile),'w')
keys = self.tablePath.keys()
keys.sort()
for k in keys:
if self.tableName.has_key(k):
outFile.write( k + " " + self.tablePath[k] + " " + self.tableName[k] + "\n" )
else:
outFile.write( k + " " + self.tablePath[k] + "\n" )
outFile.close()
def name(self, key):
if self.tableName.has_key(key):
return self.tableName[key]
else:
return key
def path(self, key, orig):
if self.tablePath.has_key(key):
return self.tablePath[key]
else:
return orig
if __name__ == "__main__":
#Parse command line options
parser = argparse.ArgumentParser(description="Generate .nix templates from RPM spec files")
parser.add_argument("specs", metavar="SPEC", nargs="+", help="spec file")
parser.add_argument("-o", "--output", metavar="OUT_DIR", required=True, help="output directory")
parser.add_argument("-b", "--buildRoot", metavar="BUILDROOT_DIR", default=None, help="buildroot output directory")
parser.add_argument("-i", "--inputSources", metavar="IN_DIR", default=None, help="sources input directory")
parser.add_argument("-m", "--maintainer", metavar="MAINTAINER", default="__NIX_MAINTAINER__", help="package maintainer")
parser.add_argument("-r", "--repository", metavar="REP_DIR", default=None, help="nix repository to compare output against")
parser.add_argument("-t", "--translate", metavar="TRANSLATE_TABLE", default=None, help="path of translation table for name and path")
parser.add_argument("-u", "--translateOut", metavar="TRANSLATE_OUT", default=None, help="output path for updated translation table")
parser.add_argument("-a", "--allPackages", metavar="ALL_PACKAGES", default=None, help="top level dir to call packages from")
args = parser.parse_args()
allPackagesDir = os.path.normpath( os.path.dirname(args.allPackages) )
if not os.path.exists(allPackagesDir):
os.makedirs(allPackagesDir)
buildRootContent = {}
nameMap = {}
newTable = TranslationTable()
if args.translate is not None:
table = TranslationTable()
table.readTable(args.translate)
newTable.readTable(args.translate)
else:
table = None
for specPath in args.specs:
try:
sys.stderr.write("INFO: generate nix file from: %s\n" % specPath)
spec = SPECTemplate(specPath, args.output, args.inputSources, args.buildRoot, table, args.repository, allPackagesDir, args.maintainer)
if args.repository is not None:
if os.path.exists(os.path.join(spec.potential_repository_dir,'default.nix')):
nixTemplate = NixTemplate(os.path.join(spec.potential_repository_dir,'default.nix'))
nixTemplate.loadUpdate(spec.facts)
if not os.path.exists(spec.final_output_dir):
os.makedirs(spec.final_output_dir)
nixTemplate.generateUpdated(os.path.join(spec.final_output_dir,'default.nix'))
else:
sys.stderr.write("WARNING: Repository does not contain template: %s\n" % os.path.join(spec.potential_repository_dir,'default.nix'))
if args.buildRoot is None:
spec.generateCombined()
else:
buildRootContent[spec.key] = spec.generateSplit()
else:
if args.buildRoot is None:
spec.generateCombined()
else:
buildRootContent[spec.key] = spec.generateSplit()
newTable.update(spec.key,spec.relOutputDir,spec.getSelf())
nameMap[spec.getSelf()] = spec
except Exception, e:
sys.stderr.write("ERROR: %s failed with:\n%s\n%s\n" % (specPath,e.message,traceback.format_exc()))
if args.translateOut is not None:
if not os.path.exists(os.path.dirname(os.path.normpath(args.translateOut))):
os.makedirs(os.path.dirname(os.path.normpath(args.translateOut)))
newTable.writeTable(args.translateOut)
graph = {}
for k, v in nameMap.items():
graph[k] = set(v.getBuildInputs("ALL"))
sortedSpecs = toposort.toposort_flatten(graph)
sortedSpecs = filter( lambda x: x in nameMap.keys(), sortedSpecs)
allPackagesFile = open(os.path.normpath( args.allPackages ), 'w')
allPackagesFile.write( '\n\n'.join(map(lambda x: x.callPackage(), map(lambda x: nameMap[x], sortedSpecs))) )
allPackagesFile.close()
if args.buildRoot is not None:
buildRootFilename = os.path.normpath( args.buildRoot )
if not os.path.exists(os.path.dirname(buildRootFilename)):
os.makedirs(os.path.dirname(buildRootFilename))
buildRootFile = open(buildRootFilename, 'w')
buildRootFile.write( "{ fetchurl, buildRoot }: {\n\n" )
keys = buildRootContent.keys()
keys.sort()
for k in keys:
buildRootFile.write( buildRootContent[k] + '\n' )
buildRootFile.write( "}\n" )
buildRootFile.close()
| 19,069 | 35.743738 | 322 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/rust/replace-workspace-values.py
|
# This script implements the workspace inheritance mechanism described
# here: https://doc.rust-lang.org/cargo/reference/workspaces.html#the-package-table
#
# Please run `mypy --strict`, `black`, and `isort --profile black` on this after editing, thanks!
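# Illustrative example of the rewrite this script performs (crate name and
# versions are made up). Given a workspace manifest containing
#
#     [workspace.dependencies]
#     serde = { version = "1", features = ["std"] }
#
# a crate manifest entry
#
#     serde = { workspace = true, features = ["derive"] }
#
# is replaced with
#
#     serde = { version = "1", features = ["derive", "std"] }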
import sys
from typing import Any
import tomli
import tomli_w
def load_file(path: str) -> dict[str, Any]:
with open(path, "rb") as f:
return tomli.load(f)
def replace_key(
workspace_manifest: dict[str, Any], table: dict[str, Any], section: str, key: str
) -> bool:
if (
isinstance(table[key], dict)
and "workspace" in table[key]
and table[key]["workspace"] is True
):
print("replacing " + key)
replaced = table[key]
del replaced["workspace"]
workspace_copy = workspace_manifest[section][key]
if section == "dependencies":
crate_features = replaced.get("features")
if type(workspace_copy) is str:
replaced["version"] = workspace_copy
else:
replaced.update(workspace_copy)
merged_features = (crate_features or []) + (
workspace_copy.get("features") or []
)
if len(merged_features) > 0:
# Dictionaries are guaranteed to be ordered (https://stackoverflow.com/a/7961425)
replaced["features"] = list(dict.fromkeys(merged_features))
elif section == "package":
table[key] = replaced = workspace_copy
return True
return False
def replace_dependencies(
workspace_manifest: dict[str, Any], root: dict[str, Any]
) -> bool:
changed = False
for key in ["dependencies", "dev-dependencies", "build-dependencies"]:
if key in root:
for k in root[key].keys():
changed |= replace_key(workspace_manifest, root[key], "dependencies", k)
return changed
def main() -> None:
top_cargo_toml = load_file(sys.argv[2])
if "workspace" not in top_cargo_toml:
# If top_cargo_toml is not a workspace manifest, then this script was probably
        # run on something that does not actually use workspace dependencies
print(f"{sys.argv[2]} is not a workspace manifest, doing nothing.")
return
crate_manifest = load_file(sys.argv[1])
workspace_manifest = top_cargo_toml["workspace"]
if "workspace" in crate_manifest:
return
changed = False
for key in crate_manifest["package"].keys():
changed |= replace_key(
workspace_manifest, crate_manifest["package"], "package", key
)
changed |= replace_dependencies(workspace_manifest, crate_manifest)
if "target" in crate_manifest:
for key in crate_manifest["target"].keys():
changed |= replace_dependencies(
workspace_manifest, crate_manifest["target"][key]
)
if not changed:
return
with open(sys.argv[1], "wb") as f:
tomli_w.dump(crate_manifest, f)
if __name__ == "__main__":
main()
| 3,095 | 27.666667 | 101 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/rust/sysroot/cargo.py
|
import os
import toml
rust_src = os.environ['RUSTC_SRC']
orig_cargo = os.environ['ORIG_CARGO'] if 'ORIG_CARGO' in os.environ else None
base = {
'package': {
'name': 'nixpkgs-sysroot-stub-crate',
'version': '0.0.0',
'authors': ['The Rust Project Developers'],
'edition': '2018',
},
'dependencies': {
'compiler_builtins': {
'version': '0.1.0',
'features': ['rustc-dep-of-std', 'mem'],
},
'core': {
'path': os.path.join(rust_src, 'core'),
},
'alloc': {
'path': os.path.join(rust_src, 'alloc'),
},
},
'patch': {
'crates-io': {
'rustc-std-workspace-core': {
'path': os.path.join(rust_src, 'rustc-std-workspace-core'),
},
'rustc-std-workspace-alloc': {
'path': os.path.join(rust_src, 'rustc-std-workspace-alloc'),
},
},
},
}
if orig_cargo is not None:
with open(orig_cargo, 'r') as f:
src = toml.loads(f.read())
if 'profile' in src:
base['profile'] = src['profile']
out = toml.dumps(base)
with open('Cargo.toml', 'x') as f:
f.write(out)
| 1,078 | 21.479167 | 77 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/rust/fetch-cargo-tarball/cargo-vendor-normalise.py
|
#!/usr/bin/env python
import sys
import toml
def quote(s: str) -> str:
escaped = s.replace('"', r"\"").replace("\n", r"\n").replace("\\", "\\\\")
return '"{}"'.format(escaped)
def main() -> None:
data = toml.load(sys.stdin)
# There is no dependency to vendor in this project.
if not list(data.keys()) == ["source"]:
return
# this value is non deterministic
data["source"]["vendored-sources"]["directory"] = "@vendor@"
lines = []
inner = data["source"]
for source, attrs in sorted(inner.items()):
lines.append("[source.{}]".format(quote(source)))
if source == "vendored-sources":
lines.append('"directory" = "@vendor@"\n')
else:
for key, value in sorted(attrs.items()):
attr = "{} = {}".format(quote(key), quote(value))
lines.append(attr)
lines.append("")
result = "\n".join(lines)
real = toml.loads(result)
assert real == data, "output = {} while input = {}".format(real, data)
print(result)
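# Illustrative input/output sketch (the input is what `cargo vendor` prints;
# exact values are hypothetical). A config such as
#
#   [source.crates-io]
#   replace-with = "vendored-sources"
#
#   [source.vendored-sources]
#   directory = "/build/some-nondeterministic-dir"
#
# is normalised by main() to
#
#   [source."crates-io"]
#   "replace-with" = "vendored-sources"
#
#   [source."vendored-sources"]
#   "directory" = "@vendor@"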
if __name__ == "__main__":
main()
| 1,095 | 23.909091 | 78 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/binary-cache/make-binary-cache.py
|
import json
import os
import subprocess
with open(".attrs.json", "r") as f:
closures = json.load(f)["closure"]
os.chdir(os.environ["out"])
nixPrefix = os.environ["NIX_STORE"] # Usually /nix/store
with open("nix-cache-info", "w") as f:
f.write("StoreDir: " + nixPrefix + "\n")
def dropPrefix(path):
return path[len(nixPrefix + "/"):]
for item in closures:
narInfoHash = dropPrefix(item["path"]).split("-")[0]
xzFile = "nar/" + narInfoHash + ".nar.xz"
with open(xzFile, "w") as f:
subprocess.run("nix-store --dump %s | xz -c" % item["path"], stdout=f, shell=True)
fileHash = subprocess.run(["nix-hash", "--base32", "--type", "sha256", item["path"]], capture_output=True).stdout.decode().strip()
fileSize = os.path.getsize(xzFile)
# Rename the .nar.xz file to its own hash to match "nix copy" behavior
finalXzFile = "nar/" + fileHash + ".nar.xz"
os.rename(xzFile, finalXzFile)
with open(narInfoHash + ".narinfo", "w") as f:
f.writelines((x + "\n" for x in [
"StorePath: " + item["path"],
"URL: " + finalXzFile,
"Compression: xz",
"FileHash: sha256:" + fileHash,
"FileSize: " + str(fileSize),
"NarHash: " + item["narHash"],
"NarSize: " + str(item["narSize"]),
"References: " + " ".join(dropPrefix(ref) for ref in item["references"]),
]))
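# Illustrative shape of a resulting <narinfo-hash>.narinfo file (placeholders
# instead of real hashes), mirroring the fields written above:
#
#   StorePath: /nix/store/<hash>-example-1.0
#   URL: nar/<file-hash>.nar.xz
#   Compression: xz
#   FileHash: sha256:<file-hash>
#   FileSize: <size of the .nar.xz in bytes>
#   NarHash: <narHash from the closure JSON>
#   NarSize: <narSize from the closure JSON>
#   References: <hash>-dep-a <hash>-dep-b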
| 1,329 | 29.227273 | 132 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/replace-secret/replace-secret.py
|
#!/usr/bin/env python
import argparse
from argparse import RawDescriptionHelpFormatter
description = """
Replace a string in one file with a secret from a second file.
Since the secret is read from a file, it won't be leaked through
'/proc/<pid>/cmdline', unlike when 'sed' or 'replace' is used.
"""
parser = argparse.ArgumentParser(
description=description,
formatter_class=RawDescriptionHelpFormatter
)
parser.add_argument("string_to_replace", help="the string to replace")
parser.add_argument("secret_file", help="the file containing the secret")
parser.add_argument("file", help="the file to perform the replacement on")
args = parser.parse_args()
with open(args.secret_file) as sf, open(args.file, 'r+') as f:
old = f.read()
secret = sf.read().strip("\n")
new_content = old.replace(args.string_to_replace, secret)
f.seek(0)
f.write(new_content)
f.truncate()
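# Illustrative invocation (file names are hypothetical):
#
#   replace-secret.py '@dbpass@' /run/keys/db-password /var/lib/app/app.conf
#
# This replaces every occurrence of '@dbpass@' in app.conf with the contents
# of /run/keys/db-password without exposing the secret on the command line.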
| 900 | 30.068966 | 74 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/references-by-popularity/closure-graph.py
|
# IMPORTANT: Making changes?
#
# Validate your changes with python3 ./closure-graph.py --test
# Using a simple algorithm, convert the references to a path into a
# sorted list of dependent paths based on how often they're referenced
# and how deep in the tree they live. Equally-"popular" paths are then
# sorted by name.
#
# The existing writeReferencesToFile prints the paths in a simple
# ascii-based sorting of the paths.
#
# Sorting the paths by graph improves the chances that the difference
# between two builds appear near the end of the list, instead of near
# the beginning. This makes a difference for Nix builds which export a
# closure for another program to consume, if that program implements its
# own level of binary diffing.
#
# For example, Docker Images: if each store path is a separate layer
# then Docker Images can be very efficiently transferred between systems,
# and we get very good cache reuse between images built with the same
# version of Nixpkgs. However, since Docker only reliably supports a
# small number of layers (42) it is important to pick the individual
# layers carefully. By storing very popular store paths in the first 40
# layers, we improve the chances that the next Docker image will share
# many of those layers.*
#
# Given the dependency tree:
#
# A - B - C - D -\
# \ \ \ \
# \ \ \ \
# \ \ - E ---- F
# \- G
#
# Nodes which have multiple references are duplicated:
#
# A - B - C - D - F
# \ \ \
# \ \ \- E - F
# \ \
# \ \- E - F
# \
# \- G
#
# Each leaf node is now replaced by a counter defaulted to 1:
#
# A - B - C - D - (F:1)
# \ \ \
# \ \ \- E - (F:1)
# \ \
# \ \- E - (F:1)
# \
# \- (G:1)
#
# Then each leaf counter is merged with its parent node, replacing the
# parent node with a counter of 1, and each existing counter being
# incremented by 1. That is to say `- D - (F:1)` becomes `- (D:1, F:2)`:
#
# A - B - C - (D:1, F:2)
# \ \ \
# \ \ \- (E:1, F:2)
# \ \
# \ \- (E:1, F:2)
# \
# \- (G:1)
#
# Then each leaf counter is merged with its parent node again, merging
# any counters, then incrementing each:
#
# A - B - (C:1, D:2, E:2, F:5)
# \ \
# \ \- (E:1, F:2)
# \
# \- (G:1)
#
# And again:
#
# A - (B:1, C:2, D:3, E:4, F:8)
# \
# \- (G:1)
#
# And again:
#
# (A:1, B:2, C:3, D:4, E:5, F:9, G:2)
#
# and then paths have the following "popularity":
#
# A 1
# B 2
# C 3
# D 4
# E 5
# F 9
# G 2
#
# and the popularity contest would result in the paths being printed as:
#
# F
# E
# D
# C
# B
# G
# A
#
# * Note: People who have used a Dockerfile before assume Docker's
# Layers are inherently ordered. However, this is not true -- Docker
# layers are content-addressable and are not explicitly layered until
# they are composed in to an Image.
import sys
import json
import unittest
from pprint import pprint
from collections import defaultdict
def debug(msg, *args, **kwargs):
if False:
print(
"DEBUG: {}".format(
msg.format(*args, **kwargs)
),
file=sys.stderr
)
# Find paths in the original dataset which are never referenced by
# any other paths
def find_roots(closures):
roots = [];
for closure in closures:
path = closure['path']
if not any_refer_to(path, closures):
roots.append(path)
return roots
class TestFindRoots(unittest.TestCase):
def test_find_roots(self):
self.assertCountEqual(
find_roots([
{
"path": "/nix/store/foo",
"references": [
"/nix/store/foo",
"/nix/store/bar"
]
},
{
"path": "/nix/store/bar",
"references": [
"/nix/store/bar",
"/nix/store/tux"
]
},
{
"path": "/nix/store/hello",
"references": [
]
}
]),
["/nix/store/foo", "/nix/store/hello"]
)
def any_refer_to(path, closures):
for closure in closures:
if path != closure['path']:
if path in closure['references']:
return True
return False
class TestAnyReferTo(unittest.TestCase):
def test_has_references(self):
self.assertTrue(
any_refer_to(
"/nix/store/bar",
[
{
"path": "/nix/store/foo",
"references": [
"/nix/store/bar"
]
},
]
),
)
def test_no_references(self):
self.assertFalse(
any_refer_to(
"/nix/store/foo",
[
{
"path": "/nix/store/foo",
"references": [
"/nix/store/foo",
"/nix/store/bar"
]
},
]
),
)
def all_paths(closures):
paths = []
for closure in closures:
paths.append(closure['path'])
paths.extend(closure['references'])
paths.sort()
return list(set(paths))
class TestAllPaths(unittest.TestCase):
def test_returns_all_paths(self):
self.assertCountEqual(
all_paths([
{
"path": "/nix/store/foo",
"references": [
"/nix/store/foo",
"/nix/store/bar"
]
},
{
"path": "/nix/store/bar",
"references": [
"/nix/store/bar",
"/nix/store/tux"
]
},
{
"path": "/nix/store/hello",
"references": [
]
}
]),
["/nix/store/foo", "/nix/store/bar", "/nix/store/hello", "/nix/store/tux",]
)
def test_no_references(self):
self.assertFalse(
any_refer_to(
"/nix/store/foo",
[
{
"path": "/nix/store/foo",
"references": [
"/nix/store/foo",
"/nix/store/bar"
]
},
]
),
)
# Convert:
#
# [
# { path: /nix/store/foo, references: [ /nix/store/foo, /nix/store/bar, /nix/store/baz ] },
# { path: /nix/store/bar, references: [ /nix/store/bar, /nix/store/baz ] },
# { path: /nix/store/baz, references: [ /nix/store/baz, /nix/store/tux ] },
# { path: /nix/store/tux, references: [ /nix/store/tux ] }
# ]
#
# To:
# {
# /nix/store/foo: [ /nix/store/bar, /nix/store/baz ],
# /nix/store/bar: [ /nix/store/baz ],
# /nix/store/baz: [ /nix/store/tux ] },
# /nix/store/tux: [ ]
# }
#
# Note that it drops self-references to avoid loops.
def make_lookup(closures):
lookup = {}
for closure in closures:
# paths often self-refer
nonreferential_paths = [ref for ref in closure['references'] if ref != closure['path']]
lookup[closure['path']] = nonreferential_paths
return lookup
class TestMakeLookup(unittest.TestCase):
    def test_returns_lookup(self):
self.assertDictEqual(
make_lookup([
{
"path": "/nix/store/foo",
"references": [
"/nix/store/foo",
"/nix/store/bar"
]
},
{
"path": "/nix/store/bar",
"references": [
"/nix/store/bar",
"/nix/store/tux"
]
},
{
"path": "/nix/store/hello",
"references": [
]
}
]),
{
"/nix/store/foo": [ "/nix/store/bar" ],
"/nix/store/bar": [ "/nix/store/tux" ],
"/nix/store/hello": [ ],
}
)
# Convert:
#
# /nix/store/foo with
# {
# /nix/store/foo: [ /nix/store/bar, /nix/store/baz ],
# /nix/store/bar: [ /nix/store/baz ],
# /nix/store/baz: [ /nix/store/tux ] },
# /nix/store/tux: [ ]
# }
#
# To:
#
# {
# /nix/store/bar: {
# /nix/store/baz: {
# /nix/store/tux: {}
# }
# },
# /nix/store/baz: {
# /nix/store/tux: {}
# }
# }
subgraphs_cache = {}
def make_graph_segment_from_root(root, lookup):
global subgraphs_cache
children = {}
for ref in lookup[root]:
# make_graph_segment_from_root is a pure function, and will
# always return the same result based on a given input. Thus,
# cache computation.
#
# Python's assignment will use a pointer, preventing memory
# bloat for large graphs.
if ref not in subgraphs_cache:
debug("Subgraph Cache miss on {}".format(ref))
subgraphs_cache[ref] = make_graph_segment_from_root(ref, lookup)
else:
debug("Subgraph Cache hit on {}".format(ref))
children[ref] = subgraphs_cache[ref]
return children
class TestMakeGraphSegmentFromRoot(unittest.TestCase):
def test_returns_graph(self):
self.assertDictEqual(
make_graph_segment_from_root("/nix/store/foo", {
"/nix/store/foo": [ "/nix/store/bar" ],
"/nix/store/bar": [ "/nix/store/tux" ],
"/nix/store/tux": [ ],
"/nix/store/hello": [ ],
}),
{
"/nix/store/bar": {
"/nix/store/tux": {}
}
}
)
def test_returns_graph_tiny(self):
self.assertDictEqual(
make_graph_segment_from_root("/nix/store/tux", {
"/nix/store/foo": [ "/nix/store/bar" ],
"/nix/store/bar": [ "/nix/store/tux" ],
"/nix/store/tux": [ ],
}),
{}
)
# Convert a graph segment in to a popularity-counted dictionary:
#
# From:
# {
# /nix/store/foo: {
# /nix/store/bar: {
# /nix/store/baz: {
# /nix/store/tux: {}
# }
# }
# /nix/store/baz: {
# /nix/store/tux: {}
# }
# }
# }
#
# to:
# [
# /nix/store/foo: 1
# /nix/store/bar: 2
# /nix/store/baz: 4
# /nix/store/tux: 6
# ]
popularity_cache = {}
def graph_popularity_contest(full_graph):
global popularity_cache
popularity = defaultdict(int)
for path, subgraph in full_graph.items():
popularity[path] += 1
# graph_popularity_contest is a pure function, and will
# always return the same result based on a given input. Thus,
# cache computation.
#
# Python's assignment will use a pointer, preventing memory
# bloat for large graphs.
if path not in popularity_cache:
debug("Popularity Cache miss on {}", path)
popularity_cache[path] = graph_popularity_contest(subgraph)
else:
debug("Popularity Cache hit on {}", path)
subcontest = popularity_cache[path]
for subpath, subpopularity in subcontest.items():
debug("Calculating popularity for {}", subpath)
popularity[subpath] += subpopularity + 1
return popularity
class TestGraphPopularityContest(unittest.TestCase):
def test_counts_popularity(self):
self.assertDictEqual(
graph_popularity_contest({
"/nix/store/foo": {
"/nix/store/bar": {
"/nix/store/baz": {
"/nix/store/tux": {}
}
},
"/nix/store/baz": {
"/nix/store/tux": {}
}
}
}),
{
"/nix/store/foo": 1,
"/nix/store/bar": 2,
"/nix/store/baz": 4,
"/nix/store/tux": 6,
}
)
# Emit a list of packages by popularity, most first:
#
# From:
# [
# /nix/store/foo: 1
# /nix/store/bar: 1
# /nix/store/baz: 2
# /nix/store/tux: 2
# ]
#
# To:
# [ /nix/store/baz /nix/store/tux /nix/store/bar /nix/store/foo ]
def order_by_popularity(paths):
paths_by_popularity = defaultdict(list)
popularities = []
for path, popularity in paths.items():
popularities.append(popularity)
paths_by_popularity[popularity].append(path)
popularities = list(set(popularities))
popularities.sort()
flat_ordered = []
for popularity in popularities:
paths = paths_by_popularity[popularity]
paths.sort(key=package_name)
flat_ordered.extend(reversed(paths))
return list(reversed(flat_ordered))
class TestOrderByPopularity(unittest.TestCase):
def test_returns_in_order(self):
self.assertEqual(
order_by_popularity({
"/nix/store/foo": 1,
"/nix/store/bar": 1,
"/nix/store/baz": 2,
"/nix/store/tux": 2,
}),
[
"/nix/store/baz",
"/nix/store/tux",
"/nix/store/bar",
"/nix/store/foo"
]
)
def package_name(path):
parts = path.split('-')
start = parts.pop(0)
# don't throw away any data, so the order is always the same.
# even in cases where only the hash at the start has changed.
parts.append(start)
return '-'.join(parts)
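# Illustrative example (hypothetical store path):
#   package_name("/nix/store/abc123-python3-3.11.2")
#   == "python3-3.11.2-/nix/store/abc123"
# so equally-popular paths sort by package name first and by store hash last.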
def main():
filename = sys.argv[1]
key = sys.argv[2]
debug("Loading from {}", filename)
with open(filename) as f:
data = json.load(f)
# Data comes in as:
# [
# { path: /nix/store/foo, references: [ /nix/store/foo, /nix/store/bar, /nix/store/baz ] },
# { path: /nix/store/bar, references: [ /nix/store/bar, /nix/store/baz ] },
# { path: /nix/store/baz, references: [ /nix/store/baz, /nix/store/tux ] },
# { path: /nix/store/tux, references: [ /nix/store/tux ] }
# ]
#
    # and we want to get out a list of paths ordered by how universally
    # important they are, i.e. tux is referenced by every path, transitively,
# so it should be #1
#
# [
# /nix/store/tux,
# /nix/store/baz,
# /nix/store/bar,
# /nix/store/foo,
# ]
graph = data[key]
debug("Finding roots from {}", key)
roots = find_roots(graph);
debug("Making lookup for {}", key)
lookup = make_lookup(graph)
full_graph = {}
for root in roots:
debug("Making full graph for {}", root)
full_graph[root] = make_graph_segment_from_root(root, lookup)
debug("Running contest")
contest = graph_popularity_contest(full_graph)
debug("Ordering by popularity")
ordered = order_by_popularity(contest)
debug("Checking for missing paths")
missing = []
for path in all_paths(graph):
if path not in ordered:
missing.append(path)
ordered.extend(missing)
print("\n".join(ordered))
if "--test" in sys.argv:
# Don't pass --test otherwise unittest gets mad
unittest.main(argv = [f for f in sys.argv if f != "--test" ])
else:
main()
| 16,271 | 27.647887 | 98 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/build-support/setup-hooks/auto-patchelf.py
|
#!/usr/bin/env python3
import argparse
import os
import pprint
import subprocess
import sys
from fnmatch import fnmatch
from collections import defaultdict
from contextlib import contextmanager
from dataclasses import dataclass
from itertools import chain
from pathlib import Path, PurePath
from typing import DefaultDict, Iterator, List, Optional, Set, Tuple
from elftools.common.exceptions import ELFError # type: ignore
from elftools.elf.dynamic import DynamicSection # type: ignore
from elftools.elf.elffile import ELFFile # type: ignore
from elftools.elf.enums import ENUM_E_TYPE, ENUM_EI_OSABI # type: ignore
@contextmanager
def open_elf(path: Path) -> Iterator[ELFFile]:
with path.open('rb') as stream:
yield ELFFile(stream)
def is_static_executable(elf: ELFFile) -> bool:
# Statically linked executables have an ELF type of EXEC but no INTERP.
return (elf.header["e_type"] == 'ET_EXEC'
and not elf.get_section_by_name(".interp"))
def is_dynamic_executable(elf: ELFFile) -> bool:
# We do not require an ELF type of EXEC. This also catches
# position-independent executables, as they typically have an INTERP
# section but their ELF type is DYN.
return bool(elf.get_section_by_name(".interp"))
def get_dependencies(elf: ELFFile) -> List[str]:
dependencies = []
# This convoluted code is here on purpose. For some reason, using
# elf.get_section_by_name(".dynamic") does not always return an
# instance of DynamicSection, but that is required to call iter_tags
for section in elf.iter_sections():
if isinstance(section, DynamicSection):
for tag in section.iter_tags('DT_NEEDED'):
dependencies.append(tag.needed)
break # There is only one dynamic section
return dependencies
def get_rpath(elf: ELFFile) -> List[str]:
# This convoluted code is here on purpose. For some reason, using
# elf.get_section_by_name(".dynamic") does not always return an
# instance of DynamicSection, but that is required to call iter_tags
for section in elf.iter_sections():
if isinstance(section, DynamicSection):
for tag in section.iter_tags('DT_RUNPATH'):
return tag.runpath.split(':')
for tag in section.iter_tags('DT_RPATH'):
return tag.rpath.split(':')
break # There is only one dynamic section
return []
def get_arch(elf: ELFFile) -> str:
return elf.get_machine_arch()
def get_osabi(elf: ELFFile) -> str:
return elf.header["e_ident"]["EI_OSABI"]
def osabi_are_compatible(wanted: str, got: str) -> bool:
"""
Tests whether two OS ABIs are compatible, taking into account the
generally accepted compatibility of SVR4 ABI with other ABIs.
"""
if not wanted or not got:
# One of the types couldn't be detected, so as a fallback we'll
# assume they're compatible.
return True
# Generally speaking, the base ABI (0x00), which is represented by
# readelf(1) as "UNIX - System V", indicates broad compatibility
# with other ABIs.
#
# TODO: This isn't always true. For example, some OSes embed ABI
# compatibility into SHT_NOTE sections like .note.tag and
# .note.ABI-tag. It would be prudent to add these to the detection
# logic to produce better ABI information.
if wanted == 'ELFOSABI_SYSV':
return True
# Similarly here, we should be able to link against a superset of
# features, so even if the target has another ABI, this should be
# fine.
if got == 'ELFOSABI_SYSV':
return True
# Otherwise, we simply return whether the ABIs are identical.
return wanted == got
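# An illustrative truth table for the rules above (the non-SYSV names are
# assumptions based on the standard ELF OSABI enum, not values observed here):
#
#   osabi_are_compatible('ELFOSABI_SYSV',  'ELFOSABI_LINUX')   -> True
#   osabi_are_compatible('ELFOSABI_LINUX', 'ELFOSABI_SYSV')    -> True
#   osabi_are_compatible('ELFOSABI_LINUX', 'ELFOSABI_FREEBSD') -> False
#   osabi_are_compatible('',               'ELFOSABI_LINUX')   -> True   (unknown ABI, assume compatible)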
def glob(path: Path, pattern: str, recursive: bool) -> Iterator[Path]:
if path.is_dir():
return path.rglob(pattern) if recursive else path.glob(pattern)
else:
# path.glob won't return anything if the path is not a directory.
# We extend that behavior by matching the file name against the pattern.
# This allows to pass single files instead of dirs to auto_patchelf,
# for greater control on the files to consider.
return [path] if path.match(pattern) else []
cached_paths: Set[Path] = set()
soname_cache: DefaultDict[Tuple[str, str], List[Tuple[Path, str]]] = defaultdict(list)
def populate_cache(initial: List[Path], recursive: bool =False) -> None:
lib_dirs = list(initial)
while lib_dirs:
lib_dir = lib_dirs.pop(0)
if lib_dir in cached_paths:
continue
cached_paths.add(lib_dir)
for path in glob(lib_dir, "*.so*", recursive):
if not path.is_file():
continue
# As an optimisation, resolve the symlinks here, as the target is unique
# XXX: (layus, 2022-07-25) is this really an optimisation in all cases ?
# It could make the rpath bigger or break the fragile precedence of $out.
resolved = path.resolve()
# Do not use resolved paths when names do not match
if resolved.name != path.name:
resolved = path
try:
with open_elf(path) as elf:
osabi = get_osabi(elf)
arch = get_arch(elf)
rpath = [Path(p) for p in get_rpath(elf)
if p and '$ORIGIN' not in p]
lib_dirs += rpath
soname_cache[(path.name, arch)].append((resolved.parent, osabi))
except ELFError:
# Not an ELF file in the right format
pass
def find_dependency(soname: str, soarch: str, soabi: str) -> Optional[Path]:
for lib, libabi in soname_cache[(soname, soarch)]:
if osabi_are_compatible(soabi, libabi):
return lib
return None
@dataclass
class Dependency:
file: Path # The file that contains the dependency
name: Path # The name of the dependency
found: bool = False # Whether it was found somewhere
def auto_patchelf_file(path: Path, runtime_deps: list[Path], append_rpaths: List[Path] = []) -> list[Dependency]:
try:
with open_elf(path) as elf:
if is_static_executable(elf):
# No point patching these
print(f"skipping {path} because it is statically linked")
return []
if elf.num_segments() == 0:
# no segment (e.g. object file)
print(f"skipping {path} because it contains no segment")
return []
file_arch = get_arch(elf)
if interpreter_arch != file_arch:
# Our target architecture is different than this file's
# architecture, so skip it.
print(f"skipping {path} because its architecture ({file_arch})"
f" differs from target ({interpreter_arch})")
return []
file_osabi = get_osabi(elf)
if not osabi_are_compatible(interpreter_osabi, file_osabi):
print(f"skipping {path} because its OS ABI ({file_osabi}) is"
f" not compatible with target ({interpreter_osabi})")
return []
file_is_dynamic_executable = is_dynamic_executable(elf)
file_dependencies = map(Path, get_dependencies(elf))
except ELFError:
return []
rpath = []
if file_is_dynamic_executable:
print("setting interpreter of", path)
subprocess.run(
["patchelf", "--set-interpreter", interpreter_path.as_posix(), path.as_posix()],
check=True)
rpath += runtime_deps
print("searching for dependencies of", path)
dependencies = []
# Be sure to get the output of all missing dependencies instead of
# failing at the first one, because it's more useful when working
# on a new package where you don't yet know the dependencies.
for dep in file_dependencies:
if dep.is_absolute() and dep.is_file():
# This is an absolute path. If it exists, just use it.
# Otherwise, we probably want this to produce an error when
# checked (because just updating the rpath won't satisfy
# it).
continue
elif (libc_lib / dep).is_file():
# This library exists in libc, and will be correctly
# resolved by the linker.
continue
if found_dependency := find_dependency(dep.name, file_arch, file_osabi):
rpath.append(found_dependency)
dependencies.append(Dependency(path, dep, True))
print(f" {dep} -> found: {found_dependency}")
else:
dependencies.append(Dependency(path, dep, False))
print(f" {dep} -> not found!")
rpath.extend(append_rpaths)
# Dedup the rpath
rpath_str = ":".join(dict.fromkeys(map(Path.as_posix, rpath)))
if rpath:
print("setting RPATH to:", rpath_str)
subprocess.run(
["patchelf", "--set-rpath", rpath_str, path.as_posix()],
check=True)
return dependencies
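# Rough sketch of how this script is typically driven from a build hook (the
# flag names match the argparse definitions in main() below; the store paths
# are placeholders, not real outputs):
#
#   auto-patchelf --paths "$out" \
#                 --libs "$out/lib" /nix/store/<hash>-somedep/lib \
#                 --runtime-dependencies /nix/store/<hash>-otherdep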
def auto_patchelf(
paths_to_patch: List[Path],
lib_dirs: List[Path],
runtime_deps: List[Path],
recursive: bool = True,
ignore_missing: List[str] = [],
append_rpaths: List[Path] = []) -> None:
if not paths_to_patch:
sys.exit("No paths to patch, stopping.")
# Add all shared objects of the current output path to the cache,
# before lib_dirs, so that they are chosen first in find_dependency.
populate_cache(paths_to_patch, recursive)
populate_cache(lib_dirs)
dependencies = []
for path in chain.from_iterable(glob(p, '*', recursive) for p in paths_to_patch):
if not path.is_symlink() and path.is_file():
dependencies += auto_patchelf_file(path, runtime_deps, append_rpaths)
missing = [dep for dep in dependencies if not dep.found]
# Print a summary of the missing dependencies at the end
print(f"auto-patchelf: {len(missing)} dependencies could not be satisfied")
failure = False
for dep in missing:
for pattern in ignore_missing:
if fnmatch(dep.name.name, pattern):
print(f"warn: auto-patchelf ignoring missing {dep.name} wanted by {dep.file}")
break
else:
print(f"error: auto-patchelf could not satisfy dependency {dep.name} wanted by {dep.file}")
failure = True
if failure:
sys.exit('auto-patchelf failed to find all the required dependencies.\n'
'Add the missing dependencies to --libs or use '
'`--ignore-missing="foo.so.1 bar.so etc.so"`.')
def main() -> None:
parser = argparse.ArgumentParser(
prog="auto-patchelf",
description='auto-patchelf tries as hard as possible to patch the'
' provided binary files by looking for compatible'
                    ' libraries in the provided paths.')
parser.add_argument(
"--ignore-missing",
nargs="*",
type=str,
help="Do not fail when some dependencies are not found.")
parser.add_argument(
"--no-recurse",
dest="recursive",
action="store_false",
help="Disable the recursive traversal of paths to patch.")
parser.add_argument(
"--paths", nargs="*", type=Path,
help="Paths whose content needs to be patched."
" Single files and directories are accepted."
" Directories are traversed recursively by default.")
parser.add_argument(
"--libs", nargs="*", type=Path,
help="Paths where libraries are searched for."
" Single files and directories are accepted."
" Directories are not searched recursively.")
parser.add_argument(
"--runtime-dependencies", nargs="*", type=Path,
help="Paths to prepend to the runtime path of executable binaries."
" Subject to deduplication, which may imply some reordering.")
parser.add_argument(
"--append-rpaths",
nargs="*",
type=Path,
help="Paths to append to all runtime paths unconditionally",
)
print("automatically fixing dependencies for ELF files")
args = parser.parse_args()
pprint.pprint(vars(args))
auto_patchelf(
args.paths,
args.libs,
args.runtime_dependencies,
args.recursive,
args.ignore_missing,
append_rpaths=args.append_rpaths)
interpreter_path: Path = None # type: ignore
interpreter_osabi: str = None # type: ignore
interpreter_arch: str = None # type: ignore
libc_lib: Path = None # type: ignore
if __name__ == "__main__":
nix_support = Path(os.environ['NIX_BINTOOLS']) / 'nix-support'
interpreter_path = Path((nix_support / 'dynamic-linker').read_text().strip())
libc_lib = Path((nix_support / 'orig-libc').read_text().strip()) / 'lib'
with open_elf(interpreter_path) as interpreter:
interpreter_osabi = get_osabi(interpreter)
interpreter_arch = get_arch(interpreter)
if interpreter_arch and interpreter_osabi and interpreter_path and libc_lib:
main()
else:
sys.exit("Failed to parse dynamic linker (ld) properties.")
| 13,424 | 35.480978 | 113 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/apache-airflow/update-providers.py
|
#! /usr/bin/env python3
from itertools import chain
import json
import logging
from pathlib import Path
import os
import re
import subprocess
import sys
from typing import Dict, List, Optional, Set, TextIO
from urllib.request import urlopen
from urllib.error import HTTPError
import yaml
PKG_SET = "apache-airflow.pythonPackages"
# If some requirements are matched by multiple or no Python packages, the
# following can be used to choose the correct one
PKG_PREFERENCES = {
"dnspython": "dnspython",
"elasticsearch-dsl": "elasticsearch-dsl",
"google-api-python-client": "google-api-python-client",
"psycopg2-binary": "psycopg2",
"requests_toolbelt": "requests-toolbelt",
}
# Requirements missing from the airflow provider metadata
EXTRA_REQS = {
"sftp": ["pysftp"],
}
def get_version():
with open(os.path.dirname(sys.argv[0]) + "/default.nix") as fh:
# A version consists of digits, dots, and possibly a "b" (for beta)
m = re.search('version = "([\\d\\.b]+)";', fh.read())
return m.group(1)
def get_file_from_github(version: str, path: str):
with urlopen(
f"https://raw.githubusercontent.com/apache/airflow/{version}/{path}"
) as response:
return yaml.safe_load(response)
def repository_root() -> Path:
return Path(os.path.dirname(sys.argv[0])) / "../../../.."
def dump_packages() -> Dict[str, Dict[str, str]]:
# Store a JSON dump of Nixpkgs' python3Packages
output = subprocess.check_output(
[
"nix-env",
"-f",
repository_root(),
"-qa",
"-A",
PKG_SET,
"--arg",
"config",
"{ allowAliases = false; }",
"--json",
]
)
return json.loads(output)
def remove_version_constraint(req: str) -> str:
return re.sub(r"[=><~].*$", "", req)
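# For example (hypothetical requirement strings, shown only to illustrate the
# regex above):
#
#   remove_version_constraint("requests>=2.28")  -> "requests"
#   remove_version_constraint("pyyaml~=6.0")     -> "pyyaml"
#   remove_version_constraint("numpy")           -> "numpy"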
def name_to_attr_path(req: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]:
if req in PKG_PREFERENCES:
return f"{PKG_SET}.{PKG_PREFERENCES[req]}"
attr_paths = []
names = [req]
# E.g. python-mpd2 is actually called python3.6-mpd2
# instead of python-3.6-python-mpd2 inside Nixpkgs
if req.startswith("python-") or req.startswith("python_"):
names.append(req[len("python-") :])
for name in names:
# treat "-" and "_" equally
name = re.sub("[-_]", "[-_]", name)
# python(minor).(major)-(pname)-(version or unstable-date)
# we need the version qualifier, or we'll have multiple matches
# (e.g. pyserial and pyserial-asyncio when looking for pyserial)
pattern = re.compile(
f"^python\\d+\\.\\d+-{name}-(?:\\d|unstable-.*)", re.I
)
for attr_path, package in packages.items():
# logging.debug("Checking match for %s with %s", name, package["name"])
if pattern.match(package["name"]):
attr_paths.append(attr_path)
# Let's hope there's only one derivation with a matching name
assert len(attr_paths) <= 1, f"{req} matches more than one derivation: {attr_paths}"
if attr_paths:
return attr_paths[0]
return None
def provider_reqs_to_attr_paths(reqs: List, packages: Dict) -> List:
no_version_reqs = map(remove_version_constraint, reqs)
filtered_reqs = [
req for req in no_version_reqs if not re.match(r"^apache-airflow", req)
]
attr_paths = []
for req in filtered_reqs:
attr_path = name_to_attr_path(req, packages)
if attr_path is not None:
# Add attribute path without "python3Packages." prefix
pname = attr_path[len(PKG_SET + ".") :]
attr_paths.append(pname)
else:
# If we can't find it, we just skip and warn the user
logging.warning("Could not find package attr for %s", req)
return attr_paths
def get_cross_provider_reqs(
provider: str, provider_reqs: Dict, cross_provider_deps: Dict, seen: List = None
) -> Set:
# Unfortunately there are circular cross-provider dependencies, so keep a
# list of ones we've seen already
seen = seen or []
reqs = set(provider_reqs[provider])
if len(cross_provider_deps[provider]) > 0:
reqs.update(
chain.from_iterable(
get_cross_provider_reqs(
d, provider_reqs, cross_provider_deps, seen + [provider]
)
if d not in seen
else []
for d in cross_provider_deps[provider]
)
)
return reqs
def get_provider_reqs(version: str, packages: Dict) -> Dict:
provider_dependencies = get_file_from_github(
version, "generated/provider_dependencies.json"
)
provider_reqs = {}
cross_provider_deps = {}
for provider, provider_data in provider_dependencies.items():
provider_reqs[provider] = list(
provider_reqs_to_attr_paths(provider_data["deps"], packages)
) + EXTRA_REQS.get(provider, [])
cross_provider_deps[provider] = [
d for d in provider_data["cross-providers-deps"] if d != "common.sql"
]
transitive_provider_reqs = {}
# Add transitive cross-provider reqs
for provider in provider_reqs:
transitive_provider_reqs[provider] = get_cross_provider_reqs(
provider, provider_reqs, cross_provider_deps
)
return transitive_provider_reqs
def get_provider_yaml(version: str, provider: str) -> Dict:
provider_dir = provider.replace(".", "/")
path = f"airflow/providers/{provider_dir}/provider.yaml"
try:
return get_file_from_github(version, path)
except HTTPError:
logging.warning("Couldn't get provider yaml for %s", provider)
return {}
def get_provider_imports(version: str, providers) -> Dict:
provider_imports = {}
for provider in providers:
provider_yaml = get_provider_yaml(version, provider)
imports: List[str] = []
if "hooks" in provider_yaml:
imports.extend(
chain.from_iterable(
hook["python-modules"] for hook in provider_yaml["hooks"]
)
)
if "operators" in provider_yaml:
imports.extend(
chain.from_iterable(
operator["python-modules"]
for operator in provider_yaml["operators"]
)
)
provider_imports[provider] = imports
return provider_imports
def to_nix_expr(provider_reqs: Dict, provider_imports: Dict, fh: TextIO) -> None:
fh.write("# Warning: generated by update-providers.py, do not update manually\n")
fh.write("{\n")
for provider, reqs in provider_reqs.items():
provider_name = provider.replace(".", "_")
fh.write(f" {provider_name} = {{\n")
fh.write(
" deps = [ " + " ".join(sorted(f'"{req}"' for req in reqs)) + " ];\n"
)
fh.write(
" imports = [ "
+ " ".join(sorted(f'"{imp}"' for imp in provider_imports[provider]))
+ " ];\n"
)
fh.write(" };\n")
fh.write("}\n")
def main() -> None:
logging.basicConfig(level=logging.INFO)
version = get_version()
packages = dump_packages()
logging.info("Generating providers.nix for version %s", version)
provider_reqs = get_provider_reqs(version, packages)
provider_imports = get_provider_imports(version, provider_reqs.keys())
with open("providers.nix", "w") as fh:
to_nix_expr(provider_reqs, provider_imports, fh)
if __name__ == "__main__":
main()
| 7,642 | 32.52193 | 88 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/home-assistant/parse-requirements.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ packaging rich ])" -p "nodePackages.pyright ruff isort"
#
# This script downloads Home Assistant's source tarball.
# Inside the homeassistant/components directory, each integration has an associated manifest.json,
# specifying required packages and other integrations it depends on:
#
# {
# "requirements": [ "package==1.2.3" ],
# "dependencies": [ "component" ]
# }
#
# By parsing the files, a dictionary mapping integrations to requirements and dependencies is created.
# For all of these requirements and the dependencies' requirements,
# nixpkgs' python3Packages are searched for appropriate names.
# Then, a Nix attribute set mapping integration name to dependencies is created.
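# The generated component-packages.nix ends up looking roughly like the sketch
# below (component and package names are made up; the exact layout is produced
# by main() further down):
#
# {
#   version = "...";
#   components = {
#     "mqtt" = ps: with ps; [
#       paho-mqtt
#     ];
#   };
#   supportedComponentsWithTests = [
#     "mqtt"
#   ];
# }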
import json
import os
import pathlib
import re
import subprocess
import sys
import tarfile
import tempfile
from functools import reduce
from io import BytesIO
from typing import Any, Dict, List, Optional, Set
from urllib.request import urlopen
from packaging import version as Version
from packaging.version import InvalidVersion
from rich.console import Console
from rich.table import Table
COMPONENT_PREFIX = "homeassistant.components"
PKG_SET = "home-assistant.python.pkgs"
# If some requirements are matched by multiple or no Python packages, the
# following can be used to choose the correct one
PKG_PREFERENCES = {
"fiblary3": "fiblary3-fork", # https://github.com/home-assistant/core/issues/66466
"ha-av": "av",
"HAP-python": "hap-python",
"tensorflow": "tensorflow",
"yt-dlp": "yt-dlp",
}
# Some dependencies are loaded dynamically at runtime, and are not
# mentioned in the manifest files.
EXTRA_COMPONENT_DEPS = {
"conversation": [
"intent"
],
"default_config": [
"backup",
],
}
def run_sync(cmd: List[str]) -> None:
print(f"$ {' '.join(cmd)}")
process = subprocess.run(cmd)
if process.returncode != 0:
sys.exit(1)
def get_version() -> str:
with open(os.path.dirname(sys.argv[0]) + "/default.nix") as f:
# A version consists of digits, dots, and possibly a "b" (for beta)
if match := re.search('hassVersion = "([\\d\\.b]+)";', f.read()):
return match.group(1)
raise RuntimeError("hassVersion not in default.nix")
def parse_components(version: str = "master"):
components = {}
components_with_tests = []
with tempfile.TemporaryDirectory() as tmp:
with urlopen(
f"https://github.com/home-assistant/home-assistant/archive/{version}.tar.gz"
) as response:
tarfile.open(fileobj=BytesIO(response.read())).extractall(tmp)
# Use part of a script from the Home Assistant codebase
core_path = os.path.join(tmp, f"core-{version}")
for entry in os.scandir(os.path.join(core_path, "tests/components")):
if entry.is_dir():
components_with_tests.append(entry.name)
sys.path.append(core_path)
from script.hassfest.model import Integration # type: ignore
integrations = Integration.load_dir(
pathlib.Path(
os.path.join(core_path, "homeassistant/components")
)
)
for domain in sorted(integrations):
integration = integrations[domain]
if extra_deps := EXTRA_COMPONENT_DEPS.get(integration.domain):
integration.dependencies.extend(extra_deps)
if not integration.disabled:
components[domain] = integration.manifest
return components, components_with_tests
# Recursively get the requirements of a component and its dependencies
def get_reqs(components: Dict[str, Dict[str, Any]], component: str, processed: Set[str]) -> Set[str]:
requirements = set(components[component].get("requirements", []))
deps = components[component].get("dependencies", [])
deps.extend(components[component].get("after_dependencies", []))
processed.add(component)
for dependency in deps:
if dependency not in processed:
requirements.update(get_reqs(components, dependency, processed))
return requirements
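# Sketch of the recursion above with made-up manifests: if "light" lists
# dependencies ["group"] and "group" lists requirements ["pkgA==1.0"], then
# get_reqs(components, "light", set()) returns the requirements of "light"
# itself plus {"pkgA==1.0"}; the processed set keeps circular dependencies
# from recursing forever.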
def repository_root() -> str:
return os.path.abspath(sys.argv[0] + "/../../../..")
# For a package attribute and an extra, check if the package exposes it via passthru.optional-dependencies
def has_extra(package: str, extra: str):
cmd = [
"nix-instantiate",
repository_root(),
"-A",
f"{package}.optional-dependencies.{extra}",
]
try:
subprocess.run(
cmd,
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
except subprocess.CalledProcessError:
return False
return True
def dump_packages() -> Dict[str, Dict[str, str]]:
# Store a JSON dump of Nixpkgs' python3Packages
output = subprocess.check_output(
[
"nix-env",
"-f",
repository_root(),
"-qa",
"-A",
PKG_SET,
"--arg", "config", "{ allowAliases = false; }",
"--json",
]
)
return json.loads(output)
def name_to_attr_path(req: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]:
if req in PKG_PREFERENCES:
return f"{PKG_SET}.{PKG_PREFERENCES[req]}"
attr_paths = []
names = [req]
# E.g. python-mpd2 is actually called python3.6-mpd2
# instead of python-3.6-python-mpd2 inside Nixpkgs
if req.startswith("python-") or req.startswith("python_"):
names.append(req[len("python-") :])
for name in names:
# treat "-" and "_" equally
name = re.sub("[-_]", "[-_]", name)
# python(minor).(major)-(pname)-(version or unstable-date)
# we need the version qualifier, or we'll have multiple matches
# (e.g. pyserial and pyserial-asyncio when looking for pyserial)
pattern = re.compile(f"^python\\d+\\.\\d+-{name}-(?:\\d|unstable-.*)", re.I)
for attr_path, package in packages.items():
if pattern.match(package["name"]):
attr_paths.append(attr_path)
# Let's hope there's only one derivation with a matching name
assert len(attr_paths) <= 1, f"{req} matches more than one derivation: {attr_paths}"
if attr_paths:
return attr_paths[0]
else:
return None
def get_pkg_version(attr_path: str, packages: Dict[str, Dict[str, str]]) -> Optional[str]:
pkg = packages.get(attr_path, None)
if not pkg:
return None
return pkg["version"]
def main() -> None:
packages = dump_packages()
version = get_version()
print("Generating component-packages.nix for version {}".format(version))
components, components_with_tests = parse_components(version=version)
build_inputs = {}
outdated = {}
for component in sorted(components.keys()):
attr_paths = []
extra_attrs = []
missing_reqs = []
reqs = sorted(get_reqs(components, component, set()))
for req in reqs:
# Some requirements are specified by url, e.g. https://example.org/foobar#xyz==1.0.0
# Therefore, if there's a "#" in the line, only take the part after it
req = req[req.find("#") + 1 :]
name, required_version = req.split("==", maxsplit=1)
# Strip conditions off version constraints e.g. "1.0; python<3.11"
required_version = required_version.split(";").pop(0)
# Split package name and extra requires
extras = []
if name.endswith("]"):
extras = name[name.find("[")+1:name.find("]")].split(",")
name = name[:name.find("[")]
attr_path = name_to_attr_path(name, packages)
if attr_path:
if our_version := get_pkg_version(attr_path, packages):
attr_name = attr_path.split(".")[-1]
attr_outdated = False
try:
Version.parse(our_version)
except InvalidVersion:
print(f"Attribute {attr_name} has invalid version specifier {our_version}", file=sys.stderr)
attr_outdated = True
else:
attr_outdated = Version.parse(our_version) < Version.parse(required_version)
finally:
if attr_outdated:
outdated[attr_name] = {
'wanted': required_version,
'current': our_version
}
if attr_path is not None:
# Add attribute path without "python3Packages." prefix
pname = attr_path[len(PKG_SET + "."):]
attr_paths.append(pname)
for extra in extras:
# Check if package advertises extra requirements
extra_attr = f"{pname}.optional-dependencies.{extra}"
if has_extra(attr_path, extra):
extra_attrs.append(extra_attr)
else:
missing_reqs.append(extra_attr)
else:
missing_reqs.append(name)
else:
build_inputs[component] = (attr_paths, extra_attrs, missing_reqs)
with open(os.path.dirname(sys.argv[0]) + "/component-packages.nix", "w") as f:
f.write("# Generated by parse-requirements.py\n")
f.write("# Do not edit!\n\n")
f.write("{\n")
f.write(f' version = "{version}";\n')
f.write(" components = {\n")
for component, deps in build_inputs.items():
available, extras, missing = deps
f.write(f' "{component}" = ps: with ps; [')
if available:
f.write("\n " + "\n ".join(sorted(available)))
f.write("\n ]")
if extras:
f.write("\n ++ " + "\n ++ ".join(sorted(extras)))
f.write(";")
if len(missing) > 0:
f.write(f" # missing inputs: {' '.join(sorted(missing))}")
f.write("\n")
f.write(" };\n")
f.write(" # components listed in tests/components for which all dependencies are packaged\n")
f.write(" supportedComponentsWithTests = [\n")
for component, deps in build_inputs.items():
available, extras, missing = deps
if len(missing) == 0 and component in components_with_tests:
f.write(f' "{component}"' + "\n")
f.write(" ];\n")
f.write("}\n")
supported_components = reduce(lambda n, c: n + (build_inputs[c][2] == []),
components.keys(), 0)
total_components = len(components)
print(f"{supported_components} / {total_components} components supported, "
f"i.e. {supported_components / total_components:.2%}")
if outdated:
table = Table(title="Outdated dependencies")
table.add_column("Package")
table.add_column("Current")
table.add_column("Wanted")
for package, version in sorted(outdated.items()):
table.add_row(package, version['current'], version['wanted'])
console = Console()
console.print(table)
if __name__ == "__main__":
run_sync(["pyright", __file__])
run_sync(["ruff", "--ignore=E501", __file__])
run_sync(["isort", __file__])
main()
| 11,563 | 36.667752 | 119 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/home-assistant/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=channel:nixpkgs-unstable -i python3 -p "python3.withPackages (ps: with ps; [ aiohttp packaging ])" -p git nurl nodePackages.pyright ruff isort
import asyncio
import json
import os
import re
import sys
from subprocess import check_output, run
from typing import Dict, Final, List, Optional, Union
import aiohttp
from aiohttp import ClientSession
from packaging.version import Version
ROOT: Final = check_output([
"git",
"rev-parse",
"--show-toplevel",
]).decode().strip()
def run_sync(cmd: List[str]) -> None:
print(f"$ {' '.join(cmd)}")
process = run(cmd)
if process.returncode != 0:
sys.exit(1)
async def check_async(cmd: List[str]) -> str:
print(f"$ {' '.join(cmd)}")
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
error = stderr.decode()
raise RuntimeError(f"{cmd[0]} failed: {error}")
return stdout.decode().strip()
async def run_async(cmd: List[str]):
print(f"$ {' '.join(cmd)}")
process = await asyncio.create_subprocess_exec(
*cmd,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
stdout, stderr = await process.communicate()
print(stdout.decode())
if process.returncode != 0:
error = stderr.decode()
raise RuntimeError(f"{cmd[0]} failed: {error}")
class File:
def __init__(self, path: str):
self.path = os.path.join(ROOT, path)
def __enter__(self):
with open(self.path, "r") as handle:
self.text = handle.read()
return self
def get_exact_match(self, attr: str, value: str):
matches = re.findall(
rf'{re.escape(attr)}\s+=\s+\"?{re.escape(value)}\"?',
self.text
)
n = len(matches)
if n > 1:
raise ValueError(f"multiple occurrences found for {attr}={value}")
elif n == 1:
return matches.pop()
else:
raise ValueError(f"no occurrence found for {attr}={value}")
def substitute(self, attr: str, old_value: str, new_value: str) -> None:
old_line = self.get_exact_match(attr, old_value)
new_line = old_line.replace(old_value, new_value)
self.text = self.text.replace(old_line, new_line)
print(f"Substitute `{attr}` value `{old_value}` with `{new_value}`")
def __exit__(self, exc_type, exc_val, exc_tb):
with open(self.path, "w") as handle:
handle.write(self.text)
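# Usage sketch (hypothetical version strings; this mirrors how update_core and
# update_frontend below use the class):
#
#   with File("pkgs/servers/home-assistant/default.nix") as f:
#       f.substitute("hassVersion", "2023.5.0", "2023.5.1")
#
# The single matching `hassVersion = "..."` assignment is rewritten in memory,
# and the file is written back when the with-block exits.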
class Nurl:
@classmethod
async def prefetch(cls, url: str, version: str, *extra_args: str) -> str:
cmd = [
"nurl",
"--hash",
url,
version,
]
cmd.extend(extra_args)
return await check_async(cmd)
class Nix:
base_cmd: Final = [
"nix",
"--show-trace",
"--extra-experimental-features", "nix-command"
]
@classmethod
async def _run(cls, args: List[str]) -> Optional[str]:
return await check_async(cls.base_cmd + args)
@classmethod
async def eval(cls, expr: str) -> Union[List, Dict, int, float, str, bool]:
response = await cls._run([
"eval",
"-f", f"{ROOT}/default.nix",
"--json",
expr
])
if response is None:
raise RuntimeError("Nix eval expression returned no response")
try:
return json.loads(response)
except (TypeError, ValueError):
raise RuntimeError("Nix eval response could not be parsed from JSON")
@classmethod
async def hash_to_sri(cls, algorithm: str, value: str) -> Optional[str]:
return await cls._run([
"hash",
"to-sri",
"--type", algorithm,
value
])
class HomeAssistant:
def __init__(self, session: ClientSession):
self._session = session
async def get_latest_core_version(
self,
owner: str = "home-assistant",
repo: str = "core"
) -> str:
async with self._session.get(
f"https://api.github.com/repos/{owner}/{repo}/releases/latest"
) as response:
document = await response.json()
try:
return str(document.get("name"))
except KeyError:
raise RuntimeError("No tag name in response document")
async def get_latest_frontend_version(
self,
core_version: str
) -> str:
async with self._session.get(
f"https://raw.githubusercontent.com/home-assistant/core/{core_version}/homeassistant/components/frontend/manifest.json"
) as response:
document = await response.json(content_type="text/plain")
requirements = [
requirement
for requirement in document.get("requirements", [])
if requirement.startswith("home-assistant-frontend==")
]
if len(requirements) > 1:
raise RuntimeError(
"Found more than one version specifier for the frontend package"
)
elif len(requirements) == 1:
requirement = requirements.pop()
_, version = requirement.split("==", maxsplit=1)
return str(version)
else:
raise RuntimeError(
"Found no version specifier for frontend package"
)
async def update_core(self, old_version: str, new_version: str) -> None:
old_sdist_hash = str(await Nix.eval("home-assistant.src.outputHash"))
new_sdist_hash = await Nurl.prefetch("https://pypi.org/project/homeassistant/", new_version)
print(f"sdist: {old_sdist_hash} -> {new_sdist_hash}")
old_git_hash = str(await Nix.eval("home-assistant.gitSrc.outputHash"))
new_git_hash = await Nurl.prefetch("https://github.com/home-assistant/core/", new_version)
print(f"git: {old_git_hash} -> {new_git_hash}")
with File("pkgs/servers/home-assistant/default.nix") as file:
file.substitute("hassVersion", old_version, new_version)
file.substitute("hash", old_sdist_hash, new_sdist_hash)
file.substitute("hash", old_git_hash, new_git_hash)
async def update_frontend(self, old_version: str, new_version: str) -> None:
old_hash = str(await Nix.eval("home-assistant.frontend.src.outputHash"))
new_hash = await Nurl.prefetch(
"https://pypi.org/project/home_assistant_frontend/",
new_version,
"-A", "format", "wheel",
"-A", "dist", "py3",
"-A", "python", "py3"
)
print(f"frontend: {old_hash} -> {new_hash}")
with File("pkgs/servers/home-assistant/frontend.nix") as file:
file.substitute("version", old_version, new_version)
file.substitute("hash", old_hash, new_hash)
async def update_components(self):
await run_async([
f"{ROOT}/pkgs/servers/home-assistant/parse-requirements.py"
])
async def main():
headers = {}
if token := os.environ.get("GITHUB_TOKEN", None):
headers.update({"GITHUB_TOKEN": token})
async with aiohttp.ClientSession(headers=headers) as client:
hass = HomeAssistant(client)
core_current = str(await Nix.eval("home-assistant.version"))
core_latest = await hass.get_latest_core_version()
if Version(core_latest) > Version(core_current):
print(f"New Home Assistant version {core_latest} is available")
await hass.update_core(str(core_current), str(core_latest))
frontend_current = str(await Nix.eval("home-assistant.frontend.version"))
frontend_latest = await hass.get_latest_frontend_version(str(core_latest))
if Version(frontend_latest) > Version(frontend_current):
await hass.update_frontend(str(frontend_current), str(frontend_latest))
await hass.update_components()
else:
print(f"Home Assistant {core_current} is still the latest version.")
# wait for async client sessions to close
# https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
await asyncio.sleep(0)
if __name__ == "__main__":
run_sync(["pyright", __file__])
run_sync(["ruff", "--ignore=E501", __file__])
run_sync(["isort", __file__])
asyncio.run(main())
| 8,577 | 31.492424 | 165 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/dict/wordnet_structures.py
|
#!/usr/bin/env python3
#Copyright 2007 Sebastian Hagen
# This file is part of wordnet_tools.
# wordnet_tools is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation
# wordnet_tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wordnet_tools; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This program requires python >= 2.4.
# This program converts wordnet index/data file pairs into dict index/data
# files usable by dictd.
# This is basically a reimplementation of the wnfilter program by Rik Faith,
# which unfortunately doesn't work correctly for wordnet files in the newer
# formats. This version of wordnet_structures should parse wordnet 2.1 files
# correctly, and create output very similar to what wnfilter would have
# written.
import datetime
import math
from textwrap import TextWrapper
CAT_ADJECTIVE = 0
CAT_ADVERB = 1
CAT_NOUN = 2
CAT_VERB = 3
category_map = {
'n': CAT_NOUN,
'v': CAT_VERB,
'a': CAT_ADJECTIVE,
's': CAT_ADJECTIVE,
'r': CAT_ADVERB
}
class WordIndex:
def __init__(self, lemma, category, ptrs, synsets, tagsense_count):
self.lemma = lemma
self.category = category
self.ptrs = ptrs
self.synsets = synsets
self.tagsense_count = tagsense_count
@classmethod
def build_from_line(cls, line_data, synset_map):
line_split = line_data.split()
lemma = line_split[0]
category = category_map[line_split[1]]
synset_count = int(line_split[2],10)
ptr_count = int(line_split[3],10)
ptrs = [line_split[i] for i in range(3, 3+ptr_count)]
tagsense_count = int(line_split[5 + ptr_count],10)
synsets = [synset_map[int(line_split[i],10)] for i in range(6 + ptr_count, 6 + ptr_count + synset_count)]
return cls(lemma, category, ptrs, synsets, tagsense_count)
@classmethod
def build_from_file(cls, f, synset_map, rv_base=None):
if (rv_base is None):
rv = {}
else:
rv = rv_base
for line in f:
if (line.startswith(' ')):
continue
wi = cls.build_from_line(line, synset_map)
word = wi.lemma.lower()
if not (word in rv):
rv[word] = []
rv[word].append(wi)
return rv
def __repr__(self):
return '%s%s' % (self.__class__.__name__, (self.lemma, self.category, self.ptrs, self.synsets, self.tagsense_count))
class WordIndexDictFormatter(WordIndex):
category_map_rev = {
CAT_NOUN: 'n',
CAT_VERB: 'v',
CAT_ADJECTIVE: 'adj',
CAT_ADVERB: 'adv'
}
linesep = '\n'
LINE_WIDTH_MAX = 68
prefix_fmtf_line_first = '%5s 1: '
prefix_fmtn_line_first = ' '
prefix_fmtf_line_nonfirst = '%5d: '
prefix_fmtn_line_nonfirst = ' '
def dict_str(self):
tw = TextWrapper(width=self.LINE_WIDTH_MAX,
initial_indent=(self.prefix_fmtf_line_first % self.category_map_rev[self.category]),
subsequent_indent=self.prefix_fmtn_line_first)
lines = (tw.wrap(self.synsets[0].dict_str()))
i = 2
for synset in self.synsets[1:]:
tw = TextWrapper(width=self.LINE_WIDTH_MAX,
initial_indent=(self.prefix_fmtf_line_nonfirst % i),
subsequent_indent=self.prefix_fmtn_line_nonfirst)
lines.extend(tw.wrap(synset.dict_str()))
i += 1
return self.linesep.join(lines)
class Synset:
def __init__(self, offset, ss_type, words, ptrs, gloss, frames=()):
self.offset = offset
self.type = ss_type
self.words = words
self.ptrs = ptrs
self.gloss = gloss
self.frames = frames
self.comments = []
@classmethod
def build_from_line(cls, line_data):
line_split = line_data.split()
synset_offset = int(line_split[0],10)
ss_type = category_map[line_split[2]]
word_count = int(line_split[3],16)
words = [line_split[i] for i in range(4, 4 + word_count*2,2)]
ptr_count = int(line_split[4 + word_count*2],10)
ptrs = [(line_split[i], line_split[i+1], line_split[i+2], line_split[i+3]) for i in range(5 + word_count*2,4 + word_count*2 + ptr_count*4,4)]
tok = line_split[5 + word_count*2 + ptr_count*4]
base = 6 + word_count*2 + ptr_count*4
if (tok != '|'):
frame_count = int(tok, 10)
frames = [(int(line_split[i+1],10), int(line_split[i+2],16)) for i in range(base, base + frame_count*3, 3)]
base += frame_count*3 + 1
else:
frames = []
line_split2 = line_data.split(None, base)
if (len(line_split2) < base):
gloss = None
else:
gloss = line_split2[-1]
return cls(synset_offset, ss_type, words, ptrs, gloss, frames)
@classmethod
def build_from_file(cls, f):
rv = {}
comments = []
for line in f:
if (line.startswith(' ')):
line_s = line.lstrip().rstrip('\n')
line_elements = line_s.split(None,1)
try:
int(line_elements[0])
except ValueError:
continue
if (len(line_elements) == 1):
line_elements.append('')
comments.append(line_elements[1])
continue
synset = cls.build_from_line(line.rstrip())
rv[synset.offset] = synset
return (rv, comments)
def dict_str(self):
rv = self.gloss
if (len(self.words) > 1):
rv += ' [syn: %s]' % (', '.join([('{%s}' % word) for word in self.words]))
return rv
def __repr__(self):
return '%s%s' % (self.__class__.__name__, (self.offset, self.type, self.words, self.ptrs, self.gloss, self.frames))
class WordnetDict:
db_info_fmt = '''This file was converted from the original database on:
%(conversion_datetime)s
The original data is available from:
%(wn_url)s
The original data was distributed with the notice shown below. No
additional restrictions are claimed. Please redistribute this changed
version under the same conditions and restriction that apply to the
original version.\n\n
%(wn_license)s'''
datetime_fmt = '%Y-%m-%dT%H:%M:%S'
base64_map = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def __init__(self, wn_url, desc_short, desc_long):
self.word_data = {}
self.wn_url = wn_url
self.desc_short = desc_short
self.desc_long = desc_long
self.wn_license = None
def wn_dict_add(self, file_index, file_data):
file_data.seek(0)
file_index.seek(0)
(synsets, license_lines) = Synset.build_from_file(file_data)
WordIndexDictFormatter.build_from_file(file_index, synsets, self.word_data)
if (license_lines):
self.wn_license = '\n'.join(license_lines) + '\n'
@classmethod
def base64_encode(cls, i):
"""Encode a non-negative integer into a dictd compatible base64 string"""
if (i < 0):
raise ValueError('Value %r for i is negative' % (i,))
r = 63
e = 1
while (r < i):
e += 1
r = 64**e - 1
rv = ''
while (e > 0):
e -= 1
d = math.floor(i / 64**e)
rv += cls.base64_map[d]
i = i % (64**e)
return rv
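    # Worked examples of the encoding above (the values follow directly from
    # the algorithm; they are illustrations, not dictd reference data):
    #
    #   base64_encode(0)  -> 'A'
    #   base64_encode(63) -> '/'
    #   base64_encode(64) -> 'BA'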
@classmethod
def dict_entry_write(cls, file_index, file_data, key, entry, linesep='\n'):
"""Write a single dict entry for <key> to index and data files"""
entry_start = file_data.tell()
file_data.write(entry)
entry_len = len(entry)
file_index.write('%s\t%s\t%s%s' % (key, cls.base64_encode(entry_start),
cls.base64_encode(entry_len), linesep))
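    # Sketch of what one call produces (the headword and entry text below are
    # made up; offsets and lengths are whatever the data file position happens
    # to be, encoded with base64_encode above):
    #
    #   dict_entry_write(idx, dat, 'apple', 'apple\n   n 1: ...\n')
    #
    # appends the entry text to the data file and writes an index line of the
    # form "apple\t<start-b64>\t<length-b64>\n".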
def dict_generate(self, file_index, file_data):
file_index.seek(0)
file_data.seek(0)
# The dictd file format is fairly iffy on the subject of special
# headwords: either dictd is buggy, or the manpage doesn't tell the whole
# story about the format.
# The upshot is that order of these entries in the index *matters*.
# Putting them at the beginning and in alphabetic order is afaict ok.
# Some other orders completely and quietly break the ability to look
# those headwords up.
# -- problem encountered with 1.10.2, at 2007-08-05.
file_data.write('\n')
wn_url = self.wn_url
conversion_datetime = datetime.datetime.now().strftime(self.datetime_fmt)
wn_license = self.wn_license
self.dict_entry_write(file_index, file_data, '00-database-info', '00-database-info\n%s\n' % (self.db_info_fmt % vars()))
self.dict_entry_write(file_index, file_data, '00-database-long', '00-database-long\n%s\n' % self.desc_long)
self.dict_entry_write(file_index, file_data, '00-database-short', '00-database-short\n%s\n' % self.desc_short)
self.dict_entry_write(file_index, file_data, '00-database-url', '00-database-url\n%s\n' % self.wn_url)
words = list(self.word_data.keys())
words.sort()
for word in words:
for wi in self.word_data[word]:
word_cs = word
# Use case-sensitivity information of first entry of first synset that
# matches this word case-insensitively
for synset in wi.synsets:
for ss_word in synset.words:
if (ss_word.lower() == word_cs.lower()):
word_cs = ss_word
break
else:
continue
break
else:
continue
break
outstr = ''
for wi in self.word_data[word]:
outstr += wi.dict_str() + '\n'
outstr = '%s%s%s' % (word_cs, wi.linesep, outstr)
self.dict_entry_write(file_index, file_data, word_cs, outstr, wi.linesep)
file_index.truncate()
file_data.truncate()
if (__name__ == '__main__'):
import optparse
op = optparse.OptionParser(usage='usage: %prog [options] (<wn_index_file> <wn_data_file>)+')
op.add_option('-i', '--outindex', dest='oi', default='wn.index', help='filename of index file to write to')
op.add_option('-d', '--outdata', dest='od', default='wn.dict', help='filename of data file to write to')
op.add_option('--wn_url', dest='wn_url', default='ftp://ftp.cogsci.princeton.edu/pub/wordnet/2.0', help='URL for wordnet sources')
op.add_option('--db_desc_short', dest='desc_short', default=' WordNet (r) 2.1 (2005)', help='short dict DB description')
op.add_option('--db_desc_long', dest='desc_long', default=' WordNet (r): A Lexical Database for English from the\n Cognitive Science Laboratory at Princeton University', help='long dict DB description')
(options, args) = op.parse_args()
wnd = WordnetDict(wn_url=options.wn_url, desc_short=options.desc_short, desc_long=options.desc_long)
for i in range(0,len(args),2):
print('Opening index file %r...' % args[i])
file_index = open(args[i])
print('Opening data file %r...' % args[i+1])
file_data = open(args[i+1])
print('Parsing index file and data file...')
wnd.wn_dict_add(file_index, file_data)
print('All input files parsed. Writing output to index file %r and data file %r.' % (options.oi, options.od))
wnd.dict_generate(open(options.oi, 'w'),open(options.od, 'w'))
print('All done.')
| 11,618 | 35.196262 | 212 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/dict/wiktionary/latest_version.py
|
import subprocess
from html.parser import HTMLParser
from os.path import abspath, dirname
from urllib.request import urlopen
class WiktionaryLatestVersionParser(HTMLParser):
def __init__(self, current_version, *args, **kwargs):
self.latest_version = current_version
super().__init__(*args, **kwargs)
def handle_starttag(self, tag, attrs):
if tag != 'a':
return
href = dict(attrs)['href'][0:-1]
if href == 'latest':
return
self.latest_version = max(self.latest_version, href)
def nix_prefetch_url(url, algo='sha256'):
"""Prefetches the content of the given URL."""
print(f'nix-prefetch-url {url}')
out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url])
return out.rstrip()
current_version = subprocess.check_output([
'nix', 'eval', '--raw',
'-f', dirname(abspath(__file__)) + '/../../../..',
'dictdDBs.wiktionary.version',
]).decode()  # nix eval writes bytes to stdout; compare versions as str
parser = WiktionaryLatestVersionParser(current_version)
with urlopen('https://dumps.wikimedia.org/enwiktionary/') as resp:
    parser.feed(resp.read().decode('utf-8'))  # HTMLParser.feed expects str, not bytes
print(parser.latest_version)
| 1,145 | 25.651163 | 76 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/dict/wiktionary/wiktionary2dict.py
|
# Adapted to produce DICT-compatible files by Petr Rockai in 2012
# Based on code from wiktiondict by Greg Hewgill
import re
import sys
import os
import textwrap
import time
import xml.sax
class Text:
def __init__(self, s):
self.s = s
def process(self):
        return self.s
class TemplateCall:
def __init__(self):
pass
def process(self):
pass
class Template:
def __init__(self):
self.parts = []
def append(self, part):
self.parts.append(part)
def process(self):
return ''.join(x.process() for x in self.parts)
class Whitespace:
def __init__(self, s):
self.s = s
class OpenDouble: pass
class OpenTriple: pass
class CloseDouble: pass
class CloseTriple: pass
class Equals:
def __str__(self):
return "="
class Delimiter:
def __init__(self, c):
self.c = c
def __str__(self):
return self.c
def Tokenise(s):
s = str(s)
stack = []
last = 0
i = 0
while i < len(s):
if s[i] == '{' and i+1 < len(s) and s[i+1] == '{':
if i > last:
yield s[last:i]
if i+2 < len(s) and s[i+2] == '{':
yield OpenTriple()
stack.append(3)
i += 3
else:
yield OpenDouble()
stack.append(2)
i += 2
last = i
elif s[i] == '}' and i+1 < len(s) and s[i+1] == '}':
if i > last:
yield s[last:i]
if len(stack) == 0:
yield "}}"
i += 2
elif stack[-1] == 2:
yield CloseDouble()
i += 2
stack.pop()
elif i+2 < len(s) and s[i+2] == '}':
yield CloseTriple()
i += 3
stack.pop()
else:
raise SyntaxError()
last = i
elif s[i] == ':' or s[i] == '|':
if i > last:
yield s[last:i]
yield Delimiter(s[i])
i += 1
last = i
elif s[i] == '=':
if i > last:
yield s[last:i]
yield Equals()
i += 1
last = i
#elif s[i] == ' ' or s[i] == '\t' or s[i] == '\n':
# if i > last:
# yield s[last:i]
# last = i
# m = re.match(r"\s+", s[i:])
# assert m
# yield Whitespace(m.group(0))
# i += len(m.group(0))
# last = i
else:
i += 1
if i > last:
yield s[last:i]
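# Rough sketch of what Tokenise yields for a simple template call (token class
# names as defined above; this is an illustration, not captured output):
#
#   list(Tokenise("{{t|abc}}"))
#   # -> roughly: OpenDouble, 't', Delimiter('|'), 'abc', CloseDouble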
def processSub(templates, tokens, args):
t = next(tokens)
if not isinstance(t, str):
raise SyntaxError
name = t
t = next(tokens)
default = None
if isinstance(t, Delimiter) and t.c == '|':
default = ""
while True:
t = next(tokens)
if isinstance(t, str):
default += t
elif isinstance(t, OpenDouble):
default += processTemplateCall(templates, tokens, args)
elif isinstance(t, OpenTriple):
default += processSub(templates, tokens, args)
elif isinstance(t, CloseTriple):
break
else:
print("Unexpected:", t)
raise SyntaxError()
if name in args:
return args[name]
if default is not None:
return default
if name == "lang":
return "en"
return "{{{%s}}}" % name
def processTemplateCall(templates, tokens, args):
    template = next(tokens).strip().lower()
args = {}
a = 1
t = next(tokens)
while True:
if isinstance(t, Delimiter):
name = str(a)
arg = ""
while True:
t = next(tokens)
if isinstance(t, str):
arg += t
elif isinstance(t, OpenDouble):
arg += processTemplateCall(templates, tokens, args)
elif isinstance(t, OpenTriple):
arg += processSub(templates, tokens, args)
elif isinstance(t, Delimiter) and t.c != '|':
arg += str(t)
else:
break
if isinstance(t, Equals):
name = arg.strip()
arg = ""
while True:
t = next(tokens)
if isinstance(t, (str, Equals)):
arg += str(t)
elif isinstance(t, OpenDouble):
arg += processTemplateCall(templates, tokens, args)
elif isinstance(t, OpenTriple):
arg += processSub(templates, tokens, args)
elif isinstance(t, Delimiter) and t.c != '|':
arg += str(t)
else:
break
arg = arg.strip()
else:
a += 1
args[name] = arg
elif isinstance(t, CloseDouble):
break
else:
print("Unexpected:", t)
raise SyntaxError
#print template, args
if template[0] == '#':
if template == "#if":
if args['1'].strip():
return args['2']
elif '3' in args:
return args['3']
else:
return ""
elif template == "#ifeq":
if args['1'].strip() == args['2'].strip():
return args['3']
elif '4' in args:
return args['4']
else:
return ""
elif template == "#ifexist":
return ""
elif template == "#switch":
sw = args['1'].strip()
if sw in args:
return args[sw]
else:
return ""
else:
print("Unknown ParserFunction:", template)
sys.exit(1)
if template not in templates:
return "{{%s}}" % template
return process(templates, templates[template], args)
def process(templates, s, args = {}):
s = re.compile(r"<!--.*?-->", re.DOTALL).sub("", s)
s = re.compile(r"<noinclude>.*?</noinclude>", re.DOTALL).sub("", s)
assert "<onlyinclude>" not in s
#s = re.sub(r"(.*?)<onlyinclude>(.*?)</onlyinclude>(.*)", r"\1", s)
s = re.compile(r"<includeonly>(.*?)</includeonly>", re.DOTALL).sub(r"\1", s)
r = ""
#print list(Tokenise(s))
tokens = Tokenise(s)
try:
while True:
t = next(tokens)
if isinstance(t, OpenDouble):
r += processTemplateCall(templates, tokens, args)
elif isinstance(t, OpenTriple):
r += processSub(templates, tokens, args)
else:
r += str(t)
except StopIteration:
pass
return r
def test():
templates = {
'lb': "{{",
'name-example': "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].",
't': "start-{{{1|pqr}}}-end",
't0': "start-{{{1}}}-end",
't1': "start{{{1}}}end<noinclude>moo</noinclude>",
't2a1': "{{t2demo|a|{{{1}}}}}",
't2a2': "{{t2demo|a|2={{{1}}}}}",
't2demo': "start-{{{1}}}-middle-{{{2}}}-end",
't5': "{{t2demo|{{{a}}}=b}}",
't6': "t2demo|a",
}
def t(text, expected):
print("text:", text)
s = process(templates, text)
if s != expected:
print("got:", s)
print("expected:", expected)
sys.exit(1)
t("{{Name-example}}", "I am a template example, my first name is '''{{{firstName}}}''' and my last name is '''{{{lastName}}}'''. You can reference my page at [[{{{lastName}}}, {{{firstName}}}]].")
t("{{Name-example | firstName=John | lastName=Smith }}", "I am a template example, my first name is '''John''' and my last name is '''Smith'''. You can reference my page at [[Smith, John]].")
t("{{t0|a}}", "start-a-end")
t("{{t0| }}", "start- -end")
t("{{t0|}}", "start--end")
t("{{t0}}", "start-{{{1}}}-end")
t("{{t0| }}", "start- -end")
t("{{t0|\n}}", "start-\n-end")
t("{{t0|1= }}", "start--end")
t("{{t0|1=\n}}", "start--end")
t("{{T}}", "start-pqr-end")
t("{{T|}}", "start--end")
t("{{T|abc}}", "start-abc-end")
t("{{T|abc|def}}", "start-abc-end")
t("{{T|1=abc|1=def}}", "start-def-end")
t("{{T|abc|1=def}}", "start-def-end")
t("{{T|1=abc|def}}", "start-def-end")
t("{{T|{{T}}}}", "start-start-pqr-end-end")
t("{{T|{{T|{{T}}}}}}", "start-start-start-pqr-end-end-end")
t("{{T|{{T|{{T|{{T}}}}}}}}", "start-start-start-start-pqr-end-end-end-end")
t("{{T|a{{t|b}}}}", "start-astart-b-end-end")
t("{{T|{{T|a=b}}}}", "start-start-pqr-end-end")
t("{{T|a=b}}", "start-pqr-end")
t("{{T|1=a=b}}", "start-a=b-end")
#t("{{t1|{{lb}}tc}}}}", "start{{tcend}}")
#t("{{t2a1|1=x=y}}", "start-a-middle-{{{2}}}-end")
#t("{{t2a2|1=x=y}}", "start-a-middle-x=y-end")
#t("{{t5|a=2=d}}", "start-{{{1}}}-middle-d=b-end")
#t("{{ {{t6}} }}", "{{ t2demo|a }}")
t("{{t|[[a|b]]}}", "start-b-end")
t("{{t|[[a|b]] }}", "start-b -end")
Parts = {
# Standard POS headers
'noun': "n.",
'Noun': "n.",
'Noun 1': "n.",
'Noun 2': "n.",
'Verb': "v.",
'Adjective': "adj.",
'Adverb': "adv.",
'Pronoun': "pron.",
'Conjunction': "conj.",
'Interjection': "interj.",
'Preposition': "prep.",
'Proper noun': "n.p.",
'Proper Noun': "n.p.",
'Article': "art.",
# Standard non-POS level 3 headers
'{{acronym}}': "acr.",
'Acronym': "acr.",
'{{abbreviation}}': "abbr.",
'[[Abbreviation]]': "abbr.",
'Abbreviation': "abbr.",
'[[initialism]]': "init.",
'{{initialism}}': "init.",
'Initialism': "init.",
'Contraction': "cont.",
'Prefix': "prefix",
'Suffix': "suffix",
'Symbol': "sym.",
'Letter': "letter",
'Idiom': "idiom",
'Idioms': "idiom",
'Phrase': "phrase",
# Debated POS level 3 headers
'Number': "num.",
'Numeral': "num.",
'Cardinal number': "num.",
'Ordinal number': "num.",
'Cardinal numeral': "num.",
'Ordinal numeral': "num.",
# Other headers in use
'Personal pronoun': "pers.pron.",
'Adjective/Adverb': "adj./adv.",
'Proper adjective': "prop.adj.",
'Determiner': "det.",
'Demonstrative determiner': "dem.det.",
'Clitic': "clitic",
'Infix': "infix",
'Counter': "counter",
'Kanji': None,
'Kanji reading': None,
'Hiragana letter': None,
'Katakana letter': None,
'Pinyin': None,
'Han character': None,
'Hanzi': None,
'Hanja': None,
'Proverb': "prov.",
'Expression': None,
'Adjectival noun': None,
'Quasi-adjective': None,
'Particle': "part.",
'Infinitive particle': "part.",
'Possessive adjective': "poss.adj.",
'Verbal prefix': "v.p.",
'Postposition': "post.",
'Prepositional article': "prep.art.",
'Phrasal verb': "phr.v.",
'Participle': "participle",
'Interrogative auxiliary verb': "int.aux.v.",
'Pronominal adverb': "pron.adv.",
'Adnominal': "adn.",
'Abstract pronoun': "abs.pron.",
'Conjunction particle': None,
'Root': "root",
# Non-standard, deprecated headers
'Noun form': "n.",
'Verb form': "v.",
'Adjective form': "adj.form.",
'Nominal phrase': "nom.phr.",
'Noun phrase': "n. phrase",
'Verb phrase': "v. phrase",
'Transitive verb': "v.t.",
'Intransitive verb': "v.i.",
'Reflexive verb': "v.r.",
'Cmavo': None,
'Romaji': "rom.",
'Hiragana': None,
'Furigana': None,
'Compounds': None,
# Other headers seen
'Alternative forms': None,
'Alternative spellings': None,
'Anagrams': None,
'Antonym': None,
'Antonyms': None,
'Conjugation': None,
'Declension': None,
'Declension and pronunciations': None,
'Definite Article': "def.art.",
'Definite article': "def.art.",
'Demonstrative pronoun': "dem.pron.",
'Derivation': None,
'Derived expression': None,
'Derived expressions': None,
'Derived forms': None,
'Derived phrases': None,
'Derived terms': None,
'Derived, Related terms': None,
'Descendants': None,
#'Etymology': None,
#'Etymology 1': None,
#'Etymology 2': None,
#'Etymology 3': None,
#'Etymology 4': None,
#'Etymology 5': None,
'Examples': None,
'External links': None,
'[[Gismu]]': None,
'Gismu': None,
'Homonyms': None,
'Homophones': None,
'Hyphenation': None,
'Indefinite article': "art.",
'Indefinite pronoun': "ind.pron.",
'Indefinite Pronoun': "ind.pron.",
'Indetermined pronoun': "ind.pron.",
'Interrogative conjunction': "int.conj.",
'Interrogative determiner': "int.det.",
'Interrogative particle': "int.part.",
'Interrogative pronoun': "int.pron.",
'Legal expression': "legal",
'Mass noun': "n.",
'Miscellaneous': None,
'Mutations': None,
'Noun and verb': "n/v.",
'Other language': None,
'Pinyin syllable': None,
'Possessive determiner': "poss.det.",
'Possessive pronoun': "poss.pron.",
'Prepositional phrase': "prep.phr.",
'Prepositional Pronoun': "prep.pron.",
'Pronunciation': None,
'Pronunciation 1': None,
'Pronunciation 2': None,
'Quotations': None,
'References': None,
'Reflexive pronoun': "refl.pron.",
'Related expressions': None,
'Related terms': None,
'Related words': None,
'Relative pronoun': "rel.pron.",
'Saying': "saying",
'See also': None,
'Shorthand': None,
'[http://en.wikipedia.org/wiki/Shorthand Shorthand]': None,
'Sister projects': None,
'Spelling note': None,
'Synonyms': None,
'Translation': None,
'Translations': None,
'Translations to be checked': None,
'Transliteration': None,
'Trivia': None,
'Usage': None,
'Usage in English': None,
'Usage notes': None,
'Verbal noun': "v.n.",
}
PartsUsed = {}
for p in list(Parts.keys()):
PartsUsed[p] = 0
def encode(s):
r = e(s)
assert r[1] == len(s)
return r[0]
def dowikilink(m):
a = m.group(1).split("|")
if len(a) > 1:
link = a[1]
else:
link = a[0]
if ':' in link:
link = ""
return link
seentemplates = {}
def dotemplate(m):
aa = m.group(1).split("|")
args = {}
n = 0
for a in aa:
am = re.match(r"(.*?)(=(.*))?", a)
if am:
args[am.group(1)] = am.group(3)
else:
n += 1
args[n] = am.group(1)
#if aa[0] in seentemplates:
# seentemplates[aa[0]] += 1
#else:
# seentemplates[aa[0]] = 1
# print len(seentemplates), aa[0]
#print aa[0]
#if aa[0] not in Templates:
# return "(unknown template %s)" % aa[0]
#body = Templates[aa[0]]
#body = re.sub(r"<noinclude>.*?</noinclude>", "", body)
#assert "<onlyinclude>" not in body
##body = re.sub(r"(.*?)<onlyinclude>(.*?)</onlyinclude>(.*)", r"\1", body)
#body = re.sub(r"<includeonly>(.*?)</includeonly>", r"\1", body)
#def dotemplatearg(m):
# ta = m.group(1).split("|")
# if ta[0] in args:
# return args[ta[0]]
# elif len(ta) > 1:
# return ta[1]
# else:
# return "{{{%s}}}" % ta[0]
#body = re.sub(r"{{{(.*?)}}}", dotemplatearg, body)
#return dewiki(body)
def doparserfunction(m):
a = m.group(2).split("|")
if m.group(1) == "ifeq":
if a[0] == a[1]:
return a[2]
elif len(a) >= 4:
return a[3]
return ""
def dewiki(body, indent = 0):
# process in this order:
# {{{ }}}
# <> <>
# [[ ]]
# {{ }}
# ''' '''
# '' ''
#body = wikimediatemplate.process(Templates, body)
body = re.sub(r"\[\[(.*?)\]\]", dowikilink, body)
#body = re.sub(r"{{(.*?)}}", dotemplate, body)
#body = re.sub(r"{{#(.*?):(.*?)}}", doparserfunction, body)
body = re.sub(r"'''(.*?)'''", r"\1", body)
body = re.sub(r"''(.*?)''", r"\1", body)
lines = body.split("\n")
n = 0
i = 0
while i < len(lines):
if len(lines[i]) > 0 and lines[i][0] == "#":
if len(lines[i]) > 1 and lines[i][1] == '*':
wlines = textwrap.wrap(lines[i][2:].strip(),
initial_indent = " * ",
subsequent_indent = " ")
elif len(lines[i]) > 1 and lines[i][1] == ':':
wlines = textwrap.wrap(lines[i][2:].strip(),
initial_indent = " ",
subsequent_indent = " ")
else:
n += 1
wlines = textwrap.wrap(str(n) + ". " + lines[i][1:].strip(),
subsequent_indent = " ")
elif len(lines[i]) > 0 and lines[i][0] == "*":
n = 0
wlines = textwrap.wrap(lines[i][1:].strip(),
initial_indent = "* ",
subsequent_indent = " ")
else:
n = 0
wlines = textwrap.wrap(lines[i].strip())
if len(wlines) == 0:
wlines = ['']
lines[i:i+1] = wlines
i += len(wlines)
return ''.join(" "*(indent-1)+x+"\n" for x in lines)
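# Illustrative usage (added; the input is made up): dewiki("# '''bold''' text")
# returns "1. bold text\n" -- numbered definition lines are rewrapped and the
# wiki quote markup is stripped.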
class WikiSection:
def __init__(self, heading, body):
self.heading = heading
self.body = body
#self.lines = re.split("\n+", body.strip())
#if len(self.lines) == 1 and len(self.lines[0]) == 0:
# self.lines = []
self.children = []
def __str__(self):
return "<%s:%i:%s>" % (self.heading, len(self.body or ""), ','.join([str(x) for x in self.children]))
def add(self, section):
self.children.append(section)
def parse(word, text):
    headings = list(re.finditer(r"^(=+)\s*(.*?)\s*=+\n", text, re.MULTILINE))
#print [x.group(1) for x in headings]
doc = WikiSection(word, "")
stack = [doc]
for i, m in enumerate(headings):
depth = len(m.group(1))
if depth < len(stack):
stack = stack[:depth]
else:
while depth > len(stack):
s = WikiSection(None, "")
stack[-1].add(s)
stack.append(s)
if i+1 < len(headings):
s = WikiSection(m.group(2), text[m.end(0):headings[i+1].start(0)].strip())
else:
s = WikiSection(m.group(2), text[m.end(0):].strip())
assert len(stack) == depth
stack[-1].add(s)
stack.append(s)
#while doc.heading is None and len(doc.lines) == 0 and len(doc.children) == 1:
# doc = doc.children[0]
return doc
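# Illustrative sketch (added; not from the original script): for a page body of
#   "==English==\n===Noun===\n# a thing\n"
# parse() returns a tree WikiSection(word) -> (unnamed level-1 section) ->
# "English" -> "Noun", with the definition text stored on the "Noun" node.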
def formatFull(word, doc):
def f(depth, section):
if section.heading:
r = " "*(depth-1) + section.heading + "\n\n"
else:
r = ""
if section.body:
r += dewiki(section.body, depth+1)+"\n"
#r += "".join(" "*depth + x + "\n" for x in dewiki(section.body))
#if len(section.lines) > 0:
# r += "\n"
for c in section.children:
r += f(depth+1, c)
return r
s = f(0, doc)
s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word
return s
def formatNormal(word, doc):
def f(depth, posdepth, section):
r = ""
if depth == posdepth:
if not section.heading or section.heading.startswith("Etymology"):
posdepth += 1
elif section.heading in Parts:
#p = Parts[section.heading]
#if p:
# r += " "*(depth-1) + word + " (" + p + ")\n\n"
r += " "*(depth-1) + section.heading + "\n\n"
else:
print("Unknown part: (%s) %s" % (word, section.heading), file=errors)
return ""
elif depth > posdepth:
return ""
elif section.heading:
r += " "*(depth-1) + section.heading + "\n\n"
if section.body:
r += dewiki(section.body, depth+1)+"\n"
#r += "".join(" "*depth + x + "\n" for x in dewiki(section.lines))
#if len(section.lines) > 0:
# r += "\n"
for c in section.children:
r += f(depth+1, posdepth, c)
return r
s = f(0, 3, doc)
s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word
return s
def formatBrief(word, doc):
def f(depth, posdepth, section):
if depth == posdepth:
h = section.heading
if not section.heading or section.heading.startswith("Etymology"):
posdepth += 1
elif section.heading in Parts:
#h = Parts[section.heading]
#if h:
# h = "%s (%s)" % (word, h)
pass
stack.append([h, False])
elif depth > 0:
stack.append([section.heading, False])
else:
stack.append(["%h " + section.heading, False])
r = ""
#if section.heading:
# r += " "*(depth-1) + section.heading + "\n"
body = ''.join(x+"\n" for x in section.body.split("\n") if len(x) > 0 and x[0] == '#')
if len(body) > 0:
for i in range(len(stack)):
if not stack[i][1]:
if stack[i][0]:
r += " "*(i-1) + stack[i][0] + "\n"
stack[i][1] = True
r += dewiki(body, depth+1)
for c in section.children:
r += f(depth+1, posdepth, c)
stack.pop()
return r
stack = []
s = f(0, 3, doc)
s += "Ref: http://en.wiktionary.org/wiki/%s\n" % word
return s
class WikiHandler(xml.sax.ContentHandler):
def __init__(self):
self.element = None
self.page = None
self.text = ""
self.long = {}
def startElement(self, name, attrs):
#print "start", name, attrs
self.element = name
def endElement(self, name):
#print "end", name
if self.element == "text":
if self.page:
if self.page in self.long:
print(self.page, len(self.text))
print()
self.doPage(self.page, self.text)
self.page = None
self.text = ""
self.element = None
def characters(self, content):
#print "characters", content
if self.element == "title":
if self.checkPage(content):
self.page = content
elif self.element == "text":
if self.page:
self.text += content
if len(self.text) > 100000 and self.page not in self.long:
self.long[self.page] = 1
def checkPage(self, page):
return False
def doPage(self, page, text):
pass
class TemplateHandler(WikiHandler):
def checkPage(self, page):
return page.startswith("Template:")
def doPage(self, page, text):
Templates[page[page.find(':')+1:].lower()] = text
class WordHandler(WikiHandler):
def checkPage(self, page):
return ':' not in page
def doPage(self, page, text):
m = re.match(r"#redirect\s*\[\[(.*?)\]\]", text, re.IGNORECASE)
if m:
out.write(" See <%s>" % page)
return
doc = parse(page, text)
out.write(formatBrief(page, doc))
#print formatBrief(page, doc)
fn = sys.argv[1]
info = """ This file was converted from the original database on:
%s
The original data is available from:
http://en.wiktionary.org
The version from which this file was generated was:
%s
Wiktionary is available under the GNU Free Documentation License.
""" % (time.ctime(), os.path.basename(fn))
errors = open("mkdict.err", "w")
Templates = {}
f = os.popen("bunzip2 -c %s" % fn, "r")
xml.sax.parse(f, TemplateHandler())
f.close()
f = os.popen("bunzip2 -c %s" % fn, "r")
out = os.popen("dictfmt -p wiktionary-en --locale en_US.UTF-8 --columns 0 -u http://en.wiktionary.org", "w")
out.write("%%h English Wiktionary\n%s" % info)
xml.sax.parse(f, WordHandler())
f.close()
out.close()
| 24,342 | 30.369845 | 200 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/asterisk/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3 python3.pkgs.packaging python3.pkgs.beautifulsoup4 python3.pkgs.requests
# mirrored in ./default.nix
from packaging import version
from bs4 import BeautifulSoup
import re, requests, json
import os, sys
from pathlib import Path
URL = "https://downloads.asterisk.org/pub/telephony/asterisk/"
page = requests.get(URL)
changelog = re.compile(r"^ChangeLog-\d+\.\d+\.\d+\.md$")
changelogs = [a.get_text() for a in BeautifulSoup(page.text, 'html.parser').find_all('a') if changelog.match(a.get_text())]
major_versions = {}
for changelog in changelogs:
v = version.parse(changelog.removeprefix("ChangeLog-").removesuffix(".md"))
major_versions.setdefault(v.major, []).append(v)
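# Illustrative example (added; the file name is made up): a listing entry
# "ChangeLog-20.5.2.md" parses to version 20.5.2 and is grouped under major 20,
# so the newest 20.x release ends up as out["asterisk_20"] below.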
out = {}
for mv in major_versions.keys():
v = max(major_versions[mv])
sha = requests.get(f"{URL}/asterisk-{v}.sha256").text.split()[0]
out["asterisk_" + str(mv)] = {
"version": str(v),
"sha256": sha
}
versions_path = Path(sys.argv[0]).parent / "versions.json"
try:
with open(versions_path, "r") as in_file:
in_data = json.loads(in_file.read())
for v in in_data.keys():
print(v + ":", in_data[v]["version"], "->", out[v]["version"])
except Exception:
# nice to have for the PR, not a requirement
pass
with open(versions_path, "w") as out_file:
out_file.write(json.dumps(out, sort_keys=True, indent=2) + "\n")
| 1,416 | 32.738095 | 123 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/web-apps/lemmy/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3 python3.pkgs.semver nix-prefetch-github
from urllib.request import Request, urlopen
import dataclasses
import subprocess
import hashlib
import os.path
import semver
import base64
from typing import (
Optional,
Dict,
List,
)
import json
import os
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
NIXPKGS = os.path.abspath(os.path.join(SCRIPT_DIR, "../../../../"))
OWNER = "LemmyNet"
UI_REPO = "lemmy-ui"
SERVER_REPO = "lemmy"
@dataclasses.dataclass
class Pin:
serverVersion: str
uiVersion: str
serverSha256: str = ""
serverCargoSha256: str = ""
uiSha256: str = ""
uiYarnDepsSha256: str = ""
filename: Optional[str] = None
def write(self) -> None:
if not self.filename:
raise ValueError("No filename set")
with open(self.filename, "w") as fd:
pin = dataclasses.asdict(self)
del pin["filename"]
json.dump(pin, fd, indent=2)
fd.write("\n")
def github_get(path: str) -> Dict:
"""Send a GET request to Gituhb, optionally adding GITHUB_TOKEN auth header"""
url = f"https://api.github.com/{path.lstrip('/')}"
print(f"Retreiving {url}")
req = Request(url)
if "GITHUB_TOKEN" in os.environ:
req.add_header("authorization", f"Bearer {os.environ['GITHUB_TOKEN']}")
with urlopen(req) as resp:
return json.loads(resp.read())
def get_latest_release(owner: str, repo: str) -> str:
return github_get(f"/repos/{owner}/{repo}/releases/latest")["tag_name"]
def sha256_url(url: str) -> str:
sha256 = hashlib.sha256()
with urlopen(url) as resp:
while data := resp.read(1024):
sha256.update(data)
return "sha256-" + base64.urlsafe_b64encode(sha256.digest()).decode()
def prefetch_github(owner: str, repo: str, rev: str) -> str:
"""Prefetch github rev and return sha256 hash"""
print(f"Prefetching {owner}/{repo}({rev})")
proc = subprocess.run(
["nix-prefetch-github", owner, repo, "--rev", rev, "--fetch-submodules"],
check=True,
stdout=subprocess.PIPE,
)
sha256 = json.loads(proc.stdout)["sha256"]
if not sha256.startswith("sha256-"): # Work around bug in nix-prefetch-github
return "sha256-" + sha256
return sha256
def get_latest_tag(owner: str, repo: str, prerelease: bool = False) -> str:
"""Get the latest tag from a Github Repo"""
tags: List[str] = []
# As the Github API doesn't have any notion of "latest" for tags we need to
# collect all of them and sort so we can figure out the latest one.
i = 0
while i <= 100: # Prevent infinite looping
i += 1
resp = github_get(f"/repos/{owner}/{repo}/tags?page={i}")
if not resp:
break
# Filter out unparseable tags
for tag in resp:
try:
parsed = semver.Version.parse(tag["name"])
if (
semver.Version.parse(tag["name"])
and not prerelease
and parsed.prerelease
): # Filter out release candidates
continue
except ValueError:
continue
else:
tags.append(tag["name"])
# Sort and return latest
return sorted(tags, key=lambda name: semver.Version.parse(name))[-1]
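# Illustrative example (added; tag names are made up): with tags
# ["0.18.0", "0.18.5", "0.19.0-rc.1"] and prerelease=False, the release
# candidate is filtered out and "0.18.5" is returned.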
def get_fod_hash(attr: str) -> str:
"""
Get fixed output hash for attribute.
This depends on a fixed output derivation with an empty hash.
"""
print(f"Getting fixed output hash for {attr}")
proc = subprocess.run(["nix-build", NIXPKGS, "-A", attr], stderr=subprocess.PIPE)
if proc.returncode != 1:
raise ValueError("Expected nix-build to fail")
# Iterate list in reverse order so we get the "got:" line early
for line in proc.stderr.decode().split("\n")[::-1]:
cols = line.split()
if cols and cols[0] == "got:":
return cols[1]
raise ValueError("No fixed output hash found")
def make_server_pin(pin: Pin, attr: str) -> None:
pin.serverSha256 = prefetch_github(OWNER, SERVER_REPO, pin.serverVersion)
pin.write()
pin.serverCargoSha256 = get_fod_hash(attr)
pin.write()
def make_ui_pin(pin: Pin, package_json: str, attr: str) -> None:
# Save a copy of package.json
print("Getting package.json")
with urlopen(
f"https://raw.githubusercontent.com/{OWNER}/{UI_REPO}/{pin.uiVersion}/package.json"
) as resp:
with open(os.path.join(SCRIPT_DIR, package_json), "wb") as fd:
fd.write(resp.read())
pin.uiSha256 = prefetch_github(OWNER, UI_REPO, pin.uiVersion)
pin.write()
pin.uiYarnDepsSha256 = get_fod_hash(attr)
pin.write()
if __name__ == "__main__":
# Get server version
server_version = get_latest_tag(OWNER, SERVER_REPO)
# Get UI version (not always the same as lemmy-server)
ui_version = get_latest_tag(OWNER, UI_REPO)
pin = Pin(server_version, ui_version, filename=os.path.join(SCRIPT_DIR, "pin.json"))
make_server_pin(pin, "lemmy-server")
make_ui_pin(pin, "package.json", "lemmy-ui")
| 5,190 | 28.162921 | 91 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/servers/web-apps/discourse/update.py
|
#!/usr/bin/env nix-shell
#! nix-shell -i python3 -p bundix bundler nix-update nix-universal-prefetch python3 python3Packages.requests python3Packages.click python3Packages.click-log prefetch-yarn-deps
from __future__ import annotations
import click
import click_log
import shutil
import tempfile
import re
import logging
import subprocess
import os
import stat
import json
import requests
import textwrap
from functools import total_ordering
from distutils.version import LooseVersion
from itertools import zip_longest
from pathlib import Path
from typing import Union, Iterable
logger = logging.getLogger(__name__)
@total_ordering
class DiscourseVersion:
"""Represents a Discourse style version number and git tag.
This takes either a tag or version string as input and
extrapolates the other. Sorting is implemented to work as expected
in regard to A.B.C.betaD version numbers - 2.0.0.beta1 is
considered lower than 2.0.0.
"""
tag: str = ""
version: str = ""
split_version: Iterable[Union[None, int, str]] = []
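    # Illustrative examples (added; not from the original source):
    #   DiscourseVersion("v3.1.0.beta2").version == "3.1.0.beta2"
    #   DiscourseVersion("3.1.0.beta2") < DiscourseVersion("3.1.0")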
def __init__(self, version: str):
"""Take either a tag or version number, calculate the other."""
if version.startswith('v'):
self.tag = version
self.version = version.lstrip('v')
else:
self.tag = 'v' + version
self.version = version
self.split_version = LooseVersion(self.version).version
def __eq__(self, other: DiscourseVersion):
"""Versions are equal when their individual parts are."""
return self.split_version == other.split_version
def __gt__(self, other: DiscourseVersion):
"""Check if this version is greater than the other.
Goes through the parts of the version numbers from most to
least significant, only continuing on to the next if the
numbers are equal and no decision can be made. If one version
ends in 'betaX' and the other doesn't, all else being equal,
the one without 'betaX' is considered greater, since it's the
release version.
"""
for (this_ver, other_ver) in zip_longest(self.split_version, other.split_version):
if this_ver == other_ver:
continue
if type(this_ver) is int and type(other_ver) is int:
return this_ver > other_ver
elif 'beta' in [this_ver, other_ver]:
# release version (None) is greater than beta
return this_ver is None
else:
return False
class DiscourseRepo:
version_regex = re.compile(r'^v\d+\.\d+\.\d+(\.beta\d+)?$')
_latest_commit_sha = None
def __init__(self, owner: str = 'discourse', repo: str = 'discourse'):
self.owner = owner
self.repo = repo
@property
def versions(self) -> Iterable[str]:
r = requests.get(f'https://api.github.com/repos/{self.owner}/{self.repo}/git/refs/tags').json()
tags = [x['ref'].replace('refs/tags/', '') for x in r]
# filter out versions not matching version_regex
versions = filter(self.version_regex.match, tags)
versions = [DiscourseVersion(x) for x in versions]
versions.sort(reverse=True)
return versions
@property
def latest_commit_sha(self) -> str:
if self._latest_commit_sha is None:
r = requests.get(f'https://api.github.com/repos/{self.owner}/{self.repo}/commits?per_page=1')
r.raise_for_status()
self._latest_commit_sha = r.json()[0]['sha']
return self._latest_commit_sha
def get_yarn_lock_hash(self, rev: str):
yarnLockText = self.get_file('app/assets/javascripts/yarn.lock', rev)
with tempfile.NamedTemporaryFile(mode='w') as lockFile:
            lockFile.write(yarnLockText)
            lockFile.flush()  # make sure the lockfile hits disk before prefetch-yarn-deps reads it
            return subprocess.check_output(['prefetch-yarn-deps', lockFile.name]).decode('utf-8').strip()
def get_file(self, filepath, rev):
"""Return file contents at a given rev.
:param str filepath: the path to the file, relative to the repo root
:param str rev: the rev to fetch at :return:
"""
r = requests.get(f'https://raw.githubusercontent.com/{self.owner}/{self.repo}/{rev}/{filepath}')
r.raise_for_status()
return r.text
def _call_nix_update(pkg, version):
"""Call nix-update from nixpkgs root dir."""
nixpkgs_path = Path(__file__).parent / '../../../../'
return subprocess.check_output(['nix-update', pkg, '--version', version], cwd=nixpkgs_path)
def _nix_eval(expr: str):
nixpkgs_path = Path(__file__).parent / '../../../../'
try:
output = subprocess.check_output(['nix-instantiate', '--strict', '--json', '--eval', '-E', f'(with import {nixpkgs_path} {{}}; {expr})'], text=True)
except subprocess.CalledProcessError:
return None
return json.loads(output)
def _get_current_package_version(pkg: str):
return _nix_eval(f'{pkg}.version')
def _diff_file(filepath: str, old_version: DiscourseVersion, new_version: DiscourseVersion):
repo = DiscourseRepo()
current_dir = Path(__file__).parent
old = repo.get_file(filepath, old_version.tag)
new = repo.get_file(filepath, new_version.tag)
if old == new:
click.secho(f'{filepath} is unchanged', fg='green')
return
with tempfile.NamedTemporaryFile(mode='w') as o, tempfile.NamedTemporaryFile(mode='w') as n:
        o.write(old), n.write(new)
        o.flush(), n.flush()  # ensure both temp files are fully written before diffing
width = shutil.get_terminal_size((80, 20)).columns
diff_proc = subprocess.run(
['diff', '--color=always', f'--width={width}', '-y', o.name, n.name],
stdout=subprocess.PIPE,
cwd=current_dir,
text=True
)
click.secho(f'Diff for {filepath} ({old_version.version} -> {new_version.version}):', fg='bright_blue', bold=True)
click.echo(diff_proc.stdout + '\n')
return
def _remove_platforms(rubyenv_dir: Path):
for platform in ['arm64-darwin-20', 'x86_64-darwin-18',
'x86_64-darwin-19', 'x86_64-darwin-20',
'x86_64-linux', 'aarch64-linux']:
with open(rubyenv_dir / 'Gemfile.lock', 'r') as f:
for line in f:
if platform in line:
subprocess.check_output(
['bundle', 'lock', '--remove-platform', platform], cwd=rubyenv_dir)
break
@click_log.simple_verbosity_option(logger)
@click.group()
def cli():
pass
@cli.command()
@click.argument('rev', default='latest')
@click.option('--reverse/--no-reverse', default=False, help='Print diffs from REV to current.')
def print_diffs(rev, reverse):
"""Print out diffs for files used as templates for the NixOS module.
The current package version found in the nixpkgs worktree the
script is run from will be used to download the "from" file and
REV used to download the "to" file for the diff, unless the
'--reverse' flag is specified.
REV should be the git rev to find changes in ('vX.Y.Z') or
'latest'; defaults to 'latest'.
"""
if rev == 'latest':
repo = DiscourseRepo()
rev = repo.versions[0].tag
old_version = DiscourseVersion(_get_current_package_version('discourse'))
new_version = DiscourseVersion(rev)
if reverse:
old_version, new_version = new_version, old_version
for f in ['config/nginx.sample.conf', 'config/discourse_defaults.conf']:
_diff_file(f, old_version, new_version)
@cli.command()
@click.argument('rev', default='latest')
def update(rev):
"""Update gem files and version.
REV: the git rev to update to ('vX.Y.Z[.betaA]') or
'latest'; defaults to 'latest'.
"""
repo = DiscourseRepo()
if rev == 'latest':
version = repo.versions[0]
else:
version = DiscourseVersion(rev)
logger.debug(f"Using rev {version.tag}")
logger.debug(f"Using version {version.version}")
rubyenv_dir = Path(__file__).parent / "rubyEnv"
for fn in ['Gemfile.lock', 'Gemfile']:
with open(rubyenv_dir / fn, 'w') as f:
f.write(repo.get_file(fn, version.tag))
subprocess.check_output(['bundle', 'lock'], cwd=rubyenv_dir)
_remove_platforms(rubyenv_dir)
subprocess.check_output(['bundix'], cwd=rubyenv_dir)
_call_nix_update('discourse', version.version)
old_yarn_hash = _nix_eval('discourse.assets.yarnOfflineCache.outputHash')
new_yarn_hash = repo.get_yarn_lock_hash(version.tag)
click.echo(f"Updating yarn lock hash, {old_yarn_hash} -> {new_yarn_hash}")
with open(Path(__file__).parent / "default.nix", 'r+') as f:
content = f.read()
content = content.replace(old_yarn_hash, new_yarn_hash)
f.seek(0)
f.write(content)
f.truncate()
@cli.command()
@click.argument('rev', default='latest')
def update_mail_receiver(rev):
"""Update discourse-mail-receiver.
REV: the git rev to update to ('vX.Y.Z') or 'latest'; defaults to
'latest'.
"""
repo = DiscourseRepo(repo="mail-receiver")
if rev == 'latest':
version = repo.versions[0]
else:
version = DiscourseVersion(rev)
_call_nix_update('discourse-mail-receiver', version.version)
@cli.command()
def update_plugins():
"""Update plugins to their latest revision."""
plugins = [
{'name': 'discourse-assign'},
{'name': 'discourse-bbcode-color'},
{'name': 'discourse-calendar'},
{'name': 'discourse-canned-replies'},
{'name': 'discourse-chat-integration'},
{'name': 'discourse-checklist'},
{'name': 'discourse-data-explorer'},
{'name': 'discourse-docs'},
{'name': 'discourse-github'},
{'name': 'discourse-ldap-auth', 'owner': 'jonmbake'},
{'name': 'discourse-math'},
{'name': 'discourse-migratepassword', 'owner': 'discoursehosting'},
{'name': 'discourse-openid-connect'},
{'name': 'discourse-prometheus'},
{'name': 'discourse-reactions'},
{'name': 'discourse-saved-searches'},
{'name': 'discourse-solved'},
{'name': 'discourse-spoiler-alert'},
{'name': 'discourse-voting'},
{'name': 'discourse-yearly-review'},
]
for plugin in plugins:
fetcher = plugin.get('fetcher') or "fetchFromGitHub"
owner = plugin.get('owner') or "discourse"
name = plugin.get('name')
repo_name = plugin.get('repo_name') or name
repo = DiscourseRepo(owner=owner, repo=repo_name)
# implement the plugin pinning algorithm laid out here:
# https://meta.discourse.org/t/pinning-plugin-and-theme-versions-for-older-discourse-installs/156971
# this makes sure we don't upgrade plugins to revisions that
# are incompatible with the packaged Discourse version
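        # Assumed shape of .discourse-compatibility, per the parsing below
        # (illustrative values only):
        #   3.1.0.beta5: 0123abc
        #   3.0.0: 4567def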
try:
compatibility_spec = repo.get_file('.discourse-compatibility', repo.latest_commit_sha)
versions = [(DiscourseVersion(discourse_version), plugin_rev.strip(' '))
for [discourse_version, plugin_rev]
in [line.split(':')
for line
in compatibility_spec.splitlines()]]
discourse_version = DiscourseVersion(_get_current_package_version('discourse'))
versions = list(filter(lambda ver: ver[0] >= discourse_version, versions))
if versions == []:
rev = repo.latest_commit_sha
else:
rev = versions[0][1]
print(rev)
except requests.exceptions.HTTPError:
rev = repo.latest_commit_sha
filename = _nix_eval(f'builtins.unsafeGetAttrPos "src" discourse.plugins.{name}')
if filename is None:
filename = Path(__file__).parent / 'plugins' / name / 'default.nix'
filename.parent.mkdir()
has_ruby_deps = False
for line in repo.get_file('plugin.rb', rev).splitlines():
if 'gem ' in line:
has_ruby_deps = True
break
with open(filename, 'w') as f:
f.write(textwrap.dedent(f"""
{{ lib, mkDiscoursePlugin, fetchFromGitHub }}:
mkDiscoursePlugin {{
name = "{name}";"""[1:] + ("""
bundlerEnvArgs.gemdir = ./.;""" if has_ruby_deps else "") + f"""
src = {fetcher} {{
owner = "{owner}";
repo = "{repo_name}";
rev = "replace-with-git-rev";
sha256 = "replace-with-sha256";
}};
meta = with lib; {{
homepage = "";
maintainers = with maintainers; [ ];
license = licenses.mit; # change to the correct license!
description = "";
}};
}}"""))
all_plugins_filename = Path(__file__).parent / 'plugins' / 'all-plugins.nix'
with open(all_plugins_filename, 'r+') as f:
content = f.read()
pos = -1
while content[pos] != '}':
pos -= 1
content = content[:pos] + f' {name} = callPackage ./{name} {{}};' + os.linesep + content[pos:]
f.seek(0)
f.write(content)
f.truncate()
else:
filename = filename['file']
prev_commit_sha = _nix_eval(f'discourse.plugins.{name}.src.rev')
if prev_commit_sha == rev:
click.echo(f'Plugin {name} is already at the latest revision')
continue
prev_hash = _nix_eval(f'discourse.plugins.{name}.src.outputHash')
new_hash = subprocess.check_output([
'nix-universal-prefetch', fetcher,
'--owner', owner,
'--repo', repo_name,
'--rev', rev,
], text=True).strip("\n")
click.echo(f"Update {name}, {prev_commit_sha} -> {rev} in {filename}")
with open(filename, 'r+') as f:
content = f.read()
content = content.replace(prev_commit_sha, rev)
content = content.replace(prev_hash, new_hash)
f.seek(0)
f.write(content)
f.truncate()
rubyenv_dir = Path(filename).parent
gemfile = rubyenv_dir / "Gemfile"
version_file_regex = re.compile(r'.*File\.expand_path\("\.\./(.*)", __FILE__\)')
gemfile_text = ''
plugin_file = repo.get_file('plugin.rb', rev)
plugin_file = plugin_file.replace(",\n", ", ") # fix split lines
for line in plugin_file.splitlines():
if 'gem ' in line:
line = ','.join(filter(lambda x: ":require_name" not in x, line.split(',')))
gemfile_text = gemfile_text + line + os.linesep
version_file_match = version_file_regex.match(line)
if version_file_match is not None:
filename = version_file_match.groups()[0]
content = repo.get_file(filename, rev)
with open(rubyenv_dir / filename, 'w') as f:
f.write(content)
if len(gemfile_text) > 0:
if os.path.isfile(gemfile):
os.remove(gemfile)
subprocess.check_output(['bundle', 'init'], cwd=rubyenv_dir)
os.chmod(gemfile, stat.S_IREAD | stat.S_IWRITE | stat.S_IRGRP | stat.S_IROTH)
with open(gemfile, 'a') as f:
f.write(gemfile_text)
subprocess.check_output(['bundle', 'lock', '--add-platform', 'ruby'], cwd=rubyenv_dir)
subprocess.check_output(['bundle', 'lock', '--update'], cwd=rubyenv_dir)
_remove_platforms(rubyenv_dir)
subprocess.check_output(['bundix'], cwd=rubyenv_dir)
if __name__ == '__main__':
cli()
| 16,096 | 35.418552 | 175 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/os-specific/linux/freeipa/paths.py
|
from ipaplatform.fedora.paths import FedoraPathNamespace
class NixOSPathNamespace(FedoraPathNamespace):
SBIN_IPA_JOIN = "@out@/bin/ipa-join"
IPA_GETCERT = "@out@/bin/ipa-getcert"
IPA_RMKEYTAB = "@out@/bin/ipa-rmkeytab"
IPA_GETKEYTAB = "@out@/bin/ipa-getkeytab"
NSUPDATE = "@bind@/bin/nsupdate"
BIN_CURL = "@curl@/bin/curl"
KINIT = "@kerberos@/bin/kinit"
KDESTROY = "@kerberos@/bin/kdestroy"
paths = NixOSPathNamespace()
| 454 | 31.5 | 56 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/os-specific/linux/kernel/update-zen.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3 nix nix-prefetch-git
import fileinput
import json
import os
import sys
import re
import subprocess
from datetime import datetime
from urllib.request import urlopen, Request
def panic(exc):
raise Exception(exc)
DIR = os.path.dirname(os.path.abspath(__file__))
HEADERS = {'Accept': 'application/vnd.github.v3+json'}
def github_api_request(endpoint):
base_url = 'https://api.github.com/'
request = Request(base_url + endpoint, headers=HEADERS)
with urlopen(request) as http_response:
return json.loads(http_response.read().decode('utf-8'))
def get_commit_date(repo, sha):
url = f'https://api.github.com/repos/{repo}/commits/{sha}'
request = Request(url, headers=HEADERS)
with urlopen(request) as http_response:
commit = json.loads(http_response.read().decode())
date = commit['commit']['committer']['date'].rstrip('Z')
date = datetime.fromisoformat(date).date().isoformat()
return 'unstable-' + date
def nix_prefetch_git(url, rev):
"""Prefetches the requested Git revision (incl. submodules) of the given repository URL."""
print(f'nix-prefetch-git {url} {rev}')
out = subprocess.check_output([
'nix-prefetch-git', '--quiet',
'--url', url,
'--rev', rev,
'--fetch-submodules'])
return json.loads(out)['sha256']
def nix_prefetch_url(url, unpack=False):
"""Prefetches the content of the given URL."""
print(f'nix-prefetch-url {url}')
options = ['--type', 'sha256']
if unpack:
options += ['--unpack']
out = subprocess.check_output(['nix-prefetch-url'] + options + [url])
return out.decode('utf-8').rstrip()
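# The helpers below rely on zen-kernels.nix tagging each field with a variant
# comment; illustrative (assumed) lines:
#   version = "6.6.2"; #zen
#   suffix = "zen1"; #zen
#   sha256 = "..."; #zen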
def update_file(relpath, variant, version, suffix, sha256):
file_path = os.path.join(DIR, relpath)
with fileinput.FileInput(file_path, inplace=True) as f:
for line in f:
result = line
result = re.sub(
fr'^ version = ".+"; #{variant}',
f' version = "{version}"; #{variant}',
result)
result = re.sub(
fr'^ suffix = ".+"; #{variant}',
f' suffix = "{suffix}"; #{variant}',
result)
result = re.sub(
fr'^ sha256 = ".+"; #{variant}',
f' sha256 = "{sha256}"; #{variant}',
result)
print(result, end='')
def read_file(relpath, variant):
file_path = os.path.join(DIR, relpath)
re_version = re.compile(fr'^\s*version = "(.+)"; #{variant}')
re_suffix = re.compile(fr'^\s*suffix = "(.+)"; #{variant}')
version = None
suffix = None
with fileinput.FileInput(file_path, mode='r') as f:
for line in f:
version_match = re_version.match(line)
if version_match:
version = version_match.group(1)
continue
suffix_match = re_suffix.match(line)
if suffix_match:
suffix = suffix_match.group(1)
continue
if version and suffix:
break
return version, suffix
if __name__ == "__main__":
if len(sys.argv) == 1:
panic("Update variant expected")
variant = sys.argv[1]
if variant not in ("zen", "lqx"):
panic(f"Unexepected variant instead of 'zen' or 'lqx': {sys.argv[1]}")
pattern = re.compile(fr"v(\d+\.\d+\.?\d*)-({variant}\d+)")
zen_tags = github_api_request('repos/zen-kernel/zen-kernel/releases')
for tag in zen_tags:
zen_match = pattern.match(tag['tag_name'])
if zen_match:
zen_tag = zen_match.group(0)
zen_version = zen_match.group(1)
zen_suffix = zen_match.group(2)
break
old_version, old_suffix = read_file('zen-kernels.nix', variant)
if old_version != zen_version or old_suffix != zen_suffix:
zen_hash = nix_prefetch_git('https://github.com/zen-kernel/zen-kernel.git', zen_tag)
update_file('zen-kernels.nix', variant, zen_version, zen_suffix, zen_hash)
| 4,102 | 32.357724 | 95 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/os-specific/linux/kernel/hardened/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python -p "python3.withPackages (ps: [ps.pygithub])" git gnupg
# This is automatically called by ../update.sh.
from __future__ import annotations
import json
import os
import re
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import (
Dict,
Iterator,
List,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
)
from github import Github
from github.GitRelease import GitRelease
VersionComponent = Union[int, str]
Version = List[VersionComponent]
PatchData = TypedDict("PatchData", {"name": str, "url": str, "sha256": str, "extra": str})
Patch = TypedDict("Patch", {
"patch": PatchData,
"version": str,
"sha256": str,
})
@dataclass
class ReleaseInfo:
version: Version
release: GitRelease
HERE = Path(__file__).resolve().parent
NIXPKGS_KERNEL_PATH = HERE.parent
NIXPKGS_PATH = HERE.parents[4]
HARDENED_GITHUB_REPO = "anthraxx/linux-hardened"
HARDENED_TRUSTED_KEY = HERE / "anthraxx.asc"
HARDENED_PATCHES_PATH = HERE / "patches.json"
MIN_KERNEL_VERSION: Version = [4, 14]
def run(*args: Union[str, Path]) -> subprocess.CompletedProcess[str]:
try:
return subprocess.run(
args,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
except subprocess.CalledProcessError as err:
print(
f"error: `{err.cmd}` failed unexpectedly\n"
f"status code: {err.returncode}\n"
f"stdout:\n{err.stdout.strip()}\n"
f"stderr:\n{err.stderr.strip()}",
file=sys.stderr,
)
sys.exit(1)
def nix_prefetch_url(url: str) -> Tuple[str, Path]:
output = run("nix-prefetch-url", "--print-path", url).stdout
sha256, path = output.strip().split("\n")
return sha256, Path(path)
def verify_openpgp_signature(
*, name: str, trusted_key: Path, sig_path: Path, data_path: Path,
) -> bool:
with TemporaryDirectory(suffix=".nixpkgs-gnupg-home") as gnupg_home_str:
gnupg_home = Path(gnupg_home_str)
run("gpg", "--homedir", gnupg_home, "--import", trusted_key)
keyring = gnupg_home / "pubring.kbx"
try:
subprocess.run(
("gpgv", "--keyring", keyring, sig_path, data_path),
check=True,
stderr=subprocess.PIPE,
encoding="utf-8",
)
return True
except subprocess.CalledProcessError as err:
print(
f"error: signature for {name} failed to verify!",
file=sys.stderr,
)
print(err.stderr, file=sys.stderr, end="")
return False
def fetch_patch(*, name: str, release_info: ReleaseInfo) -> Optional[Patch]:
release = release_info.release
extra = f'-{release_info.version[-1]}'
def find_asset(filename: str) -> str:
try:
it: Iterator[str] = (
asset.browser_download_url
for asset in release.get_assets()
if asset.name == filename
)
return next(it)
except StopIteration:
raise KeyError(filename)
patch_filename = f"{name}.patch"
try:
patch_url = find_asset(patch_filename)
sig_url = find_asset(patch_filename + ".sig")
except KeyError:
print(f"error: {patch_filename}{{,.sig}} not present", file=sys.stderr)
return None
sha256, patch_path = nix_prefetch_url(patch_url)
_, sig_path = nix_prefetch_url(sig_url)
sig_ok = verify_openpgp_signature(
name=name,
trusted_key=HARDENED_TRUSTED_KEY,
sig_path=sig_path,
data_path=patch_path,
)
if not sig_ok:
return None
kernel_ver = re.sub(r"(.*)(-hardened[\d]+)$", r'\1', release_info.release.tag_name)
major = kernel_ver.split('.')[0]
sha256_kernel, _ = nix_prefetch_url(f"mirror://kernel/linux/kernel/v{major}.x/linux-{kernel_ver}.tar.xz")
return Patch(
patch=PatchData(name=patch_filename, url=patch_url, sha256=sha256, extra=extra),
version=kernel_ver,
sha256=sha256_kernel
)
def parse_version(version_str: str) -> Version:
version: Version = []
    for component in re.split(r'\.|-', version_str):
try:
version.append(int(component))
except ValueError:
version.append(component)
return version
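# Illustrative example (added): parse_version("5.10.8-hardened1")
# returns [5, 10, 8, 'hardened1'].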
def version_string(version: Version) -> str:
return ".".join(str(component) for component in version)
def major_kernel_version_key(kernel_version: Version) -> str:
return version_string(kernel_version[:-1])
def commit_patches(*, kernel_key: str, message: str) -> None:
new_patches_path = HARDENED_PATCHES_PATH.with_suffix(".new")
with open(new_patches_path, "w") as new_patches_file:
json.dump(patches, new_patches_file, indent=4, sort_keys=True)
new_patches_file.write("\n")
os.rename(new_patches_path, HARDENED_PATCHES_PATH)
message = f"linux/hardened/patches/{kernel_key}: {message}"
print(message)
if os.environ.get("COMMIT"):
run(
"git",
"-C",
NIXPKGS_PATH,
"commit",
f"--message={message}",
HARDENED_PATCHES_PATH,
)
# Load the existing patches.
patches: Dict[str, Patch]
with open(HARDENED_PATCHES_PATH) as patches_file:
patches = json.load(patches_file)
# Get the set of currently packaged kernel versions.
kernel_versions = {}
for filename in os.listdir(NIXPKGS_KERNEL_PATH):
filename_match = re.fullmatch(r"linux-(\d+)\.(\d+)\.nix", filename)
if filename_match:
nix_version_expr = f"""
with import {NIXPKGS_PATH} {{}};
(callPackage {NIXPKGS_KERNEL_PATH / filename} {{}}).version
"""
kernel_version_json = run(
"nix-instantiate", "--eval", "--system", "x86_64-linux", "--json", "--expr", nix_version_expr,
).stdout
kernel_version = parse_version(json.loads(kernel_version_json))
if kernel_version < MIN_KERNEL_VERSION:
continue
kernel_key = major_kernel_version_key(kernel_version)
kernel_versions[kernel_key] = kernel_version
# Remove patches for unpackaged kernel versions.
for kernel_key in sorted(patches.keys() - kernel_versions.keys()):
commit_patches(kernel_key=kernel_key, message="remove")
g = Github(os.environ.get("GITHUB_TOKEN"))
repo = g.get_repo(HARDENED_GITHUB_REPO)
failures = False
# Match each kernel version with the best patch version.
releases = {}
i = 0
for release in repo.get_releases():
# Dirty workaround to make sure that we don't run into issues because
# GitHub's API only allows fetching the last 1000 releases.
# It's not reliable to exit earlier because not every kernel minor may
# have hardened patches, hence the naive search below.
i += 1
if i > 500:
break
version = parse_version(release.tag_name)
# needs to look like e.g. 5.6.3-hardened1
if len(version) < 4:
continue
if not (isinstance(version[-2], int)):
continue
kernel_version = version[:-1]
kernel_key = major_kernel_version_key(kernel_version)
try:
packaged_kernel_version = kernel_versions[kernel_key]
except KeyError:
continue
release_info = ReleaseInfo(version=version, release=release)
if kernel_version == packaged_kernel_version:
releases[kernel_key] = release_info
else:
# Fall back to the latest patch for this major kernel version,
# skipping patches for kernels newer than the packaged one.
if '.'.join(str(x) for x in kernel_version) > '.'.join(str(x) for x in packaged_kernel_version):
continue
elif (
kernel_key not in releases or releases[kernel_key].version < version
):
releases[kernel_key] = release_info
# Update hardened-patches.json for each release.
for kernel_key in sorted(releases.keys()):
release_info = releases[kernel_key]
release = release_info.release
version = release_info.version
version_str = release.tag_name
name = f"linux-hardened-{version_str}"
old_version: Optional[Version] = None
old_version_str: Optional[str] = None
update: bool
try:
old_filename = patches[kernel_key]["patch"]["name"]
old_version_str = old_filename.replace("linux-hardened-", "").replace(
".patch", ""
)
old_version = parse_version(old_version_str)
update = old_version < version
except KeyError:
update = True
if update:
patch = fetch_patch(name=name, release_info=release_info)
if patch is None:
failures = True
else:
patches[kernel_key] = patch
if old_version:
message = f"{old_version_str} -> {version_str}"
else:
message = f"init at {version_str}"
commit_patches(kernel_key=kernel_key, message=message)
missing_kernel_versions = kernel_versions.keys() - patches.keys()
if missing_kernel_versions:
print(
f"warning: no patches for kernel versions "
+ ", ".join(missing_kernel_versions),
file=sys.stderr,
)
if failures:
sys.exit(1)
| 9,422 | 29.794118 | 109 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/os-specific/darwin/gen-frameworks.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python -p python3 swiftPackages.swift-unwrapped
"""
Generate a frameworks.nix for a macOS SDK.
You may point this tool at an Xcode bundled SDK, but more ideal is using the
SDK from Nixpkgs. For example:
SDK_PATH="$(nix-build --no-link -A darwin.apple_sdk_11_0.MacOSX-SDK)"
./gen-frameworks.py "$SDK_PATH" > ./new-frameworks.nix
"""
import json
import os
import subprocess
import sys
ALLOWED_LIBS = ["simd"]
HEADER = """\
# This file is generated by gen-frameworks.nix.
# Do not edit, put overrides in apple_sdk.nix instead.
{ libs, frameworks }: with libs; with frameworks;
{
"""
FOOTER = """\
}
"""
def eprint(*args):
print(*args, file=sys.stderr)
def name_from_ident(ident):
return ident.get("swift", ident.get("clang"))
def scan_sdk(sdk):
# Find frameworks by scanning the SDK frameworks directory.
frameworks = [
framework.removesuffix(".framework")
for framework in os.listdir(f"{sdk}/System/Library/Frameworks")
if not framework.startswith("_")
]
frameworks.sort()
# Determine the longest name for padding output.
width = len(max(frameworks, key=len))
output = HEADER
for framework in frameworks:
deps = []
# Use Swift to scan dependencies, because a module may have both Clang
# and Swift parts. Using Clang only imports the Clang module, whereas
# using Swift will usually import both Clang + Swift overlay.
#
# TODO: The above is an assumption. Not sure if it's possible a Swift
# module completely shadows a Clang module. (Seems unlikely)
#
# TODO: Handle "module 'Foobar' is incompatible with feature 'swift'"
#
# If there were a similar Clang invocation for scanning, we could fix
# the above todos, but that doesn't appear to exist.
eprint(f"# scanning {framework}")
result = subprocess.run(
[
"swiftc",
"-scan-dependencies",
# We provide a source snippet via stdin.
"-",
# Use the provided SDK.
"-sdk",
sdk,
# This search path is normally added automatically by the
# compiler based on the SDK, but we have a patch in place that
# removes that for SDKs in /nix/store, because our xcbuild stub
# SDK doesn't have the directory.
# (swift-prevent-sdk-dirs-warning.patch)
"-I",
f"{sdk}/usr/lib/swift",
# For some reason, 'lib/swift/shims' from both the SDK and
# Swift compiler are picked up, causing redefinition errors.
# This eliminates the latter.
"-resource-dir",
f"{sdk}/usr/lib/swift",
],
input=f"import {framework}".encode(),
stdout=subprocess.PIPE,
)
if result.returncode != 0:
eprint(f"# Scanning {framework} failed (exit code {result.returncode})")
result.stdout = b""
# Parse JSON output.
if len(result.stdout) != 0:
data = json.loads(result.stdout)
# Entries in the modules list come in pairs. The first is an
# identifier (`{ swift: "foobar" }` or `{ clang: "foobar" }`), and
# the second metadata for that module. Here we look for the pair
# that matches the framework we're scanning (and ignore the rest).
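            # Assumed/illustrative shape of that output, matching the access
            # pattern below:
            #   "modules": [ {"swift": "Foundation"},
            #                {"directDependencies": [{"clang": "ObjectiveC"}]},
            #                ... ]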
modules = data["modules"]
for i in range(0, len(modules), 2):
ident, meta = modules[i : i + 2]
# NOTE: We may match twice, for a Swift module _and_ for a
# Clang module. So matching here doesn't break from the loop,
# and deps is appended to.
if name_from_ident(ident) == framework:
dep_idents = meta["directDependencies"]
deps += [name_from_ident(ident) for ident in dep_idents]
# List unfiltered deps in progress output.
eprint(ident, "->", dep_idents)
# Filter out modules that are not separate derivations.
# Also filter out duplicates (when a Swift overlay imports the Clang module)
allowed = frameworks + ALLOWED_LIBS
deps = set([dep for dep in deps if dep in allowed])
# Filter out self-references. (Swift overlay importing Clang module.)
if framework in deps:
deps.remove(framework)
# Generate a Nix attribute line.
if len(deps) != 0:
deps = list(deps)
deps.sort()
deps = " ".join(deps)
output += f" {framework.ljust(width)} = {{ inherit {deps}; }};\n"
else:
output += f" {framework.ljust(width)} = {{}};\n"
output += FOOTER
sys.stdout.write(output)
if __name__ == "__main__":
if len(sys.argv) != 2:
eprint(f"Usage: {sys.argv[0]} <path to MacOSX.sdk>")
sys.exit(64)
scan_sdk(sys.argv[1])
| 5,101 | 33.472973 | 84 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/desktops/gnome/find-latest-version.py
|
import argparse
import math
import json
import requests
import sys
from enum import Enum
from libversion import Version
from typing import (
Callable,
Iterable,
List,
NamedTuple,
Optional,
Tuple,
TypeVar,
Type,
cast,
)
EnumValue = TypeVar("EnumValue", bound=Enum)
def enum_to_arg(enum: Enum) -> str:
return enum.name.lower().replace("_", "-")
def arg_to_enum(enum_meta: Type[EnumValue], name: str) -> EnumValue:
return enum_meta[name.upper().replace("-", "_")]
def enum_to_arg_choices(enum_meta: Type[EnumValue]) -> Tuple[str, ...]:
return tuple(enum_to_arg(v) for v in cast(Iterable[EnumValue], enum_meta))
class Stability(Enum):
STABLE = "stable"
UNSTABLE = "unstable"
VersionPolicy = Callable[[Version], bool]
VersionPredicate = Callable[[Version, Stability], bool]
class VersionPredicateHolder(NamedTuple):
function: VersionPredicate
def version_to_list(version: str) -> List[int]:
return list(map(int, version.split(".")))
def odd_unstable(version: Version, selected: Stability) -> bool:
try:
version_parts = version_to_list(version.value)
    except ValueError:
# Failing to parse as a list of numbers likely means the version contains a string tag like “beta”, therefore it is not a stable release.
return selected != Stability.STABLE
if len(version_parts) < 2:
return True
even = version_parts[1] % 2 == 0
prerelease = (version_parts[1] >= 90 and version_parts[1] < 100) or (version_parts[1] >= 900 and version_parts[1] < 1000)
stable = even and not prerelease
if selected == Stability.STABLE:
return stable
else:
return True
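# Illustrative examples (added; not exhaustive): under odd_unstable,
# "3.38.2" and "40.4" count as stable, while "3.37.1" and "41.beta" only
# match the unstable selection.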
def tagged(version: Version, selected: Stability) -> bool:
if selected == Stability.STABLE:
return not ("alpha" in version.value or "beta" in version.value or "rc" in version.value)
else:
return True
def no_policy(version: Version, selected: Stability) -> bool:
return True
class VersionPolicyKind(Enum):
# HACK: Using function as values directly would make Enum
# think they are methods and skip them.
ODD_UNSTABLE = VersionPredicateHolder(odd_unstable)
TAGGED = VersionPredicateHolder(tagged)
NONE = VersionPredicateHolder(no_policy)
def make_version_policy(
version_policy_kind: VersionPolicyKind,
selected: Stability,
upper_bound: Optional[Version],
) -> VersionPolicy:
version_predicate = version_policy_kind.value.function
if not upper_bound:
return lambda version: version_predicate(version, selected)
else:
return lambda version: version_predicate(version, selected) and version < upper_bound
def find_versions(package_name: str, version_policy: VersionPolicy) -> List[Version]:
# The structure of cache.json: https://gitlab.gnome.org/Infrastructure/sysadmin-bin/blob/master/ftpadmin#L762
cache = json.loads(requests.get(f"https://ftp.gnome.org/pub/GNOME/sources/{package_name}/cache.json").text)
if type(cache) != list or cache[0] != 4:
raise Exception("Unknown format of cache.json file.")
versions: Iterable[Version] = map(Version, cache[2][package_name])
versions = sorted(filter(version_policy, versions))
return versions
parser = argparse.ArgumentParser(
description="Find latest version for a GNOME package by crawling their release server.",
)
parser.add_argument(
"package-name",
help="Name of the directory in https://ftp.gnome.org/pub/GNOME/sources/ containing the package.",
)
parser.add_argument(
"version-policy",
help="Policy determining which versions are considered stable. GNOME packages usually denote stability by alpha/beta/rc tag in the version. For older packages, odd minor versions are unstable but there are exceptions.",
choices=enum_to_arg_choices(VersionPolicyKind),
nargs="?",
default=enum_to_arg(VersionPolicyKind.TAGGED),
)
parser.add_argument(
"requested-release",
help="Most of the time, we will want to update to stable version but sometimes it is useful to test.",
choices=enum_to_arg_choices(Stability),
nargs="?",
default=enum_to_arg(Stability.STABLE),
)
parser.add_argument(
"--upper-bound",
dest="upper-bound",
help="Only look for versions older than this one (useful for pinning dependencies).",
)
if __name__ == "__main__":
args = parser.parse_args()
package_name = getattr(args, "package-name")
requested_release = arg_to_enum(Stability, getattr(args, "requested-release"))
upper_bound = getattr(args, "upper-bound")
if upper_bound is not None:
upper_bound = Version(upper_bound)
version_policy_kind = arg_to_enum(VersionPolicyKind, getattr(args, "version-policy"))
version_policy = make_version_policy(version_policy_kind, requested_release, upper_bound)
try:
versions = find_versions(package_name, version_policy)
except Exception as error:
print(error, file=sys.stderr)
sys.exit(1)
if len(versions) == 0:
print("No versions matched.", file=sys.stderr)
sys.exit(1)
print(versions[-1].value)
| 5,123 | 30.054545 | 223 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/desktops/gnome/extensions/update-extensions.py
|
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=../../../.. -i python3 -p python3
import base64
import json
import logging
import subprocess
import urllib.error
import urllib.request
from operator import itemgetter
from pathlib import Path
from typing import List, Dict, Optional, Any, Tuple
# We don't want all those deprecated legacy extensions
# Group extensions by GNOME "major" version for compatibility reasons
supported_versions = {
"38": "3.38",
"40": "40",
"41": "41",
"42": "42",
"43": "43",
"44": "44",
}
# Some type alias to increase readability of complex compound types
PackageName = str
ShellVersion = str
Uuid = str
ExtensionVersion = int
# Keep track of all names that have been used till now to detect collisions.
# This works because we deterministically process all extensions in historical order
# The outer dict level is the shell version, as we are tracking duplicates only per same Shell version.
# key: shell version, value: Dict with key: pname, value: list of UUIDs with that pname
package_name_registry: Dict[ShellVersion, Dict[PackageName, List[Uuid]]] = {}
for shell_version in supported_versions.keys():
package_name_registry[shell_version] = {}
updater_dir_path = Path(__file__).resolve().parent
def fetch_extension_data(uuid: str, version: str) -> Tuple[str, str]:
"""
Download the extension and hash it. We use `nix-prefetch-url` for this for efficiency reasons.
Returns a tuple with the hash (Nix-compatible) of the zip file's content and the base64-encoded content of its metadata.json.
"""
# The download URLs follow this schema
uuid = uuid.replace("@", "")
url: str = f"https://extensions.gnome.org/extension-data/{uuid}.v{version}.shell-extension.zip"
# Download extension and add the zip content to nix-store
process = subprocess.run(
["nix-prefetch-url", "--unpack", "--print-path", url], capture_output=True, text=True
)
lines = process.stdout.splitlines()
# Get hash from first line of nix-prefetch-url output
hash = lines[0].strip()
# Get path from second line of nix-prefetch-url output
path = Path(lines[1].strip())
# Get metadata.json content from nix-store
with open(path / "metadata.json", "r") as out:
metadata = base64.b64encode(out.read().encode("ascii")).decode()
return hash, metadata
def generate_extension_versions(
extension_version_map: Dict[ShellVersion, ExtensionVersion], uuid: str
) -> Dict[ShellVersion, Dict[str, str]]:
"""
Takes in a mapping from shell versions to extension versions and transforms it the way we need it:
- Only take one extension version per GNOME Shell major version (as per `supported_versions`)
- Filter out versions that only support old GNOME versions
- Download the extension and hash it
"""
# Determine extension version per shell version
extension_versions: Dict[ShellVersion, ExtensionVersion] = {}
for shell_version, version_prefix in supported_versions.items():
# Newest compatible extension version
extension_version: Optional[int] = max(
(
int(ext_ver)
for shell_ver, ext_ver in extension_version_map.items()
if (shell_ver.startswith(version_prefix))
),
default=None,
)
# Extension is not compatible with this GNOME version
if not extension_version:
continue
extension_versions[shell_version] = extension_version
# Download information once for all extension versions chosen above
extension_info_cache: Dict[ExtensionVersion, Tuple[str, str]] = {}
for extension_version in sorted(set(extension_versions.values())):
logging.debug(
f"[{uuid}] Downloading v{extension_version}"
)
extension_info_cache[extension_version] = \
fetch_extension_data(uuid, str(extension_version))
# Fill map
extension_versions_full: Dict[ShellVersion, Dict[str, str]] = {}
for shell_version, extension_version in extension_versions.items():
sha256, metadata = extension_info_cache[extension_version]
extension_versions_full[shell_version] = {
"version": str(extension_version),
"sha256": sha256,
# The downloads are impure, their metadata.json may change at any time.
# Thus, we back it up / pin it to remain deterministic
# Upstream issue: https://gitlab.gnome.org/Infrastructure/extensions-web/-/issues/137
"metadata": metadata,
}
return extension_versions_full
def pname_from_url(url: str) -> Tuple[str, str]:
"""
Parse something like "/extension/1475/battery-time/" and output ("battery-time", "1475")
"""
url = url.split("/") # type: ignore
return url[3], url[2]
def process_extension(extension: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""
Process an extension. It takes in raw scraped data and downloads all the necessary information that buildGnomeExtension.nix requires
Input: a json object of one extension queried from the site. It has the following schema (only important key listed):
{
"uuid": str,
"name": str,
"description": str,
"link": str,
"shell_version_map": {
str: { "version": int, … },
…
},
…
}
"uuid" is an extension UUID that looks like this (most of the time): "[email protected]".
Don't make any assumptions on it, and treat it like an opaque string!
"link" follows the following schema: "/extension/$number/$string/"
The number is monotonically increasing and unique to every extension.
The string is usually derived from the extension name (but shortened, kebab-cased and URL friendly).
It may diverge from the actual name.
The keys of "shell_version_map" are GNOME Shell version numbers.
Output: a json object to be stored, or None if the extension should be skipped. Schema:
{
"uuid": str,
"name": str,
"pname": str,
"description": str,
"link": str,
"shell_version_map": {
str: { "version": int, "sha256": str, "metadata": <hex> },
…
}
}
Only "uuid" gets passed along unmodified. "name", "description" and "link" are taken from the input, but sanitized.
"pname" gets generated from other fields and "shell_version_map" has a completely different structure than the input
field with the same name.
"""
uuid = extension["uuid"]
# Yeah, there are some extensions without any releases
if not extension["shell_version_map"]:
return None
logging.info(f"Processing '{uuid}'")
# Input is a mapping str -> { version: int, … }
# We want to map shell versions to extension versions
shell_version_map: Dict[ShellVersion, int] = {
k: v["version"] for k, v in extension["shell_version_map"].items()
}
# Transform shell_version_map to be more useful for us. Also throw away unwanted versions
shell_version_map: Dict[ShellVersion, Dict[str, str]] = generate_extension_versions(shell_version_map, uuid) # type: ignore
# No compatible versions found
if not shell_version_map:
return None
# Fetch a human-readable name for the package.
(pname, _pname_id) = pname_from_url(extension["link"])
for shell_version in shell_version_map.keys():
if pname in package_name_registry[shell_version]:
logging.warning(f"Package name '{pname}' for GNOME '{shell_version}' is colliding.")
package_name_registry[shell_version][pname].append(uuid)
else:
package_name_registry[shell_version][pname] = [uuid]
return {
"uuid": uuid,
"name": extension["name"],
"pname": pname,
"description": extension["description"],
"link": "https://extensions.gnome.org" + extension["link"],
"shell_version_map": shell_version_map,
}
def scrape_extensions_index() -> List[Dict[str, Any]]:
"""
Scrape the list of extensions by sending search queries to the API. We simply go over it
page by page until we hit a non-full page or a 404 error.
The returned list is sorted by the age of the extension, in order to be deterministic.
"""
page = 0
extensions = []
while True:
page += 1
logging.info("Scraping page " + str(page))
try:
with urllib.request.urlopen(
f"https://extensions.gnome.org/extension-query/?n_per_page=25&page={page}"
) as response:
data = json.loads(response.read().decode())["extensions"]
response_length = len(data)
for extension in data:
extensions.append(extension)
# If our page isn't "full", it must have been the last one
if response_length < 25:
logging.debug(
f"\tThis page only has {response_length} entries, so it must be the last one."
)
break
except urllib.error.HTTPError as e:
if e.code == 404:
# We reached past the last page and are done now
break
else:
raise
# `pk` is the primary key in the extensions.gnome.org database. Sorting on it will give us a stable,
# deterministic ordering.
extensions.sort(key=itemgetter("pk"))
return extensions
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
raw_extensions = scrape_extensions_index()
logging.info(f"Downloaded {len(raw_extensions)} extensions. Processing …")
processed_extensions: List[Dict[str, Any]] = []
for num, raw_extension in enumerate(raw_extensions):
processed_extension = process_extension(raw_extension)
if processed_extension:
processed_extensions.append(processed_extension)
logging.debug(f"Processed {num + 1} / {len(raw_extensions)}")
with open(updater_dir_path / "extensions.json", "w") as out:
# Manually pretty-print the outer level, but then do one compact line per extension
# This allows for the diffs to be manageable (one line of change per extension) despite their quantity
for index, extension in enumerate(processed_extensions):
if index == 0:
out.write("[ ")
else:
out.write(", ")
json.dump(extension, out, ensure_ascii=False)
out.write("\n")
out.write("]\n")
logging.info(
f"Done. Writing results to extensions.json ({len(processed_extensions)} extensions in total)"
)
with open(updater_dir_path / "extensions.json", "r") as out:
# Check that the generated file actually is valid JSON, just to be sure
json.load(out)
with open(updater_dir_path / "collisions.json", "w") as out:
# Filter out those that are not duplicates
package_name_registry_filtered: Dict[ShellVersion, Dict[PackageName, List[Uuid]]] = {
# The outer level keys are shell versions
shell_version: {
# The inner keys are extension names, with a list of all extensions with that name as value.
pname: extensions for pname, extensions in collisions.items() if len(extensions) > 1
} for shell_version, collisions in package_name_registry.items()
}
json.dump(package_name_registry_filtered, out, indent=2, ensure_ascii=False)
out.write("\n")
logging.info(
"Done. Writing name collisions to collisions.json (please check manually)"
)
| 12,080 | 38.740132 | 136 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/games/factorio/update.py
|
#!/usr/bin/env nix-shell
#! nix-shell -i python -p "python3.withPackages (ps: with ps; [ ps.absl-py ps.requests ])" nix
from collections import defaultdict
import copy
from dataclasses import dataclass
import json
import os.path
import subprocess
from typing import Callable, Dict
from absl import app
from absl import flags
from absl import logging
import requests
FACTORIO_API = "https://factorio.com/api/latest-releases"
FLAGS = flags.FLAGS
flags.DEFINE_string('username', '', 'Factorio username for retrieving binaries.')
flags.DEFINE_string('token', '', 'Factorio token for retrieving binaries.')
flags.DEFINE_string('out', '', 'Output path for versions.json.')
flags.DEFINE_list('release_type', '', 'If non-empty, a comma-separated list of release types to update (e.g. alpha).')
flags.DEFINE_list('release_channel', '', 'If non-empty, a comma-separated list of release channels to update (e.g. experimental).')
@dataclass
class System:
nix_name: str
url_name: str
tar_name: str
@dataclass
class ReleaseType:
name: str
needs_auth: bool = False
@dataclass
class ReleaseChannel:
name: str
FactorioVersionsJSON = Dict[str, Dict[str, str]]
OurVersionJSON = Dict[str, Dict[str, Dict[str, Dict[str, str]]]]
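# Illustrative nesting of OurVersionJSON (added; values are made up):
#   {"x86_64-linux": {"headless": {"stable":
#       {"name": "...", "url": "...", "version": "1.1.100",
#        "needsAuth": False, "tarDirectory": "x64", "sha256": "..."}}}}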
SYSTEMS = [
System(nix_name="x86_64-linux", url_name="linux64", tar_name="x64"),
]
RELEASE_TYPES = [
ReleaseType("alpha", needs_auth=True),
ReleaseType("demo"),
ReleaseType("headless"),
]
RELEASE_CHANNELS = [
ReleaseChannel("experimental"),
ReleaseChannel("stable"),
]
def find_versions_json() -> str:
if FLAGS.out:
return FLAGS.out
try_paths = ["pkgs/games/factorio/versions.json", "versions.json"]
for path in try_paths:
if os.path.exists(path):
return path
raise Exception("Couldn't figure out where to write versions.json; try specifying --out")
def fetch_versions() -> FactorioVersionsJSON:
    return json.loads(requests.get(FACTORIO_API).text)
def generate_our_versions(factorio_versions: FactorioVersionsJSON) -> OurVersionJSON:
rec_dd = lambda: defaultdict(rec_dd)
output = rec_dd()
# Deal with times where there's no experimental version
for rc in RELEASE_CHANNELS:
if not factorio_versions[rc.name]:
factorio_versions[rc.name] = factorio_versions['stable']
for system in SYSTEMS:
for release_type in RELEASE_TYPES:
for release_channel in RELEASE_CHANNELS:
version = factorio_versions[release_channel.name].get(release_type.name)
                if version is None:
continue
this_release = {
"name": f"factorio_{release_type.name}_{system.tar_name}-{version}.tar.xz",
"url": f"https://factorio.com/get-download/{version}/{release_type.name}/{system.url_name}",
"version": version,
"needsAuth": release_type.needs_auth,
"tarDirectory": system.tar_name,
}
output[system.nix_name][release_type.name][release_channel.name] = this_release
return output
def iter_version(versions: OurVersionJSON, it: Callable[[str, str, str, Dict[str, str]], Dict[str, str]]) -> OurVersionJSON:
versions = copy.deepcopy(versions)
for system_name, system in versions.items():
for release_type_name, release_type in system.items():
for release_channel_name, release in release_type.items():
release_type[release_channel_name] = it(system_name, release_type_name, release_channel_name, dict(release))
return versions
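# Illustrative sketch (not part of the original script): iter_version walks the
# system -> release type -> release channel nesting and replaces every leaf release
# dict with whatever the callback returns, e.g.
# >>> versions = {"x86_64-linux": {"alpha": {"stable": {"version": "1.1.0"}}}}
# >>> iter_version(versions, lambda s, rt, rc, rel: dict(rel, seen=True))
# {'x86_64-linux': {'alpha': {'stable': {'version': '1.1.0', 'seen': True}}}}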
def merge_versions(old: OurVersionJSON, new: OurVersionJSON) -> OurVersionJSON:
"""Copies already-known hashes from version.json to avoid having to re-fetch."""
def _merge_version(system_name: str, release_type_name: str, release_channel_name: str, release: Dict[str, str]) -> Dict[str, str]:
old_system = old.get(system_name, {})
old_release_type = old_system.get(release_type_name, {})
old_release = old_release_type.get(release_channel_name, {})
if FLAGS.release_type and release_type_name not in FLAGS.release_type:
logging.info("%s/%s/%s: not in --release_type, not updating", system_name, release_type_name, release_channel_name)
return old_release
if FLAGS.release_channel and release_channel_name not in FLAGS.release_channel:
logging.info("%s/%s/%s: not in --release_channel, not updating", system_name, release_type_name, release_channel_name)
return old_release
if not "sha256" in old_release:
logging.info("%s/%s/%s: not copying sha256 since it's missing", system_name, release_type_name, release_channel_name)
return release
if not all(old_release.get(k, None) == release[k] for k in ['name', 'version', 'url']):
logging.info("%s/%s/%s: not copying sha256 due to mismatch", system_name, release_type_name, release_channel_name)
return release
release["sha256"] = old_release["sha256"]
return release
return iter_version(new, _merge_version)
def nix_prefetch_url(name: str, url: str, algo: str = 'sha256') -> str:
cmd = ['nix-prefetch-url', '--type', algo, '--name', name, url]
logging.info('running %s', cmd)
out = subprocess.check_output(cmd)
return out.decode('utf-8').strip()
def fill_in_hash(versions: OurVersionJSON) -> OurVersionJSON:
"""Fill in sha256 hashes for anything missing them."""
urls_to_hash = {}
def _fill_in_hash(system_name: str, release_type_name: str, release_channel_name: str, release: Dict[str, str]) -> Dict[str, str]:
if "sha256" in release:
logging.info("%s/%s/%s: skipping fetch, sha256 already present", system_name, release_type_name, release_channel_name)
return release
url = release["url"]
if url in urls_to_hash:
logging.info("%s/%s/%s: found url %s in cache", system_name, release_type_name, release_channel_name, url)
release["sha256"] = urls_to_hash[url]
return release
logging.info("%s/%s/%s: fetching %s", system_name, release_type_name, release_channel_name, url)
if release["needsAuth"]:
if not FLAGS.username or not FLAGS.token:
raise Exception("fetching %s/%s/%s from %s requires --username and --token" % (system_name, release_type_name, release_channel_name, url))
url += f"?username={FLAGS.username}&token={FLAGS.token}"
release["sha256"] = nix_prefetch_url(release["name"], url)
urls_to_hash[url] = release["sha256"]
return release
return iter_version(versions, _fill_in_hash)
def main(argv):
factorio_versions = fetch_versions()
new_our_versions = generate_our_versions(factorio_versions)
old_our_versions = None
our_versions_path = find_versions_json()
if our_versions_path:
logging.info('Loading old versions.json from %s', our_versions_path)
with open(our_versions_path, 'r') as f:
old_our_versions = json.load(f)
if old_our_versions:
logging.info('Merging in old hashes')
new_our_versions = merge_versions(old_our_versions, new_our_versions)
logging.info('Fetching necessary tars to get hashes')
new_our_versions = fill_in_hash(new_our_versions)
with open(our_versions_path, 'w') as f:
logging.info('Writing versions.json to %s', our_versions_path)
json.dump(new_our_versions, f, sort_keys=True, indent=2)
f.write("\n")
if __name__ == '__main__':
app.run(main)
| 7,701 | 39.324607 | 154 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/games/minecraft-servers/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.requests python3Packages.dataclasses-json
import json
from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
import requests
from dataclasses_json import DataClassJsonMixin, LetterCase, config
from marshmallow import fields
@dataclass
class Download(DataClassJsonMixin):
sha1: str
size: int
url: str
@dataclass
class Version(DataClassJsonMixin):
id: str
type: str
url: str
time: datetime = field(
metadata=config(
encoder=datetime.isoformat,
decoder=datetime.fromisoformat,
mm_field=fields.DateTime(format="iso"),
)
)
release_time: datetime = field(
metadata=config(
encoder=datetime.isoformat,
decoder=datetime.fromisoformat,
mm_field=fields.DateTime(format="iso"),
letter_case=LetterCase.CAMEL,
)
)
def get_manifest(self) -> Any:
"""Return the version's manifest."""
response = requests.get(self.url)
response.raise_for_status()
return response.json()
def get_downloads(self) -> Dict[str, Download]:
"""
Return all downloadable files from the version's manifest, in Download
objects.
"""
return {
download_name: Download.from_dict(download_info)
for download_name, download_info in self.get_manifest()["downloads"].items()
}
def get_java_version(self) -> Any:
"""
Return the java version specified in a version's manifest, if it is
present. Versions <= 1.6 do not specify this.
"""
return self.get_manifest().get("javaVersion", {}).get("majorVersion", None)
def get_server(self) -> Optional[Download]:
"""
If the version has a server download available, return the Download
object for the server download. If the version does not have a server
        download available, return None.
"""
downloads = self.get_downloads()
if "server" in downloads:
return downloads["server"]
return None
def get_versions() -> List[Version]:
"""Return a list of Version objects for all available versions."""
response = requests.get(
"https://launchermeta.mojang.com/mc/game/version_manifest.json"
)
response.raise_for_status()
data = response.json()
return [Version.from_dict(version) for version in data["versions"]]
def get_major_release(version_id: str) -> str:
"""
Return the major release for a version. The major release for 1.17 and
1.17.1 is 1.17.
"""
    if len(version_id.split(".")) < 2:
raise ValueError(f"version not in expected format: '{version_id}'")
return ".".join(version_id.split(".")[:2])
def group_major_releases(releases: List[Version]) -> Dict[str, List[Version]]:
"""
Return a dictionary containing each version grouped by each major release.
The key "1.17" contains a list with two Version objects, one for "1.17"
and another for "1.17.1".
"""
groups: Dict[str, List[Version]] = {}
for release in releases:
major_release = get_major_release(release.id)
if major_release not in groups:
groups[major_release] = []
groups[major_release].append(release)
return groups
def get_latest_major_releases(releases: List[Version]) -> Dict[str, Version]:
"""
Return a dictionary containing the latest version for each major release.
The latest major release for 1.16 is 1.16.5, so the key "1.16" contains a
Version object for 1.16.5.
"""
return {
major_release: sorted(releases, key=lambda x: x.id, reverse=True)[0]
for major_release, releases in group_major_releases(releases).items()
}
def generate() -> Dict[str, Dict[str, str]]:
"""
Return a dictionary containing the latest url, sha1 and version for each major
release.
"""
versions = get_versions()
releases = list(
filter(lambda version: version.type == "release", versions)
) # remove snapshots and betas
latest_major_releases = get_latest_major_releases(releases)
servers = {
version: Download.schema().dump(download_info) # Download -> dict
for version, download_info in {
version: value.get_server()
for version, value in latest_major_releases.items()
}.items()
if download_info is not None # versions < 1.2 do not have a server
}
for server in servers.values():
del server["size"] # don't need it
for version, server in servers.items():
server["version"] = latest_major_releases[version].id
server["javaVersion"] = latest_major_releases[version].get_java_version()
return servers
if __name__ == "__main__":
with open(Path(__file__).parent / "versions.json", "w") as file:
json.dump(generate(), file, indent=2)
file.write("\n")
| 5,088 | 31.414013 | 88 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/version-management/gitlab/update.py
|
#!/usr/bin/env nix-shell
#! nix-shell -I nixpkgs=../../../.. -i python3 -p bundix bundler nix-update nix nix-universal-prefetch python3 python3Packages.requests python3Packages.click python3Packages.click-log python3Packages.packaging prefetch-yarn-deps git
import click
import click_log
import re
import logging
import subprocess
import json
import pathlib
import tempfile
from packaging.version import Version
from typing import Iterable
import requests
NIXPKGS_PATH = pathlib.Path(__file__).parent / "../../../../"
GITLAB_DIR = pathlib.Path(__file__).parent
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
class GitLabRepo:
version_regex = re.compile(r"^v\d+\.\d+\.\d+(\-rc\d+)?(\-ee)?(\-gitlab)?")
def __init__(self, owner: str = "gitlab-org", repo: str = "gitlab"):
self.owner = owner
self.repo = repo
@property
def url(self):
return f"https://gitlab.com/{self.owner}/{self.repo}"
@property
def tags(self) -> Iterable[str]:
"""Returns a sorted list of repository tags"""
r = requests.get(self.url + "/refs?sort=updated_desc&ref=master").json()
tags = r.get("Tags", [])
# filter out versions not matching version_regex
versions = list(filter(self.version_regex.match, tags))
# sort, but ignore v, -ee and -gitlab for sorting comparisons
versions.sort(
key=lambda x: Version(
x.replace("v", "").replace("-ee", "").replace("-gitlab", "")
),
reverse=True,
)
return versions
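    # Illustrative note (comments only, not part of the original class): the sort key
    # above strips the decorations before comparing, so for a tag like "v15.6.1-ee"
    # the comparison value is effectively
    #   Version("v15.6.1-ee".replace("v", "").replace("-ee", "").replace("-gitlab", ""))
    # i.e. Version("15.6.1"); the newest version therefore sorts first (reverse=True).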
def get_git_hash(self, rev: str):
return (
subprocess.check_output(
[
"nix-universal-prefetch",
"fetchFromGitLab",
"--owner",
self.owner,
"--repo",
self.repo,
"--rev",
rev,
]
)
.decode("utf-8")
.strip()
)
def get_yarn_hash(self, rev: str):
with tempfile.TemporaryDirectory() as tmp_dir:
with open(tmp_dir + "/yarn.lock", "w") as f:
f.write(self.get_file("yarn.lock", rev))
return (
subprocess.check_output(["prefetch-yarn-deps", tmp_dir + "/yarn.lock"])
.decode("utf-8")
.strip()
)
@staticmethod
def rev2version(tag: str) -> str:
"""
normalize a tag to a version number.
This obviously isn't very smart if we don't pass something that looks like a tag
:param tag: the tag to normalize
:return: a normalized version number
"""
# strip v prefix
version = re.sub(r"^v", "", tag)
# strip -ee and -gitlab suffixes
return re.sub(r"-(ee|gitlab)$", "", version)
def get_file(self, filepath, rev):
"""
returns file contents at a given rev
:param filepath: the path to the file, relative to the repo root
:param rev: the rev to fetch at
:return:
"""
return requests.get(self.url + f"/raw/{rev}/{filepath}").text
def get_data(self, rev):
version = self.rev2version(rev)
passthru = {
v: self.get_file(v, rev).strip()
for v in [
"GITALY_SERVER_VERSION",
"GITLAB_PAGES_VERSION",
"GITLAB_SHELL_VERSION",
"GITLAB_ELASTICSEARCH_INDEXER_VERSION",
]
}
passthru["GITLAB_WORKHORSE_VERSION"] = version
return dict(
version=self.rev2version(rev),
repo_hash=self.get_git_hash(rev),
yarn_hash=self.get_yarn_hash(rev),
owner=self.owner,
repo=self.repo,
rev=rev,
passthru=passthru,
)
def _get_data_json():
data_file_path = pathlib.Path(__file__).parent / "data.json"
with open(data_file_path, "r") as f:
return json.load(f)
def _call_nix_update(pkg, version):
"""calls nix-update from nixpkgs root dir"""
return subprocess.check_output(
["nix-update", pkg, "--version", version], cwd=NIXPKGS_PATH
)
@click_log.simple_verbosity_option(logger)
@click.group()
def cli():
pass
@cli.command("update-data")
@click.option("--rev", default="latest", help="The rev to use (vX.Y.Z-ee), or 'latest'")
def update_data(rev: str):
"""Update data.json"""
logger.info("Updating data.json")
repo = GitLabRepo()
if rev == "latest":
# filter out pre and rc releases
rev = next(filter(lambda x: not ("rc" in x or x.endswith("pre")), repo.tags))
data_file_path = pathlib.Path(__file__).parent / "data.json"
data = repo.get_data(rev)
with open(data_file_path.as_posix(), "w") as f:
json.dump(data, f, indent=2)
f.write("\n")
@cli.command("update-rubyenv")
def update_rubyenv():
"""Update rubyEnv"""
logger.info("Updating gitlab")
repo = GitLabRepo()
rubyenv_dir = pathlib.Path(__file__).parent / "rubyEnv"
# load rev from data.json
data = _get_data_json()
rev = data["rev"]
version = data["version"]
for fn in ["Gemfile.lock", "Gemfile"]:
with open(rubyenv_dir / fn, "w") as f:
f.write(repo.get_file(fn, rev))
# patch for openssl 3.x support
subprocess.check_output(
["sed", "-i", "s:'openssl', '2.*':'openssl', '3.0.2':g", "Gemfile"],
cwd=rubyenv_dir,
)
# Fetch vendored dependencies temporarily in order to build the gemset.nix
subprocess.check_output(["mkdir", "-p", "vendor/gems"], cwd=rubyenv_dir)
subprocess.check_output(
[
"sh",
"-c",
f"curl -L https://gitlab.com/gitlab-org/gitlab/-/archive/v{version}-ee/gitlab-v{version}-ee.tar.bz2?path=vendor/gems | tar -xj --strip-components=3",
],
cwd=f"{rubyenv_dir}/vendor/gems",
)
# Undo our gemset.nix patches so that bundix runs through
subprocess.check_output(
["sed", "-i", "-e", "1d", "-e", "s:\\${src}/::g", "gemset.nix"], cwd=rubyenv_dir
)
subprocess.check_output(["bundle", "lock"], cwd=rubyenv_dir)
subprocess.check_output(["bundix"], cwd=rubyenv_dir)
subprocess.check_output(
[
"sed",
"-i",
"-e",
"1i\\src:",
"-e",
's:path = \\(vendor/[^;]*\\);:path = "${src}/\\1";:g',
"gemset.nix",
],
cwd=rubyenv_dir,
)
subprocess.check_output(["rm", "-rf", "vendor"], cwd=rubyenv_dir)
@cli.command("update-gitaly")
def update_gitaly():
"""Update gitaly"""
logger.info("Updating gitaly")
data = _get_data_json()
gitaly_server_version = data['passthru']['GITALY_SERVER_VERSION']
_call_nix_update("gitaly", gitaly_server_version)
@cli.command("update-gitlab-pages")
def update_gitlab_pages():
"""Update gitlab-pages"""
logger.info("Updating gitlab-pages")
data = _get_data_json()
gitlab_pages_version = data["passthru"]["GITLAB_PAGES_VERSION"]
_call_nix_update("gitlab-pages", gitlab_pages_version)
def get_container_registry_version() -> str:
"""Returns the version attribute of gitlab-container-registry"""
return subprocess.check_output(
[
"nix",
"--experimental-features",
"nix-command",
"eval",
"-f",
".",
"--raw",
"gitlab-container-registry.version",
],
cwd=NIXPKGS_PATH,
).decode("utf-8")
@cli.command("update-gitlab-shell")
def update_gitlab_shell():
"""Update gitlab-shell"""
logger.info("Updating gitlab-shell")
data = _get_data_json()
gitlab_shell_version = data["passthru"]["GITLAB_SHELL_VERSION"]
_call_nix_update("gitlab-shell", gitlab_shell_version)
@cli.command("update-gitlab-workhorse")
def update_gitlab_workhorse():
"""Update gitlab-workhorse"""
logger.info("Updating gitlab-workhorse")
data = _get_data_json()
gitlab_workhorse_version = data["passthru"]["GITLAB_WORKHORSE_VERSION"]
_call_nix_update("gitlab-workhorse", gitlab_workhorse_version)
@cli.command("update-gitlab-container-registry")
@click.option("--rev", default="latest", help="The rev to use (vX.Y.Z-ee), or 'latest'")
@click.option(
"--commit", is_flag=True, default=False, help="Commit the changes for you"
)
def update_gitlab_container_registry(rev: str, commit: bool):
"""Update gitlab-container-registry"""
logger.info("Updading gitlab-container-registry")
repo = GitLabRepo(repo="container-registry")
old_container_registry_version = get_container_registry_version()
if rev == "latest":
rev = next(filter(lambda x: not ("rc" in x or x.endswith("pre")), repo.tags))
version = repo.rev2version(rev)
_call_nix_update("gitlab-container-registry", version)
if commit:
new_container_registry_version = get_container_registry_version()
commit_container_registry(
old_container_registry_version, new_container_registry_version
)
@cli.command('update-gitlab-elasticsearch-indexer')
def update_gitlab_elasticsearch_indexer():
"""Update gitlab-elasticsearch-indexer"""
data = _get_data_json()
gitlab_elasticsearch_indexer_version = data['passthru']['GITLAB_ELASTICSEARCH_INDEXER_VERSION']
_call_nix_update('gitlab-elasticsearch-indexer', gitlab_elasticsearch_indexer_version)
@cli.command("update-all")
@click.option("--rev", default="latest", help="The rev to use (vX.Y.Z-ee), or 'latest'")
@click.option(
"--commit", is_flag=True, default=False, help="Commit the changes for you"
)
@click.pass_context
def update_all(ctx, rev: str, commit: bool):
"""Update all gitlab components to the latest stable release"""
old_data_json = _get_data_json()
old_container_registry_version = get_container_registry_version()
ctx.invoke(update_data, rev=rev)
new_data_json = _get_data_json()
ctx.invoke(update_rubyenv)
ctx.invoke(update_gitaly)
ctx.invoke(update_gitlab_pages)
ctx.invoke(update_gitlab_shell)
ctx.invoke(update_gitlab_workhorse)
ctx.invoke(update_gitlab_elasticsearch_indexer)
if commit:
commit_gitlab(
old_data_json["version"], new_data_json["version"], new_data_json["rev"]
)
ctx.invoke(update_gitlab_container_registry)
if commit:
new_container_registry_version = get_container_registry_version()
commit_container_registry(
old_container_registry_version, new_container_registry_version
)
def commit_gitlab(old_version: str, new_version: str, new_rev: str) -> None:
"""Commits the gitlab changes for you"""
subprocess.run(
[
"git",
"add",
"data.json",
"rubyEnv",
"gitaly",
"gitlab-pages",
"gitlab-shell",
"gitlab-workhorse",
"gitlab-elasticsearch-indexer",
],
cwd=GITLAB_DIR,
)
subprocess.run(
[
"git",
"commit",
"--message",
f"""gitlab: {old_version} -> {new_version}\n\nhttps://gitlab.com/gitlab-org/gitlab/-/blob/{new_rev}/CHANGELOG.md""",
],
cwd=GITLAB_DIR,
)
def commit_container_registry(old_version: str, new_version: str) -> None:
"""Commits the gitlab-container-registry changes for you"""
subprocess.run(["git", "add", "gitlab-container-registry"], cwd=GITLAB_DIR)
subprocess.run(
[
"git",
"commit",
"--message",
f"gitlab-container-registry: {old_version} -> {new_version}\n\nhttps://gitlab.com/gitlab-org/container-registry/-/blob/v{new_version}-gitlab/CHANGELOG.md",
],
cwd=GITLAB_DIR,
)
if __name__ == "__main__":
cli()
| 11,960 | 30.067532 | 232 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/version-management/sapling/gen-deps.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"
import json
import re
from hashlib import sha1
from struct import unpack
from subprocess import run
from requests import get
# Fetch the latest stable release metadata from GitHub
latestTag = get("https://api.github.com/repos/facebook/sapling/releases/latest").json()[
"tag_name"
]
def nixPrefetchUrl(url):
return run(
["nix-prefetch-url", "--type", "sha256", url],
check=True,
text=True,
capture_output=True,
).stdout.rstrip()
# Fetch the `setup.py` source and look for instances of assets being downloaded
# from files.pythonhosted.org.
setupPy = get(
f"https://github.com/facebook/sapling/raw/{latestTag}/eden/scm/setup.py"
).text
foundUrls = re.findall(r'(https://files\.pythonhosted\.org/packages/[^\s]+)"', setupPy)
dataDeps = {
"links": [{"url": url, "sha256": nixPrefetchUrl(url)} for url in foundUrls],
"version": latestTag,
# Find latest's git tag which corresponds to the Sapling version. Also
# needed is a hash of the version, so calculate that here. Taken from
# Sapling source `$root/eden/scm/setup_with_version.py`.
"versionHash": str(unpack(">Q", sha1(latestTag.encode("ascii")).digest()[:8])[0]),
}
open("deps.json", "w").write(json.dumps(dataDeps, indent=2, sort_keys=True) + "\n")
| 1,383 | 31.186047 | 88 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/networking/instant-messengers/discord/disable-breaking-updates.py
|
#!@pythonInterpreter@
# slightly tweaked from the script created by @lionirdeadman
# https://github.com/flathub/com.discordapp.Discord/blob/master/disable-breaking-updates.py
"""
Disable breaking updates which will prompt users to download a deb or tar file
and lock them out of Discord making the program unusable.
This will dramatically improve the experience :
1) The maintainer doesn't need to be worried at all times of an update which will break Discord.
2) People will not be locked out of the program while the maintainer runs to update it.
"""
import json
import os
import sys
from pathlib import Path
XDG_CONFIG_HOME = os.environ.get("XDG_CONFIG_HOME") or os.path.join(
os.path.expanduser("~"), ".config"
)
settings_path = Path(f"{XDG_CONFIG_HOME}/@configDirName@/settings.json")
settings_path_temp = Path(f"{XDG_CONFIG_HOME}/@configDirName@/settings.json.tmp")
if os.path.exists(settings_path):
with settings_path.open(encoding="utf-8") as settings_file:
try:
settings = json.load(settings_file)
except json.JSONDecodeError:
print("[Nix] settings.json is malformed, letting Discord fix itself")
sys.exit(0)
else:
settings = {}
if settings.get("SKIP_HOST_UPDATE"):
print("[Nix] Disabling updates already done")
else:
skip_host_update = {"SKIP_HOST_UPDATE": True}
settings.update(skip_host_update)
os.makedirs(os.path.dirname(settings_path), exist_ok=True)
with settings_path_temp.open("w", encoding="utf-8") as settings_file_temp:
json.dump(settings, settings_file_temp, indent=2)
settings_path_temp.rename(settings_path)
print("[Nix] Disabled updates")
| 1,677 | 32.56 | 97 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/networking/browsers/microsoft-edge/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3Packages.packaging python3Packages.debian
import base64
import gzip
import textwrap
from urllib import request
from debian.deb822 import Packages
from debian.debian_support import Version
def packages():
packages_url = 'https://packages.microsoft.com/repos/edge/dists/stable/main/binary-amd64/Packages'
handle = request.urlopen(packages_url)
return handle
def latest_packages(packages: bytes):
latest_packages: dict[str, Packages] = {}
for package in Packages.iter_paragraphs(packages, use_apt_pkg=False):
name: str = package['Package']
if not name.startswith('microsoft-edge-'):
continue
channel = name.replace('microsoft-edge-', '')
if channel not in latest_packages:
latest_packages[channel] = package
else:
old_package = latest_packages[channel]
if old_package.get_version() < package.get_version(): # type: ignore
latest_packages[channel] = package
return latest_packages
def nix_expressions(latest: dict[str, Packages]):
channel_strs: list[str] = []
for channel, package in latest.items():
print(f"Processing {channel} {package['Version']}")
match = Version.re_valid_version.match(package['Version'])
assert match is not None
version = match.group('upstream_version')
revision = match.group('debian_revision')
sri = 'sha256-' + \
base64.b64encode(bytes.fromhex(package['SHA256'])).decode('ascii')
channel_str = textwrap.dedent(
f'''\
{channel} = import ./browser.nix {{
channel = "{channel}";
version = "{version}";
revision = "{revision}";
sha256 = "{sri}";
}};'''
)
channel_strs.append(channel_str)
return channel_strs
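# Illustrative sketch (not part of the original script): every string returned by
# nix_expressions renders to an attribute of this shape once assembled into
# default.nix (the concrete values below are placeholders):
#
#   stable = import ./browser.nix {
#     channel = "stable";
#     version = "100.0.0.0";
#     revision = "1";
#     sha256 = "sha256-...";
#   };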
def write_expression():
latest = latest_packages(packages())
channel_strs = nix_expressions(latest)
nix_expr = '{\n' + textwrap.indent('\n'.join(channel_strs), ' ') + '\n}\n'
with open('default.nix', 'w') as f:
f.write(nix_expr)
write_expression()
| 2,191 | 30.768116 | 102 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/networking/browsers/chromium/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python -p python3 nix nix-prefetch-git
"""This script automatically updates chromium, google-chrome, chromedriver, and ungoogled-chromium
via upstream-info.json."""
# Usage: ./update.py [--commit]
import base64
import csv
import json
import re
import subprocess
import sys
from codecs import iterdecode
from collections import OrderedDict
from datetime import datetime
from distutils.version import LooseVersion
from os.path import abspath, dirname
from urllib.request import urlopen
RELEASES_URL = 'https://versionhistory.googleapis.com/v1/chrome/platforms/linux/channels/all/versions/all/releases'
DEB_URL = 'https://dl.google.com/linux/chrome/deb/pool/main/g'
BUCKET_URL = 'https://commondatastorage.googleapis.com/chromium-browser-official'
JSON_PATH = dirname(abspath(__file__)) + '/upstream-info.json'
UNGOOGLED_FLAGS_PATH = dirname(abspath(__file__)) + '/ungoogled-flags.toml'
COMMIT_MESSAGE_SCRIPT = dirname(abspath(__file__)) + '/get-commit-message.py'
def load_json(path):
"""Loads the given JSON file."""
with open(path, 'r') as f:
return json.load(f)
def nix_prefetch_url(url, algo='sha256'):
"""Prefetches the content of the given URL."""
print(f'nix-prefetch-url {url}')
out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url])
return out.decode('utf-8').rstrip()
def nix_prefetch_git(url, rev):
"""Prefetches the requested Git revision of the given repository URL."""
print(f'nix-prefetch-git {url} {rev}')
out = subprocess.check_output(['nix-prefetch-git', '--quiet', '--url', url, '--rev', rev])
return json.loads(out)
def get_file_revision(revision, file_path):
"""Fetches the requested Git revision of the given Chromium file."""
url = f'https://chromium.googlesource.com/chromium/src/+/refs/tags/{revision}/{file_path}?format=TEXT'
with urlopen(url) as http_response:
resp = http_response.read()
return base64.b64decode(resp)
def get_matching_chromedriver(version):
"""Gets the matching chromedriver version for the given Chromium version."""
# See https://chromedriver.chromium.org/downloads/version-selection
    build = re.sub(r'\.[0-9]+$', '', version)
chromedriver_version_url = f'https://chromedriver.storage.googleapis.com/LATEST_RELEASE_{build}'
with urlopen(chromedriver_version_url) as http_response:
chromedriver_version = http_response.read().decode()
def get_chromedriver_url(system):
return ('https://chromedriver.storage.googleapis.com/' +
f'{chromedriver_version}/chromedriver_{system}.zip')
return {
'version': chromedriver_version,
'sha256_linux': nix_prefetch_url(get_chromedriver_url('linux64')),
'sha256_darwin': nix_prefetch_url(get_chromedriver_url('mac64')),
'sha256_darwin_aarch64': nix_prefetch_url(get_chromedriver_url('mac_arm64'))
}
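# Illustrative sketch (not part of the original script): the version-selection rule
# above drops the last dot-separated component, e.g.
#   re.sub(r'\.[0-9]+$', '', "103.0.5060.134") == "103.0.5060"
# and the matching chromedriver version is then read from the
# LATEST_RELEASE_103.0.5060 object before the per-platform zips are prefetched.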
def get_channel_dependencies(version):
"""Gets all dependencies for the given Chromium version."""
deps = get_file_revision(version, 'DEPS')
gn_pattern = b"'gn_version': 'git_revision:([0-9a-f]{40})'"
gn_commit = re.search(gn_pattern, deps).group(1).decode()
gn = nix_prefetch_git('https://gn.googlesource.com/gn', gn_commit)
return {
'gn': {
'version': datetime.fromisoformat(gn['date']).date().isoformat(),
'url': gn['url'],
'rev': gn['rev'],
'sha256': gn['sha256']
}
}
def get_latest_ungoogled_chromium_tag(linux_stable_versions):
"""Returns the latest ungoogled-chromium tag for linux using the GitHub API."""
api_tag_url = 'https://api.github.com/repos/ungoogled-software/ungoogled-chromium/tags'
with urlopen(api_tag_url) as http_response:
tags = json.load(http_response)
for tag in tags:
            if tag['name'].split('-')[0] not in linux_stable_versions:
continue
return tag['name']
def get_latest_ungoogled_chromium_build(linux_stable_versions):
"""Returns a dictionary for the latest ungoogled-chromium build."""
tag = get_latest_ungoogled_chromium_tag(linux_stable_versions)
version = tag.split('-')[0]
return {
'name': 'chrome/platforms/linux/channels/ungoogled-chromium/versions/',
'version': version,
'ungoogled_tag': tag
}
def get_ungoogled_chromium_gn_flags(revision):
"""Returns ungoogled-chromium's GN build flags for the given revision."""
gn_flags_url = f'https://raw.githubusercontent.com/ungoogled-software/ungoogled-chromium/{revision}/flags.gn'
return urlopen(gn_flags_url).read().decode()
def channel_name_to_attr_name(channel_name):
"""Maps a channel name to the corresponding main Nixpkgs attribute name."""
if channel_name == 'stable':
return 'chromium'
if channel_name == 'beta':
return 'chromiumBeta'
if channel_name == 'dev':
return 'chromiumDev'
if channel_name == 'ungoogled-chromium':
return 'ungoogled-chromium'
print(f'Error: Unexpected channel: {channel_name}', file=sys.stderr)
sys.exit(1)
def get_channel_key(item):
"""Orders Chromium channels by their name."""
channel_name = item[0]
if channel_name == 'stable':
return 0
if channel_name == 'beta':
return 1
if channel_name == 'dev':
return 2
if channel_name == 'ungoogled-chromium':
return 3
print(f'Error: Unexpected channel: {channel_name}', file=sys.stderr)
sys.exit(1)
def print_updates(channels_old, channels_new):
"""Print a summary of the updates."""
print('Updates:')
for channel_name in channels_old:
version_old = channels_old[channel_name]["version"]
version_new = channels_new[channel_name]["version"]
if LooseVersion(version_old) < LooseVersion(version_new):
attr_name = channel_name_to_attr_name(channel_name)
print(f'- {attr_name}: {version_old} -> {version_new}')
channels = {}
last_channels = load_json(JSON_PATH)
print(f'GET {RELEASES_URL}', file=sys.stderr)
with urlopen(RELEASES_URL) as resp:
releases = json.load(resp)['releases']
linux_stable_versions = [release['version'] for release in releases if release['name'].startswith('chrome/platforms/linux/channels/stable/versions/')]
releases.append(get_latest_ungoogled_chromium_build(linux_stable_versions))
for release in releases:
    channel_name = re.findall(r"chrome/platforms/linux/channels/(.*)/versions/", release['name'])[0]
# If we've already found a newer release for this channel, we're
# no longer interested in it.
if channel_name in channels:
continue
# If we're back at the last release we used, we don't need to
# keep going -- there's no new version available, and we can
# just reuse the info from last time.
if release['version'] == last_channels[channel_name]['version']:
channels[channel_name] = last_channels[channel_name]
continue
channel = {'version': release['version']}
if channel_name == 'dev':
google_chrome_suffix = 'unstable'
elif channel_name == 'ungoogled-chromium':
google_chrome_suffix = 'stable'
else:
google_chrome_suffix = channel_name
try:
channel['sha256'] = nix_prefetch_url(f'{BUCKET_URL}/chromium-{release["version"]}.tar.xz')
channel['sha256bin64'] = nix_prefetch_url(
f'{DEB_URL}/google-chrome-{google_chrome_suffix}/' +
f'google-chrome-{google_chrome_suffix}_{release["version"]}-1_amd64.deb')
except subprocess.CalledProcessError:
# This release isn't actually available yet. Continue to
# the next one.
continue
channel['deps'] = get_channel_dependencies(channel['version'])
if channel_name == 'stable':
channel['chromedriver'] = get_matching_chromedriver(channel['version'])
elif channel_name == 'ungoogled-chromium':
ungoogled_repo_url = 'https://github.com/ungoogled-software/ungoogled-chromium.git'
channel['deps']['ungoogled-patches'] = {
'rev': release['ungoogled_tag'],
'sha256': nix_prefetch_git(ungoogled_repo_url, release['ungoogled_tag'])['sha256']
}
with open(UNGOOGLED_FLAGS_PATH, 'w') as out:
out.write(get_ungoogled_chromium_gn_flags(release['ungoogled_tag']))
channels[channel_name] = channel
sorted_channels = OrderedDict(sorted(channels.items(), key=get_channel_key))
if len(sys.argv) == 2 and sys.argv[1] == '--commit':
for channel_name in sorted_channels.keys():
version_old = last_channels[channel_name]['version']
version_new = sorted_channels[channel_name]['version']
if LooseVersion(version_old) < LooseVersion(version_new):
last_channels[channel_name] = sorted_channels[channel_name]
with open(JSON_PATH, 'w') as out:
json.dump(last_channels, out, indent=2)
out.write('\n')
attr_name = channel_name_to_attr_name(channel_name)
commit_message = f'{attr_name}: {version_old} -> {version_new}'
if channel_name == 'stable':
body = subprocess.check_output([COMMIT_MESSAGE_SCRIPT, version_new]).decode('utf-8')
commit_message += '\n\n' + body
elif channel_name == 'ungoogled-chromium':
subprocess.run(['git', 'add', UNGOOGLED_FLAGS_PATH], check=True)
subprocess.run(['git', 'add', JSON_PATH], check=True)
subprocess.run(['git', 'commit', '--file=-'], input=commit_message.encode(), check=True)
else:
with open(JSON_PATH, 'w') as out:
json.dump(sorted_channels, out, indent=2)
out.write('\n')
print_updates(last_channels, sorted_channels)
| 10,005 | 39.840816 | 154 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/networking/browsers/chromium/get-commit-message.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3Packages.feedparser python3Packages.requests
# This script prints the Git commit message for stable channel updates.
# Usage: ./get-commit-message.py [version]
import re
import sys
import textwrap
from collections import OrderedDict
import feedparser
import requests
feed = feedparser.parse('https://chromereleases.googleblog.com/feeds/posts/default')
html_tags = re.compile(r'<[^>]+>')
target_version = sys.argv[1] if len(sys.argv) == 2 else None
for entry in feed.entries:
url = requests.get(entry.link).url.split('?')[0]
if entry.title != 'Stable Channel Update for Desktop':
if target_version and entry.title == '':
# Workaround for a special case (Chrome Releases bug?):
            if 'the-stable-channel-has-been-updated-to' not in url:
continue
else:
continue
content = entry.content[0].value
content = html_tags.sub('', content) # Remove any HTML tags
if re.search(r'Linux', content) is None:
continue
#print(url) # For debugging purposes
version = re.search(r'\d+(\.\d+){3}', content).group(0)
if target_version:
if version != target_version:
continue
else:
print('chromium: TODO -> ' + version + '\n')
print(url)
if fixes := re.search(r'This update includes .+ security fix(es)?\.', content):
fixes = fixes.group(0)
if zero_days := re.search(r'Google is aware( of reports)? th(e|at) .+ in the wild\.', content):
fixes += " " + zero_days.group(0)
print('\n' + '\n'.join(textwrap.wrap(fixes, width=72)))
if cve_list := re.findall(r'CVE-[^: ]+', content):
cve_list = list(OrderedDict.fromkeys(cve_list)) # Remove duplicates but preserve the order
cve_string = ' '.join(cve_list)
print("\nCVEs:\n" + '\n'.join(textwrap.wrap(cve_string, width=72)))
sys.exit(0) # We only care about the most recent stable channel update
print("Error: No match.")
sys.exit(1)
| 2,037 | 36.740741 | 103 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/emulators/retroarch/update_cores.py
|
#!/usr/bin/env nix-shell
#!nix-shell -I nixpkgs=../../../../ -i python3 -p "python3.withPackages (ps: with ps; [ requests nix-prefetch-github ])" -p "git"
import json
import os
import subprocess
import sys
from pathlib import Path
from concurrent.futures import ThreadPoolExecutor
SCRIPT_PATH = Path(__file__).absolute().parent
HASHES_PATH = SCRIPT_PATH / "hashes.json"
GET_REPO_THREADS = int(os.environ.get("GET_REPO_THREADS", 8))
CORES = {
"2048": {"repo": "libretro-2048"},
"atari800": {"repo": "libretro-atari800"},
"beetle-gba": {"repo": "beetle-gba-libretro"},
"beetle-lynx": {"repo": "beetle-lynx-libretro"},
"beetle-ngp": {"repo": "beetle-ngp-libretro"},
"beetle-pce-fast": {"repo": "beetle-pce-fast-libretro"},
"beetle-pcfx": {"repo": "beetle-pcfx-libretro"},
"beetle-psx": {"repo": "beetle-psx-libretro"},
"beetle-saturn": {"repo": "beetle-saturn-libretro"},
"beetle-snes": {"repo": "beetle-bsnes-libretro"},
"beetle-supafaust": {"repo": "supafaust"},
"beetle-supergrafx": {"repo": "beetle-supergrafx-libretro"},
"beetle-vb": {"repo": "beetle-vb-libretro"},
"beetle-wswan": {"repo": "beetle-wswan-libretro"},
"blastem": {"repo": "blastem"},
"bluemsx": {"repo": "bluemsx-libretro"},
"bsnes": {"repo": "bsnes-libretro"},
"bsnes-hd": {"repo": "bsnes-hd", "owner": "DerKoun"},
"bsnes-mercury": {"repo": "bsnes-mercury"},
"citra": {"repo": "citra", "fetch_submodules": True},
"desmume": {"repo": "desmume"},
"desmume2015": {"repo": "desmume2015"},
"dolphin": {"repo": "dolphin"},
"dosbox": {"repo": "dosbox-libretro"},
"eightyone": {"repo": "81-libretro"},
"fbalpha2012": {"repo": "fbalpha2012"},
"fbneo": {"repo": "fbneo"},
"fceumm": {"repo": "libretro-fceumm"},
"flycast": {"repo": "flycast"},
"fmsx": {"repo": "fmsx-libretro"},
"freeintv": {"repo": "freeintv"},
"fuse": {"repo": "fuse-libretro"},
"gambatte": {"repo": "gambatte-libretro"},
"genesis-plus-gx": {"repo": "Genesis-Plus-GX"},
"gpsp": {"repo": "gpsp"},
"gw": {"repo": "gw-libretro"},
"handy": {"repo": "libretro-handy"},
"hatari": {"repo": "hatari"},
"mame": {"repo": "mame"},
"mame2000": {"repo": "mame2000-libretro"},
"mame2003": {"repo": "mame2003-libretro"},
"mame2003-plus": {"repo": "mame2003-plus-libretro"},
"mame2010": {"repo": "mame2010-libretro"},
"mame2015": {"repo": "mame2015-libretro"},
"mame2016": {"repo": "mame2016-libretro"},
"melonds": {"repo": "melonds"},
"mesen": {"repo": "mesen"},
"mesen-s": {"repo": "mesen-s"},
"meteor": {"repo": "meteor-libretro"},
"mgba": {"repo": "mgba"},
"mupen64plus": {"repo": "mupen64plus-libretro-nx"},
"neocd": {"repo": "neocd_libretro"},
"nestopia": {"repo": "nestopia"},
"nxengine": {"repo": "nxengine-libretro"},
"np2kai": {"repo": "NP2kai", "owner": "AZO234", "fetch_submodules": True},
"o2em": {"repo": "libretro-o2em"},
"opera": {"repo": "opera-libretro"},
"parallel-n64": {"repo": "parallel-n64"},
"pcsx2": {"repo": "pcsx2"},
"pcsx_rearmed": {"repo": "pcsx_rearmed"},
"picodrive": {"repo": "picodrive", "fetch_submodules": True},
"play": {"repo": "Play-", "owner": "jpd002", "fetch_submodules": True},
"ppsspp": {"repo": "ppsspp", "owner": "hrydgard", "fetch_submodules": True},
"prboom": {"repo": "libretro-prboom"},
"prosystem": {"repo": "prosystem-libretro"},
"puae": {"repo": "libretro-uae"},
"quicknes": {"repo": "QuickNES_Core"},
"sameboy": {"repo": "sameboy"},
"scummvm": {"repo": "scummvm"},
"smsplus-gx": {"repo": "smsplus-gx"},
"snes9x": {"repo": "snes9x", "owner": "snes9xgit"},
"snes9x2002": {"repo": "snes9x2002"},
"snes9x2005": {"repo": "snes9x2005"},
"snes9x2010": {"repo": "snes9x2010"},
"stella": {"repo": "stella", "owner": "stella-emu"},
"stella2014": {"repo": "stella2014-libretro"},
"swanstation": {"repo": "swanstation"},
"tgbdual": {"repo": "tgbdual-libretro"},
"thepowdertoy": {"repo": "ThePowderToy"},
"tic80": {"repo": "tic-80", "fetch_submodules": True},
"vba-m": {"repo": "vbam-libretro"},
"vba-next": {"repo": "vba-next"},
"vecx": {"repo": "libretro-vecx"},
"virtualjaguar": {"repo": "virtualjaguar-libretro"},
"yabause": {"repo": "yabause"},
}
def info(*msg):
print(*msg, file=sys.stderr)
def get_repo_hash_fetchFromGitHub(
repo,
owner="libretro",
deep_clone=False,
fetch_submodules=False,
leave_dot_git=False,
rev=None,
):
extra_args = []
if deep_clone:
extra_args.append("--deep-clone")
else:
extra_args.append("--no-deep-clone")
if fetch_submodules:
extra_args.append("--fetch-submodules")
else:
extra_args.append("--no-fetch-submodules")
if leave_dot_git:
extra_args.append("--leave-dot-git")
else:
extra_args.append("--no-leave-dot-git")
if rev:
extra_args.append("--rev")
extra_args.append(rev)
result = subprocess.run(
["nix-prefetch-github", owner, repo, *extra_args],
check=True,
capture_output=True,
text=True,
)
j = json.loads(result.stdout)
# Remove False values
return {k: v for k, v in j.items() if v}
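    # Illustrative note (comments only, not part of the original function): the
    # comprehension above drops falsy entries, so a prefetch result shaped roughly like
    #   {"owner": "libretro", "repo": "flycast", "rev": "...", "sha256": "...",
    #    "fetchSubmodules": False}
    # (field names illustrative) ends up in hashes.json without the False flags.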
def get_repo_hash(fetcher="fetchFromGitHub", **kwargs):
if fetcher == "fetchFromGitHub":
return get_repo_hash_fetchFromGitHub(**kwargs)
else:
raise ValueError(f"Unsupported fetcher: {fetcher}")
def get_repo_hashes(cores={}):
def get_repo_hash_from_core_def(core_def):
core, repo = core_def
info(f"Getting repo hash for '{core}'...")
result = core, get_repo_hash(**repo)
info(f"Got repo hash for '{core}'!")
return result
with open(HASHES_PATH) as f:
repo_hashes = json.loads(f.read())
info(f"Running with {GET_REPO_THREADS} threads!")
with ThreadPoolExecutor(max_workers=GET_REPO_THREADS) as executor:
new_repo_hashes = executor.map(get_repo_hash_from_core_def, cores.items())
for core, repo in new_repo_hashes:
repo_hashes[core] = repo
return repo_hashes
def main():
# If you don't want to update all cores, pass the name of the cores you
# want to update on the command line. E.g.:
# $ ./update.py citra snes9x
if len(sys.argv) > 1:
cores_to_update = sys.argv[1:]
else:
cores_to_update = CORES.keys()
cores = {core: repo for core, repo in CORES.items() if core in cores_to_update}
repo_hashes = get_repo_hashes(cores)
info(f"Generating '{HASHES_PATH}'...")
with open(HASHES_PATH, "w") as f:
f.write(json.dumps(dict(sorted(repo_hashes.items())), indent=4))
f.write("\n")
info("Finished!")
if __name__ == "__main__":
main()
| 6,821 | 34.905263 | 129 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/office/libreoffice/generate-libreoffice-srcs.py
|
#!/usr/bin/env python3
"""
Converts the LibreOffice `download.lst` file into a Nix expression.
Requires an environment variable named `downloadList` identifying the path
of the input file, and writes the result to stdout.
todo - Ideally we would move as much as possible into derivation dependencies.
"""
import collections, itertools, json, re, subprocess, sys, os
def main():
packages = list(get_packages())
for x in packages:
print(x, file=sys.stderr)
print('[')
for x in packages:
md5 = x['md5']
upstream_sha256 = x['sha256']
if upstream_sha256:
hash = upstream_sha256
hashtype = 'sha256'
else:
hash = md5
hashtype = 'md5'
tarball = x['tarball']
url = construct_url(x)
print('url: {}'.format(url), file=sys.stderr)
path = download(url, tarball, hash, hashtype)
print('path: {}'.format(path), file=sys.stderr)
sha256 = get_sha256(path)
print('sha256: {}'.format(sha256), file=sys.stderr)
print(' {')
print(' name = "{}";'.format(tarball))
print(' url = "{}";'.format(url))
print(' sha256 = "{}";'.format(sha256))
print(' md5 = "{}";'.format(md5))
print(' md5name = "{}-{}";'.format(md5 or upstream_sha256,tarball))
print(' }')
print(']')
def construct_url(x):
if x['brief']:
return 'https://dev-www.libreoffice.org/src/{}{}'.format(
x.get('subdir', ''), x['tarball'])
else:
return 'https://dev-www.libreoffice.org/src/{}{}-{}'.format(
x.get('subdir', ''), x['md5'], x['tarball'])
def download(url, name, hash, hashtype):
cmd = ['nix-prefetch-url', url, hash, '--print-path',
'--type', hashtype, '--name', name]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True,
universal_newlines=True)
return proc.stdout.split('\n')[1].strip()
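# Illustrative note (not part of the original script): with --print-path,
# nix-prefetch-url prints the hash on the first output line and the store path on the
# second, so the index-1 split above returns something like
#   /nix/store/<hash>-libfreehand-0.1.1.tar.bz2
# (name illustrative).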
def get_sha256(path):
cmd = ['sha256sum', path]
proc = subprocess.run(cmd, stdout=subprocess.PIPE, check=True,
universal_newlines=True)
return proc.stdout.split(' ')[0].strip()
def get_packages():
"""
All of the package data: What's parsed from download.lst,
plus our additions.
"""
return apply_additions(get_packages_from_download_list(),
get_additions())
def get_additions():
"""
A mapping from package name (the all-caps identifiers used in
`download.lst`) to a dict of additional attributes to set on the package.
"""
with open('./libreoffice-srcs-additions.json') as f:
return json.load(f)
def apply_additions(xs, additions):
for x in xs:
yield dict_merge([x,
additions.get(x['name'], {})])
def get_packages_from_download_list():
"""
The result of parsing `download.lst`: A list of dicts containing keys
'name', 'tarball', 'md5', 'brief'.
"""
def lines():
for x in sub_symbols(parse_lines(get_lines())):
interpretation = interpret(x)
if interpretation == 'unrecognized':
print_skipped_line(x)
else:
yield dict_merge([x,
interpretation])
def cluster(xs):
"""
Groups lines according to their order within the file, to support
packages that are listed in `download.lst` more than once.
"""
keys = ['tarball', 'md5', 'sha256', 'brief']
a = {k: [x for x in xs if k in x['attrs']] for k in keys}
return zip(*[a[k] for k in keys])
def packages():
for (name, group) in groupby(lines(), lambda x: x['name']):
for xs in cluster(group):
yield {'name': name,
'attrs': dict_merge(x['attrs'] for x in xs),
'index': min(x['index'] for x in xs)}
for x in sorted(packages(), key=lambda x: x['index']):
yield dict_merge([{'name': x['name']},
x['attrs']])
def dict_merge(xs):
"""
>>> dict_merge([{1: 2}, {3: 4}, {3: 5}])
{1: 2, 3: 4}
"""
return dict(collections.ChainMap(*xs))
def groupby(xs, f):
"""
>>> groupby([1, 2, 3, 4], lambda x: x % 2)
[(0, [2, 4]), (1, [1, 3])]
"""
for (k, iter) in itertools.groupby(sorted(xs, key=f), f):
group = list(iter)
yield (f(group[0]), group)
def get_lines():
download_list = os.getenv('downloadList')
with open(download_list) as f:
return f.read().splitlines()
def print_skipped_line(x):
print('Skipped line {}: {}'.format(x['index'],
x['original']),
file=sys.stderr)
def parse_lines(lines):
"""
Input: List of strings (the lines from `download.lst`
Output: Iterator of dicts with keys 'key', 'value', and 'index'
"""
for (index, line) in enumerate(lines):
x = { 'index': index, 'original': line }
result = parse_line(line)
if result == 'nothing':
pass
elif result == 'unrecognized':
print_skipped_line(x)
else:
yield dict_merge([x,
result])
def parse_line(line):
"""
Input: A string
Output: One of 1. A dict with keys 'key', 'value'
2. 'nothing' (if the line contains no information)
2. 'unrecognized' (if parsing failed)
"""
    if re.match(r'\s*(#.*)?$', line):
        return 'nothing'
    match = re.match(r'([^:\s]+)\s*:=\s*(.*)$', line)
if match:
return {
'key': match.group(1),
'value': match.group(2).strip()
}
else:
return 'unrecognized'
def sub_symbols(xs):
"""
Do substitution of variables across all lines.
>>> sub_symbols([{'key': 'a', 'value': 'x'},
... {'key': 'c': 'value': '$(a)yz'}])
[{'key': 'a', 'value': 'x'}, {'key': 'c': 'value': 'xyz'}]
"""
xs = list(xs)
symbols = {x['key']: x for x in xs}
def get_value(k):
x = symbols.get(k)
return x['value'] if x is not None else ''
for x in xs:
yield dict_merge([{'value': sub_str(x['value'], get_value)},
x])
def sub_str(string, func):
"""
Do substitution of variables in a single line.
>>> sub_str("x = $(x)", lambda k: {'x': 'a'}[k])
"x = a"
"""
def func2(m):
x = m.group(1)
result = func(x)
return result if result is not None else x
return re.sub(r'\$\(([^\$\(\)]+)\)', func2, string)
def interpret(x):
"""
Input: Dict with keys 'key' and 'value'
Output: One of 1. Dict with keys 'name' and 'attrs'
2. 'unrecognized' (if interpretation failed)
"""
for f in [interpret_md5, interpret_sha256, interpret_tarball_with_md5, interpret_tarball, interpret_jar]:
result = f(x)
if result is not None:
return result
return 'unrecognized'
def interpret_md5(x):
"""
>>> interpret_md5("ODFGEN_MD5SUM", "32572ea48d9021bbd6fa317ddb697abc")
{'name': 'ODFGEN', 'attrs': {'md5': '32572ea48d9021bbd6fa317ddb697abc'}}
"""
match = re.match('^(.*)_MD5SUM$', x['key'])
if match:
return {'name': match.group(1),
'attrs': {'md5': x['value'], 'sha256': ''}}
def interpret_sha256(x):
match = re.match('^(.*)_SHA256SUM$', x['key'])
if match:
return {'name': match.group(1),
'attrs': {'sha256': x['value'], 'md5': ''}}
def interpret_tarball(x):
"""
>>> interpret_tarball("FREEHAND_TARBALL", "libfreehand-0.1.1.tar.bz2")
{'name': 'FREEHAND',
'attrs': {'tarball': 'libfreehand-0.1.1.tar.bz2', 'brief': True}}
"""
match = re.match('^(.*)_TARBALL$', x['key'])
if match:
return {'name': match.group(1),
'attrs': {'tarball': x['value'], 'brief': True}}
def interpret_jar(x):
match = re.match('^(.*)_JAR$', x['key'])
if match:
return {'name': match.group(1),
'attrs': {'tarball': x['value'], 'brief': True}}
def interpret_tarball_with_md5(x):
"""
>>> interpret_tarball_with_md5("CLUCENE_TARBALL",\
"48d647fbd8ef8889e5a7f422c1bfda94-clucene-core-2.3.3.4.tar.gz")
{'name': 'CLUCENE',
'attrs': {'tarball': 'clucene-core-2.3.3.4.tar.gz',
'md5': '48d647fbd8ef8889e5a7f422c1bfda94', 'brief': False}}
"""
match = {'key': re.match('^(.*)_(TARBALL|JAR)$', x['key']),
'value': re.match('(?P<md5>[0-9a-fA-F]{32})-(?P<tarball>.+)$',
x['value'])}
if match['key'] and match['value']:
return {'name': match['key'].group(1),
'attrs': {'tarball': match['value'].group('tarball'),
'md5': match['value'].group('md5'),
'sha256': '',
'brief': False}}
main()
| 9,070 | 26.571429 | 109 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/science/electronics/picoscope/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell --pure -i python3 -p "python3.withPackages (ps: with ps; [ requests ])"
import json
import os
import requests
import sys
def parse_packages(text):
    res = []
    for package in text.split("\n\n"):
        if not package: continue
        pkg = {}
        for field in package.split("\n"):
            if field.startswith(" "): # multiline string
                pkg[k] += "\n" + field[1:]
            else:
                [k, v] = field.split(": ", 1)
                pkg[k] = v
        res.append(pkg)
    return res
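# Hedged usage sketch (not part of the original script, values are placeholders):
# parse_packages splits the Debian Packages index into stanzas and turns each
# "Key: value" field into a dict entry, e.g.
# >>> parse_packages("Package: picoscope\nVersion: 7.0.0\nSHA256: abc123\n\n")
# [{'Package': 'picoscope', 'Version': '7.0.0', 'SHA256': 'abc123'}]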
def generate_sources(packages):
    sources_spec = {}
    for pkg in packages:
        sources_spec[pkg['Package']] = {
            "url": "https://labs.picotech.com/rc/picoscope7/debian/" + pkg["Filename"],
            "sha256": pkg["SHA256"],
            "version": pkg["Version"]
        }
    return sources_spec
out = {}
for nix_system, release in {"x86_64-linux": "amd64"}.items():
resp = requests.get("https://labs.picotech.com/rc/picoscope7/debian//dists/picoscope/main/binary-"+release+"/Packages")
if resp.status_code != 200:
print("error: could not fetch data for release {} (code {})".format(release, resp.code), file=sys.stderr)
sys.exit(1)
pkgs = parse_packages(resp.text)
out[nix_system] = generate_sources(pkgs)
with open(os.path.dirname(__file__) + "/sources.json", "w") as f:
json.dump(out, f, indent=2, sort_keys=True)
f.write('\n')
| 1,456 | 31.377778 | 123 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/emacs/elisp-packages/manual-packages/tree-sitter-langs/update-defaults.py
|
#!/usr/bin/env nix-shell
#! nix-shell ../../../../../../../. -i python3 -p python3 -p nix
from os.path import (
dirname,
abspath,
join,
)
from typing import (
List,
Any,
)
import subprocess
import json
import sys
import os
def fmt_grammar(grammar: str) -> str:
return "tree-sitter-" + grammar
def eval_expr(nixpkgs: str, expr: str) -> Any:
p = subprocess.run(
[
"nix-instantiate",
"--json",
"--eval",
"--expr",
("with import %s {}; %s" % (nixpkgs, expr)),
],
check=True,
stdout=subprocess.PIPE,
)
return json.loads(p.stdout)
def check_grammar_exists(nixpkgs: str, grammar: str) -> bool:
return eval_expr(
nixpkgs, f'lib.hasAttr "{fmt_grammar(grammar)}" tree-sitter-grammars'
)
def build_attr(nixpkgs, attr: str) -> str:
return (
subprocess.run(
["nix-build", "--no-out-link", nixpkgs, "-A", attr],
check=True,
stdout=subprocess.PIPE,
)
.stdout.decode()
.strip()
)
if __name__ == "__main__":
cwd = dirname(abspath(__file__))
nixpkgs = abspath(join(cwd, "../../../../../.."))
src_dir = build_attr(nixpkgs, "emacs.pkgs.tree-sitter-langs.src")
existing: List[str] = []
grammars = os.listdir(join(src_dir, "repos"))
for g in grammars:
exists = check_grammar_exists(nixpkgs, g)
if exists:
existing.append(fmt_grammar(g))
else:
sys.stderr.write("Missing grammar: " + fmt_grammar(g) + "\n")
sys.stderr.flush()
with open(join(cwd, "default-grammars.json"), mode="w") as f:
json.dump(sorted(existing), f, indent=2)
f.write("\n")
| 1,758 | 22.144737 | 77 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/emacs/elisp-packages/manual-packages/tsc/update.py
|
#!/usr/bin/env python3
from textwrap import dedent
from os.path import (
abspath,
dirname,
join,
)
from typing import (
Dict,
Any,
)
import subprocess
import tempfile
import json
import sys
import re
import requests
def eval_drv(nixpkgs: str, expr: str) -> Any:
expr = "\n".join(
(
"with (import %s {});" % nixpkgs,
expr,
)
)
with tempfile.NamedTemporaryFile(mode="w") as f:
f.write(dedent(expr))
f.flush()
p = subprocess.run(
["nix-instantiate", "--json", f.name], stdout=subprocess.PIPE, check=True
)
return p.stdout.decode().strip()
def get_src(tag_name: str) -> Dict[str, str]:
p = subprocess.run(
[
"nix-prefetch-github",
"--rev",
tag_name,
"--json",
"emacs-tree-sitter",
"elisp-tree-sitter",
],
stdout=subprocess.PIPE,
check=True,
)
src = json.loads(p.stdout)
fields = ["owner", "repo", "rev", "sha256"]
return {f: src[f] for f in fields}
def get_cargo_sha256(drv_path: str):
# Note: No check=True since we expect this command to fail
p = subprocess.run(["nix-store", "-r", drv_path], stderr=subprocess.PIPE)
stderr = p.stderr.decode()
lines = iter(stderr.split("\n"))
for l in lines:
if l.startswith("error: hash mismatch in fixed-output derivation"):
break
else:
raise ValueError("Did not find expected hash mismatch message")
for l in lines:
m = re.match(r"\s+got:\s+(.+)$", l)
if m:
return m.group(1)
raise ValueError("Could not extract actual sha256 hash: ", stderr)
if __name__ == "__main__":
cwd = sys.argv[1]
# This should point to the root default.nix of Nixpkgs tree
nixpkgs = abspath(join(cwd, "../../../../../../.."))
tag_name = requests.get(
"https://api.github.com/repos/emacs-tree-sitter/elisp-tree-sitter/releases/latest"
).json()["tag_name"]
src = get_src(tag_name)
with tempfile.NamedTemporaryFile(mode="w") as f:
json.dump(src, f)
f.flush()
drv_path = eval_drv(
nixpkgs,
"""
rustPlatform.buildRustPackage {
pname = "tsc-dyn";
version = "%s";
nativeBuildInputs = [ clang ];
src = fetchFromGitHub (lib.importJSON %s);
sourceRoot = "source/core";
cargoSha256 = lib.fakeSha256;
}
"""
% (tag_name, f.name),
)
cargo_sha256 = get_cargo_sha256(drv_path)
with open(join(cwd, "src.json"), mode="w") as f:
json.dump(
{
"src": src,
"version": tag_name,
"cargoSha256": cargo_sha256,
},
f,
indent=2,
)
f.write("\n")
| 2,899 | 22.387097 | 90 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/jetbrains/update_ides.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3 python3.pkgs.packaging python3.pkgs.requests python3.pkgs.xmltodict
import json
import pathlib
import logging
import requests
import subprocess
import sys
import xmltodict
from packaging import version
updates_url = "https://www.jetbrains.com/updates/updates.xml"
current_path = pathlib.Path(__file__).parent
versions_file_path = current_path.joinpath("versions.json").resolve()
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def one_or_more(x):
return x if isinstance(x, list) else [x]
def download_channels():
logging.info("Checking for updates from %s", updates_url)
updates_response = requests.get(updates_url)
updates_response.raise_for_status()
root = xmltodict.parse(updates_response.text)
products = root["products"]["product"]
return {
channel["@name"]: channel
for product in products
for channel in one_or_more(product["channel"])
}
def build_version(build):
build_number = build["@fullNumber"] if "@fullNumber" in build else build["@number"]
return version.parse(build_number)
def latest_build(channel):
builds = one_or_more(channel["build"])
latest = max(builds, key=build_version)
return latest
def download_sha256(url):
url = f"{url}.sha256"
download_response = requests.get(url)
download_response.raise_for_status()
return download_response.content.decode('UTF-8').split(' ')[0]
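# Illustrative note (not part of the original script): the .sha256 sidecar is assumed
# to follow the usual "<hex digest> <filename>" layout, so taking the text before the
# first space yields just the digest, e.g.
#   "0123abcd  ideaIU-2023.1.tar.gz".split(' ')[0] == "0123abcd"
# (digest and filename above are placeholders).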
channels = download_channels()
def update_product(name, product):
update_channel = product["update-channel"]
logging.info("Updating %s", name)
channel = channels.get(update_channel)
if channel is None:
logging.error("Failed to find channel %s.", update_channel)
logging.error("Check that the update-channel in %s matches the name in %s", versions_file_path, updates_url)
else:
try:
build = latest_build(channel)
new_version = build["@version"]
new_build_number = build["@fullNumber"]
if "EAP" not in channel["@name"]:
version_or_build_number = new_version
else:
version_or_build_number = new_build_number
version_number = new_version.split(' ')[0]
download_url = product["url-template"].format(version=version_or_build_number, versionMajorMinor=version_number)
product["url"] = download_url
if "sha256" not in product or product.get("build_number") != new_build_number:
logging.info("Found a newer version %s with build number %s.", new_version, new_build_number)
product["version"] = new_version
product["build_number"] = new_build_number
product["sha256"] = download_sha256(download_url)
else:
logging.info("Already at the latest version %s with build number %s.", new_version, new_build_number)
except Exception as e:
logging.exception("Update failed:", exc_info=e)
logging.warning("Skipping %s due to the above error.", name)
logging.warning("It may be out-of-date. Fix the error and rerun.")
def update_products(products):
for name, product in products.items():
update_product(name, product)
with open(versions_file_path, "r") as versions_file:
versions = json.load(versions_file)
for products in versions.values():
update_products(products)
with open(versions_file_path, "w") as versions_file:
json.dump(versions, versions_file, indent=2)
versions_file.write("\n")
logging.info("#### Updating plugins ####")
plugin_script = current_path.joinpath("plugins/update_plugins.py").resolve()
subprocess.call(plugin_script)
| 3,754 | 34.093458 | 124 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/jetbrains/plugins/update_plugins.py
|
#! /usr/bin/env nix-shell
#! nix-shell -i python3 -p python3 python3.pkgs.requests nix.out
from json import load, dumps
from pathlib import Path
from requests import get
from subprocess import run
from argparse import ArgumentParser
# Token priorities for version checking
# From https://github.com/JetBrains/intellij-community/blob/94f40c5d77f60af16550f6f78d481aaff8deaca4/platform/util-rt/src/com/intellij/util/text/VersionComparatorUtil.java#L50
TOKENS = {
"snap": 10, "snapshot": 10,
"m": 20,
"eap": 25, "pre": 25, "preview": 25,
"alpha": 30, "a": 30,
"beta": 40, "betta": 40, "b": 40,
"rc": 50,
"sp": 70,
"rel": 80, "release": 80, "r": 80, "final": 80
}
SNAPSHOT_VALUE = 99999
PLUGINS_FILE = Path(__file__).parent.joinpath("plugins.json").resolve()
IDES_FILE = Path(__file__).parent.joinpath("../versions.json").resolve()
# The plugin compatibility system uses a different naming scheme from the IDE update system.
# These dicts convert between them.
FRIENDLY_TO_PLUGIN = {
"clion": "CLION",
"datagrip": "DBE",
"goland": "GOLAND",
"idea-community": "IDEA_COMMUNITY",
"idea-ultimate": "IDEA",
"mps": "MPS",
"phpstorm": "PHPSTORM",
"pycharm-community": "PYCHARM_COMMUNITY",
"pycharm-professional": "PYCHARM",
"rider": "RIDER",
"ruby-mine": "RUBYMINE",
"webstorm": "WEBSTORM"
}
PLUGIN_TO_FRIENDLY = {j: i for i, j in FRIENDLY_TO_PLUGIN.items()}
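# Version strings are split into alternating digit/letter blocks and mapped to
# (priority, value) tokens for comparison, e.g. "1.2-beta3" splits to
# ["1", "2", "beta", "3"] and tokenizes to [(100, 1), (100, 2), (40, 0), (100, 3)].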
def tokenize_stream(stream):
for item in stream:
if item in TOKENS:
yield TOKENS[item], 0
elif item.isalpha():
for char in item:
yield 90, ord(char) - 96
elif item.isdigit():
yield 100, int(item)
def split(version_string: str):
prev_type = None
block = ""
for char in version_string:
if char.isdigit():
cur_type = "number"
elif char.isalpha():
cur_type = "letter"
else:
cur_type = "other"
if cur_type != prev_type and block:
yield block.lower()
block = ""
if cur_type in ("letter", "number"):
block += char
prev_type = cur_type
if block:
yield block
def tokenize_string(version_string: str):
return list(tokenize_stream(split(version_string)))
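# Return whichever of the two version strings sorts as newer under the token
# scheme above, e.g. pick_newest("2022.1", "2022.2") == "2022.2".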
def pick_newest(ver1: str, ver2: str) -> str:
if ver1 is None or ver1 == ver2:
return ver2
if ver2 is None:
return ver1
presort = [tokenize_string(ver1), tokenize_string(ver2)]
postsort = sorted(presort)
if presort == postsort:
return ver2
else:
return ver1
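# Compare two build-number strings ("*" acts as a snapshot wildcard); a negative
# result means ver1 is older, e.g. is_build_older("231.1", "232.1") < 0.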
def is_build_older(ver1: str, ver2: str) -> int:
ver1 = [int(i) for i in ver1.replace("*", str(SNAPSHOT_VALUE)).split(".")]
ver2 = [int(i) for i in ver2.replace("*", str(SNAPSHOT_VALUE)).split(".")]
for i in range(min(len(ver1), len(ver2))):
if ver1[i] == ver2[i] and ver1[i] == SNAPSHOT_VALUE:
return 0
if ver1[i] == SNAPSHOT_VALUE:
return 1
if ver2[i] == SNAPSHOT_VALUE:
return -1
result = ver1[i] - ver2[i]
if result != 0:
return result
return len(ver1) - len(ver2)
def is_compatible(build, since, until) -> bool:
return (not since or is_build_older(since, build) < 0) and (not until or 0 < is_build_older(until, build))
def get_newest_compatible(pid: str, build: str, plugin_infos: list, quiet: bool) -> str | None:
newest_ver = None
newest_index = None
for index, info in enumerate(plugin_infos):
if pick_newest(newest_ver, info["version"]) != newest_ver and \
is_compatible(build, info["since"], info["until"]):
newest_ver = info["version"]
newest_index = index
if newest_ver is not None:
return "https://plugins.jetbrains.com/files/" + plugin_infos[newest_index]["file"]
else:
if not quiet:
print(f"WARNING: Could not find version of plugin {pid} compatible with build {build}")
return None
def flatten(main_list: list[list]) -> list:
return [item for sublist in main_list for item in sublist]
def get_compatible_ides(pid: str) -> list[str]:
int_id = pid.split("-", 1)[0]
url = f"https://plugins.jetbrains.com/api/plugins/{int_id}/compatible-products"
result = get(url).json()
return sorted([PLUGIN_TO_FRIENDLY[i] for i in result if i in PLUGIN_TO_FRIENDLY])
def id_to_name(pid: str, channel="") -> str:
channel_ext = "-" + channel if channel else ""
resp = get("https://plugins.jetbrains.com/api/plugins/" + pid).json()
return resp["link"].split("-", 1)[1] + channel_ext
def sort_dict(to_sort: dict) -> dict:
return {i: to_sort[i] for i in sorted(to_sort.keys())}
def make_name_mapping(infos: dict) -> dict[str, str]:
return sort_dict({i: id_to_name(*i.split("-", 1)) for i in infos.keys()})
def make_plugin_files(plugin_infos: dict, ide_versions: dict, quiet: bool, extra_builds: list[str]) -> dict:
result = {}
names = make_name_mapping(plugin_infos)
for pid in plugin_infos:
plugin_versions = {
"compatible": get_compatible_ides(pid),
"builds": {},
"name": names[pid]
}
relevant_builds = [builds for ide, builds in ide_versions.items() if ide in plugin_versions["compatible"]] + [extra_builds]
relevant_builds = sorted(list(set(flatten(relevant_builds)))) # Flatten, remove duplicates and sort
for build in relevant_builds:
plugin_versions["builds"][build] = get_newest_compatible(pid, build, plugin_infos[pid], quiet)
result[pid] = plugin_versions
return result
def get_old_file_hashes() -> dict[str, str]:
return load(open(PLUGINS_FILE))["files"]
def get_hash(url):
print(f"Downloading {url}")
args = ["nix-prefetch-url", url, "--print-path"]
if url.endswith(".zip"):
args.append("--unpack")
else:
args.append("--executable")
path_process = run(args, capture_output=True)
path = path_process.stdout.decode().split("\n")[1]
result = run(["nix", "--extra-experimental-features", "nix-command", "hash", "path", path], capture_output=True)
result_contents = result.stdout.decode()[:-1]
if not result_contents:
raise RuntimeError(result.stderr.decode())
return result_contents
def print_file_diff(old, new):
added = new.copy()
removed = old.copy()
to_delete = []
for file in added:
if file in removed:
to_delete.append(file)
for file in to_delete:
added.remove(file)
removed.remove(file)
if removed:
print("\nRemoved:")
for file in removed:
print(" - " + file)
print()
if added:
print("\nAdded:")
for file in added:
print(" + " + file)
print()
def get_file_hashes(file_list: list[str], refetch_all: bool) -> dict[str, str]:
old = {} if refetch_all else get_old_file_hashes()
print_file_diff(list(old.keys()), file_list)
file_hashes = {}
for file in sorted(file_list):
if file in old:
file_hashes[file] = old[file]
else:
file_hashes[file] = get_hash(file)
return file_hashes
def get_args() -> tuple[list[str], list[str], bool, bool, bool, list[str]]:
parser = ArgumentParser(
description="Add/remove/update entries in plugins.json",
epilog="To update all plugins, run with no args.\n"
"To add a version of a plugin from a different channel, append -[channel] to the id.\n"
"The id of a plugin is the number before the name in the address of its page on https://plugins.jetbrains.com/"
)
parser.add_argument("-r", "--refetch-all", action="store_true",
help="don't use previously collected hashes, redownload all")
parser.add_argument("-l", "--list", action="store_true",
help="list plugin ids")
parser.add_argument("-q", "--quiet", action="store_true",
help="suppress warnings about not being able to find compatible plugin versions")
parser.add_argument("-w", "--with-build", action="append", default=[],
help="append [builds] to the list of builds to fetch plugin versions for")
sub = parser.add_subparsers(dest="action")
sub.add_parser("add").add_argument("ids", type=str, nargs="+", help="plugin(s) to add")
sub.add_parser("remove").add_argument("ids", type=str, nargs="+", help="plugin(s) to remove")
args = parser.parse_args()
add = []
remove = []
if args.action == "add":
add = args.ids
elif args.action == "remove":
remove = args.ids
return add, remove, args.refetch_all, args.list, args.quiet, args.with_build
def sort_ids(ids: list[str]) -> list[str]:
sortable_ids = []
for pid in ids:
if "-" in pid:
split_pid = pid.split("-", 1)
sortable_ids.append((int(split_pid[0]), split_pid[1]))
else:
sortable_ids.append((int(pid), ""))
sorted_ids = sorted(sortable_ids)
return [(f"{i}-{j}" if j else str(i)) for i, j in sorted_ids]
def get_plugin_ids(add: list[str], remove: list[str]) -> list[str]:
ids = list(load(open(PLUGINS_FILE))["plugins"].keys())
for pid in add:
if pid in ids:
raise ValueError(f"ID {pid} already in JSON file")
ids.append(pid)
for pid in remove:
try:
ids.remove(pid)
except ValueError:
raise ValueError(f"ID {pid} not in JSON file")
return sort_ids(ids)
def get_plugin_info(pid: str, channel: str) -> dict:
url = f"https://plugins.jetbrains.com/api/plugins/{pid}/updates?channel={channel}"
resp = get(url)
decoded = resp.json()
if resp.status_code != 200:
print(f"Server gave non-200 code {resp.status_code} with message " + decoded["message"])
exit(1)
return decoded
def ids_to_infos(ids: list[str]) -> dict:
result = {}
for pid in ids:
if "-" in pid:
int_id, channel = pid.split("-", 1)
else:
channel = ""
int_id = pid
result[pid] = get_plugin_info(int_id, channel)
return result
def get_ide_versions() -> dict:
ide_data = load(open(IDES_FILE))
result = {}
for platform in ide_data:
for product in ide_data[platform]:
version = ide_data[platform][product]["build_number"]
if product not in result:
result[product] = [version]
elif version not in result[product]:
result[product].append(version)
# Gateway isn't a normal IDE, so it doesn't use the same plugins system
del result["gateway"]
return result
def get_file_names(plugins: dict[str, dict]) -> list[str]:
result = []
for plugin_info in plugins.values():
for url in plugin_info["builds"].values():
if url is not None:
result.append(url)
return list(set(result))
def dump(obj, file):
file.write(dumps(obj, indent=2))
file.write("\n")
def write_result(to_write):
dump(to_write, open(PLUGINS_FILE, "w"))
def main():
add, remove, refetch_all, list_ids, quiet, extra_builds = get_args()
result = {}
print("Fetching plugin info")
ids = get_plugin_ids(add, remove)
if list_ids:
print(*ids)
plugin_infos = ids_to_infos(ids)
print("Working out which plugins need which files")
ide_versions = get_ide_versions()
result["plugins"] = make_plugin_files(plugin_infos, ide_versions, quiet, extra_builds)
print("Getting file hashes")
file_list = get_file_names(result["plugins"])
result["files"] = get_file_hashes(file_list, refetch_all)
write_result(result)
if __name__ == '__main__':
main()
| 11,903 | 29.839378 | 175 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/neovim/update-treesitter-parsers.py
|
#!/usr/bin/env nix-shell
#!nix-shell -i python3 -p python3
import re
import subprocess
from pathlib import Path
parsers = {}
dir = Path(__file__).parent
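# Extract the pinned parser sources from neovim's cmake.deps/CMakeLists.txt, which
# contains lines of (roughly) the following form:
#   set(TREESITTER_C_URL https://.../v0.0.0.tar.gz)
#   set(TREESITTER_C_SHA256 <hash>)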
regex = re.compile(r"^set\(TREESITTER_([A-Z_]+)_(URL|SHA256)\s+([^ \)]+)\s*\)\s*$")
src = subprocess.check_output(
[
"nix-build",
dir.parent.parent.parent.parent,
"-A",
"neovim-unwrapped.src",
"--no-out-link",
],
text=True,
).strip()
for line in open(f"{src}/cmake.deps/CMakeLists.txt"):
m = regex.fullmatch(line)
if m is None:
continue
lang = m[1].lower()
ty = m[2]
val = m[3]
    if lang not in parsers:
parsers[lang] = {}
parsers[lang][ty] = val
with open(dir / "treesitter-parsers.nix", "w") as f:
f.write("{ fetchurl }:\n\n{\n")
for lang, src in parsers.items():
f.write(
f""" {lang} = fetchurl {{
url = "{src["URL"]}";
hash = "sha256:{src["SHA256"]}";
}};
"""
)
f.write("}\n")
| 986 | 20 | 83 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/kakoune/plugins/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell update-shell.nix -i python3
# format:
# $ nix run nixpkgs.python3Packages.black -c black update.py
# type-check:
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
# linted:
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265,E402 update.py
import inspect
import os
import sys
from typing import List, Tuple
from pathlib import Path
# Import plugin update library from maintainers/scripts/pluginupdate.py
ROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) # type: ignore
sys.path.insert(
0, os.path.join(ROOT.parent.parent.parent.parent.parent, "maintainers", "scripts")
)
import pluginupdate
GET_PLUGINS = f"""(with import <localpkgs> {{}};
let
inherit (kakouneUtils.override {{}}) buildKakounePluginFrom2Nix;
generated = callPackage {ROOT}/generated.nix {{
inherit buildKakounePluginFrom2Nix;
}};
hasChecksum = value: lib.isAttrs value && lib.hasAttrByPath ["src" "outputHash"] value;
getChecksum = name: value:
if hasChecksum value then {{
submodules = value.src.fetchSubmodules or false;
sha256 = value.src.outputHash;
rev = value.src.rev;
}} else null;
checksums = lib.mapAttrs getChecksum generated;
in lib.filterAttrs (n: v: v != null) checksums)"""
HEADER = "# This file has been generated by ./pkgs/applications/editors/kakoune/plugins/update.py. Do not edit!"
class KakouneEditor(pluginupdate.Editor):
def generate_nix(self, plugins: List[Tuple[pluginupdate.PluginDesc, pluginupdate.Plugin]], outfile: str):
sorted_plugins = sorted(plugins, key=lambda v: v[1].name.lower())
with open(outfile, "w+") as f:
f.write(HEADER)
f.write(
"""
{ lib, buildKakounePluginFrom2Nix, fetchFromGitHub, overrides ? (self: super: {}) }:
let
packages = ( self:
{"""
)
for pluginDesc, plugin in sorted_plugins:
f.write(
f"""
{plugin.normalized_name} = buildKakounePluginFrom2Nix {{
pname = "{plugin.normalized_name}";
version = "{plugin.version}";
src = {pluginDesc.repo.as_nix(plugin)};
meta.homepage = "{pluginDesc.repo.url("")}";
}};
"""
)
f.write(
"""
});
in lib.fix' (lib.extends overrides packages)
"""
)
print(f"updated {outfile}")
def main():
editor = KakouneEditor("kakoune", ROOT, GET_PLUGINS)
editor.run()
if __name__ == "__main__":
main()
| 2,517 | 28.97619 | 112 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/vim/plugins/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell update-shell.nix -i python3
# format:
# $ nix run nixpkgs.python3Packages.black -c black update.py
# type-check:
# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py
# linted:
# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265,E402 update.py
# If you see `HTTP Error 429: too many requests` errors while running this script,
# refer to:
#
# https://github.com/NixOS/nixpkgs/blob/master/doc/languages-frameworks/vim.section.md#updating-plugins-in-nixpkgs-updating-plugins-in-nixpkgs
#
# (or the equivalent file /doc/languages-frameworks/vim.section.md from Nixpkgs master tree).
#
import inspect
import os
import sys
import logging
import subprocess
import textwrap
from typing import List, Tuple
from pathlib import Path
import git
log = logging.getLogger()
sh = logging.StreamHandler()
formatter = logging.Formatter('%(name)s:%(levelname)s: %(message)s')
sh.setFormatter(formatter)
log.addHandler(sh)
# Import plugin update library from maintainers/scripts/pluginupdate.py
ROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))
# Ideally, ROOT.(parent^5) points to the root of the official Nixpkgs tree
sys.path.insert(0, os.path.join(ROOT.parent.parent.parent.parent.parent, "maintainers", "scripts"))
import pluginupdate
from pluginupdate import run_nix_expr, PluginDesc
GET_PLUGINS_LUA = """
with import <localpkgs> {};
lib.attrNames lua51Packages"""
HEADER = (
"# GENERATED by ./pkgs/applications/editors/vim/plugins/update.py. Do not edit!"
)
def isNeovimPlugin(plug: pluginupdate.Plugin) -> bool:
'''
Whether it's a neovim-only plugin
We can check if it's available in lua packages
'''
global luaPlugins
if plug.normalized_name in luaPlugins:
log.debug("%s is a neovim plugin", plug)
return True
return False
class VimEditor(pluginupdate.Editor):
nvim_treesitter_updated = False
def generate_nix(self, plugins: List[Tuple[PluginDesc, pluginupdate.Plugin]], outfile: str):
sorted_plugins = sorted(plugins, key=lambda v: v[0].name.lower())
nvim_treesitter_rev = pluginupdate.run_nix_expr("(import <localpkgs> { }).vimPlugins.nvim-treesitter.src.rev")
with open(outfile, "w+") as f:
f.write(HEADER)
f.write(textwrap.dedent("""
{ lib, buildVimPluginFrom2Nix, buildNeovimPlugin, fetchFromGitHub, fetchgit }:
final: prev:
{
"""
))
for pdesc, plugin in sorted_plugins:
content = self.plugin2nix(pdesc, plugin)
f.write(content)
if plugin.name == "nvim-treesitter" and plugin.commit != nvim_treesitter_rev:
self.nvim_treesitter_updated = True
f.write("\n}\n")
print(f"updated {outfile}")
def plugin2nix(self, pdesc: PluginDesc, plugin: pluginupdate.Plugin) -> str:
repo = pdesc.repo
isNeovim = isNeovimPlugin(plugin)
content = f" {plugin.normalized_name} = "
src_nix = repo.as_nix(plugin)
content += """{buildFn} {{
pname = "{plugin.name}";
version = "{plugin.version}";
src = {src_nix};
meta.homepage = "{repo.uri}";
}};
""".format(
buildFn="buildNeovimPlugin" if isNeovim else "buildVimPluginFrom2Nix", plugin=plugin, src_nix=src_nix, repo=repo)
log.debug(content)
return content
def update(self, args):
pluginupdate.update_plugins(self, args)
if self.nvim_treesitter_updated:
print("updating nvim-treesitter grammars")
nvim_treesitter_dir = ROOT.joinpath("nvim-treesitter")
subprocess.check_call([nvim_treesitter_dir.joinpath("update.py")])
if self.nixpkgs_repo:
index = self.nixpkgs_repo.index
for diff in index.diff(None):
if diff.a_path == "pkgs/applications/editors/vim/plugins/nvim-treesitter/generated.nix":
msg = "vimPlugins.nvim-treesitter: update grammars"
print(f"committing to nixpkgs: {msg}")
index.add([str(nvim_treesitter_dir.joinpath("generated.nix"))])
index.commit(msg)
return
print("no updates to nvim-treesitter grammars")
def main():
global luaPlugins
luaPlugins = run_nix_expr(GET_PLUGINS_LUA)
with open(f"{ROOT}/get-plugins.nix") as f:
GET_PLUGINS = f.read()
editor = VimEditor("vim", ROOT, GET_PLUGINS)
editor.run()
if __name__ == "__main__":
main()
| 4,649 | 31.291667 | 142 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/editors/vim/plugins/nvim-treesitter/update.py
|
#!/usr/bin/env nix-shell
#!nix-shell update-shell.nix -i python
import json
import subprocess
from concurrent.futures import ThreadPoolExecutor
from os import environ
from os.path import dirname, join
lockfile = json.load(open(join(environ["NVIM_TREESITTER"], "lockfile.json")))
configs = json.loads(
subprocess.check_output(
[
"nvim",
"--headless",
"-u",
"NONE",
"+lua io.write(vim.json.encode(require('nvim-treesitter.parsers').get_parser_configs()))",
"+quit!",
]
)
)
def generate_grammar(item):
lang, lock = item
cfg = configs.get(lang)
if not cfg:
return ""
info = cfg["install_info"]
url = info["url"]
rev = lock["revision"]
generated = f""" {lang} = buildGrammar {{
language = "{lang}";
version = "0.0.0+rev={rev[:7]}";
src = """
generated += subprocess.check_output(["nurl", url, rev, "--indent=4"], text=True)
generated += ";"
location = info.get("location")
if location:
generated += f"""
location = "{location}";"""
if info.get("requires_generate_from_grammar"):
generated += """
generate = true;"""
generated += f"""
meta.homepage = "{url}";
}};
"""
return generated
generated_file = """# generated by pkgs/applications/editors/vim/plugins/nvim-treesitter/update.py
{ buildGrammar, """
generated_file += subprocess.check_output(["nurl", "-Ls", ", "], text=True)
generated_file += """ }:
{
"""
for generated in ThreadPoolExecutor().map(generate_grammar, lockfile.items()):
generated_file += generated
generated_file += "}\n"
open(join(dirname(__file__), "generated.nix"), "w").write(generated_file)
| 1,738 | 21.294872 | 102 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/virtualization/crosvm/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -p common-updater-scripts python3
#! nix-shell -i python
import csv
import json
import re
import shlex
import subprocess
from os.path import abspath, dirname, splitext
from urllib.request import urlopen
# CrOS version numbers look like this:
# [<chrome-major-version>.]<tip-build>.<branch-build>.<branch-branch-build>
#
# As far as I can tell, branches are where internal Google
# modifications are added to turn Chromium OS into Chrome OS, and
# branch branches are used for fixes for specific devices. So for
# Chromium OS they will always be 0. This is a best guess, and is not
# documented.
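# e.g. a (hypothetical) stable pair of 114.0.5735.90 (Chrome) and 15437.0.0
# (platform) yields chrome_major_version 114 and chromeos_tip_build 15437 below.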
with urlopen('https://chromiumdash.appspot.com/cros/download_serving_builds_csv?deviceCategory=ChromeOS') as resp:
reader = csv.reader(map(bytes.decode, resp))
header = reader.__next__()
cr_stable_index = header.index('cr_stable')
cros_stable_index = header.index('cros_stable')
chrome_version = []
platform_version = []
for line in reader:
if line[cr_stable_index] == "no update":
continue
this_chrome_version = list(map(int, line[cr_stable_index].split('.')))
this_platform_version = list(map(int, line[cros_stable_index].split('.')))
chrome_version = max(chrome_version, this_chrome_version)
platform_version = max(platform_version, this_platform_version)
chrome_major_version = chrome_version[0]
chromeos_tip_build = platform_version[0]
release_branch = f'release-R{chrome_major_version}-{chromeos_tip_build}.B'
# Determine the git revision.
with urlopen(f'https://chromium.googlesource.com/chromiumos/platform/crosvm/+/refs/heads/{release_branch}?format=JSON') as resp:
resp.readline() # Remove )]}' header
rev = json.load(resp)['commit']
# Determine the patch version by counting the commits that have been
# added to the release branch since it forked off the chromeos branch.
with urlopen(f'https://chromium.googlesource.com/chromiumos/platform/crosvm/+log/refs/heads/chromeos..{rev}?format=JSON') as resp:
resp.readline() # Remove )]}' header
branch_commits = json.load(resp)['log']
version = f'{chrome_major_version}.{len(branch_commits)}'
# Update the version, git revision, and hash in crosvm's default.nix.
subprocess.run(['update-source-version', 'crosvm', f'--rev={rev}', version])
| 2,340 | 41.563636 | 130 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/applications/window-managers/sommelier/update.py
|
#! /usr/bin/env nix-shell
#! nix-shell -p common-updater-scripts python3
#! nix-shell -i python
import csv
import json
import re
import shlex
import subprocess
from os.path import abspath, dirname, splitext
from urllib.request import urlopen
# CrOS version numbers look like this:
# [<chrome-major-version>.]<tip-build>.<branch-build>.<branch-branch-build>
#
# As far as I can tell, branches are where internal Google
# modifications are added to turn Chromium OS into Chrome OS, and
# branch branches are used for fixes for specific devices. So for
# Chromium OS they will always be 0. This is a best guess, and is not
# documented.
with urlopen('https://chromiumdash.appspot.com/cros/download_serving_builds_csv?deviceCategory=ChromeOS') as resp:
reader = csv.reader(map(bytes.decode, resp))
header = reader.__next__()
cr_stable_index = header.index('cr_stable')
cros_stable_index = header.index('cros_stable')
chrome_version = []
platform_version = []
for line in reader:
this_chrome_version = list(map(int, line[cr_stable_index].split('.')))
this_platform_version = list(map(int, line[cros_stable_index].split('.')))
chrome_version = max(chrome_version, this_chrome_version)
platform_version = max(platform_version, this_platform_version)
chrome_major_version = chrome_version[0]
chromeos_tip_build = platform_version[0]
release_branch = f'release-R{chrome_major_version}-{chromeos_tip_build}.B'
# Determine the git revision.
with urlopen(f'https://chromium.googlesource.com/chromiumos/platform2/+/refs/heads/{release_branch}?format=JSON') as resp:
resp.readline() # Remove )]}' header
rev = json.load(resp)['commit']
# Determine the patch version by counting the commits that have been
# added to the release branch since it forked off the chromeos branch.
with urlopen(f'https://chromium.googlesource.com/chromiumos/platform2/+log/refs/heads/main..{rev}/vm_tools/sommelier?format=JSON') as resp:
resp.readline() # Remove )]}' header
branch_commits = json.load(resp)['log']
version = f'{chrome_major_version}.{len(branch_commits)}'
# Update the version, git revision, and hash in sommelier's default.nix.
subprocess.run(['update-source-version', 'sommelier', f'--rev={rev}', version])
# Find the path to sommelier's default.nix, so Cargo.lock can be written
# into the same directory.
argv = ['nix-instantiate', '--eval', '--json', '-A', 'sommelier.meta.position']
position = json.loads(subprocess.check_output(argv).decode('utf-8'))
filename = re.match(r'[^:]*', position)[0]
| 2,572 | 42.610169 | 139 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/parsing/tree-sitter/update_impl.py
|
from urllib.parse import quote
import json
import subprocess as sub
import os
import sys
from typing import Iterator, Any, Literal, TypedDict, Optional
from tempfile import NamedTemporaryFile
debug: bool = True if os.environ.get("DEBUG", False) else False
Bin = str
args: dict[str, Any] = json.loads(os.environ["ARGS"])
bins: dict[str, Bin] = args["binaries"]
mode: str = sys.argv[1]
jsonArg: dict = json.loads(sys.argv[2])
Args = Iterator[str]
def log(msg: str) -> None:
print(msg, file=sys.stderr)
def atomically_write(file_path: str, content: bytes) -> None:
"""atomically write the content into `file_path`"""
with NamedTemporaryFile(
# write to the parent dir, so that it’s guaranteed to be on the same filesystem
dir=os.path.dirname(file_path),
delete=False
) as tmp:
try:
tmp.write(content)
os.rename(
src=tmp.name,
dst=file_path
)
        except Exception:
            os.unlink(tmp.name)
            raise
def curl_github_args(token: str | None, url: str) -> Args:
"""Query the github API via curl"""
yield bins["curl"]
if not debug:
yield "--silent"
# follow redirects
yield "--location"
if token:
yield "-H"
yield f"Authorization: token {token}"
yield url
def curl_result(output: bytes) -> Any | Literal["not found"]:
"""Parse the curl result of the github API"""
res: Any = json.loads(output)
match res:
case dict(res):
message: str = res.get("message", "")
if "rate limit" in message:
sys.exit("Rate limited by the Github API")
if "Not Found" in message:
return "not found"
# if the result is another type, we can pass it on
return res
def nix_prefetch_git_args(url: str, version_rev: str) -> Args:
"""Prefetch a git repository"""
yield bins["nix-prefetch-git"]
if not debug:
yield "--quiet"
yield "--no-deepClone"
yield "--url"
yield url
yield "--rev"
yield version_rev
def run_cmd(args: Args) -> bytes:
all = list(args)
if debug:
log(str(all))
return sub.check_output(all)
Dir = str
def fetchRepo() -> None:
"""fetch the given repo and write its nix-prefetch output to the corresponding grammar json file"""
match jsonArg:
case {
"orga": orga,
"repo": repo,
"outputDir": outputDir,
"nixRepoAttrName": nixRepoAttrName,
}:
token: str | None = os.environ.get("GITHUB_TOKEN", None)
out = run_cmd(
curl_github_args(
token,
url=f"https://api.github.com/repos/{quote(orga)}/{quote(repo)}/releases/latest"
)
)
release: str
match curl_result(out):
case "not found":
if "branch" in jsonArg:
branch = jsonArg.get("branch")
release = f"refs/heads/{branch}"
else:
                        # github sometimes returns an empty list even though there are releases
log(f"uh-oh, latest for {orga}/{repo} is not there, using HEAD")
release = "HEAD"
case {"tag_name": tag_name}:
release = tag_name
case _:
sys.exit(f"git result for {orga}/{repo} did not have a `tag_name` field")
log(f"Fetching latest release ({release}) of {orga}/{repo} …")
res = run_cmd(
nix_prefetch_git_args(
url=f"https://github.com/{quote(orga)}/{quote(repo)}",
version_rev=release
)
)
atomically_write(
file_path=os.path.join(
outputDir,
f"{nixRepoAttrName}.json"
),
content=res
)
case _:
sys.exit("input json must have `orga` and `repo` keys")
def fetchOrgaLatestRepos(orga: str) -> set[str]:
"""fetch the latest (100) repos from the given github organization"""
token: str | None = os.environ.get("GITHUB_TOKEN", None)
out = run_cmd(
curl_github_args(
token,
url=f"https://api.github.com/orgs/{quote(orga)}/repos?per_page=100"
)
)
match curl_result(out):
case "not found":
sys.exit(f"github organization {orga} not found")
case list(repos):
res: list[str] = []
for repo in repos:
name = repo.get("name")
if name:
res.append(name)
return set(res)
        case other:
            sys.exit(f"github result was not a list of repos, but {other}")
def checkTreeSitterRepos(latest_github_repos: set[str]) -> None:
"""Make sure we know about all tree sitter repos on the tree sitter orga."""
known: set[str] = set(args["knownTreeSitterOrgGrammarRepos"])
ignored: set[str] = set(args["ignoredTreeSitterOrgRepos"])
unknown = latest_github_repos - (known | ignored)
if unknown:
sys.exit(f"These repositories are neither known nor ignored:\n{unknown}")
Grammar = TypedDict(
"Grammar",
{
"nixRepoAttrName": str,
"orga": str,
"repo": str,
"branch": Optional[str]
}
)
def printAllGrammarsNixFile() -> None:
"""Print a .nix file that imports all grammars."""
allGrammars: list[dict[str, Grammar]] = jsonArg["allGrammars"]
outputDir: Dir = jsonArg["outputDir"]
def file() -> Iterator[str]:
yield "{ lib }:"
yield "{"
for grammar in allGrammars:
n = grammar["nixRepoAttrName"]
yield f" {n} = lib.importJSON ./{n}.json;"
yield "}"
yield ""
atomically_write(
file_path=os.path.join(
outputDir,
"default.nix"
),
content="\n".join(file()).encode()
)
def fetchAndCheckTreeSitterRepos() -> None:
log("fetching list of grammars")
latest_repos = fetchOrgaLatestRepos(orga="tree-sitter")
log("checking the tree-sitter repo list against the grammars we know")
checkTreeSitterRepos(latest_repos)
match mode:
case "fetch-repo":
fetchRepo()
case "fetch-and-check-tree-sitter-repos":
fetchAndCheckTreeSitterRepos()
case "print-all-grammars-nix-file":
printAllGrammarsNixFile()
case _:
sys.exit(f"mode {mode} unknown")
| 6,610 | 28.64574 | 103 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/build-managers/bazel/update-srcDeps.py
|
#!/usr/bin/env python3
import sys
import json
if len(sys.argv) == 1:
print("usage: ./this-script WORKSPACE", file=sys.stderr)
print("Takes the bazel WORKSPACE file and reads all archives into a json dict (by evaling it as python code)", file=sys.stderr)
print("Hail Eris.", file=sys.stderr)
sys.exit(1)
http_archives = []
# just the kw args are the dict { name, sha256, urls … }
def http_archive(**kw):
http_archives.append(kw)
# like http_file
def http_file(**kw):
http_archives.append(kw)
# this is inverted from http_archive/http_file and bundles multiple archives
def distdir_tar(**kw):
for archive_name in kw['archives']:
http_archives.append({
"name": archive_name,
"sha256": kw['sha256'][archive_name],
"urls": kw['urls'][archive_name]
})
# stubs for symbols we are not interested in
# might need to be expanded if new bazel releases add symbols to the workspace
def workspace(name): pass
def load(*args): pass
def bind(**kw): pass
def list_source_repository(**kw): pass
def new_local_repository(**kw): pass
def local_repository(**kw): pass
DOC_VERSIONS = []
def stardoc_repositories(**kw): pass
def skydoc_repositories(**kw): pass
def rules_sass_dependencies(**kw): pass
def node_repositories(**kw): pass
def sass_repositories(**kw): pass
def register_execution_platforms(*args): pass
def rbe_autoconfig(*args, **kw): pass
def rules_pkg_dependencies(*args, **kw): pass
def winsdk_configure(*args, **kw): pass
def register_local_rc_exe_toolchains(*args, **kw): pass
def register_toolchains(*args, **kw): pass
def debian_deps(): pass
def grpc_deps(): pass
def grpc_extra_deps(): pass
def bazel_skylib_workspace(): pass
# execute the WORKSPACE like it was python code in this module,
# using all the function stubs from above.
with open(sys.argv[1]) as f:
exec(f.read())
# transform to a dict with the names as keys
d = { el['name']: el for el in http_archives }
print(json.dumps(d, sort_keys=True, indent=4))
| 2,007 | 30.873016 | 131 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/build-managers/bazel/bazel_5/update-srcDeps.py
|
#!/usr/bin/env python3
import sys
import json
if len(sys.argv) != 2:
print("usage: ./this-script src-deps.json < WORKSPACE", file=sys.stderr)
print("Takes the bazel WORKSPACE file and reads all archives into a json dict (by evaling it as python code)", file=sys.stderr)
print("Hail Eris.", file=sys.stderr)
sys.exit(1)
http_archives = []
# just the kw args are the dict { name, sha256, urls … }
def http_archive(**kw):
http_archives.append(kw)
# like http_file
def http_file(**kw):
http_archives.append(kw)
# this is inverted from http_archive/http_file and bundles multiple archives
def _distdir_tar(**kw):
for archive_name in kw['archives']:
http_archives.append({
"name": archive_name,
"sha256": kw['sha256'][archive_name],
"urls": kw['urls'][archive_name]
})
# TODO?
def git_repository(**kw):
print(json.dumps(kw, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
# execute the WORKSPACE like it was python code in this module,
# using all the function stubs from above.
exec(sys.stdin.read())
# transform to a dict with the names as keys
d = { el['name']: el for el in http_archives }
def has_urls(el):
return ('url' in el and el['url']) or ('urls' in el and el['urls'])
def has_sha256(el):
return 'sha256' in el and el['sha256']
bad_archives = list(filter(lambda el: not has_urls(el) or not has_sha256(el), d.values()))
if bad_archives:
print('Following bazel dependencies are missing url or sha256', file=sys.stderr)
print('Check bazel sources for master or non-checksummed dependencies', file=sys.stderr)
for el in bad_archives:
print(json.dumps(el, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
with open(sys.argv[1], "w") as f:
print(json.dumps(d, sort_keys=True, indent=4), file=f)
| 1,842 | 32.509091 | 131 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/build-managers/bazel/bazel_4/update-srcDeps.py
|
#!/usr/bin/env python3
import sys
import json
if len(sys.argv) != 2:
print("usage: ./this-script src-deps.json < WORKSPACE", file=sys.stderr)
print("Takes the bazel WORKSPACE file and reads all archives into a json dict (by evaling it as python code)", file=sys.stderr)
print("Hail Eris.", file=sys.stderr)
sys.exit(1)
http_archives = []
# just the kw args are the dict { name, sha256, urls … }
def http_archive(**kw):
http_archives.append(kw)
# like http_file
def http_file(**kw):
http_archives.append(kw)
# this is inverted from http_archive/http_file and bundles multiple archives
def distdir_tar(**kw):
for archive_name in kw['archives']:
http_archives.append({
"name": archive_name,
"sha256": kw['sha256'][archive_name],
"urls": kw['urls'][archive_name]
})
# TODO?
def git_repository(**kw):
print(json.dumps(kw, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
# execute the WORKSPACE like it was python code in this module,
# using all the function stubs from above.
exec(sys.stdin.read())
# transform to a dict with the names as keys
d = { el['name']: el for el in http_archives }
def has_urls(el):
return ('url' in el and el['url']) or ('urls' in el and el['urls'])
def has_sha256(el):
return 'sha256' in el and el['sha256']
bad_archives = list(filter(lambda el: not has_urls(el) or not has_sha256(el), d.values()))
if bad_archives:
print('Following bazel dependencies are missing url or sha256', file=sys.stderr)
print('Check bazel sources for master or non-checksummed dependencies', file=sys.stderr)
for el in bad_archives:
print(json.dumps(el, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
with open(sys.argv[1], "w") as f:
print(json.dumps(d, sort_keys=True, indent=4), file=f)
| 1,841 | 32.490909 | 131 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/build-managers/bazel/bazel_6/update-srcDeps.py
|
#!/usr/bin/env python3
import sys
import json
if len(sys.argv) != 2:
print("usage: ./this-script src-deps.json < WORKSPACE", file=sys.stderr)
print("Takes the bazel WORKSPACE file and reads all archives into a json dict (by evaling it as python code)", file=sys.stderr)
print("Hail Eris.", file=sys.stderr)
sys.exit(1)
http_archives = []
# just the kw args are the dict { name, sha256, urls … }
def http_archive(**kw):
http_archives.append(kw)
# like http_file
def http_file(**kw):
http_archives.append(kw)
# this is inverted from http_archive/http_file and bundles multiple archives
def _distdir_tar(**kw):
for archive_name in kw['archives']:
http_archives.append({
"name": archive_name,
"sha256": kw['sha256'][archive_name],
"urls": kw['urls'][archive_name]
})
# TODO?
def git_repository(**kw):
print(json.dumps(kw, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
# execute the WORKSPACE like it was python code in this module,
# using all the function stubs from above.
exec(sys.stdin.read())
# transform to a dict with the names as keys
d = { el['name']: el for el in http_archives }
def has_urls(el):
return ('url' in el and el['url']) or ('urls' in el and el['urls'])
def has_sha256(el):
return 'sha256' in el and el['sha256']
bad_archives = list(filter(lambda el: not has_urls(el) or not has_sha256(el), d.values()))
if bad_archives:
print('Following bazel dependencies are missing url or sha256', file=sys.stderr)
print('Check bazel sources for master or non-checksummed dependencies', file=sys.stderr)
for el in bad_archives:
print(json.dumps(el, sort_keys=True, indent=4), file=sys.stderr)
sys.exit(1)
with open(sys.argv[1], "w") as f:
print(json.dumps(d, sort_keys=True, indent=4), file=f)
| 1,842 | 32.509091 | 131 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/poetry2nix/poetry2nix/fetch_from_legacy.py
|
# Some repositories (such as Devpi) expose the Pypi legacy API
# (https://warehouse.pypa.io/api-reference/legacy.html).
#
# Note it is not possible to use pip for this; see
# https://discuss.python.org/t/pip-download-just-the-source-packages-no-building-no-metadata-etc/4651/12
import os
import sys
import netrc
from urllib.parse import urlparse, urlunparse
from html.parser import HTMLParser
import urllib.request
import shutil
import ssl
from os.path import normpath
# Parse the legacy index page to extract the href and package names
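# A PEP 503 "simple" index page lists one anchor per file, e.g. (hypothetically):
#   <a href="../../packages/aa/bb/foo-1.0.tar.gz#sha256=...">foo-1.0.tar.gz</a>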
class Pep503(HTMLParser):
def __init__(self):
super().__init__()
self.sources = {}
self.url = None
self.name = None
def handle_data(self, data):
if self.url is not None:
self.name = data
def handle_starttag(self, tag, attrs):
if tag == "a":
for name, value in attrs:
if name == "href":
self.url = value
def handle_endtag(self, tag):
if self.url is not None:
self.sources[self.name] = self.url
self.url = None
url = sys.argv[1]
package_name = sys.argv[2]
index_url = url + "/" + package_name + "/"
package_filename = sys.argv[3]
# Parse username and password for this host from the netrc file if given.
username, password = None, None
if os.environ.get("NETRC"):
netrc_obj = netrc.netrc(os.environ["NETRC"])
host = urlparse(index_url).netloc
# Strip port number if present
if ":" in host:
host = host.split(":")[0]
username, _, password = netrc_obj.authenticators(host)
print("Reading index %s" % index_url)
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
# Extract out username/password from index_url, if present.
parsed_url = urlparse(index_url)
username = parsed_url.username or username
password = parsed_url.password or password
index_url = parsed_url._replace(netloc=parsed_url.netloc.rpartition("@")[-1]).geturl()
req = urllib.request.Request(index_url)
if username and password:
import base64
password_b64 = base64.b64encode(":".join((username, password)).encode()).decode(
"utf-8"
)
req.add_header("Authorization", "Basic {}".format(password_b64))
response = urllib.request.urlopen(req, context=context)
index = response.read()
parser = Pep503()
parser.feed(str(index, "utf-8"))
if package_filename not in parser.sources:
print(
"The file %s has not be found in the index %s" % (package_filename, index_url)
)
exit(1)
package_file = open(package_filename, "wb")
# Sometimes the href is a relative or absolute path within the index's domain.
indicated_url = urlparse(parser.sources[package_filename])
if indicated_url.netloc == "":
parsed_url = urlparse(index_url)
if indicated_url.path.startswith("/"):
# An absolute path within the index's domain.
path = parser.sources[package_filename]
else:
# A relative path.
path = parsed_url.path + "/" + parser.sources[package_filename]
package_url = urlunparse(
(
parsed_url.scheme,
parsed_url.netloc,
path,
None,
None,
None,
)
)
else:
package_url = parser.sources[package_filename]
# Handle urls containing "../"
parsed_url = urlparse(package_url)
real_package_url = urlunparse(
(
parsed_url.scheme,
parsed_url.netloc,
normpath(parsed_url.path),
parsed_url.params,
parsed_url.query,
parsed_url.fragment,
)
)
print("Downloading %s" % real_package_url)
req = urllib.request.Request(real_package_url)
if username and password:
req.add_unredirected_header("Authorization", "Basic {}".format(password_b64))
response = urllib.request.urlopen(req, context=context)
with response as r:
shutil.copyfileobj(r, package_file)
| 3,896 | 27.866667 | 104 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/poetry2nix/poetry2nix/hooks/python-requires-patch-hook.py
|
#!/usr/bin/env python
import ast
import sys
import io
# Python2 compat
if sys.version_info[0] < 3:
FileNotFoundError = IOError
# Python <= 3.8 compat
def astunparse(tree):
# Use bundled unparse by default
if hasattr(ast, "unparse"):
return ast.unparse(tree)
# Use example tool from Python sources for older interpreter versions
from poetry2nix_astunparse import Unparser
buf = io.StringIO()
up = Unparser(tree, buf)
return buf.getvalue()
class Rewriter(ast.NodeVisitor):
def __init__(self, *args, **kwargs):
super(Rewriter, self).__init__(*args, **kwargs)
self.modified = False
def visit_Call(self, node):
function_name = ""
if isinstance(node.func, ast.Name):
function_name = node.func.id
elif isinstance(node.func, ast.Attribute):
function_name = node.func.attr
else:
return
if function_name != "setup":
return
for kw in node.keywords:
if kw.arg != "python_requires":
continue
value = kw.value
if not isinstance(value, ast.Constant):
return
# Rewrite version constraints without wildcard characters.
#
# Only rewrite the file if the modified value actually differs, as we lose whitespace and comments when rewriting
# with the AST module.
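            # e.g. a python_requires of ">=3.6.*, <4" is rewritten to ">=3.6, <4".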
python_requires = ", ".join(
[v.strip().rstrip(".*") for v in value.value.split(",")]
)
if value.value != python_requires:
value.value = python_requires
self.modified = True
if __name__ == "__main__":
sys.path.extend(sys.argv[1:])
try:
with open("setup.py", encoding="utf-8-sig") as f:
tree = ast.parse(f.read())
except FileNotFoundError:
exit(0)
r = Rewriter()
r.visit(tree)
if r.modified:
with open("setup.py", "w") as f:
f.write(astunparse(tree))
| 2,043 | 24.55 | 125 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/poetry2nix/poetry2nix/hooks/pyproject-without-special-deps.py
|
#!/usr/bin/env python
# Patch out special dependencies (git and path) from a pyproject.toml file
import argparse
import sys
import tomlkit
def main(input, output, fields_to_remove):
data = tomlkit.loads(input.read())
try:
deps = data["tool"]["poetry"]["dependencies"]
except KeyError:
pass
else:
for dep in deps.values():
if isinstance(dep, dict):
any_removed = False
for field in fields_to_remove:
any_removed |= dep.pop(field, None) is not None
if any_removed:
dep["version"] = "*"
dep.pop("develop", None)
output.write(tomlkit.dumps(data))
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument(
"-i",
"--input",
type=argparse.FileType("r"),
default=sys.stdin,
help="Location from which to read input TOML",
)
p.add_argument(
"-o",
"--output",
type=argparse.FileType("w"),
default=sys.stdout,
help="Location to write output TOML",
)
p.add_argument(
"-f",
"--fields-to-remove",
nargs="+",
help="The fields to remove from the dependency's TOML",
)
args = p.parse_args()
main(args.input, args.output, args.fields_to_remove)
| 1,362 | 23.781818 | 74 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/tools/poetry2nix/poetry2nix/overrides/shapely-rewrite.py
|
"""
Rewrite libc/library path references to Nix store paths
Nixpkgs uses a normal patch for this but we need to be less
sensitive to changes between versions.
"""
from textwrap import dedent
import sys
import ast
import os
with open(sys.argv[1]) as f:
mod = ast.parse(f.read(), "geos.py")
class LibTransformer(ast.NodeTransformer):
_lgeos_replaced = False
def visit_If(self, node):
if ast.unparse(node).startswith("if sys.platform.startswith('linux')"):
return ast.parse(
dedent(
"""
free = CDLL(%s).free
free.argtypes = [c_void_p]
free.restype = None
"""
)
% (lambda x: "'" + x + "'" if x else None)(os.environ.get("GEOS_LIBC"))
)
return node
def visit_Assign(self, node):
_target = node.targets[0]
if (
not self._lgeos_replaced
and isinstance(_target, ast.Name)
and _target.id == "_lgeos"
):
self._lgeos_replaced = True
return ast.parse("_lgeos = CDLL('%s')" % os.environ["GEOS_LIBRARY_PATH"])
return node
with open(sys.argv[1], "w") as f:
f.write(ast.unparse(LibTransformer().visit(mod)))
| 1,268 | 26 | 87 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/python-modules/recursive-pth-loader/sitecustomize.py
|
"""Recursively load pth files in site-packages of sys.path
- iterate over sys.path
- check for pth in dirs that end in site-packages
- ignore import statements in pth files
- add dirs listed in pth files right after current sys.path element,
they will be processed in next iteration
"""
import os
import site
import sys
for path_idx, sitedir in enumerate(sys.path):
# ignore non-site-packages
if not sitedir.endswith('site-packages'):
continue
# find pth files
try:
names = os.listdir(sitedir)
except os.error:
continue
dotpth = os.extsep + "pth"
pths = [name for name in names if name.endswith(dotpth)]
for pth in pths:
fullname = os.path.join(sitedir, pth)
try:
f = open(fullname, "rU")
except IOError:
continue
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
if line.startswith(("import ", "import\t")):
continue
line = line.rstrip()
dir, dircase = site.makepath(sitedir, line)
                if dircase not in sys.path:
sys.path.insert(path_idx+1, dir)
| 1,240 | 25.404255 | 68 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/python-modules/spacy/annotation-test/annotate.py
|
import pytest
import spacy
en_text = (
"When Sebastian Thrun started working on self-driving cars at "
"Google in 2007, few people outside of the company took him "
"seriously. “I can tell you very senior CEOs of major American "
"car companies would shake my hand and turn away because I wasn’t "
"worth talking to,” said Thrun, in an interview with Recode earlier "
"this week.")
@pytest.fixture
def en_core_web_sm():
return spacy.load("en_core_web_sm")
@pytest.fixture
def doc_en_core_web_sm(en_core_web_sm):
return en_core_web_sm(en_text)
def test_entities(doc_en_core_web_sm):
entities = list(map(lambda e: (e.text, e.label_),
doc_en_core_web_sm.ents))
assert entities == [
('Sebastian Thrun', 'PERSON'),
('Google', 'ORG'),
('2007', 'DATE'),
('American', 'NORP'),
('Thrun', 'PERSON'),
('Recode', 'ORG'),
('earlier this week', 'DATE'),
]
def test_nouns(doc_en_core_web_sm):
assert [
chunk.text for chunk in doc_en_core_web_sm.noun_chunks] == [
'Sebastian Thrun',
'self-driving cars',
'Google',
'few people',
'the company',
'him',
'I',
'you',
'very senior CEOs',
'major American car companies',
'my hand',
'I',
'Thrun',
'an interview',
'Recode']
def test_verbs(doc_en_core_web_sm):
assert [
token.lemma_ for token in doc_en_core_web_sm if token.pos_ == "VERB"] == [
'start',
'work',
'drive',
'take',
'tell',
'shake',
'turn',
'talk',
'say']
| 1,695 | 23.228571 | 82 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/python-modules/waitress-django/setup.py
|
#!/usr/bin/env python
from distutils.core import setup
setup( name = "waitress-django"
, version = "1.0.0"
, description = "A waitress WSGI server serving django"
, author = "Bas van Dijk"
, author_email = "[email protected]"
, package_dir = {"" : "src"}
, scripts = ["src/waitress-serve-django"]
)
| 365 | 27.153846 | 61 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/python-modules/spacy-transformers/annotation-test/annotate.py
|
import pytest
import spacy
en_text = (
"When Sebastian Thrun started working on self-driving cars at "
"Google in 2007, few people outside of the company took him "
"seriously. “I can tell you very senior CEOs of major American "
"car companies would shake my hand and turn away because I wasn’t "
"worth talking to,” said Thrun, in an interview with Recode earlier "
"this week.")
@pytest.fixture
def en_core_web_trf():
return spacy.load("en_core_web_trf")
@pytest.fixture
def doc_en_core_web_trf(en_core_web_trf):
return en_core_web_trf(en_text)
def test_entities(doc_en_core_web_trf):
entities = list(map(lambda e: (e.text, e.label_),
doc_en_core_web_trf.ents))
assert entities == [
('Sebastian Thrun', 'PERSON'),
('Google', 'ORG'),
('2007', 'DATE'),
('American', 'NORP'),
('Thrun', 'PERSON'),
('Recode', 'ORG'),
('earlier this week', 'DATE'),
]
def test_nouns(doc_en_core_web_trf):
assert [
chunk.text for chunk in doc_en_core_web_trf.noun_chunks] == [
'Sebastian Thrun',
'self-driving cars',
'Google',
'few people',
'the company',
'him',
'I',
'you',
'very senior CEOs',
'major American car companies',
'my hand',
'I',
'Thrun',
'an interview',
'Recode']
def test_verbs(doc_en_core_web_trf):
assert [
token.lemma_ for token in doc_en_core_web_trf if token.pos_ == "VERB"] == [
'start',
'work',
'drive',
'take',
'tell',
'shake',
'turn',
'talk',
'say']
| 1,706 | 23.385714 | 83 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/python-modules/bpycv/bpycv-test.py
|
# based on https://github.com/DIYer22/bpycv/blob/c576e01622d87eb3534f73bf1a5686bd2463de97/example/ycb_demo.py
import bpy
import bpycv
import os
import glob
import random
from pathlib import Path
example_data_dir = os.environ['BPY_EXAMPLE_DATA']
out_dir = Path(os.environ['out'])
out_dir.mkdir(parents=True, exist_ok=True)
models = sorted(glob.glob(os.path.join(example_data_dir, "model", "*", "*.obj")))
cat_id_to_model_path = dict(enumerate(sorted(models), 1))
distractors = sorted(glob.glob(os.path.join(example_data_dir, "distractor", "*.obj")))
bpycv.clear_all()
bpy.context.scene.frame_set(1)
bpy.context.scene.render.engine = "CYCLES"
bpy.context.scene.cycles.samples = 32
bpy.context.scene.render.resolution_y = 1024
bpy.context.scene.render.resolution_x = 1024
bpy.context.view_layer.cycles.denoising_store_passes = False
# A transparency stage for holding rigid body
stage = bpycv.add_stage(transparency=True)
bpycv.set_cam_pose(cam_radius=1, cam_deg=45)
hdri_dir = os.path.join(example_data_dir, "background_and_light")
hdri_manager = bpycv.HdriManager(
hdri_dir=hdri_dir, download=False
) # if download is True, will auto download .hdr file from HDRI Haven
hdri_path = hdri_manager.sample()
bpycv.load_hdri_world(hdri_path, random_rotate_z=True)
# load 5 objects
for index in range(5):
cat_id = random.choice(list(cat_id_to_model_path))
model_path = cat_id_to_model_path[cat_id]
obj = bpycv.load_obj(model_path)
obj.location = (
random.uniform(-0.2, 0.2),
random.uniform(-0.2, 0.2),
random.uniform(0.1, 0.3),
)
obj.rotation_euler = [random.uniform(-3.1415, 3.1415) for _ in range(3)]
# set each instance a unique inst_id, which is used to generate instance annotation.
obj["inst_id"] = cat_id * 1000 + index
with bpycv.activate_obj(obj):
bpy.ops.rigidbody.object_add()
# load 6 distractors
for index in range(6):
distractor_path = random.choice(distractors)
target_size = random.uniform(0.1, 0.3)
distractor = bpycv.load_distractor(distractor_path, target_size=target_size)
distractor.location = (
random.uniform(-0.2, 0.2),
random.uniform(-0.2, 0.2),
random.uniform(0.1, 0.3),
)
distractor.rotation_euler = [random.uniform(-3.1415, 3.1415) for _ in range(3)]
with bpycv.activate_obj(distractor):
bpy.ops.rigidbody.object_add()
# run the physics engine for 20 frames
for i in range(20):
bpy.context.scene.frame_set(bpy.context.scene.frame_current + 1)
# render image, instance annotation and depth in one line of code
result = bpycv.render_data()
result.save(dataset_dir=str(out_dir.resolve()), fname="0", save_blend=True)
print(f'Save to "{out_dir}"')
print(f'Open "{out_dir}/vis/" to see visualize result.')
| 2,766 | 33.160494 | 109 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/interpreters/python/run_setup.py
|
# -*- coding: utf-8 -*-
import setuptools
import tokenize
__file__='setup.py';
exec(compile(getattr(tokenize, 'open', open)(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
| 189 | 20.111111 | 106 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/interpreters/python/sitecustomize.py
|
"""
This is a Nix-specific module for discovering modules built with Nix.
The module recursively adds paths that are on `NIX_PYTHONPATH` to `sys.path`. In
order to process possible `.pth` files `site.addsitedir` is used.
The paths listed in `PYTHONPATH` are added to `sys.path` afterwards, but they
will be added before the entries we add here and thus take precedence.
Note the `NIX_PYTHONPATH` environment variable is unset in order to prevent leakage.
Similarly, this module listens to the environment variable `NIX_PYTHONEXECUTABLE`
and sets `sys.executable` to its value.
"""
import site
import sys
import os
import functools
paths = os.environ.pop('NIX_PYTHONPATH', None)
if paths:
functools.reduce(lambda k, p: site.addsitedir(p, k), paths.split(':'), site._init_pathinfo())
# Check whether we are in a venv or virtualenv.
# For Python 3 we check whether our `base_prefix` is different from our current `prefix`.
# For Python 2 we check whether the non-standard `real_prefix` is set.
# https://stackoverflow.com/questions/1871549/determine-if-python-is-running-inside-virtualenv
in_venv = (sys.version_info.major == 3 and sys.prefix != sys.base_prefix) or (sys.version_info.major == 2 and hasattr(sys, "real_prefix"))
if not in_venv:
executable = os.environ.pop('NIX_PYTHONEXECUTABLE', None)
prefix = os.environ.pop('NIX_PYTHONPREFIX', None)
if 'PYTHONEXECUTABLE' not in os.environ and executable is not None:
sys.executable = executable
if prefix is not None:
# Sysconfig does not like it when sys.prefix is set to None
sys.prefix = sys.exec_prefix = prefix
site.PREFIXES.insert(0, prefix)
| 1,659 | 40.5 | 138 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/interpreters/python/update-python-libraries/update-python-libraries.py
|
#!/usr/bin/env python3
"""
Update a Python package expression by passing in the `.nix` file, or the directory containing it.
You can pass in multiple files or paths.
You'll likely want to use
``
$ ./update-python-libraries ../../pkgs/development/python-modules/**/default.nix
``
to update all non-pinned libraries in that folder.
"""
import argparse
import json
import logging
import os
import re
import requests
from concurrent.futures import ThreadPoolExecutor as Pool
from packaging.version import Version as _Version
from packaging.version import InvalidVersion
from packaging.specifiers import SpecifierSet
from typing import Optional, Any
import collections
import subprocess
INDEX = "https://pypi.io/pypi"
"""url of PyPI"""
EXTENSIONS = ['tar.gz', 'tar.bz2', 'tar', 'zip', '.whl']
"""Permitted file extensions. These are evaluated from left to right and the first occurance is returned."""
PRERELEASES = False
BULK_UPDATE = False
GIT = "git"
NIXPKGS_ROOT = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).decode('utf-8').strip()
logging.basicConfig(level=logging.INFO)
class Version(_Version, collections.abc.Sequence):
def __init__(self, version):
super().__init__(version)
# We cannot use `str(Version(0.04.21))` because that becomes `0.4.21`
# https://github.com/avian2/unidecode/issues/13#issuecomment-354538882
self.raw_version = version
def __getitem__(self, i):
return self._version.release[i]
def __len__(self):
return len(self._version.release)
def __iter__(self):
yield from self._version.release
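# e.g. _get_values("version", 'version = "1.2.3";') returns ["1.2.3"] for a
# typical (hypothetical) Nix expression line.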
def _get_values(attribute, text):
"""Match attribute in text and return all matches.
:returns: List of matches.
"""
regex = fr'{re.escape(attribute)}\s+=\s+"(.*)";'
regex = re.compile(regex)
values = regex.findall(text)
return values
def _get_attr_value(attr_path: str) -> Optional[Any]:
try:
response = subprocess.check_output([
"nix",
"--extra-experimental-features", "nix-command",
"eval",
"-f", f"{NIXPKGS_ROOT}/default.nix",
"--json",
f"{attr_path}"
])
return json.loads(response.decode())
except (subprocess.CalledProcessError, ValueError):
return None
def _get_unique_value(attribute, text):
"""Match attribute in text and return unique match.
:returns: Single match.
"""
values = _get_values(attribute, text)
n = len(values)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return values[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _get_line_and_value(attribute, text, value=None):
"""Match attribute in text. Return the line and the value of the attribute."""
if value is None:
regex = rf'({re.escape(attribute)}\s+=\s+\"(.*)\";)'
else:
regex = rf'({re.escape(attribute)}\s+=\s+\"({re.escape(value)})\";)'
regex = re.compile(regex)
results = regex.findall(text)
n = len(results)
if n > 1:
raise ValueError("found too many values for {}".format(attribute))
elif n == 1:
return results[0]
else:
raise ValueError("no value found for {}".format(attribute))
def _replace_value(attribute, value, text, oldvalue=None):
"""Search and replace value of attribute in text."""
if oldvalue is None:
old_line, old_value = _get_line_and_value(attribute, text)
else:
old_line, old_value = _get_line_and_value(attribute, text, oldvalue)
new_line = old_line.replace(old_value, value)
new_text = text.replace(old_line, new_line)
return new_text
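# For illustration: if text contains `version = "1.0.0";`, then
# _replace_value('version', '1.1.0', text) rewrites that line to
# `version = "1.1.0";` and leaves the rest of the text untouched.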
def _fetch_page(url):
r = requests.get(url)
if r.status_code == requests.codes.ok:
return r.json()
else:
raise ValueError("request for {} failed".format(url))
def _fetch_github(url):
headers = {}
token = os.environ.get('GITHUB_API_TOKEN')
if token:
headers["Authorization"] = f"token {token}"
r = requests.get(url, headers=headers)
if r.status_code == requests.codes.ok:
return r.json()
else:
raise ValueError("request for {} failed".format(url))
def _hash_to_sri(algorithm, value):
"""Convert a hash to its SRI representation"""
return subprocess.check_output([
"nix",
"hash",
"to-sri",
"--type", algorithm,
value
]).decode().strip()
def _skip_bulk_update(attr_name: str) -> bool:
return bool(_get_attr_value(
f"{attr_name}.skipBulkUpdate"
))
SEMVER = {
'major' : 0,
'minor' : 1,
'patch' : 2,
}
def _determine_latest_version(current_version, target, versions):
"""Determine latest version, given `target`.
"""
current_version = Version(current_version)
def _parse_versions(versions):
for v in versions:
try:
yield Version(v)
except InvalidVersion:
pass
versions = _parse_versions(versions)
index = SEMVER[target]
ceiling = list(current_version[0:index])
if len(ceiling) == 0:
ceiling = None
else:
        ceiling[-1] += 1
ceiling = Version(".".join(map(str, ceiling)))
# We do not want prereleases
versions = SpecifierSet(prereleases=PRERELEASES).filter(versions)
if ceiling is not None:
versions = SpecifierSet(f"<{ceiling}").filter(versions)
return (max(sorted(versions))).raw_version
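# Worked example (hypothetical version numbers): with current_version "1.2.3"
# and target "minor", index is 1, so the ceiling becomes Version("2") and only
# candidates < 2 are considered, i.e. the newest 1.x release is returned.
# With target "patch" the ceiling is "1.3" (newest 1.2.x); with target "major"
# the ceiling is None and the newest release overall wins.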
def _get_latest_version_pypi(package, extension, current_version, target):
"""Get latest version and hash from PyPI."""
url = "{}/{}/json".format(INDEX, package)
    data = _fetch_page(url)
    versions = data['releases'].keys()
    version = _determine_latest_version(current_version, target, versions)
    try:
        releases = data['releases'][version]
except KeyError as e:
raise KeyError('Could not find version {} for {}'.format(version, package)) from e
for release in releases:
if release['filename'].endswith(extension):
# TODO: In case of wheel we need to do further checks!
sha256 = release['digests']['sha256']
break
else:
sha256 = None
return version, sha256, None
def _get_latest_version_github(package, extension, current_version, target):
def strip_prefix(tag):
return re.sub("^[^0-9]*", "", tag)
def get_prefix(string):
matches = re.findall(r"^([^0-9]*)", string)
return next(iter(matches), "")
# when invoked as an updateScript, UPDATE_NIX_ATTR_PATH will be set
# this allows us to work with packages which live outside of python-modules
attr_path = os.environ.get("UPDATE_NIX_ATTR_PATH", f"python3Packages.{package}")
try:
homepage = subprocess.check_output(
["nix", "eval", "-f", f"{NIXPKGS_ROOT}/default.nix", "--raw", f"{attr_path}.src.meta.homepage"])\
.decode('utf-8')
except Exception as e:
raise ValueError(f"Unable to determine homepage: {e}")
owner_repo = homepage[len("https://github.com/"):] # remove prefix
owner, repo = owner_repo.split("/")
url = f"https://api.github.com/repos/{owner}/{repo}/releases"
all_releases = _fetch_github(url)
releases = list(filter(lambda x: not x['prerelease'], all_releases))
if len(releases) == 0:
raise ValueError(f"{homepage} does not contain any stable releases")
versions = map(lambda x: strip_prefix(x['tag_name']), releases)
version = _determine_latest_version(current_version, target, versions)
release = next(filter(lambda x: strip_prefix(x['tag_name']) == version, releases))
prefix = get_prefix(release['tag_name'])
    # some src attributes require fetching via git (nix-prefetch-git)
git_fetcher_args = []
if (_get_attr_value(f"{attr_path}.src.fetchSubmodules")):
git_fetcher_args.append("--fetch-submodules")
if (_get_attr_value(f"{attr_path}.src.fetchLFS")):
git_fetcher_args.append("--fetch-lfs")
if (_get_attr_value(f"{attr_path}.src.leaveDotGit")):
git_fetcher_args.append("--leave-dotGit")
if git_fetcher_args:
algorithm = "sha256"
cmd = [
"nix-prefetch-git",
f"https://github.com/{owner}/{repo}.git",
"--hash", algorithm,
"--rev", f"refs/tags/{release['tag_name']}"
]
cmd.extend(git_fetcher_args)
response = subprocess.check_output(cmd)
document = json.loads(response.decode())
hash = _hash_to_sri(algorithm, document[algorithm])
else:
try:
hash = subprocess.check_output([
"nix-prefetch-url",
"--type", "sha256",
"--unpack",
f"{release['tarball_url']}"
], stderr=subprocess.DEVNULL).decode('utf-8').strip()
except (subprocess.CalledProcessError, UnicodeError):
            # this may fail if the repo has both a branch and a tag with the same name; retry with an explicit tag ref
            tag_url = str(release['tarball_url']).replace("tarball", "tarball/refs/tags")
hash = subprocess.check_output([
"nix-prefetch-url",
"--type", "sha256",
"--unpack",
tag_url
], stderr=subprocess.DEVNULL).decode('utf-8').strip()
return version, hash, prefix
FETCHERS = {
'fetchFromGitHub' : _get_latest_version_github,
'fetchPypi' : _get_latest_version_pypi,
'fetchurl' : _get_latest_version_pypi,
}
DEFAULT_SETUPTOOLS_EXTENSION = 'tar.gz'
FORMATS = {
'setuptools' : DEFAULT_SETUPTOOLS_EXTENSION,
'wheel' : 'whl',
'pyproject' : 'tar.gz',
'flit' : 'tar.gz'
}
def _determine_fetcher(text):
# Count occurrences of fetchers.
nfetchers = sum(text.count('src = {}'.format(fetcher)) for fetcher in FETCHERS.keys())
if nfetchers == 0:
raise ValueError("no fetcher.")
elif nfetchers > 1:
raise ValueError("multiple fetchers.")
else:
# Then we check which fetcher to use.
for fetcher in FETCHERS.keys():
if 'src = {}'.format(fetcher) in text:
return fetcher
def _determine_extension(text, fetcher):
"""Determine what extension is used in the expression.
If we use:
- fetchPypi, we check if format is specified.
- fetchurl, we determine the extension from the url.
- fetchFromGitHub we simply use `.tar.gz`.
"""
if fetcher == 'fetchPypi':
try:
src_format = _get_unique_value('format', text)
except ValueError:
src_format = None # format was not given
try:
extension = _get_unique_value('extension', text)
except ValueError:
extension = None # extension was not given
if extension is None:
if src_format is None:
src_format = 'setuptools'
elif src_format == 'other':
raise ValueError("Don't know how to update a format='other' package.")
extension = FORMATS[src_format]
elif fetcher == 'fetchurl':
url = _get_unique_value('url', text)
extension = os.path.splitext(url)[1]
if 'pypi' not in url:
raise ValueError('url does not point to PyPI.')
elif fetcher == 'fetchFromGitHub':
extension = "tar.gz"
return extension
def _update_package(path, target):
# Read the expression
with open(path, 'r') as f:
text = f.read()
# Determine pname. Many files have more than one pname
pnames = _get_values('pname', text)
# Determine version.
version = _get_unique_value('version', text)
# First we check how many fetchers are mentioned.
fetcher = _determine_fetcher(text)
extension = _determine_extension(text, fetcher)
# Attempt a fetch using each pname, e.g. backports-zoneinfo vs backports.zoneinfo
successful_fetch = False
for pname in pnames:
if BULK_UPDATE and _skip_bulk_update(f"python3Packages.{pname}"):
raise ValueError(f"Bulk update skipped for {pname}")
try:
new_version, new_sha256, prefix = FETCHERS[fetcher](pname, extension, version, target)
successful_fetch = True
break
except ValueError:
continue
if not successful_fetch:
raise ValueError(f"Unable to find correct package using these pnames: {pnames}")
if new_version == version:
logging.info("Path {}: no update available for {}.".format(path, pname))
return False
elif Version(new_version) <= Version(version):
raise ValueError("downgrade for {}.".format(pname))
if not new_sha256:
raise ValueError("no file available for {}.".format(pname))
text = _replace_value('version', new_version, text)
    # hashes from PyPI are base16 (hex) encoded sha256 digests; normalize them to SRI to avoid merge conflicts
    # SRI hashes have been the default format since Nix 2.4+
sri_hash = _hash_to_sri("sha256", new_sha256)
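    # e.g. a 64-character hex digest becomes a string of the form
    # "sha256-<base64 of the same digest>" (illustrative form, not a real hash)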
# retrieve the old output hash for a more precise match
if old_hash := _get_attr_value(f"python3Packages.{pname}.src.outputHash"):
# fetchers can specify a sha256, or a sri hash
try:
text = _replace_value('hash', sri_hash, text, old_hash)
except ValueError:
text = _replace_value('sha256', sri_hash, text, old_hash)
else:
raise ValueError(f"Unable to retrieve old hash for {pname}")
if fetcher == 'fetchFromGitHub':
# in the case of fetchFromGitHub, it's common to see `rev = version;` or `rev = "v${version}";`
# in which no string value is meant to be substituted. However, we can just overwrite the previous value.
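        # For illustration, assuming the release tag was "v1.2.3" (prefix "v"):
        #     rev = "v${version}";   ->   rev = "refs/tags/v${version}";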
regex = r'(rev\s+=\s+[^;]*;)'
regex = re.compile(regex)
matches = regex.findall(text)
n = len(matches)
if n == 0:
raise ValueError("Unable to find rev value for {}.".format(pname))
else:
            # forcefully rewrite rev, in case tagging conventions changed for a release
match = matches[0]
text = text.replace(match, f'rev = "refs/tags/{prefix}${{version}}";')
            # in case there's no prefix, just rewrite without interpolation
text = text.replace('"${version}";', 'version;')
with open(path, 'w') as f:
f.write(text)
logging.info("Path {}: updated {} from {} to {}".format(path, pname, version, new_version))
result = {
'path' : path,
'target': target,
'pname': pname,
'old_version' : version,
'new_version' : new_version,
#'fetcher' : fetcher,
}
return result
def _update(path, target):
# We need to read and modify a Nix expression.
if os.path.isdir(path):
path = os.path.join(path, 'default.nix')
# If a default.nix does not exist, we quit.
if not os.path.isfile(path):
logging.info("Path {}: does not exist.".format(path))
return False
# If file is not a Nix expression, we quit.
if not path.endswith(".nix"):
logging.info("Path {}: does not end with `.nix`.".format(path))
return False
try:
return _update_package(path, target)
except ValueError as e:
logging.warning("Path {}: {}".format(path, e))
return False
def _commit(path, pname, old_version, new_version, pkgs_prefix="python: ", **kwargs):
"""Commit result.
"""
msg = f'{pkgs_prefix}{pname}: {old_version} -> {new_version}'
try:
subprocess.check_call([GIT, 'add', path])
subprocess.check_call([GIT, 'commit', '-m', msg])
except subprocess.CalledProcessError as e:
subprocess.check_call([GIT, 'checkout', path])
        # CalledProcessError requires (returncode, cmd) arguments; raise a plain error instead
        raise RuntimeError(f'Could not commit {path}') from e
return True
def main():
epilog = """
environment variables:
GITHUB_API_TOKEN\tGitHub API token used when updating github packages
"""
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, epilog=epilog)
parser.add_argument('package', type=str, nargs='+')
parser.add_argument('--target', type=str, choices=SEMVER.keys(), default='major')
parser.add_argument('--commit', action='store_true', help='Create a commit for each package update')
parser.add_argument('--use-pkgs-prefix', action='store_true', help='Use python3Packages.${pname}: instead of python: ${pname}: when making commits')
args = parser.parse_args()
target = args.target
packages = list(map(os.path.abspath, args.package))
if len(packages) > 1:
global BULK_UPDATE
BULK_UPDATE = True
logging.info("Updating packages...")
# Use threads to update packages concurrently
with Pool() as p:
results = list(filter(bool, p.map(lambda pkg: _update(pkg, target), packages)))
logging.info("Finished updating packages.")
commit_options = {}
if args.use_pkgs_prefix:
logging.info("Using python3Packages. prefix for commits")
commit_options["pkgs_prefix"] = "python3Packages."
# Commits are created sequentially.
if args.commit:
logging.info("Committing updates...")
# list forces evaluation
list(map(lambda x: _commit(**x, **commit_options), results))
logging.info("Finished committing updates")
count = len(results)
logging.info("{} package(s) updated".format(count))
if __name__ == '__main__':
main()
| 17,659 | 31.050817 | 152 |
py
|
nixpkgs
|
nixpkgs-master/pkgs/development/interpreters/python/catch_conflicts/catch_conflicts.py
|
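# Scan every sys.path entry for installed distributions and abort (exit 1)
# when the same project appears more than once in the closure, which usually
# means two packages pulled in different versions of the same dependency.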
import pkg_resources
import collections
import sys
do_abort = False
packages = collections.defaultdict(list)
for f in sys.path:
for req in pkg_resources.find_distributions(f):
if req not in packages[req.project_name]:
# some exceptions inside buildPythonPackage
if req.project_name in ['setuptools', 'pip', 'wheel']:
continue
packages[req.project_name].append(req)
for name, duplicates in packages.items():
if len(duplicates) > 1:
do_abort = True
print("Found duplicated packages in closure for dependency '{}': ".format(name))
for dup in duplicates:
print(" " + repr(dup))
if do_abort:
print("")
print(
'Package duplicates found in closure, see above. Usually this '
        'happens if two packages depend on different versions '
'of the same dependency.')
sys.exit(1)
| 908 | 28.322581 | 88 |
py
|