# coding: utf-8
import matplotlib.pyplot as plt
from perceptron import prepare_data, train_batch
from const import FEATURE_NUM, IMG_FOLDER, CUR_FOLDER
path = CUR_FOLDER + IMG_FOLDER
def plot_dot(x, y, data='0', y_label='number of updates', margin=False,
agg=False):
fig = plt.figure()
ax = fig.add_subplot(111)
if not margin:
title = 'simple perceptron'
else:
title = 'margin perceptron'
ax.set_title(title)
ax.set_xlabel('dimension')
ax.set_ylabel(y_label)
ax.plot(x, y, 'o')
x_lo, x_hi = min(x), max(x)
y_lo, y_hi = min(y), max(y)
ax.axis([x_lo-10, x_hi+10, y_lo-0.01, y_hi+0.01])
if not agg:
plt.savefig(path+data+title+y_label+' not agg.png')
else:
plt.savefig(path+data+title+y_label+' agg.png')
def process_data():
data = prepare_data()
# simple perceptron
mistakes, precisions = train_batch(data, r=0.8)
plot_dot(FEATURE_NUM, mistakes[0][0])
plot_dot(FEATURE_NUM, precisions[0][0], y_label='precision')
plot_dot(FEATURE_NUM, mistakes[1][0], data='1')
plot_dot(FEATURE_NUM, precisions[1][0], data='1', y_label='precision')
# margin perceptron
mistakes, precisions = train_batch(data, mu=0.1, r=0.8)
plot_dot(FEATURE_NUM, mistakes[0][1], margin=True)
plot_dot(FEATURE_NUM, precisions[0][1], margin=True, y_label='precision')
plot_dot(FEATURE_NUM, mistakes[1][1], margin=True,
data='1')
plot_dot(FEATURE_NUM, precisions[1][1], margin=True, y_label='precision',
data='1')
# simple aggressive perceptron
mistakes, precisions = train_batch(data, r=0.8, agg=True)
plot_dot(FEATURE_NUM, mistakes[0][0], agg=True)
plot_dot(FEATURE_NUM, precisions[0][0], agg=True, y_label='precision')
plot_dot(FEATURE_NUM, mistakes[1][0], agg=True, data='1')
plot_dot(FEATURE_NUM, precisions[1][0], agg=True, data='1',
y_label='precision')
# margin aggressive perceptron
mistakes, precisions = train_batch(data, mu=0.1, r=0.8, agg=True)
plot_dot(FEATURE_NUM, mistakes[1][0], margin=True, agg=True)
plot_dot(FEATURE_NUM, precisions[1][0], margin=True, y_label='precision',
agg=True)
plot_dot(FEATURE_NUM, mistakes[1][1], margin=True,
agg=True, data='1')
plot_dot(FEATURE_NUM, precisions[1][1], margin=True,
y_label='precision', agg=True, data='1')
if __name__ == "__main__":
process_data()
|
// Godeps/_workspace/src/github.com/untoldwind/routing/matcher.go
package routing
import "net/http"
type Matcher func(remainingPath string, resp http.ResponseWriter, req *http.Request) bool
|
def config_dump(config_obj, config_path, keyfile_path):
if keyfile_path is None:
files.json_dump(config_obj, config_path)
else:
box = crypto.load_box(keyfile_path)
crypto.json_secret_dump(config_obj, box, config_path) |
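A minimal usage sketch for the config_dump helper above, assuming files and crypto are the surrounding module's own helpers (not shown here) and that the paths are placeholders:
# Plain JSON dump when no keyfile is supplied
config_dump({"region": "eu-west-1"}, "config.json", None)
# Encrypted dump when a keyfile path is supplied
config_dump({"region": "eu-west-1"}, "config.json.enc", "secret.key")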
import awesome_print
class AttrDict(object):
def __init__(self, dct):
self.dict = dct
def __repr__(self):
return repr(self.dict)
def __str__(self):
return awesome_print.awesome_print.format(self.dict)
def __getattr__(self, attr):
try:
val = self.dict[attr]
if isinstance(val, dict):
val = AttrDict(val)
return val
except KeyError:
raise AttributeError
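A short usage sketch for AttrDict: nested dicts are wrapped on attribute access, so dotted lookups walk the structure (awesome_print is only needed for str(); the values below are placeholders):
cfg = AttrDict({"db": {"host": "localhost", "port": 5432}})
print(cfg.db.host)  # "localhost" -- cfg.db is itself wrapped in an AttrDict
print(cfg.db.port)  # 5432
try:
    cfg.missing
except AttributeError:
    print("unknown keys raise AttributeError")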
|
/*
* Waltz - Enterprise Architecture
* Copyright (C) 2016, 2017, 2018, 2019 Waltz open source project
* See README.md for more information
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.khartec.waltz.model.physical_specification;
import com.khartec.waltz.common.Aliases;
import com.khartec.waltz.common.EnumUtilities;
import java.util.function.Function;
/**
* How a specification manifests itself 'physically'.
* <p/>
* The UNKNOWN option is not intended to be exposed as
* a selectable choice for users. It is intended to be
* used when bulk importing from systems which do not have
* any equivalent format representation.
*/
public enum DataFormatKind {
BINARY,
DATABASE,
FLAT_FILE,
JSON,
OTHER,
UNSTRUCTURED,
UNKNOWN,
XML;
private static final Aliases<DataFormatKind> defaultAliases = new Aliases<>()
.register(DataFormatKind.FLAT_FILE,
"CSV_FILE", "CSV", "EXCEL", "PDF", "EXCEL/PDF",
"FILE", "TEXT", "XL", "XLS", "XLM", "PIPE DELIMITED")
.register(DataFormatKind.DATABASE, "DATA")
.register(DataFormatKind.UNSTRUCTURED, "EMAIL", "STRING", "TXT")
.register(DataFormatKind.OTHER, "OTHER", "JOB_ROW_CRM")
.register(DataFormatKind.XML, "Webservice");
public static DataFormatKind parse(String value, Function<String, DataFormatKind> failedParseSupplier) {
return EnumUtilities.parseEnumWithAliases(value, DataFormatKind.class, failedParseSupplier, defaultAliases);
}
}
|
When George Ayittey saw rock star Bono in the audience at a conference, he made a special effort to rip into the foreign aid establishment.
Bono later told Ayittey that he liked his speech but did not agree that foreign aid is ineffective at ending poverty.
Ayittey gave Bono a copy of his book, “Africa Unchained: The Blueprint for Development.”
Bono is the lead singer of one of the most successful rock groups in history. He has also been a major advocate of foreign aid and other government programs to alleviate the suffering of poor nations.
Recently, Bono has been singing a new tune, and it appears he has been influenced by Ayittey’s book.
At a recent Georgetown University speech, Bono declared that only capitalism can end poverty.
“Aid is just a stopgap,” he said. “Commerce and entrepreneurial capitalism take more people out of poverty than aid. We need Africa to become an economic powerhouse.”
Not the sort of thing we are accustomed to hearing from pandering rock stars. Even Bono said he was shocked to hear himself.
Africa needs electric power plants, not lectures on solar panels.
Africa needs property rights and the rule of law, not the rule of the jungle or Karl Marx.
Africa needs investment, not pity.
Bono and Ayittey encourage students to think about how they can support Africans in need of improved living standards by creating businesses and increasing trade, not by giving handouts.
“That’s what I’m hoping happens here at Georgetown with you,” he said.
“Because when you truly accept that those children in some far off place in the global village have the same value as you in God’s eyes or even in just your eyes, then your life is forever changed, you see something that you can’t un-see.”
The same is true when one investigates history. When you see the overwhelming evidence that no system has raised more people out of poverty than free-market capitalism – you can’t un-see it.
This understanding is beginning to pop up in the most unlikely of places.
A recent statement out of the Obama White House stated, “Economic growth is the only sustainable way to accelerate development and eradicate poverty.”
Hey, if it’s the solution for Africa, shouldn’t we be focusing on the same thing here? |
// This module contains a collection of YANG definitions
// for Cisco IOS-XR infra-infra package configuration.
//
// This module contains definitions
// for the following management objects:
// banners: Schema for Banner configuration commands
//
// Copyright (c) 2013-2017 by Cisco Systems, Inc.
// All rights reserved.
package infra_infra_cfg
import (
"fmt"
"github.com/CiscoDevNet/ydk-go/ydk"
"github.com/CiscoDevNet/ydk-go/ydk/types"
"github.com/CiscoDevNet/ydk-go/ydk/types/yfilter"
"github.com/CiscoDevNet/ydk-go/ydk/models/cisco_ios_xr"
"reflect"
)
func init() {
ydk.YLogDebug(fmt.Sprintf("Registering top level entities for package infra_infra_cfg"))
ydk.RegisterEntity("{http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg banners}", reflect.TypeOf(Banners{}))
ydk.RegisterEntity("Cisco-IOS-XR-infra-infra-cfg:banners", reflect.TypeOf(Banners{}))
}
// Banner represents Banner
type Banner string
const (
// Set EXEC process creation banner
Banner_exec Banner = "exec"
// Set incoming terminal line banner
Banner_incoming Banner = "incoming"
// Set Message of the Day banner
Banner_motd Banner = "motd"
// Set login banner
Banner_login Banner = "login"
// Set Message for SLIP/PPP
Banner_slip_ppp Banner = "slip-ppp"
// Set Message for login authentication timeout
Banner_prompt_timeout Banner = "prompt-timeout"
)
// Banners
// Schema for Banner configuration commands
type Banners struct {
parent types.Entity
YFilter yfilter.YFilter
// Select a Banner Type. The type is slice of Banners_Banner.
Banner []Banners_Banner
}
func (banners *Banners) GetFilter() yfilter.YFilter { return banners.YFilter }
func (banners *Banners) SetFilter(yf yfilter.YFilter) { banners.YFilter = yf }
func (banners *Banners) GetGoName(yname string) string {
if yname == "banner" { return "Banner" }
return ""
}
func (banners *Banners) GetSegmentPath() string {
return "Cisco-IOS-XR-infra-infra-cfg:banners"
}
func (banners *Banners) GetChildByName(childYangName string, segmentPath string) types.Entity {
if childYangName == "banner" {
for _, c := range banners.Banner {
if c.GetSegmentPath() == segmentPath {
return &c
}
}
child := Banners_Banner{}
banners.Banner = append(banners.Banner, child)
return &banners.Banner[len(banners.Banner)-1]
}
return nil
}
func (banners *Banners) GetChildren() map[string]types.Entity {
children := make(map[string]types.Entity)
for i := range banners.Banner {
children[banners.Banner[i].GetSegmentPath()] = &banners.Banner[i]
}
return children
}
func (banners *Banners) GetLeafs() map[string]interface{} {
leafs := make(map[string]interface{})
return leafs
}
func (banners *Banners) GetBundleName() string { return "cisco_ios_xr" }
func (banners *Banners) GetYangName() string { return "banners" }
func (banners *Banners) GetBundleYangModelsLocation() string { return cisco_ios_xr.GetModelsPath() }
func (banners *Banners) GetCapabilitiesTable() map[string]string {
return cisco_ios_xr.GetCapabilities() }
func (banners *Banners) GetNamespaceTable() map[string]string {
return cisco_ios_xr.GetNamespaces() }
func (banners *Banners) SetParent(parent types.Entity) { banners.parent = parent }
func (banners *Banners) GetParent() types.Entity { return banners.parent }
func (banners *Banners) GetParentYangName() string { return "Cisco-IOS-XR-infra-infra-cfg" }
// Banners_Banner
// Select a Banner Type
type Banners_Banner struct {
parent types.Entity
YFilter yfilter.YFilter
// This attribute is a key. Banner Type. The type is Banner.
BannerName interface{}
// Banner text message. The type is string. This attribute is mandatory.
BannerText interface{}
}
func (banner *Banners_Banner) GetFilter() yfilter.YFilter { return banner.YFilter }
func (banner *Banners_Banner) SetFilter(yf yfilter.YFilter) { banner.YFilter = yf }
func (banner *Banners_Banner) GetGoName(yname string) string {
if yname == "banner-name" { return "BannerName" }
if yname == "banner-text" { return "BannerText" }
return ""
}
func (banner *Banners_Banner) GetSegmentPath() string {
return "banner" + "[banner-name='" + fmt.Sprintf("%v", banner.BannerName) + "']"
}
func (banner *Banners_Banner) GetChildByName(childYangName string, segmentPath string) types.Entity {
return nil
}
func (banner *Banners_Banner) GetChildren() map[string]types.Entity {
children := make(map[string]types.Entity)
return children
}
func (banner *Banners_Banner) GetLeafs() map[string]interface{} {
leafs := make(map[string]interface{})
leafs["banner-name"] = banner.BannerName
leafs["banner-text"] = banner.BannerText
return leafs
}
func (banner *Banners_Banner) GetBundleName() string { return "cisco_ios_xr" }
func (banner *Banners_Banner) GetYangName() string { return "banner" }
func (banner *Banners_Banner) GetBundleYangModelsLocation() string { return cisco_ios_xr.GetModelsPath() }
func (banner *Banners_Banner) GetCapabilitiesTable() map[string]string {
return cisco_ios_xr.GetCapabilities() }
func (banner *Banners_Banner) GetNamespaceTable() map[string]string {
return cisco_ios_xr.GetNamespaces() }
func (banner *Banners_Banner) SetParent(parent types.Entity) { banner.parent = parent }
func (banner *Banners_Banner) GetParent() types.Entity { return banner.parent }
func (banner *Banners_Banner) GetParentYangName() string { return "banners" }
|
// CrossedOut crosses out provided string.
func CrossedOut(s string) string {
if s == "" {
return ""
}
return CrossedOutOn + s + CrossedOutOff
} |
// based on the longest side of the cell
public double radiusMeters()
{
double width = this.widthMeters();
double height = this.heightMeters();
return MathUtils.overlappingCircleRadius(width, height);
} |
import { fetch } from 'undici'
import http from 'node:http'
import net from 'node:net'
import { fetchEventSource } from '../src/sse/fetchEventSource'
let url: string
let server: http.Server
beforeAll(async () => {
server = http.createServer()
await new Promise<void>((resolve) => server.listen(0, resolve))
const { port } = server.address() as net.AddressInfo
url = `http://localhost:${port}`
})
afterAll(() => server.close())
afterEach(() => server.removeAllListeners())
describe('fetchEventSource', () => {
it('should handle simple response', async () => {
server.on('request', async (req, res) => {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
})
res.write('id: 1337\n')
res.write('event: next\n')
res.write('data: hello\n')
res.write('\n')
res.end()
})
const result = await new Promise((resolve) => {
fetchEventSource(url, {
onMessage: resolve,
fetch: fetch as any,
})
})
expect(result).toMatchInlineSnapshot(`
Object {
"data": "hello",
"event": "next",
"id": "1337",
"retry": undefined,
}
`)
})
it('should handle split data in chunks', async () => {
server.on('request', async (req, res) => {
// `\r` should be ignored
res.write('event: next\r\n')
res.write('data:')
await new Promise((resolve) => setTimeout(resolve, 100))
res.write('hello world\n')
res.write('\n')
res.end()
})
const result = await new Promise((resolve) => {
fetchEventSource(url, {
onMessage: resolve,
fetch: fetch as any,
})
})
expect(result).toMatchInlineSnapshot(`
Object {
"data": "hello world",
"event": "next",
"id": "",
"retry": undefined,
}
`)
})
it('should abort', async () => {
server.on('request', async (req, res) => {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
})
res.write('event: next\n')
res.write('data: hello\n')
res.write('\n')
})
const ac = new AbortController()
const sse = fetchEventSource(url, {
signal: ac.signal,
fetch: fetch as any,
})
process.nextTick(() => ac.abort())
await expect(sse).resolves.toBeUndefined()
expect(ac.signal.aborted).toBe(true)
})
})
|
import boto3
class S3Handler:
"""
Interface for interacting with an S3 Bucket.
"""
def __init__(self, bucket):
self.s3 = boto3.resource('s3')
self.s3_client = boto3.client('s3')
self.s3_bucket = self.s3.Bucket(bucket)
def store_content_in_s3(self, obj_key: str, content: str):
s3_obj = self._s3_object(obj_key)
s3_obj.put(Body=content)
return s3_obj.key
def load_content_from_obj_key(self, obj_key):
obj = self._s3_object(obj_key)
content = obj.get()['Body'].read()
return content.decode().strip()
def _s3_object(self, obj_key: str):
return self.s3_bucket.Object(obj_key)
def copy_obj(self, src_key, dst_key):
src = {
'Bucket': self.s3_bucket.name,
'Key': src_key
}
self.s3_bucket.copy(src, dst_key)
def ls(self, key):
response = self.s3_client.list_objects_v2(Bucket=self.s3_bucket.name, Prefix=key)
return response.get('Contents', [])
def exists(self, key):
objects = self.ls(key)
return len(objects) > 0
def size(self, key):
response = self.s3_client.list_objects_v2(Bucket=self.s3_bucket.name, Prefix=key)
contents = response.get('Contents', [])
if len(contents) != 1:
return None
return contents[0]['Size']
def delete_objects(self, keys: list) -> list:
"""
Deletes a list of keys from this S3 bucket.
:param keys: list of S3 keys to delete
:return: List of successfully deleted objects
"""
objects = [{'Key': key} for key in keys]
response = self.s3_bucket.delete_objects(
Delete={'Objects': objects}
)
return response['Deleted'] if 'Deleted' in response else [] |
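A brief usage sketch for the S3Handler class above, assuming AWS credentials are configured; the bucket and key names are placeholders:
handler = S3Handler("my-bucket")
key = handler.store_content_in_s3("reports/2021/summary.txt", "hello world")
print(handler.load_content_from_obj_key(key))  # "hello world"
print(handler.exists("reports/2021/"))         # True, at least one key has this prefix
handler.copy_obj(key, "reports/latest.txt")
deleted = handler.delete_objects([key, "reports/latest.txt"])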
// Game state transition (Initialize, Move, Build)
public void nextState() {
if (nextMove)
{
nextMove=false;
switch (player_state)
{
case Game.STATE_INIT:{
if (player_turn==1)
{
player_state=Game.STATE_MOVE;
}
player_turn=(1-player_turn);
break;
}
case Game.STATE_MOVE:{
player_state=Game.STATE_BUILD;
break;
}
case Game.STATE_BUILD:{
player_turn=(1-player_turn);
player_state=Game.STATE_MOVE;
myActivity.refreshBoard();
break;
}
case Game.STATE_END:{
player_turn=winner;
myActivity.setTitleWon();
break;
}
default:{
break;
}
}
myActivity.nextMoveRefresh();
}
if ((GameActivity.bot_view==1) && (GameActivity.bots[player_turn]))
{
Log.d("SANTORINI_LOG","Bots now plays");
botPlayNextMove(GameActivity.difficulty);
}
} |
Evaluating the intersection between "green events" and sense of community at Liverpool's Lark Lane Farmers Market.
More research is needed to evaluate the links between community psychology and event impacts. Events are not just entertainment-focused economic drivers, but gatherings contributing to society, community, and local cultural identity. There is also a need to recognize that "green" philosophies, while visible and widespread, are not only environmentally focused but also locally community focused, concerning elements of social sustainability, belonging, and sense of community. This makes the discussion of "green events" relevant to community psychology and local well-being researchers. The aim of this study is to explore how green events could contribute to local community well-being by investigating how farmers markets bring people together, in order to understand how such events reinforce and shape a sense of community. This paper draws on participant observations and semi-structured interviews to explore the conceptual notion of sense of community. Three emergent themes are presented: local participation, social atmosphere, and a sense of belonging. |
/**
* @file src/r2plugin/r2cgen.cpp
* @brief C code generation and token marking.
* @copyright (c) 2020 Avast Software, licensed under the MIT license.
*/
#include <fstream>
#include <optional>
#include "r2plugin/r2data.h"
#include "r2plugin/r2cgen.h"
using namespace retdec::r2plugin;
/**
* Translation map between decompilation JSON output and r2 understandable
* annotations.
*/
std::map<const std::string, RSyntaxHighlightType> R2CGenerator::_hig2token = {
// {"nl", ... }
// {"ws", ... }
// {"punc", ... }
// {"op", ... }
{"i_var", R_SYNTAX_HIGHLIGHT_TYPE_GLOBAL_VARIABLE},
// {"i_var", R_SYNTAX_HIGHLIGHT_TYPE_LOCAL_VARIABLE},
// {"i_mem", R_SYNTAX_HIGHLIGHT_TYPE_DATATYPE},
{"i_lab", R_SYNTAX_HIGHLIGHT_TYPE_KEYWORD},
{"i_fnc", R_SYNTAX_HIGHLIGHT_TYPE_FUNCTION_NAME},
{"i_arg", R_SYNTAX_HIGHLIGHT_TYPE_FUNCTION_PARAMETER},
{"keyw" , R_SYNTAX_HIGHLIGHT_TYPE_KEYWORD},
{"type" , R_SYNTAX_HIGHLIGHT_TYPE_DATATYPE},
{"preproc" , R_SYNTAX_HIGHLIGHT_TYPE_KEYWORD},
{"inc", R_SYNTAX_HIGHLIGHT_TYPE_COMMENT},
{"l_bool", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"l_int", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"l_fp", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"l_str", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"l_sym", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"l_ptr", R_SYNTAX_HIGHLIGHT_TYPE_CONSTANT_VARIABLE},
{"cmnt" , R_SYNTAX_HIGHLIGHT_TYPE_COMMENT}
};
/**
* Translation map interaction method. Using this method is preferred for obtaining an r2-understandable
* annotation from a JSON config token.
*/
std::optional<RSyntaxHighlightType> R2CGenerator::highlightTypeForToken(const std::string &token) const
{
if (_hig2token.count(token)) {
return _hig2token.at(token);
}
return {};
}
/**
* Generates annotated code from RetDec's output obtained as JSON.
*
* @param root The root of JSON decompilation output.
*/
RAnnotatedCode* R2CGenerator::provideAnnotations(const rapidjson::Document &root) const
{
RAnnotatedCode *code = r_annotated_code_new(nullptr);
if (code == nullptr) {
throw DecompilationError("unable to allocate memory");
}
std::ostringstream planecode;
std::optional<unsigned long> lastAddr;
if (!root["tokens"].IsArray()) {
throw DecompilationError("malformed JSON");
}
auto tokens = root["tokens"].GetArray();
for (auto& token: tokens) {
if (token.HasMember("addr")) {
std::string addrRaw = token["addr"].GetString();
if (addrRaw == "") {
lastAddr.reset();
}
else {
try {
lastAddr = std::stoll(addrRaw, nullptr, 16);
} catch (std::exception &e) {
throw DecompilationError("invalid address: "+addrRaw);
}
}
continue;
}
else if (token.HasMember("val") && token.HasMember("kind")) {
unsigned long bpos = planecode.tellp();
planecode << token["val"].GetString();
unsigned long epos = planecode.tellp();
if (lastAddr.has_value()) {
RCodeAnnotation annotation = {};
annotation.type = R_CODE_ANNOTATION_TYPE_OFFSET;
annotation.offset.offset = lastAddr.value();
annotation.start = bpos;
annotation.end = epos;
r_annotated_code_add_annotation(code, &annotation);
}
auto higlight = highlightTypeForToken(token["kind"].GetString());
if (higlight.has_value()) {
RCodeAnnotation annotation = {};
annotation.type = R_CODE_ANNOTATION_TYPE_SYNTAX_HIGHLIGHT;
annotation.syntax_highlight.type = higlight.value();
annotation.start = bpos;
annotation.end = epos;
r_annotated_code_add_annotation(code, &annotation);
}
}
else {
throw DecompilationError("malformed RetDec JSON output");
}
}
std::string str = planecode.str();
code->code = reinterpret_cast<char *>(r_malloc(str.length() + 1));
if(!code->code) {
r_annotated_code_free(code);
throw DecompilationError("unable to allocate memory");
}
memcpy(code->code, str.c_str(), str.length());
code->code[str.length()] = '\0';
return code;
}
/**
* Generates output by parsing RetDec's JSON output and calling R2CGenerator::provideAnnotations.
*/
RAnnotatedCode* R2CGenerator::generateOutput(const std::string &rdoutJson) const
{
std::ifstream jsonFile(rdoutJson, std::ios::in | std::ios::binary);
if (!jsonFile) {
throw DecompilationError("unable to open RetDec output: "+rdoutJson);
}
std::string jsonContent;
jsonFile.seekg(0, std::ios::end);
jsonContent.resize(jsonFile.tellg());
jsonFile.seekg(0, std::ios::beg);
jsonFile.read(&jsonContent[0], jsonContent.size());
jsonFile.close();
rapidjson::Document root;
rapidjson::ParseResult success = root.Parse(jsonContent);
if (!success) {
throw DecompilationError("unable to parse RetDec JSON output");
}
return provideAnnotations(root);
}
|
def reset(self, reset_params = None):
vis_obs, vec_obs = self._env.reset(reset_params = reset_params)
vis_obs = self._vis_obs_to_gray(vis_obs)
return vis_obs, vec_obs |
OXIDATION OF PSYCHOTROPIC DRUGS BY BROMAMINE-B IN ACIDIC BUFFER: A KINETIC STUDY USING SPECTROPHOTOMETRY
The kinetics of oxidation of chlorpromazine hydrochloride (CPH) and fluphenazine dihydrochloride (FPH) by bromamine-B (BAB) in pH 1.6 buffer solution has been studied spectrophotometrically at λmax = 570 nm and 530 nm, respectively. The reaction rate shows a fractional-order dependence on and a first-order dependence on . The reaction rate also shows a fractional-order dependence on . Additions of halide ions and the reduction product of BAB, benzenesulfonamide, and variation of the ionic strength and dielectric constant of the medium do not have any significant effect on the reaction rate. The reaction was studied at different temperatures and activation parameters were evaluated. The proposed general mechanism and the derived rate law are in agreement with the observed kinetics. |
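For orientation, a rate law of the shape described above (first order in one concentration, fractional order in the others) can be written generically as
\[ \text{rate} = k_{\text{obs}}\,[\mathrm{A}]\,[\mathrm{B}]^{x}\,[\mathrm{C}]^{y}, \qquad 0 < x,\ y < 1 \]
where A, B and C are placeholders for the reacting species; the abstract does not assign them, so this illustrates the functional form only, not the study's derived rate law.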
import React from 'react'
import Navigation from './Navigation'
import { screen, render } from '@testing'
describe('Navigation', () => {
it('renders without crashing', async () => {
render(<Navigation />)
await screen.findByTestId('navigation')
})
})
|
class DecodeContext:
"""
Internal class to keep track of types during typed decoding.
DecodeContext instances are immutable.
"""
doc: Union[str, JSONObject] # pylint: disable=unsubscriptable-object
pos: Pos
globalns: Dict
localns: Dict
types: TypeTuple
get_args: Callable[[TypeHint], Tuple[TypeHint]] = None
origs: OrigTuple = None
args: ArgsTuple = None
parent: Optional["DecodeContext"] = None # pylint: disable=unsubscriptable-object
pkey: Optional[ # pylint: disable=unsubscriptable-object
Union[int, str] # pylint: disable=unsubscriptable-object
] = None
@staticmethod
def process_tp(tp, *, globalns=None, localns=None):
if get_origin(tp) is Union:
return get_args(tp)
return (tp,)
def __attrs_post_init__(self, /):
# cache types origins and arguments for performance
if self.get_args is None:
self.__dict__["get_args"] = functools.partial(
get_args, globalns=self.globalns, localns=self.localns
)
if self.origs is None:
self.__dict__["origs"] = tuple(get_origin(tp) for tp in self.types)
if self.args is None:
self.__dict__["args"] = tuple(self.get_args(tp) for tp in self.types)
if self.parent is not None and self.pkey is None:
raise ValueError("DecodeContext with a parent must be given a parent key")
def err_msg(self, /, *, msg: str = None, value=MISSING) -> str:
parts = []
if msg:
parts.append(msg)
else:
parts.append(
f"Expected type {Union[self.types]}" # pylint: disable=unsubscriptable-object
)
if value is not MISSING:
parts.append(f"got {value}")
parts.append("at")
return ", ".join(parts)
# HELPER METHODS
def filter_types(self, /, filter_func: TypeFilter) -> TypeCache:
return sync_filter(filter_func, self.types, self.origs, self.args)
def filtered(
self, /, filter_func: TypeFilter, err_msg: str, *, err_pos=MISSING
) -> "DecodeContext":
types, origs, args = sync_filter(filter_func, self.types, self.origs, self.args)
if not types:
raise TypedJSONDecodeError(
msg=err_msg,
doc=self.doc,
pos=self.pos if err_pos is MISSING else err_pos,
)
return evolve(self, types=types, origs=origs, args=args)
def get_array_subtypes(self, /, index: int) -> TypeTuple:
subtypes = []
for tp, orig, arg in zip(self.types, self.origs, self.args):
found = MISSING
if isinstance(orig, type):
if issubclass(orig, tuple):
found = arg[index]
elif issubclass(orig, list):
found = arg[0]
elif is_jarray_class(tp):
data: ArrayData = getattr(tp, PHERES_ATTR)
found = data.types[index if data.is_fixed else 0]
if found is MISSING:
raise PheresInternalError(f"Unhandled Array type {tp}")
elif get_origin(found) is Union:
subtypes.extend(self.get_args(found))
else:
subtypes.append(found)
return tuple(subtypes)
def get_object_subtypes(self, /, key: str) -> TypeTuple:
subtypes = []
for tp, orig, arg in zip(self.types, self.origs, self.args):
if isinstance(orig, type) and issubclass(orig, dict):
tp = arg[1]
elif is_jdict_class(tp):
data: DictData = getattr(tp, PHERES_ATTR)
tp = data.type
elif is_jobject_class(tp):
data: ObjectData = getattr(tp, PHERES_ATTR)
tp = data.attrs[key].type
else:
raise PheresInternalError(f"Unhandled Object type {tp}")
if get_origin(tp) is Union:
subtypes.extend(self.get_args(tp))
else:
subtypes.append(tp)
return tuple(subtypes)
# FILTERS AND FILTER FACTORIES
@staticmethod
def accept_array(tp: TypeHint, orig: TypeOrig, arg: TypeArgs) -> bool:
return (
isinstance(orig, type) and issubclass(orig, _JSONArrayTypes)
) or is_jarray_class(tp)
@staticmethod
def accept_min_length(index: int) -> TypeFilter:
def accept(tp: TypeHint, orig: TypeOrig, args: TypeArgs) -> bool:
if isinstance(orig, type):
if issubclass(orig, list):
return True
elif issubclass(orig, tuple):
return len(args) > index
elif is_jarray_class(tp):
data: ArrayData = getattr(tp, PHERES_ATTR)
return not data.is_fixed or len(data.types) > index
return False
return accept
@staticmethod
def accept_array_value(index: int, value: object) -> TypeFilter:
def accept(tp: TypeHint, orig: TypeOrig, args: TypeArgs) -> bool:
if isinstance(orig, type):
if issubclass(orig, tuple):
return typecheck(value, args[index])
elif issubclass(orig, list):
return typecheck(value, args[0])
elif is_jarray_class(tp):
data: ArrayData = getattr(tp, PHERES_ATTR)
return typecheck(value, data.types[index if data.is_fixed else 0])
raise PheresInternalError(f"Unhandled Array type {tp}")
return accept
@staticmethod
def accept_length(length: int) -> TypeFilter:
def accept(tp: TypeHint, orig: TypeOrig, args: TypeArgs) -> bool:
if isinstance(orig, type):
if issubclass(orig, list):
return True
elif issubclass(orig, tuple):
return len(args) == length
elif is_jarray_class(tp):
data: ArrayData = getattr(tp, PHERES_ATTR)
return not data.is_fixed or len(data.types) == length
raise PheresInternalError(f"Unhandled Array type {tp}")
return accept
@staticmethod
def accept_object(tp: TypeHint, orig: TypeOrig, arg: TypeArgs) -> bool:
return (
(isinstance(orig, type) and issubclass(orig, _JSONObjectTypes))
or is_jdict_class(tp)
or is_jobject_class(tp)
)
@staticmethod
def accept_key(key: str) -> TypeFilter:
def accept(tp: TypeHint, orig: TypeOrig, args: TypeArgs) -> bool:
if isinstance(orig, type) and issubclass(orig, dict):
return True
elif is_jdict_class(tp):
return True
elif is_jobject_class(tp):
data: ObjectData = getattr(tp, PHERES_ATTR)
return key in data.attrs
return False
return accept
@staticmethod
def accept_object_value(key: str, value: object) -> TypeFilter:
def accept(tp: TypeHint, orig: TypeOrig, args: TypeArgs) -> bool:
if isinstance(orig, type) and issubclass(orig, dict):
return typecheck(value, args[1])
elif is_jdict_class(tp):
data: DictData = getattr(tp, PHERES_ATTR)
return typecheck(value, data.type)
elif is_jobject_class(tp):
data: ObjectData = getattr(tp, PHERES_ATTR)
return typecheck(value, data.attrs[key].type)
raise PheresInternalError(f"Unhandled Object type {tp}")
return accept
# CONTEXT SPECIALIZATION METHODS
def array_context(self, /) -> "DecodeContext":
return self.filtered(self.accept_array, self.err_msg(value="'Array'"))
def index_context(self, /, index: int, pos: Pos) -> "DecodeContext":
parent = self.filtered(
self.accept_min_length(index),
self.err_msg(value=f"'Array' of length >={index+1}"),
)
return DecodeContext(
doc=self.doc,
pos=pos,
globalns=self.globalns,
localns=self.localns,
types=parent.get_array_subtypes(index),
get_args=self.get_args,
parent=parent,
pkey=index,
)
def object_context(self, /) -> "DecodeContext":
return self.filtered(self.accept_object, self.err_msg(value="'Object'"))
def key_context(self, /, key: str, key_pos: Pos) -> "DecodeContext":
parent = self.filtered(
self.accept_key(key),
self.err_msg(
msg=f"Inferred type {Union[self.types]} has no key '{key}'", # pylint: disable=unsubscriptable-object
),
err_pos=key_pos,
)
return DecodeContext(
doc=self.doc,
pos=key_pos,
globalns=self.globalns,
localns=self.localns,
types=parent.get_object_subtypes(key),
get_args=self.get_args,
parent=parent,
pkey=key,
)
# TYPECHECKING METHODS
def typecheck_value(
self, /, value: JSONValue, end_pos: U, start_pos: Pos
) -> Tuple[JSONValue, U, "DecodeContext"]:
types, classes = [], []
for tp in self.types:
if is_jvalue_class(tp):
data: ValueData = getattr(tp, PHERES_ATTR)
if typecheck(value, data.type):
classes.append(tp)
elif typecheck(value, tp):
types.append(tp)
if not types and not classes:
raise TypedJSONDecodeError(
msg=self.err_msg(value=value), doc=self.doc, pos=start_pos
)
if classes:
if len(classes) > 1:
raise TypedJSONDecodeError(
msg=self.err_msg(
msg=f"Multiple JSONable class found for value {value}"
),
doc=self.doc,
pos=self.pos,
)
value = _make_value(classes[0], value)
parent = None
if self.parent is not None:
parent = self.parent
key = self.pkey
if isinstance(key, int):
filter_func = self.accept_array_value(key, value)
elif isinstance(key, str):
filter_func = self.accept_object_value(key, value)
else:
raise PheresInternalError(f"Unhandled parent key {key}")
parent = parent.filtered(
filter_func, parent.err_msg(value=f"{value} of type {type(value)}")
)
return value, end_pos, parent
def typecheck_array(
self, /, array: JSONArray, end_pos: U
) -> Tuple[JSONArray, U, "DecodeContext"]:
types, *_ = self.filter_types(self.accept_length(len(array)))
if not types:
raise TypedJSONDecodeError(
msg=self.err_msg(
msg=f"Inferred type {Union[self.types]}", # pylint: disable=unsubscriptable-object
value=f"{array}, an 'Array' of len {len(array)} which is too short",
),
doc=self.doc,
pos=self.pos,
)
classes = [tp for tp in types if is_jarray_class(tp)]
parent = self.parent
if classes:
if len(classes) > 1:
raise TypedJSONDecodeError(
msg=self.err_msg(
msg=f"Multiple JSONable class found for array {array}"
),
doc=self.doc,
pos=self.pos,
)
array = _make_array(classes[0], array)
if parent is not None:
key = self.pkey
if isinstance(key, int):
filter_func = self.accept_array_value(key, array)
elif isinstance(key, str):
filter_func = self.accept_object_value(key, array)
else:
raise PheresInternalError(f"Unhandled parent key {key}")
parent = parent.filtered(
filter_func, parent.err_msg(value=f"{array} of type {type(array)}")
)
return array, end_pos, parent
def typecheck_object(
self, /, obj: JSONObject, end_pos: U
) -> Tuple[JSONObject, int, "DecodeContext"]:
classes = [
tp for tp in self.types if is_jdict_class(tp) or is_jobject_class(tp)
]
classes = [
cls
for i, cls in enumerate(classes)
if all(not issubclass(cls, other) for other in classes[i + 1 :])
]
parent = self.parent
if classes:
if len(classes) > 1:
raise TypedJSONDecodeError(
msg=self.err_msg(
msg=f"Multiple JSONable class found for object {obj}"
),
doc=self.doc,
pos=self.pos,
)
cls = classes[0]
if is_jdict_class(cls):
obj = _make_dict(cls, obj)
elif is_jobject_class(cls):
obj = _make_object(classes[0], obj)
if parent is not None:
key = self.pkey
if isinstance(key, int):
filter_func = self.accept_array_value(key, obj)
elif isinstance(key, str):
filter_func = self.accept_object_value(key, obj)
else:
raise PheresInternalError(f"Unhandled parent key {key}")
parent = parent.filtered(
filter_func, parent.err_msg(value=f"{obj} of type {type(obj)}")
)
return obj, end_pos, parent |
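The index-aligned filtering that DecodeContext relies on (sync_filter over the types/origs/args tuples) can be sketched as below; this sync_filter is a hypothetical stand-in written for illustration, not the actual Pheres helper:
def sync_filter(predicate, types, origs, args):
    """Keep position i of all three tuples when predicate(types[i], origs[i], args[i]) is true."""
    kept = [
        (tp, orig, arg)
        for tp, orig, arg in zip(types, origs, args)
        if predicate(tp, orig, arg)
    ]
    if not kept:
        return (), (), ()
    filtered_types, filtered_origs, filtered_args = zip(*kept)
    return tuple(filtered_types), tuple(filtered_origs), tuple(filtered_args)

# Example: keep only candidates whose origin is list
types, origs, args = sync_filter(
    lambda tp, orig, arg: orig is list,
    (list, dict),
    (list, dict),
    ((int,), (str, int)),
)
# types == (list,), origs == (list,), args == ((int,),)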
import { ChangeDetectorRef, Component, OnInit } from '@angular/core';
import { ActivatedRoute } from '@angular/router';
import { ApolloQueryResult } from '@apollo/client/core';
import {
GetApplicationGQL,
GetApplicationQuery,
GetWorksGQL,
GetWorksQuery,
WorkFragment,
WorkSpecificationFragment,
} from 'generated/types.graphql-gen';
import { Observable, BehaviorSubject, of, combineLatest } from 'rxjs';
import { switchMap } from 'rxjs/operators';
@Component({
selector: 'app-preview-application.dark',
templateUrl: './preview-application.component.html',
styleUrls: ['./preview-application.component.scss'],
})
export class PreviewApplicationComponent implements OnInit {
application$: Observable<ApolloQueryResult<GetApplicationQuery>>;
works$: Observable<ApolloQueryResult<GetWorksQuery>>;
private _application_id = this.route.snapshot.params['id'];
set application_id(id: string) {
this._application_id = id;
}
get application_id() {
return this._application_id;
}
private _currentWorkIndex: BehaviorSubject<number> = new BehaviorSubject(0);
currentWorkIndex = this._currentWorkIndex.asObservable();
currentWork: Observable<WorkFragment | null> = of(null);
private _currentFileIndex: BehaviorSubject<number> = new BehaviorSubject(0);
currentFileIndex = this._currentFileIndex.asObservable();
private _currentSpecificationIndex: BehaviorSubject<number> = new BehaviorSubject(
0
);
currentSpecificationIndex = this._currentSpecificationIndex.asObservable();
currentSpecification: Observable<WorkSpecificationFragment | null> = of(null);
constructor(
private route: ActivatedRoute,
private cdRef: ChangeDetectorRef,
private getApplicationGQL: GetApplicationGQL,
private getWorksGQL: GetWorksGQL
) {
this.application$ = this.getApplicationGQL.watch(
{ id: this.application_id },
{
fetchPolicy: 'cache-and-network',
}
).valueChanges;
this.works$ = this.getWorksGQL.watch(
{
application_id: this.application_id,
},
{ fetchPolicy: 'cache-and-network' }
).valueChanges;
this.currentWork = combineLatest([this.works$, this.currentWorkIndex]).pipe(
switchMap(([works, index]) => {
const work = works?.data?.works[index] || null;
return of(work);
})
);
this.currentSpecification = combineLatest([
this.currentWork,
this.currentSpecificationIndex,
]).pipe(
switchMap(([work, index]) => {
const specification = work?.specifications[index] || null;
return of(specification);
})
);
}
ngOnInit(): void {}
setCurrentWork(index: number) {
this._currentWorkIndex.next(index);
this._currentFileIndex.next(0);
this._currentSpecificationIndex.next(0);
}
setCurrentFile(index_file: number, event: Event, index_work: number = -1) {
event.preventDefault();
event.stopImmediatePropagation();
if (index_work >= 0) this._currentWorkIndex.next(index_work);
this._currentFileIndex.next(index_file);
this._currentSpecificationIndex.next(0);
}
incrementFileIndex(amount: number, length: number) {
this._currentFileIndex.next(
this._currentFileIndex.value + amount < 0
? length - 1
: this._currentFileIndex.value + amount >= length
? 0
: this._currentFileIndex.value + amount
);
}
showSpecification: boolean = false;
setCurrentSpecification(index: number) {
if (this._currentSpecificationIndex.value === index) {
this.showSpecification = !this.showSpecification;
} else {
this.showSpecification = true;
}
this._currentSpecificationIndex.next(index);
this.showChat = false;
}
toggleSpecifications() {
this.chatLoaded = true;
this.showChat = false;
this.showOverview = false;
this.showStatement = false;
this.showSpecification = !this.showSpecification;
}
chatLoaded: boolean = false;
showChat: boolean = false;
toggleChat() {
this.chatLoaded = true;
this.showStatement = false;
this.showOverview = false;
this.showChat = !this.showChat;
}
showOverview: boolean = false;
toggleOverview() {
this.showChat = false;
this.showStatement = false;
this.showOverview = !this.showOverview;
}
showStatement: boolean = false;
toggleStatement() {
this.showChat = false;
this.showOverview = false;
this.showStatement = !this.showStatement;
}
}
|
/// nekowin: OpenGL and Vulkan compatible library for context / surface window generation
/// licence: Apache, see LICENCE
/// file: napi.h - source file for platform independent api calls
/// author: <NAME>
#define __NAPI_C
#include <napi.h>
void neko_ChangeVCMode(bool is_vcp, neko_Window win) {
wslots[win].vc_data.is_enabled = is_vcp;
if(wslots[win].vc_data.is_enabled)
neko_SetMouseCoords(win, wslots[win].vc_data.orig_x, wslots[win].vc_data.orig_y);
}
void neko_ToggleVCMode(neko_Window win) {
wslots[win].vc_data.is_enabled = !wslots[win].vc_data.is_enabled;
if(wslots[win].vc_data.is_enabled)
neko_SetMouseCoords(win, wslots[win].vc_data.orig_x, wslots[win].vc_data.orig_y);
}
bool neko_IsVCMode(neko_Window win) {
return wslots[win].vc_data.is_enabled;
}
bool neko_ResizeNotify(neko_Window win) {
return wslots[win].resize_notify;
}
const char *neko_GetTitle(neko_Window win) {
return wslots[win].window_title;
}
void neko_GetWindowSize(neko_Window win, int32_t *x, int32_t *y) {
*x = wslots[win].cwidth;
*y = wslots[win].cheight;
}
void neko_GetWindowHints(neko_Window win, neko_Hint *hints) {
*hints = wslots[win].hints;
}
void neko_GetPixelSize(neko_Window win, float *x, float *y) {
*x = 2.0f / (float) wslots[win].cwidth;
*y = 2.0f / (float) wslots[win].cheight;
}
void neko_GetMousePos(neko_Window win, int64_t *x, int64_t *y) {
if(wslots[win].vc_data.is_enabled) {
*x = wslots[win].vc_data.x;
*y = wslots[win].vc_data.y;
} else {
*x = wslots[win].mx;
*y = wslots[win].my;
}
}
void neko_FindDeltaMovement(neko_Window win, uint64_t *x, uint64_t *y) {
if(wslots[win].vc_data.is_enabled) {
*x = wslots[win].vc_data.x - __prev_x;
*y = wslots[win].vc_data.y - __prev_y;
} else {
*x = wslots[win].mx - __prev_x;
*y = wslots[win].my - __prev_y;
}
}
/// Limit the largest and smallest virtual cursor position that can be achieved using
/// virtual mouse positioning
void neko_LimitVirtualPos (
int64_t max_x,
int64_t min_x,
int64_t max_y,
int64_t min_y
) {
__max_vc_x = max_x;
__min_vc_x = min_x;
__max_vc_y = max_y;
__min_vc_y = min_y;
}
/// Set virtual mouse position overflow actions that specify what
/// should happen if virtual mouse position limit has been reached
void neko_SetOverflowAction (
neko_VCPOverflowAction x_overflow_act,
neko_VCPOverflowAction y_overflow_act
) {
__x_overflow_act = x_overflow_act;
__y_overflow_act = y_overflow_act;
}
|
President-elect Donald Trump is upending the GOP’s plans on ObamaCare.
Trump over the weekend said the ObamaCare replacement plan should cover everyone, a pledge that congressional Republicans have repeatedly declined to make.
The remark came just a few days after Trump said at a press conference that his administration would put forward its own replacement plan for ObamaCare, an announcement that seemed to catch lawmakers by surprise.
At the same press conference, Trump said Congress should repeal and replace ObamaCare simultaneously, shooting down talk that Republicans might delay the replacement plan to a later date.
Finally, Trump has ripped drug companies and called for Medicare to negotiate prices, embracing a position that has long been taken by Democrats.
Sen. John Cornyn (Texas), the Senate’s No. 2 Republican, told reporters Tuesday that he did not know that Trump was going to announce his intention to put forward his own ObamaCare replacement last week. And if the new administration follows through on putting forward a plan, Cornyn would not commit that it would be the baseline for work on the Republican replacement.
“I don't know,” Cornyn said. “I want to see it first.”
Trump created a stir with his remarks to The Washington Post over the weekend about universal coverage.
“We’re going to have insurance for everybody,” Trump said. “There was a philosophy in some circles that if you can’t pay for it, you don’t get it. That’s not going to happen with us.”
The promise sounded more like the Democrats’ push for universal coverage than the GOP’s focus on bringing down costs and regulation in healthcare.
Republican congressional leaders have repeatedly declined to pledge that all of the 20 million people who gained coverage under ObamaCare will stay covered under a replacement.
“Look, I’m not going to get ahead of our committee process,” Speaker Paul Ryan (R-Wis.) said earlier this month when asked if everyone would keep coverage. “We’re just beginning to put this together.”
House Majority Leader Kevin McCarthy (R-Calif.) said separately this month that “there's a lot of areas that you want to look at” in a replacement, including cost, not just coverage.
Trump is also making his voice heard on when an ObamaCare replacement should happen.
"It will be essentially simultaneously," Trump said last week of repeal and replacement. "It will be various segments, you understand, but it will most likely be on the same day or the same week, but probably the same day, it could be the same hour.”
After the election, Republicans in Congress had floated the idea of passing repeal first and then focusing on the replacement later.
“My personal belief, and nothing’s been decided yet, but I would move through and repeal and then go to work on replacing,” McCarthy said in late November.
Ryan said in December that a replacement would “take time.”
“It’s not going to be replaced come next football season,” Ryan told the Milwaukee Journal Sentinel.
Last week, Ryan said the process of repeal and replace would occur “concurrently.”
Ryan said there could be elements of a replacement plan in the repeal bill. But including a full replacement could prove difficult, given complex Senate rules that control which provisions can be included in a bill using the fast-track process known as reconciliation.
“Nobody wants to go out and disagree publicly with their incoming President-elect before he's in office,” Tom Scully, the former Medicare and Medicaid chief under President George W. Bush, said of Ryan’s new comments moving toward the idea of doing repeal and replace at the same time.
Rep. Chris Collins (R-N.Y.), an early Trump backer, said Wednesday after Trump’s comments, “President-elect Trump’s timeline is a tighter timeline than our leadership was speaking about a month ago.”
Adding another wrinkle, Trump indicated at his press conference for the first time that his administration would put forward its own replacement plan. GOP sources say that announcement caught congressional Republicans off guard, and there has been a round of puzzlement over what is in the plan that Trump says his team is crafting.
“It’s the first I’ve heard about it,” Collins said after Trump said he would put forward his own plan.
Ryan said in an interview with Fox6 in Wisconsin, posted Tuesday, that Congress is working with Trump. “We’re working on it all together,” Ryan said. “It’s not his or ours.”
Healthcare experts say sticking to Trump’s promise of doing a replacement simultaneously with repeal could prove nearly impossible.
The drafting, negotiations and political maneuvering involved in a replacement plan would take months, if not longer, and could hardly be finished in Trump’s first 100 days.
“A detailed bill in the next few months, it's very hard to see,” Scully said. He said Republicans could be more likely to put forward an outline of a replacement at the time they repeal the law, which would be faster than working out the details of a full plan.
Congressional Republicans also say they want the replacement to be a series of smaller bills, not one large bill. Some of those smaller bills could be considered at the same time as repeal.
In the end, Trump’s healthcare plan could be closely in line with what congressional Republicans have been working on.
One of his top advisers, Kellyanne Conway, went on MSNBC shortly after his comments about “insurance for everybody” and pivoted back to standard Republican ideas like selling insurance across state lines and expanding health savings accounts.
Trump also promised during the campaign to “take care of everybody,” but the healthcare plan he proposed would have resulted in 21 million people losing coverage, according to an analysis from the Committee for a Responsible Federal Budget. |
package org.linkedbuildingdata.ifc2lbd.core.utils;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStreamWriter;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
import org.apache.jena.query.Dataset;
import org.apache.jena.rdf.model.Model;
import org.apache.jena.rdf.model.RDFNode;
import org.apache.jena.rdf.model.Resource;
import org.apache.jena.riot.RDFDataMgr;
import org.apache.jena.riot.RDFFormat;
import org.apache.jena.riot.system.StreamRDFWriter;
import org.apache.jena.vocabulary.RDF;
import org.linkedbuildingdata.ifc2lbd.IFCtoLBDConverter;
import org.linkedbuildingdata.ifc2lbd.application_messaging.events.IFCtoLBD_SystemStatusEvent;
import org.linkedbuildingdata.ifc2lbd.core.utils.rdfpath.RDFStep;
import com.google.common.eventbus.EventBus;
/*
* Copyright (c) 2017, 2021 <NAME> (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
public abstract class RDFUtils {
/**
*
* This method is used to write the Turtle formatted output files that are
* the result of the conversion process.
*
* A utility method to export Apache Jena RDF storage content into a
* Turtle formatted file. Supports UTF-8.
*
* @param m
* an Apache Jena model
* @param target_file
* absolute path name for an output file
*
* @param eventBus
* A queue to communicate between program components. Here, status
* messages and errors are sent to the user interface. If no user
* interface is present, adding messages to the channel does
* nothing.
*
*/
public static void writeModel(Model m, String target_file, EventBus eventBus) {
OutputStreamWriter fo = null;
try {
fo = new OutputStreamWriter(new FileOutputStream(new File(target_file)), Charset.forName("UTF-8").newEncoder());
m.write(fo, "TTL");
} catch (FileNotFoundException e) {
e.printStackTrace();
eventBus.post(new IFCtoLBD_SystemStatusEvent("Error : " + e.getMessage()));
} finally {
if (fo != null)
try {
fo.close();
} catch (IOException e) {
}
}
}
public static void writeModelRDFStream(Model m, String target_file, EventBus eventBus) {
FileOutputStream fo = null;
try {
fo =new FileOutputStream(new File(target_file));
StreamRDFWriter.write(fo, m.getGraph(), RDFFormat.TURTLE_BLOCKS) ;
} catch (FileNotFoundException e) {
e.printStackTrace();
eventBus.post(new IFCtoLBD_SystemStatusEvent("Error : " + e.getMessage()));
} finally {
if (fo != null)
try {
fo.close();
} catch (IOException e) {
}
}
}
public static void writeDataset(Dataset ds, String target_file, EventBus eventBus) {
FileOutputStream fo = null; // Outputstream for RDFDataMgr.write is deprecated
try {
fo = new FileOutputStream(new File(target_file));
RDFDataMgr.write(fo, ds, RDFFormat.TRIG_PRETTY);
} catch (FileNotFoundException e) {
e.printStackTrace();
eventBus.post(new IFCtoLBD_SystemStatusEvent("Error : " + e.getMessage()));
} finally {
if (fo != null)
try {
fo.close();
} catch (IOException e) {
}
}
}
/**
*
* Reads in a Turtle - Terse RDF Triple Language (TTL) formatted ontology
* file: Turtle - Terse RDF Triple Language:
* https://www.w3.org/TeamSubmission/turtle/
*
* The extra lines make sure that the files are found if run under Eclipse
* IDE or as a runnable Java Archive file (JAR).
*
* Eclipse: https://www.eclipse.org/
*
* @param model
* An Apache Jena model: RDF store run in memory.
* @param ontology_file
* An Apache Jena ontology model: RDF store run in memory
* containing an ontology engine.
* @param eventBus
* A queue to communicate between program components. Here, status
* messages and errors are sent to the user interface. If no user
* interface is present, adding messages to the channel does
* nothing.
*
*/
public static void readInOntologyTTL(Model model, String ontology_file, EventBus eventBus) {
InputStream in = null;
try {
in = IFCtoLBDConverter.class.getResourceAsStream("/" + ontology_file);
if (in == null) {
try {
in = IFCtoLBDConverter.class.getResourceAsStream("/resources/" + ontology_file);
if (in == null)
in = IFCtoLBDConverter.class.getResourceAsStream("/" + ontology_file);
} catch (Exception e) {
eventBus.post(new IFCtoLBD_SystemStatusEvent("Error : " + e.getMessage()));
e.printStackTrace();
return;
}
}
model.read(in, null, "TTL");
in.close();
} catch (Exception e) {
eventBus.post(new IFCtoLBD_SystemStatusEvent("Error : " + e.getMessage()));
System.out.println("Missing file: " + ontology_file);
System.out.println("In the rare case, when you have a \"pset\" subdirectory at the current folder, \nan extra error message may be given. ");
//e.printStackTrace();
}
}
/**
* A utility method to copy connected ABox triples unmodified from one
* Jena model to another. This is used to copy ifcOWL property set data as
* is.
*
* @param level
* how many steps from the start node
* @param r
* A RDF node to start the copying
* @param output_model
* A Jena model where the triples are copied to
*/
public static void copyTriples(int level, RDFNode r, Model output_model) {
if (level > 4)
return;
if (!r.isResource())
return;
r.asResource().listProperties().forEachRemaining(s -> {
// No ontology
if (!s.getPredicate().asResource().getURI().startsWith("http://www.w3.org/2000/01/rdf-schema#")) {
output_model.add(s);
copyTriples(level + 1, s.getObject(), output_model);
}
});
}
/**
* A helper method to find a list of nodes that match a given RDF path
* pattern
*
* @param r
* the starting point
* @param path
* the path pattern
* @return the list of found nodes in the RDF graph
*/
public static List<RDFNode> pathQuery(Resource r, RDFStep[] path) {
List<RDFStep> path_list = Arrays.asList(path);
if (r.getModel() == null)
return new ArrayList<RDFNode>();
Optional<RDFStep> step = path_list.stream().findFirst();
if (step.isPresent()) {
List<RDFNode> step_result = step.get().next(r);
if (path.length > 1) {
final List<RDFNode> result = new ArrayList<RDFNode>();
step_result.stream().filter(rn1 -> rn1.isResource()).map(rn2 -> rn2.asResource()).forEach(r1 -> {
List<RDFStep> tail = path_list.stream().skip(1).collect(Collectors.toList());
result.addAll(pathQuery(r1, tail.toArray(new RDFStep[tail.size()])));
});
return result;
} else
return step_result;
}
return new ArrayList<RDFNode>();
}
/**
*
* Gives the corresponding RDF ontology class type of the RDF node in the
* Apache Jena RDF model.
*
* @param r
* An RDF resource in an Apache Jena RDF store.
* @return The ontology class Resource node as an Optional, that is, if it
* exists. An empty Optional is returned if the triple does not exist
* in the graph.
*/
public static Optional<Resource> getType(Resource r) {
RDFStep[] path = { new RDFStep(RDF.type) };
return RDFUtils.pathQuery(r, path).stream().map(rn -> rn.asResource()).findAny();
}
}
|
// CopyTree recursively copies a directory tree.
//
// The destination directory must not already exist.
//
// If the optional Symlinks flag is true, symbolic links in the source tree
// result in symbolic links in the destination tree; if it is false, the
// contents of the files pointed to by symbolic links are copied. If the file
// pointed by the symlink doesn't exist, an error will be returned.
//
// You can set the optional IgnoreDanglingSymlinks flag to true if you want to
// silence this error. Notice that this has no effect on platforms that don't
// support os.Symlink.
//
// The optional ignore argument is a callable. If given, it is called with the
// `src` parameter, which is the directory being visited by CopyTree(), and
// `names` which is the list of `src` contents, as returned by os.ReadDir():
//
// callable(src, entries) -> ignoredNames
//
// Since CopyTree() is called recursively, the callable will be called once for
// each directory that is copied. It returns a list of names relative to the
// `src` directory that should not be copied.
//
// The optional copyFunction argument is a callable that will be used to copy
// each file. It will be called with the source path and the destination path as
// arguments. By default, Copy() is used, but any function that supports the
// same signature (like Copy2() when it exists) can be used.
func CopyTree(src, dst string, options *CopyTreeOptions) error {
if options == nil {
options = DefaultCopyTreeOptions
}
srcFileInfo, err := os.Stat(src)
if err != nil {
return err
}
if !srcFileInfo.IsDir() {
return &ErrNotDir{src}
}
_, err = os.Open(dst)
if !os.IsNotExist(err) {
ls, err := os.ReadDir(dst)
if err != nil || len(ls) > 0 {
return &ErrExists{dst}
}
}
entries, err := os.ReadDir(src)
if err != nil {
return fmt.Errorf("CopyTree: %w", err)
}
if err = os.MkdirAll(dst, srcFileInfo.Mode()); err != nil {
return fmt.Errorf("CopyTree: %w", err)
}
ignoredNames := []string{}
if options.Ignore != nil {
ignoredNames = options.Ignore(src, entries)
}
for _, entry := range entries {
if zstring.Contains(ignoredNames, entry.Name()) {
continue
}
srcPath := filepath.Join(src, entry.Name())
dstPath := filepath.Join(dst, entry.Name())
entryFileInfo, err := os.Lstat(srcPath)
if err != nil {
return err
}
switch {
case IsSymlink(entryFileInfo):
linkTo, err := os.Readlink(srcPath)
if err != nil {
return err
}
dir := filepath.Dir(srcPath)
linkTo, err = filepath.Abs(filepath.Join(dir, linkTo))
if err != nil {
return err
}
if options.Symlinks {
err = os.Symlink(linkTo, dstPath)
if err != nil {
return err
}
} else {
				linkToStat, err := os.Stat(linkTo)
				if err != nil {
					// Silence dangling symlinks only when requested; any other
					// stat error is returned instead of dereferencing a nil
					// FileInfo below.
					if os.IsNotExist(err) && options.IgnoreDanglingSymlinks {
						continue
					}
					return err
				}
if linkToStat.IsDir() {
err = CopyTree(srcPath, dstPath, options)
} else {
err = options.CopyFunction(srcPath, dstPath, Modes{})
}
if err != nil {
return err
}
}
case entryFileInfo.IsDir():
err = CopyTree(srcPath, dstPath, options)
if err != nil {
return err
}
default:
err = options.CopyFunction(srcPath, dstPath, Modes{})
if err != nil {
return err
}
}
}
return nil
} |
/**
* Default implementation of the {@link OmniEclipseClasspathContainer} interface.
*
* @author Donat Csikos
*/
public class DefaultOmniEclipseClasspathContainer extends AbstractOmniClasspathEntry implements OmniEclipseClasspathContainer {
private final String path;
private final boolean isExported;
private DefaultOmniEclipseClasspathContainer(String path, boolean isExported, Optional<List<OmniClasspathAttribute>> attributes, Optional<List<OmniAccessRule>> accessRules) {
super(attributes, accessRules);
this.path = path;
this.isExported = isExported;
}
@Override
public String getPath() {
return this.path;
}
@Override
public boolean isExported() {
return this.isExported;
}
public static DefaultOmniEclipseClasspathContainer from(EclipseClasspathContainer container) {
return new DefaultOmniEclipseClasspathContainer(
container.getPath(),
container.isExported(),
getClasspathAttributes(container),
getAccessRules(container));
}
} |
// MethodOverrideWithConfig returns a MethodOverride middleware with config.
// See: `MethodOverride()`.
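//
// A minimal usage sketch (the constructor and Use() registration are assumed,
// as in comparable makross middleware; the Getter shape follows the config
// fields used below, and c.Request is assumed to be a *http.Request):
//
//	m := makross.New()
//	m.Use(MethodOverrideWithConfig(MethodOverrideConfig{
//		Getter: func(c *makross.Context) string {
//			return c.Request.Header.Get("X-HTTP-Method-Override")
//		},
//	}))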
func MethodOverrideWithConfig(config MethodOverrideConfig) makross.Handler {
if config.Skipper == nil {
config.Skipper = DefaultMethodOverrideConfig.Skipper
}
if config.Getter == nil {
config.Getter = DefaultMethodOverrideConfig.Getter
}
return func(c *makross.Context) error {
if config.Skipper(c) {
return c.Next()
}
req := c.Request
if req.Method == makross.POST {
m := config.Getter(c)
if len(m) != 0 {
req.Method = m
}
}
return c.Next()
}
} |
<reponame>ajitkhaparde/trex-core<filename>src/dpdk/drivers/event/dlb/pf/base/dlb_resource.c
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2016-2020 Intel Corporation
*/
#include "dlb_hw_types.h"
#include "../../dlb_user.h"
#include "dlb_resource.h"
#include "dlb_osdep.h"
#include "dlb_osdep_bitmap.h"
#include "dlb_osdep_types.h"
#include "dlb_regs.h"
#include "../../dlb_priv.h"
#include "../../dlb_inline_fns.h"
#define DLB_DOM_LIST_HEAD(head, type) \
DLB_LIST_HEAD((head), type, domain_list)
#define DLB_FUNC_LIST_HEAD(head, type) \
DLB_LIST_HEAD((head), type, func_list)
#define DLB_DOM_LIST_FOR(head, ptr, iter) \
DLB_LIST_FOR_EACH(head, ptr, domain_list, iter)
#define DLB_FUNC_LIST_FOR(head, ptr, iter) \
DLB_LIST_FOR_EACH(head, ptr, func_list, iter)
#define DLB_DOM_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, domain_list, it, it_tmp)
#define DLB_FUNC_LIST_FOR_SAFE(head, ptr, ptr_tmp, it, it_tmp) \
DLB_LIST_FOR_EACH_SAFE((head), ptr, ptr_tmp, func_list, it, it_tmp)
static inline void dlb_flush_csr(struct dlb_hw *hw)
{
DLB_CSR_RD(hw, DLB_SYS_TOTAL_VAS);
}
static void dlb_init_fn_rsrc_lists(struct dlb_function_resources *rsrc)
{
dlb_list_init_head(&rsrc->avail_domains);
dlb_list_init_head(&rsrc->used_domains);
dlb_list_init_head(&rsrc->avail_ldb_queues);
dlb_list_init_head(&rsrc->avail_ldb_ports);
dlb_list_init_head(&rsrc->avail_dir_pq_pairs);
dlb_list_init_head(&rsrc->avail_ldb_credit_pools);
dlb_list_init_head(&rsrc->avail_dir_credit_pools);
}
static void dlb_init_domain_rsrc_lists(struct dlb_domain *domain)
{
dlb_list_init_head(&domain->used_ldb_queues);
dlb_list_init_head(&domain->used_ldb_ports);
dlb_list_init_head(&domain->used_dir_pq_pairs);
dlb_list_init_head(&domain->used_ldb_credit_pools);
dlb_list_init_head(&domain->used_dir_credit_pools);
dlb_list_init_head(&domain->avail_ldb_queues);
dlb_list_init_head(&domain->avail_ldb_ports);
dlb_list_init_head(&domain->avail_dir_pq_pairs);
dlb_list_init_head(&domain->avail_ldb_credit_pools);
dlb_list_init_head(&domain->avail_dir_credit_pools);
}
int dlb_resource_init(struct dlb_hw *hw)
{
struct dlb_list_entry *list;
unsigned int i;
/* For optimal load-balancing, ports that map to one or more QIDs in
* common should not be in numerical sequence. This is application
* dependent, but the driver interleaves port IDs as much as possible
* to reduce the likelihood of this. This initial allocation maximizes
* the average distance between an ID and its immediate neighbors (i.e.
* the distance from 1 to 0 and to 2, the distance from 2 to 1 and to
* 3, etc.).
*/
u32 init_ldb_port_allocation[DLB_MAX_NUM_LDB_PORTS] = {
0, 31, 62, 29, 60, 27, 58, 25, 56, 23, 54, 21, 52, 19, 50, 17,
48, 15, 46, 13, 44, 11, 42, 9, 40, 7, 38, 5, 36, 3, 34, 1,
32, 63, 30, 61, 28, 59, 26, 57, 24, 55, 22, 53, 20, 51, 18, 49,
16, 47, 14, 45, 12, 43, 10, 41, 8, 39, 6, 37, 4, 35, 2, 33
};
/* Zero-out resource tracking data structures */
memset(&hw->rsrcs, 0, sizeof(hw->rsrcs));
memset(&hw->pf, 0, sizeof(hw->pf));
dlb_init_fn_rsrc_lists(&hw->pf);
for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
memset(&hw->domains[i], 0, sizeof(hw->domains[i]));
dlb_init_domain_rsrc_lists(&hw->domains[i]);
hw->domains[i].parent_func = &hw->pf;
}
/* Give all resources to the PF driver */
hw->pf.num_avail_domains = DLB_MAX_NUM_DOMAINS;
for (i = 0; i < hw->pf.num_avail_domains; i++) {
list = &hw->domains[i].func_list;
dlb_list_add(&hw->pf.avail_domains, list);
}
hw->pf.num_avail_ldb_queues = DLB_MAX_NUM_LDB_QUEUES;
for (i = 0; i < hw->pf.num_avail_ldb_queues; i++) {
list = &hw->rsrcs.ldb_queues[i].func_list;
dlb_list_add(&hw->pf.avail_ldb_queues, list);
}
hw->pf.num_avail_ldb_ports = DLB_MAX_NUM_LDB_PORTS;
for (i = 0; i < hw->pf.num_avail_ldb_ports; i++) {
struct dlb_ldb_port *port;
port = &hw->rsrcs.ldb_ports[init_ldb_port_allocation[i]];
dlb_list_add(&hw->pf.avail_ldb_ports, &port->func_list);
}
hw->pf.num_avail_dir_pq_pairs = DLB_MAX_NUM_DIR_PORTS;
for (i = 0; i < hw->pf.num_avail_dir_pq_pairs; i++) {
list = &hw->rsrcs.dir_pq_pairs[i].func_list;
dlb_list_add(&hw->pf.avail_dir_pq_pairs, list);
}
hw->pf.num_avail_ldb_credit_pools = DLB_MAX_NUM_LDB_CREDIT_POOLS;
for (i = 0; i < hw->pf.num_avail_ldb_credit_pools; i++) {
list = &hw->rsrcs.ldb_credit_pools[i].func_list;
dlb_list_add(&hw->pf.avail_ldb_credit_pools, list);
}
hw->pf.num_avail_dir_credit_pools = DLB_MAX_NUM_DIR_CREDIT_POOLS;
for (i = 0; i < hw->pf.num_avail_dir_credit_pools; i++) {
list = &hw->rsrcs.dir_credit_pools[i].func_list;
dlb_list_add(&hw->pf.avail_dir_credit_pools, list);
}
/* There are 5120 history list entries, which allows us to overprovision
* the inflight limit (4096) by 1k.
*/
if (dlb_bitmap_alloc(hw,
&hw->pf.avail_hist_list_entries,
DLB_MAX_NUM_HIST_LIST_ENTRIES))
return -1;
if (dlb_bitmap_fill(hw->pf.avail_hist_list_entries))
return -1;
if (dlb_bitmap_alloc(hw,
&hw->pf.avail_qed_freelist_entries,
DLB_MAX_NUM_LDB_CREDITS))
return -1;
if (dlb_bitmap_fill(hw->pf.avail_qed_freelist_entries))
return -1;
if (dlb_bitmap_alloc(hw,
&hw->pf.avail_dqed_freelist_entries,
DLB_MAX_NUM_DIR_CREDITS))
return -1;
if (dlb_bitmap_fill(hw->pf.avail_dqed_freelist_entries))
return -1;
if (dlb_bitmap_alloc(hw,
&hw->pf.avail_aqed_freelist_entries,
DLB_MAX_NUM_AQOS_ENTRIES))
return -1;
if (dlb_bitmap_fill(hw->pf.avail_aqed_freelist_entries))
return -1;
/* Initialize the hardware resource IDs */
for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++)
hw->domains[i].id = i;
for (i = 0; i < DLB_MAX_NUM_LDB_QUEUES; i++)
hw->rsrcs.ldb_queues[i].id = i;
for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
hw->rsrcs.ldb_ports[i].id = i;
for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
hw->rsrcs.dir_pq_pairs[i].id = i;
for (i = 0; i < DLB_MAX_NUM_LDB_CREDIT_POOLS; i++)
hw->rsrcs.ldb_credit_pools[i].id = i;
for (i = 0; i < DLB_MAX_NUM_DIR_CREDIT_POOLS; i++)
hw->rsrcs.dir_credit_pools[i].id = i;
for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
hw->rsrcs.sn_groups[i].id = i;
/* Default mode (0) is 32 sequence numbers per queue */
hw->rsrcs.sn_groups[i].mode = 0;
hw->rsrcs.sn_groups[i].sequence_numbers_per_queue = 32;
hw->rsrcs.sn_groups[i].slot_use_bitmap = 0;
}
return 0;
}
void dlb_resource_free(struct dlb_hw *hw)
{
dlb_bitmap_free(hw->pf.avail_hist_list_entries);
dlb_bitmap_free(hw->pf.avail_qed_freelist_entries);
dlb_bitmap_free(hw->pf.avail_dqed_freelist_entries);
dlb_bitmap_free(hw->pf.avail_aqed_freelist_entries);
}
static struct dlb_domain *dlb_get_domain_from_id(struct dlb_hw *hw, u32 id)
{
if (id >= DLB_MAX_NUM_DOMAINS)
return NULL;
return &hw->domains[id];
}
static int dlb_attach_ldb_queues(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_queues,
struct dlb_cmd_response *resp)
{
unsigned int i, j;
if (rsrcs->num_avail_ldb_queues < num_queues) {
resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
return -1;
}
for (i = 0; i < num_queues; i++) {
struct dlb_ldb_queue *queue;
queue = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_queues,
typeof(*queue));
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain validation failed\n",
__func__);
goto cleanup;
}
dlb_list_del(&rsrcs->avail_ldb_queues, &queue->func_list);
queue->domain_id = domain->id;
queue->owned = true;
dlb_list_add(&domain->avail_ldb_queues, &queue->domain_list);
}
rsrcs->num_avail_ldb_queues -= num_queues;
return 0;
cleanup:
/* Return the assigned queues */
for (j = 0; j < i; j++) {
struct dlb_ldb_queue *queue;
queue = DLB_FUNC_LIST_HEAD(domain->avail_ldb_queues,
typeof(*queue));
/* Unrecoverable internal error */
if (queue == NULL)
break;
queue->owned = false;
dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
dlb_list_add(&rsrcs->avail_ldb_queues, &queue->func_list);
}
return -EFAULT;
}
static struct dlb_ldb_port *
dlb_get_next_ldb_port(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
u32 domain_id)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
/* To reduce the odds of consecutive load-balanced ports mapping to the
* same queue(s), the driver attempts to allocate ports whose neighbors
* are owned by a different domain.
*/
DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
u32 next, prev;
u32 phys_id;
phys_id = port->id;
next = phys_id + 1;
prev = phys_id - 1;
if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
next = 0;
if (phys_id == 0)
prev = DLB_MAX_NUM_LDB_PORTS - 1;
if (!hw->rsrcs.ldb_ports[next].owned ||
hw->rsrcs.ldb_ports[next].domain_id == domain_id)
continue;
if (!hw->rsrcs.ldb_ports[prev].owned ||
hw->rsrcs.ldb_ports[prev].domain_id == domain_id)
continue;
return port;
}
/* Failing that, the driver looks for a port with one neighbor owned by
* a different domain and the other unallocated.
*/
DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
u32 next, prev;
u32 phys_id;
phys_id = port->id;
next = phys_id + 1;
prev = phys_id - 1;
if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
next = 0;
if (phys_id == 0)
prev = DLB_MAX_NUM_LDB_PORTS - 1;
if (!hw->rsrcs.ldb_ports[prev].owned &&
hw->rsrcs.ldb_ports[next].owned &&
hw->rsrcs.ldb_ports[next].domain_id != domain_id)
return port;
if (!hw->rsrcs.ldb_ports[next].owned &&
hw->rsrcs.ldb_ports[prev].owned &&
hw->rsrcs.ldb_ports[prev].domain_id != domain_id)
return port;
}
/* Failing that, the driver looks for a port with both neighbors
* unallocated.
*/
DLB_FUNC_LIST_FOR(rsrcs->avail_ldb_ports, port, iter) {
u32 next, prev;
u32 phys_id;
phys_id = port->id;
next = phys_id + 1;
prev = phys_id - 1;
if (phys_id == DLB_MAX_NUM_LDB_PORTS - 1)
next = 0;
if (phys_id == 0)
prev = DLB_MAX_NUM_LDB_PORTS - 1;
if (!hw->rsrcs.ldb_ports[prev].owned &&
!hw->rsrcs.ldb_ports[next].owned)
return port;
}
/* If all else fails, the driver returns the next available port. */
return DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_ports, typeof(*port));
}
static int dlb_attach_ldb_ports(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_ports,
struct dlb_cmd_response *resp)
{
unsigned int i, j;
if (rsrcs->num_avail_ldb_ports < num_ports) {
resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
return -1;
}
for (i = 0; i < num_ports; i++) {
struct dlb_ldb_port *port;
port = dlb_get_next_ldb_port(hw, rsrcs, domain->id);
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain validation failed\n",
__func__);
goto cleanup;
}
dlb_list_del(&rsrcs->avail_ldb_ports, &port->func_list);
port->domain_id = domain->id;
port->owned = true;
dlb_list_add(&domain->avail_ldb_ports, &port->domain_list);
}
rsrcs->num_avail_ldb_ports -= num_ports;
return 0;
cleanup:
/* Return the assigned ports */
for (j = 0; j < i; j++) {
struct dlb_ldb_port *port;
port = DLB_FUNC_LIST_HEAD(domain->avail_ldb_ports,
typeof(*port));
/* Unrecoverable internal error */
if (port == NULL)
break;
port->owned = false;
dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
dlb_list_add(&rsrcs->avail_ldb_ports, &port->func_list);
}
return -EFAULT;
}
static int dlb_attach_dir_ports(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_ports,
struct dlb_cmd_response *resp)
{
unsigned int i, j;
if (rsrcs->num_avail_dir_pq_pairs < num_ports) {
resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
return -1;
}
for (i = 0; i < num_ports; i++) {
struct dlb_dir_pq_pair *port;
port = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_pq_pairs,
typeof(*port));
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain validation failed\n",
__func__);
goto cleanup;
}
dlb_list_del(&rsrcs->avail_dir_pq_pairs, &port->func_list);
port->domain_id = domain->id;
port->owned = true;
dlb_list_add(&domain->avail_dir_pq_pairs, &port->domain_list);
}
rsrcs->num_avail_dir_pq_pairs -= num_ports;
return 0;
cleanup:
/* Return the assigned ports */
for (j = 0; j < i; j++) {
struct dlb_dir_pq_pair *port;
port = DLB_FUNC_LIST_HEAD(domain->avail_dir_pq_pairs,
typeof(*port));
/* Unrecoverable internal error */
if (port == NULL)
break;
port->owned = false;
dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
dlb_list_add(&rsrcs->avail_dir_pq_pairs, &port->func_list);
}
return -EFAULT;
}
static int dlb_attach_ldb_credits(struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_credits,
struct dlb_cmd_response *resp)
{
struct dlb_bitmap *bitmap = rsrcs->avail_qed_freelist_entries;
if (dlb_bitmap_count(bitmap) < (int)num_credits) {
resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
return -1;
}
if (num_credits) {
int base;
base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
if (base < 0)
goto error;
domain->qed_freelist.base = base;
domain->qed_freelist.bound = base + num_credits;
domain->qed_freelist.offset = 0;
dlb_bitmap_clear_range(bitmap, base, num_credits);
}
return 0;
error:
resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
return -1;
}
static int dlb_attach_dir_credits(struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_credits,
struct dlb_cmd_response *resp)
{
struct dlb_bitmap *bitmap = rsrcs->avail_dqed_freelist_entries;
if (dlb_bitmap_count(bitmap) < (int)num_credits) {
resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
return -1;
}
if (num_credits) {
int base;
base = dlb_bitmap_find_set_bit_range(bitmap, num_credits);
if (base < 0)
goto error;
domain->dqed_freelist.base = base;
domain->dqed_freelist.bound = base + num_credits;
domain->dqed_freelist.offset = 0;
dlb_bitmap_clear_range(bitmap, base, num_credits);
}
return 0;
error:
resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
return -1;
}
static int dlb_attach_ldb_credit_pools(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_credit_pools,
struct dlb_cmd_response *resp)
{
unsigned int i, j;
if (rsrcs->num_avail_ldb_credit_pools < num_credit_pools) {
resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
return -1;
}
for (i = 0; i < num_credit_pools; i++) {
struct dlb_credit_pool *pool;
pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_ldb_credit_pools,
typeof(*pool));
if (pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain validation failed\n",
__func__);
goto cleanup;
}
dlb_list_del(&rsrcs->avail_ldb_credit_pools,
&pool->func_list);
pool->domain_id = domain->id;
pool->owned = true;
dlb_list_add(&domain->avail_ldb_credit_pools,
&pool->domain_list);
}
rsrcs->num_avail_ldb_credit_pools -= num_credit_pools;
return 0;
cleanup:
/* Return the assigned credit pools */
for (j = 0; j < i; j++) {
struct dlb_credit_pool *pool;
pool = DLB_FUNC_LIST_HEAD(domain->avail_ldb_credit_pools,
typeof(*pool));
/* Unrecoverable internal error */
if (pool == NULL)
break;
pool->owned = false;
dlb_list_del(&domain->avail_ldb_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_ldb_credit_pools,
&pool->func_list);
}
return -EFAULT;
}
static int dlb_attach_dir_credit_pools(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_credit_pools,
struct dlb_cmd_response *resp)
{
unsigned int i, j;
if (rsrcs->num_avail_dir_credit_pools < num_credit_pools) {
resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
return -1;
}
for (i = 0; i < num_credit_pools; i++) {
struct dlb_credit_pool *pool;
pool = DLB_FUNC_LIST_HEAD(rsrcs->avail_dir_credit_pools,
typeof(*pool));
if (pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain validation failed\n",
__func__);
goto cleanup;
}
dlb_list_del(&rsrcs->avail_dir_credit_pools,
&pool->func_list);
pool->domain_id = domain->id;
pool->owned = true;
dlb_list_add(&domain->avail_dir_credit_pools,
&pool->domain_list);
}
rsrcs->num_avail_dir_credit_pools -= num_credit_pools;
return 0;
cleanup:
/* Return the assigned credit pools */
for (j = 0; j < i; j++) {
struct dlb_credit_pool *pool;
pool = DLB_FUNC_LIST_HEAD(domain->avail_dir_credit_pools,
typeof(*pool));
/* Unrecoverable internal error */
if (pool == NULL)
break;
pool->owned = false;
dlb_list_del(&domain->avail_dir_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_dir_credit_pools,
&pool->func_list);
}
return -EFAULT;
}
static int
dlb_attach_domain_hist_list_entries(struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_hist_list_entries,
struct dlb_cmd_response *resp)
{
struct dlb_bitmap *bitmap;
int base;
if (num_hist_list_entries) {
bitmap = rsrcs->avail_hist_list_entries;
base = dlb_bitmap_find_set_bit_range(bitmap,
num_hist_list_entries);
if (base < 0)
goto error;
domain->total_hist_list_entries = num_hist_list_entries;
domain->avail_hist_list_entries = num_hist_list_entries;
domain->hist_list_entry_base = base;
domain->hist_list_entry_offset = 0;
dlb_bitmap_clear_range(bitmap, base, num_hist_list_entries);
}
return 0;
error:
resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
return -1;
}
static int dlb_attach_atomic_inflights(struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
u32 num_atomic_inflights,
struct dlb_cmd_response *resp)
{
if (num_atomic_inflights) {
struct dlb_bitmap *bitmap =
rsrcs->avail_aqed_freelist_entries;
int base;
base = dlb_bitmap_find_set_bit_range(bitmap,
num_atomic_inflights);
if (base < 0)
goto error;
domain->aqed_freelist.base = base;
domain->aqed_freelist.bound = base + num_atomic_inflights;
domain->aqed_freelist.offset = 0;
dlb_bitmap_clear_range(bitmap, base, num_atomic_inflights);
}
return 0;
error:
resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
return -1;
}
static int
dlb_domain_attach_resources(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_domain *domain,
struct dlb_create_sched_domain_args *args,
struct dlb_cmd_response *resp)
{
int ret;
ret = dlb_attach_ldb_queues(hw,
rsrcs,
domain,
args->num_ldb_queues,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_ldb_ports(hw,
rsrcs,
domain,
args->num_ldb_ports,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_dir_ports(hw,
rsrcs,
domain,
args->num_dir_ports,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_ldb_credits(rsrcs,
domain,
args->num_ldb_credits,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_dir_credits(rsrcs,
domain,
args->num_dir_credits,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_ldb_credit_pools(hw,
rsrcs,
domain,
args->num_ldb_credit_pools,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_dir_credit_pools(hw,
rsrcs,
domain,
args->num_dir_credit_pools,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_domain_hist_list_entries(rsrcs,
domain,
args->num_hist_list_entries,
resp);
if (ret < 0)
return ret;
ret = dlb_attach_atomic_inflights(rsrcs,
domain,
args->num_atomic_inflights,
resp);
if (ret < 0)
return ret;
domain->configured = true;
domain->started = false;
rsrcs->num_avail_domains--;
return 0;
}
static void dlb_ldb_port_cq_enable(struct dlb_hw *hw,
struct dlb_ldb_port *port)
{
union dlb_lsp_cq_ldb_dsbl reg;
/* Don't re-enable the port if a removal is pending. The caller should
* mark this port as enabled (if it isn't already), and when the
* removal completes the port will be enabled.
*/
if (port->num_pending_removals)
return;
reg.field.disabled = 0;
DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
dlb_flush_csr(hw);
}
static void dlb_dir_port_cq_enable(struct dlb_hw *hw,
struct dlb_dir_pq_pair *port)
{
union dlb_lsp_cq_dir_dsbl reg;
reg.field.disabled = 0;
DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
dlb_flush_csr(hw);
}
static void dlb_ldb_port_cq_disable(struct dlb_hw *hw,
struct dlb_ldb_port *port)
{
union dlb_lsp_cq_ldb_dsbl reg;
reg.field.disabled = 1;
DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_DSBL(port->id), reg.val);
dlb_flush_csr(hw);
}
static void dlb_dir_port_cq_disable(struct dlb_hw *hw,
struct dlb_dir_pq_pair *port)
{
union dlb_lsp_cq_dir_dsbl reg;
reg.field.disabled = 1;
DLB_CSR_WR(hw, DLB_LSP_CQ_DIR_DSBL(port->id), reg.val);
dlb_flush_csr(hw);
}
void dlb_disable_dp_vasr_feature(struct dlb_hw *hw)
{
union dlb_dp_dir_csr_ctrl r0;
r0.val = DLB_CSR_RD(hw, DLB_DP_DIR_CSR_CTRL);
r0.field.cfg_vasr_dis = 1;
DLB_CSR_WR(hw, DLB_DP_DIR_CSR_CTRL, r0.val);
}
void dlb_enable_excess_tokens_alarm(struct dlb_hw *hw)
{
union dlb_chp_cfg_chp_csr_ctrl r0;
r0.val = DLB_CSR_RD(hw, DLB_CHP_CFG_CHP_CSR_CTRL);
r0.val |= 1 << DLB_CHP_CFG_EXCESS_TOKENS_SHIFT;
DLB_CSR_WR(hw, DLB_CHP_CFG_CHP_CSR_CTRL, r0.val);
}
void dlb_hw_enable_sparse_ldb_cq_mode(struct dlb_hw *hw)
{
union dlb_sys_cq_mode r0;
r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
r0.field.ldb_cq64 = 1;
DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
}
void dlb_hw_enable_sparse_dir_cq_mode(struct dlb_hw *hw)
{
union dlb_sys_cq_mode r0;
r0.val = DLB_CSR_RD(hw, DLB_SYS_CQ_MODE);
r0.field.dir_cq64 = 1;
DLB_CSR_WR(hw, DLB_SYS_CQ_MODE, r0.val);
}
void dlb_hw_disable_pf_to_vf_isr_pend_err(struct dlb_hw *hw)
{
union dlb_sys_sys_alarm_int_enable r0;
r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
r0.field.pf_to_vf_isr_pend_error = 0;
DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
}
static unsigned int
dlb_get_num_ports_in_use(struct dlb_hw *hw)
{
unsigned int i, n = 0;
for (i = 0; i < DLB_MAX_NUM_LDB_PORTS; i++)
if (hw->rsrcs.ldb_ports[i].owned)
n++;
for (i = 0; i < DLB_MAX_NUM_DIR_PORTS; i++)
if (hw->rsrcs.dir_pq_pairs[i].owned)
n++;
return n;
}
static bool dlb_port_find_slot(struct dlb_ldb_port *port,
enum dlb_qid_map_state state,
int *slot)
{
int i;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
if (port->qid_map[i].state == state)
break;
}
*slot = i;
return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
}
static bool dlb_port_find_slot_queue(struct dlb_ldb_port *port,
enum dlb_qid_map_state state,
struct dlb_ldb_queue *queue,
int *slot)
{
int i;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
if (port->qid_map[i].state == state &&
port->qid_map[i].qid == queue->id)
break;
}
*slot = i;
return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
}
static int dlb_port_slot_state_transition(struct dlb_hw *hw,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
int slot,
enum dlb_qid_map_state new_state)
{
enum dlb_qid_map_state curr_state = port->qid_map[slot].state;
struct dlb_domain *domain;
domain = dlb_get_domain_from_id(hw, port->domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: unable to find domain %d\n",
__func__, port->domain_id);
return -EFAULT;
}
switch (curr_state) {
case DLB_QUEUE_UNMAPPED:
switch (new_state) {
case DLB_QUEUE_MAPPED:
queue->num_mappings++;
port->num_mappings++;
break;
case DLB_QUEUE_MAP_IN_PROGRESS:
queue->num_pending_additions++;
domain->num_pending_additions++;
break;
default:
goto error;
}
break;
case DLB_QUEUE_MAPPED:
switch (new_state) {
case DLB_QUEUE_UNMAPPED:
queue->num_mappings--;
port->num_mappings--;
break;
case DLB_QUEUE_UNMAP_IN_PROGRESS:
port->num_pending_removals++;
domain->num_pending_removals++;
break;
case DLB_QUEUE_MAPPED:
/* Priority change, nothing to update */
break;
default:
goto error;
}
break;
case DLB_QUEUE_MAP_IN_PROGRESS:
switch (new_state) {
case DLB_QUEUE_UNMAPPED:
queue->num_pending_additions--;
domain->num_pending_additions--;
break;
case DLB_QUEUE_MAPPED:
queue->num_mappings++;
port->num_mappings++;
queue->num_pending_additions--;
domain->num_pending_additions--;
break;
default:
goto error;
}
break;
case DLB_QUEUE_UNMAP_IN_PROGRESS:
switch (new_state) {
case DLB_QUEUE_UNMAPPED:
port->num_pending_removals--;
domain->num_pending_removals--;
queue->num_mappings--;
port->num_mappings--;
break;
case DLB_QUEUE_MAPPED:
port->num_pending_removals--;
domain->num_pending_removals--;
break;
case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
/* Nothing to update */
break;
default:
goto error;
}
break;
case DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP:
switch (new_state) {
case DLB_QUEUE_UNMAP_IN_PROGRESS:
/* Nothing to update */
break;
case DLB_QUEUE_UNMAPPED:
/* An UNMAP_IN_PROGRESS_PENDING_MAP slot briefly
* becomes UNMAPPED before it transitions to
* MAP_IN_PROGRESS.
*/
queue->num_mappings--;
port->num_mappings--;
port->num_pending_removals--;
domain->num_pending_removals--;
break;
default:
goto error;
}
break;
default:
goto error;
}
port->qid_map[slot].state = new_state;
DLB_HW_INFO(hw,
"[%s()] queue %d -> port %d state transition (%d -> %d)\n",
__func__, queue->id, port->id, curr_state,
new_state);
return 0;
error:
DLB_HW_ERR(hw,
"[%s()] Internal error: invalid queue %d -> port %d state transition (%d -> %d)\n",
__func__, queue->id, port->id, curr_state,
new_state);
return -EFAULT;
}
/* dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as their
* function names imply, and should only be called by the dynamic CQ mapping
* code.
*/
static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_queue *queue)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
int slot;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
if (!dlb_port_find_slot_queue(port, state, queue, &slot))
continue;
if (port->enabled)
dlb_ldb_port_cq_disable(hw, port);
}
}
static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_queue *queue)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
int slot;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
if (!dlb_port_find_slot_queue(port, state, queue, &slot))
continue;
if (port->enabled)
dlb_ldb_port_cq_enable(hw, port);
}
}
static int dlb_ldb_port_map_qid_static(struct dlb_hw *hw,
struct dlb_ldb_port *p,
struct dlb_ldb_queue *q,
u8 priority)
{
union dlb_lsp_cq2priov r0;
union dlb_lsp_cq2qid r1;
union dlb_atm_pipe_qid_ldb_qid2cqidx r2;
union dlb_lsp_qid_ldb_qid2cqidx r3;
union dlb_lsp_qid_ldb_qid2cqidx2 r4;
enum dlb_qid_map_state state;
int i;
/* Look for a pending or already mapped slot, else an unused slot */
if (!dlb_port_find_slot_queue(p, DLB_QUEUE_MAP_IN_PROGRESS, q, &i) &&
!dlb_port_find_slot_queue(p, DLB_QUEUE_MAPPED, q, &i) &&
!dlb_port_find_slot(p, DLB_QUEUE_UNMAPPED, &i)) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: CQ has no available QID mapping slots\n",
__func__, __LINE__);
return -EFAULT;
}
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
/* Read-modify-write the priority and valid bit register */
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(p->id));
r0.field.v |= 1 << i;
r0.field.prio |= (priority & 0x7) << i * 3;
DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(p->id), r0.val);
/* Read-modify-write the QID map register */
r1.val = DLB_CSR_RD(hw, DLB_LSP_CQ2QID(p->id, i / 4));
if (i == 0 || i == 4)
r1.field.qid_p0 = q->id;
if (i == 1 || i == 5)
r1.field.qid_p1 = q->id;
if (i == 2 || i == 6)
r1.field.qid_p2 = q->id;
if (i == 3 || i == 7)
r1.field.qid_p3 = q->id;
DLB_CSR_WR(hw, DLB_LSP_CQ2QID(p->id, i / 4), r1.val);
r2.val = DLB_CSR_RD(hw,
DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
p->id / 4));
r3.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_QID2CQIDX(q->id,
p->id / 4));
r4.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
p->id / 4));
switch (p->id % 4) {
case 0:
r2.field.cq_p0 |= 1 << i;
r3.field.cq_p0 |= 1 << i;
r4.field.cq_p0 |= 1 << i;
break;
case 1:
r2.field.cq_p1 |= 1 << i;
r3.field.cq_p1 |= 1 << i;
r4.field.cq_p1 |= 1 << i;
break;
case 2:
r2.field.cq_p2 |= 1 << i;
r3.field.cq_p2 |= 1 << i;
r4.field.cq_p2 |= 1 << i;
break;
case 3:
r2.field.cq_p3 |= 1 << i;
r3.field.cq_p3 |= 1 << i;
r4.field.cq_p3 |= 1 << i;
break;
}
DLB_CSR_WR(hw,
DLB_ATM_PIPE_QID_LDB_QID2CQIDX(q->id,
p->id / 4),
r2.val);
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_QID2CQIDX(q->id,
p->id / 4),
r3.val);
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_QID2CQIDX2(q->id,
p->id / 4),
r4.val);
dlb_flush_csr(hw);
p->qid_map[i].qid = q->id;
p->qid_map[i].priority = priority;
state = DLB_QUEUE_MAPPED;
return dlb_port_slot_state_transition(hw, p, q, i, state);
}
static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
int slot)
{
union dlb_lsp_qid_aqed_active_cnt r0;
union dlb_lsp_qid_ldb_enqueue_cnt r1;
union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
/* Set the atomic scheduling haswork bit */
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
r2.field.cq = port->id;
r2.field.qidix = slot;
r2.field.value = 1;
r2.field.rlist_haswork_v = r0.field.count > 0;
	DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
	/* Set the non-atomic scheduling haswork bit */
r1.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
memset(&r2, 0, sizeof(r2));
r2.field.cq = port->id;
r2.field.qidix = slot;
r2.field.value = 1;
r2.field.nalb_haswork_v = (r1.field.count > 0);
DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
dlb_flush_csr(hw);
return 0;
}
static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,
struct dlb_ldb_port *port,
int slot)
{
union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
r0.field.cq = port->id;
r0.field.qidix = slot;
r0.field.value = 0;
r0.field.inflight_ok_v = 1;
DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
dlb_flush_csr(hw);
}
static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,
struct dlb_ldb_port *port,
int slot)
{
union dlb_lsp_ldb_sched_ctrl r0 = { {0} };
r0.field.cq = port->id;
r0.field.qidix = slot;
r0.field.value = 1;
r0.field.inflight_ok_v = 1;
DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r0.val);
dlb_flush_csr(hw);
}
static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
struct dlb_ldb_queue *queue)
{
union dlb_lsp_qid_ldb_infl_lim r0 = { {0} };
r0.field.limit = queue->num_qid_inflights;
DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r0.val);
}
static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,
struct dlb_ldb_queue *queue)
{
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_INFL_LIM(queue->id),
DLB_LSP_QID_LDB_INFL_LIM_RST);
}
static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_lsp_qid_ldb_infl_cnt r0;
enum dlb_qid_map_state state;
int slot, ret;
u8 prio;
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
if (r0.field.count) {
DLB_HW_ERR(hw,
"[%s()] Internal error: non-zero QID inflight count\n",
__func__);
return -EFAULT;
}
/* For each port with a pending mapping to this queue, perform the
* static mapping and set the corresponding has_work bits.
*/
state = DLB_QUEUE_MAP_IN_PROGRESS;
if (!dlb_port_find_slot_queue(port, state, queue, &slot))
return -EINVAL;
if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
prio = port->qid_map[slot].priority;
/* Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
* the port's qid_map state.
*/
ret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);
if (ret)
return ret;
ret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);
if (ret)
return ret;
	/* Ensure IF_status(cq,qid) is 0 before enabling the port, to prevent
	 * spurious schedules from increasing the queue's inflight count.
	 */
dlb_ldb_port_clear_queue_if_status(hw, port, slot);
/* Reset the queue's inflight status */
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
state = DLB_QUEUE_MAPPED;
if (!dlb_port_find_slot_queue(port, state, queue, &slot))
continue;
dlb_ldb_port_set_queue_if_status(hw, port, slot);
}
dlb_ldb_queue_set_inflight_limit(hw, queue);
/* Re-enable CQs mapped to this queue */
dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
/* If this queue has other mappings pending, clear its inflight limit */
if (queue->num_pending_additions > 0)
dlb_ldb_queue_clear_inflight_limit(hw, queue);
return 0;
}
/**
* dlb_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
* @hw: dlb_hw handle for a particular device.
* @port: load-balanced port
* @queue: load-balanced queue
* @priority: queue servicing priority
*
* Returns 0 if the queue was mapped, 1 if the mapping is scheduled to occur
* at a later point, and <0 if an error occurred.
*/
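/* A hypothetical caller sketch (not part of this file) showing how the three
 * return classes documented above are typically handled:
 *
 *	ret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
 *	if (ret < 0)
 *		return ret;     <-- hardware/state error
 *	if (ret == 1)
 *		return 0;       <-- mapping finishes later via the scheduled worker
 *	(ret == 0: the queue is mapped and schedulable immediately)
 */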
static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
u8 priority)
{
union dlb_lsp_qid_ldb_infl_cnt r0 = { {0} };
enum dlb_qid_map_state state;
struct dlb_domain *domain;
int slot, ret;
domain = dlb_get_domain_from_id(hw, port->domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: unable to find domain %d\n",
__func__, port->domain_id);
return -EFAULT;
}
/* Set the QID inflight limit to 0 to prevent further scheduling of the
* queue.
*/
DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), 0);
if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {
DLB_HW_ERR(hw,
"Internal error: No available unmapped slots\n");
return -EFAULT;
}
if (slot >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
port->qid_map[slot].qid = queue->id;
port->qid_map[slot].priority = priority;
state = DLB_QUEUE_MAP_IN_PROGRESS;
ret = dlb_port_slot_state_transition(hw, port, queue, slot, state);
if (ret)
return ret;
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
if (r0.field.count) {
/* The queue is owed completions so it's not safe to map it
* yet. Schedule a kernel thread to complete the mapping later,
* once software has completed all the queue's inflight events.
*/
if (!os_worker_active(hw))
os_schedule_work(hw);
return 1;
}
/* Disable the affected CQ, and the CQs already mapped to the QID,
* before reading the QID's inflight count a second time. There is an
* unlikely race in which the QID may schedule one more QE after we
* read an inflight count of 0, and disabling the CQs guarantees that
* the race will not occur after a re-read of the inflight count
* register.
*/
if (port->enabled)
dlb_ldb_port_cq_disable(hw, port);
dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(queue->id));
if (r0.field.count) {
if (port->enabled)
dlb_ldb_port_cq_enable(hw, port);
dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
/* The queue is owed completions so it's not safe to map it
* yet. Schedule a kernel thread to complete the mapping later,
* once software has completed all the queue's inflight events.
*/
if (!os_worker_active(hw))
os_schedule_work(hw);
return 1;
}
return dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
static int dlb_ldb_port_map_qid(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
u8 prio)
{
if (domain->started)
return dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
else
return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
}
static int dlb_ldb_port_unmap_qid(struct dlb_hw *hw,
struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue)
{
enum dlb_qid_map_state mapped, in_progress, pending_map, unmapped;
union dlb_lsp_cq2priov r0;
union dlb_atm_pipe_qid_ldb_qid2cqidx r1;
union dlb_lsp_qid_ldb_qid2cqidx r2;
union dlb_lsp_qid_ldb_qid2cqidx2 r3;
u32 queue_id;
u32 port_id;
int i;
/* Find the queue's slot */
mapped = DLB_QUEUE_MAPPED;
in_progress = DLB_QUEUE_UNMAP_IN_PROGRESS;
pending_map = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
if (!dlb_port_find_slot_queue(port, mapped, queue, &i) &&
!dlb_port_find_slot_queue(port, in_progress, queue, &i) &&
!dlb_port_find_slot_queue(port, pending_map, queue, &i)) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: QID %d isn't mapped\n",
__func__, __LINE__, queue->id);
return -EFAULT;
}
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
port_id = port->id;
queue_id = queue->id;
/* Read-modify-write the priority and valid bit register */
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port_id));
r0.field.v &= ~(1 << i);
DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port_id), r0.val);
r1.val = DLB_CSR_RD(hw,
DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id,
port_id / 4));
r2.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_QID2CQIDX(queue_id,
port_id / 4));
r3.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_QID2CQIDX2(queue_id,
port_id / 4));
switch (port_id % 4) {
case 0:
r1.field.cq_p0 &= ~(1 << i);
r2.field.cq_p0 &= ~(1 << i);
r3.field.cq_p0 &= ~(1 << i);
break;
case 1:
r1.field.cq_p1 &= ~(1 << i);
r2.field.cq_p1 &= ~(1 << i);
r3.field.cq_p1 &= ~(1 << i);
break;
case 2:
r1.field.cq_p2 &= ~(1 << i);
r2.field.cq_p2 &= ~(1 << i);
r3.field.cq_p2 &= ~(1 << i);
break;
case 3:
r1.field.cq_p3 &= ~(1 << i);
r2.field.cq_p3 &= ~(1 << i);
r3.field.cq_p3 &= ~(1 << i);
break;
}
DLB_CSR_WR(hw,
DLB_ATM_PIPE_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
r1.val);
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_QID2CQIDX(queue_id, port_id / 4),
r2.val);
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_QID2CQIDX2(queue_id, port_id / 4),
r3.val);
dlb_flush_csr(hw);
unmapped = DLB_QUEUE_UNMAPPED;
return dlb_port_slot_state_transition(hw, port, queue, i, unmapped);
}
static int
dlb_verify_create_sched_domain_args(struct dlb_hw *hw,
struct dlb_function_resources *rsrcs,
struct dlb_create_sched_domain_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_bitmap *ldb_credit_freelist;
struct dlb_bitmap *dir_credit_freelist;
unsigned int ldb_credit_freelist_count;
unsigned int dir_credit_freelist_count;
unsigned int max_contig_aqed_entries;
unsigned int max_contig_dqed_entries;
unsigned int max_contig_qed_entries;
unsigned int max_contig_hl_entries;
struct dlb_bitmap *aqed_freelist;
enum dlb_dev_revision revision;
ldb_credit_freelist = rsrcs->avail_qed_freelist_entries;
dir_credit_freelist = rsrcs->avail_dqed_freelist_entries;
aqed_freelist = rsrcs->avail_aqed_freelist_entries;
ldb_credit_freelist_count = dlb_bitmap_count(ldb_credit_freelist);
dir_credit_freelist_count = dlb_bitmap_count(dir_credit_freelist);
max_contig_hl_entries =
dlb_bitmap_longest_set_range(rsrcs->avail_hist_list_entries);
max_contig_aqed_entries =
dlb_bitmap_longest_set_range(aqed_freelist);
max_contig_qed_entries =
dlb_bitmap_longest_set_range(ldb_credit_freelist);
max_contig_dqed_entries =
dlb_bitmap_longest_set_range(dir_credit_freelist);
if (rsrcs->num_avail_domains < 1)
resp->status = DLB_ST_DOMAIN_UNAVAILABLE;
else if (rsrcs->num_avail_ldb_queues < args->num_ldb_queues)
resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
else if (rsrcs->num_avail_ldb_ports < args->num_ldb_ports)
resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
else if (args->num_ldb_queues > 0 && args->num_ldb_ports == 0)
resp->status = DLB_ST_LDB_PORT_REQUIRED_FOR_LDB_QUEUES;
else if (rsrcs->num_avail_dir_pq_pairs < args->num_dir_ports)
resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
else if (ldb_credit_freelist_count < args->num_ldb_credits)
resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
else if (dir_credit_freelist_count < args->num_dir_credits)
resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
else if (rsrcs->num_avail_ldb_credit_pools < args->num_ldb_credit_pools)
resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
else if (rsrcs->num_avail_dir_credit_pools < args->num_dir_credit_pools)
resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
else if (max_contig_hl_entries < args->num_hist_list_entries)
resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
else if (max_contig_aqed_entries < args->num_atomic_inflights)
resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
else if (max_contig_qed_entries < args->num_ldb_credits)
resp->status = DLB_ST_QED_FREELIST_ENTRIES_UNAVAILABLE;
else if (max_contig_dqed_entries < args->num_dir_credits)
resp->status = DLB_ST_DQED_FREELIST_ENTRIES_UNAVAILABLE;
/* DLB A-stepping workaround for hardware write buffer lock up issue:
* limit the maximum configured ports to less than 128 and disable CQ
* occupancy interrupts.
*/
revision = os_get_dev_revision(hw);
if (revision < DLB_B0) {
u32 n = dlb_get_num_ports_in_use(hw);
n += args->num_ldb_ports + args->num_dir_ports;
if (n >= DLB_A_STEP_MAX_PORTS)
resp->status = args->num_ldb_ports ?
DLB_ST_LDB_PORTS_UNAVAILABLE :
DLB_ST_DIR_PORTS_UNAVAILABLE;
}
if (resp->status)
return -1;
return 0;
}
static void
dlb_log_create_sched_domain_args(struct dlb_hw *hw,
struct dlb_create_sched_domain_args *args)
{
DLB_HW_INFO(hw, "DLB create sched domain arguments:\n");
DLB_HW_INFO(hw, "\tNumber of LDB queues: %d\n",
args->num_ldb_queues);
DLB_HW_INFO(hw, "\tNumber of LDB ports: %d\n",
args->num_ldb_ports);
DLB_HW_INFO(hw, "\tNumber of DIR ports: %d\n",
args->num_dir_ports);
DLB_HW_INFO(hw, "\tNumber of ATM inflights: %d\n",
args->num_atomic_inflights);
DLB_HW_INFO(hw, "\tNumber of hist list entries: %d\n",
args->num_hist_list_entries);
DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
args->num_ldb_credits);
DLB_HW_INFO(hw, "\tNumber of DIR credits: %d\n",
args->num_dir_credits);
DLB_HW_INFO(hw, "\tNumber of LDB credit pools: %d\n",
args->num_ldb_credit_pools);
DLB_HW_INFO(hw, "\tNumber of DIR credit pools: %d\n",
args->num_dir_credit_pools);
}
/**
* dlb_hw_create_sched_domain() - Allocate and initialize a DLB scheduling
* domain and its resources.
* @hw: Contains the current state of the DLB hardware.
* @args: User-provided arguments.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
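/* A minimal usage sketch (hypothetical resource counts; the argument field
 * names are the ones logged by dlb_log_create_sched_domain_args() above):
 *
 *	struct dlb_create_sched_domain_args args = {
 *		.num_ldb_queues = 2,
 *		.num_ldb_ports = 4,
 *		.num_dir_ports = 1,
 *		.num_atomic_inflights = 2048,
 *		.num_hist_list_entries = 256,
 *		.num_ldb_credits = 1024,
 *		.num_dir_credits = 128,
 *		.num_ldb_credit_pools = 1,
 *		.num_dir_credit_pools = 1,
 *	};
 *	struct dlb_cmd_response resp = {0};
 *
 *	if (dlb_hw_create_sched_domain(hw, &args, &resp) < 0)
 *		resp.status identifies the failed resource check;
 *	else
 *		resp.id is the ID of the newly created domain;
 */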
int dlb_hw_create_sched_domain(struct dlb_hw *hw,
struct dlb_create_sched_domain_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
struct dlb_function_resources *rsrcs;
int ret;
rsrcs = &hw->pf;
dlb_log_create_sched_domain_args(hw, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_create_sched_domain_args(hw, rsrcs, args, resp))
return -EINVAL;
domain = DLB_FUNC_LIST_HEAD(rsrcs->avail_domains, typeof(*domain));
/* Verification should catch this. */
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available domains\n",
__func__, __LINE__);
return -EFAULT;
}
if (domain->configured) {
DLB_HW_ERR(hw,
"[%s()] Internal error: avail_domains contains configured domains.\n",
__func__);
return -EFAULT;
}
dlb_init_domain_rsrc_lists(domain);
/* Verification should catch this too. */
ret = dlb_domain_attach_resources(hw, rsrcs, domain, args, resp);
if (ret < 0) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to verify args.\n",
__func__);
return -EFAULT;
}
dlb_list_del(&rsrcs->avail_domains, &domain->func_list);
dlb_list_add(&rsrcs->used_domains, &domain->func_list);
resp->id = domain->id;
resp->status = 0;
return 0;
}
static void
dlb_configure_ldb_credit_pool(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_create_ldb_pool_args *args,
struct dlb_credit_pool *pool)
{
union dlb_sys_ldb_pool_enbld r0 = { {0} };
union dlb_chp_ldb_pool_crd_lim r1 = { {0} };
union dlb_chp_ldb_pool_crd_cnt r2 = { {0} };
union dlb_chp_qed_fl_base r3 = { {0} };
union dlb_chp_qed_fl_lim r4 = { {0} };
union dlb_chp_qed_fl_push_ptr r5 = { {0} };
union dlb_chp_qed_fl_pop_ptr r6 = { {0} };
r1.field.limit = args->num_ldb_credits;
DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_LIM(pool->id), r1.val);
r2.field.count = args->num_ldb_credits;
DLB_CSR_WR(hw, DLB_CHP_LDB_POOL_CRD_CNT(pool->id), r2.val);
r3.field.base = domain->qed_freelist.base + domain->qed_freelist.offset;
DLB_CSR_WR(hw, DLB_CHP_QED_FL_BASE(pool->id), r3.val);
r4.field.freelist_disable = 0;
r4.field.limit = r3.field.base + args->num_ldb_credits - 1;
DLB_CSR_WR(hw, DLB_CHP_QED_FL_LIM(pool->id), r4.val);
r5.field.push_ptr = r3.field.base;
r5.field.generation = 1;
DLB_CSR_WR(hw, DLB_CHP_QED_FL_PUSH_PTR(pool->id), r5.val);
r6.field.pop_ptr = r3.field.base;
r6.field.generation = 0;
DLB_CSR_WR(hw, DLB_CHP_QED_FL_POP_PTR(pool->id), r6.val);
r0.field.pool_enabled = 1;
DLB_CSR_WR(hw, DLB_SYS_LDB_POOL_ENBLD(pool->id), r0.val);
pool->avail_credits = args->num_ldb_credits;
pool->total_credits = args->num_ldb_credits;
domain->qed_freelist.offset += args->num_ldb_credits;
pool->configured = true;
}
static int
dlb_verify_create_ldb_pool_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_pool_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_freelist *qed_freelist;
struct dlb_domain *domain;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
qed_freelist = &domain->qed_freelist;
if (dlb_freelist_count(qed_freelist) < args->num_ldb_credits) {
resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
return -1;
}
if (dlb_list_empty(&domain->avail_ldb_credit_pools)) {
resp->status = DLB_ST_LDB_CREDIT_POOLS_UNAVAILABLE;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
return 0;
}
static void
dlb_log_create_ldb_pool_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_pool_args *args)
{
DLB_HW_INFO(hw, "DLB create load-balanced credit pool arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tNumber of LDB credits: %d\n",
args->num_ldb_credits);
}
/**
* dlb_hw_create_ldb_pool() - Allocate and initialize a DLB credit pool.
* @hw: Contains the current state of the DLB hardware.
* @args: User-provided arguments.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
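/* A hypothetical usage sketch (domain_id comes from a prior
 * dlb_hw_create_sched_domain() call; the credit count is illustrative):
 *
 *	struct dlb_create_ldb_pool_args pool_args = { .num_ldb_credits = 1024 };
 *	struct dlb_cmd_response resp = {0};
 *
 *	if (dlb_hw_create_ldb_pool(hw, domain_id, &pool_args, &resp) == 0)
 *		resp.id is the new pool's ID;
 */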
int dlb_hw_create_ldb_pool(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_pool_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_credit_pool *pool;
struct dlb_domain *domain;
dlb_log_create_ldb_pool_args(hw, domain_id, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_create_ldb_pool_args(hw, domain_id, args, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
pool = DLB_DOM_LIST_HEAD(domain->avail_ldb_credit_pools, typeof(*pool));
/* Verification should catch this. */
if (pool == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available ldb credit pools\n",
__func__, __LINE__);
return -EFAULT;
}
dlb_configure_ldb_credit_pool(hw, domain, args, pool);
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list.
*/
dlb_list_del(&domain->avail_ldb_credit_pools, &pool->domain_list);
dlb_list_add(&domain->used_ldb_credit_pools, &pool->domain_list);
resp->status = 0;
resp->id = pool->id;
return 0;
}
static void
dlb_configure_dir_credit_pool(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_create_dir_pool_args *args,
struct dlb_credit_pool *pool)
{
union dlb_sys_dir_pool_enbld r0 = { {0} };
union dlb_chp_dir_pool_crd_lim r1 = { {0} };
union dlb_chp_dir_pool_crd_cnt r2 = { {0} };
union dlb_chp_dqed_fl_base r3 = { {0} };
union dlb_chp_dqed_fl_lim r4 = { {0} };
union dlb_chp_dqed_fl_push_ptr r5 = { {0} };
union dlb_chp_dqed_fl_pop_ptr r6 = { {0} };
r1.field.limit = args->num_dir_credits;
DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_LIM(pool->id), r1.val);
r2.field.count = args->num_dir_credits;
DLB_CSR_WR(hw, DLB_CHP_DIR_POOL_CRD_CNT(pool->id), r2.val);
r3.field.base = domain->dqed_freelist.base +
domain->dqed_freelist.offset;
DLB_CSR_WR(hw, DLB_CHP_DQED_FL_BASE(pool->id), r3.val);
r4.field.freelist_disable = 0;
r4.field.limit = r3.field.base + args->num_dir_credits - 1;
DLB_CSR_WR(hw, DLB_CHP_DQED_FL_LIM(pool->id), r4.val);
r5.field.push_ptr = r3.field.base;
r5.field.generation = 1;
DLB_CSR_WR(hw, DLB_CHP_DQED_FL_PUSH_PTR(pool->id), r5.val);
r6.field.pop_ptr = r3.field.base;
r6.field.generation = 0;
DLB_CSR_WR(hw, DLB_CHP_DQED_FL_POP_PTR(pool->id), r6.val);
r0.field.pool_enabled = 1;
DLB_CSR_WR(hw, DLB_SYS_DIR_POOL_ENBLD(pool->id), r0.val);
pool->avail_credits = args->num_dir_credits;
pool->total_credits = args->num_dir_credits;
domain->dqed_freelist.offset += args->num_dir_credits;
pool->configured = true;
}
static int
dlb_verify_create_dir_pool_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_pool_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_freelist *dqed_freelist;
struct dlb_domain *domain;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
dqed_freelist = &domain->dqed_freelist;
if (dlb_freelist_count(dqed_freelist) < args->num_dir_credits) {
resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
return -1;
}
if (dlb_list_empty(&domain->avail_dir_credit_pools)) {
resp->status = DLB_ST_DIR_CREDIT_POOLS_UNAVAILABLE;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
return 0;
}
static void
dlb_log_create_dir_pool_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_pool_args *args)
{
DLB_HW_INFO(hw, "DLB create directed credit pool arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tNumber of DIR credits: %d\n",
args->num_dir_credits);
}
/**
* dlb_hw_create_dir_pool() - Allocate and initialize a DLB credit pool.
* @hw: Contains the current state of the DLB hardware.
* @args: User-provided arguments.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_create_dir_pool(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_pool_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_credit_pool *pool;
struct dlb_domain *domain;
dlb_log_create_dir_pool_args(hw, domain_id, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
/* At least one available pool */
if (dlb_verify_create_dir_pool_args(hw, domain_id, args, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
pool = DLB_DOM_LIST_HEAD(domain->avail_dir_credit_pools, typeof(*pool));
/* Verification should catch this. */
if (pool == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available dir credit pools\n",
__func__, __LINE__);
return -EFAULT;
}
dlb_configure_dir_credit_pool(hw, domain, args, pool);
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list.
*/
dlb_list_del(&domain->avail_dir_credit_pools, &pool->domain_list);
dlb_list_add(&domain->used_dir_credit_pools, &pool->domain_list);
resp->status = 0;
resp->id = pool->id;
return 0;
}
static u32 dlb_ldb_cq_inflight_count(struct dlb_hw *hw,
struct dlb_ldb_port *port)
{
union dlb_lsp_cq_ldb_infl_cnt r0;
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
return r0.field.count;
}
static u32 dlb_ldb_cq_token_count(struct dlb_hw *hw,
struct dlb_ldb_port *port)
{
union dlb_lsp_cq_ldb_tkn_cnt r0;
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_TKN_CNT(port->id));
return r0.field.token_count;
}
static int dlb_drain_ldb_cq(struct dlb_hw *hw, struct dlb_ldb_port *port)
{
u32 infl_cnt, tkn_cnt;
unsigned int i;
infl_cnt = dlb_ldb_cq_inflight_count(hw, port);
/* Account for the initial token count, which is used in order to
* provide a CQ with depth less than 8.
*/
tkn_cnt = dlb_ldb_cq_token_count(hw, port) - port->init_tkn_cnt;
if (infl_cnt || tkn_cnt) {
struct dlb_hcw hcw_mem[8], *hcw;
void *pp_addr;
pp_addr = os_map_producer_port(hw, port->id, true);
/* Point hcw to a 64B-aligned location */
hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
/* Program the first HCW for a completion and token return and
* the other HCWs as NOOPS
*/
memset(hcw, 0, 4 * sizeof(*hcw));
hcw->qe_comp = (infl_cnt > 0);
hcw->cq_token = (tkn_cnt > 0);
hcw->lock_id = tkn_cnt - 1;
/* Return tokens in the first HCW */
dlb_movdir64b(pp_addr, hcw);
hcw->cq_token = 0;
/* Issue remaining completions (if any) */
for (i = 1; i < infl_cnt; i++)
dlb_movdir64b(pp_addr, hcw);
os_fence_hcw(hw, pp_addr);
os_unmap_producer_port(hw, pp_addr);
}
return 0;
}
static int dlb_domain_drain_ldb_cqs(struct dlb_hw *hw,
struct dlb_domain *domain,
bool toggle_port)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
int ret;
/* If the domain hasn't been started, there's no traffic to drain */
if (!domain->started)
return 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
if (toggle_port)
dlb_ldb_port_cq_disable(hw, port);
ret = dlb_drain_ldb_cq(hw, port);
if (ret < 0)
return ret;
if (toggle_port)
dlb_ldb_port_cq_enable(hw, port);
}
return 0;
}
static void dlb_domain_disable_ldb_queue_write_perms(struct dlb_hw *hw,
struct dlb_domain *domain)
{
int domain_offset = domain->id * DLB_MAX_NUM_LDB_QUEUES;
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_sys_ldb_vasqid_v r0;
struct dlb_ldb_queue *queue;
r0.field.vasqid_v = 0;
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
int idx = domain_offset + queue->id;
DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(idx), r0.val);
}
}
static void dlb_domain_disable_ldb_seq_checks(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_chp_sn_chk_enbl r1;
struct dlb_ldb_port *port;
r1.field.en = 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
DLB_CSR_WR(hw,
DLB_CHP_SN_CHK_ENBL(port->id),
r1.val);
}
static void dlb_domain_disable_ldb_port_crd_updates(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_chp_ldb_pp_crd_req_state r0;
struct dlb_ldb_port *port;
r0.field.no_pp_credit_update = 1;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
r0.val);
}
static void dlb_domain_disable_ldb_port_interrupts(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_chp_ldb_cq_int_enb r0 = { {0} };
union dlb_chp_ldb_cq_wd_enb r1 = { {0} };
struct dlb_ldb_port *port;
r0.field.en_tim = 0;
r0.field.en_depth = 0;
r1.field.wd_enable = 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_INT_ENB(port->id),
r0.val);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_WD_ENB(port->id),
r1.val);
}
}
static void dlb_domain_disable_dir_queue_write_perms(struct dlb_hw *hw,
struct dlb_domain *domain)
{
int domain_offset = domain->id * DLB_MAX_NUM_DIR_PORTS;
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_sys_dir_vasqid_v r0;
struct dlb_dir_pq_pair *port;
r0.field.vasqid_v = 0;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
int idx = domain_offset + port->id;
DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(idx), r0.val);
}
}
static void dlb_domain_disable_dir_port_interrupts(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_chp_dir_cq_int_enb r0 = { {0} };
union dlb_chp_dir_cq_wd_enb r1 = { {0} };
struct dlb_dir_pq_pair *port;
r0.field.en_tim = 0;
r0.field.en_depth = 0;
r1.field.wd_enable = 0;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_INT_ENB(port->id),
r0.val);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_WD_ENB(port->id),
r1.val);
}
}
static void dlb_domain_disable_dir_port_crd_updates(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_chp_dir_pp_crd_req_state r0;
struct dlb_dir_pq_pair *port;
r0.field.no_pp_credit_update = 1;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
r0.val);
}
static void dlb_domain_disable_dir_cqs(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *port;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
port->enabled = false;
dlb_dir_port_cq_disable(hw, port);
}
}
static void dlb_domain_disable_ldb_cqs(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
port->enabled = false;
dlb_ldb_port_cq_disable(hw, port);
}
}
static void dlb_domain_enable_ldb_cqs(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
port->enabled = true;
dlb_ldb_port_cq_enable(hw, port);
}
}
static struct dlb_ldb_queue *dlb_get_ldb_queue_from_id(struct dlb_hw *hw,
u32 id)
{
if (id >= DLB_MAX_NUM_LDB_QUEUES)
return NULL;
return &hw->rsrcs.ldb_queues[id];
}
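/* Clear the {CQ, slot}'s replay-list and non-atomic-list "has work" bits so
* the scheduler no longer treats this QID as a source of work for the CQ.
*/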
static void dlb_ldb_port_clear_has_work_bits(struct dlb_hw *hw,
struct dlb_ldb_port *port,
u8 slot)
{
union dlb_lsp_ldb_sched_ctrl r2 = { {0} };
r2.field.cq = port->id;
r2.field.qidix = slot;
r2.field.value = 0;
r2.field.rlist_haswork_v = 1;
DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
memset(&r2, 0, sizeof(r2));
r2.field.cq = port->id;
r2.field.qidix = slot;
r2.field.value = 0;
r2.field.nalb_haswork_v = 1;
DLB_CSR_WR(hw, DLB_LSP_LDB_SCHED_CTRL, r2.val);
dlb_flush_csr(hw);
}
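/* Attempt to complete any in-progress QID->CQ map operations on this port.
* A map can only be finalized once the QID's inflight count reaches zero;
* the affected CQs are briefly disabled to close the race between reading
* the inflight count and the QID scheduling one more QE.
*/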
static void dlb_domain_finish_map_port(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port)
{
int i;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
union dlb_lsp_qid_ldb_infl_cnt r0;
struct dlb_ldb_queue *queue;
int qid;
if (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROGRESS)
continue;
qid = port->qid_map[i].qid;
queue = dlb_get_ldb_queue_from_id(hw, qid);
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: unable to find queue %d\n",
__func__, qid);
continue;
}
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
if (r0.field.count)
continue;
/* Disable the affected CQ, and the CQs already mapped to the
* QID, before reading the QID's inflight count a second time.
* There is an unlikely race in which the QID may schedule one
* more QE after we read an inflight count of 0, and disabling
* the CQs guarantees that the race will not occur after a
* re-read of the inflight count register.
*/
if (port->enabled)
dlb_ldb_port_cq_disable(hw, port);
dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_LDB_INFL_CNT(qid));
if (r0.field.count) {
if (port->enabled)
dlb_ldb_port_cq_enable(hw, port);
dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
continue;
}
dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
}
}
static unsigned int
dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
if (!domain->configured || domain->num_pending_additions == 0)
return 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
dlb_domain_finish_map_port(hw, domain, port);
return domain->num_pending_additions;
}
unsigned int dlb_finish_map_qid_procedures(struct dlb_hw *hw)
{
int i, num = 0;
/* Finish queue map jobs for any domain that needs it */
for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
struct dlb_domain *domain = &hw->domains[i];
num += dlb_domain_finish_map_qid_procedures(hw, domain);
}
return num;
}
static int dlb_domain_wait_for_ldb_cqs_to_empty(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
int i;
for (i = 0; i < DLB_MAX_CQ_COMP_CHECK_LOOPS; i++) {
if (dlb_ldb_cq_inflight_count(hw, port) == 0)
break;
}
if (i == DLB_MAX_CQ_COMP_CHECK_LOOPS) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to flush load-balanced port %d's completions.\n",
__func__, port->id);
return -EFAULT;
}
}
return 0;
}
static void dlb_domain_finish_unmap_port_slot(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port,
int slot)
{
enum dlb_qid_map_state state;
struct dlb_ldb_queue *queue;
queue = &hw->rsrcs.ldb_queues[port->qid_map[slot].qid];
state = port->qid_map[slot].state;
/* Update the QID2CQIDX and CQ2QID vectors */
dlb_ldb_port_unmap_qid(hw, port, queue);
/* Ensure the QID will not be serviced by this {CQ, slot} by clearing
* the has_work bits
*/
dlb_ldb_port_clear_has_work_bits(hw, port, slot);
/* Reset the {CQ, slot} to its default state */
dlb_ldb_port_set_queue_if_status(hw, port, slot);
/* Re-enable the CQ if it was not manually disabled by the user */
if (port->enabled)
dlb_ldb_port_cq_enable(hw, port);
/* If there is a mapping that is pending this slot's removal, perform
* the mapping now.
*/
if (state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP) {
struct dlb_ldb_port_qid_map *map;
struct dlb_ldb_queue *map_queue;
u8 prio;
map = &port->qid_map[slot];
map->qid = map->pending_qid;
map->priority = map->pending_priority;
map_queue = &hw->rsrcs.ldb_queues[map->qid];
prio = map->priority;
dlb_ldb_port_map_qid(hw, domain, port, map_queue, prio);
}
}
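/* Complete any in-progress QID unmap operations on this port. Unmaps can
* only be finalized once the CQ has no inflight QEs; returns false if the
* port has no pending removals or its CQ is not yet empty.
*/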
static bool dlb_domain_finish_unmap_port(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port)
{
union dlb_lsp_cq_ldb_infl_cnt r0;
int i;
if (port->num_pending_removals == 0)
return false;
/* The unmap requires all the CQ's outstanding inflights to be
* completed.
*/
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_LDB_INFL_CNT(port->id));
if (r0.field.count > 0)
return false;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
struct dlb_ldb_port_qid_map *map;
map = &port->qid_map[i];
if (map->state != DLB_QUEUE_UNMAP_IN_PROGRESS &&
map->state != DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP)
continue;
dlb_domain_finish_unmap_port_slot(hw, domain, port, i);
}
return true;
}
static unsigned int
dlb_domain_finish_unmap_qid_procedures(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
if (!domain->configured || domain->num_pending_removals == 0)
return 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
dlb_domain_finish_unmap_port(hw, domain, port);
return domain->num_pending_removals;
}
unsigned int dlb_finish_unmap_qid_procedures(struct dlb_hw *hw)
{
int i, num = 0;
/* Finish queue unmap jobs for any domain that needs it */
for (i = 0; i < DLB_MAX_NUM_DOMAINS; i++) {
struct dlb_domain *domain = &hw->domains[i];
num += dlb_domain_finish_unmap_qid_procedures(hw, domain);
}
return num;
}
/* Returns whether the queue is empty, including its inflight and replay
* counts.
*/
static bool dlb_ldb_queue_is_empty(struct dlb_hw *hw,
struct dlb_ldb_queue *queue)
{
union dlb_lsp_qid_ldb_replay_cnt r0;
union dlb_lsp_qid_aqed_active_cnt r1;
union dlb_lsp_qid_atq_enqueue_cnt r2;
union dlb_lsp_qid_ldb_enqueue_cnt r3;
union dlb_lsp_qid_ldb_infl_cnt r4;
r0.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_REPLAY_CNT(queue->id));
if (r0.val)
return false;
r1.val = DLB_CSR_RD(hw,
DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
if (r1.val)
return false;
r2.val = DLB_CSR_RD(hw,
DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
if (r2.val)
return false;
r3.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
if (r3.val)
return false;
r4.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_INFL_CNT(queue->id));
if (r4.val)
return false;
return true;
}
static bool dlb_domain_mapped_queues_empty(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_queue *queue;
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
if (queue->num_mappings == 0)
continue;
if (!dlb_ldb_queue_is_empty(hw, queue))
return false;
}
return true;
}
static int dlb_domain_drain_mapped_queues(struct dlb_hw *hw,
struct dlb_domain *domain)
{
int i, ret;
/* If the domain hasn't been started, there's no traffic to drain */
if (!domain->started)
return 0;
if (domain->num_pending_removals > 0) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to unmap domain queues\n",
__func__);
return -EFAULT;
}
for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
if (ret < 0)
return ret;
if (dlb_domain_mapped_queues_empty(hw, domain))
break;
}
if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty queues\n",
__func__);
return -EFAULT;
}
/* Drain the CQs one more time. For the queues to have gone empty, their
* remaining QEs must have been scheduled to the CQs, so drain the CQs once
* more to retire those QEs.
*/
ret = dlb_domain_drain_ldb_cqs(hw, domain, true);
if (ret < 0)
return ret;
return 0;
}
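/* Drain a non-empty queue that has no CQ mappings by temporarily mapping it
* to one of the domain's load-balanced ports (unmapping one of that port's
* QIDs first if all of its slots are in use), then draining the mapped
* queues as usual.
*/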
static int dlb_domain_drain_unmapped_queue(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_queue *queue)
{
struct dlb_ldb_port *port;
int ret;
/* If a domain has LDB queues, it must have LDB ports */
if (dlb_list_empty(&domain->used_ldb_ports)) {
DLB_HW_ERR(hw,
"[%s()] Internal error: No configured LDB ports\n",
__func__);
return -EFAULT;
}
port = DLB_DOM_LIST_HEAD(domain->used_ldb_ports, typeof(*port));
/* If necessary, free up a QID slot in this CQ */
if (port->num_mappings == DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
struct dlb_ldb_queue *mapped_queue;
mapped_queue = &hw->rsrcs.ldb_queues[port->qid_map[0].qid];
ret = dlb_ldb_port_unmap_qid(hw, port, mapped_queue);
if (ret)
return ret;
}
ret = dlb_ldb_port_map_qid_dynamic(hw, port, queue, 0);
if (ret)
return ret;
return dlb_domain_drain_mapped_queues(hw, domain);
}
static int dlb_domain_drain_unmapped_queues(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_queue *queue;
int ret;
/* If the domain hasn't been started, there's no traffic to drain */
if (!domain->started)
return 0;
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
if (queue->num_mappings != 0 ||
dlb_ldb_queue_is_empty(hw, queue))
continue;
ret = dlb_domain_drain_unmapped_queue(hw, domain, queue);
if (ret)
return ret;
}
return 0;
}
static int dlb_domain_wait_for_ldb_pool_refill(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_credit_pool *pool;
/* Confirm that all credits are returned to the domain's credit pools */
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
union dlb_chp_qed_fl_push_ptr r0;
union dlb_chp_qed_fl_pop_ptr r1;
unsigned long pop_offs, push_offs;
int i;
push_offs = DLB_CHP_QED_FL_PUSH_PTR(pool->id);
pop_offs = DLB_CHP_QED_FL_POP_PTR(pool->id);
for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
r0.val = DLB_CSR_RD(hw, push_offs);
r1.val = DLB_CSR_RD(hw, pop_offs);
/* Break early if the freelist is replenished */
if (r1.field.pop_ptr == r0.field.push_ptr &&
r1.field.generation != r0.field.generation) {
break;
}
}
/* Error if the freelist is not full */
if (r1.field.pop_ptr != r0.field.push_ptr ||
r1.field.generation == r0.field.generation) {
return -EFAULT;
}
}
return 0;
}
static int dlb_domain_wait_for_dir_pool_refill(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_credit_pool *pool;
/* Confirm that all credits are returned to the domain's credit pools */
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
union dlb_chp_dqed_fl_push_ptr r0;
union dlb_chp_dqed_fl_pop_ptr r1;
unsigned long pop_offs, push_offs;
int i;
push_offs = DLB_CHP_DQED_FL_PUSH_PTR(pool->id);
pop_offs = DLB_CHP_DQED_FL_POP_PTR(pool->id);
for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
r0.val = DLB_CSR_RD(hw, push_offs);
r1.val = DLB_CSR_RD(hw, pop_offs);
/* Break early if the freelist is replenished */
if (r1.field.pop_ptr == r0.field.push_ptr &&
r1.field.generation != r0.field.generation) {
break;
}
}
/* Error if the freelist is not full */
if (r1.field.pop_ptr != r0.field.push_ptr ||
r1.field.generation == r0.field.generation) {
return -EFAULT;
}
}
return 0;
}
static u32 dlb_dir_queue_depth(struct dlb_hw *hw,
struct dlb_dir_pq_pair *queue)
{
union dlb_lsp_qid_dir_enqueue_cnt r0;
r0.val = DLB_CSR_RD(hw, DLB_LSP_QID_DIR_ENQUEUE_CNT(queue->id));
return r0.field.count;
}
static bool dlb_dir_queue_is_empty(struct dlb_hw *hw,
struct dlb_dir_pq_pair *queue)
{
return dlb_dir_queue_depth(hw, queue) == 0;
}
static bool dlb_domain_dir_queues_empty(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *queue;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
if (!dlb_dir_queue_is_empty(hw, queue))
return false;
}
return true;
}
static u32 dlb_dir_cq_token_count(struct dlb_hw *hw,
struct dlb_dir_pq_pair *port)
{
union dlb_lsp_cq_dir_tkn_cnt r0;
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ_DIR_TKN_CNT(port->id));
return r0.field.count;
}
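/* Return any outstanding tokens on a directed CQ by enqueueing a single
* batch token return HCW (the rest of the 64B write is NOOPs) through the
* port's producer port window.
*/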
static void dlb_drain_dir_cq(struct dlb_hw *hw, struct dlb_dir_pq_pair *port)
{
unsigned int port_id = port->id;
u32 cnt;
/* Return any outstanding tokens */
cnt = dlb_dir_cq_token_count(hw, port);
if (cnt != 0) {
struct dlb_hcw hcw_mem[8], *hcw;
void *pp_addr;
pp_addr = os_map_producer_port(hw, port_id, false);
/* Point hcw to a 64B-aligned location */
hcw = (struct dlb_hcw *)((uintptr_t)&hcw_mem[4] & ~0x3F);
/* Program the first HCW for a batch token return and
* the rest as NOOPS
*/
memset(hcw, 0, 4 * sizeof(*hcw));
hcw->cq_token = 1;
hcw->lock_id = cnt - 1;
dlb_movdir64b(pp_addr, hcw);
os_fence_hcw(hw, pp_addr);
os_unmap_producer_port(hw, pp_addr);
}
}
static int dlb_domain_drain_dir_cqs(struct dlb_hw *hw,
struct dlb_domain *domain,
bool toggle_port)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *port;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter) {
/* Can't drain a port if it's not configured, and there's
* nothing to drain if its queue is unconfigured.
*/
if (!port->port_configured || !port->queue_configured)
continue;
if (toggle_port)
dlb_dir_port_cq_disable(hw, port);
dlb_drain_dir_cq(hw, port);
if (toggle_port)
dlb_dir_port_cq_enable(hw, port);
}
return 0;
}
static int dlb_domain_drain_dir_queues(struct dlb_hw *hw,
struct dlb_domain *domain)
{
int i;
/* If the domain hasn't been started, there's no traffic to drain */
if (!domain->started)
return 0;
for (i = 0; i < DLB_MAX_QID_EMPTY_CHECK_LOOPS; i++) {
dlb_domain_drain_dir_cqs(hw, domain, true);
if (dlb_domain_dir_queues_empty(hw, domain))
break;
}
if (i == DLB_MAX_QID_EMPTY_CHECK_LOOPS) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty queues\n",
__func__);
return -EFAULT;
}
/* Drain the CQs one more time. For the queues to have gone empty, their
* remaining QEs must have been scheduled to the CQs, so drain the CQs once
* more to retire those QEs.
*/
dlb_domain_drain_dir_cqs(hw, domain, true);
return 0;
}
static void dlb_domain_disable_dir_producer_ports(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *port;
union dlb_sys_dir_pp_v r1;
r1.field.pp_v = 0;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP_V(port->id),
r1.val);
}
static void dlb_domain_disable_ldb_producer_ports(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_sys_ldb_pp_v r1;
struct dlb_ldb_port *port;
r1.field.pp_v = 0;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter) {
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP_V(port->id),
r1.val);
hw->pf.num_enabled_ldb_ports--;
}
}
static void dlb_domain_disable_dir_pools(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_sys_dir_pool_enbld r0 = { {0} };
struct dlb_credit_pool *pool;
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
DLB_CSR_WR(hw,
DLB_SYS_DIR_POOL_ENBLD(pool->id),
r0.val);
}
static void dlb_domain_disable_ldb_pools(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
union dlb_sys_ldb_pool_enbld r0 = { {0} };
struct dlb_credit_pool *pool;
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
DLB_CSR_WR(hw,
DLB_SYS_LDB_POOL_ENBLD(pool->id),
r0.val);
}
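/* Issue a hardware reset for a single resource (queue, CQ, or credit pool)
* via the master broadcast reset register, then poll the diagnostic status
* register until every internal pipeline reports the reset as complete.
*/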
static int dlb_reset_hw_resource(struct dlb_hw *hw, int type, int id)
{
union dlb_cfg_mstr_diag_reset_sts r0 = { {0} };
union dlb_cfg_mstr_bcast_reset_vf_start r1 = { {0} };
int i;
r1.field.vf_reset_start = 1;
r1.field.vf_reset_type = type;
r1.field.vf_reset_id = id;
DLB_CSR_WR(hw, DLB_CFG_MSTR_BCAST_RESET_VF_START, r1.val);
/* Wait for hardware to complete. This is a finite-time operation, but set a
* loop bound just in case.
*/
for (i = 0; i < 1024 * 1024; i++) {
r0.val = DLB_CSR_RD(hw, DLB_CFG_MSTR_DIAG_RESET_STS);
if (r0.field.chp_vf_reset_done &&
r0.field.rop_vf_reset_done &&
r0.field.lsp_vf_reset_done &&
r0.field.nalb_vf_reset_done &&
r0.field.ap_vf_reset_done &&
r0.field.dp_vf_reset_done &&
r0.field.qed_vf_reset_done &&
r0.field.dqed_vf_reset_done &&
r0.field.aqed_vf_reset_done)
return 0;
os_udelay(1);
}
return -ETIMEDOUT;
}
static int dlb_domain_reset_hw_resources(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *dir_port;
struct dlb_ldb_queue *ldb_queue;
struct dlb_ldb_port *ldb_port;
struct dlb_credit_pool *pool;
int ret;
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_POOL_LDB,
pool->id);
if (ret)
return ret;
}
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_POOL_DIR,
pool->id);
if (ret)
return ret;
}
DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_QID_LDB,
ldb_queue->id);
if (ret)
return ret;
}
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_QID_DIR,
dir_port->id);
if (ret)
return ret;
}
DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_CQ_LDB,
ldb_port->id);
if (ret)
return ret;
}
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
ret = dlb_reset_hw_resource(hw,
VF_RST_TYPE_CQ_DIR,
dir_port->id);
if (ret)
return ret;
}
return 0;
}
static int dlb_domain_verify_reset_success(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *dir_port;
struct dlb_ldb_port *ldb_port;
struct dlb_credit_pool *pool;
struct dlb_ldb_queue *queue;
/* Confirm that all credits are returned to the domain's directed credit
* pools.
*/
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
union dlb_chp_dqed_fl_pop_ptr r0;
union dlb_chp_dqed_fl_push_ptr r1;
r0.val = DLB_CSR_RD(hw,
DLB_CHP_DQED_FL_POP_PTR(pool->id));
r1.val = DLB_CSR_RD(hw,
DLB_CHP_DQED_FL_PUSH_PTR(pool->id));
if (r0.field.pop_ptr != r1.field.push_ptr ||
r0.field.generation == r1.field.generation) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to refill directed pool %d's credits.\n",
__func__, pool->id);
return -EFAULT;
}
}
/* Confirm that all the domain's queue's inflight counts and AQED
* active counts are 0.
*/
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
if (!dlb_ldb_queue_is_empty(hw, queue)) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty ldb queue %d\n",
__func__, queue->id);
return -EFAULT;
}
}
/* Confirm that all the domain's CQs inflight and token counts are 0. */
DLB_DOM_LIST_FOR(domain->used_ldb_ports, ldb_port, iter) {
if (dlb_ldb_cq_inflight_count(hw, ldb_port) ||
dlb_ldb_cq_token_count(hw, ldb_port)) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty ldb port %d\n",
__func__, ldb_port->id);
return -EFAULT;
}
}
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_port, iter) {
if (!dlb_dir_queue_is_empty(hw, dir_port)) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty dir queue %d\n",
__func__, dir_port->id);
return -EFAULT;
}
if (dlb_dir_cq_token_count(hw, dir_port)) {
DLB_HW_ERR(hw,
"[%s()] Internal error: failed to empty dir port %d\n",
__func__, dir_port->id);
return -EFAULT;
}
}
return 0;
}
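/* Restore a load-balanced port's CSRs (credit state, push pointers, history
* list, CQ, and producer port registers) to their reset-default values.
*/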
static void __dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
struct dlb_ldb_port *port)
{
union dlb_chp_ldb_pp_state_reset r0 = { {0} };
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id),
DLB_CHP_LDB_PP_CRD_REQ_STATE_RST);
/* Reset the port's load-balanced and directed credit state */
r0.field.dir_type = 0;
r0.field.reset_pp_state = 1;
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_STATE_RESET(port->id),
r0.val);
r0.field.dir_type = 1;
r0.field.reset_pp_state = 1;
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_STATE_RESET(port->id),
r0.val);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id),
DLB_CHP_LDB_PP_DIR_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id),
DLB_CHP_LDB_PP_LDB_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id),
DLB_CHP_LDB_PP_LDB_CRD_LWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id),
DLB_CHP_LDB_PP_LDB_CRD_HWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_LDB_PP2POOL(port->id),
DLB_CHP_LDB_LDB_PP2POOL_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id),
DLB_CHP_LDB_PP_DIR_CRD_LWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id),
DLB_CHP_LDB_PP_DIR_CRD_HWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_DIR_PP2POOL(port->id),
DLB_CHP_LDB_DIR_PP2POOL_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP2LDBPOOL(port->id),
DLB_SYS_LDB_PP2LDBPOOL_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP2DIRPOOL(port->id),
DLB_SYS_LDB_PP2DIRPOOL_RST);
DLB_CSR_WR(hw,
DLB_CHP_HIST_LIST_LIM(port->id),
DLB_CHP_HIST_LIST_LIM_RST);
DLB_CSR_WR(hw,
DLB_CHP_HIST_LIST_BASE(port->id),
DLB_CHP_HIST_LIST_BASE_RST);
DLB_CSR_WR(hw,
DLB_CHP_HIST_LIST_POP_PTR(port->id),
DLB_CHP_HIST_LIST_POP_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_HIST_LIST_PUSH_PTR(port->id),
DLB_CHP_HIST_LIST_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_WPTR(port->id),
DLB_CHP_LDB_CQ_WPTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_INT_DEPTH_THRSH(port->id),
DLB_CHP_LDB_CQ_INT_DEPTH_THRSH_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_TMR_THRESHOLD(port->id),
DLB_CHP_LDB_CQ_TMR_THRESHOLD_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_INT_ENB(port->id),
DLB_CHP_LDB_CQ_INT_ENB_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_INFL_LIM(port->id),
DLB_LSP_CQ_LDB_INFL_LIM_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ2PRIOV(port->id),
DLB_LSP_CQ2PRIOV_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL(port->id),
DLB_LSP_CQ_LDB_TOT_SCH_CNT_CTRL_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
DLB_LSP_CQ_LDB_TKN_DEPTH_SEL_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
DLB_CHP_LDB_CQ_TKN_DEPTH_SEL_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_DSBL(port->id),
DLB_LSP_CQ_LDB_DSBL_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ2VF_PF(port->id),
DLB_SYS_LDB_CQ2VF_PF_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP2VF_PF(port->id),
DLB_SYS_LDB_PP2VF_PF_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ_ADDR_L(port->id),
DLB_SYS_LDB_CQ_ADDR_L_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ_ADDR_U(port->id),
DLB_SYS_LDB_CQ_ADDR_U_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP_ADDR_L(port->id),
DLB_SYS_LDB_PP_ADDR_L_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP_ADDR_U(port->id),
DLB_SYS_LDB_PP_ADDR_U_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP_V(port->id),
DLB_SYS_LDB_PP_V_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP2VAS(port->id),
DLB_SYS_LDB_PP2VAS_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ_ISR(port->id),
DLB_SYS_LDB_CQ_ISR_RST);
DLB_CSR_WR(hw,
DLB_SYS_WBUF_LDB_FLAGS(port->id),
DLB_SYS_WBUF_LDB_FLAGS_RST);
}
static void __dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
struct dlb_dir_pq_pair *port)
{
union dlb_chp_dir_pp_state_reset r0 = { {0} };
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
DLB_CHP_DIR_PP_CRD_REQ_STATE_RST);
/* Reset the port's load-balanced and directed credit state */
r0.field.dir_type = 0;
r0.field.reset_pp_state = 1;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_STATE_RESET(port->id),
r0.val);
r0.field.dir_type = 1;
r0.field.reset_pp_state = 1;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_STATE_RESET(port->id),
r0.val);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
DLB_CHP_DIR_PP_DIR_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
DLB_CHP_DIR_PP_LDB_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
DLB_CHP_DIR_PP_LDB_CRD_LWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
DLB_CHP_DIR_PP_LDB_CRD_HWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_LDB_PP2POOL(port->id),
DLB_CHP_DIR_LDB_PP2POOL_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
DLB_CHP_DIR_PP_DIR_CRD_LWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
DLB_CHP_DIR_PP_DIR_CRD_HWM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_DIR_PP2POOL(port->id),
DLB_CHP_DIR_DIR_PP2POOL_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2LDBPOOL(port->id),
DLB_SYS_DIR_PP2LDBPOOL_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2DIRPOOL(port->id),
DLB_SYS_DIR_PP2DIRPOOL_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_WPTR(port->id),
DLB_CHP_DIR_CQ_WPTR_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
DLB_CHP_DIR_CQ_TKN_DEPTH_SEL_RST);
DLB_CSR_WR(hw,
DLB_LSP_CQ_DIR_DSBL(port->id),
DLB_LSP_CQ_DIR_DSBL_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_WPTR(port->id),
DLB_CHP_DIR_CQ_WPTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_INT_DEPTH_THRSH(port->id),
DLB_CHP_DIR_CQ_INT_DEPTH_THRSH_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_TMR_THRESHOLD(port->id),
DLB_CHP_DIR_CQ_TMR_THRESHOLD_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_INT_ENB(port->id),
DLB_CHP_DIR_CQ_INT_ENB_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_CQ2VF_PF(port->id),
DLB_SYS_DIR_CQ2VF_PF_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2VF_PF(port->id),
DLB_SYS_DIR_PP2VF_PF_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_CQ_ADDR_L(port->id),
DLB_SYS_DIR_CQ_ADDR_L_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_CQ_ADDR_U(port->id),
DLB_SYS_DIR_CQ_ADDR_U_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP_ADDR_L(port->id),
DLB_SYS_DIR_PP_ADDR_L_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP_ADDR_U(port->id),
DLB_SYS_DIR_PP_ADDR_U_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP_V(port->id),
DLB_SYS_DIR_PP_V_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2VAS(port->id),
DLB_SYS_DIR_PP2VAS_RST);
DLB_CSR_WR(hw,
DLB_SYS_DIR_CQ_ISR(port->id),
DLB_SYS_DIR_CQ_ISR_RST);
DLB_CSR_WR(hw,
DLB_SYS_WBUF_DIR_FLAGS(port->id),
DLB_SYS_WBUF_DIR_FLAGS_RST);
}
static void dlb_domain_reset_dir_port_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *port;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
__dlb_domain_reset_dir_port_registers(hw, port);
}
static void dlb_domain_reset_ldb_queue_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_queue *queue;
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter) {
DLB_CSR_WR(hw,
DLB_AQED_PIPE_FL_LIM(queue->id),
DLB_AQED_PIPE_FL_LIM_RST);
DLB_CSR_WR(hw,
DLB_AQED_PIPE_FL_BASE(queue->id),
DLB_AQED_PIPE_FL_BASE_RST);
DLB_CSR_WR(hw,
DLB_AQED_PIPE_FL_POP_PTR(queue->id),
DLB_AQED_PIPE_FL_POP_PTR_RST);
DLB_CSR_WR(hw,
DLB_AQED_PIPE_FL_PUSH_PTR(queue->id),
DLB_AQED_PIPE_FL_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_AQED_PIPE_QID_FID_LIM(queue->id),
DLB_AQED_PIPE_QID_FID_LIM_RST);
DLB_CSR_WR(hw,
DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id),
DLB_LSP_QID_AQED_ACTIVE_LIM_RST);
DLB_CSR_WR(hw,
DLB_LSP_QID_LDB_INFL_LIM(queue->id),
DLB_LSP_QID_LDB_INFL_LIM_RST);
DLB_CSR_WR(hw,
DLB_SYS_LDB_QID_V(queue->id),
DLB_SYS_LDB_QID_V_RST);
DLB_CSR_WR(hw,
DLB_CHP_ORD_QID_SN(queue->id),
DLB_CHP_ORD_QID_SN_RST);
DLB_CSR_WR(hw,
DLB_CHP_ORD_QID_SN_MAP(queue->id),
DLB_CHP_ORD_QID_SN_MAP_RST);
DLB_CSR_WR(hw,
DLB_RO_PIPE_QID2GRPSLT(queue->id),
DLB_RO_PIPE_QID2GRPSLT_RST);
}
}
static void dlb_domain_reset_dir_queue_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_dir_pq_pair *queue;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, queue, iter) {
DLB_CSR_WR(hw,
DLB_SYS_DIR_QID_V(queue->id),
DLB_SYS_DIR_QID_V_RST);
}
}
static void dlb_domain_reset_ldb_pool_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_credit_pool *pool;
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter) {
DLB_CSR_WR(hw,
DLB_CHP_LDB_POOL_CRD_LIM(pool->id),
DLB_CHP_LDB_POOL_CRD_LIM_RST);
DLB_CSR_WR(hw,
DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
DLB_CHP_LDB_POOL_CRD_CNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_QED_FL_BASE(pool->id),
DLB_CHP_QED_FL_BASE_RST);
DLB_CSR_WR(hw,
DLB_CHP_QED_FL_LIM(pool->id),
DLB_CHP_QED_FL_LIM_RST);
DLB_CSR_WR(hw,
DLB_CHP_QED_FL_PUSH_PTR(pool->id),
DLB_CHP_QED_FL_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_QED_FL_POP_PTR(pool->id),
DLB_CHP_QED_FL_POP_PTR_RST);
}
}
static void dlb_domain_reset_dir_pool_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_credit_pool *pool;
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter) {
DLB_CSR_WR(hw,
DLB_CHP_DIR_POOL_CRD_LIM(pool->id),
DLB_CHP_DIR_POOL_CRD_LIM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
DLB_CHP_DIR_POOL_CRD_CNT_RST);
DLB_CSR_WR(hw,
DLB_CHP_DQED_FL_BASE(pool->id),
DLB_CHP_DQED_FL_BASE_RST);
DLB_CSR_WR(hw,
DLB_CHP_DQED_FL_LIM(pool->id),
DLB_CHP_DQED_FL_LIM_RST);
DLB_CSR_WR(hw,
DLB_CHP_DQED_FL_PUSH_PTR(pool->id),
DLB_CHP_DQED_FL_PUSH_PTR_RST);
DLB_CSR_WR(hw,
DLB_CHP_DQED_FL_POP_PTR(pool->id),
DLB_CHP_DQED_FL_POP_PTR_RST);
}
}
static void dlb_domain_reset_ldb_port_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
RTE_SET_USED(iter);
struct dlb_ldb_port *port;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
__dlb_domain_reset_ldb_port_registers(hw, port);
}
static void dlb_domain_reset_registers(struct dlb_hw *hw,
struct dlb_domain *domain)
{
dlb_domain_reset_ldb_port_registers(hw, domain);
dlb_domain_reset_dir_port_registers(hw, domain);
dlb_domain_reset_ldb_queue_registers(hw, domain);
dlb_domain_reset_dir_queue_registers(hw, domain);
dlb_domain_reset_ldb_pool_registers(hw, domain);
dlb_domain_reset_dir_pool_registers(hw, domain);
}
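/* Return the domain's queues, ports, credit pools, and freelist/history-list
* entries to its parent function's available lists and bitmaps, then move
* the domain itself back to the function's avail_domains list.
*/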
static int dlb_domain_reset_software_state(struct dlb_hw *hw,
struct dlb_domain *domain)
{
struct dlb_ldb_queue *tmp_ldb_queue;
RTE_SET_USED(tmp_ldb_queue);
struct dlb_dir_pq_pair *tmp_dir_port;
RTE_SET_USED(tmp_dir_port);
struct dlb_ldb_port *tmp_ldb_port;
RTE_SET_USED(tmp_ldb_port);
struct dlb_credit_pool *tmp_pool;
RTE_SET_USED(tmp_pool);
struct dlb_list_entry *iter1;
RTE_SET_USED(iter1);
struct dlb_list_entry *iter2;
RTE_SET_USED(iter2);
struct dlb_ldb_queue *ldb_queue;
struct dlb_dir_pq_pair *dir_port;
struct dlb_ldb_port *ldb_port;
struct dlb_credit_pool *pool;
struct dlb_function_resources *rsrcs;
struct dlb_list_head *list;
int ret;
rsrcs = domain->parent_func;
/* Move the domain's ldb queues to the function's avail list */
list = &domain->used_ldb_queues;
DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
if (ldb_queue->sn_cfg_valid) {
struct dlb_sn_group *grp;
grp = &hw->rsrcs.sn_groups[ldb_queue->sn_group];
dlb_sn_group_free_slot(grp, ldb_queue->sn_slot);
ldb_queue->sn_cfg_valid = false;
}
ldb_queue->owned = false;
ldb_queue->num_mappings = 0;
ldb_queue->num_pending_additions = 0;
dlb_list_del(&domain->used_ldb_queues, &ldb_queue->domain_list);
dlb_list_add(&rsrcs->avail_ldb_queues, &ldb_queue->func_list);
rsrcs->num_avail_ldb_queues++;
}
list = &domain->avail_ldb_queues;
DLB_DOM_LIST_FOR_SAFE(*list, ldb_queue, tmp_ldb_queue, iter1, iter2) {
ldb_queue->owned = false;
dlb_list_del(&domain->avail_ldb_queues,
&ldb_queue->domain_list);
dlb_list_add(&rsrcs->avail_ldb_queues,
&ldb_queue->func_list);
rsrcs->num_avail_ldb_queues++;
}
/* Move the domain's ldb ports to the function's avail list */
list = &domain->used_ldb_ports;
DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
int i;
ldb_port->owned = false;
ldb_port->configured = false;
ldb_port->num_pending_removals = 0;
ldb_port->num_mappings = 0;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
ldb_port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
dlb_list_del(&domain->used_ldb_ports, &ldb_port->domain_list);
dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
rsrcs->num_avail_ldb_ports++;
}
list = &domain->avail_ldb_ports;
DLB_DOM_LIST_FOR_SAFE(*list, ldb_port, tmp_ldb_port, iter1, iter2) {
ldb_port->owned = false;
dlb_list_del(&domain->avail_ldb_ports, &ldb_port->domain_list);
dlb_list_add(&rsrcs->avail_ldb_ports, &ldb_port->func_list);
rsrcs->num_avail_ldb_ports++;
}
/* Move the domain's dir ports to the function's avail list */
list = &domain->used_dir_pq_pairs;
DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
dir_port->owned = false;
dir_port->port_configured = false;
dlb_list_del(&domain->used_dir_pq_pairs,
&dir_port->domain_list);
dlb_list_add(&rsrcs->avail_dir_pq_pairs,
&dir_port->func_list);
rsrcs->num_avail_dir_pq_pairs++;
}
list = &domain->avail_dir_pq_pairs;
DLB_DOM_LIST_FOR_SAFE(*list, dir_port, tmp_dir_port, iter1, iter2) {
dir_port->owned = false;
dlb_list_del(&domain->avail_dir_pq_pairs,
&dir_port->domain_list);
dlb_list_add(&rsrcs->avail_dir_pq_pairs,
&dir_port->func_list);
rsrcs->num_avail_dir_pq_pairs++;
}
/* Return hist list entries to the function */
ret = dlb_bitmap_set_range(rsrcs->avail_hist_list_entries,
domain->hist_list_entry_base,
domain->total_hist_list_entries);
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain hist list base does not match the function's bitmap.\n",
__func__);
return -EFAULT;
}
domain->total_hist_list_entries = 0;
domain->avail_hist_list_entries = 0;
domain->hist_list_entry_base = 0;
domain->hist_list_entry_offset = 0;
/* Return QED entries to the function */
ret = dlb_bitmap_set_range(rsrcs->avail_qed_freelist_entries,
domain->qed_freelist.base,
(domain->qed_freelist.bound -
domain->qed_freelist.base));
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain QED base does not match the function's bitmap.\n",
__func__);
return -EFAULT;
}
domain->qed_freelist.base = 0;
domain->qed_freelist.bound = 0;
domain->qed_freelist.offset = 0;
/* Return DQED entries back to the function */
ret = dlb_bitmap_set_range(rsrcs->avail_dqed_freelist_entries,
domain->dqed_freelist.base,
(domain->dqed_freelist.bound -
domain->dqed_freelist.base));
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain DQED base does not match the function's bitmap.\n",
__func__);
return -EFAULT;
}
domain->dqed_freelist.base = 0;
domain->dqed_freelist.bound = 0;
domain->dqed_freelist.offset = 0;
/* Return AQED entries back to the function */
ret = dlb_bitmap_set_range(rsrcs->avail_aqed_freelist_entries,
domain->aqed_freelist.base,
(domain->aqed_freelist.bound -
domain->aqed_freelist.base));
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: domain AQED base does not match the function's bitmap.\n",
__func__);
return -EFAULT;
}
domain->aqed_freelist.base = 0;
domain->aqed_freelist.bound = 0;
domain->aqed_freelist.offset = 0;
/* Return ldb credit pools back to the function's avail list */
list = &domain->used_ldb_credit_pools;
DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
pool->owned = false;
pool->configured = false;
dlb_list_del(&domain->used_ldb_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_ldb_credit_pools,
&pool->func_list);
rsrcs->num_avail_ldb_credit_pools++;
}
list = &domain->avail_ldb_credit_pools;
DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
pool->owned = false;
dlb_list_del(&domain->avail_ldb_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_ldb_credit_pools,
&pool->func_list);
rsrcs->num_avail_ldb_credit_pools++;
}
/* Move dir credit pools back to the function */
list = &domain->used_dir_credit_pools;
DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
pool->owned = false;
pool->configured = false;
dlb_list_del(&domain->used_dir_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_dir_credit_pools,
&pool->func_list);
rsrcs->num_avail_dir_credit_pools++;
}
list = &domain->avail_dir_credit_pools;
DLB_DOM_LIST_FOR_SAFE(*list, pool, tmp_pool, iter1, iter2) {
pool->owned = false;
dlb_list_del(&domain->avail_dir_credit_pools,
&pool->domain_list);
dlb_list_add(&rsrcs->avail_dir_credit_pools,
&pool->func_list);
rsrcs->num_avail_dir_credit_pools++;
}
domain->num_pending_removals = 0;
domain->num_pending_additions = 0;
domain->configured = false;
domain->started = false;
/* Move the domain out of the used_domains list and back to the
* function's avail_domains list.
*/
dlb_list_del(&rsrcs->used_domains, &domain->func_list);
dlb_list_add(&rsrcs->avail_domains, &domain->func_list);
rsrcs->num_avail_domains++;
return 0;
}
static void dlb_log_reset_domain(struct dlb_hw *hw, u32 domain_id)
{
DLB_HW_INFO(hw, "DLB reset domain:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
}
/**
* dlb_reset_domain() - Reset a DLB scheduling domain and its associated
* hardware resources.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
*
* Note: User software *must* stop sending to this domain's producer ports
* before invoking this function, otherwise undefined behavior will result.
*
* Return: returns < 0 on error, 0 otherwise.
*/
int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id)
{
struct dlb_domain *domain;
int ret;
dlb_log_reset_domain(hw, domain_id);
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL || !domain->configured)
return -EINVAL;
/* For each queue owned by this domain, disable its write permissions to
* cause any traffic sent to it to be dropped. Well-behaved software
* should not be sending QEs at this point.
*/
dlb_domain_disable_dir_queue_write_perms(hw, domain);
dlb_domain_disable_ldb_queue_write_perms(hw, domain);
/* Disable credit updates and turn off completion tracking on all the
* domain's PPs.
*/
dlb_domain_disable_dir_port_crd_updates(hw, domain);
dlb_domain_disable_ldb_port_crd_updates(hw, domain);
dlb_domain_disable_dir_port_interrupts(hw, domain);
dlb_domain_disable_ldb_port_interrupts(hw, domain);
dlb_domain_disable_ldb_seq_checks(hw, domain);
/* Disable the LDB CQs and drain them in order to complete the map and
* unmap procedures, which require zero CQ inflights and zero QID
* inflights respectively.
*/
dlb_domain_disable_ldb_cqs(hw, domain);
ret = dlb_domain_drain_ldb_cqs(hw, domain, false);
if (ret < 0)
return ret;
ret = dlb_domain_wait_for_ldb_cqs_to_empty(hw, domain);
if (ret < 0)
return ret;
ret = dlb_domain_finish_unmap_qid_procedures(hw, domain);
if (ret < 0)
return ret;
ret = dlb_domain_finish_map_qid_procedures(hw, domain);
if (ret < 0)
return ret;
/* Re-enable the CQs in order to drain the mapped queues. */
dlb_domain_enable_ldb_cqs(hw, domain);
ret = dlb_domain_drain_mapped_queues(hw, domain);
if (ret < 0)
return ret;
ret = dlb_domain_drain_unmapped_queues(hw, domain);
if (ret < 0)
return ret;
ret = dlb_domain_wait_for_ldb_pool_refill(hw, domain);
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: LDB credits failed to refill\n",
__func__);
return ret;
}
/* Done draining LDB QEs, so disable the CQs. */
dlb_domain_disable_ldb_cqs(hw, domain);
/* Directed queues are reset in dlb_domain_reset_hw_resources(), but
* that process does not decrement the directed queue size counters used
* by SMON for its average DQED depth measurement. So, we manually drain
* the directed queues here.
*/
dlb_domain_drain_dir_queues(hw, domain);
ret = dlb_domain_wait_for_dir_pool_refill(hw, domain);
if (ret) {
DLB_HW_ERR(hw,
"[%s()] Internal error: DIR credits failed to refill\n",
__func__);
return ret;
}
/* Done draining DIR QEs, so disable the CQs. */
dlb_domain_disable_dir_cqs(hw, domain);
dlb_domain_disable_dir_producer_ports(hw, domain);
dlb_domain_disable_ldb_producer_ports(hw, domain);
dlb_domain_disable_dir_pools(hw, domain);
dlb_domain_disable_ldb_pools(hw, domain);
/* Reset the QID, credit pool, and CQ hardware.
*
* Note: DLB 1.0 A0 h/w does not disarm CQ interrupts during sched
* domain reset.
* A spurious interrupt can occur on subsequent use of a reset CQ.
*/
ret = dlb_domain_reset_hw_resources(hw, domain);
if (ret)
return ret;
ret = dlb_domain_verify_reset_success(hw, domain);
if (ret)
return ret;
dlb_domain_reset_registers(hw, domain);
/* Hardware reset complete. Reset the domain's software state */
ret = dlb_domain_reset_software_state(hw, domain);
if (ret)
return ret;
return 0;
}
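/* Illustrative caller sketch for dlb_reset_domain() (not part of the driver;
* "dev" and quiesce_domain_producers() are hypothetical): a PF driver's
* teardown path would stop all producer-port traffic for the domain first,
* then reset it, e.g.:
*
*	quiesce_domain_producers(dev, domain_id);
*	ret = dlb_reset_domain(&dev->hw, domain_id);
*	if (ret)
*		DLB_HW_ERR(&dev->hw, "domain %u reset failed: %d\n",
*			   domain_id, ret);
*/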
void dlb_hw_get_num_resources(struct dlb_hw *hw,
struct dlb_get_num_resources_args *arg)
{
struct dlb_function_resources *rsrcs;
struct dlb_bitmap *map;
rsrcs = &hw->pf;
arg->num_sched_domains = rsrcs->num_avail_domains;
arg->num_ldb_queues = rsrcs->num_avail_ldb_queues;
arg->num_ldb_ports = rsrcs->num_avail_ldb_ports;
arg->num_dir_ports = rsrcs->num_avail_dir_pq_pairs;
map = rsrcs->avail_aqed_freelist_entries;
arg->num_atomic_inflights = dlb_bitmap_count(map);
arg->max_contiguous_atomic_inflights =
dlb_bitmap_longest_set_range(map);
map = rsrcs->avail_hist_list_entries;
arg->num_hist_list_entries = dlb_bitmap_count(map);
arg->max_contiguous_hist_list_entries =
dlb_bitmap_longest_set_range(map);
map = rsrcs->avail_qed_freelist_entries;
arg->num_ldb_credits = dlb_bitmap_count(map);
arg->max_contiguous_ldb_credits = dlb_bitmap_longest_set_range(map);
map = rsrcs->avail_dqed_freelist_entries;
arg->num_dir_credits = dlb_bitmap_count(map);
arg->max_contiguous_dir_credits = dlb_bitmap_longest_set_range(map);
arg->num_ldb_credit_pools = rsrcs->num_avail_ldb_credit_pools;
arg->num_dir_credit_pools = rsrcs->num_avail_dir_credit_pools;
}
void dlb_hw_disable_vf_to_pf_isr_pend_err(struct dlb_hw *hw)
{
union dlb_sys_sys_alarm_int_enable r0;
r0.val = DLB_CSR_RD(hw, DLB_SYS_SYS_ALARM_INT_ENABLE);
r0.field.vf_to_pf_isr_pend_error = 0;
DLB_CSR_WR(hw, DLB_SYS_SYS_ALARM_INT_ENABLE, r0.val);
}
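/* Program a load-balanced queue's CSRs: leave QID write permissions off
* until the domain is started, set the QID and atomic (AQED) inflight
* limits, initialize the atomic freelist pointers, configure the sequence
* number group/slot mapping, and finally mark the QID valid.
*/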
static void dlb_configure_ldb_queue(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_queue *queue,
struct dlb_create_ldb_queue_args *args)
{
union dlb_sys_ldb_vasqid_v r0 = { {0} };
union dlb_lsp_qid_ldb_infl_lim r1 = { {0} };
union dlb_lsp_qid_aqed_active_lim r2 = { {0} };
union dlb_aqed_pipe_fl_lim r3 = { {0} };
union dlb_aqed_pipe_fl_base r4 = { {0} };
union dlb_chp_ord_qid_sn_map r7 = { {0} };
union dlb_sys_ldb_qid_cfg_v r10 = { {0} };
union dlb_sys_ldb_qid_v r11 = { {0} };
union dlb_aqed_pipe_fl_push_ptr r5 = { {0} };
union dlb_aqed_pipe_fl_pop_ptr r6 = { {0} };
union dlb_aqed_pipe_qid_fid_lim r8 = { {0} };
union dlb_ro_pipe_qid2grpslt r9 = { {0} };
struct dlb_sn_group *sn_group;
unsigned int offs;
/* QID write permissions are turned on when the domain is started */
r0.field.vasqid_v = 0;
offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + queue->id;
DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
/*
* Unordered QIDs get 4K inflights, ordered get as many as the number
* of sequence numbers.
*/
r1.field.limit = args->num_qid_inflights;
DLB_CSR_WR(hw, DLB_LSP_QID_LDB_INFL_LIM(queue->id), r1.val);
r2.field.limit = queue->aqed_freelist.bound -
queue->aqed_freelist.base;
if (r2.field.limit > DLB_MAX_NUM_AQOS_ENTRIES)
r2.field.limit = DLB_MAX_NUM_AQOS_ENTRIES;
/* AQOS */
DLB_CSR_WR(hw, DLB_LSP_QID_AQED_ACTIVE_LIM(queue->id), r2.val);
r3.field.freelist_disable = 0;
r3.field.limit = queue->aqed_freelist.bound - 1;
DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_LIM(queue->id), r3.val);
r4.field.base = queue->aqed_freelist.base;
DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_BASE(queue->id), r4.val);
r5.field.push_ptr = r4.field.base;
r5.field.generation = 1;
DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_PUSH_PTR(queue->id), r5.val);
r6.field.pop_ptr = r4.field.base;
r6.field.generation = 0;
DLB_CSR_WR(hw, DLB_AQED_PIPE_FL_POP_PTR(queue->id), r6.val);
/* Configure SNs */
sn_group = &hw->rsrcs.sn_groups[queue->sn_group];
r7.field.mode = sn_group->mode;
r7.field.slot = queue->sn_slot;
r7.field.grp = sn_group->id;
DLB_CSR_WR(hw, DLB_CHP_ORD_QID_SN_MAP(queue->id), r7.val);
/*
* This register limits the number of inflight flows a queue can have
* at one time. It has an upper bound of 2048, but can be
* over-subscribed. 512 is chosen so that a single queue doesn't use
* the entire atomic storage, but can use a substantial portion if
* needed.
*/
r8.field.qid_fid_limit = 512;
DLB_CSR_WR(hw, DLB_AQED_PIPE_QID_FID_LIM(queue->id), r8.val);
r9.field.group = sn_group->id;
r9.field.slot = queue->sn_slot;
DLB_CSR_WR(hw, DLB_RO_PIPE_QID2GRPSLT(queue->id), r9.val);
r10.field.sn_cfg_v = (args->num_sequence_numbers != 0);
r10.field.fid_cfg_v = (args->num_atomic_inflights != 0);
DLB_CSR_WR(hw, DLB_SYS_LDB_QID_CFG_V(queue->id), r10.val);
r11.field.qid_v = 1;
DLB_CSR_WR(hw, DLB_SYS_LDB_QID_V(queue->id), r11.val);
}
int dlb_get_group_sequence_numbers(struct dlb_hw *hw, unsigned int group_id)
{
if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
return -EINVAL;
return hw->rsrcs.sn_groups[group_id].sequence_numbers_per_queue;
}
int dlb_get_group_sequence_number_occupancy(struct dlb_hw *hw,
unsigned int group_id)
{
if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
return -EINVAL;
return dlb_sn_group_used_slots(&hw->rsrcs.sn_groups[group_id]);
}
static void dlb_log_set_group_sequence_numbers(struct dlb_hw *hw,
unsigned int group_id,
unsigned long val)
{
DLB_HW_INFO(hw, "DLB set group sequence numbers:\n");
DLB_HW_INFO(hw, "\tGroup ID: %u\n", group_id);
DLB_HW_INFO(hw, "\tValue: %lu\n", val);
}
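/**
 * dlb_set_group_sequence_numbers() - Configure the number of sequence
 *	numbers allocated to each queue in a sequence number group.
 * @hw: Contains the current state of the DLB hardware.
 * @group_id: Sequence number group ID.
 * @val: Sequence numbers per queue (32, 64, 128, 256, 512, or 1024).
 *
 * Return: returns < 0 on error, 0 otherwise. The configuration can only be
 * changed while none of the group's slots are in use.
 */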
int dlb_set_group_sequence_numbers(struct dlb_hw *hw,
unsigned int group_id,
unsigned long val)
{
u32 valid_allocations[6] = {32, 64, 128, 256, 512, 1024};
union dlb_ro_pipe_grp_sn_mode r0 = { {0} };
struct dlb_sn_group *group;
int mode;
if (group_id >= DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS)
return -EINVAL;
group = &hw->rsrcs.sn_groups[group_id];
/* Once the first load-balanced queue using an SN group is configured,
* the group cannot be changed.
*/
if (group->slot_use_bitmap != 0)
return -EPERM;
for (mode = 0; mode < DLB_MAX_NUM_SEQUENCE_NUMBER_MODES; mode++)
if (val == valid_allocations[mode])
break;
if (mode == DLB_MAX_NUM_SEQUENCE_NUMBER_MODES)
return -EINVAL;
group->mode = mode;
group->sequence_numbers_per_queue = val;
r0.field.sn_mode_0 = hw->rsrcs.sn_groups[0].mode;
r0.field.sn_mode_1 = hw->rsrcs.sn_groups[1].mode;
r0.field.sn_mode_2 = hw->rsrcs.sn_groups[2].mode;
r0.field.sn_mode_3 = hw->rsrcs.sn_groups[3].mode;
DLB_CSR_WR(hw, DLB_RO_PIPE_GRP_SN_MODE, r0.val);
dlb_log_set_group_sequence_numbers(hw, group_id, val);
return 0;
}
static int
dlb_ldb_queue_attach_to_sn_group(struct dlb_hw *hw,
struct dlb_ldb_queue *queue,
struct dlb_create_ldb_queue_args *args)
{
int slot = -1;
int i;
queue->sn_cfg_valid = false;
if (args->num_sequence_numbers == 0)
return 0;
for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
if (group->sequence_numbers_per_queue ==
args->num_sequence_numbers &&
!dlb_sn_group_full(group)) {
slot = dlb_sn_group_alloc_slot(group);
if (slot >= 0)
break;
}
}
if (slot == -1) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no sequence number slots available\n",
__func__, __LINE__);
return -EFAULT;
}
queue->sn_cfg_valid = true;
queue->sn_group = i;
queue->sn_slot = slot;
return 0;
}
static int
dlb_ldb_queue_attach_resources(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_queue *queue,
struct dlb_create_ldb_queue_args *args)
{
int ret;
ret = dlb_ldb_queue_attach_to_sn_group(hw, queue, args);
if (ret)
return ret;
/* Attach QID inflights */
queue->num_qid_inflights = args->num_qid_inflights;
/* Attach atomic inflights */
queue->aqed_freelist.base = domain->aqed_freelist.base +
domain->aqed_freelist.offset;
queue->aqed_freelist.bound = queue->aqed_freelist.base +
args->num_atomic_inflights;
domain->aqed_freelist.offset += args->num_atomic_inflights;
return 0;
}
static int
dlb_verify_create_ldb_queue_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_queue_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_freelist *aqed_freelist;
struct dlb_domain *domain;
int i;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
if (dlb_list_empty(&domain->avail_ldb_queues)) {
resp->status = DLB_ST_LDB_QUEUES_UNAVAILABLE;
return -1;
}
if (args->num_sequence_numbers) {
for (i = 0; i < DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS; i++) {
struct dlb_sn_group *group = &hw->rsrcs.sn_groups[i];
if (group->sequence_numbers_per_queue ==
args->num_sequence_numbers &&
!dlb_sn_group_full(group))
break;
}
if (i == DLB_MAX_NUM_SEQUENCE_NUMBER_GROUPS) {
resp->status = DLB_ST_SEQUENCE_NUMBERS_UNAVAILABLE;
return -1;
}
}
if (args->num_qid_inflights > 4096) {
resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
return -1;
}
/* Inflights must be <= number of sequence numbers if ordered */
if (args->num_sequence_numbers != 0 &&
args->num_qid_inflights > args->num_sequence_numbers) {
resp->status = DLB_ST_INVALID_QID_INFLIGHT_ALLOCATION;
return -1;
}
aqed_freelist = &domain->aqed_freelist;
if (dlb_freelist_count(aqed_freelist) < args->num_atomic_inflights) {
resp->status = DLB_ST_ATOMIC_INFLIGHTS_UNAVAILABLE;
return -1;
}
return 0;
}
static void
dlb_log_create_ldb_queue_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_queue_args *args)
{
DLB_HW_INFO(hw, "DLB create load-balanced queue arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n",
domain_id);
DLB_HW_INFO(hw, "\tNumber of sequence numbers: %d\n",
args->num_sequence_numbers);
DLB_HW_INFO(hw, "\tNumber of QID inflights: %d\n",
args->num_qid_inflights);
DLB_HW_INFO(hw, "\tNumber of ATM inflights: %d\n",
args->num_atomic_inflights);
}
/**
* dlb_hw_create_ldb_queue() - Allocate and initialize a DLB LDB queue.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_create_ldb_queue(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_queue_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_ldb_queue *queue;
struct dlb_domain *domain;
int ret;
dlb_log_create_ldb_queue_args(hw, domain_id, args);
/* Verify that hardware resources (e.g., at least one available queue) exist
* before attempting to satisfy the request. This simplifies the error
* unwinding code.
*/
if (dlb_verify_create_ldb_queue_args(hw, domain_id, args, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
queue = DLB_DOM_LIST_HEAD(domain->avail_ldb_queues, typeof(*queue));
/* Verification should catch this. */
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available ldb queues\n",
__func__, __LINE__);
return -EFAULT;
}
ret = dlb_ldb_queue_attach_resources(hw, domain, queue, args);
if (ret < 0) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: failed to attach the ldb queue resources\n",
__func__, __LINE__);
return ret;
}
dlb_configure_ldb_queue(hw, domain, queue, args);
queue->num_mappings = 0;
queue->configured = true;
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list.
*/
dlb_list_del(&domain->avail_ldb_queues, &queue->domain_list);
dlb_list_add(&domain->used_ldb_queues, &queue->domain_list);
resp->status = 0;
resp->id = queue->id;
return 0;
}
static void
dlb_log_create_dir_queue_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_queue_args *args)
{
DLB_HW_INFO(hw, "DLB create directed queue arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
}
static struct dlb_dir_pq_pair *
dlb_get_domain_used_dir_pq(u32 id, struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
struct dlb_dir_pq_pair *port;
RTE_SET_USED(iter);
if (id >= DLB_MAX_NUM_DIR_PORTS)
return NULL;
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, port, iter)
if (port->id == id)
return port;
return NULL;
}
static int
dlb_verify_create_dir_queue_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_queue_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
/* If the user claims the port is already configured, validate the port
* ID, its domain, and whether the port is configured.
*/
if (args->port_id != -1) {
struct dlb_dir_pq_pair *port;
port = dlb_get_domain_used_dir_pq(args->port_id, domain);
if (port == NULL || port->domain_id != domain->id ||
!port->port_configured) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -1;
}
}
/* If the queue's port is not configured, validate that a free
* port-queue pair is available.
*/
if (args->port_id == -1 &&
dlb_list_empty(&domain->avail_dir_pq_pairs)) {
resp->status = DLB_ST_DIR_QUEUES_UNAVAILABLE;
return -1;
}
return 0;
}
static void dlb_configure_dir_queue(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_dir_pq_pair *queue)
{
union dlb_sys_dir_vasqid_v r0 = { {0} };
union dlb_sys_dir_qid_v r1 = { {0} };
unsigned int offs;
/* QID write permissions are turned on when the domain is started */
r0.field.vasqid_v = 0;
offs = (domain->id * DLB_MAX_NUM_DIR_PORTS) + queue->id;
DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
r1.field.qid_v = 1;
DLB_CSR_WR(hw, DLB_SYS_DIR_QID_V(queue->id), r1.val);
queue->queue_configured = true;
}
/**
* dlb_hw_create_dir_queue() - Allocate and initialize a DLB DIR queue.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_create_dir_queue(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_queue_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_dir_pq_pair *queue;
struct dlb_domain *domain;
dlb_log_create_dir_queue_args(hw, domain_id, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_create_dir_queue_args(hw, domain_id, args, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
if (args->port_id != -1)
queue = dlb_get_domain_used_dir_pq(args->port_id, domain);
else
queue = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
typeof(*queue));
/* Verification should catch this. */
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available dir queues\n",
__func__, __LINE__);
return -EFAULT;
}
dlb_configure_dir_queue(hw, domain, queue);
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list (if it's not already there).
*/
if (args->port_id == -1) {
dlb_list_del(&domain->avail_dir_pq_pairs, &queue->domain_list);
dlb_list_add(&domain->used_dir_pq_pairs, &queue->domain_list);
}
resp->status = 0;
resp->id = queue->id;
return 0;
}
static void dlb_log_create_ldb_port_args(struct dlb_hw *hw,
u32 domain_id,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_ldb_port_args *args)
{
DLB_HW_INFO(hw, "DLB create load-balanced port arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n",
domain_id);
DLB_HW_INFO(hw, "\tLDB credit pool ID: %d\n",
args->ldb_credit_pool_id);
DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
args->ldb_credit_high_watermark);
DLB_HW_INFO(hw, "\tLDB credit low watermark: %d\n",
args->ldb_credit_low_watermark);
DLB_HW_INFO(hw, "\tLDB credit quantum: %d\n",
args->ldb_credit_quantum);
DLB_HW_INFO(hw, "\tDIR credit pool ID: %d\n",
args->dir_credit_pool_id);
DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
args->dir_credit_high_watermark);
DLB_HW_INFO(hw, "\tDIR credit low watermark: %d\n",
args->dir_credit_low_watermark);
DLB_HW_INFO(hw, "\tDIR credit quantum: %d\n",
args->dir_credit_quantum);
DLB_HW_INFO(hw, "\tpop_count_address: 0x%"PRIx64"\n",
pop_count_dma_base);
DLB_HW_INFO(hw, "\tCQ depth: %d\n",
args->cq_depth);
DLB_HW_INFO(hw, "\tCQ hist list size: %d\n",
args->cq_history_list_size);
DLB_HW_INFO(hw, "\tCQ base address: 0x%"PRIx64"\n",
cq_dma_base);
}
static struct dlb_credit_pool *
dlb_get_domain_ldb_pool(u32 id, struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
struct dlb_credit_pool *pool;
RTE_SET_USED(iter);
if (id >= DLB_MAX_NUM_LDB_CREDIT_POOLS)
return NULL;
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
if (pool->id == id)
return pool;
return NULL;
}
static struct dlb_credit_pool *
dlb_get_domain_dir_pool(u32 id, struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
struct dlb_credit_pool *pool;
RTE_SET_USED(iter);
if (id >= DLB_MAX_NUM_DIR_CREDIT_POOLS)
return NULL;
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
if (pool->id == id)
return pool;
return NULL;
}
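/* Validate load-balanced port creation arguments: domain state, port and
* credit availability, watermark/quantum ordering, DMA base alignment, CQ
* depth, and history list sizing. Sets resp->status and returns -1 on failure.
*/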
static int
dlb_verify_create_ldb_port_args(struct dlb_hw *hw,
u32 domain_id,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_ldb_port_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
struct dlb_credit_pool *pool;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
if (dlb_list_empty(&domain->avail_ldb_ports)) {
resp->status = DLB_ST_LDB_PORTS_UNAVAILABLE;
return -1;
}
/* If the scheduling domain has no LDB queues, we configure the
* hardware to not supply the port with any LDB credits. In that
* case, ignore the LDB credit arguments.
*/
if (!dlb_list_empty(&domain->used_ldb_queues) ||
!dlb_list_empty(&domain->avail_ldb_queues)) {
pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (pool == NULL || !pool->configured ||
pool->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
return -1;
}
if (args->ldb_credit_high_watermark > pool->avail_credits) {
resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
return -1;
}
if (args->ldb_credit_low_watermark >=
args->ldb_credit_high_watermark) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
return -1;
}
if (args->ldb_credit_quantum >=
args->ldb_credit_high_watermark) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
return -1;
}
if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
return -1;
}
}
/* Likewise, if the scheduling domain has no DIR queues, we configure
* the hardware to not supply the port with any DIR credits. In that
* case, ignore the DIR credit arguments.
*/
if (!dlb_list_empty(&domain->used_dir_pq_pairs) ||
!dlb_list_empty(&domain->avail_dir_pq_pairs)) {
pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
domain);
if (pool == NULL || !pool->configured ||
pool->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
return -1;
}
if (args->dir_credit_high_watermark > pool->avail_credits) {
resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
return -1;
}
if (args->dir_credit_low_watermark >=
args->dir_credit_high_watermark) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
return -1;
}
if (args->dir_credit_quantum >=
args->dir_credit_high_watermark) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
return -1;
}
if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
return -1;
}
}
/* Check cache-line alignment */
if ((pop_count_dma_base & 0x3F) != 0) {
resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
return -1;
}
if ((cq_dma_base & 0x3F) != 0) {
resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
return -1;
}
if (args->cq_depth != 1 &&
args->cq_depth != 2 &&
args->cq_depth != 4 &&
args->cq_depth != 8 &&
args->cq_depth != 16 &&
args->cq_depth != 32 &&
args->cq_depth != 64 &&
args->cq_depth != 128 &&
args->cq_depth != 256 &&
args->cq_depth != 512 &&
args->cq_depth != 1024) {
resp->status = DLB_ST_INVALID_CQ_DEPTH;
return -1;
}
/* The history list size must be >= 1 */
if (!args->cq_history_list_size) {
resp->status = DLB_ST_INVALID_HIST_LIST_DEPTH;
return -1;
}
if (args->cq_history_list_size > domain->avail_hist_list_entries) {
resp->status = DLB_ST_HIST_LIST_ENTRIES_UNAVAILABLE;
return -1;
}
return 0;
}
static void dlb_ldb_pool_update_credit_count(struct dlb_hw *hw,
u32 pool_id,
u32 count)
{
hw->rsrcs.ldb_credit_pools[pool_id].avail_credits -= count;
}
static void dlb_dir_pool_update_credit_count(struct dlb_hw *hw,
u32 pool_id,
u32 count)
{
hw->rsrcs.dir_credit_pools[pool_id].avail_credits -= count;
}
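/* Program the load-balanced port's producer port (PP) registers: credit pool
* bindings, credit watermarks and quanta, initial credit counts, and push
* pointers, then mark the PP valid.
*/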
static int dlb_ldb_port_configure_pp(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port,
struct dlb_create_ldb_port_args *args)
{
union dlb_sys_ldb_pp2ldbpool r0 = { {0} };
union dlb_sys_ldb_pp2dirpool r1 = { {0} };
union dlb_sys_ldb_pp2vf_pf r2 = { {0} };
union dlb_sys_ldb_pp2vas r3 = { {0} };
union dlb_sys_ldb_pp_v r4 = { {0} };
union dlb_chp_ldb_pp_ldb_crd_hwm r6 = { {0} };
union dlb_chp_ldb_pp_dir_crd_hwm r7 = { {0} };
union dlb_chp_ldb_pp_ldb_crd_lwm r8 = { {0} };
union dlb_chp_ldb_pp_dir_crd_lwm r9 = { {0} };
union dlb_chp_ldb_pp_ldb_min_crd_qnt r10 = { {0} };
union dlb_chp_ldb_pp_dir_min_crd_qnt r11 = { {0} };
union dlb_chp_ldb_pp_ldb_crd_cnt r12 = { {0} };
union dlb_chp_ldb_pp_dir_crd_cnt r13 = { {0} };
union dlb_chp_ldb_ldb_pp2pool r14 = { {0} };
union dlb_chp_ldb_dir_pp2pool r15 = { {0} };
union dlb_chp_ldb_pp_crd_req_state r16 = { {0} };
union dlb_chp_ldb_pp_ldb_push_ptr r17 = { {0} };
union dlb_chp_ldb_pp_dir_push_ptr r18 = { {0} };
struct dlb_credit_pool *ldb_pool = NULL;
struct dlb_credit_pool *dir_pool = NULL;
if (port->ldb_pool_used) {
ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (ldb_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
}
if (port->dir_pool_used) {
dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
domain);
if (dir_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
}
r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP2LDBPOOL(port->id), r0.val);
r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP2DIRPOOL(port->id), r1.val);
r2.field.is_pf = 1;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VF_PF(port->id), r2.val);
r3.field.vas = domain->id;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP2VAS(port->id), r3.val);
r6.field.hwm = args->ldb_credit_high_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_HWM(port->id), r6.val);
r7.field.hwm = args->dir_credit_high_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_HWM(port->id), r7.val);
r8.field.lwm = args->ldb_credit_low_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_LWM(port->id), r8.val);
r9.field.lwm = args->dir_credit_low_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_LWM(port->id), r9.val);
r10.field.quanta = args->ldb_credit_quantum;
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_LDB_MIN_CRD_QNT(port->id),
r10.val);
r11.field.quanta = args->dir_credit_quantum;
DLB_CSR_WR(hw,
DLB_CHP_LDB_PP_DIR_MIN_CRD_QNT(port->id),
r11.val);
r12.field.count = args->ldb_credit_high_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_CRD_CNT(port->id), r12.val);
r13.field.count = args->dir_credit_high_watermark;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_CRD_CNT(port->id), r13.val);
r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
DLB_CSR_WR(hw, DLB_CHP_LDB_LDB_PP2POOL(port->id), r14.val);
r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
DLB_CSR_WR(hw, DLB_CHP_LDB_DIR_PP2POOL(port->id), r15.val);
r16.field.no_pp_credit_update = 0;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_CRD_REQ_STATE(port->id), r16.val);
r17.field.push_pointer = 0;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_LDB_PUSH_PTR(port->id), r17.val);
r18.field.push_pointer = 0;
DLB_CSR_WR(hw, DLB_CHP_LDB_PP_DIR_PUSH_PTR(port->id), r18.val);
r4.field.pp_v = 1;
DLB_CSR_WR(hw,
DLB_SYS_LDB_PP_V(port->id),
r4.val);
return 0;
}
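/* Program the load-balanced port's CQ registers: CQ base address, token depth
* select, history list bounds and pointers, inflight limit, and pop count
* address, and reset the port's QID map slots to unmapped.
*/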
static int dlb_ldb_port_configure_cq(struct dlb_hw *hw,
struct dlb_ldb_port *port,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_ldb_port_args *args)
{
int i;
union dlb_sys_ldb_cq_addr_l r0 = { {0} };
union dlb_sys_ldb_cq_addr_u r1 = { {0} };
union dlb_sys_ldb_cq2vf_pf r2 = { {0} };
union dlb_chp_ldb_cq_tkn_depth_sel r3 = { {0} };
union dlb_chp_hist_list_lim r4 = { {0} };
union dlb_chp_hist_list_base r5 = { {0} };
union dlb_lsp_cq_ldb_infl_lim r6 = { {0} };
union dlb_lsp_cq2priov r7 = { {0} };
union dlb_chp_hist_list_push_ptr r8 = { {0} };
union dlb_chp_hist_list_pop_ptr r9 = { {0} };
union dlb_lsp_cq_ldb_tkn_depth_sel r10 = { {0} };
union dlb_sys_ldb_pp_addr_l r11 = { {0} };
union dlb_sys_ldb_pp_addr_u r12 = { {0} };
/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
r0.field.addr_l = cq_dma_base >> 6;
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ_ADDR_L(port->id),
r0.val);
r1.field.addr_u = cq_dma_base >> 32;
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ_ADDR_U(port->id),
r1.val);
r2.field.is_pf = 1;
DLB_CSR_WR(hw,
DLB_SYS_LDB_CQ2VF_PF(port->id),
r2.val);
if (args->cq_depth <= 8) {
r3.field.token_depth_select = 1;
} else if (args->cq_depth == 16) {
r3.field.token_depth_select = 2;
} else if (args->cq_depth == 32) {
r3.field.token_depth_select = 3;
} else if (args->cq_depth == 64) {
r3.field.token_depth_select = 4;
} else if (args->cq_depth == 128) {
r3.field.token_depth_select = 5;
} else if (args->cq_depth == 256) {
r3.field.token_depth_select = 6;
} else if (args->cq_depth == 512) {
r3.field.token_depth_select = 7;
} else if (args->cq_depth == 1024) {
r3.field.token_depth_select = 8;
} else {
DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
__func__, __LINE__);
return -EFAULT;
}
DLB_CSR_WR(hw,
DLB_CHP_LDB_CQ_TKN_DEPTH_SEL(port->id),
r3.val);
r10.field.token_depth_select = r3.field.token_depth_select;
r10.field.ignore_depth = 0;
/* TDT algorithm: DLB must be able to write CQs with depth < 4 */
r10.field.enab_shallow_cq = 1;
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_TKN_DEPTH_SEL(port->id),
r10.val);
/* To support CQs with depth less than 8, program the token count
* register with a non-zero initial value. Operations such as domain
* reset must take this initial value into account when quiescing the
* CQ.
*/
port->init_tkn_cnt = 0;
if (args->cq_depth < 8) {
union dlb_lsp_cq_ldb_tkn_cnt r13 = { {0} };
port->init_tkn_cnt = 8 - args->cq_depth;
r13.field.token_count = port->init_tkn_cnt;
DLB_CSR_WR(hw,
DLB_LSP_CQ_LDB_TKN_CNT(port->id),
r13.val);
}
r4.field.limit = port->hist_list_entry_limit - 1;
DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_LIM(port->id), r4.val);
r5.field.base = port->hist_list_entry_base;
DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_BASE(port->id), r5.val);
r8.field.push_ptr = r5.field.base;
r8.field.generation = 0;
DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_PUSH_PTR(port->id), r8.val);
r9.field.pop_ptr = r5.field.base;
r9.field.generation = 0;
DLB_CSR_WR(hw, DLB_CHP_HIST_LIST_POP_PTR(port->id), r9.val);
/* The inflight limit sets a cap on the number of QEs for which this CQ
* can owe completions at one time.
*/
r6.field.limit = args->cq_history_list_size;
DLB_CSR_WR(hw, DLB_LSP_CQ_LDB_INFL_LIM(port->id), r6.val);
/* Disable the port's QID mappings */
r7.field.v = 0;
DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r7.val);
/* Two cache lines (128B) are dedicated for the port's pop counts */
r11.field.addr_l = pop_count_dma_base >> 7;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_L(port->id), r11.val);
r12.field.addr_u = pop_count_dma_base >> 32;
DLB_CSR_WR(hw, DLB_SYS_LDB_PP_ADDR_U(port->id), r12.val);
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++)
port->qid_map[i].state = DLB_QUEUE_UNMAPPED;
return 0;
}
static void dlb_update_ldb_arb_threshold(struct dlb_hw *hw)
{
union dlb_lsp_ctrl_config_0 r0 = { {0} };
/* From the hardware spec:
* "The optimal value for ldb_arb_threshold is in the region of {8 *
* #CQs}. It is expected therefore that the PF will change this value
* dynamically as the number of active ports changes."
*/
r0.val = DLB_CSR_RD(hw, DLB_LSP_CTRL_CONFIG_0);
r0.field.ldb_arb_threshold = hw->pf.num_enabled_ldb_ports * 8;
r0.field.ldb_arb_ignore_empty = 1;
r0.field.ldb_arb_mode = 1;
DLB_CSR_WR(hw, DLB_LSP_CTRL_CONFIG_0, r0.val);
dlb_flush_csr(hw);
}
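/* Configure a load-balanced port: carve out its history list entries, deduct
* credits from the selected pools, program the CQ and PP registers, and
* enable the CQ.
*/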
static int dlb_configure_ldb_port(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_ldb_port *port,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_ldb_port_args *args)
{
struct dlb_credit_pool *ldb_pool, *dir_pool;
int ret;
port->hist_list_entry_base = domain->hist_list_entry_base +
domain->hist_list_entry_offset;
port->hist_list_entry_limit = port->hist_list_entry_base +
args->cq_history_list_size;
domain->hist_list_entry_offset += args->cq_history_list_size;
domain->avail_hist_list_entries -= args->cq_history_list_size;
port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
!dlb_list_empty(&domain->avail_ldb_queues);
port->dir_pool_used = !dlb_list_empty(&domain->used_dir_pq_pairs) ||
!dlb_list_empty(&domain->avail_dir_pq_pairs);
if (port->ldb_pool_used) {
u32 cnt = args->ldb_credit_high_watermark;
ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (ldb_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
} else {
args->ldb_credit_high_watermark = 0;
args->ldb_credit_low_watermark = 0;
args->ldb_credit_quantum = 0;
}
if (port->dir_pool_used) {
u32 cnt = args->dir_credit_high_watermark;
dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
domain);
if (dir_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
dlb_dir_pool_update_credit_count(hw, dir_pool->id, cnt);
} else {
args->dir_credit_high_watermark = 0;
args->dir_credit_low_watermark = 0;
args->dir_credit_quantum = 0;
}
ret = dlb_ldb_port_configure_cq(hw,
port,
pop_count_dma_base,
cq_dma_base,
args);
if (ret < 0)
return ret;
ret = dlb_ldb_port_configure_pp(hw, domain, port, args);
if (ret < 0)
return ret;
dlb_ldb_port_cq_enable(hw, port);
port->num_mappings = 0;
port->enabled = true;
hw->pf.num_enabled_ldb_ports++;
dlb_update_ldb_arb_threshold(hw);
port->configured = true;
return 0;
}
/**
* dlb_hw_create_ldb_port() - Allocate and initialize a load-balanced port and
* its resources.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @pop_count_dma_base: Base DMA address for the port's pop counts.
* @cq_dma_base: Base DMA address for the port's consumer queue.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_create_ldb_port(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_ldb_port_args *args,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_cmd_response *resp)
{
struct dlb_ldb_port *port;
struct dlb_domain *domain;
int ret;
dlb_log_create_ldb_port_args(hw,
domain_id,
pop_count_dma_base,
cq_dma_base,
args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_create_ldb_port_args(hw,
domain_id,
pop_count_dma_base,
cq_dma_base,
args,
resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
port = DLB_DOM_LIST_HEAD(domain->avail_ldb_ports, typeof(*port));
/* Verification should catch this. */
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available ldb ports\n",
__func__, __LINE__);
return -EFAULT;
}
if (port->configured) {
DLB_HW_ERR(hw,
"[%s()] Internal error: avail_ldb_ports contains configured ports.\n",
__func__);
return -EFAULT;
}
ret = dlb_configure_ldb_port(hw,
domain,
port,
pop_count_dma_base,
cq_dma_base,
args);
if (ret < 0)
return ret;
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list.
*/
dlb_list_del(&domain->avail_ldb_ports, &port->domain_list);
dlb_list_add(&domain->used_ldb_ports, &port->domain_list);
resp->status = 0;
resp->id = port->id;
return 0;
}
static void dlb_log_create_dir_port_args(struct dlb_hw *hw,
u32 domain_id,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_dir_port_args *args)
{
DLB_HW_INFO(hw, "DLB create directed port arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n",
domain_id);
DLB_HW_INFO(hw, "\tLDB credit pool ID: %d\n",
args->ldb_credit_pool_id);
DLB_HW_INFO(hw, "\tLDB credit high watermark: %d\n",
args->ldb_credit_high_watermark);
DLB_HW_INFO(hw, "\tLDB credit low watermark: %d\n",
args->ldb_credit_low_watermark);
DLB_HW_INFO(hw, "\tLDB credit quantum: %d\n",
args->ldb_credit_quantum);
DLB_HW_INFO(hw, "\tDIR credit pool ID: %d\n",
args->dir_credit_pool_id);
DLB_HW_INFO(hw, "\tDIR credit high watermark: %d\n",
args->dir_credit_high_watermark);
DLB_HW_INFO(hw, "\tDIR credit low watermark: %d\n",
args->dir_credit_low_watermark);
DLB_HW_INFO(hw, "\tDIR credit quantum: %d\n",
args->dir_credit_quantum);
DLB_HW_INFO(hw, "\tpop_count_address: 0x%"PRIx64"\n",
pop_count_dma_base);
DLB_HW_INFO(hw, "\tCQ depth: %d\n",
args->cq_depth);
DLB_HW_INFO(hw, "\tCQ base address: 0x%"PRIx64"\n",
cq_dma_base);
}
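/* Validate directed port creation arguments: domain state, queue/port
* availability, credit pool and watermark checks, DMA base alignment, and CQ
* depth. Sets resp->status and returns -1 on failure.
*/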
static int
dlb_verify_create_dir_port_args(struct dlb_hw *hw,
u32 domain_id,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_dir_port_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
struct dlb_credit_pool *pool;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
/* If the user claims the queue is already configured, validate
* the queue ID, its domain, and whether the queue is configured.
*/
if (args->queue_id != -1) {
struct dlb_dir_pq_pair *queue;
queue = dlb_get_domain_used_dir_pq(args->queue_id,
domain);
if (queue == NULL || queue->domain_id != domain->id ||
!queue->queue_configured) {
resp->status = DLB_ST_INVALID_DIR_QUEUE_ID;
return -1;
}
}
/* If the port's queue is not configured, validate that a free
* port-queue pair is available.
*/
if (args->queue_id == -1 &&
dlb_list_empty(&domain->avail_dir_pq_pairs)) {
resp->status = DLB_ST_DIR_PORTS_UNAVAILABLE;
return -1;
}
/* If the scheduling domain has no LDB queues, we configure the
* hardware to not supply the port with any LDB credits. In that
* case, ignore the LDB credit arguments.
*/
if (!dlb_list_empty(&domain->used_ldb_queues) ||
!dlb_list_empty(&domain->avail_ldb_queues)) {
pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (pool == NULL || !pool->configured ||
pool->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_POOL_ID;
return -1;
}
if (args->ldb_credit_high_watermark > pool->avail_credits) {
resp->status = DLB_ST_LDB_CREDITS_UNAVAILABLE;
return -1;
}
if (args->ldb_credit_low_watermark >=
args->ldb_credit_high_watermark) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_LOW_WATERMARK;
return -1;
}
if (args->ldb_credit_quantum >=
args->ldb_credit_high_watermark) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
return -1;
}
if (args->ldb_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
resp->status = DLB_ST_INVALID_LDB_CREDIT_QUANTUM;
return -1;
}
}
pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
domain);
if (pool == NULL || !pool->configured ||
pool->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_POOL_ID;
return -1;
}
if (args->dir_credit_high_watermark > pool->avail_credits) {
resp->status = DLB_ST_DIR_CREDITS_UNAVAILABLE;
return -1;
}
if (args->dir_credit_low_watermark >= args->dir_credit_high_watermark) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_LOW_WATERMARK;
return -1;
}
if (args->dir_credit_quantum >= args->dir_credit_high_watermark) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
return -1;
}
if (args->dir_credit_quantum > DLB_MAX_PORT_CREDIT_QUANTUM) {
resp->status = DLB_ST_INVALID_DIR_CREDIT_QUANTUM;
return -1;
}
/* Check cache-line alignment */
if ((pop_count_dma_base & 0x3F) != 0) {
resp->status = DLB_ST_INVALID_POP_COUNT_VIRT_ADDR;
return -1;
}
if ((cq_dma_base & 0x3F) != 0) {
resp->status = DLB_ST_INVALID_CQ_VIRT_ADDR;
return -1;
}
if (args->cq_depth != 8 &&
args->cq_depth != 16 &&
args->cq_depth != 32 &&
args->cq_depth != 64 &&
args->cq_depth != 128 &&
args->cq_depth != 256 &&
args->cq_depth != 512 &&
args->cq_depth != 1024) {
resp->status = DLB_ST_INVALID_CQ_DEPTH;
return -1;
}
return 0;
}
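/* Program the directed port's producer port (PP) registers; the same credit
* pool, watermark, quantum, count, and push pointer setup as the
* load-balanced PP, using the DIR register set.
*/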
static int dlb_dir_port_configure_pp(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_dir_pq_pair *port,
struct dlb_create_dir_port_args *args)
{
union dlb_sys_dir_pp2ldbpool r0 = { {0} };
union dlb_sys_dir_pp2dirpool r1 = { {0} };
union dlb_sys_dir_pp2vf_pf r2 = { {0} };
union dlb_sys_dir_pp2vas r3 = { {0} };
union dlb_sys_dir_pp_v r4 = { {0} };
union dlb_chp_dir_pp_ldb_crd_hwm r6 = { {0} };
union dlb_chp_dir_pp_dir_crd_hwm r7 = { {0} };
union dlb_chp_dir_pp_ldb_crd_lwm r8 = { {0} };
union dlb_chp_dir_pp_dir_crd_lwm r9 = { {0} };
union dlb_chp_dir_pp_ldb_min_crd_qnt r10 = { {0} };
union dlb_chp_dir_pp_dir_min_crd_qnt r11 = { {0} };
union dlb_chp_dir_pp_ldb_crd_cnt r12 = { {0} };
union dlb_chp_dir_pp_dir_crd_cnt r13 = { {0} };
union dlb_chp_dir_ldb_pp2pool r14 = { {0} };
union dlb_chp_dir_dir_pp2pool r15 = { {0} };
union dlb_chp_dir_pp_crd_req_state r16 = { {0} };
union dlb_chp_dir_pp_ldb_push_ptr r17 = { {0} };
union dlb_chp_dir_pp_dir_push_ptr r18 = { {0} };
struct dlb_credit_pool *ldb_pool = NULL;
struct dlb_credit_pool *dir_pool = NULL;
if (port->ldb_pool_used) {
ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (ldb_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
}
if (port->dir_pool_used) {
dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id,
domain);
if (dir_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
}
r0.field.ldbpool = (port->ldb_pool_used) ? ldb_pool->id : 0;
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2LDBPOOL(port->id),
r0.val);
r1.field.dirpool = (port->dir_pool_used) ? dir_pool->id : 0;
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2DIRPOOL(port->id),
r1.val);
r2.field.is_pf = 1;
r2.field.is_hw_dsi = 0;
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2VF_PF(port->id),
r2.val);
r3.field.vas = domain->id;
DLB_CSR_WR(hw,
DLB_SYS_DIR_PP2VAS(port->id),
r3.val);
r6.field.hwm = args->ldb_credit_high_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_CRD_HWM(port->id),
r6.val);
r7.field.hwm = args->dir_credit_high_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_CRD_HWM(port->id),
r7.val);
r8.field.lwm = args->ldb_credit_low_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_CRD_LWM(port->id),
r8.val);
r9.field.lwm = args->dir_credit_low_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_CRD_LWM(port->id),
r9.val);
r10.field.quanta = args->ldb_credit_quantum;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_MIN_CRD_QNT(port->id),
r10.val);
r11.field.quanta = args->dir_credit_quantum;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_MIN_CRD_QNT(port->id),
r11.val);
r12.field.count = args->ldb_credit_high_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_CRD_CNT(port->id),
r12.val);
r13.field.count = args->dir_credit_high_watermark;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_CRD_CNT(port->id),
r13.val);
r14.field.pool = (port->ldb_pool_used) ? ldb_pool->id : 0;
DLB_CSR_WR(hw,
DLB_CHP_DIR_LDB_PP2POOL(port->id),
r14.val);
r15.field.pool = (port->dir_pool_used) ? dir_pool->id : 0;
DLB_CSR_WR(hw,
DLB_CHP_DIR_DIR_PP2POOL(port->id),
r15.val);
r16.field.no_pp_credit_update = 0;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_CRD_REQ_STATE(port->id),
r16.val);
r17.field.push_pointer = 0;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_LDB_PUSH_PTR(port->id),
r17.val);
r18.field.push_pointer = 0;
DLB_CSR_WR(hw,
DLB_CHP_DIR_PP_DIR_PUSH_PTR(port->id),
r18.val);
r4.field.pp_v = 1;
r4.field.mb_dm = 0;
DLB_CSR_WR(hw, DLB_SYS_DIR_PP_V(port->id), r4.val);
return 0;
}
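/* Program the directed port's CQ registers: CQ base address, token depth
* select, and pop count address.
*/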
static int dlb_dir_port_configure_cq(struct dlb_hw *hw,
struct dlb_dir_pq_pair *port,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_dir_port_args *args)
{
union dlb_sys_dir_cq_addr_l r0 = { {0} };
union dlb_sys_dir_cq_addr_u r1 = { {0} };
union dlb_sys_dir_cq2vf_pf r2 = { {0} };
union dlb_chp_dir_cq_tkn_depth_sel r3 = { {0} };
union dlb_lsp_cq_dir_tkn_depth_sel_dsi r4 = { {0} };
union dlb_sys_dir_pp_addr_l r5 = { {0} };
union dlb_sys_dir_pp_addr_u r6 = { {0} };
/* The CQ address is 64B-aligned, and the DLB only wants bits [63:6] */
r0.field.addr_l = cq_dma_base >> 6;
DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_L(port->id), r0.val);
r1.field.addr_u = cq_dma_base >> 32;
DLB_CSR_WR(hw, DLB_SYS_DIR_CQ_ADDR_U(port->id), r1.val);
r2.field.is_pf = 1;
DLB_CSR_WR(hw, DLB_SYS_DIR_CQ2VF_PF(port->id), r2.val);
if (args->cq_depth == 8) {
r3.field.token_depth_select = 1;
} else if (args->cq_depth == 16) {
r3.field.token_depth_select = 2;
} else if (args->cq_depth == 32) {
r3.field.token_depth_select = 3;
} else if (args->cq_depth == 64) {
r3.field.token_depth_select = 4;
} else if (args->cq_depth == 128) {
r3.field.token_depth_select = 5;
} else if (args->cq_depth == 256) {
r3.field.token_depth_select = 6;
} else if (args->cq_depth == 512) {
r3.field.token_depth_select = 7;
} else if (args->cq_depth == 1024) {
r3.field.token_depth_select = 8;
} else {
DLB_HW_ERR(hw, "[%s():%d] Internal error: invalid CQ depth\n",
__func__, __LINE__);
return -EFAULT;
}
DLB_CSR_WR(hw,
DLB_CHP_DIR_CQ_TKN_DEPTH_SEL(port->id),
r3.val);
r4.field.token_depth_select = r3.field.token_depth_select;
r4.field.disable_wb_opt = 0;
DLB_CSR_WR(hw,
DLB_LSP_CQ_DIR_TKN_DEPTH_SEL_DSI(port->id),
r4.val);
/* Two cache lines (128B) are dedicated for the port's pop counts */
r5.field.addr_l = pop_count_dma_base >> 7;
DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_L(port->id), r5.val);
r6.field.addr_u = pop_count_dma_base >> 32;
DLB_CSR_WR(hw, DLB_SYS_DIR_PP_ADDR_U(port->id), r6.val);
return 0;
}
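/* Configure a directed port: deduct credits from the selected LDB/DIR pools,
* program the CQ and PP registers, and enable the CQ.
*/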
static int dlb_configure_dir_port(struct dlb_hw *hw,
struct dlb_domain *domain,
struct dlb_dir_pq_pair *port,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_create_dir_port_args *args)
{
struct dlb_credit_pool *ldb_pool, *dir_pool;
int ret;
port->ldb_pool_used = !dlb_list_empty(&domain->used_ldb_queues) ||
!dlb_list_empty(&domain->avail_ldb_queues);
/* Each directed port has a directed queue, hence this port requires
* directed credits.
*/
port->dir_pool_used = true;
if (port->ldb_pool_used) {
u32 cnt = args->ldb_credit_high_watermark;
ldb_pool = dlb_get_domain_ldb_pool(args->ldb_credit_pool_id,
domain);
if (ldb_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
dlb_ldb_pool_update_credit_count(hw, ldb_pool->id, cnt);
} else {
args->ldb_credit_high_watermark = 0;
args->ldb_credit_low_watermark = 0;
args->ldb_credit_quantum = 0;
}
dir_pool = dlb_get_domain_dir_pool(args->dir_credit_pool_id, domain);
if (dir_pool == NULL) {
DLB_HW_ERR(hw,
"[%s()] Internal error: port validation failed\n",
__func__);
return -EFAULT;
}
dlb_dir_pool_update_credit_count(hw,
dir_pool->id,
args->dir_credit_high_watermark);
ret = dlb_dir_port_configure_cq(hw,
port,
pop_count_dma_base,
cq_dma_base,
args);
if (ret < 0)
return ret;
ret = dlb_dir_port_configure_pp(hw, domain, port, args);
if (ret < 0)
return ret;
dlb_dir_port_cq_enable(hw, port);
port->enabled = true;
port->port_configured = true;
return 0;
}
/**
* dlb_hw_create_dir_port() - Allocate and initialize a DLB directed port and
* queue. The port/queue pair have the same ID and name.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @pop_count_dma_base: Base DMA address for the port's pop counts.
* @cq_dma_base: Base DMA address for the port's consumer queue.
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_create_dir_port(struct dlb_hw *hw,
u32 domain_id,
struct dlb_create_dir_port_args *args,
u64 pop_count_dma_base,
u64 cq_dma_base,
struct dlb_cmd_response *resp)
{
struct dlb_dir_pq_pair *port;
struct dlb_domain *domain;
int ret;
dlb_log_create_dir_port_args(hw,
domain_id,
pop_count_dma_base,
cq_dma_base,
args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_create_dir_port_args(hw,
domain_id,
pop_count_dma_base,
cq_dma_base,
args,
resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
if (args->queue_id != -1)
port = dlb_get_domain_used_dir_pq(args->queue_id,
domain);
else
port = DLB_DOM_LIST_HEAD(domain->avail_dir_pq_pairs,
typeof(*port));
/* Verification should catch this. */
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available dir ports\n",
__func__, __LINE__);
return -EFAULT;
}
ret = dlb_configure_dir_port(hw,
domain,
port,
pop_count_dma_base,
cq_dma_base,
args);
if (ret < 0)
return ret;
/* Configuration succeeded, so move the resource from the 'avail' to
* the 'used' list (if it's not already there).
*/
if (args->queue_id == -1) {
dlb_list_del(&domain->avail_dir_pq_pairs, &port->domain_list);
dlb_list_add(&domain->used_dir_pq_pairs, &port->domain_list);
}
resp->status = 0;
resp->id = port->id;
return 0;
}
static struct dlb_ldb_port *
dlb_get_domain_used_ldb_port(u32 id, struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
struct dlb_ldb_port *port;
RTE_SET_USED(iter);
if (id >= DLB_MAX_NUM_LDB_PORTS)
return NULL;
DLB_DOM_LIST_FOR(domain->used_ldb_ports, port, iter)
if (port->id == id)
return port;
DLB_DOM_LIST_FOR(domain->avail_ldb_ports, port, iter)
if (port->id == id)
return port;
return NULL;
}
static void
dlb_log_pending_port_unmaps_args(struct dlb_hw *hw,
struct dlb_pending_port_unmaps_args *args)
{
DLB_HW_INFO(hw, "DLB pending port unmaps arguments:\n");
DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
}
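/**
* dlb_hw_pending_port_unmaps() - Return the number of QID unmap operations
* still outstanding for a load-balanced port.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user; on success resp->id holds the pending count.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/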
int dlb_hw_pending_port_unmaps(struct dlb_hw *hw,
u32 domain_id,
struct dlb_pending_port_unmaps_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
struct dlb_ldb_port *port;
dlb_log_pending_port_unmaps_args(hw, args);
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -EINVAL;
}
port = dlb_get_domain_used_ldb_port(args->port_id, domain);
if (port == NULL || !port->configured) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -EINVAL;
}
resp->id = port->num_pending_removals;
return 0;
}
static void dlb_log_unmap_qid(struct dlb_hw *hw,
u32 domain_id,
struct dlb_unmap_qid_args *args)
{
DLB_HW_INFO(hw, "DLB unmap QID arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n",
domain_id);
DLB_HW_INFO(hw, "\tPort ID: %d\n",
args->port_id);
DLB_HW_INFO(hw, "\tQueue ID: %d\n",
args->qid);
if (args->qid < DLB_MAX_NUM_LDB_QUEUES)
DLB_HW_INFO(hw, "\tQueue's num mappings: %d\n",
hw->rsrcs.ldb_queues[args->qid].num_mappings);
}
static struct dlb_ldb_queue *dlb_get_domain_ldb_queue(u32 id,
struct dlb_domain *domain)
{
struct dlb_list_entry *iter;
struct dlb_ldb_queue *queue;
RTE_SET_USED(iter);
if (id >= DLB_MAX_NUM_LDB_QUEUES)
return NULL;
DLB_DOM_LIST_FOR(domain->used_ldb_queues, queue, iter)
if (queue->id == id)
return queue;
return NULL;
}
static bool
dlb_port_find_slot_with_pending_map_queue(struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
int *slot)
{
int i;
for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
struct dlb_ldb_port_qid_map *map = &port->qid_map[i];
if (map->state == DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP &&
map->pending_qid == queue->id)
break;
}
*slot = i;
return (i < DLB_MAX_NUM_QIDS_PER_LDB_CQ);
}
static int dlb_verify_unmap_qid_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_unmap_qid_args *args,
struct dlb_cmd_response *resp)
{
enum dlb_qid_map_state state;
struct dlb_domain *domain;
struct dlb_ldb_port *port;
struct dlb_ldb_queue *queue;
int slot;
int id;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
id = args->port_id;
port = dlb_get_domain_used_ldb_port(id, domain);
if (port == NULL || !port->configured) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -1;
}
if (port->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -1;
}
queue = dlb_get_domain_ldb_queue(args->qid, domain);
if (queue == NULL || !queue->configured) {
DLB_HW_ERR(hw, "[%s()] Can't unmap unconfigured queue %d\n",
__func__, args->qid);
resp->status = DLB_ST_INVALID_QID;
return -1;
}
/* Verify that the port has the queue mapped. From the application's
* perspective a queue is mapped if it is actually mapped, the map is
* in progress, or the map is blocked pending an unmap.
*/
state = DLB_QUEUE_MAPPED;
if (dlb_port_find_slot_queue(port, state, queue, &slot))
return 0;
state = DLB_QUEUE_MAP_IN_PROGRESS;
if (dlb_port_find_slot_queue(port, state, queue, &slot))
return 0;
if (dlb_port_find_slot_with_pending_map_queue(port, queue, &slot))
return 0;
resp->status = DLB_ST_INVALID_QID;
return -1;
}
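/**
* dlb_hw_unmap_qid() - Unmap a load-balanced queue from a load-balanced port.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user.
*
* The removal is asynchronous if the CQ still owes completions; in that case
* the slot is marked unmap-in-progress and finished later, possibly by the
* worker thread.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/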
int dlb_hw_unmap_qid(struct dlb_hw *hw,
u32 domain_id,
struct dlb_unmap_qid_args *args,
struct dlb_cmd_response *resp)
{
enum dlb_qid_map_state state;
struct dlb_ldb_queue *queue;
struct dlb_ldb_port *port;
struct dlb_domain *domain;
bool unmap_complete;
int i, ret, id;
dlb_log_unmap_qid(hw, domain_id, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_unmap_qid_args(hw, domain_id, args, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
id = args->port_id;
port = dlb_get_domain_used_ldb_port(id, domain);
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port not found\n",
__func__, __LINE__);
return -EFAULT;
}
queue = dlb_get_domain_ldb_queue(args->qid, domain);
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: queue not found\n",
__func__, __LINE__);
return -EFAULT;
}
/* If the queue hasn't been mapped yet, we need to update the slot's
* state and re-enable the queue's inflights.
*/
state = DLB_QUEUE_MAP_IN_PROGRESS;
if (dlb_port_find_slot_queue(port, state, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
/* Since the in-progress map was aborted, re-enable the QID's
* inflights.
*/
if (queue->num_pending_additions == 0)
dlb_ldb_queue_set_inflight_limit(hw, queue);
state = DLB_QUEUE_UNMAPPED;
ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
if (ret)
return ret;
goto unmap_qid_done;
}
/* If the queue mapping is on hold pending an unmap, we simply need to
* update the slot's state.
*/
if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
state = DLB_QUEUE_UNMAP_IN_PROGRESS;
ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
if (ret)
return ret;
goto unmap_qid_done;
}
state = DLB_QUEUE_MAPPED;
if (!dlb_port_find_slot_queue(port, state, queue, &i)) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: no available CQ slots\n",
__func__, __LINE__);
return -EFAULT;
}
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
/* QID->CQ mapping removal is an asynchronous procedure. It requires
* stopping the DLB from scheduling this CQ, draining all inflights
* from the CQ, then unmapping the queue from the CQ. This function
* simply marks the port as needing the queue unmapped, and (if
* necessary) starts the unmapping worker thread.
*/
dlb_ldb_port_cq_disable(hw, port);
state = DLB_QUEUE_UNMAP_IN_PROGRESS;
ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
if (ret)
return ret;
/* Attempt to finish the unmapping now, in case the port has no
* outstanding inflights. If that's not the case, this will fail and
* the unmapping will be completed at a later time.
*/
unmap_complete = dlb_domain_finish_unmap_port(hw, domain, port);
/* If the unmapping couldn't complete immediately, launch the worker
* thread (if it isn't already launched) to finish it later.
*/
if (!unmap_complete && !os_worker_active(hw))
os_schedule_work(hw);
unmap_qid_done:
resp->status = 0;
return 0;
}
static void dlb_log_map_qid(struct dlb_hw *hw,
u32 domain_id,
struct dlb_map_qid_args *args)
{
DLB_HW_INFO(hw, "DLB map QID arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tPort ID: %d\n", args->port_id);
DLB_HW_INFO(hw, "\tQueue ID: %d\n", args->qid);
DLB_HW_INFO(hw, "\tPriority: %d\n", args->priority);
}
static int dlb_verify_map_qid_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_map_qid_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
struct dlb_ldb_port *port;
struct dlb_ldb_queue *queue;
int id;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
id = args->port_id;
port = dlb_get_domain_used_ldb_port(id, domain);
if (port == NULL || !port->configured) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -1;
}
if (args->priority >= DLB_QID_PRIORITIES) {
resp->status = DLB_ST_INVALID_PRIORITY;
return -1;
}
queue = dlb_get_domain_ldb_queue(args->qid, domain);
if (queue == NULL || !queue->configured) {
resp->status = DLB_ST_INVALID_QID;
return -1;
}
if (queue->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_QID;
return -1;
}
if (port->domain_id != domain->id) {
resp->status = DLB_ST_INVALID_PORT_ID;
return -1;
}
return 0;
}
static int dlb_verify_start_domain_args(struct dlb_hw *hw,
u32 domain_id,
struct dlb_cmd_response *resp)
{
struct dlb_domain *domain;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -1;
}
if (!domain->configured) {
resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
return -1;
}
if (domain->started) {
resp->status = DLB_ST_DOMAIN_STARTED;
return -1;
}
return 0;
}
static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
struct dlb_ldb_queue *queue,
struct dlb_cmd_response *resp)
{
enum dlb_qid_map_state state;
int i;
/* Unused slot available? */
if (port->num_mappings < DLB_MAX_NUM_QIDS_PER_LDB_CQ)
return 0;
/* If the queue is already mapped (from the application's perspective),
* this is simply a priority update.
*/
state = DLB_QUEUE_MAPPED;
if (dlb_port_find_slot_queue(port, state, queue, &i))
return 0;
state = DLB_QUEUE_MAP_IN_PROGRESS;
if (dlb_port_find_slot_queue(port, state, queue, &i))
return 0;
if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i))
return 0;
/* If the slot contains an unmap in progress, it's considered
* available.
*/
state = DLB_QUEUE_UNMAP_IN_PROGRESS;
if (dlb_port_find_slot(port, state, &i))
return 0;
state = DLB_QUEUE_UNMAPPED;
if (dlb_port_find_slot(port, state, &i))
return 0;
resp->status = DLB_ST_NO_QID_SLOTS_AVAILABLE;
return -EINVAL;
}
static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
struct dlb_ldb_port *port,
int slot,
struct dlb_map_qid_args *args)
{
union dlb_lsp_cq2priov r0;
/* Read-modify-write the priority and valid bit register */
r0.val = DLB_CSR_RD(hw, DLB_LSP_CQ2PRIOV(port->id));
r0.field.v |= 1 << slot;
r0.field.prio |= (args->priority & 0x7) << slot * 3;
DLB_CSR_WR(hw, DLB_LSP_CQ2PRIOV(port->id), r0.val);
dlb_flush_csr(hw);
port->qid_map[slot].priority = args->priority;
}
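/**
* dlb_hw_map_qid() - Map a load-balanced queue to a load-balanced port.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments (port ID, queue ID, and priority).
* @resp: Response to user.
*
* If the queue is already mapped (or a map is already in flight) this reduces
* to a priority update; if no slot is free the mapping is queued behind an
* in-progress unmap.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/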
int dlb_hw_map_qid(struct dlb_hw *hw,
u32 domain_id,
struct dlb_map_qid_args *args,
struct dlb_cmd_response *resp)
{
enum dlb_qid_map_state state;
struct dlb_ldb_queue *queue;
struct dlb_ldb_port *port;
struct dlb_domain *domain;
int ret, i, id;
u8 prio;
dlb_log_map_qid(hw, domain_id, args);
/* Verify that hardware resources are available before attempting to
* satisfy the request. This simplifies the error unwinding code.
*/
if (dlb_verify_map_qid_args(hw, domain_id, args, resp))
return -EINVAL;
prio = args->priority;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
id = args->port_id;
port = dlb_get_domain_used_ldb_port(id, domain);
if (port == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port not found\n",
__func__, __LINE__);
return -EFAULT;
}
queue = dlb_get_domain_ldb_queue(args->qid, domain);
if (queue == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: queue not found\n",
__func__, __LINE__);
return -EFAULT;
}
/* If there are any outstanding detach operations for this port,
* attempt to complete them. This may be necessary to free up a QID
* slot for this requested mapping.
*/
if (port->num_pending_removals)
dlb_domain_finish_unmap_port(hw, domain, port);
ret = dlb_verify_map_qid_slot_available(port, queue, resp);
if (ret)
return ret;
/* Hardware requires disabling the CQ before mapping QIDs. */
if (port->enabled)
dlb_ldb_port_cq_disable(hw, port);
/* If this is only a priority change, don't perform the full QID->CQ
* mapping procedure
*/
state = DLB_QUEUE_MAPPED;
if (dlb_port_find_slot_queue(port, state, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
if (prio != port->qid_map[i].priority) {
dlb_ldb_port_change_qid_priority(hw, port, i, args);
DLB_HW_INFO(hw, "DLB map: priority change only\n");
}
state = DLB_QUEUE_MAPPED;
ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
if (ret)
return ret;
goto map_qid_done;
}
state = DLB_QUEUE_UNMAP_IN_PROGRESS;
if (dlb_port_find_slot_queue(port, state, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
if (prio != port->qid_map[i].priority) {
dlb_ldb_port_change_qid_priority(hw, port, i, args);
DLB_HW_INFO(hw, "DLB map: priority change only\n");
}
state = DLB_QUEUE_MAPPED;
ret = dlb_port_slot_state_transition(hw, port, queue, i, state);
if (ret)
return ret;
goto map_qid_done;
}
/* If this is a priority change on an in-progress mapping, don't
* perform the full QID->CQ mapping procedure.
*/
state = DLB_QUEUE_MAP_IN_PROGRESS;
if (dlb_port_find_slot_queue(port, state, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
port->qid_map[i].priority = prio;
DLB_HW_INFO(hw, "DLB map: priority change only\n");
goto map_qid_done;
}
/* If this is a priority change on a pending mapping, update the
* pending priority
*/
if (dlb_port_find_slot_with_pending_map_queue(port, queue, &i)) {
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
port->qid_map[i].pending_priority = prio;
DLB_HW_INFO(hw, "DLB map: priority change only\n");
goto map_qid_done;
}
/* If all the CQ's slots are in use, then there's an unmap in progress
* (guaranteed by dlb_verify_map_qid_slot_available()), so add this
* mapping to pending_map and return. When the removal is completed for
* the slot's current occupant, this mapping will be performed.
*/
if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &i)) {
if (dlb_port_find_slot(port, DLB_QUEUE_UNMAP_IN_PROGRESS, &i)) {
enum dlb_qid_map_state state;
if (i >= DLB_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: port slot tracking failed\n",
__func__, __LINE__);
return -EFAULT;
}
port->qid_map[i].pending_qid = queue->id;
port->qid_map[i].pending_priority = prio;
state = DLB_QUEUE_UNMAP_IN_PROGRESS_PENDING_MAP;
ret = dlb_port_slot_state_transition(hw, port, queue,
i, state);
if (ret)
return ret;
DLB_HW_INFO(hw, "DLB map: map pending removal\n");
goto map_qid_done;
}
}
/* If the domain has started, a special "dynamic" CQ->queue mapping
* procedure is required in order to safely update the CQ<->QID tables.
* The "static" procedure cannot be used when traffic is flowing,
* because the CQ<->QID tables cannot be updated atomically and the
* scheduler won't see the new mapping unless the queue's if_status
* changes, which isn't guaranteed.
*/
ret = dlb_ldb_port_map_qid(hw, domain, port, queue, prio);
/* If ret is less than zero, it's due to an internal error */
if (ret < 0)
return ret;
map_qid_done:
if (port->enabled)
dlb_ldb_port_cq_enable(hw, port);
resp->status = 0;
return 0;
}
static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
{
DLB_HW_INFO(hw, "DLB start domain arguments:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
}
static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
u32 pool_id)
{
union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
struct dlb_credit_pool *pool;
pool = &hw->rsrcs.ldb_credit_pools[pool_id];
r0.field.count = pool->avail_credits;
DLB_CSR_WR(hw,
DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
r0.val);
}
static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
u32 pool_id)
{
union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
struct dlb_credit_pool *pool;
pool = &hw->rsrcs.dir_credit_pools[pool_id];
r0.field.count = pool->avail_credits;
DLB_CSR_WR(hw,
DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
r0.val);
}
/**
* dlb_hw_start_domain() - Lock the domain configuration
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @arg: User-provided arguments (unused).
* @resp: Response to user.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/
int dlb_hw_start_domain(struct dlb_hw *hw,
u32 domain_id,
struct dlb_start_domain_args *arg,
struct dlb_cmd_response *resp)
{
struct dlb_list_entry *iter;
struct dlb_dir_pq_pair *dir_queue;
struct dlb_ldb_queue *ldb_queue;
struct dlb_credit_pool *pool;
struct dlb_domain *domain;
RTE_SET_USED(arg);
RTE_SET_USED(iter);
dlb_log_start_domain(hw, domain_id);
if (dlb_verify_start_domain_args(hw, domain_id, resp))
return -EINVAL;
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
DLB_HW_ERR(hw,
"[%s():%d] Internal error: domain not found\n",
__func__, __LINE__);
return -EFAULT;
}
/* Write the domain's pool credit counts, which have been updated
* during port configuration. The sum of the pool credit count plus
* each producer port's credit count must equal the pool's credit
* allocation *before* traffic is sent.
*/
DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
dlb_dir_pool_write_credit_count_reg(hw, pool->id);
/* Enable load-balanced and directed queue write permissions for the
* queues this domain owns. Without this, the DLB will drop all
* incoming traffic to those queues.
*/
DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
union dlb_sys_ldb_vasqid_v r0 = { {0} };
unsigned int offs;
r0.field.vasqid_v = 1;
offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
}
DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
union dlb_sys_dir_vasqid_v r0 = { {0} };
unsigned int offs;
r0.field.vasqid_v = 1;
offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
}
dlb_flush_csr(hw);
domain->started = true;
resp->status = 0;
return 0;
}
static void dlb_log_get_dir_queue_depth(struct dlb_hw *hw,
u32 domain_id,
u32 queue_id)
{
DLB_HW_INFO(hw, "DLB get directed queue depth:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
}
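/**
* dlb_hw_get_dir_queue_depth() - Return the depth of a directed queue.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user; on success resp->id holds the queue depth.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/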
int dlb_hw_get_dir_queue_depth(struct dlb_hw *hw,
u32 domain_id,
struct dlb_get_dir_queue_depth_args *args,
struct dlb_cmd_response *resp)
{
struct dlb_dir_pq_pair *queue;
struct dlb_domain *domain;
int id;
id = domain_id;
dlb_log_get_dir_queue_depth(hw, domain_id, args->queue_id);
domain = dlb_get_domain_from_id(hw, id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -EINVAL;
}
id = args->queue_id;
queue = dlb_get_domain_used_dir_pq(id, domain);
if (queue == NULL) {
resp->status = DLB_ST_INVALID_QID;
return -EINVAL;
}
resp->id = dlb_dir_queue_depth(hw, queue);
return 0;
}
static void dlb_log_get_ldb_queue_depth(struct dlb_hw *hw,
u32 domain_id,
u32 queue_id)
{
DLB_HW_INFO(hw, "DLB get load-balanced queue depth:\n");
DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
DLB_HW_INFO(hw, "\tQueue ID: %d\n", queue_id);
}
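/**
* dlb_hw_get_ldb_queue_depth() - Return the depth of a load-balanced queue,
* computed as the sum of the AQED active, ATQ enqueue, and LDB enqueue counts.
* @hw: Contains the current state of the DLB hardware.
* @domain_id: Domain ID.
* @args: User-provided arguments.
* @resp: Response to user; on success resp->id holds the queue depth.
*
* Return: returns < 0 on error, 0 otherwise. If the driver is unable to
* satisfy a request, resp->status will be set accordingly.
*/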
int dlb_hw_get_ldb_queue_depth(struct dlb_hw *hw,
u32 domain_id,
struct dlb_get_ldb_queue_depth_args *args,
struct dlb_cmd_response *resp)
{
union dlb_lsp_qid_aqed_active_cnt r0;
union dlb_lsp_qid_atq_enqueue_cnt r1;
union dlb_lsp_qid_ldb_enqueue_cnt r2;
struct dlb_ldb_queue *queue;
struct dlb_domain *domain;
dlb_log_get_ldb_queue_depth(hw, domain_id, args->queue_id);
domain = dlb_get_domain_from_id(hw, domain_id);
if (domain == NULL) {
resp->status = DLB_ST_INVALID_DOMAIN_ID;
return -EINVAL;
}
queue = dlb_get_domain_ldb_queue(args->queue_id, domain);
if (queue == NULL) {
resp->status = DLB_ST_INVALID_QID;
return -EINVAL;
}
r0.val = DLB_CSR_RD(hw,
DLB_LSP_QID_AQED_ACTIVE_CNT(queue->id));
r1.val = DLB_CSR_RD(hw,
DLB_LSP_QID_ATQ_ENQUEUE_CNT(queue->id));
r2.val = DLB_CSR_RD(hw,
DLB_LSP_QID_LDB_ENQUEUE_CNT(queue->id));
resp->id = r0.val + r1.val + r2.val;
return 0;
}
|
"""Example for exploring SQLAlchemy
This project provides example code showing how to use SQLAlchemy. The idea is
to build an example sequentially, in steps, to give new users an idea of where
to start and how to progress. Along the way some principles will be exhibited.
The code should be self-explanatory, with as little documentation as possible,
else the project is failing.
This example illustrates the following:
----------------------------------------
1. Establish a link to a MySQL or SQLite database
2. Create tables
3. Populate the tables
4. Configure a many-to-many relationship
5. Query the tables
References
----------
- https://www.tutorialspoint.com/sqlalchemy/sqlalchemy_orm_many_to_many_relationships.htm
- https://docs.sqlalchemy.org/en/13/orm/tutorial.html#building-a-relationship
- https://cyruslab.net/2020/07/16/pythoncreate-database-if-not-exists-with-sqlalchemy/
"""
import argparse
import configparserext
import logging
from pathlib import Path
# from sqlalchemy import create_engine, Column, Integer, String
# from sqlalchemy.orm import declarative_base
# from sqlalchemy_utils import database_exists, create_database, drop_database
from sqlalchemy.orm import declarative_base
from beetools import beeutils, beearchiver, msg_info, msg_milestone
import sqlalchemy as db
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy_utils import database_exists, create_database, drop_database
_PROJ_DESC = __doc__.split('\n')[0]
_PROJ_PATH = Path(__file__)
def project_desc():
return _PROJ_DESC
Base = declarative_base()
# ass_country_currency = db.Table(
# 'country_currency', Base.metadata,
# db.Column('country_id', db.ForeignKey('country.id'), primary_key=True),
# db.Column('currency_id', db.ForeignKey('currency.id'), primary_key=True)
# )
class CountryCurrency(Base):
__tablename__ = 'country_currency'
country_id = db.Column(db.Integer, db.ForeignKey('country.id'), primary_key=True)
currency_id = db.Column(db.Integer, db.ForeignKey('currency.id'), primary_key=True)
class Country(Base):
__tablename__ = 'country'
id = db.Column(db.Integer, primary_key=True)
cca2 = db.Column(db.String(2), unique=True)
cca3 = db.Column(db.String(3), unique=True)
name_common = db.Column(db.String(100))
# many to many Country<->Currency
currencies = relationship(
'Currency', secondary="country_currency", viewonly=True
) # , overlaps="currencies")
def __init__(self, cca2, cca3, name_common):
self.cca2 = cca2
self.cca3 = cca3
self.name_common = name_common
def __str__(self):
return (
f"<Country(id = {self.id}, cca3 = {self.cca3}, "
f"cca2 = {self.cca2}, name_common = {self.name_common}, "
f"curr_iso = {self.curr_iso}"
)
def __repr__(self):
return f"Country({self.cca2}, {self.cca3}, {self.name_common})"
class Currency(Base):
__tablename__ = 'currency'
id = db.Column(db.Integer, primary_key=True)
curr_iso = db.Column(db.String(3), unique=True)
name = db.Column(db.String(50))
symbol = db.Column(db.String(25))
# many to many Country<->Currency
countries = relationship(
'Country', secondary='country_currency', viewonly=True
) # , overlaps="currencies")
def __init__(self, curr_iso, name, symbol):
self.curr_iso = curr_iso
self.name = name
self.symbol = symbol
def __str__(self):
return f"<Currency(curr_iso = {self.curr_iso}, name = {self.name}"
def __repr__(self):
return f"Country({self.curr_iso}, {self.name}, {self.symbol})"
class SQLAlchemyExample:
"""Class description"""
def __init__(self, p_ini_pth, p_logger=False):
"""Method description"""
print(msg_info('Instantiating the class (example)...'))
self.success = True
self.ini_pth = p_ini_pth
self.logger_name = None
self.logger = None
if p_logger:
self.logger_name = "SQLAlchemyExample"
self.logger = logging.getLogger(self.logger_name)
self.ini = configparserext.ConfigParserExt(inline_comment_prefixes='#')
self.verbose = False
self.ini.read([self.ini_pth])
self.proj_root_dir = _PROJ_PATH.parents[1]
# self.url = 'sqlite:///CountryData.sqlite'
self.url = 'mysql://TestUser1:1re$UtseT@localhost/t_sqlalchemy'
# self.base = None
self.engine = None
self.meta_data = None
self.conn = None
self.Session = None
self.sess = None
self.country_tab = None
self.currency_tab = None
self.ass_country_currency_tab = None
def step_01_assign_attributes(self):
"""Method description"""
print(msg_info('Step 1: Assign the attributes...'))
self.engine = db.create_engine(self.url)
self.conn = self.engine.connect()
self.Session = sessionmaker(bind=self.engine)
self.sess = self.Session()
return True
def step_02_create_the_db(self):
print(msg_info('Step 2: Create the database...'))
if database_exists(self.url):
drop_database(self.url)
create_database(self.url)
Base.metadata.create_all(self.engine)
pass
def step_03_populate_the_db(self):
print(msg_info('Step 3: Add data to the tables...'))
PopulateCountry(self.sess).add_test_data()
PopulateCurrency(self.sess).add_test_data()
PopulateCountryCurrency(self.sess).add_test_data()
pass
def step_04_display_data(self):
print(msg_info('Step 4: Read data from the tables...'))
print(msg_milestone('- Country table'))
for country in self.sess.query(Country).order_by(Country.cca2):
print(country.cca2, country.cca3, country.name_common)
print()
print(msg_milestone('- Currency table'))
for currency in self.sess.query(Currency).order_by(Currency.curr_iso):
print(currency.curr_iso, currency.name, currency.symbol)
print()
print(msg_milestone('- Country <-> Currency table'))
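# Join Country and Currency through the association table by filtering on
# the CountryCurrency foreign keys (an implicit join across the three tables).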
for x in (
self.sess.query(Country, Currency)
.filter(
CountryCurrency.country_id == Country.id,
CountryCurrency.currency_id == Currency.id,
)
.order_by(Country.name_common)
.all()
):
print(f"Country: {x.Country.name_common} Currency: {x.Currency.curr_iso}")
pass
def step_05(self):
pass
def step_06(self):
pass
def run(self):
"""Method description"""
self.step_01_assign_attributes()
self.step_02_create_the_db()
self.step_03_populate_the_db()
self.step_04_display_data()
self.step_05()
self.step_06()
pass
class PopulateCountry:
def __init__(self, p_session):
self.sess = p_session
self.test_data = [
('ZAF', 'ZA', 'South Africa', 'ZAR'),
('USA', 'US', 'United States of America', 'USD'),
('GBR', 'GB', 'United Kingdom', 'GBP'),
('DEU', 'DE', 'Federal Republic of Germany', 'EUR'),
('AUS', 'AU', 'Australia', 'AUD'),
('LSO', 'LS', 'Lesotho', 'ZAR'),
('SWZ', 'SZ', 'Eswatini', 'ZAR'),
]
pass
def add_test_data(self):
for country in self.test_data:
self.sess.add(
Country(
cca3=country[0],
cca2=country[1],
name_common=country[2],
# curr_iso = country[3]
)
)
self.sess.commit()
pass
class PopulateCurrency:
def __init__(self, p_session):
self.sess = p_session
self.test_data = [
('ZAR', 'South African rand', 'R'),
('USD', 'United States dollar', '$'),
('GBP', 'Pound sterling', '£'),
('EUR', 'Euro', '€'),
('AUD', 'Australian dollar', '$'),
('LSL', 'Lesotho loti', 'l'),
('SZL', 'Swazi lilangeni', 'L'),
]
def add_test_data(self):
for currency in self.test_data:
self.sess.add(
Currency(
curr_iso=currency[0],
name=currency[1],
symbol=currency[2],
)
)
pass
self.sess.commit()
class PopulateCountryCurrency:
def __init__(self, p_session):
self.sess = p_session
self.test_data = [
('ZA', 'ZAR'),
('US', 'USD'),
('GB', 'GBP'),
('DE', 'EUR'),
('AU', 'AUD'),
('LS', 'ZAR'),
('LS', 'LSL'),
('SZ', 'ZAR'),
('SZ', 'SZL'),
]
def add_test_data(self):
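# Resolve each (cca2, curr_iso) pair to its primary keys before inserting
# the association rows.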
for cntry, curr in self.test_data:
cntry_det = self.sess.query(Country).filter(Country.cca2 == cntry).first()
curr_det = (
self.sess.query(Currency).filter(Currency.curr_iso == curr).first()
)
self.sess.add(
CountryCurrency(country_id=cntry_det.id, currency_id=curr_det.id)
)
pass
self.sess.commit()
pass
def init_logger():
logger = logging.getLogger('SQLAlchemyExample')
logger.setLevel(beeutils.DEF_LOG_LEV)
file_handle = logging.FileHandler(beeutils.LOG_FILE_NAME, mode='w')
file_handle.setLevel(beeutils.DEF_LOG_LEV_FILE)
console_handle = logging.StreamHandler()
console_handle.setLevel(beeutils.DEF_LOG_LEV_CON)
file_format = logging.Formatter(
beeutils.LOG_FILE_FORMAT, datefmt=beeutils.LOG_DATE_FORMAT
)
console_format = logging.Formatter(beeutils.LOG_CONSOLE_FORMAT)
file_handle.setFormatter(file_format)
console_handle.setFormatter(console_format)
logger.addHandler(file_handle)
logger.addHandler(console_handle)
def read_args():
arg_parser = argparse.ArgumentParser(description='Get configuration parameters')
arg_parser.add_argument(
'-c',
'--config-path',
help='Config file name',
default=arg_parser.prog[: arg_parser.prog.find('.') + 1] + 'ini',
)
arg_parser.add_argument(
'-e', '--arc-extern-dir', help='Path to external archive', default=None
)
args = arg_parser.parse_args()
arc_extern_dir = args.arc_extern_dir
ini_path = args.config_path
return ini_path, arc_extern_dir
if __name__ == '__main__':
ini_pth, arc_extern_dir = read_args()
init_logger()
b_tls = beearchiver.Archiver(_PROJ_DESC, _PROJ_PATH)
b_tls.print_header(p_cls=False)
t_sqlalchemyexample = SQLAlchemyExample(ini_pth)
if t_sqlalchemyexample.success:
t_sqlalchemyexample.run()
b_tls.print_footer()
# end __main__
|
<filename>src/Compiler/index.ts<gh_stars>1-10
import { Expression, ExpressionType } from '../Parser';
import { Evaluator, EvaluatorContext, CompilerOptions, EvaluatorFactory } from './Evaluator';
import * as Error from './Error';
import { ArrayPrototype } from './ArrayPrototype';
import { makeValueMarshaller } from './Evaluator/util';
import { EventEmitter } from '../common/EventEmitter';
const DefaultOptions: CompilerOptions = {
NoUndefinedVars: false,
NoNewVars: false,
ImmutableContext: false,
NoProtoAccess: true,
ObjectPrototype: null,
ArrayPrototype,
EnforceMarshalling: false,
Constants: Object.create(null),
};
export abstract class Compiler extends EventEmitter {
private compilers: Map<ExpressionType.Any, EvaluatorFactory> = new Map();
compile(expression: Expression.Any, options?: CompilerOptions): Evaluator {
const scopeStack: ExpressionType.Any[] = [];
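        // scopeStack records the chain of expression types currently being
        // compiled; it is passed to listeners via the scope:enter/scope:leave events.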
const compile = (expr: Expression.Any, opts: CompilerOptions) => {
const makeEval = this.compilers.get(expr.type);
if (typeof makeEval === 'undefined') {
throw new TypeError(Error.UnknownExpression(expr));
}
scopeStack.push(expr.type);
this.fireEvent('scope:enter', expr.type, scopeStack);
const evaluator = makeEval(expr, opts, compile);
this.fireEvent('scope:leave', expr.type, scopeStack, evaluator);
return evaluator;
};
const compileOptions = { ...DefaultOptions, ...options };
const evaluator = compile(expression, compileOptions);
const marshallValue = makeValueMarshaller(compileOptions);
return (context?: EvaluatorContext) => {
return marshallValue(evaluator(context || Object.create(null), []));
};
}
protected setCompiler(expressionType: ExpressionType.Any, compiler: EvaluatorFactory) {
this.compilers.set(expressionType, compiler);
}
}
|
<filename>vendor/code.cloudfoundry.org/locket/grpcserver/server_test.go
package grpcserver_test
import (
"crypto/tls"
"fmt"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"code.cloudfoundry.org/cfhttp"
"code.cloudfoundry.org/inigo/helpers/portauthority"
"code.cloudfoundry.org/lager/lagertest"
"code.cloudfoundry.org/locket/grpcserver"
"code.cloudfoundry.org/locket/models"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
"golang.org/x/net/context"
)
var _ = Describe("GRPCServer", func() {
var (
logger *lagertest.TestLogger
listenAddress string
runner ifrit.Runner
serverProcess ifrit.Process
tlsConfig *tls.Config
certFixture, keyFixture, caCertFixture string
portAllocator portauthority.PortAllocator
)
BeforeSuite(func() {
node := GinkgoParallelNode()
startPort := 1050 * node
portRange := 1000
endPort := startPort + portRange
var err error
portAllocator, err = portauthority.New(startPort, endPort)
Expect(err).NotTo(HaveOccurred())
})
BeforeEach(func() {
var err error
certFixture = "fixtures/cert.crt"
keyFixture = "fixtures/cert.key"
caCertFixture = "fixtures/ca.crt"
tlsConfig, err = cfhttp.NewTLSConfig(certFixture, keyFixture, caCertFixture)
Expect(err).NotTo(HaveOccurred())
logger = lagertest.NewTestLogger("grpc-server")
port, err := portAllocator.ClaimPorts(1)
Expect(err).NotTo(HaveOccurred())
listenAddress = fmt.Sprintf("localhost:%d", port)
runner = grpcserver.NewGRPCServer(logger, listenAddress, tlsConfig, &testHandler{})
})
JustBeforeEach(func() {
serverProcess = ginkgomon.Invoke(runner)
})
AfterEach(func() {
ginkgomon.Kill(serverProcess)
})
It("serves on the listen address", func() {
conn, err := grpc.Dial(listenAddress, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
Expect(err).NotTo(HaveOccurred())
locketClient := models.NewLocketClient(conn)
_, err = locketClient.Lock(context.Background(), &models.LockRequest{})
Expect(err).NotTo(HaveOccurred())
_, err = locketClient.Release(context.Background(), &models.ReleaseRequest{})
Expect(err).NotTo(HaveOccurred())
_, err = locketClient.Fetch(context.Background(), &models.FetchRequest{})
Expect(err).NotTo(HaveOccurred())
_, err = locketClient.FetchAll(context.Background(), &models.FetchAllRequest{})
Expect(err).NotTo(HaveOccurred())
})
Context("when the server fails to listen", func() {
var alternateRunner ifrit.Runner
BeforeEach(func() {
alternateRunner = grpcserver.NewGRPCServer(logger, listenAddress, tlsConfig, &testHandler{})
})
It("exits with an error", func() {
var err error
process := ifrit.Background(alternateRunner)
Eventually(process.Wait()).Should(Receive(&err))
Expect(err).To(HaveOccurred())
})
})
})
type testHandler struct{}
func (h *testHandler) Lock(ctx context.Context, req *models.LockRequest) (*models.LockResponse, error) {
return &models.LockResponse{}, nil
}
func (h *testHandler) Release(ctx context.Context, req *models.ReleaseRequest) (*models.ReleaseResponse, error) {
return &models.ReleaseResponse{}, nil
}
func (h *testHandler) Fetch(ctx context.Context, req *models.FetchRequest) (*models.FetchResponse, error) {
return &models.FetchResponse{}, nil
}
func (h *testHandler) FetchAll(ctx context.Context, req *models.FetchAllRequest) (*models.FetchAllResponse, error) {
return &models.FetchAllResponse{}, nil
}
|
# -*- coding: utf-8 -*-
A, B, T = [int(e) for e in input().strip().split()]
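# Count how many whole multiples of A fit into T, then multiply by B.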
ans = (T//A) * B
print(ans) |
<gh_stars>1-10
use super::pbrt::Spectrum;
use super::primitive::Primitive;
use super::geometry::{Bounds3f, Ray, Vector3f};
use super::light::{Light, LightFlags};
use super::sampler::Sampler;
use super::interaction::{Interaction, SurfaceInteraction};
use super::stats_accumulator::StatsAccumulator;
use std::sync::Arc;
pub struct Scene {
aggregate: Arc<dyn Primitive + Send + Sync>,
_world_bound: Bounds3f,
pub lights: Vec<Arc<dyn Light + Send + Sync>>,
// Store infinite light sources separately for cases where we only want
// to loop over them.
pub infinite_lights: Vec<Arc<dyn Light + Send + Sync>>
}
impl Scene {
pub fn new(
aggregate: Arc<dyn Primitive + Send + Sync>,
lights: Vec<Arc<dyn Light + Send + Sync>>
) -> Scene {
        // Take the world bound before `aggregate` is moved into the struct.
        let world_bound = aggregate.world_bound().aabb();
        let mut scene = Scene {
            aggregate,
            _world_bound: world_bound,
            lights,
            infinite_lights: Vec::new()
        };
        // Iterate by index so the lights vector is not moved out of `scene`
        // while `infinite_lights` is being populated.
        for i in 0..scene.lights.len() {
            let light = scene.lights[i].clone();
            light.preprocess(&scene);
            if light.get_flags() & LightFlags::Infinite != 0 {
                scene.infinite_lights.push(light);
            }
        }
scene
}
pub fn world_bound(&self) -> Bounds3f {
self._world_bound
}
pub fn intersect(&self, ray: &Ray) -> Option<SurfaceInteraction> {
StatsAccumulator::instance().report_counter(String::from("Intersections/Regular ray intersection tests"), 1);
debug_assert!(ray.d != Vector3f::default());
self.aggregate.intersect(ray)
}
pub fn intersect_p(&self, ray: &Ray) -> bool {
StatsAccumulator::instance().report_counter(String::from("Intersections/Shadow ray intersection tests"), 1);
debug_assert!(ray.d != Vector3f::default());
self.aggregate.intersect_p(ray)
}
/// A generalization of Scene::Intersect() that returns the first intersection with a light-scattering surface along the given ray as well as the beam transmittance up to that point.
pub fn intersect_tr(&self, mut ray: &Ray, sampler: Box<dyn Sampler>) -> (Option<SurfaceInteraction>, Spectrum) {
let mut tr = Spectrum::new(1.0);
loop {
// Accumulate beam transmittance for ray segment
if let Some(medium) = ray.medium {
tr *= medium.tr(ray, sampler);
}
if let Some(hit_surface) = self.intersect(ray) {
// Initialize next ray segment or terminate transmittance computation
if let Some(primitive) = hit_surface.primitive {
if primitive.get_material().is_none() {
return (Some(hit_surface), tr);
}
}
ray = &hit_surface.spawn_ray(&ray.d);
}
else {
return (None, tr);
}
}
}
} |
<reponame>hdansou/vscode-ansible-autocomplete<filename>src/completion-item-provider.ts
import { CompletionItemProvider, TextDocument, Position, CancellationToken, CompletionItem } from 'vscode';
import { CompletionEngine } from './completion-engine';
export class AnsibleCompletionItemProvider implements CompletionItemProvider {
private completionEngine: CompletionEngine;
constructor() {
this.completionEngine = new CompletionEngine();
}
provideCompletionItems(document: TextDocument, position: Position, token: CancellationToken): Promise<CompletionItem[]> {
if (!this.completionEngine.ready()) {
return Promise.resolve([]);
}
let range = document.getWordRangeAtPosition(position);
let prefix = range ? document.getText(range) : '';
let line = document.lineAt(position.line).text;
return this.completionEngine.getCompletions(prefix, line);
}
}
|
package uniswap
import (
"fmt"
"testing"
"github.com/ethereum/go-ethereum/accounts/abi/bind"
"github.com/ethereum/go-ethereum/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/snebel29/uniswap-go-sdk/contracts/erc20"
)
// TODO: Create some live test scenario on one of the test networks.
func TestFetcher_FetchTokenData(t *testing.T) {
decimals := uint8(18)
symbol := "WBTC"
name := "Wrapped BTC"
address := "0x2260fac5e5542a773aa44fbcfedf7c193bc2c599"
	// We can pass a nil client because the erc20 caller contract will be mocked.
fetcher := NewFetcher(nil)
newErc20CallerThatReturnError := func(addr common.Address, client bind.ContractBackend) (erc20.ReadOnlyContract, error) {
return &erc20.ReadOnlyContractMock{}, fmt.Errorf("test error")
}
fetcher.newErc20Caller = newErc20CallerThatReturnError
_, err := fetcher.FetchTokenData(MAINNET, address)
assert.Error(t, err, "When erc20 caller return error FetchTokenData should do as well")
decimalsThatReturnError := func(addr common.Address, client bind.ContractBackend) (erc20.ReadOnlyContract, error) {
mock := &erc20.ReadOnlyContractMock{}
mock.OnDecimals(func(*bind.CallOpts) (uint8, error) { return uint8(0), fmt.Errorf("test error") })
return mock, nil
}
fetcher.newErc20Caller = decimalsThatReturnError
_, err = fetcher.FetchTokenData(MAINNET, address)
assert.Error(t, err, "When decimals return error FetchTokenData should do as well")
symbolThatReturnError := func(addr common.Address, client bind.ContractBackend) (erc20.ReadOnlyContract, error) {
mock := &erc20.ReadOnlyContractMock{}
mock.OnSymbol(func(*bind.CallOpts) (string, error) { return "", fmt.Errorf("test error") })
return mock, nil
}
fetcher.newErc20Caller = symbolThatReturnError
_, err = fetcher.FetchTokenData(MAINNET, address)
assert.Error(t, err, "When symbol return error FetchTokenData should do as well")
nameThatReturnError := func(addr common.Address, client bind.ContractBackend) (erc20.ReadOnlyContract, error) {
mock := &erc20.ReadOnlyContractMock{}
mock.OnName(func(*bind.CallOpts) (string, error) { return "", fmt.Errorf("test error") })
return mock, nil
}
fetcher.newErc20Caller = nameThatReturnError
_, err = fetcher.FetchTokenData(MAINNET, address)
assert.Error(t, err, "When name return error FetchTokenData should do as well")
mock := &erc20.ReadOnlyContractMock{}
successfulERC20Fetch := func(addr common.Address, client bind.ContractBackend) (erc20.ReadOnlyContract, error) {
mock.OnDecimals(func(*bind.CallOpts) (uint8, error) { return decimals, nil })
mock.OnSymbol(func(*bind.CallOpts) (string, error) { return symbol, nil })
mock.OnName(func(*bind.CallOpts) (string, error) { return name, nil })
return mock, nil
}
fetcher.newErc20Caller = successfulERC20Fetch
token, err := fetcher.FetchTokenData(MAINNET, "badAddress")
require.Error(t, err, "Bad address should produce an error")
require.Equal(t, 0, len(fetcher.tokenDataCache), "At this point token data cache should still be empty")
token, err = fetcher.FetchTokenData(MAINNET, address)
require.NoError(t, err, "When no function return errors there should be no errors")
assert.Equal(t, decimals, token.Decimals)
assert.Equal(t, symbol, token.Symbol)
assert.Equal(t, name, token.Name)
assert.Equal(t, 1, len(fetcher.tokenDataCache), "By now the token data should have been cached")
assert.EqualValues(t, 2, mock.DecimalsCalled(), "FetchTokenData was successfully called twice")
assert.EqualValues(t, 2, mock.SymbolCalled(), "FetchTokenData was successfully called twice")
assert.EqualValues(t, 2, mock.NameCalled(), "FetchTokenData was successfully called twice")
token, err = fetcher.FetchTokenData(MAINNET, address)
require.NoError(t, err, "When no function return errors there should be no errors")
assert.EqualValues(t, 2, mock.DecimalsCalled(), "When token data is cached there should be no calls")
assert.EqualValues(t, 2, mock.SymbolCalled(), "When token data is cached there should be no calls")
assert.EqualValues(t, 2, mock.NameCalled(), "When token data is cached there should be no calls")
}
|
/*
** Try to convert a value from string to a number value.
** If the value is not a string or is a string not representing
** a valid numeral (or if coercions from strings to numbers
** are disabled via macro 'cvt2num'), do not modify 'result'
** and return 0.
*/
static int l_strton (const TValue *obj, TValue *result) {
lua_assert(obj != result);
if (!cvt2num(obj))
return 0;
else
return (luaO_str2num(svalue(obj), result) == vslen(obj) + 1);
} |
def main(
filename: Iterable[PathLike],
config_file: PathLike,
exclude: "Optional[List[str]]",
colour: "ColourTrilean" = None,
verbose: bool = False,
show_traceback: bool = False,
show_diff: bool = False,
):
import fnmatch
import re
from domdf_python_tools.paths import PathPlus
from formate.config import load_toml
from formate.utils import SyntaxTracebackHandler, syntaxerror_for_file
retv = 0
try:
config = load_toml(config_file)
except FileNotFoundError:
raise click.UsageError(f"Config file '{config_file}' not found")
	for path in filename:
		# Skip files matching any of the exclude patterns. A bare ``continue``
		# inside the pattern loop would only advance to the next pattern rather
		# than skip the file.
		if any(re.match(fnmatch.translate(pattern), str(path)) for pattern in exclude or []):
			continue
		path = PathPlus(path)
if path.suffix not in {".py", ".pyi", ''} or path.is_dir():
if verbose >= 2:
click.echo(f"Skipping {path} as it doesn't appear to be a Python file")
continue
r = Reformatter(path, config=config)
with handle_tracebacks(show_traceback, cls=SyntaxTracebackHandler):
with syntaxerror_for_file(path):
ret_for_file = r.run()
if ret_for_file:
if verbose:
click.echo(f"Reformatting {path}")
if show_diff:
click.echo(r.get_diff(), color=resolve_color_default(colour))
r.to_file()
elif verbose >= 2:
click.echo(f"Checking {path}")
retv |= ret_for_file
sys.exit(retv) |
import java.util.Scanner;
import java.util.SortedSet;
import java.util.TreeSet;
public class Main {
public static void main(String[] args) {
Scanner scan = new Scanner(System.in);
for (;;) {
int n = Integer.parseInt(scan.next());
int m = Integer.parseInt(scan.next());
if (n == 0 && m == 0) break;
int a[] = new int[n];
int w[] = new int[m];
for (int i = 0; i < n; i++) {
a[i] = Integer.parseInt(scan.next());
}
for (int j = 0; j < m; j++) {
w[j] = Integer.parseInt(scan.next());
}
SortedSet<Integer> commonAdditions = null;
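            // For each target value that cannot be balanced exactly, calcNearestWeight
            // records the absolute differences left by every placement of the weights;
            // only differences common to all such targets are kept.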
for (int i = 0; i < n; i++) {
SortedSet<Integer> additions = new TreeSet<Integer>();
boolean bJust = calcNearestWeight(a[i], w, 0, 0, additions);
if (!bJust) {
if (commonAdditions == null) {
commonAdditions = additions;
} else {
commonAdditions.retainAll(additions);
}
}
}
if (commonAdditions == null) {
System.out.println(0);
} else {
if (commonAdditions.size() > 0) {
System.out.println(commonAdditions.first());
} else {
System.out.println(-1);
}
}
}
scan.close();
}
private static boolean calcNearestWeight(int x, int w[], int i, int sum, SortedSet<Integer> additions) {
if (x == sum) return true;
if (i == w.length) {
additions.add(Math.abs(x - sum));
return false;
}
if (calcNearestWeight(x, w, i + 1, sum, additions)) return true;
if (calcNearestWeight(x, w, i + 1, sum + w[i], additions)) return true;
return calcNearestWeight(x, w, i + 1, sum - w[i], additions);
}
}
|
// Alright here's the big stuff. A Serializer needs to implement a _lot_ of methods. Fortunately
// for us, we don't actually need that much functionality out of our serializer. So we're going to
// leave most things unimplemented, and then implement them if we ever end up needing them.
impl<'a> Serializer for &'a mut TlsSerializer {
type Ok = ();
type Error = crate::error::Error;
type SerializeSeq = Self;
type SerializeTuple = Self;
type SerializeTupleStruct = Self;
type SerializeTupleVariant = Self;
type SerializeMap = Self;
type SerializeStruct = Self;
type SerializeStructVariant = Self;
//
// Implemented stuff
//
fn serialize_u8(self, v: u8) -> Result<Self::Ok, Self::Error> {
self.buf.write_u8(v)?;
Ok(())
}
fn serialize_u16(self, v: u16) -> Result<Self::Ok, Self::Error> {
self.buf.write_u16::<BigEndian>(v)?;
Ok(())
}
fn serialize_u32(self, v: u32) -> Result<Self::Ok, Self::Error> {
self.buf.write_u32::<BigEndian>(v)?;
Ok(())
}
fn serialize_u64(self, v: u64) -> Result<Self::Ok, Self::Error> {
self.buf.write_u64::<BigEndian>(v)?;
Ok(())
}
// From the spec:
// struct {
// uint8 present;
// switch (present) {
// case 0: struct{};
// case 1: T value;
// }
// } optional<T>;
fn serialize_none(self) -> Result<Self::Ok, Self::Error> {
self.serialize_u8(0)
}
fn serialize_some<T: ?Sized + Serialize>(self, value: &T) -> Result<Self::Ok, Self::Error> {
self.serialize_u8(1)?;
value.serialize(self)
}
/// Serializes a newtype struct. This is a bit of a hack: if the name of the struct ends with
/// `__bound_uX` where X = 8, 16, 24, 32, or 64, then we prefix the serialized inner type with
/// its length in bytes. This length tag will be the width of the specified X.
fn serialize_newtype_struct<T>(
mut self,
name: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ?Sized + Serialize,
{
serialize_with_optional_bound(name, value, &mut self)
}
/// This just forwards to `serialize_seq`
fn serialize_bytes(self, v: &[u8]) -> Result<Self::Ok, Self::Error> {
let mut seq = self.serialize_seq(Some(v.len()))?;
for b in v {
seq.serialize_element(b)?;
}
seq.end()
}
/// `TlsSerializer` is also a `SerializeSeq` (see impl below)
fn serialize_seq(self, _len: Option<usize>) -> Result<Self::SerializeSeq, Self::Error> {
Ok(self)
}
/// `TlsSerializer` is also a `SerializeStruct` (see impl below)
fn serialize_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeStruct, Self::Error> {
Ok(self)
}
/// To serialize unit types, just write the variant down as a number
fn serialize_unit_variant(
self,
name: &'static str,
variant_index: u32,
_variant: &'static str,
) -> Result<Self::Ok, Self::Error> {
if name.ends_with("__enum_u8") {
// Make sure the variant index isn't out of our range
let byte = u8::try_from(variant_index).expect("enum variant index out of bounds");
self.serialize_u8(byte)
} else {
let err = <Error as serde::ser::Error>::custom(
"don't know how to serialize a non-__enum_u8 enum",
);
Err(err)
}
}
/// To serialize newtypes, we serialize it like a unit type, and then serialize the contents.
fn serialize_newtype_variant<T>(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
value: &T,
) -> Result<Self::Ok, Self::Error>
where
T: ?Sized + Serialize,
{
self.serialize_unit_variant(name, variant_index, variant)?;
value.serialize(self)
}
/// Same thing as newtype variant. Serialize a struct variant by treating it as a unit variant,
/// then serializing the struct it contains. `TlsSerializer` is also a `SerializeStructVariant`
fn serialize_struct_variant(
self,
name: &'static str,
variant_index: u32,
variant: &'static str,
_len: usize,
) -> Result<Self::SerializeStructVariant, Self::Error> {
// Serialize the variant and then return myself as a SerializeStructVariant
self.serialize_unit_variant(name, variant_index, variant)?;
Ok(self)
}
/// `TlsSerializer` is also a `SerializeTuple` (see impl below)
fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple, Self::Error> {
Ok(self)
}
//
// Unimplemented stuff
//
fn serialize_bool(self, _v: bool) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_i8(self, _v: i8) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_i16(self, _v: i16) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_i32(self, _v: i32) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_i64(self, _v: i64) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_f32(self, _v: f32) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_f64(self, _v: f64) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_char(self, _v: char) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_str(self, _v: &str) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_unit(self) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_unit_struct(self, _name: &'static str) -> Result<Self::Ok, Self::Error> {
unimplemented!()
}
fn serialize_map(self, _len: Option<usize>) -> Result<Self::SerializeMap, Self::Error> {
unimplemented!()
}
fn serialize_tuple_struct(
self,
_name: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleStruct, Self::Error> {
unimplemented!()
}
fn serialize_tuple_variant(
self,
_name: &'static str,
_variant_index: u32,
_variant: &'static str,
_len: usize,
) -> Result<Self::SerializeTupleVariant, Self::Error> {
unimplemented!()
}
} |
A study of the coastal climates in France using temperature and precipitation data (1961–1990)
The coastal climates of France have been studied using temperature and precipitation data from 1961 to 1990. It is found that the coastlines of the French Atlantic (and adjacent seas) and Mediterranean show many similar climatic features, but the coastal orientation and nearby topography cause some local climatic peculiarities. The relatively flat topography near the coasts of the North Sea, English Channel and the Atlantic result in a fairly homogeneous coastal climate. In these coastal regions there are days of frost each year, but the precipitation amount is usually not high, especially in summer on the Atlantic coast. On the other hand, the mountainous areas of Provence and the Riviera, which are adjacent to the Mediterranean, cause a juxtaposition of varied coastal climates. Large temperature and rainfall differences are observed, especially between the capes and the heads of gulfs, but also between the narrow and wide gulfs. During the warm months, some sea breezes cause coastal thermal peculiarities, which depend on the orientation of the prevailing flow. Copyright © 2000 Royal Meteorological Society |
A couple of years ago, the basketball team I root for, the University of Michigan, had a player named Zack Novak. Everybody loved his story. He was slow and pudgy coming out of high school, recruited by nobody, and offered a spot at Michigan. Turned out he could play, at least some. He was only six foot three, but he played power forward, compensating for his lack of size and skill with unrelenting effort. He graduated from the business school and became team captain. If you watch college sports, you know the kind of player I’m talking about. Right: a white guy.
Michigan guard Zach Novak (0) reacts to his slam dunk in a contest during the college basketball team's "Michigan Madness" festivities at Crisler Arena, Friday, Oct. 16, 2009, in Ann Arbor, Mich. (AP Photo/Tony Ding)
If you ever watched a Michigan game, you knew Novak’s story, because the announcers talked about it every time. And it was very easy to see: Watching him hang in there on the low block against opponents five or six inches taller every game was a constant miracle. He was a gritty, hustling, tough, smart overachiever. Racial stereotyping in sports coverage is a decades-old phenomenon. In Novak’s case, nobody minded because the stereotypes were completely true. But watching every single game Michigan plays has given me some perspective on how race continues to inflect coverage in the sports media.
The player for Michigan the announcers like to talk about now is a kid named Nik Stauskas. He’s also white. He’s not especially gritty or tough. He’s an incredible scorer with good height, phenomenal shooting range, and an ability to unleash dunks or acrobatic layups. The thing the announcers mention every game — the fact that is incorporated into the shorthand notes they use to define a player’s story line — is that Stauskas worked hard in the off-season to gain muscle.
That’s true. Though it’s also true that Stauskas remains really skinny and shows no special grit. Also, in contrast to the easy stereotypes of white players, I’ve noticed he makes a lot of mental errors with his passes and sometimes lacks effort on defense. Watch him on these two plays letting opposing players run right past him on the fast break for a layup:
He seems like a nice kid, and he’s still a great player. But mainly he’s just a remarkable talent playing in a great offensive system. Michigan fans have a running joke about announcers marveling that Stauskas is (they say this nearly every game) “not just a shooter” — unlike the stereotypical white player, he has the athletic ability to fly past defenders and soar into the air. The announcers are sufficiently aware of his ability to acknowledge that he is not just a shooter, but not aware enough to realize that the fact they need to mention this is a joke on their eyes.
The thing is, there is a player on the team this year who’s almost exactly like Zach Novak. His name is Jordan Morgan. In high school, he was pudgy, slow, and small. No major conference programs except Michigan offered him a scholarship. But he physically transformed himself and has become, like Novak, an amazing overachiever. As a six-foot-seven center, he’s almost as undersize for his position as Novak is. He won’t shoot unless he’s within a couple feet of the basket, and often not even then, because opposing players can often swat away his shot attempts. Instead he spends most of every offensive possession throwing his body around the court, setting screen after screen to open up shots for his teammates.
He uses leverage, smarts, and unrelenting effort to gain every inch of advantage fighting against opposing centers. Morgan is a fifth-year senior who already graduated with a degree in engineering, and is studying for a master’s degree in manufacturing engineering.
But the announcers don’t talk about this stuff even one-tenth as often as they did Novak. And when they do, they don’t use terms like “gritty,” “unselfish,” “scrappy,” and “smart.” My explanation is that it’s because he looks like this:
Jordan Morgan #52 of the Michigan Wolverines reacts while taking on the Duke Blue Devils during the third round of the 2011 NCAA men's basketball tournament at Time Warner Cable Arena on March 20, 2011 in Charlotte, North Carolina. Photo: Kevin C. Cox/Getty Images
Yeah, he’s black. Now, don’t get me wrong — I don’t think anybody wants to suppress the story of an undersize, pudgy engineering nerd who made himself into a gritty, overachieving captain on a Big Ten champion team and who is also black. I bet the national media would love a story like that. I suspect they just don’t see it.
The gulf in physical talent between Novak and other players was glaring. Now, look at Morgan, with his gigantic biceps. If you don’t know him and you are using a simple heuristic, you probably think he’s a pretty good athletic talent, even if he is a little short. You don’t think about the fact that he gained that muscle after intense weight training. (Announcers never mention it.) And so one player is surrounded by a narrative of hustle, smarts, and toughness, and another player with the exact same qualities is not.
The situation is far better than it was three or four decades ago, when announcers would liken the skills of black players to animals. Today, they have some awareness of racial stereotyping. What’s left, I think, is far more characteristic of how racial bias typically works. Bad intent does not come into play. White people simply have certain preconceptions, and preconceptions make you see the things you expect to see and miss the things you don’t. |
#include<stdio.h>
int main()
{
int t,a,b,c,d,ans,n,emp,full,g,m,rem,i;
scanf("%d",&t);
while(t--)
{
int l[1000];
scanf("%d %d",&a,&b);
if(a==1)
{
printf("0\n");
}
else if(a==2)
{
printf("%d\n",b);
}
else if(a>2)
{
if(a%2==1)
{
emp = a/2+1;
full = a-emp;
rem = b%full;
g = b/full;
int sum=g*full*2;
printf("%d\n",sum+(2*rem));
}
else if(a%2==0)
{
emp = a/2;
full = a-emp;
rem = b%full;
g = b/full;
int sum=g*full*2;
printf("%d\n",sum+(2*rem));
}
}
}
} |
import path from 'path'
import { stop, lookItUp, lookItUpSync } from '../src'
import { CWD, PKG_PATH, BAR_PATH, isFileInDir } from './utils'
const hasPkgJson = (dir: string): string | null =>
isFileInDir('package.json', dir)
describe('lookItUp', () => {
it('should return package.json path', async () => {
const result = await lookItUp('package.json', BAR_PATH)
expect(result).toBe(PKG_PATH)
})
it('should return CWD if matcher function is provided', async () => {
const result = await lookItUp(dir => hasPkgJson(dir), BAR_PATH)
expect(result).toBe(CWD)
})
it('should return CWD if async matcher function is provided', async () => {
const result = await lookItUp(async dir => hasPkgJson(dir), BAR_PATH)
expect(result).toBe(CWD)
})
it('should return null if no file is found', async () => {
const result = await lookItUp('no_such_file')
expect(result).toBe(null)
})
it('should stop in advance if stop is returned from matcher function', async () => {
const result = await lookItUp(dir => {
if (dir === CWD) {
return stop
}
return hasPkgJson(dir)
}, BAR_PATH)
expect(result).toBe(null)
})
it('should return null if dir is provided', async () => {
const result = await lookItUp(dir => hasPkgJson(dir), path.join(CWD, '..'))
expect(result).toBe(null)
})
})
describe('lookItUpSync', () => {
it('should return package.json path', () => {
const result = lookItUpSync('package.json', BAR_PATH)
expect(result).toBe(PKG_PATH)
})
it('should return CWD if matcher function is provided', () => {
const result = lookItUpSync(dir => hasPkgJson(dir), BAR_PATH)
expect(result).toBe(CWD)
})
it('should return null if no file is found', () => {
const result = lookItUpSync('no_such_file')
expect(result).toBe(null)
})
it('should stop in advance if stop is returned from matcher function', () => {
const result = lookItUpSync(dir => {
if (dir === CWD) {
return stop
}
return hasPkgJson(dir)
}, BAR_PATH)
expect(result).toBe(null)
})
it('should return null if dir is provided', () => {
const result = lookItUpSync(dir => hasPkgJson(dir), path.join(CWD, '..'))
expect(result).toBe(null)
})
it('should throw an error if async matcher is provided', () => {
expect.assertions(1)
try {
lookItUpSync(
// @ts-expect-error throw new Error(ERROR_MSG)
async (dir: string) => hasPkgJson(dir),
path.join(CWD, '..')
)
} catch (err) {
expect(err.message).toBe(
'Async matcher can not be used in `lookItUpSync`'
)
}
})
})
|
def list_to_cell_grid(cells: List[A],
mapping: Dict[Tuple[int, int], int]) -> CellGrid[A]:
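    # Rebuild the 2-D grid: ``mapping`` sends (row, column) positions to indices
    # in the flat ``cells`` list. Row indices are assumed to start at 0 and be
    # contiguous, since a new row is appended whenever the row index changes.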
cell_grid = []
last_i = None
for i, j in sorted(list(mapping.keys())):
if i != last_i:
cell_grid.append([])
cell_grid[i].append(cells[mapping[(i, j)]])
last_i = i
return cell_grid |
<gh_stars>1-10
#include <QFontDialog>
#include "appearancesettingsmenu.h"
#include "game.h"
AppearanceSettingsMenu::AppearanceSettingsMenu(Game *game, QGridLayout *grid) :
Menu(game, grid) {
grid->addWidget(&appearanceSettingsLabel, 0, 1);
appearanceSettingsLabel.setVisible(false);
appearanceSettingsLabel.setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
appearanceSettingsLabel.setSizePolicy(QSizePolicy::Preferred, QSizePolicy::Fixed);
grid->addWidget(&inventoryTypeLabel, 1, 0);
inventoryTypeLabel.setVisible(false);
inventoryTypeLabel.setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
grid->addWidget(&inventoryTypeSelector, 1, 1);
inventoryTypeSelector.setCurrentIndex(-1);
inventoryTypeSelector.setVisible(false);
inventoryTypeSelector.setEnabled(false);
grid->addWidget(&fontLabel, 2, 0);
fontLabel.setVisible(false);
fontLabel.setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
grid->addWidget(&fontSetupButton, 2, 1);
fontSetupButton.setVisible(false);
fontSetupButton.setEnabled(false);
connect(&fontSetupButton, SIGNAL(released()), this, SLOT(fontSetupFunction()));
grid->addWidget(&colorThemeLabel, 3, 0);
colorThemeLabel.setVisible(false);
colorThemeLabel.setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
grid->addWidget(&colorThemeSelector, 3, 1);
colorThemeSelector.setCurrentIndex(-1);
colorThemeSelector.setVisible(false);
colorThemeSelector.setEnabled(false);
grid->addWidget(&backgroundImagesLabel, 4, 0);
backgroundImagesLabel.setWordWrap(true);
backgroundImagesLabel.setVisible(false);
backgroundImagesLabel.setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
grid->addWidget(&backgroundImagesSwitch, 4, 1);
backgroundImagesSwitch.setTristate(false);
backgroundImagesSwitch.setVisible(false);
backgroundImagesSwitch.setEnabled(false);
grid->addWidget(&backButton, 5, 1);
backButton.setVisible(false);
backButton.setEnabled(false);
connect(&backButton, SIGNAL(released()), this, SLOT(backFunction()));
}
void AppearanceSettingsMenu::display() {
this->pre_display();
appearanceSettingsLabel.setText(game->str.appearanceSettings);
appearanceSettingsLabel.setVisible(true);
inventoryTypeLabel.setText(game->str.inventoryType);
inventoryTypeLabel.setVisible(true);
inventoryTypeSelector.addItem(game->str.popUp);
inventoryTypeSelector.addItem(game->str.builtIn);
inventoryTypeSelector.setCurrentIndex(static_cast<int>(game->inventoryType));
inventoryTypeSelector.setVisible(true);
inventoryTypeSelector.setEnabled(true);
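    // Store the connection handle so hide() can disconnect this lambda when the
    // menu is hidden.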
inventoryTypeUpdater = connect(&inventoryTypeSelector, static_cast<void(QComboBox::*)(int)>(&QComboBox::currentIndexChanged),
[this](int index) {
if (index != -1) {
this->game->inventoryType = static_cast<InventoryType>(index);
}
});
fontLabel.setText(game->str.font);
fontLabel.setVisible(true);
fontSetupButton.setText(game->str.setup);
fontSetupButton.setVisible(true);
fontSetupButton.setEnabled(true);
colorThemeLabel.setText(game->str.colorTheme);
colorThemeLabel.setVisible(true);
colorThemeSelector.addItem(game->str.lightTheme);
colorThemeSelector.addItem(game->str.darkTheme);
colorThemeSelector.setCurrentIndex(static_cast<int>(game->colorTheme));
colorThemeSelector.setVisible(true);
colorThemeSelector.setEnabled(true);
colorThemeUpdater = connect(&colorThemeSelector, static_cast<void(QComboBox::*)(int)>(&QComboBox::currentIndexChanged),
[this](int index) {
if (index != -1) {
this->game->colorTheme = static_cast<ColorTheme>(index);
this->game->applyColorTheme(this->game->colorTheme);
}
});
backgroundImagesLabel.setText(game->str.showBackgroundImages);
backgroundImagesLabel.setVisible(true);
backgroundImagesSwitch.setVisible(true);
backgroundImagesSwitch.setEnabled(true);
backgroundImagesSwitch.setChecked(game->showBgImages);
backgroundImagesUpdater = connect(&backgroundImagesSwitch, static_cast<void(QCheckBox::*)(int)>(&QCheckBox::stateChanged),
[this](int state) {
this->game->showBgImages = bool(state);
});
backButton.setText(game->str.back);
backButton.setVisible(true);
backButton.setEnabled(true);
displayed = true;
}
void AppearanceSettingsMenu::fontSetupFunction() {
QFont newFont = QFontDialog::getFont(nullptr, game->textFont, nullptr);
game->setFont(newFont);
game->console.setFont(newFont);
game->aboutMenu.setFont(newFont);
game->gameMenu.getPopUpInventoryTable().setFont(newFont);
}
void AppearanceSettingsMenu::backFunction() {
this->hide();
game->settingsMenu.display();
}
void AppearanceSettingsMenu::hide() {
this->pre_hide();
appearanceSettingsLabel.setVisible(false);
inventoryTypeLabel.setVisible(false);
inventoryTypeSelector.setVisible(false);
inventoryTypeSelector.setEnabled(false);
disconnect(inventoryTypeUpdater);
inventoryTypeSelector.clear();
fontLabel.setVisible(false);
fontSetupButton.setVisible(false);
fontSetupButton.setEnabled(false);
backgroundImagesLabel.setVisible(false);
backgroundImagesSwitch.setVisible(false);
backgroundImagesSwitch.setEnabled(false);
disconnect(backgroundImagesUpdater);
colorThemeLabel.setVisible(false);
colorThemeSelector.setVisible(false);
colorThemeSelector.setEnabled(false);
disconnect(colorThemeUpdater);
colorThemeSelector.clear();
backButton.setVisible(false);
backButton.setEnabled(false);
displayed = false;
}
AppearanceSettingsMenu::~AppearanceSettingsMenu() {
}
|
import * as vscode from 'vscode'
import { EOL } from 'os'
import type { Extension } from '../main'
export class TeXMagician {
extension: Extension
constructor(extension: Extension) {
this.extension = extension
}
getFileName(file: string): string {
const segments = file.replace(/\\/g, '/').match(/([^/]+$)/)
if (segments) {
return segments[0]
}
return ''
}
getRelativePath(file: string, currentFile: string): string {
// replace '\' in windows paths with '/'
file = file.replace(/\\/g, '/')
// get path of current folder, including to the last '/'
let currentFolder = currentFile.replace(/\\/g, '/').replace(/[^/]+$/gi, '')
// find index up to which paths match
let i = 0
while (file.charAt(i) === currentFolder.charAt(i)) {
i++
}
// select nonmatching substring
file = file.substring(i)
currentFolder = currentFolder.substring(i)
// replace each '/foldername/' in path with '/../'
currentFolder = currentFolder.replace(/[^/]+/g, '..')
return ('./' + currentFolder + file).replace(/^\.\/\.\./, '..')
}
addTexRoot() {
// taken from here: https://github.com/DonJayamanne/listFilesVSCode/blob/master/src/extension.ts (MIT licensed, should be fine)
void vscode.workspace.findFiles('**/*.{tex}').then(files => {
const displayFiles = files.map(file => {
return { description: file.fsPath, label: this.getFileName(file.fsPath), filePath: file.fsPath }
})
void vscode.window.showQuickPick(displayFiles).then(val => {
const editor = vscode.window.activeTextEditor
if (!(val && editor)) {
return
}
const relativePath = this.getRelativePath(val.filePath, editor.document.fileName)
const magicComment = `% !TeX root = ${relativePath}`
const line0 = editor.document.lineAt(0).text
const edits = [(line0.match(/^\s*%\s*!TeX root/gmi)) ?
vscode.TextEdit.replace(new vscode.Range(0, 0, 0, line0.length), magicComment)
:
vscode.TextEdit.insert(new vscode.Position(0, 0), magicComment + EOL)
]
// Insert the text
const uri = editor.document.uri
const edit = new vscode.WorkspaceEdit()
edit.set(uri, edits)
void vscode.workspace.applyEdit(edit)
})
})
}
}
|
/**
* Adds the connecting IP and the time of the connection to the database.
*/
@RequestMapping(
value = "/add",
produces = MediaType.APPLICATION_JSON_VALUE,
method = RequestMethod.POST)
@ResponseBody
String addRequest(HttpServletRequest request) {
JsonObject aOutJson = new JsonObject();
try {
mLogger.info("add called");
IRI aSubject = Values.iri(mPrefix + UUID.randomUUID().toString());
IRI aIpPredicate = mVF.createIRI(mPrefix + "hasIp");
IRI aIpObject = mVF.createIRI("urn:" + request.getRemoteAddr());
String aConnectionId = Long.toString(System.currentTimeMillis());
IRI aTimePredicate = mVF.createIRI(mPrefix + "connectedAt");
IRI aTimeObject = mVF.createIRI("urn:" + aConnectionId);
Connection aConn = mFactory.connect();
aConn.begin();
aConn.add().statement(aSubject, aIpPredicate, aIpObject);
aConn.add().statement(aSubject, aTimePredicate, aTimeObject);
aConn.commit();
aConn.close();
aOutJson.addProperty("ip", request.getRemoteAddr());
aOutJson.addProperty("time", aConnectionId);
aOutJson.addProperty("status", "SUCCESS");
aOutJson.addProperty("message", "Successfully added a new connection");
}
catch (RuntimeException rt) {
aOutJson.addProperty("status", "FAILED");
aOutJson.addProperty("message", rt.toString());
}
return aOutJson.toString();
} |
/*
*+
* Name:
* smf_clipped_stats1D
* Purpose:
* Calculate mean, median and standard deviation of data with sigma clipping
* Language:
* Starlink ANSI C
* Type of Module:
* Library routine
* Invocation:
* void smf_clipped_stats1D( const double *data, dim_t nclips,
* const float clips[], dim_t stride,
* dim_t nsamp, smf_qual_t *quality,
* dim_t qstride, smf_qual_t mask, double *mean,
* double *sigma, double *median, int usemedian,
* dim_t *ngood, int *status );
* Arguments:
* data = const double* (Given)
* Pointer to input data array
* nclips = dim_t (Given)
* Number of clips to read from the clips[] array.
* clips[] = const float (Given)
* Array of sigma-clips to apply to the data before recalculating mean and
* standard deviation.
* stride = dim_t (Given)
* Index stride between elements
* nsamp = dim_t (Given)
* Length of the interval to analyze
* quality = smf_qual_t* (Given)
* If specified, use this QUALITY array to decide which samples
* to use (provided mask). Otherwise data are only ignored if set
* to VAL__BADD.
* qstride = dim_t (Given)
* Stride for qual. If 0 assumed to be stride.
* mask = smf_qual_t (Given)
* Use with qual to define which bits in quality are relevant to
* ignore data in the calculation.
* mean = double* (Given and Returned)
* Pointer to variable that will contain the mean of the data.
* sigma = double* (Given and Returned)
* Pointer to variable that will contain the standard deviation of
* the data. If NULL this routine will run faster and not calculate
* the standard deviation.
* usemedian = int (Given)
* If set, when applying the clips, will check for offsets from the
* median rather than the mean.
* ngood = dim_t* (Given and Returned)
* Pointer to variable that will indicate how many samples were used
* to calculate the statistics.
* status = int* (Given and Returned)
* Pointer to global status.
* Description:
* Calculates the mean and standard deviation. Then applies a sigma
* clip to the data and recalculates the mean and standard deviation
* (if some additional points were removed). This is repeated for
* each supplied clipping level.
* Authors:
* TIMJ: Tim Jenness (JAC, Hawaii)
* EC: Ed Chapin (UBC)
* {enter_new_authors_here}
* Notes:
* - smf_stats1D is run at most nclips+1 times.
* - if nclips is 0 smf_stats1D will be run and no clipping applied.
* History:
* 2010-07-02 (TIMJ):
* Initial version
* 2011-06-16 (EC):
* Add median / usemedian
* {enter_further_changes_here}
* Copyright:
* Copyright (C) 2010 Science and Technology Facilities Council.
* Copyright (C) 2011 University of British Columbia.
* All Rights Reserved.
* Licence:
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 3 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the Free
* Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA
* Bugs:
* {note_any_bugs_here}
*-
*/
#include "smf.h"
#include "sae_par.h"
static dim_t smf__flag_clipped_data( const double *data, dim_t stride,
dim_t nsamp, smf_qual_t *qua,
smf_qual_t badqual, double mean,
double sigma, double sigclip,
int * status );
void smf_clipped_stats1D( const double *data, dim_t nclips,
const float clips[], dim_t stride, dim_t nsamp,
const smf_qual_t *quality, dim_t qstride,
smf_qual_t mask, double *mean, double *sigma,
double *median, int usemedian, dim_t *ngood,
int *status ) {
dim_t clip = 0; /* Clip index */
dim_t i = 0; /* Loop counter */
double lmean = VAL__BADD; /* Local mean */
double lmedian = VAL__BADD; /* Local median */
dim_t lngood = 0; /* Local ngood */
double lsigma = VAL__BADD; /* Local sigma */
const smf_qual_t BADQUAL = 1; /* The value we use for local bad quality */
smf_qual_t * qua = NULL; /* Quality we will be using locally */
/* initialise return values */
if (sigma) *sigma = VAL__BADD;
if (ngood) *ngood = 0;
if (mean) *mean = VAL__BADD;
if (median) *median = VAL__BADD;
/* Check status */
if (*status != SAI__OK) return;
/* set up the stride through the quality whilst we copy quality */
/* We are going to control smf_stats1 by using quality so we have to either
create our own quality or copy an existing one. We will then use our own
mask and a qstride that is 1 by definition. */
qua = astCalloc( nsamp, sizeof(*qua) );
if ( quality ) {
dim_t j = 0;
/* make sure we step through properly */
if (!qstride) qstride = stride;
/* copy it over. i indexes the output quality */
for ( i = 0; j < nsamp; i++) {
if ( quality[j] & mask ) {
qua[i] |= BADQUAL;
}
j += qstride;
}
} else {
dim_t j = 0;
/* fill it from the bad values */
for (i=0; i<nsamp*stride; i+=stride ) {
if ( data[i] == VAL__BADD ) {
qua[j] |= BADQUAL;
}
j++;
}
}
/* Run stats and then clip */
for ( clip = 0; clip < nclips; clip++) {
/* Calculate stats with our quality and qstride of 1 */
smf_stats1D( data, stride, nsamp, qua, 1, BADQUAL, &lmean, &lsigma,
usemedian ? &lmedian : NULL, &lngood, status );
/* Flag any values exceeding the specified clip */
lngood = smf__flag_clipped_data( data, stride, nsamp, qua, BADQUAL,
usemedian ? lmedian : lmean,
lsigma, clips[clip], status );
}
/* and one final stats now that all clips have been applied*/
smf_stats1D( data, stride, nsamp, qua, 1, BADQUAL, &lmean, &lsigma,
median ? &lmedian : NULL, &lngood, status );
/* Free quality */
qua = astFree( qua );
/* Copy results */
if (mean) *mean = lmean;
if (sigma) *sigma = lsigma;
if (median) *median = lmedian;
if (ngood) *ngood = lngood;
}
/* Helper routine to run through the data array and setting quality to
the suppliedq value if a data point is out of range. It does
recalculate ngood and returns it. */
static dim_t smf__flag_clipped_data( const double *data, dim_t stride,
dim_t nsamp, smf_qual_t *qua,
smf_qual_t badqual, double mean,
double sigma, double sigclip,
int * status ) {
dim_t i = 0;
dim_t j = 0;
dim_t ngood = 0;
double dmax;
double dmin;
if (*status != SAI__OK) return 0;
if (mean == VAL__BADD || sigma == VAL__BADD) {
for ( i = 0; i<nsamp; i++) {
qua[i] |= badqual;
}
return 0;
}
/* calculate acceptable bounds */
dmax = mean + ( sigclip * sigma );
dmin = mean - ( sigclip * sigma );
for ( i = 0; i < nsamp*stride; i+=stride ) {
if ( ! qua[j] ) { /* any zero quality is good */
if ( data[i] < dmin || data[i] > dmax ) {
qua[j] |= badqual;
} else {
ngood++;
}
}
j++; /* increment quality index */
}
return ngood;
}
|
/**
* (U) This class is responsible for generating the Software Bill Of Materials (SBOM) for all Ubuntu
* Linux Operating Systems.
*
* @author wrgoff
* @since 23 April 2020
*/
public class UbuntuSBomGenerator extends UnixSBomGenerator
{
private static final String PACKAGE_MANAGER = "apt";
private static final String SOFTWARE_INSTALLED_VERSION = "apt policy";
private static final String SOFTWARE_DETAIL_CMD = "apt show";
public static final String SOFTWARE_LIST_CMD = "apt list --installed";
private ProcessBuilder processBuilder = new ProcessBuilder();
/**
* (U) This method is used to generate the Software Bill Of Materials (SBOM) for all Ubuntu
* Linux Operating systems.
*
* @return Bom The Software Bill Of Materials for this Ubuntu Linux Operating System.
* @throws SBomException if we are unable to build the SBOM.
*/
public Bom generateSBom()
{
List<String> softwareList = generateListOfSoftware(SOFTWARE_LIST_CMD, '/',
"");
Bom bom = new Bom();
if (logger.isDebugEnabled())
logger.debug("Processing " + softwareList.size() + " software programs.");
Map<String, String> detailMap = null;
String version = null;
String group = null;
LicenseChoice license = null;
Component component = null;
for (String software : softwareList)
{
if (logger.isDebugEnabled())
logger.debug("Generating Component (" + software + ")");
detailMap = produceDetailMap(software);
version = detailMap.get("Version");
group = detailMap.get("Release");
license = processLicense(software);
component = createComponents(software, detailMap, license, group,
version, null, detailMap.get("Priority"));
bom.addComponent(addPackageManager(component, PACKAGE_MANAGER));
}
return bom;
}
/**
* (U) This method is used to run the command to get the version of the package that is
* currently installed.
*
* @param software String value of the software package to get the version of.
* @return String value of the version of the software that is currently installed.
* @throws SBomException in the event we can NOT get the version.
*/
public String getInstalledVersion(String software)
{
String version = "";
String cmd = SOFTWARE_INSTALLED_VERSION + " " + software;
if (logger.isDebugEnabled())
logger.debug("Attempting to get software (" + software + ") version via: " + cmd);
processBuilder.command("bash", "-c", cmd);
try
{
Process process = processBuilder.start();
version = readVersion(process);
}
catch (IOException ioe)
{
String error = "Unable to build unix process to get software version (" +
cmd +
") on the server!";
logger.error(error, ioe);
throw new SBomException(error, ioe);
}
if (logger.isDebugEnabled())
logger.debug("Found Version (" + version + ") for " + software + ".");
return version;
}
/**
* (U) This method is used to parse the version from the reader of the Unix Command being run.
*
* @param reader BufferedReader of the contents of the Unix command being run.
* @return String the version pulled from the reader.
* @throws SBomException in the event we are unable to read from the Unix Command.
*/
public String parseVersion(BufferedReader reader)
{
String version = null;
String line;
try
{
while ((line = reader.readLine()) != null)
{
line = line.trim();
if (line.startsWith("Installed"))
{
int index = line.indexOf(':');
version = line.substring(index + 1).trim();
break;
}
}
}
catch (IOException e)
{
String error = "Failed to read version.";
logger.error(error, e);
throw new SBomException(error, e);
}
return version;
}
/**
* (U) This method is responsible for getting the license (if present) and placing it in the
* LicenseChoice Object passed back.
*
* @param software String value of the software we are attempting to get the license for.
* @return LicenseChoice that contains information about the license.
*/
private LicenseChoice processLicense(String software)
{
LicenseChoice licenseChoice = null;
String licenseFile = SOFTWARE_LICENSE_DIR + software + "/copyright";
if (logger.isDebugEnabled())
logger.debug("Attempting to process license (" + licenseFile + ")");
try
{
String licenseTxt = new String(Files.readAllBytes(Paths.get(licenseFile)));
licenseChoice = parseLicenseText(licenseTxt, AVAILABLE_LINUX_FLAVORS.UBUNTU);
}
catch (IOException ioe)
{
logger.warn("Unable to read license file (" + licenseFile + ")", ioe);
}
return licenseChoice;
}
/**
* (U) This method is used to produce a Detail Map of the Software in question. This will be
* used to create a CycloneDx Component.
*
* @param software String value of the component to build the detail map for.
* @return Map containing the key value pairs about the software.
* @throws SBomException in the event we can NOT produce the detail map of the software.
*/
private Map<String, String> produceDetailMap(String software)
{
String cmd = SOFTWARE_DETAIL_CMD + " " + software + "=" + getInstalledVersion(software);
return (produceDetailMap(cmd, AVAILABLE_LINUX_FLAVORS.UBUNTU));
}
/**
* (U) This method is used to read the Version from the Command Process.
*
* @param process Process to read the version from.
* @return String the version.
* @throws SBomException in the event we are unable to process the command.
*/
public String readVersion(Process process)
{
String version = null;
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(process.getInputStream())))
{
version = parseVersion(reader);
int exitVal = process.waitFor();
if (exitVal != 0)
{
String error = "Unexpected process exit value (" + exitVal + "), while " +
"attempting to get Installed Software Version!";
logger.error(error);
throw new SBomException(error);
}
}
catch (SBomException sbom)
{
throw sbom;
}
catch (Exception e)
{
String error = "Unexpected error while attempting to get the software version!";
logger.error(error, e);
throw new SBomException(error, e);
}
return version;
}
} |
<filename>UtilPrograms/userDataGen/src/main/java/com/adu/dataGen/UserRepository.java
package com.adu.dataGen;
import org.springframework.data.mongodb.repository.MongoRepository;
public interface UserRepository extends MongoRepository<User,String> {
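    // Spring Data MongoDB derives the standard CRUD operations for User documents
    // from MongoRepository, so no extra methods are declared here.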
}
|
/**
* This example demonstrates how to access the local storage and perform operations with it.
*
* <p>Accessing the session storage is performed by calling {@code Frame.sessionStorage()} instead
* of {@code Frame.localStorage()}
*/
public final class LocalWebStorage {
private static final String KEY = "Name";
public static void main(String[] args) {
Engine engine = Engine.newInstance(OFF_SCREEN);
Browser browser = engine.newBrowser();
browser.navigation().loadUrlAndWait("https://www.google.com");
browser.mainFrame().ifPresent(frame -> {
frame.localStorage().putItem(KEY, "Tom");
System.out.println((String) frame.executeJavaScript(
format("window.localStorage.getItem(\"%s\")", KEY)));
});
}
} |
Integrating big data into the computing curricula
An important recent technological development in computer science is the availability of highly distributed and scalable systems to process Big Data, i.e., datasets with high volume, velocity and variety. Given the extensive and effective use of systems incorporating Big Data in many application scenarios, these systems have become a key component in the broad landscape of database systems. This fact creates the need to integrate the study of Big Data Management Systems as part of the computing curricula. This paper presents well-structured guidelines to perform this integration by describing the important types of Big Data systems and demonstrating how each type of system can be integrated into the curriculum. A key contribution of this paper is the description of an array of course resources, e.g., virtual machines, sample projects, and in-class exercises, and how these resources support the learning outcomes and enable a hands-on experience with Big Data technologies. |
import 'express-session';
import { UserAttributes } from '../app/models/User';
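// Module augmentation: extend express-session's SessionData with the fields
// this application stores on each session.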
declare module 'express-session' {
export interface SessionData {
userId: number;
authenticated: boolean;
user: Partial<UserAttributes>;
}
}
|
<filename>components/credentials/src/test/java/org/cloudfoundry/credhub/domain/CertificateGenerationParametersTest.java
package org.cloudfoundry.credhub.domain;
import org.cloudfoundry.credhub.requests.CertificateGenerationRequestParameters;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.IsEqual.equalTo;
@RunWith(JUnit4.class)
public class CertificateGenerationParametersTest {
@Test
public void constructor_prependsForwardSlashToCaName() throws Exception {
final CertificateGenerationRequestParameters requestParameters1 = new CertificateGenerationRequestParameters();
requestParameters1.setCaName("ca-name");
requestParameters1.setCommonName("a-common-name");
final CertificateGenerationRequestParameters requestParameters2 = new CertificateGenerationRequestParameters();
requestParameters2.setCaName("/ca-name");
requestParameters2.setCommonName("a-common-name");
final CertificateGenerationParameters parameters1 = new CertificateGenerationParameters(requestParameters1);
final CertificateGenerationParameters parameters2 = new CertificateGenerationParameters(requestParameters2);
assertThat(parameters1.equals(parameters2), equalTo(true));
}
@Test
public void equals_returnsTrueWhenKeyUsagesAreIdentical() throws Exception {
final CertificateGenerationRequestParameters requestParameters1 = new CertificateGenerationRequestParameters();
final String[] keyUsages1 = new String[]{"data_encipherment", "digital_signature", "non_repudiation", "key_encipherment"};
final String[] keyUsages2 = new String[]{"digital_signature", "non_repudiation", "key_encipherment", "data_encipherment"};
requestParameters1.setKeyUsage(keyUsages1);
requestParameters1.setCommonName("a-common-name");
final CertificateGenerationRequestParameters requestParameters2 = new CertificateGenerationRequestParameters();
requestParameters2.setKeyUsage(keyUsages2);
requestParameters2.setCommonName("a-common-name");
final CertificateGenerationParameters parameters1 = new CertificateGenerationParameters(requestParameters1);
final CertificateGenerationParameters parameters2 = new CertificateGenerationParameters(requestParameters2);
assertThat(parameters1.equals(parameters2), equalTo(true));
}
@Test
public void equals_returnsFalseWhenKeyUsagesAreDifferent() throws Exception {
final CertificateGenerationRequestParameters requestParameters1 = new CertificateGenerationRequestParameters();
final String[] keyUsages1 = new String[]{"data_encipherment", "digital_signature", "non_repudiation", "key_encipherment"};
final String[] keyUsages2 = new String[]{"data_encipherment", "digital_signature", "non_repudiation"};
requestParameters1.setKeyUsage(keyUsages1);
requestParameters1.setCommonName("a-common-name");
final CertificateGenerationRequestParameters requestParameters2 = new CertificateGenerationRequestParameters();
requestParameters2.setKeyUsage(keyUsages2);
requestParameters2.setCommonName("a-common-name");
final CertificateGenerationParameters parameters1 = new CertificateGenerationParameters(requestParameters1);
final CertificateGenerationParameters parameters2 = new CertificateGenerationParameters(requestParameters2);
assertThat(parameters1.equals(parameters2), equalTo(false));
}
@Test
public void equals_returnsTrueWhenExtendedKeyUsagesAreIdentical() throws Exception {
final CertificateGenerationRequestParameters requestParameters1 = new CertificateGenerationRequestParameters();
final String[] keyUsages1 = new String[]{"server_auth", "client_auth", "code_signing", "email_protection", "timestamping"};
final String[] keyUsages2 = new String[]{"server_auth", "client_auth", "code_signing", "email_protection", "timestamping"};
requestParameters1.setExtendedKeyUsage(keyUsages1);
requestParameters1.setCommonName("a-common-name");
final CertificateGenerationRequestParameters requestParameters2 = new CertificateGenerationRequestParameters();
requestParameters2.setExtendedKeyUsage(keyUsages2);
requestParameters2.setCommonName("a-common-name");
final CertificateGenerationParameters parameters1 = new CertificateGenerationParameters(requestParameters1);
final CertificateGenerationParameters parameters2 = new CertificateGenerationParameters(requestParameters2);
assertThat(parameters1.equals(parameters2), equalTo(true));
}
@Test
public void equals_returnsFalseWhenExtendedKeyUsagesAreDifferent() throws Exception {
final CertificateGenerationRequestParameters requestParameters1 = new CertificateGenerationRequestParameters();
final String[] keyUsages1 = new String[]{"server_auth", "client_auth", "code_signing", "email_protection", "timestamping"};
final String[] keyUsages2 = new String[]{"server_auth", "client_auth", "code_signing", "email_protection"};
requestParameters1.setExtendedKeyUsage(keyUsages1);
requestParameters1.setCommonName("a-common-name");
final CertificateGenerationRequestParameters requestParameters2 = new CertificateGenerationRequestParameters();
requestParameters2.setExtendedKeyUsage(keyUsages2);
requestParameters2.setCommonName("a-common-name");
final CertificateGenerationParameters parameters1 = new CertificateGenerationParameters(requestParameters1);
final CertificateGenerationParameters parameters2 = new CertificateGenerationParameters(requestParameters2);
assertThat(parameters1.equals(parameters2), equalTo(false));
}
}
|
<reponame>diegomacario/Quaternion-Experiments
#ifndef FINITE_STATE_MACHINE_H
#define FINITE_STATE_MACHINE_H
#include <unordered_map>
#include <memory>
#include <string>
#include "state.h"
class FiniteStateMachine
{
public:
FiniteStateMachine() = default;
~FiniteStateMachine() = default;
FiniteStateMachine(const FiniteStateMachine&) = delete;
FiniteStateMachine& operator=(const FiniteStateMachine&) = delete;
FiniteStateMachine(FiniteStateMachine&&) = delete;
FiniteStateMachine& operator=(FiniteStateMachine&&) = delete;
void initialize(std::unordered_map<std::string, std::shared_ptr<State>>&& states,
const std::string& initialStateID);
void processInputInCurrentState(float deltaTime) const;
void updateCurrentState(float deltaTime) const;
void renderCurrentState() const;
void changeState(const std::string& newStateID);
std::shared_ptr<State> getPreviousState();
std::string getPreviousStateID() const;
std::string getCurrentStateID() const;
private:
std::unordered_map<std::string, std::shared_ptr<State>> mStates;
std::shared_ptr<State> mCurrentState;
std::string mPreviousStateID;
std::string mCurrentStateID;
};
#endif
|
<filename>src/main/java/at/tugraz/ist/ase/common/CmdLineOptionsBase.java
/*
* CommonPackage
*
* Copyright (c) 2022-2022
*
* @author: <NAME> (<EMAIL>)
*/
package at.tugraz.ist.ase.common;
import lombok.Getter;
import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import static com.google.common.base.Preconditions.checkState;
@Slf4j
public class CmdLineOptionsBase {
@Getter
@Option(name = "-h",
aliases="--help",
usage = "Prints usage info.")
private boolean help = false;
protected CmdLineParser parser;
private final String banner;
private final String programTitle;
private final String subtitle;
private final String usage;
public CmdLineOptionsBase(String banner, @NonNull String programTitle, String subtitle, @NonNull String usage) {
this.banner = banner;
this.programTitle = programTitle;
this.subtitle = subtitle;
this.usage = usage;
this.parser = null;
}
public void parseArgument(@NonNull String[] args) {
checkState(parser != null, "CmdLineParser not initialized");
if (args.length < 1) {
printUsage();
System.exit(-1);
}
try {
parser.parseArgument(args);
} catch (CmdLineException clEx) {
log.error("{}Unable to parse command-line options: {}", LoggerUtils.tab, clEx.getMessage());
printUsage();
System.exit(-1);
}
}
public void printUsage() {
checkState(parser != null, "CmdLineParser not initialized");
if (banner != null) {
System.out.println(banner);
}
System.out.println(programTitle);
if (subtitle != null) {
System.out.println(subtitle);
}
System.out.println(usage);
System.out.println("Options:");
parser.printUsage(System.out);
}
}
|
def is_operator_next(self):
operators = [
TokenType.ADD,
TokenType.SUBTRACT,
TokenType.DIVIDE,
TokenType.MULTIPLY
]
return any([self.peek(operator) for operator in operators]) |
#include <stdio.h>
int main()
{
int n,l,r,x,c=1,s1=0,s2=0,s=0;
scanf("%d %d %d",&n,&l,&r);
/* Minimum sum: take the l smallest distinct powers of two (1, 2, ..., 2^(l-1)),
   then fill the remaining n-l slots with 1 each. */
for(x=0;x<l;x++)
{
s=s+c;
c=c*2;
}
s1=s+(n-l);
/* Maximum sum: extend to r distinct powers of two,
   then fill the remaining n-r slots with the largest power 2^(r-1). */
for(;x<r;x++)
{
s=s+c;
c=c*2;
}
s2=s+(n-r)*(c/2);
printf("%d %d",s1,s2);
} |
<reponame>devan-huapaya/community-organization-operations-suite
/*!
* Copyright (c) Microsoft. All rights reserved.
* Licensed under the MIT license. See LICENSE file in the project.
*/
import { ServiceAnswerInput } from '@cbosuite/schema/dist/provider-types'
import { v4 as createId } from 'uuid'
import { DbServiceAnswer } from '~db/types'
import { empty } from '~utils/noop'
import { createDbServiceAnswerField } from './createDbServiceAnswerField'
export function createDBServiceAnswer(answer: ServiceAnswerInput): DbServiceAnswer {
return {
id: createId(),
service_id: answer.serviceId,
contacts: answer.contacts || empty,
fields: answer.fields.map(createDbServiceAnswerField) || empty
}
}
|
#include "starkware/proof_system/proof_system.h"
#include <stdexcept>
#include "gtest/gtest.h"
#include "starkware/error_handling/error_handling.h"
namespace starkware {
namespace {
TEST(FalseOnError, Correctness) {
EXPECT_TRUE(FalseOnError([]() { ASSERT_RELEASE(true, ""); }));
EXPECT_FALSE(FalseOnError([]() { ASSERT_RELEASE(false, ""); }));
EXPECT_THROW(FalseOnError([]() { throw std::invalid_argument(""); }), std::invalid_argument);
}
} // namespace
} // namespace starkware
|
// Waits for a sync token and import the mailbox as texture.
GLuint SynchronizeAndImportMailbox(gpu::gles2::GLES2Interface* gl,
const gpu::SyncToken& sync_token,
const gpu::Mailbox& mailbox) {
gl->WaitSyncTokenCHROMIUM(sync_token.GetConstData());
return mailbox.IsSharedImage()
? gl->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name)
: gl->CreateAndConsumeTextureCHROMIUM(mailbox.name);
} |
// Determine the bit length of p1, p2 for a given bit length of p.
// Per FIPS 186-4, Table B.1, p. 52
static cc_size
ccrsa_fips186_auxiliary_prime_length(cc_size plen) {
cc_size auxiliary_prime_bitlen;
if (plen<=512) {
auxiliary_prime_bitlen=101;
}
else if (plen<=1024) {
auxiliary_prime_bitlen=141;
}
else {
auxiliary_prime_bitlen=171;
}
return auxiliary_prime_bitlen;
} |
// clearMapping prepares all in-memory-mappings and other cache fields. All previous cached entries are removed.
func (plugin *IPSecConfigurator) clearMapping() {
plugin.spdIndexes.Clear()
plugin.cachedSpdIndexes.Clear()
plugin.saIndexes.Clear()
} |
import csv
import io
import json
import unittest
from scripts.migrate_to_whatsapp_templates.prebirth1 import Prebirth1Migration
class TestPrebirth1(unittest.TestCase):
def setUp(self):
self.prebirth1 = Prebirth1Migration()
def test_sequence_number_to_weeks(self):
"""
Given a certain sequence number for the prebirth 1 messageset, it should return
the correct number of weeks pregnant
"""
self.assertEqual(self.prebirth1.sequence_number_to_weeks(1), 5)
self.assertEqual(self.prebirth1.sequence_number_to_weeks(2), 5)
self.assertEqual(self.prebirth1.sequence_number_to_weeks(35), 22)
self.assertEqual(self.prebirth1.sequence_number_to_weeks(36), 22)
self.assertEqual(self.prebirth1.sequence_number_to_weeks(73), 41)
self.assertEqual(self.prebirth1.sequence_number_to_weeks(74), 41)
def test_prebirth_1(self):
"""
With a valid CSV, it should output a valid CSV with all the same fields, except
the metadata field should have a template key with the appropriate details.
"""
input = io.StringIO()
output = io.StringIO()
writer = csv.writer(input)
writer.writerow(
[
"id",
"messageset",
"sequence_number",
"lang",
"text_content",
"binary_content",
"metadata",
]
)
writer.writerow(["1", "2", "3", "zul_ZA", "Test message", "", '{"foo": "bar"}'])
self.prebirth1.run(io.StringIO(input.getvalue()), output)
reader = csv.DictReader(io.StringIO(output.getvalue()))
[row] = list(reader)
self.assertEqual(row["id"], "1")
self.assertEqual(row["messageset"], "2")
self.assertEqual(row["sequence_number"], "3")
self.assertEqual(row["lang"], "zul_ZA")
self.assertEqual(row["text_content"], "Test message")
self.assertEqual(row["binary_content"], "")
self.assertEqual(
json.loads(row["metadata"]),
{
"foo": "bar",
"template": {
"name": "mc_prebirth",
"language": "en",
"variables": ["6", "Test message"],
},
},
)
|
/**
* Created by Dal-Dev-Team on 17/3/7.
 */
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DDLSlow extends DetectStrategy {
private static final Logger LOGGER = LoggerFactory.getLogger(DDLSlow.class);
@Override public void detect0() {
if (curSql.type == QUERY_TYPE.COMMIT) {
sqlScore = curSql.result.getAvgDurInCurWindow() / 200;
}
}
@Override protected void recordEtrace0() {
etrace(curSql.getPattern());
}
@Override protected void log0() {
}
@Override protected void collectInfo() {
tags.put("avgDur", curSql.result.getAvgDurInCurWindow());
}
@Override public String name() {
return "ddl_slow";
}
} |
<filename>src/app/customers/card/card.component.ts<gh_stars>0
import {Component, Input, OnInit} from '@angular/core';
import {Customer} from '../interfaces/customer.interface';
@Component({
selector: 'app-customer-card',
templateUrl: './card.component.html',
styleUrls: ['./card.component.css']
})
export class CardComponent {
@Input() public customer: Customer;
}
|
Parity Violating Deep Inelastic Electron-Deuteron Scattering: Higher Twist and Parton Angular Momentum
We study the effect of parton angular momentum on the twist-four correction to the left-right asymmetry in the electron-deuteron parity-violating deep inelastic scattering (PVDIS). We show that this higher-twist correction is transparent to the dynamics of parton angular momentum needed to account for the Sivers and Boer-Mulders functions and spin-independent parton distribution functions. A sufficiently precise measurement of the PVDIS asymmetry may, thus, provide additional information about the parton dynamics responsible for nucleon spin.
I. INTRODUCTION
As a complement to the studies at high-energy frontier, measurements at the intensity frontier (or precision frontier) provide powerful tools in the search for physics Beyond the Standard Model (BSM). Observables such as the muon anomalous magnetic moment are measured to very high precision, and experimental results are then compared with theoretical predictions. To the extent that the latter are sufficiently reliable, any possible deviation would point to BSM physics. Alternately, these experiments can provide new insights into the dynamics of the Standard Model.
Electron-deuteron parity violating deep inelastic scattering (eD PVDIS) is an excellent example of this class of studies. Historically, it provided the first experimental measurement of the weak mixing angle $\theta_W$. Nowadays, with the prospect of the Jefferson Laboratory 12-GeV upgrade and the use of a new spectrometer called SoLID, the left-right asymmetry of PVDIS can be measured with 0.5% precision over the kinematic range $0.3 < x_B < 0.7$. With this level of precision, one will be able to probe or constrain an interesting set of BSM scenarios, such as a leptophobic Z' boson and supersymmetry, as well as to study hadronic physics effects which are yet to be fully understood, such as charge symmetry violation (CSV) and higher twist (HT).
The effect of HT is a potentially important Standard Model correction that originates from the interaction between partons. This correction in general scales as $(Q^2)^{-(\tau-2)/2}$, with the twist $\tau > 2$, so its effect is enhanced at low $Q^2$. In the framework of the operator product expansion (OPE), the higher-twist correction can be expressed as a convolution of a high-energy and a low-energy piece; the former (embodied in the Wilson coefficients) can be calculated using perturbative methods, whereas the latter involves hadronic matrix elements that require understanding of non-perturbative QCD. Studying the higher-twist correction may help us in probing correlations between the confined quarks and gluons inside the nucleon, so it is interesting to explore HT matrix elements within various model approaches. One advantage of the eD PVDIS process is that the HT contribution to the leading term in the PV asymmetry (defined below) arises from a single operator matrix element and can, in principle, be separated kinematically from the subleading terms that have a more complicated HT structure. With this motivation in mind, several previous works have been carried out to study the twist-four (i.e. $\tau = 4$) correction to the left-right asymmetry of eD PVDIS. In what follows, we report on a study that follows up these earlier works.
The study of HT may also shed light on another important issue, namely, the spin structure of the nucleon. Nearly twenty-five years ago, the EMC collaboration performed a DIS experiment with longitudinally-polarized muons on a target of longitudinally-polarized protons, obtaining a value for the structure function g 1 (x B ) over the range 0.01 < x B < 0.7.
After extrapolating to the low- and high-$x_B$ regions, the collaboration obtained a value for the leading moment of $g_1(x_B)$ that contradicted the Ellis-Jaffe sum rule and implied that the spin of the proton is not built up entirely from the quark spin. The result has been confirmed by a variety of subsequent studies. A key question in nuclear physics research has, thus, become explaining in detail the source of nucleon spin in terms of QCD degrees of freedom.
From a theoretical perspective, arriving at a decomposition of the nucleon spin in terms of gauge-invariant matrix elements of local operators that afford a straightforward partonic interpretation has been a vexing problem, and different approaches have been pursued over the years . In each case, reference is usually made to the interpretation in the lightcone -gauge dependence notwithstanding -given its historical importance for thinking about parton dynamics. However, while the meaning of the quark helicity is gauge invariant, the relative importance of other aspects of partonic angular momentum (gluon helicity and quark and gluon orbital angular momentum) in general vary with the choice of gauge and even definition. Nonetheless, it is interesting to ask how different observables may probe different aspects of partonic angular momentum and to do so in a way that is both gaugeinvariant and as insensitive as possible to a particular angular momentum decomposition.
In this respect, we will study HT in the context of light-cone quantization. In early work within this framework, it has been shown that one particular component of parton angular momentum -identified as quark orbital angular momentum (OAM) under light-cone quantization using light-cone gauge -is responsible for the non-zero value of Sivers function and Boer-Mulders function in semi-inclusive deep inelastic scattering (SIDIS) . In light of these results, it is also interesting to study how the inclusion of the same component of parton angular momentum modifies the current model predictions for HT corrections to eD PVDIS. Indeed, in all the previous studies of eD PVDIS, only the Fock component of the nucleon wavefunction with zero parton OAM has been included.
After including quark OAM in the light-cone amplitudes, we observe a rather non-intuitive phenomenon: although the absolute magnitude of individual non-zero quark OAM contributions can be significant, they largely cancel against each other. We will argue that this cancelation is largely independent of the detailed model for the relevant light-cone amplitudes. As a result, the twist-four correction to PVDIS is almost transparent to the inclusion of quark OAM. In contrast, other hadronic quantities, such as the parton distribution functions (PDF), Sivers function, and Boer-Mulders function, manifest non-negligible dependence on quark OAM. Generalizing from the particular choice of light-cone quantization and light-cone gauge, we thus conclude that whatever features of parton angular momentum are responsible for the observed behavior of the PDFs, Sivers, and Boer-Mulders functions, they should have a relatively minor impact on the HT correction to eD PVDIS of interest here. Moreover, any deviation from the light-cone predictions obtained here and in previous works -should they be observed expermentally -would signal the importance of other aspects of parton angular momentum and/or higher Fock space components of the nucleon wavefunction.
The discussion of the computation leading to these observations is arranged in the following order: in Section II we summarize the relevant results of the general formulation of the twist-four correction to eD PVDIS; in Section III we introduce the light-cone wavefunction with quark OAM-dependence; in Section IV we present the analytic expressions of the hadronic matrix elements needed for the twist-4 correction, and demonstrate the generic cancelation between non-zero quark OAM components; in Section V we present the numerical results using one specific choice of nucleon wavefunction, and discuss their physical significance. Detailed formulae appear in the Appendix.
II. HIGHER-TWIST IN PVDIS: GENERAL FORMULATION
Here, we review the well-known results for the twist-four correction in eD PVDIS. We will simply quote the central equations that are relevant to our study without any derivation and refer the reader to Refs. for the details.
In eD PVDIS, longitudinally-polarized electron beams are incident on unpolarized deuteron targets. One measures the PV right-left asymmetry $A_{RL} = (d\sigma_R - d\sigma_L)/(d\sigma_R + d\sigma_L)$, where $d\sigma_{R/L}$ is the differential cross-section for the scattering of right/left-handed electrons. At the one-boson exchange (OBE) level, the leading parity-violating piece comes from the interference between the photon and Z-boson exchange diagrams (see Fig. 1). The low-energy Z-boson exchange interaction can be described by an effective 4-fermion interaction whose quark-level couplings $C_{1q}$ and $C_{2q}$ take their Standard Model values at tree level. Neglecting contributions from sea quarks and assuming charge symmetry ($u_V^p = d_V^n$, etc., with $q_V^N$ being the valence quark PDF of nucleon $N$), the leading-twist SM prediction is given by the Cahn-Gilman formula, where $Q^2 = -q^2$ and $y = P \cdot q / P \cdot k$.
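For orientation, the effective interaction and the tree-level couplings referred to above take the conventional form below; this is a sketch following the standard conventions, and the paper's own Eqs. (2)-(6) may differ in normalization or notation.
\mathcal{L}^{\rm PV} = \frac{G_F}{\sqrt{2}}\Big[\,\bar{e}\gamma^{\mu}\gamma_5 e\,\big(C_{1u}\,\bar{u}\gamma_{\mu}u + C_{1d}\,\bar{d}\gamma_{\mu}d\big) + \bar{e}\gamma^{\mu}e\,\big(C_{2u}\,\bar{u}\gamma_{\mu}\gamma_5 u + C_{2d}\,\bar{d}\gamma_{\mu}\gamma_5 d\big)\Big]
C_{1u} = -\tfrac{1}{2} + \tfrac{4}{3}\sin^2\theta_W,\qquad C_{1d} = \tfrac{1}{2} - \tfrac{2}{3}\sin^2\theta_W,\qquad C_{2u} = -\tfrac{1}{2} + 2\sin^2\theta_W,\qquad C_{2d} = \tfrac{1}{2} - 2\sin^2\theta_W .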
To include corrections from possible BSM physics as well as other SM pieces, we can reparametrize the Cahn-Gilman formula. Here, $R_i$ describes any deviation of the $C_i$ from the expressions in Eqs.
(3) to (6), including both SM and BSM corrections. In this paper we concentrate on $R_1^{HT}$, namely the higher-twist correction to $\tilde{a}_1$. Bjorken and Wolfenstein showed that, if one assumes isospin symmetry and neglects sea quark contributions, then there is only one matrix element that contributes to $R_1^{HT}$ (for a detailed review of these arguments in a more modern context, see Ref. ). This observation significantly simplifies the theoretical interpretation of the asymmetry, allowing us to concentrate on one particular matrix element without needing to disentangle the contributions from many different sources. In brief, the Bjorken and Wolfenstein argument works as follows: $A_{RL}$ arises from the interference between the electromagnetic and weak neutral currents. First, one can decompose both currents into an isoscalar $S$ and an isovector $V$ term. The matrix element of the $S \times V$ cross-term vanishes because the deuteron is an isosinglet. Furthermore, at leading twist, we have $SS = VV$. Therefore, the difference between $SS$ and $VV$ that enters the hadronic tensor $W^{\mu\nu}$, with $M_D$ being the mass of the deuteron, is the only matrix element giving a HT correction to $\tilde{a}_1$. Below, we will compute the matrix element (9) using an expansion of string operators in order to extract the twist-four piece; the latter is expressed in terms of the deuteron twist-four distribution function $\tilde{Q}_D(x_B)$, which will be computed in Section IV.
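To make the isospin argument above concrete, here is a sketch of the isoscalar/isovector decomposition, keeping only u and d quarks; the labels $S_{\mu}$ and $V_{\mu}$ are introduced purely for illustration and need not match the paper's notation.
J^{\gamma}_{\mu} = \tfrac{2}{3}\,\bar{u}\gamma_{\mu}u - \tfrac{1}{3}\,\bar{d}\gamma_{\mu}d = \tfrac{1}{6}\,S_{\mu} + \tfrac{1}{2}\,V_{\mu}, \qquad S_{\mu} \equiv \bar{u}\gamma_{\mu}u + \bar{d}\gamma_{\mu}d, \quad V_{\mu} \equiv \bar{u}\gamma_{\mu}u - \bar{d}\gamma_{\mu}d,
C_{1u}\,\bar{u}\gamma_{\mu}u + C_{1d}\,\bar{d}\gamma_{\mu}d = \frac{C_{1u}+C_{1d}}{2}\,S_{\mu} + \frac{C_{1u}-C_{1d}}{2}\,V_{\mu} .
Because the deuteron is an isosinglet, matrix elements of the mixed $S$-$V$ products vanish, so only the $SS$ and $VV$ combinations survive, as stated above.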
III. THE LIGHT-CONE AMPLITUDES
The main challenge in proceeding from (9) is our ignorance of the details of the nucleon wavefunctions. As QCD is non-perturbative at the hadronic scale, analytical expressions for the wavefunctions are unknown. At present, lattice QCD can provide only HT contributions to structure function moments and not the x B -dependence of the R HT 1 that is of interest to the SoLID experiment. Consequently, one must turn to various models that seek to incorporate non-perturbative dynamics. Previous works on R HT 1 include the use of MIT bag model and isotropic light-cone wavefunctions that contain both quark and gluon Fock components ; their results yield similar shapes for the x B -dependence but differ somewhat in magnitude, with a maximum R HT which is a little bit lower than the achievable precision level in the SoLID experiment.
In this work we study how the inclusion of additional parton angular momentum might modify the R HT 1 prediction. For this purpose, we adopt the formalism developed in Ref. , starting from a light-cone formulation of quark states which is equivalent to the well-known "infinite momentum frame" point of view that gives the PDF its intuitive meaning as a parton momentum probability distribution . We then perform a light-cone expansion of the nucleon state, retaining only the portion of Fock space containing three valence quarks with all possible quark OAM. To illustrate, we consider a spin-up proton. Its three valence quarks can form a total helicity of ±1/2, ±3/2; therefore in order to keep the total proton spin in z-direction to be 1/2 we need to assign different z-component quark OAM (i.e. l z ) for different combinations.
A spin-up proton state can then be parametrized as in Eqs. (11)-(14), where $k^{\pm}_{i\perp} = k^x_i \pm i k^y_i$, while $u^{\dagger}_{ai}(1)$ denotes the creation operator of an up-quark (and similarly for a down-quark) with color $a$, spin $i$ and momentum $k_1$, etc., satisfying the light-cone anticommutation relation, with the integration measure defined accordingly.¹ The proton wavefunction amplitudes $\{\psi^{(1)}, \dots, \psi^{(6)}\}$ are generally unknown functions. Although the expansion (11)∼(14) is generic, the explicit form of $\psi^{(i)}$ is model-dependent. In this work, we chose the form of $\psi^{(i)}$ derived in Ref. by starting from the static solution of a constituent quark model (which works well in predicting many electroweak properties of the baryons) and applying a Melosh rotation to the solution to obtain the non-zero $l_z$ components. This choice of proton wavefunction has been used to predict the first moment of the Sivers function, and turns out to agree fairly well with the experimental measurements from HERMES and COMPASS.²
IV. MATRIX ELEMENTS BETWEEN NUCLEON STATES
Following Ref. , in order to obtain the twist-four distribution function $\tilde{Q}_D(x)$ we need to evaluate the matrix elements in the state $|D(P)\rangle$ of the operators in Eq. (17), where $z$ is a coordinate on the light cone and the parameters $b \equiv \{b_1, b_2, b_3, b_4\}$ characterize the light-cone separation between the quark field operators.
When computing the matrix elements of $Q_{V,A}$ in Eq. (17) we assume an incoherent impulse approximation in which the incoming photon strikes only one of the two nucleons (see, e.g., Ref. for further discussion of the impulse approximation); hence, matrix elements of the operators (17) can be related to the same matrix elements taken between proton states (or equivalently between neutron states, given isospin symmetry). Also, since the quantities we compute do not depend on the proton spin, we can take it to be +1/2 along the z-direction without loss of generality. Now, starting from the operators (17), we define two distribution functions $Q^{\pm}(x_{\xi})$ via Eq. (18), with $x_{\xi}$ collectively representing $\{x_{\xi_1}, x_{\xi_2}, x_{\xi_3}, x_{\xi_4}\}$, the light-cone momentum fractions: $\xi^{+}_{i} = x_{\xi_i} p^{+}$. Meanwhile $|P(p)\uparrow\rangle$ is the spin-up proton state with momentum $p$. Substituting (11)∼(14) into (18) we are able to express $Q^{\pm}(x_{\xi})$ in terms of the proton wavefunction amplitudes. It is easy to observe that only diagonal terms (i.e. terms with the same $l_z$ in the initial and final states) can give non-vanishing contributions. After a rather lengthy derivation with the aid of Eq. (A2), we obtain Eqs. (19) and (20), where the explicit formulas for $\psi^{\pm}_{l_z}$ are given in Appendix B. The proton twist-four distribution function can now be expressed in terms of the $Q^{\pm}$ (refer to Eq. (42) of Ref. after some rearrangement). Here $P_{ij}$ is the permutation operator, which exchanges the arguments of quarks $i$ and $j$.
¹ There might be differences in constant factors in the definition of the integration measure between different authors, which only affect the overall normalization.
² Ref. and Ref. defined their first moments of the Sivers function with a sign difference.
The deuteron twist-four distribution function $\tilde{Q}_D(x_B)$ can be expressed in terms of $\tilde{Q}_p(x_B)$ through an incoherent impulse approximation, which says that a general deuteron hadronic tensor can be related to the corresponding hadronic tensors of the proton and neutron as in Eq. (21), where $M_N$ is the mass of the nucleon. In that equation each hadronic tensor is multiplied by the particle's mass, because following Eq. (9) the hadronic tensor we defined has dimension $-1$. Now we can express both sides of Eq. (21) in terms of dimensionless structure functions. Using isospin symmetry and the fact that $\tilde{Q}(x_B)$ is proportional to $x_B^{-1} F_1^{ud}(x_B)$ (see Eq. (34) of Ref. ), we obtain the corresponding relation for $\tilde{Q}_D$.³ Finally, following the logic of Ref. , one can derive the twist-four contribution $R_1^{HT}$, with $q_D(x_B)$ being the parton distribution function for a quark of flavor $q$ in the deuteron. Note that we neglect the logarithmic $Q^2$-dependence of the structure functions in this analysis. We can express $q_D$ in terms of the PDFs of the proton and neutron, again by the impulse approximation (21), but now comparing the structure function $F_2(x_B)$ on both sides, which is proportional to $x_B^{-1} q(x_B)$. The result relates $q_D(x)$ to $q_p(x)$ and $q_n(x)$, which are defined as in Eq. (24) but for proton/neutron states. Furthermore, neglecting CSV effects, the neutron PDFs can be identified with the proton ones. Therefore, it is sufficient to calculate $u_p(x_B)$ and $d_p(x_B)$ using the proton light-cone wavefunction (11)∼(14). Using (A3) and (A4), we can compute the quark PDFs of the (spin-up) nucleons by calculating the matrix element on the LHS of Eq. (24) with nucleon states and comparing it with the form on the RHS to extract the PDFs. The same holds for the twist-four distribution functions: only terms diagonal in $l_z$ survive, so we can separate the result into components of different $l_z$, where the functions $A_{l_z}(q, 1, 2)$ are given in Appendix B.
³ In Ref. , the authors did not multiply their hadronic tensors by the particle mass in the impulse approximation formula; therefore the corresponding relation they obtained is off by a factor of 1/2, and the same holds for the relation between the quark distribution functions.
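For concreteness, the incoherent impulse-approximation relation described above (each hadronic tensor weighted by the corresponding particle mass, as stated in the text) can be sketched as
M_D\, W^{\mu\nu}_{D}(x_B, Q^2) \;\simeq\; M_N \left[\, W^{\mu\nu}_{p}(x_B, Q^2) + W^{\mu\nu}_{n}(x_B, Q^2) \,\right],
so that deuteron quantities such as $\tilde{Q}_D$ and $q_D$ inherit the proton and neutron contributions weighted by $M_N/M_D$; the exact numerical prefactor depends on the normalization conventions of Eq. (9) noted in footnote 3.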
We now proceed to show that a partial cancelation occurs between the contributions of $l_z = +1$ and $l_z = -1$. For this purpose, we combine (19) and (20), together with the fact that $\psi^{\pm}_{l_z}(q, l, q', l')^{*} = \psi^{\pm}_{l_z}(q', l', q, l)$, to simplify the expression for $\tilde{Q}_p(x_B)$ as in Eqs. (29) and (30), which define the quantities $\tilde{Q}^{\pm}_p(x_B)$. First we qualitatively analyze the contribution from each $l_z$-component to $\tilde{Q}^{\pm}_p(x_B)$. This can be done by simply referring to Eqs. (B1)∼(B8) of Appendix B. The result is summarized in Table I. We observe that the $l_z = +1$ ($-1$) piece contributes mainly to $\tilde{Q}^{-}_p$ ($\tilde{Q}^{+}_p$). Also notice that we do not include the $l_z = 2$ component, as its effect is tiny.
Next we study the behavior of the different contributions to $\tilde{Q}^{\pm}_p(x_B)$ with respect to $x_B$, showing that those associated with the $l_z = \pm 1$ components largely cancel. The individual contributions from the latter are shown in the top two panels of Fig. 2, where their partial cancelation and the associated sign change can be seen.
Table I: The contributions from different $l_z$-components to $\tilde{Q}^{\pm}_p(x_B)$. The $l_z = 0, +1$ components contribute mostly to $\tilde{Q}^{-}_p$ ("dominant") and less so to $\tilde{Q}^{+}_p$ ("subdominant"), while the $l_z = -1$ component contributes only to $\tilde{Q}^{+}_p$.
We also note that this sign change and cancellation appears to be rather generic. To see why, let us naively adopt the simple ansatz of Eq. (31), assuming the function there is well-behaved with respect to $\{x_{\xi_i}\}$. This approximation simply means that we do not care about the details of the proton wavefunction amplitudes.
Under this approximation, the numerical integrations (29) and (30) can be performed quite trivially, and the result is shown in the lower two panels of Fig. 2. In this case, we show $\tilde{Q}^{\pm}_p(x_B)$, as the $l_z = \pm 1$ components contribute primarily to one or the other of these two quantities (see Table I). Although the assumption in Eq. (31) breaks down at large and small $x_B$, one can see that a sign change of $\tilde{Q}^{+}_p(x_B)$ from negative to positive occurs at intermediate $x_B$. Therefore, according to Table I, the $l_z = +1$ and $l_z = -1$ contributions largely cancel against each other in this region.
Our main result is shown in Fig. 3, which gives $R_1^{HT}$ as a function of $x_B$. First, let us compare this outcome with that of Refs. and . It turns out that all three calculations predict a similar curve shape for $R_1^{HT}$, only with slightly different positions of the peak and the zero-point. Concerning the magnitude, our work predicts a maximum absolute value $|R_1^{HT}| \approx 2.6 \times 10^{-3}$ in the range $0.2 < x_B < 0.7$, which is the smallest in magnitude among the three predictions and about half the size of that of Ref. . This is understandable because those authors include a 3-quark+1-gluon Fock-space component whose contribution is comparable in magnitude to that of the pure 3-quark state. Nonetheless, all three calculations suggest that $|R_1^{HT}|$ lies below the expected SoLID precision. Next we study the OAM-dependence in detail. To that end, we first introduce some nomenclature: in the following, we will use the notation $(|l_z| \otimes |l_z'|)$, which denotes a generic matrix element taken between two hadronic states, one of which has absolute value of quark OAM in the z-direction equal to $|l_z|$ and the other equal to $|l_z'|$.
From our arguments at the end of Section IV, we expect that although the $l_z = \pm 1$ components individually contribute a significant amount to $\tilde{Q}_p(x_B)$, they should largely cancel against each other, leaving the $(0 \otimes 0)$ piece dominant. This $(0 \otimes 0)$ dominance is a rather unique feature of the particular twist-four contribution of interest here, and one that is not shared by other diagonal matrix elements. For example, if one calculates the proton quark PDFs (leading twist) using the same set of wavefunctions, the $(0 \otimes 0)$ and $(1 \otimes 1)$ contributions are comparable; moreover, since they have the same sign, the two $|l_z| = 1$ pieces do not cancel each other (see Fig. 4).
On the other hand, we also note that there are hadronic matrix elements that depend crucially on the existence of non-zero quark OAM in light-cone quantization. In particular, in Ref. , the authors studied the Sivers function and the Boer-Mulders function, which are examples of transverse momentum dependent parton distribution functions (TMDs) appearing in semi-inclusive deep inelastic scattering. Importantly, both distribution functions depend on off-diagonal matrix elements in $l_z$: the Sivers function is sensitive to $(0 \otimes 1)$, while the Boer-Mulders function is sensitive to both $(0 \otimes 1)$ and $(1 \otimes 2)$. Simply speaking, the existence of non-zero quark OAM is responsible for the non-vanishing values of the Sivers and Boer-Mulders functions. Combining this observation with our analysis of the HT matrix element, we conclude that the twist-four correction to eD PVDIS is essentially transparent to the parton angular momentum dynamics that generate the Sivers and Boer-Mulders functions.
It is also interesting to study the impact of sea-parton dynamics on the behavior of the HT matrix element. To that end, we performed a qualitative analysis of the contribution made by the Fock space component containing 3 quarks + 1 gluon, using the general form suggested in Ref. that includes non-zero gluon OAM. The authors of Ref. computed the contribution of the 3q+1g state with $l_z = 0$, which turns out to have a similar shape to that of the $l_z = 0$ 3q-state contribution. To our knowledge, however, there exist no explicit functional forms for the 3q+1g nucleon wavefunction with non-zero parton OAM. Consequently, our analysis is purely analytic at this point. We observe that, in contrast to the 3q-state contribution, the matrix element of the 3q+1g state for a fixed $l_z$ can contribute significantly to both $\tilde{Q}^{+}_p(x_B)$ and $\tilde{Q}^{-}_p(x_B)$ simultaneously; therefore there is no obvious correlation between $l_z$ and $\tilde{Q}^{\pm}_p(x_B)$, and hence no obvious pattern of partial cancelation. In Table II we summarize the importance of the different $(|l_z| \otimes |l_z'|)$ contributions to the various distribution functions, considering only the contributions of 3q states.
Combining these observations, we may draw the following conclusion: if a future eD PVDIS measurement yields a sufficiently precise determination of $R_1^{HT}$ as a function of $x_B$, one can compare the experimental curve with our current theoretical prediction. A significant deviation from the predicted curve (e.g., the peak and zero-point shifted by a considerable amount) could signal the importance of parton angular momentum dynamics beyond those responsible for the Sivers, Boer-Mulders, and spin-independent parton distribution functions. |
EVE: Valkyrie – Warzone review
Imagine yourself in a single-seat fighter-ship. You’re floating in the vast emptiness of space with three other pilots like yourself escorting a cargo ship. You think it’s just another quiet mission, easy credits in your pocket, but that’s not the case. Bogies appear on your radar – one blip, two blips, three, four. You and your fellow wingmen spring into action as you fire your cannons at the closest fighter and see it smash into the cargo ship’s shields. They’re swarming all around you and you’re taking shots from every which way. You’re breathing hard and sweat is dripping down your neck. There doesn’t seem to be an escape from this deadly rain of hellfire blanketing you.
This is Eve: Valkyrie – Warzone
It’s been about a year since the original release of Eve: Valkyrie for the PSVR. As one of PS4’s launch titles for their VR, it was met with both open arms and criticism. However, with the release of Eve: Valkyrie – Warzone, they took the original game along with all the current DLCs and made it readily available to all players who are just getting into the PSVR craziness. Not only do you experience the original hectic space battles coupled with tremendous visuals, but all the added content makes for a complete game now. Not that I’m taking anything away from the initial launch.
The Base Game
Eve: Valkyrie -Warzone is an awesome experience if you’ve never played a space shooter before. Sitting there with your VR headset on makes it all the more engaging since you have a 360-degree visual of everything around the cockpit up to whatever or whoever is chasing you on your six. Another step in the immersion is that it takes place within the EVE Online PC universe where you will definitely get blasted into kingdom come if you’re not paying attention. Your main goal in Eve: Valkyrie – Warzone is to take in as much cash as you can so you can upgrade and customize both your ships and your pilot. It’s a repeatable process, but an enjoyable one.
I wouldn’t say that there are classes in this game, but the selection of ships and their abilities lets you decipher what kind of space pilot you want to be within combat. With the added customization that I stated earlier, you can add to what your ship has to offer in flight. Do you want to be a defensive support ship? A high DPS damage dealer? Or do you want to play a healer type that flies around recharging your fellow wingmen’s shields? This all depends on your personal taste as a player/pilot along with the bevy of customizations for your ship.
Solo Play
The story mode in this game is quite lacking. Playing solo feels limited in this aspect as there doesn’t seem to be anything else other than the training scenarios, the survival modes, and the weird “recall” style missions. In fact, the story mode felt extremely disjointed and didn’t feel like anything was at stake. There were other modes such as training scenarios, scout missions, and survival, but that gets old pretty quickly. Once you’re done with the story mode and the other options, there’s nothing left for you to keep playing as a solo player.
Multiplayer
This is where the core gameplay comes into action. If you flag the cross-platform option you will be able to play with and against PC players. This will allow you to test your skill as a console gamer versus PC gamers, and it can get very intense. There are five different modes in multiplayer, which makes for an exciting experience every time since the battles will never be the same. At one point you can feel like an ace pilot in Team Deathmatch, but then completely be a noob during Carrier Assault. The skill level of the players will also vary since you’re playing online. In solo play, the computer can become predictable at times, but in multiplayer, it’s a whole other ball game since the ship customizations come into play.
Final Reaction
While the overall gameplay and visuals of Eve: Valkyrie – Warzone is spectacular, it does leave a little bit to be desired when you’re playing solo. The immersion factor is top-notch along with the suspense of trying not to get blown to kingdom come in the middle of the empty blackness of space. Dodging, weaving, shooting, and barrel-rolling through asteroids and starship cruisers made me tingle in my seat with delight. I feel this is currently the only space shooter that’s captured what it feels like to sit in a cockpit of an actual spaceship and fight for your life. If you’ve ever watched Battlestar Galactica, Firefly, or any other space opera movie/show and love that genre, then this is the game for you. It’s a blast to play!
Rating: 4/5 Atoms |
#pragma once
#include "../Core/Types.hpp"
#include "../Core/String.hpp"
namespace Ax { namespace System {
struct SUUID
{
uint32 data1;
uint16 data2;
uint16 data3;
uint8 data4[ 8 ];
String ToString() const;
};
void GenerateUuid( SUUID &uuid );
}}
|
n, m = input().split(' ')
n = int(n)
m = int(m)
bulbs_list=[]
for i in range(n):
temp = input().split(' ')
bulbs_list = bulbs_list + temp[1:]
bulbs_set = set(bulbs_list)
if len(bulbs_set)==m:
print("YES")
else:
print("NO") |
<reponame>nealholt/python_programming_curricula
import pygame, math, random
#Setup
pygame.init()
width = 900
height = 600
screen = pygame.display.set_mode((width,height))
clock = pygame.time.Clock()
font = pygame.font.SysFont('Arial', 24)
black = 0,0,0
white = 255,255,255
class Asteroid:
def __init__(self, surface, x, y, dx, dy):
self.surface = surface
self.x = x
self.y = y
self.dx = dx
self.dy = dy
self.sides = random.randint(5,12)
self.max_radius = 60
self.radii = []
for i in range(self.sides):
temp = random.randint(40,self.max_radius)
self.radii.append(temp)
self.line_thickness = 2
self.angle = 0
#Randomly rotate +-math.pi/256 per frame
self.rotation_rate = random.random()*math.pi/128
self.rotation_rate = self.rotation_rate-math.pi/256
def move(self, border_right, border_bottom):
#Rotate a little
self.angle = (self.angle + self.rotation_rate) % (math.pi*2)
#Move dx and dy
self.x += self.dx
self.y += self.dy
#print(self.x)
if self.x+self.max_radius < 0:
self.x += border_right
elif self.x > border_right:
self.x -= border_right
if self.y+self.max_radius < 0:
self.y += border_bottom
elif self.y > border_bottom:
self.y -= border_bottom
def draw(self):
point_list = []
for i in range(self.sides):
angle = self.angle+math.pi*2*(i/self.sides)
x = self.x + math.cos(angle)*self.radii[i]
y = self.y + math.sin(angle)*self.radii[i]
point_list.append([x, y])
pygame.draw.polygon(self.surface, white, point_list, self.line_thickness)
#Test asteroids
asteroid_list = []
#Add initial asteroids at random locations
min_dx = -2
max_dx = 2
for i in range(10):
x = random.randint(0, width)
y = random.randint(0, height)
dx = random.random()*(max_dx-min_dx)+min_dx
dy = random.random()*(max_dx-min_dx)+min_dx
asteroid_list.append(Asteroid(screen,x,y,dx,dy))
#Main loop
done = False
while not done:
#Handle events
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
done = True
#fill screen with black
screen.fill(black)
#Move and draw asteroids
for a in asteroid_list:
a.move(width, height)
a.draw()
#Update the screen
pygame.display.flip()
pygame.display.update()
#Delay to get 30 fps
clock.tick(30)
|
Storage of multivolume holograms using the phase-encoding technique
Reconfigurable volume holograms are important for a wide range of multiple data storage applications, including optical interconnection systems, image processing and neural network models. Several techniques for multiplexing to obtain a large number of stored images which can be recalled independently have been developed. But even the most promising of these multiplexing techniques, angular multiplexing using the selectivity of the Bragg condition, has turned out to be limited, primarily because of cross-correlation noise. Other problems are no less severe. In this paper we present an alternative approach implementing a phase-coding method for the reference beam. Phase encoding has been discussed for interconnecting vector arrays in thin holograms and for performing array interconnections by correlation of a reference beam with a supplementary phase-coded input beam. In contrast to these investigations, we use a reference-beam phase-coding method in thick volume holographic media, thus taking full advantage of the selectivity of the Bragg condition in volume storage media. In our experiments, we stored 64 images into a photorefractive BaTiO3 crystal using pure and deterministic orthogonal phase references. For this we developed a special phase modulator on the basis of a liquid crystal display. Good reconstruction with low crosstalk could be observed. Compared to other multiplexing techniques such as angular multiplexing, our method allows high storage capacity without alignment problems. Moreover, easy, light-efficient as well as immediate image retrieval without any time delay is possible. Experimental setup and results will be presented. We will especially discuss in detail the advantages and disadvantages of this coding method compared to others.
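As a rough illustration of why orthogonal phase codes allow independent recall with low crosstalk, the following toy sketch treats the reference beams as rows of a Walsh-Hadamard matrix (binary 0/pi phases) and the recorded hologram as a linear superposition of reference-object products; the names, sizes and the purely linear model are illustrative assumptions, not a description of the actual experiment.
import numpy as np

# Toy model: M phase-coded reference beams, each a length-M vector of unit-amplitude
# phases (+1 or -1, i.e. phase 0 or pi), taken from a Walsh-Hadamard matrix so that
# the codes are mutually orthogonal.
def hadamard(m):
    h = np.array([[1.0]])
    while h.shape[0] < m:
        h = np.block([[h, h], [h, -h]])
    return h

M = 64                      # number of multiplexed "images" (one per phase code)
N_PIX = 256                 # pixels per stored image (arbitrary toy size)
rng = np.random.default_rng(0)

references = hadamard(M)                 # shape (M, M); rows are orthogonal phase codes
images = rng.random((M, N_PIX))          # the M object "images" to be multiplexed

# Recording: in this linear toy model the hologram is a sum of reference-object products.
hologram = references.T @ images         # shape (M, N_PIX)

# Readout: illuminating with code n projects out image n, because
# references @ references.T = M * identity (orthogonality of the phase codes).
recovered = (references @ hologram) / M

print(np.allclose(recovered, images))    # True: negligible crosstalk in the ideal model
In the real system the binary phase patterns are produced by the liquid-crystal phase modulator mentioned above rather than as abstract vectors, and material effects make the recall only approximately orthogonal. |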
def update_stochastic_states(self):
for statename, generator in self.rngs.items():
if self._rng_params[statename][1]:
gen_method = getattr(generator, self._rng_params[statename][1])
setattr(self, statename, gen_method(*self._rng_params[statename][2])) |
ATLANTA — Pushing aside years of funding problems and construction dilemmas, this city on Tuesday opened a small loop of a transportation option that last operated here more than six decades ago: streetcars.
Although the electric streetcars, condemned by some as a $98 million gimmick, will not relieve Atlanta’s traffic woes as they glide across nearly three miles of track, the system is part of a broader strategy that supporters contend will help remake a city long regarded as something less than an archetype of urban design. It also gives Atlanta a place on the expanding list of cities that, backed by millions of dollars from the federal government, recently have constructed streetcar networks.
“These are not projects for right now,” said Keith T. Parker, the chief executive of the Metropolitan Atlanta Rapid Transit Authority. “These are projects for the future, and when you look around, the cities who we’re competing with around this nation and around the world, they’ve made investments in public transportation.”
Whether the streetcars will ever become moving landmarks of Atlanta, as they are in New Orleans and San Francisco, may not be known for decades. But they are already a faint throwback in a state where historians say every major city once had streetcars. |
use proconio::{input, fastout};
use std::cmp::*;
#[fastout]
fn main() {
input!{
x:usize, y:usize, a:usize, b:usize, c:usize,
mut p: [i64; a],
mut q: [i64; b],
mut r: [i64; c],
}
p.sort();
p.reverse();
q.sort();
q.reverse();
r.extend_from_slice(&p[0..x]);
r.extend_from_slice(&q[0..y]);
r.sort();
r.reverse();
let r_iter = r[0..x+y].iter();
let tot: i64 = r_iter.sum();
println!("{}", tot)
} |
Tropical Race Four, a soil-borne fungus, has been destroying bananas across the world. It kills the plant and makes bananas smell like garbage. That deadly fungus is expected to hit Central America, which is where we get all our bananas from.
There are a thousand types of bananas in the world but only one represents 99% of the banana export market. That'd be the Cavendish banana. Cavendish bananas dominate the export market because they provide farmers "with a high yield of palatable fruit that can endure overseas trip without ripening too quickly or bruising too easily".
Advertisement
One problem, though. By relying solely on the Cavendish banana (and clones of the Cavendish), one disease can wipe out a whole ton o' bananas in one sweeping motion. Tropical Race Four is that disease, and it's already wiped out Cavendish bananas in Asia and Australia with newspapers around the world calling it the "HIV of banana plantations".
The funny thing is the Cavendish banana actually replaced another banana (Gros Michel) in the 1950s because that one was stricken with Panama disease. History is repeating itself, but this time scientists are working feverishly in an attempt to save our banana population. Let's hope they succeed. Read the full story at the New Yorker.
Image Credit: Ian Ransley |
/**
* this handles all cool effects that go on per arrow
*
* @param g2d copy of the graphics from {@link GameGame#paint(java.awt.Graphics)}
 * @param imageLayer separate image for effects
*/
public void paint(Graphics2D g2d, BufferedImage imageLayer) {
AffineTransform a = new AffineTransform();
a.translate(x + xsize / 2, y + ysize / 2);
a.rotate(Math.toRadians(angle + 180));
a.translate(-(x + xsize / 2), -(y + ysize / 2));
a.translate(x + xsize / 2.0, y + ysize / 4.0);
a.scale(scale, scale);
a.translate(-(x + xsize / 2), -(y + ysize / 4));
g2d.setTransform(a);
Graphics2D imageGraphics = (Graphics2D) imageLayer.getGraphics();
        imageGraphics.setComposite(ac); // apply the alpha composite to the effects layer
int scaledScale = (int) (scale * 255) - 255;
objectImage = makeImagePartiallyTransparent((255 - scaledScale), objectImage);
g2d.drawImage(objectImage, x, y, main);
g2d.fillRect(x, y, 30, 30);
g2d.setComposite(og);
imageGraphics.drawRect(0, 0, 500, 500);
        g2d.drawImage(imageLayer, 0, 0, main); // draw the effects layer over the frame
g2d.setTransform(new AffineTransform());
} |
/**
* Tests the visit alias declaration method.
*/
@Test
public final void testVisitAliasDeclaration() {
AddDeclarationProgramVisitor visitor = new AddDeclarationProgramVisitor();
visitor.setProgram(program);
visitor.visit(aliasDeclaration);
verify(program, times(1)).addAliasDeclaration(aliasDeclaration);
} |
// GetNetwork retrieves a single network by id
func (s *Service) GetNetwork(networkID string) (Network, error) {
body, err := s.getNetworkResponseBody(networkID)
return body.Data, err
} |
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_INSTALLER_GCAPI_GOOGLE_UPDATE_UTIL_H_
#define CHROME_INSTALLER_GCAPI_GOOGLE_UPDATE_UTIL_H_
#include <string>
namespace gcapi_internals {
extern const wchar_t kChromeRegClientsKey[];
extern const wchar_t kChromeRegClientStateKey[];
extern const wchar_t kChromeRegClientStateMediumKey[];
// Reads Chrome's brand from HKCU or HKLM. Returns true if |value| is populated
// with the brand.
bool GetBrand(std::wstring* value);
// Reads Chrome's experiment labels into |experiment_labels|.
bool ReadExperimentLabels(bool system_install, std::wstring* experiment_labels);
// Sets Chrome's experiment labels to |experiment_labels|.
bool SetExperimentLabels(bool system_install,
const std::wstring& experiment_labels);
} // namespace gcapi_internals
#endif // CHROME_INSTALLER_GCAPI_GOOGLE_UPDATE_UTIL_H_
|
Recent developments in renal cell cancer immunotherapy
Various immunotherapeutic approaches for the treatment of renal cell carcinoma (RCC) have been developed for > 90 years. Existing immunotherapeutic strategies against RCC include: systemic administration of cytokines; therapeutic vaccines based on tumor cells or dendritic cells; monoclonal antibodies; and adoptive immunotherapy (T cell transfer or allogeneic hematopoietic cell transplantation). However, the overall efficacy of immunotherapy for advanced RCC remains moderate. With the advent of molecularly targeted biological therapies that turned out to be significantly effective in the treatment of metastatic RCC, to many oncologists immunotherapy may seem to be moving into the periphery of RCC treatment strategies. However, for the last 2 years there has been significant progress made in immunotherapeutic approaches for the treatment of RCC. Immunotherapy still remains the only systemic therapeutic strategy that is believed to potentially cure RCC patients. The development of active and passive specific immunotherapeutic approaches, along with the possibility to ‘switch off’ particular immunosuppressive mechanisms (e.g., elimination of regulatory T cells, blockage of cytotoxic T lymphocyte antigen-4 signaling), have paved the way for future trials of new immunotherapies of RCC. However, the new studies will have to enroll optimally selected patients (nephrectomized, with non-massive metastases and good performance status) and will use tumor response criteria that are specifically optimized for clinical trials of immunotherapy. |
/*
* by: Tyler Brazill
* problem: p082A
**/
import java.util.Scanner;
public class p082A {
public void start() throws Exception {
Scanner in = new Scanner(System.in);
String[] ppl = {"Sheldon", "Leonard", "Penny", "Rajesh", "Howard"};
int target = in.nextInt()-1;
int sodas = 0;
int length = 5;
int doubles = 1;
while(true){
sodas += length;
if(sodas > target) break;
doubles *= 2;
length *= 2;
}
sodas -= length;
int place = target-sodas;
System.out.println(ppl[place/doubles]);
}
public static void main(String[] args) {
try {
new p082A().start();
} catch (Exception e) {
e.printStackTrace();
}
}
} |
The members of the Security Council condemned the attack “in the strongest terms,” the 15-member body said in a statement issued to the press this afternoon.
According to media reports, at least six people were killed and several more were injured yesterday in the bombing when a remote controlled device was detonated on a crowded street in Mogadishu. News agencies suggest that a former Government official, who was among those killed in the attack, was being targeted.
If those reports bear out, this will be the latest in a wave of recent attacks against the Government of the long-troubled Horn of Africa nation. In mid-April, two Somali parliamentarians were killed in separate incidents: a shooting and a car bombing.
February was also marked by violence, with a suicide bombing near the country's intelligence headquarters and an attack, for which Al-Shabaab also claimed responsibility, on the presidential compound in Mogadishu.
In their statement on the latest violence, the Council members emphasized that neither this nor any other terrorist attack would undermine their support to the people of Somalia. “In that context, the members of the Security Council underlines their enduring support for the peace and reconciliation process in Somalia,” it said.
The Council also reaffirmed that “terrorism in all its forms and manifestations constitutes one of the most serious threats to international peace and security, and that any acts of terrorism are criminal and unjustifiable regardless of their motivation, wherever and whenever and by whomsoever committed.”
Expressing their condolences to the families of the victims, the Council members also wished a swift recovery to those injured. |
Energy Star standards, which rate the energy efficiency of buildings and products, need an update, according to Consumer Reports.
On the heels of criticism of Energy Star last week from a New York congresswoman, the magazine said the ratings program needed to raise the bar on qualifying products and develop better testing standards.
More than 35% of all products sold in certain categories get the Energy Star rating, according to Consumer Reports. Having a glut of qualifying items results in consumers having a harder time choosing truly eco-friendly goods, the magazine said.
The Energy Star system is run by the Environmental Protection Agency and the Department of Energy. Consumer Reports also found that some appliances tested under Department of Energy standards ended up performing differently in home situations.
More than 40% of Americans have purchased an Energy Star product, Consumer Reports said, and 23% have upgraded to a more energy-efficient heating or cooling system. The vast majority -- 77% -- made the switch to cut down on their energy costs.
Potential cost savings abound, Consumer Reports found: $200 a year just by programming the thermostat, $400 by fixing leaky ducts, $75 by avoiding pre-rinsing dishes before they go in the washer.
But conserving energy and using it wisely, even without Energy Star, can be complicated. In the last 12 months, 91% of all homeowners made an energy purchase or improvement that qualified for a government rebate or tax credit. But just a quarter of those said they took advantage of an incentive program, Consumer Reports said.
Most were confused by the tangle of rules behind many programs, and others thought the incentives were too small to justify the hassle. Consumer Reports surveyed more than 1,500 homeowners on the subject in June.
-- Tiffany Hsu |
/**
* compute and return the size of the network - the number of adjustable parameters
* @return the size of the network - the number of adjustable parameters
*/
int ANN::size()const{
if(NN_PRINT_FUNC>0) std::cout<<"ANN::size():\n";
int s=0;
for(int n=0; n<nlayer_; ++n) s+=bias_[n].size();
for(int n=0; n<nlayer_; ++n) s+=edge_[n].size();
return s;
} |
import { LanguageDecorator } from '../language-decorators/language_decorator'
import { Argument } from './argument'
import { Receiver } from './receiver'
import { Utils } from '../utils';
export abstract class Expression implements Receiver {
comment?: string
chainedMethodCall: MethodCall|null = null;
abstract forceSameLine: boolean
abstract isNullable: boolean
abstract isStatic: boolean
abstract toSnippet(language: LanguageDecorator): string
addComment(comment: string): Expression {
this.comment = comment;
return this;
}
getExpressions(): Expression[] {
return [this];
}
addMethodCall(methodName: string, args: Argument[]|any[] = [], isNullable = false, forceSameLine = false): Expression {
if (this.chainedMethodCall != null) {
this.chainedMethodCall.addMethodCall(methodName, args, isNullable, forceSameLine)
} else {
this.chainedMethodCall = new MethodCall(this, methodName, args, isNullable, forceSameLine)
}
return this;
}
addMethodCallSameLine(methodName: string, args: Argument[]|any[] = [], isNullable = false): Expression {
return this.addMethodCall(methodName, args, isNullable, true);
}
    abstract toReceiverString(languageDecorator: LanguageDecorator): string
}
export abstract class CallExpression extends Expression {
receiver: Receiver|null = null;
args: Argument[] = [];
isNullable: boolean = false;
abstract forceSameLine: boolean;
argsFormatted: boolean = false;
isStatic = false;
constructor(receiver: Receiver|string|null, args: Argument[]|any[] = [], isNullable = false) {
super();
if (typeof receiver == 'string') {
this.receiver = new Class(receiver);
} else if (receiver) {
this.receiver = receiver as Receiver;
}
args.forEach(i => {
if (typeof i == 'object' && i != null && 'toArgumentSnippet' in i) {
this.args.push(i)
} else {
this.args.push(new ValueExpression(i));
}
});
let nullableReceiver = this.receiver ? this.receiver.isNullable : false;
this.isNullable = isNullable || nullableReceiver;
}
setArgsFormatted(argsFormatted: boolean): CallExpression {
this.argsFormatted = argsFormatted;
return this;
}
protected toChainedSnippet(snippet: string, languageDecorator: LanguageDecorator): string {
if (!this.chainedMethodCall || this.chainedMethodCall == null) {
return snippet
}
if (this.chainedMethodCall.forceSameLine) {
snippet += languageDecorator.methodCallSnippet('', this.chainedMethodCall, this.isNullable, this.receiver?.isStatic)
return this.chainedMethodCall.toChainedSnippet(snippet, languageDecorator);
}
if (languageDecorator.canChainMethodCalls) {
snippet += '\n' + languageDecorator.tab + languageDecorator.methodCallSnippet('', this.chainedMethodCall, this.isNullable, this.receiver?.isStatic);
} else {
let receiverString = '';
if (this.receiver != null) {
receiverString = this.receiver.toReceiverString(languageDecorator);
}
snippet += '\n' + languageDecorator.methodCallSnippet(receiverString, this.chainedMethodCall, this.isNullable, this.receiver?.isStatic)
}
return this.chainedMethodCall.toChainedSnippet(snippet, languageDecorator);
}
abstract toSnippet(languageDecorator: LanguageDecorator): string
}
export class MethodCall extends CallExpression implements Receiver, Argument {
    methodName: string = '';
forceSameLine: boolean;
binaryOperation: boolean = false;
constructor(receiver: Receiver|string|null, methodName: string, args: any[]|Argument[] = [], nullable: boolean = false, forceSameLine: boolean = false) {
super(receiver, args, nullable);
this.methodName = methodName;
this.forceSameLine = forceSameLine;
}
    toArgumentSnippet(languageDecorator: LanguageDecorator): string {
        return this.toSnippet(languageDecorator)
    }
toSnippet(languageDecorator: LanguageDecorator) {
let receiverString = null;
let nullableReceiver = false
if (this.receiver != null) {
receiverString = this.receiver.toReceiverString(languageDecorator);
nullableReceiver = this.receiver.isNullable
}
return this.toChainedSnippet(languageDecorator.methodCallSnippet(receiverString, this, nullableReceiver, this.receiver?.isStatic), languageDecorator);
}
toReceiverString(languageDecorator: LanguageDecorator): string {
return this.toSnippet(languageDecorator);
}
//this might just be a kotlin thing
setBinaryOperationIfPossible(binaryOperation: boolean): MethodCall {
this.binaryOperation = binaryOperation;
return this;
}
}
export class Constructor extends CallExpression implements Receiver, Argument {
type: Class;
forceSameLine: boolean
constructor(type: string, args: Argument[]|any[] = [], forceSameLine = false) {
super(new Class(type), args, false)
this.type = new Class(type);
this.forceSameLine = forceSameLine;
}
toArgumentSnippet(language: LanguageDecorator): string {
return this.toSnippet(language)
}
setGenerics(...generics: string[]): Constructor {
this.type.setGenerics(...generics)
return this;
}
toSnippet(languageDecorator: LanguageDecorator) {
return this.toChainedSnippet(languageDecorator.constructorSnippet(this.type, this.args, this.argsFormatted), languageDecorator);
}
toReceiverString(languageDecorator: LanguageDecorator) {
return this.toSnippet(languageDecorator);
}
}
export class ValueExpression extends Expression implements Argument {
private value: any;
private isString: boolean
isStatic = false;
forceSameLine = false;
    isNullable: boolean
    constructor(value: any, isString = true) {
        super();
        this.value = value;
        this.isString = isString
        // Class field initializers run before the constructor body, so the
        // nullability check must happen here, after `value` has been assigned.
        this.isNullable = value == null
    }
toSnippet(languageDecorator: LanguageDecorator): string {
return this.toArgumentSnippet(languageDecorator);
}
toArgumentSnippet(language: LanguageDecorator): string {
if (this.isString && typeof this.value == 'string') {
return language.stringDenoter + this.value + language.stringDenoter
} else if (this.value == null){
return language.nullValue;
} else {
return this.value?.toString()
};
}
    toReceiverString(languageDecorator: LanguageDecorator): string {
        return this.toArgumentSnippet(languageDecorator)
    }
}
export class Class extends Expression implements Receiver {
className: string;
generics: string[] = [];
forceSameLine = true;
isNullable = false
isStatic = true
constructor(className: string) {
super();
this.className = className;
}
setGenerics(...generics: string[]): Class {
this.generics = generics
return this;
}
toSnippet(languageDecorator: LanguageDecorator): string {
if (languageDecorator.usesGenerics) {
return languageDecorator.getGenericClassDeclaration(this.className, ...this.generics);
} else {
return this.className;
}
}
toReceiverString(languageDecorator: LanguageDecorator) {
return this.toSnippet(languageDecorator);
}
}
export class Variable extends Expression implements Receiver, Argument {
type: Class;
generics: string[] = []
variableName: string;
initialization?: Expression;
methodCalls: MethodCall[] = []
forceSameLine: boolean
forceExplicitType: boolean = false
isNullable: boolean = false
isStatic = false;
private explicitlySetNullability = false;
private utils = new Utils();
constructor(type: string|Class, variableName?: string, forceSameLine = true, isNullable?: boolean) {
super();
if (typeof type == 'string') {
this.type = new Class(type);
} else {
this.type = type as Class
}
this.variableName = variableName != null ? variableName : this.utils.camelCase(this.type.className);
this.forceSameLine = forceSameLine
if (isNullable) {
// this.explicitlySetNullability = true;
this.isNullable = isNullable
}
}
    // adds a no-arg constructor invocation for Class this.type.className
addDefaultInitializer(constructorFunction?: (constructor: Constructor) => void): Variable {
let constructor = new Constructor(this.type.className)
constructorFunction?.(constructor)
this.initialization = constructor
return this;
}
setGenerics(...generics: string[]): Variable {
this.type.setGenerics(...generics);
return this;
}
setForceExplicitType(forceExplicitType: boolean): Variable {
this.forceExplicitType = forceExplicitType;
return this;
}
public initializer(initializer?: CallExpression|any): Variable {
if (!(initializer instanceof CallExpression)) {
            // Wrap the raw value so it can be rendered as an expression.
            this.initialization = new ValueExpression(initializer)
} else {
this.initialization = initializer;
}
if (!this.explicitlySetNullability) {
            this.isNullable = this.initialization.isNullable
}
return this
}
public addMethodCall(methodName: string, args: Argument[]|any[] = [], nullable = false): Variable {
this.methodCalls.push(new MethodCall(this, methodName, args, nullable));
return this
}
public createMethodCall(methodName: string, args: Argument[]|any[] = [], nullable = false): MethodCall {
return new MethodCall(this, methodName, args, nullable);
}
public toSnippet(languageDecorator: LanguageDecorator): string {
let initializationSnippet = languageDecorator.variableDeclarationSnippet(this.type, this.variableName, this.initialization == null)
if (this.initialization != null) {
initializationSnippet += ' ' + languageDecorator.assignmentOperator + ' ' + this.initialization.toSnippet(languageDecorator);
}
return initializationSnippet
}
toReceiverString(languageDecorator: LanguageDecorator): string {
return this.variableName;
}
toArgumentSnippet(languageDecorator: LanguageDecorator): string {
return this.variableName;
}
getExpressions(): Expression[] {
return [this as Expression].concat(this.methodCalls as Expression[])
}
} |
    //~ Inner classes ------------------------------------------------------
private static class DayOperator<T extends ChronoEntity<T>>
implements ChronoOperator<T> {
        //~ Instance variables ---------------------------------------------
private final boolean backwards;
        //~ Constructors --------------------------------------------------
DayOperator(boolean backwards) {
super();
this.backwards = backwards;
}
        //~ Methods -------------------------------------------------------
public T apply(T entity) {
long e = entity.get(EpochDays.UTC);
if (this.backwards) {
e--;
} else {
e++;
}
return entity.with(EpochDays.UTC, e);
}
} |
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE UndecidableInstances #-}
module Cardano.Crypto.ProtocolMagic
( ProtocolMagicId(..)
, ProtocolMagic
, AProtocolMagic(..)
, RequiresNetworkMagic(..)
, getProtocolMagic
, getProtocolMagicId
)
where
import Cardano.Prelude
import qualified Data.Aeson as A
import Data.Aeson ((.:), (.=))
import Text.JSON.Canonical (FromJSON(..), JSValue(..), ToJSON(..), expected)
import Cardano.Binary (Annotated(..), FromCBOR, ToCBOR)
-- | Magic number which should differ for different clusters. It's
-- defined here, because it's used for signing. It is also used for other
-- things (e. g. it's part of a serialized block).
--
-- mhueschen: As part of CO-353 I am adding `getRequiresNetworkMagic` in
-- order to pipe configuration to functions which must generate & verify
-- Addresses (which now must be aware of `NetworkMagic`).
data AProtocolMagic a = AProtocolMagic
{ getAProtocolMagicId :: !(Annotated ProtocolMagicId a)
, getRequiresNetworkMagic :: !RequiresNetworkMagic
} deriving (Eq, Show, Generic, NFData)
type ProtocolMagic = AProtocolMagic ()
newtype ProtocolMagicId = ProtocolMagicId
{ unProtocolMagicId :: Word32
} deriving (Show, Eq, Generic)
deriving newtype (FromCBOR, ToCBOR)
deriving anyclass NFData
instance A.ToJSON ProtocolMagicId where
toJSON = A.toJSON . unProtocolMagicId
instance A.FromJSON ProtocolMagicId where
parseJSON v = ProtocolMagicId <$> A.parseJSON v
getProtocolMagicId :: AProtocolMagic a -> ProtocolMagicId
getProtocolMagicId = unAnnotated . getAProtocolMagicId
-- mhueschen: For backwards-compatibility reasons, I redefine this function
-- in terms of the two record accessors.
getProtocolMagic :: AProtocolMagic a -> Word32
getProtocolMagic = unProtocolMagicId . getProtocolMagicId
instance A.ToJSON ProtocolMagic where
toJSON (AProtocolMagic (Annotated (ProtocolMagicId ident) ()) rnm) =
A.object ["pm" .= ident, "requiresNetworkMagic" .= rnm]
instance A.FromJSON ProtocolMagic where
parseJSON = A.withObject "ProtocolMagic" $ \o ->
AProtocolMagic
<$> o .: "pm"
<*> o .: "requiresNetworkMagic"
-- Canonical JSON instances
instance Monad m => ToJSON m ProtocolMagicId where
toJSON (ProtocolMagicId ident) = toJSON ident
instance MonadError SchemaError m => FromJSON m ProtocolMagicId where
fromJSON v = ProtocolMagicId <$> fromJSON v
--------------------------------------------------------------------------------
-- RequiresNetworkMagic
--------------------------------------------------------------------------------
-- | Bool-isomorphic flag indicating whether we're on testnet
-- or mainnet/staging.
data RequiresNetworkMagic
= RequiresNoMagic
| RequiresMagic
deriving (Show, Eq, Generic, NFData)
-- Aeson JSON instances
-- N.B @RequiresNetworkMagic@'s ToJSON & FromJSON instances do not round-trip.
-- They should only be used from a parent instance which handles the
-- `requiresNetworkMagic` key.
instance A.ToJSON RequiresNetworkMagic where
toJSON RequiresNoMagic = A.String "RequiresNoMagic"
toJSON RequiresMagic = A.String "RequiresMagic"
instance A.FromJSON RequiresNetworkMagic where
parseJSON = A.withText "requiresNetworkMagic" $ toAesonError . \case
"RequiresNoMagic" -> Right RequiresNoMagic
"RequiresMagic" -> Right RequiresMagic
"NMMustBeNothing" -> Right RequiresNoMagic
"NMMustBeJust" -> Right RequiresMagic
other -> Left ("invalid value " <> other <>
", acceptable values are RequiresNoMagic | RequiresMagic")
-- Canonical JSON instances
instance Monad m => ToJSON m RequiresNetworkMagic where
toJSON RequiresNoMagic = pure (JSString "RequiresNoMagic")
toJSON RequiresMagic = pure (JSString "RequiresMagic")
instance MonadError SchemaError m => FromJSON m RequiresNetworkMagic where
fromJSON = \case
JSString "RequiresNoMagic" -> pure RequiresNoMagic
JSString "RequiresMagic" -> pure RequiresMagic
other ->
expected "RequiresNoMagic | RequiresMagic" (Just $ show other)
|
// Copyright 2017 <NAME> aka. Luxko
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Contains fundamental geometry definitions.
//!
//! - `foundamental` ports some fundamental definitions from `cgmath`.
//! - `float` defines functions dealing with basic type `Float`.
//! - `ray` defines the ray interface.
//! - `bbox` defines the bounding box interface.
//! - `transform` defines the transform interface.
//! - `interaction` defines the interaction interface.
pub mod float;
pub mod ray;
pub mod bbox;
pub mod transform;
pub mod foundamental;
pub mod interaction;
pub mod prelude;
pub use self::foundamental::*;
pub use self::ray::{Ray, RawRay, RayDifferential};
pub use self::transform::TransformExt;
pub use self::bbox::{BBox2, BBox3, BBox2f, BBox3f};
pub use self::interaction::{DuvInfo, InteractInfo, SurfaceInteraction};
#[cfg(test)]
mod tests; |
/* Copyright 2015 The Native Client Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
#ifndef PN_FUNCTION_H_
#define PN_FUNCTION_H_
static PNConstant* pn_function_append_constant(PNModule* module,
PNFunction* function,
PNConstantId* out_constant_id) {
*out_constant_id = function->num_constants++;
return pn_allocator_realloc_add(&module->allocator,
(void**)&function->constants,
sizeof(PNConstant), PN_DEFAULT_ALIGN);
}
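/* Look up a value by id. Module-level values come first in the id space;
 * ids past that range index into the function's local values. Out-of-range
 * ids are a fatal error. */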
static PNValue* pn_function_get_value(PNModule* module,
PNFunction* function,
PNValueId value_id) {
if (value_id < module->num_values) {
return &module->values[value_id];
}
value_id -= module->num_values;
if (value_id < function->num_values) {
return &function->values[value_id];
}
PN_FATAL("accessing invalid value %d (max %d)\n",
value_id + module->num_values,
module->num_values + function->num_values);
}
static PNValue* pn_function_append_value(PNModule* module,
PNFunction* function,
PNValueId* out_value_id) {
uint32_t index = function->num_values++;
*out_value_id = module->num_values + index;
PNValue* ret = pn_allocator_realloc_add(&module->value_allocator,
(void**)&function->values,
sizeof(PNValue), PN_DEFAULT_ALIGN);
return ret;
}
static uint32_t pn_function_num_values(PNModule* module, PNFunction* function) {
return module->num_values + function->num_values;
}
#endif /* PN_FUNCTION_H_ */
|
Human echolocation operates as a viable “sense,” working in tandem with other senses to deliver information to people with visual impairment, according to new research published in Psychological Science, a journal of the Association for Psychological Science.
Ironically, the proof for the vision-like qualities of echolocation came from blind echolocators wrongly judging how heavy objects of different sizes felt.
The experiment, conducted by psychological scientist Gavin Buckingham of Heriot-Watt University in Scotland and his colleagues at the Brain and Mind Institute at Western University in Canada, demonstrated that echolocators experience a “size-weight illusion” when they use their echolocation to get a sense of how big objects are, in just the same way as sighted people do when using their normal vision.
“Some blind people use echolocation to assess their environment and find their way around,” said Buckingham. “They will either snap their fingers or click their tongue to bounce sound waves off objects, a skill often associated with bats, which use echolocation when flying. However, we don’t yet understand how much echolocation in humans has in common with how a sighted individual would use their vision.”
The researchers had three groups taking part in the experiment: blind echolocators, blind non-echolocators, and control subjects with no visual impairment. All three groups were asked to judge the weight of three cubes which were identical in weight but differed in size.
“The blind group who did not echolocate experienced no illusion, correctly judging the boxes as weighing the same amount as one another because they had no indication of how big each box was,” said Buckingham. “The sighted group, where each member was able to see how big each box was, overwhelmingly succumbed to the ‘size-weight illusion’ and experienced the smaller box as feeling a lot heavier than the largest one.”
“We were interested to discover that echolocators, who only experienced the size of the box through echolocation, also experienced this illusion,” Buckingham added. “This showed that echolocation was able to influence their sense of how heavy something felt. This resembles how visual assessment influenced how heavy the boxes felt in the sighted group.”
The findings are consistent with earlier work showing that blind echolocators use “visual” regions of their brain when listening to their own echoes. This new work shows that echolocation is not just a functional tool to help visually-impaired individuals navigate their environment, but actually has the potential to be a complete sensory replacement for vision. |
// sign creates the signature for specific checkpoint
// with local key. Only contract admins have the permission to
// sign checkpoint.
func sign(ctx *cli.Context) error {
var (
offline bool
chash common.Hash
cindex uint64
address common.Address
node *rpc.Client
oracle *checkpointoracle.CheckpointOracle
)
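	// Offline mode: without a node URL, the checkpoint hash, index and oracle
	// address must all be supplied explicitly on the command line.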
if !ctx.GlobalIsSet(nodeURLFlag.Name) {
offline = true
if !ctx.IsSet(hashFlag.Name) {
utils.Fatalf("Please specify the checkpoint hash (--hash) to sign in offline mode")
}
chash = common.HexToHash(ctx.String(hashFlag.Name))
if !ctx.IsSet(indexFlag.Name) {
utils.Fatalf("Please specify checkpoint index (--index) to sign in offline mode")
}
cindex = ctx.Uint64(indexFlag.Name)
if !ctx.IsSet(oracleFlag.Name) {
utils.Fatalf("Please specify oracle address (--oracle) to sign in offline mode")
}
address = common.HexToAddress(ctx.String(oracleFlag.Name))
} else {
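		// Online mode: obtain the checkpoint and oracle address from the
		// connected node, then verify that the checkpoint is mature and newer
		// than the latest one registered in the oracle contract.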
node = newRPCClient(ctx.GlobalString(nodeURLFlag.Name))
checkpoint := getCheckpoint(ctx, node)
chash, cindex, address = checkpoint.Hash(), checkpoint.SectionIndex, getContractAddr(node)
reqCtx, cancelFn := context.WithTimeout(context.Background(), 10*time.Second)
defer cancelFn()
head, err := ethclient.NewClient(node).HeaderByNumber(reqCtx, nil)
if err != nil {
return err
}
num := head.Number.Uint64()
if num < ((cindex+1)*params.CheckpointFrequency + params.CheckpointProcessConfirmations) {
utils.Fatalf("Invalid future checkpoint")
}
_, oracle = newContract(node)
latest, _, h, err := oracle.Contract().GetLatestCheckpoint(nil)
if err != nil {
return err
}
if cindex < latest {
utils.Fatalf("Checkpoint is too old")
}
if cindex == latest && (latest != 0 || h.Uint64() != 0) {
utils.Fatalf("Stale checkpoint, latest registered %d, given %d", latest, cindex)
}
}
var (
signature string
signer string
)
isAdmin := func(addr common.Address) error {
signers, err := oracle.Contract().GetAllAdmin(nil)
if err != nil {
return err
}
for _, s := range signers {
if s == addr {
return nil
}
}
return fmt.Errorf("signer %v is not the admin", addr.Hex())
}
fmt.Printf("Oracle => %s\n", address.Hex())
fmt.Printf("Index %4d => %s\n", cindex, chash.Hex())
signer = ctx.String(signerFlag.Name)
if !offline {
if err := isAdmin(common.HexToAddress(signer)); err != nil {
return err
}
}
clef := newRPCClient(ctx.String(clefURLFlag.Name))
p := make(map[string]string)
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, cindex)
p["address"] = address.Hex()
p["message"] = hexutil.Encode(append(buf, chash.Bytes()...))
fmt.Println("Sending signing request to Clef...")
if err := clef.Call(&signature, "account_signData", accounts.MimetypeDataWithValidator, signer, p); err != nil {
utils.Fatalf("Failed to sign checkpoint, err %v", err)
}
fmt.Printf("Signer => %s\n", signer)
fmt.Printf("Signature => %s\n", signature)
return nil
} |
/**
* Callback method that is executed after the user has selected his photos
*
* @param requestCode the code from the startActivityForResult(...) call
* @param resultCode a code indicating whether the user has completed or cancelled the selection
* @param data the activity specific data (see below)
*/
@Override
protected void handleOnActivityResult(int requestCode, int resultCode, Intent data) {
Log.v(LOG_TAG, "entering handleOnActivityResult()");
super.handleOnActivityResult(requestCode, resultCode, data);
PluginCall savedCall = getSavedCall();
if(savedCall == null) {
Log.d(LOG_TAG, "saved call data not found");
return;
}
JSObject result = new JSObject();
JSArray urls = new JSArray();
result.put("urls", urls);
if(requestCode == REQUEST_IMAGE_PICK) {
if(resultCode == RESULT_OK) {
if(data.getClipData() != null) {
Log.d(LOG_TAG, "user selected multiple photos");
int count = data.getClipData().getItemCount();
for(int i = 0; i < count; i++) {
Uri imageUri = data.getClipData().getItemAt(i).getUri();
imageUri = getTempFile(imageUri);
                        urls.put(imageUri != null ? FileUtils.getFileUrlForUri(getContext(), imageUri) : null);
                    }
} else if(data.getData() != null) {
Log.d(LOG_TAG, "user selected single photo");
Uri imageUri = data.getData();
imageUri = getTempFile(imageUri);
urls.put(imageUri != null ? FileUtils.getFileUrlForUri(getContext(), imageUri) : null);
}
result.put("selected", true);
savedCall.resolve(result);
} else if(resultCode == RESULT_CANCELED) {
Log.d(LOG_TAG, "user canceled selection");
result.put("selected", false);
savedCall.resolve(result);
}
}
} |
/**
* Notify the framework that the device is connected to the IMS network.
*
* @param attributes The attributes associated with the IMS registration.
*/
public final void onRegistered(@NonNull ImsRegistrationAttributes attributes) {
updateToState(attributes, RegistrationManager.REGISTRATION_STATE_REGISTERED);
mCallbacks.broadcastAction((c) -> {
try {
c.onRegistered(attributes);
} catch (RemoteException e) {
Log.w(LOG_TAG, e + "onRegistered(int, Set) - Skipping callback.");
}
});
} |
export {createWorkerMessenger} from './worker';
export {createIframeWorkerMessenger} from './iframe';
|
package tableTest
import (
"bytes"
"io"
"strconv"
"strings"
"testing"
)
// Test defines an expected output given a specified input.
type Test struct {
Input string
Output string
}
// RwFunc consumes input from a Reader and writes output to a Writer.
type RwFunc func(io.Reader, io.Writer) error
// Run executes a set of Tests on an RwFunc.
func Run(f RwFunc, tts []Test, t *testing.T) {
for i, tt := range tts {
t.Run(strconv.Itoa(i+1), func(t *testing.T) {
tt.run(f, t)
})
}
}
func (tt Test) run(f RwFunc, t *testing.T) {
var b bytes.Buffer
err := f(strings.NewReader(tt.Input), &b)
if err != nil {
t.Error(err)
t.FailNow()
}
s := b.String()
if s != tt.Output {
t.Errorf("expected '%v' actual '%v'", tt.Output, s)
}
}
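// The snippet below is an illustrative sketch (not part of the package) showing
// how Run and Test are meant to be used from a test file. The import path
// "example.com/project/tableTest" and the `upper` helper are assumptions made
// up for the example.
package tableTest_test

import (
	"io"
	"strings"
	"testing"

	"example.com/project/tableTest"
)

// upper is a stand-in RwFunc that upper-cases everything it reads.
func upper(r io.Reader, w io.Writer) error {
	b, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	_, err = w.Write([]byte(strings.ToUpper(string(b))))
	return err
}

func TestUpper(t *testing.T) {
	tableTest.Run(upper, []tableTest.Test{
		{Input: "hello", Output: "HELLO"},
		{Input: "Go table tests", Output: "GO TABLE TESTS"},
	}, t)
}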
|
Symmetries of the asymptotically de Sitter spacetimes
We start a systematic investigation of possible isometries of the asymptotically de Sitter solutions to the Einstein equations. We reformulate the Killing equation as conformal equations for the initial data at $\mathcal{I}^+$. This allows for a partial classification of possible symmetry algebras. In particular, if they are not maximal, they may be at most $4$-dimensional. We provide several examples. As a simple corollary it is shown that the only spacetime in which the Killing horizon intersects $\mathcal{I}^+$ (after a conformal completion) is locally the de Sitter universe.
I. INTRODUCTION
It is quite well-established nowadays that we live in a universe with a positive cosmological constant . Despite that, we are still lacking a universal framework for the description of gravitational radiation and different astrophysical phenomena when Λ > 0. Not surprisingly, the last few years witnessed a growing interest in the topic -many different approaches, definitions and solutions were presented (see for example . The fundamental reason for the difficulty of the task lies within the nature of the null infinity I + . When Λ > 0, the null infinity is spacelike and thus there is no notion of an asymptotic time translation and subsequently of a positive-definite Hamiltonian. Moreover, I + carries no natural structure besides that of a smooth manifold, which leads to the conclusion that all diffeomorphisms of I + are asymptotic symmetries, which renders the notion rather useless. We do not plan to offer a solution to those hard and important issues. The scope of this note is far more modest. We aim to begin a systematic investigation of the isometries of the asymptotically de Sitter spacetimes. Of course, metrics with many symmetries are rather rare and cannot properly describe physical processes like, e.g., the merger of two black holes. Nevertheless, they may be quite useful, for example as a starting point for the perturbation theory. In this work we will be concerned with the vacuum asymptotically de Sitter spacetimes. We will show that they may admit (locally) 10-dimensional algebras of symmetries (in which case the spacetime is locally de Sitter) or the algebra is 4, 3, 2, 1-dimensional, in which case it can be chosen to act by isometries on the boundary data. Moreover, we will classify all maximally symmetric and almost maximally symmetric solutions with I + = S 3 -they correspond to the de Sitter and Taub-NUT-de Sitter spacetimes, respectively. Although we do not have a full classification of all possible 3d algebras, such a classification seems to be feasible (although rather long -in particular, all Bianchi algebras would appear as special cases). Most approaches to the asymptotically de Sitter spacetimes rely heavily nomen omen on the asymptotic behavior of the fields and on the existence of the null infinity I + . We are going to use this structure to our benefit as well. However, let us mention a different paradigm. In it was proposed that one could use (a certain generalization of) the cosmological horizon as a local null infinity. Further progress in this direction was presented in where the BMS-like symmetry group of such horizons was identified and used to calculate physical quantities (multipole moments, charges and their fluxes) within the perturbation theory. In the context of our current investigations it is rather natural to ask about the Killing horizons associated with the symmetries described in the previous paragraph. One striking property of the de Sitter spacetime is the fact that each Killing horizon can be identified with a lightcone emanating from a point on I + in a conformally completed spacetime. The question that arises is how common the presence of a Killing horizon intersecting I + is in the asymptotically de Sitter spacetimes. This is the next issue we address in this paper and find its precise solution -this property is unique to the de Sitter spacetime.
Of course, it does not mean that different solutions do not have Killing horizons (the simplest example being the Schwarzschild-de Sitter black hole) but rather, using picturesque language, that the 'point of intersection' lies at the 'infinity' of I + . This is heavily tied to the topological properties of I + for different solutions, as analyzed in many examples in . Incidentally, both questions can be analyzed in the same language -by the extensive usage of the conformal properties of the Cauchy data. In fact, we will start with the latter problem simply because it is easier and it naturally sets the stage for the classification.
II. KILLING VECTOR FIELDS AT I +
A prerequisite for a Killing horizon to exist is the existence of a Killing vector field (KVF) itself. Thus, we will start by showing how they can be read off from the initial value formulation in the spirit of Friedrich . Asymptotically de Sitter spacetimes can be put into the Fefferman-Graham gauge (a sketch of its standard form is given below): where q ab = q ab (ρ, x c ) is smooth and ℓ = √(3/Λ). The null infinity I + corresponds to ρ = 0. Let us denote by q (n) the coefficients of the expansion of q ab in powers of ρ. From the vacuum Einstein equations, the following constraints follow, where R ab and R are the Ricci tensor and scalar (respectively) of q (0) and D is its covariant derivative. All q (n) , n > 3, are recursively given as functions of q (0) and q (3) . One can easily notice that q (2) is twice the Schouten tensor of q (0) . Notice that since q ab is merely smooth, it is only an asymptotic expansion and not necessarily a convergent series. Nevertheless, it follows from that the proper initial data at I + are equivalence classes of pairs, where two pairs are equivalent if they are connected by a (non-singular) conformal transformation: (2.8) One can easily check that the constraints for q (3) are conformally covariant. To agree with a standard notation, we will introduce a holographic energy-momentum tensor T ab proportional to q (3) ab . (2.9) Since we are mainly interested in the behavior at ρ = 0, it is convenient to introduce an unphysical metric ĝ = ℓ⁻²ρ²g, (2.10) which is smooth at all values of ρ.
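For reference, the Fefferman-Graham gauge used here and the conformal equivalence (2.8) of the boundary data can be summarized as follows. This is a hedged sketch of the standard form; the overall normalization of T_ab in (2.9) is left unspecified, only its proportionality to q^(3) is assumed:

% Fefferman-Graham gauge for an asymptotically de Sitter metric (standard form)
g \;=\; \frac{\ell^{2}}{\rho^{2}}\Big(-\,d\rho^{2} + q_{ab}(\rho,x)\,dx^{a}dx^{b}\Big),
\qquad \ell = \sqrt{3/\Lambda},
\qquad q_{ab}(\rho,x) = q^{(0)}_{ab} + \rho^{2} q^{(2)}_{ab} + \rho^{3} q^{(3)}_{ab} + \dots

% Conformal equivalence of the boundary data, cf. (2.8)-(2.9)
\big(q^{(0)}_{ab},\, T_{ab}\big) \;\sim\; \big(\omega^{2} q^{(0)}_{ab},\, \omega^{-1} T_{ab}\big),
\qquad T_{ab} \propto q^{(3)}_{ab}.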
We are looking for the KVFs X: This equation can be rewritten in a more convenient way as Let us evaluate first its ρρ component: The solution is immediate: whereX ρ ,ρ = 0. Thus, it follows that a KVF must be tangent to I + . Let ξ ∈ Γ(T I + ) denotes restriction of X to I + . We will now solve the remaining equations perturbatively in ρ. Evaluating ab components of (2.12) at ρ = 0 gives Thus, we see that ξ is a conformal Killing vector field (CKVF) of q (0) and ρa components of (2.12) gives X b ρ q ab − X ρ ,a = 0, (2.17) which can be immediately solved Thus, every KVF is uniquely defined by its restriction to I + : Of course, even when ξ is a CKVF of q (0) , (2.19) is not necessarily going to be a KVF of g.
To make sure that it is so, we need to evaluate (2.12) in higher orders of ρ -to this end we will differentiate it with respect to ρ. The first derivative of both sides automatically vanish since q (1) = 0. The second one reads: which is just a geometrical identity whenever q (2) satisfies (2.5). Notice that X a ,ρ | ρ=0 = X a ,ρρρ | ρ=0 = 0 due to the form (2.19) and (2.4). Thanks to that, the third derivative of (2.12) is extremely simple, and it reads This should not be a surprise since (q ab , T ab ) are defined up to a common conformal transformation. One can show that if ξ satisfies conditions derived above, then an associated X given by (2.19) is truly the KVF of g.
Indeed, let us consider a one-parameter family of the local initial data for some conformal factors ω s and local diffeomorphisms φ s . Uniqueness results of show that there exists a one-parameter family of diffeomorphisms Φ s of the spacetime which transform the development of the data at s into the one for s = 0. Suppose that X = dφ s /ds is a CKVF symmetry of the initial data. We can locally integrate it and choose a conformal factor such that (ω s ² q s , ω s ⁻¹ T s ) is independent of s. In this situation, the derivative dΦ s /ds defines the corresponding KVF of the spacetime. We have thus reformulated the problem of finding isometries to the problem of identifying conformal symmetries of the initial data; a sketch of these conditions is given below. We will use those equations extensively in the following sections.
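In infinitesimal form, and consistently with the conformal weights in (2.8) and with the finite version used later in (4.9), the symmetry conditions on the data can be sketched as follows. Here φ denotes the infinitesimal conformal factor associated with ξ; this is a reconstruction, not a verbatim quotation of the original displayed equations:

\mathcal{L}_{\xi}\, q^{(0)}_{ab} = 2\varphi\, q^{(0)}_{ab},
\qquad
\mathcal{L}_{\xi}\, T_{ab} = -\varphi\, T_{ab}.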
III. KILLING HORIZONS
Thus far, we have only focused on the existence of KVFs for themselves. However, our main object of interest is a Killing horizon H, that is, a null hypersurface whose null normal is X. Moreover, we assume that after a conformal completion, the intersection of H and I + is non-empty, just as is the case in the de Sitter spacetime. Since X is tangent to I + , it is spacelike and its length (say, in the ĝ metric) is nonnegative. On the other hand, its length on H vanishes. Thus, at the (non-empty) intersection, X must vanish. All Killing horizons are nonexpanding, meaning that all cross-sections of H have the same area 1 . In particular, we can take ρ = const. cross-sections and their area (in the g metric) 1 We assume that H has a product topology R × K, where K is a compact surface, and thus it makes sense to talk about a cross-section's area. An example of a situation in which it does not hold and one has a well-defined I + can be given by the Kerr-Taub-NUT-de Sitter. Then, the horizon is topologically S 3 . Nevertheless, even in this example the horizon is separated from I + .
is ρ-independent. On the other hand, their area in ĝ decreases as ρ² . In particular, it vanishes on I + . Thus, H ∩ I + cannot be a 2-surface, so it is either a curve or a point 2 . As we will show in a moment, the first case is excluded. Let us first assume that ξ is a KVF of q (0) . Then, it follows from (2.19) that and so it is spacelike everywhere (or vanishing) in the domain in which the coordinates (ρ, x c ) are well-defined. In particular, it cannot have a horizon. As a next step, let us assume that it is not a KVF, but there exists a positive function ω on a neighborhood of H ∩ I + such that Using terminology from we introduce a definition: Definition III.1. A conformal vector field ξ is called non-essential if there exists a nonzero ω satisfying (3.2). Otherwise, it is called essential.
We will also use a local version of these definitions. A vector field is essential at x if it is essential for every neighborhood of x. Such ξ are called non-essential CKVFs. If ξ is non-essential, we can consider a spacetime (M ′ , g ′ ) with the initial data (ω 2 q (0) ab , ω −1 T ab ) which is diffeomorphic to the one we considered so far. It follows that ξ is spacelike on (M ′ , g ′ ) and so also on (M, g). Thus, if ξ is a non-essential CKVF of q (0) , it does not have a Killing horizon intersecting I + and we are left only with the essential case. Fortunately, if a Riemannian manifold possesses an essential CKVF at the point x 0 , it is locally conformally flat around this point . Without loss of generality, we may assume that q (0) ab = δ ab . One can easily check that if ξ is an essential CKVF on every neighborhood of a given point, then this point is an isolated zero of ξ. Thus, as promised, it follows that (each connected component of) H ∩ I + is a point.
A. Holographic energy-momentum tensor Thus far, we were able to establish that q (0) is conformally flat. We assumed without loss of generality that it is simply flat around H ∩ I + . The only remaining part of the initial data is thus T ab . It is easier to deal with scalars rather than tensors, so let us introduce It is easy to notice that χ is a smooth function satisfying Thus, it is constant along the integral lines of ξ. As we already emphasized, we are interested in local symmetries and so let us assume that (3.4) holds in a ball of radius ǫ > 0 around the fixed point of ξ. The following useful lemma holds: Proof. If the metric is not conformally flat, then the point x 0 is not essential and there exists a metric q (1) which is preserved by ξ. The ball B(x 0 , ǫ) is preserved by the flow of the Killing vector field. We will now focus on the conformally flat case. Let us introduce cartesian coordinates centered around x 0 . Since we know the form of q (0) , we know its CKVFs as well: where p i , r i j , S, K i are covariantly constant and r ij is antisymmetric. Since 0 is a zero of ξ, we put p i = 0. Moreover, we can always put ξ into one of the following three standard forms: We can restrict attention to vectors in these standard forms. If ξ = r i j x j ∂ x i then it is not essential since it preserves q (0) . Its integral line are circle around x 0 and thus are surely contained in a small ball. For the two remaining vector fields, we can show that the thesis of the lemma holds for |x| ≤ ǫ for any ǫ > 0. In order to show this, we apply inversion. We then need to show that either the forward or the backward flow for a given ξ ′ preserves |x| > ǫ −1 and converges to infinity.
1. For the vector field ξ ′ = r i j x j + Sx i ∂ x i , we have L ξ |x| = S thus |x| → ∞ for either the forward flow (S > 0) or the backward flow (S < 0). Moreover the space |x| > ǫ −1 is preserved.
In case of the vector field ξ
The space K · x ≥ 0 and |x| > ǫ −1 is preserved by the forward flow and moreover |x| → ∞. Similarly, for the space K · x ≤ 0 and |x| > ǫ −1 .
Since χ vanishes at the fixed point and is constant along every integral line, it vanishes in the whole ball and so does T ab (since that fixed point is an isolated zero of ξ). Thus, there is a neighborhood of the fixed point, on which the initial data are those of the de Sitter spacetime. Since the Cauchy problem is well-posed, its past development is isometric to the de Sitter spacetime. Let us formulate this result as a theorem: Our proof that T ab vanishes can be easily generalized to show that there is no tensor S ab satisfying when s < 2. To this end, we just need to consider the following function: which is (at least) continuous at the origin, smooth everywhere else, and is constant along the integral lines of ξ.
IV. LOCAL SYMMETRIES
We will now extend the characterization of the essential conformal structures to the case with tensor T ab . Let V be the algebra of CKVF symmetries of the neighborhood of x ∈ I + , that is ∀ξ ∈ V We introduce Definition IV.1. Algebra of symmetries V is non-essential at x if there exists an open neighborhood of x and a conformal factor ω 2 such that Otherwise, we call it essential.
Our goal is to extend a result of . Namely, Let us remark, that it is not obvious that essentiality could in principle follows from properties of the whole algebra. Our proof of Proposition IV.1 will be based on the following result: Lemma IV.2. Let V be an subalgebra of conformal vector fields. Denote by V x a stabilizer of x (a subalgebra of vectors vanishing at x). If V x is non-essential then it is also true for V.
Remark: It is a local version of property of the proper action (see ).
Proof. LetG be a universal group generated by the algebra V with a subgroupK generated by V x . There exists a representative g 0 in a conformal class, which is preserved by V x . We can consider a ball in this metric B(x, η 1 ) for small enough η 1 such that it is inside normal coordinates chart around x. As x is a fixed point, it is preserved by V x . We can integrate the action of the algebra V x to the action of a groupK. Let H be a subgroup ofK which acts trivially on this ball. Let us notice that H also acts trivially on V and thus it is normal as it belongs to the center ofG. We may consider We notice that K is a compact group (by an injective homomorphism K → End(T x M) we can identify it with a subgroup of SO (3)). We will consider local action of G. Let L be a complementary subspace to V x in V. We equip it with an auxiliary norm.
1. There exists ǫ 1 > 0 such that is injective. This is thanks to the compactness of K. We denote the pull-back of the left Haar measure by dµ.
From continuity of the action there exists ǫ 4 < ǫ 3 and η 3 < η 2 such that for all k ∈ K and l ∈ L satisfying ǫ 4 /2 < |l| < 2ǫ 4 it holds exp l · k(B(x, η 3 )) ∩ B(x, η 3 ) = ∅ (4.6) Let us define where f is a smooth function which is one in zero and vanishes for all argument bigger equal than 1 (and nowhere else). This is a K invariant metric in B(x, η 3 ). We define a metric by integration This metric is invariant under V in some small neighborhood of x. In fact the integral changes under the action of the algebra only by boundary terms (due to invariance of the Haar measure). However, the boundary does not contribute to the tensor in small neighbourhood of x. Thus V is non-essential.
Proof of IV.1. From Lemma IV.2 we know that V x is essential. However, then by Theorem 7.1 from , the neighborhood of x is conformally flat. If every X ∈ V X is non-essential then after applying inversion X ′ is an element of Euclidean-Poincare transformation and it has a fixed point. From Lemma 7.2 of we then know that there is a common fixed point for all vectors. Applying inversion with respect to this point we see that we transformed all vector fields in V into Euclidean-Poincare vector fields.
The main result can be summarized as follows: Theorem IV.3. Let V be an algebra of local conformal symmetries at x of the data (q ab , T ab ). Then 1. either there exists a choice of non-vanishing ω in some neighborhood of x such that L ξ ω 2 q ab = 0, L ξ ω −1 T ab = 0, ∀ξ ∈ V (4.9) 2. or the metric is conformally flat and T ab = 0 in the neighborhood of x and at least one of the vectors in the algebra is essential at x.
In the second case we will say that the data is locally de Sitter.
Proof. Either V is non-essential at x and then the case 1 holds or there is an essential vector field in V and then by Lemma III.1 and the text below we are in the case 2.
V. GLOBAL SYMMETRIES
In this section, we will use our just gained knowledge of the initial data and their symmetries to classify what are possible isometries of the asymptotically de Sitter spacetime. To be more precise, we will show that the group of isometries of the asymptotically de Sitter spacetime can be only 0, 1, 2, 3 and 4-dimensional, unless the spacetime is locally isomorphic to the de Sitter universe. Let us start with some properties of the algebra of symmetries: Lemma V.1. Suppose that the algebra of conformal symmetries V is at least 4 dimensional and non-essential at x. Then the dimension of V is either 4 or 6 and the algebra acts locally transitively around x. In the case of dimension 6 the data is locally de Sitter around x.
Proof. As the algebra is non-essential we can assume it consists of Killing vector fields. We can identify the stabilizer V x ⊂ so(3). As a Lie subalgebra, it can be either 3 (full so(3)) or 1 dimensional. In the first case, it acts transitively on T x M. However, it preserves the space which is nontrivial as dimension of V is at least 4. This means that Y = T x M. Counting dimensions shows that dim V = 6. However, this means that the metric is maximally symmetric and conformally flat. Additionally, T ab = cq ab and as it is traceless T ab = 0. This is the case of locally de Sitter.
On the other hand, if V x is one-dimensional then Y = T x M, the dimension of V is 4 and the algebra acts locally transitively.
Theorem V.2. If the spacetime is not everywhere locally de Sitter, then one of the following holds: 1. The connected group of symmetries is 4-dimensional. It acts transitively on I + and there is a choice of conformal class of the metric for which the action is by isometries.
The group of symmetries is at most 3-dimensional.
Proof. We consider the connected component of the symmetry group and its Lie algebra V. Let us introduce the following scalar: where C abc is a Cotton tensor of q (0) . Since the Cotton tensor is a conformal invariant, it follows that χ has a conformal weight −6. If χ vanishes in a neighborhood of a point x , then the data is locally de Sitter around this point (the metric is conformally flat and T ab vanishes). Suppose now that the point x is non-essential and χ(x) = 0. Then by Lemma V.1 the action of algebra is locally transitive and thus χ = 0 in the whole neighborhood of x. The same is true if x is an essential point and so the set {χ = 0} is open. As both open and closed it needs to be either an empty set or the whole I + .
2. If χ = 0 everywhere, then we can introduce equivalent data: which satisfy for every ξ ∈ V: and thus ξ must be KVF of q ′ . From Lemma V.1 the orbits of every point is the whole I + (both orbit and its complement is an open set).
Thus the metric q ′ ab is preserved by the connected component of the symmetry group.
VI. EXAMPLES
Thus far, we have shown that the possible dimensions of the isometry group are d = 4, 3, 2, 1, assuming that the spacetime is not locally de Sitter. Of course, that does not prove that all of those cases are actually realized. What is left is to construct examples for each of those values. We will divide our discussion into different topologies of I + which are commonly encountered. With the exception of d = 4, we do not claim any sort of completeness.
Obviously, the first example that comes to one's mind is the global de Sitter spacetime. It is maximally symmetric and thus d = 10. We already learned that when d = 4, symmetries act transitively on I + and (in an appropriate conformal frame) are isometries. Thus, I + with a metric q (0) must be a homogeneous space. Fortunately, all homogeneous metrics on simply connected 3-spaces are classified . On S 3 they are simply given by squashed spheres: where σ i are standard left invariant one-forms on S 3 . When all λ are different, this metric has SU(2) symmetry (d = 3). When two of them coincide, the symmetry is enlarged to U(1) × SU(2) = U(2) (d = 4). Let us focus for a moment on the latter; we can take λ 1 = λ 2 . The only holographic energy-momentum tensor consistent with the symmetry is This is a two parameter family of initial data parameterized by λ 1 λ 3 and α √ λ 1 . It describes Taub-NUT-de Sitter and the two parameters correspond to the NUT parameter l and mass parameter m, respectively. Notice that m is not a physical mass -since I + is topologically a sphere, any conserved charge associated with a symmetry generator ξ and a surface C must vanish identically. Moreover, notice that the Killing horizon in this solution is in fact a Cauchy horizon and so it marks a breakdown of the unique evolution. Notice that if we allow λ 1 ≠ λ 2 the possible T ab are subject to the usual constraints. They all correspond to Bianchi IX universes.
B. R 3
Let us start with a metric q (0) which is not conformally flat. It follows than that (in an appropriate conformal frame) all symmetries acts as isometries. It is well-known that a nonmaximally symmetric metric can be at most 4-dimensional. Thus, one can generate plenty of examples even with T ab = 0. In particular, all homogeneous metrics (up to accidental additional symmetries) on R 3 could be used and they are already classified .
Euclidean space
Let us now discuss the case when the metric induced on I + is flat. Then, our starting point is 7-dimensional group 3 of R 3 . We want to break symmetry explicitly through the introduction of a non-trivial T ab . All complete CKVFs on R 3 are of the form: As follows from our previous discussions, we must put S = 0 -otherwise T ab would have to vanish. We are left only with Killing vectors of the flat metric. Thus, we are now looking for T ab such that L ξ T ab = 0, (6.6) where ξ is in a proper subalgebra of the Euclidean algebra. That subalgebra cannot be 6 dimensional, since then T ab would be proportional to δ ab and hence vanishing. Moreover, there are no 5-dimensional subalgebras. Thus, the smallest possible one is 4-dimensional and there is only one (up to an isomorphism). It is generated by ∂ x , ∂ y , ∂ z , x∂ y − y∂ z . The most general T ab it preserves is of the form where a is an arbitrary constant. Such initial data correspond to the Bianchi I cosmology with an additional axial symmetry. There are several 3-dimensional subalgebras. We obviously have an algebra of translations (isomorphic to R 3 ). Clearly it preserves T ab of the form where h xx + h yy + h zz = 0 and all h ij are constant. Such initial data corresponds to (now more general) Bianchi I universes. Different 3 dimensional algebras are so (3), an euclidean algebra of a plane (spanned by ∂ x , ∂ y and x∂ y − y∂ x ) and a helical algebra ((spanned by ∂ x , ∂ y and α∂ z + x∂ y − y∂ x ). It is easy to see that the only T ab preserved by these symmetries (and no other) is simply zero. It is also easy to construct examples with lower symmetry. In particular, an algebra R 2 spanned by ∂ x , ∂ y can be obtained by a choice where h xx + h yy + h zz = 0 and h iz are constants. It is not clear to us whether such solutions occur in any physically interesting scenarios.
C. Cylinder R × S 2 Yet another possible topology of I + is a cylinder R × S 2 . This should describe black hole spacetimes. Let us take q (0) to be conformally flat and given by q (0) = du² + ℓ²dθ² + ℓ²sin²θ dφ². (6.10) Among all CKVFs, only a 4-dimensional subalgebra is complete in this case and it is generated by ∂ u and rotations. Thus, there are no solutions with more than 4d isometries. The most general form of T ab consistent with those symmetries is where a is an arbitrary constant. It clearly corresponds to the Schwarzschild-de Sitter spacetime. The only three-dimensional subalgebra is so(3) and again Schwarzschild-de Sitter is the only example (as stated by the Birkhoff theorem). The only two-dimensional subalgebra is spanned by ∂ u and ∂ φ . If we consider a one-dimensional algebra spanned by ∂ u , we can write down the most general form of T ab : T = ℓ⁻²(∆ + 2)Φ du² + 2ǫ_AB D^B χ du dx^A + (D_A D_B − (∆ + 1)γ_AB)Φ dx^A dx^B , (6.12) where Φ and χ are arbitrary (u-independent) functions on S 2 . If they are additionally axially symmetric we are back to the previous case. Notice that Kerr-de Sitter can be put in this form .
VII. DISCUSSION
In this paper we considered asymptotically de Sitter spacetimes that satisfied the vacuum Einstein equations in a neighborhood of the null infinity I + . We assumed an existence of KVFs in that neighborhood and studied their properties. The key elements of our analysis were the initial Cauchy problem at I + and the Fefferman-Graham expansion. The first result is Theorem III.2. It states that if there is a Killing horizon that intersects I + (after a conformal completion), then in a neighborhood of the intersection point, the spacetime is isometric to the de Sitter spacetime. We explain now, why the theorem is true. To begin with, every KVF X admitted by a neighborhood of I + turns out to be tangent to I + . Conversely, a vector field ξ tangent to I + is a restriction of a KVF defined in a neighborhood of I + if and only it is a symmetry of the data that determines a solution of Einstein's equations in the neighborhood. The data is a pair: the induced metric tensor q (0) and holographic stress-energy tensor T defined up to the conformal transformations (2.8).
Next, it turns out that the intersection of the Killing horizon with I + is an isolated point, a zero of the vector field ξ. That follows from the properties of essential CKVF of 3 dimensional conformal geometries. On the other hand, if ξ were a non-essential CKVF, then the solution X of the Killing equation that is determined in the neighborhood would be spacelike (hence, without a Killing horizon). The last step of the reasoning is the construction of the scalar that is shown to be zero on a neighborhood of the zero of the symmetry ξ. We have systematically investigated possible symmetries of the initial data. In particular, we have proven that they exhibit the gap phenomenon with the submaximal symmetry being only 4-dimensional. We have also shown that this case reduces to the homogeneous geometry on I + and as such is much easier to understand. In particular, if the null infinity is topologically a sphere, then the solution is necessarily Taub-NUT-de Sitter. We have also provided a lot of examples in other situations. Hopefully they can be useful as a starting point for the perturbative treatment of the gravitational radiation. |
import json
import os
import unittest
from jsonschema import validate
from Routing.MockPathfinder import MockPathfinder
def load_schema(filename):
file = os.path.join(os.path.dirname(__file__), "schemas/" + filename)
with open(file) as f:
return json.loads(f.read())
class MockPathfinderTest(unittest.TestCase):
def setUp(self):
self.payload = MockPathfinder.getRoutes(0)
def testPayloadSchema(self):
schema = load_schema("payload.json")
validate(self.payload, schema)
def testDroneRouteSchema(self):
for drone_route in self.payload["drone_routes"]:
validate(drone_route, load_schema("route.json"))
def testWaypointSchema(self):
for drone_route in self.payload["drone_routes"]:
for waypoint in drone_route["waypoints"]:
validate(waypoint, load_schema("waypoint.json"))
if __name__ == "__main__":
unittest.main()
|
Pig Intestinal Membrane‐Bound Receptor (Guanylyl Cyclase) for Heat‐Stable Enterotoxin: cDNA Cloning, Functional Expression, and Characterization
A cDNA encoding the receptor protein for a heat‐stable enterotoxin (STa) produced by enterotoxigenic Escherichia coli was cloned from intestinal epithelial cells of a 10‐week‐old pig. The cDNA had an open reading frame of 3,219 base pairs and coded for a protein with 1,073 amino acid residues. The mature protein consisted of 1,050 amino acid residues with a molecular mass of ca. 121 kDa and was 87% and 82% identical with the human and rat protein, respectively. The CHO cell line overexpressing the pig recombinant STa receptor specifically bound to a photoaffinity‐labeled analog of STa and showed marked elevation of the cellular content of cGMP in response to STa. |
/*
* Unit test for verifying logic after removing redundant ClassTag fields for key and value from
* com.basho.riak.spark.japi.rdd.RiakJavaPairRDD class
*/
@SuppressWarnings("unchecked")
@Test
public void createWithClass() {
final RiakRDD<Tuple2<String, Object>> rdd = mock(RiakRDD.class);
RiakJavaPairRDD<String, Object> pairRDD = new RiakJavaPairRDD<>(rdd, String.class, Object.class);
assertEquals(getClassTag(String.class), pairRDD.kClassTag());
assertEquals(getClassTag(Object.class), pairRDD.vClassTag());
} |
import {
AfterViewInit, ChangeDetectorRef, Component, ElementRef, EventEmitter, Input,
OnChanges, OnInit, Output, SimpleChanges, ViewChild,
} from '@angular/core';
import { debounceTime } from 'rxjs/operators';
@Component({
selector: 'amexio-availability',
templateUrl: './availability.component.html',
})
export class AvailabilityComponent implements OnInit, AfterViewInit, OnChanges {
@Input('start-date') startDate: string;
@Input('end-date') endDate: string;
@Input('start-time') startTime: number;
@Input('end-time') endTime: number;
@Input('time-zone-data') zoneData: any;
@Input('undo-button') undoFlag = false;
@Input('enable-drag') enableDrag = true;
_labelData: any;
@Input('label-data')
set labelData(value: any[]) {
this._labelData = value;
}
get labelData(): any[] {
return this._labelData;
}
@Input('default-radio') defaultRadio = '';
@Input('no-change') nocellchange = false;
@ViewChild('datesdiv') elementView: ElementRef;
@ViewChild('datesseconddiv') elementView1: ElementRef;
@ViewChild('datesfirstdiv') elementView2: ElementRef;
@Output() onClick: any = new EventEmitter<any>();
@Output() onRadioClick: any = new EventEmitter<any>();
@Output('onUndoClick') UndoBtnClick: any = new EventEmitter<any>();
@Output('onDragStart') onDragStartEvent: any = new EventEmitter<any>();
@Output('onDragOver') onDragOverEvent: any = new EventEmitter<any>();
@Output('onDragEnd') onDragEndEvent: any = new EventEmitter<any>();
radioValue = '';
selectedIndexArr: any[];
styleVar: any;
completeNewArr: any[];
datesArrlen = 0;
slotTimeArr: any[];
sDate = new Date();
eDate = new Date();
dateArr: any[];
dateArr1: any[];
completeTimeArr: any[];
dateSpanHt = 18;
dateSpanWt = 46;
dateSpanlist: any[];
legendArr: any[];
dragStartObj: any;
dragEndObj: any = {};
dragFlag = false;
legendObj = {};
newTimeArr: any[];
minIndex: number;
maxIndex: number;
count = 0;
newTimeArr2: any = [];
constructor(public cdf: ChangeDetectorRef) {
}
ngOnInit() {
this.generateData();
if ((this.defaultRadio.length > 0)) {
this.radioValue = this.defaultRadio;
// initialise styleVar from the legend entry that matches the default radio selection
this.legendArr.forEach((element: any) => {
if (element.label === this.defaultRadio) {
this.styleVar = element;
this.onRadioClick.emit(element);
}
});
}
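// derive newTimeArr2: hourly { starttime, endtime } ranges built from every other slot boundary
// of the first date, apparently for rendering the time header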
const tarr: any = [];
this.dateArr1[0].slots.forEach((slele: any, sindex: number) => {
if (((sindex % 2) === 0) || (sindex === 0)) {
tarr.push(slele);
}
});
let tcnt = 0;
const newttarr2: any = [];
let prevt;
while (tcnt <= tarr.length - 2) {
let obj;
if (tcnt === 0) {
obj = { starttime: tarr[tcnt].starttime, endtime: tarr[tcnt + 1].starttime };
prevt = obj.endtime;
newttarr2.push(obj);
} else {
obj = { starttime: prevt, endtime: tarr[tcnt + 1].starttime };
prevt = obj.endtime;
if (!((obj.starttime.getHours() === obj.endtime.getHours()) &&
(obj.starttime.getMinutes() === obj.endtime.getMinutes()))) {
newttarr2.push(obj);
}
}
tcnt = tcnt + 1;
}
const prevlastobj = newttarr2[newttarr2.length - 1];
const lastendtime = new Date(newttarr2[newttarr2.length - 1].endtime);
lastendtime.setHours(lastendtime.getHours() + 1);
const lastobj = { starttime: prevlastobj.endtime, endtime: lastendtime };
newttarr2.push(lastobj);
this.newTimeArr2 = newttarr2;
}
updateComponent() {
this.generateData();
}
ngOnChanges(changes: SimpleChanges) {
if (changes['labelData'] && changes.labelData.currentValue) {
this.labelData = changes.labelData.currentValue;
}
}
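// Note: ngOnchanges below is not an Angular lifecycle hook (the hook is ngOnChanges, implemented above),
// so Angular never calls it; it only runs if invoked explicitly.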
ngOnchanges() {
this.dateSpanWt = 37;
this.generateData();
}
// generate the data structures: date rows, 30-minute slots and the time header from the current inputs
generateData() {
this.selectedIndexArr = [];
this.completeNewArr = [];
this.slotTimeArr = [];
this.dateArr = [];
this.dateArr1 = [];
this.completeTimeArr = [];
this.dateSpanlist = [];
this.legendArr = [];
this.newTimeArr = [];
this.sDate = new Date(this.startDate);
this.eDate = new Date(this.endDate);
let i = 0;
this.dateArr = [{ dates: [], timearr: [] }];
this.dateArr1 = [];
let d;
// if startdate is less than enddate
if (this.sDate < this.eDate) {
do {
d = new Date(this.sDate.getFullYear(), this.sDate.getMonth(), this.sDate.getDate() + i);
const dobj = { date: d };
this.dateArr[0].dates.push(dobj);
i++;
} while (d < this.eDate);
} else if (this.sDate === this.eDate) {
// if startdate equals enddate
d = new Date(this.sDate.getFullYear(), this.sDate.getMonth(), this.sDate.getDate() + i);
const dobj = { date: d };
this.dateArr[0].dates.push(dobj);
}
i = 0;
const arr: any = [];
this.sDate = new Date(this.startDate);
this.eDate = new Date(this.endDate);
if (this.sDate < this.eDate) {
do {
d = new Date(this.sDate.getFullYear(), this.sDate.getMonth(), this.sDate.getDate() + i);
const dobj = { date: d, slots: arr };
dobj.slots = this.setSlots1(d);
this.dateArr1.push(dobj);
i++;
} while (d < this.eDate);
} else if (this.sDate === this.eDate) {
const arry: any = [];
d = new Date(this.sDate.getFullYear(), this.sDate.getMonth(), this.sDate.getDate() + i);
const dobj = { date: d, slots: arry };
dobj.slots = this.setSlots1(d);
this.dateArr1.push(dobj);
}
this.initializeTimeArr();
this.generateTimeArr();
this.datesArrlen = this.dateArr[0].dates.length;
let j;
for (j = 0; j < this.datesArrlen; j++) {
this.dateSpanlist.push(j);
}
this.generateLegendArr();
this.generateSlotTimeArr();
}
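// build newTimeArr: one Date entry per 30-minute tick between startTime and endTime,
// later used in ngAfterViewInit to size the grid columns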
generateSlotTimeArr() {
let i = this.startTime;
while (i <= this.endTime) {
let j = 0;
while (j <= 1) {
const d = new Date();
d.setHours(i);
if (j === 0) {
d.setMinutes(0);
}
if (j === 1) {
d.setMinutes(30);
}
this.newTimeArr.push(d);
j++;
}
i++;
}
}
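// create two 30-minute slots per hour between startTime and endTime for the given date,
// then colour them from the label data via chkLabels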
setSlots1(d: Date) {
const slot: any = [];
const etime = this.endTime;
let i = this.startTime;
let j;
while (i <= etime) {
let previousendtime;
for (j = 0; j <= 1; j++) {
const obj = {};
let objstarttime = new Date(d);
const objendtime = new Date(d);
if (j === 0) {
objstarttime.setHours(i);
objendtime.setHours(i);
objstarttime.setMinutes(0);
objendtime.setMinutes(30);
previousendtime = objendtime;
}
if (j === 1) {
objstarttime = previousendtime;
objendtime.setHours(previousendtime.getHours() + 1);
objendtime.setMinutes(0);
}
obj['starttime'] = objstarttime;
obj['endtime'] = objendtime;
obj['colorflag'] = false;
slot.push(obj);
}
i++;
}
return this.chkLabels(d, slot);
}
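// for every label whose 'available' entries match this date, mark the covered slot range
// with that label's name and colour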
chkLabels(d: Date, slotArray: any) {
const minindex: any = null;
const maxindex: any = null;
let minflag = false;
let maxflag = false;
this.labelData.forEach((labelelement: any) => {
if (labelelement.available) {
labelelement.available.forEach((availableElement: any) => {
if (availableElement.date) {
let minmaxarr: any = [];
const dt = new Date(availableElement.date);
let retflagObj: any;
if (availableElement.time) {
retflagObj = this.availableTimeTest(availableElement, slotArray, dt, d, minmaxarr);
minflag = retflagObj.minFlag;
maxflag = retflagObj.maxFlag;
minmaxarr = retflagObj.minmaxArr;
}
this.setRange(minflag, maxflag, slotArray, minmaxarr, labelelement);
}
});
}
});
return slotArray;
}
setRange(minflag: boolean, maxflag: boolean, slotArray: any, minmaxarr: any, labelelement: any) {
if (minflag && maxflag) {
this.setColorRangeTest(slotArray, minmaxarr, labelelement);
}
}
availableTimeTest(availableElement: any, slotArray: any, dt: Date, d: Date, minmaxarr: any) {
let minindex = null;
let maxindex = null;
let minflag = false;
let maxflag = false;
availableElement.time.forEach((timeElement: any) => {
minindex = null;
maxindex = null;
minflag = false;
maxflag = false;
const retminmaxObj = this.chkMinMaxIndexTest(slotArray, dt, d, timeElement);
minflag = retminmaxObj.minFlag;
maxflag = retminmaxObj.maxFlag;
minindex = retminmaxObj.minIndex;
maxindex = retminmaxObj.maxIndex;
if (minflag && maxflag) {
const minmaxobj = { minIndex: minindex, maxIndex: maxindex };
minmaxarr.push(minmaxobj);
}
});
return { minFlag: minflag, maxFlag: maxflag, minmaxArr: minmaxarr };
}
setColorRangeTest(slotArray: any, minmaxarr: any, labelelement: any) {
slotArray.forEach((individualSlot: any, slotindex: any) => {
minmaxarr.forEach((minmaxrange: any) => {
if ((slotindex >= minmaxrange.minIndex) && (slotindex <= minmaxrange.maxIndex)) {
individualSlot['label'] = labelelement.label;
individualSlot['color'] = labelelement.colorcode;
individualSlot.colorflag = true;
}
});
});
}
chkMinMaxIndexTest(slotArray: any, dt: Date, d: Date, timeElement: any) {
let minindex: any = null;
let maxindex: any = null;
let minflag = false;
let maxflag = false;
slotArray.forEach((slotElement: any, slotIndex: number) => {
if (
(dt.getFullYear() === d.getFullYear()) &&
(dt.getMonth() === d.getMonth()) && (dt.getDate() === d.getDate())) {
// modify the matching conditions here if needed
const starttimeobj = this.getHourMinuteFormat(timeElement.starttime);
if (
((starttimeobj.hours === slotElement.starttime.getHours()) && (starttimeobj.minutes === slotElement.starttime.getMinutes()))
) {
minindex = slotIndex;
minflag = true;
}
}
if ((dt.getFullYear() === d.getFullYear()) && (dt.getMonth() === d.getMonth()) &&
(dt.getDate() === d.getDate())) {
const endtimeobj = this.getHourMinuteFormat(timeElement.endtime);
if ((endtimeobj.hours === slotElement.endtime.getHours()) && (endtimeobj.minutes === slotElement.endtime.getMinutes())) {
maxindex = slotIndex;
maxflag = true;
}
// start end
}
});
return { minFlag: minflag, maxFlag: maxflag, minIndex: minindex, maxIndex: maxindex };
}
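// parse a decimal time such as 9.3 into { hours: 9, minutes: 30 }
// (a single fractional digit is read as tens of minutes)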
getHourMinuteFormat(usertime: number) {
let arr = [];
arr = usertime.toString().split('.');
return { hours: parseInt((arr[0]), 10), minutes: arr[1] ? (parseInt((arr[1]), 10) * 10) : 0 };
}
ngAfterViewInit() {
let divHt;
let divWt;
divHt = this.elementView.nativeElement.offsetHeight;
divWt = this.elementView1.nativeElement.offsetWidth;
this.dateSpanHt = Math.round(divHt / this.datesArrlen);
this.dateSpanWt = Math.round((divWt) / this.newTimeArr.length);
this.dateSpanWt = 37;
}
generateLegendArr() {
this.labelData.forEach((element: any) => {
this.legendObj[element.label] = false;
});
this.labelData.forEach((element: any) => {
const obj = { label: element.label, colorcode: element.colorcode, textcolor: element.textcolor ? element.textcolor : 'black' };
this.legendArr.push(obj);
});
this.count++;
}
alterNoChangeFlag() {
this.nocellchange = true;
}
negateNoChangeFlag() {
this.nocellchange = false;
}
initializeTimeArr() {
this.completeTimeArr = ['12am', '1am', '2am', '3am', '4am', '5am', '6am', '7am', '8am',
'9am', '10am', '11am', '12pm', '1pm', '2pm', '3pm', '4pm',
'5pm', '6pm', '7pm', '8pm', '9pm', '10pm', '11pm',
];
}
generateTimeArr() {
let startindex;
let endindex;
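// Note: completeTimeArr holds labels such as '9am', so these comparisons only match when
// start-time/end-time are supplied as the same label strings; with purely numeric inputs
// no index is found and timearr stays empty.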
this.completeTimeArr.forEach((element: any, index: number) => {
if (element === this.startTime) {
startindex = index;
}
if (element === this.endTime) {
endindex = index;
}
});
this.setTimeArr(startindex, endindex);
}
setTimeArr(startindex: number, endindex: number) {
const tarr: any = [];
this.completeTimeArr.forEach((element: any, index: number) => {
if ((index >= startindex) && (index <= endindex)) {
const tobj = { time: element };
tarr.push(tobj);
}
});
this.dateArr[0].timearr = tarr;
}
onSelection(radioData: any) {
this.styleVar = '';
const obj = { label: radioData.label, colorcode: radioData.colorcode };
this.styleVar = obj;
this.onRadioClick.emit(obj);
}
clearColorFlag() {
this.dateArr1.forEach((element: any) => {
if (element.slots) {
element.slots.forEach((individualSlot: any) => {
individualSlot.colorflag = false;
});
}
});
}
timeBlockWithoutUndo(parentiterateitem: any, parentindex: any, childiterateitem: any, childindex: any) {
const flag = false;
if (this.radioValue.length > 0) {
if (!this.dateArr1[parentindex].slots[childindex].colorflag) {
const newobj = this.dateArr1[parentindex].slots[childindex];
newobj['label'] = this.styleVar.label;
newobj['color'] = this.styleVar.colorcode;
newobj.colorflag = true;
this.dateArr1[parentindex].slots[childindex] = newobj;
}
}
this.onClick.emit({
time: this.dateArr1[parentindex].slots[childindex].time,
label: this.dateArr1[parentindex].slots[childindex].label,
});
this.generateData();
}
timeBlockWithUndo(parentiterateitem: any, parentindex: any, childiterateitem: any, childindex: any) {
const flag = false;
if (this.radioValue.length > 0) {
if ((this.dateArr1[parentindex].slots[childindex].label)) {
// overriding logic starts here: a slot that already carries a label is cleared,
// whether or not it matches the currently selected label
if (this.dateArr1[parentindex].slots[childindex].label === this.styleVar.label) {
// unselect: the slot already carries the selected label, so clear it
const newobj = {
time: this.dateArr1[parentindex].slots[childindex].time, colorflag: false,
};
// assignment
this.dateArr1[parentindex].slots[childindex] = newobj;
// blank
} else {
// the slot carries a different label: clear it as well instead of overwriting it directly
const newobj2 = {
time: this.dateArr1[parentindex].slots[childindex].time, colorflag: false,
};
this.dateArr1[parentindex].slots[childindex] = newobj2;
}
// overriding logic ends here
}
}
this.onClick.emit({
time: this.dateArr1[parentindex].slots[childindex].time,
label: this.dateArr1[parentindex].slots[childindex].label,
});
this.generateData();
}
onTimeBlockClick(parentiterateitem: any, parentindex: any, childiterateitem: any, childindex: any) {
this.onClick.emit({
starttime: this.dateArr1[parentindex].slots[childindex].starttime,
endtime: this.dateArr1[parentindex].slots[childindex].endtime,
label: this.dateArr1[parentindex].slots[childindex].label ? this.dateArr1[parentindex].slots[childindex].label : 'not selected',
});
this.generateData();
}
onUndoClick() {
this.UndoBtnClick.emit('');
this.generateData();
}
onDragStart(event: any, iterate: any, parentindex: any, item: any, childindex: any) {
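// Note: the bare debounceTime(1000) calls in these drag handlers create an RxJS operator
// but never apply it to a stream, so they have no effect as written.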
debounceTime(1000);
const img = document.createElement('img');
event.dataTransfer.setDragImage(img, 0, 0);
this.dragFlag = true;
this.dragStartObj = {
dateObj: iterate,
dragparentindex: parentindex, dragcell: item, dragchildindex: childindex,
};
this.onDragStartEvent.emit({
starttime: this.dateArr1[parentindex].slots[childindex].starttime,
endtime: this.dateArr1[parentindex].slots[childindex].endtime,
label: this.dateArr1[parentindex].slots[childindex].label ? this.dateArr1[parentindex].slots[childindex].label : null,
});
}
ondragover(event: any, iterate: any, parentindex: any, item: any, childindex: any) {
debounceTime(1000);
this.onDragOverEvent.emit({
starttime: this.dateArr1[parentindex].slots[childindex].starttime,
endtime: this.dateArr1[parentindex].slots[childindex].endtime,
label: this.dateArr1[parentindex].slots[childindex].label ? this.dateArr1[parentindex].slots[childindex].label : null,
});
this.generateData();
this.dragEndObj = {
iterate1: iterate, parentindex1: parentindex,
item1: item, childindex1: childindex,
};
}
onDragEnd(event: any, iterate: any, parentindex: any, item: any, childindex: any) {
debounceTime(1000);
this.onDragEndEvent.emit('');
this.generateData();
}
}
|
/**
* Created with IntelliJ IDEA.
* User: neil
* Date: 18/04/2013
* Time: 13:52
* To change this template use File | Settings | File Templates.
*/
public class RulesKeyValueExtractorTest {
private RulesKeyValueExtractor kve;
@Before
public void before() {
kve = new RulesKeyValueExtractor();
}
@Test
public void testShouldSpeedFactSet() throws Exception {
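// Throughput micro-benchmark over a local FactSet access log (/WORK/logs/FactSet/access.log);
// disabled via the early return below.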
if (true) return;
String lastLine = "";
int lineCount = 0;
try {
// while (true) {
String file = "/WORK/logs/FactSet/access.log";
// while (true) {
RAF raf = RafFactory.getRafSingleLine(file);
String line = "";
long start = System.currentTimeMillis();
lineCount = 0;
int count = 0;
RulesKeyValueExtractor kve = new RulesKeyValueExtractor();
while ((line = raf.readLine()) != null) {
lastLine = line;
// System.out.println(line);
List<Pair> fields = kve.getFields(line);
count += fields.size();
lineCount++;
}
long end = System.currentTimeMillis();
System.out.println("Lines:" + lineCount + " Elapsed:" + (end - start) + " Fields:" + count);
printThroughputRate(file, (end - start));
// }
} catch (Throwable t) {
System.out.println("Line: " + lineCount + "\nLastLine:" + lastLine);
t.printStackTrace();
}
}
private void printThroughputRate(String absolutePath, long elapsed) {
long length = new File(absolutePath).length();
double seconds = elapsed / 1000.0;
long mbytes = length / FileUtil.MEGABYTES;
System.out.println(String.format("Throughput:%f MB/sec", mbytes / seconds));
}
@Test
public void testThingy() throws Exception {
//ns1:datacenterId="NA1"
//"OpenFileDescriptorCount": "183",
String [] rules = new String[] {
"[\"](A1:.)\": \"(^\"])[\"]",
};
RulesKeyValueExtractor.Config config = new RulesKeyValueExtractor.Config(Arrays.asList(rules), true);
config.startPos = 0;
kve = new RulesKeyValueExtractor(config);
// String nLine = "2013-08-23 08:00:18 REPORT_SCHEDULE Schedule:Windows_Disk_Space_Alert Action:Triggered Events:2 ThresholdPassed[1]\n" +
// "\n" +
// " \t23/08/2013 08:00:11,EX311,HarddiskVolume1,71,71";
//String nLine = "2013-09-18 10:01:25,747 INFO pool-2-thread-1 (license.TrialListener)\t Action:'Download' Email:'[email protected]' IpAddress:'217.20.25.200' Company:'mysome'";
//String nLine = "Oct 29 15:44:43 logscape-dev vmunix: [357366.319836] type=1701 audit(1383061483.300:21308): auid=4294967295 uid=1000 gid=1000 ses=4294967295 pid=21276 comm=\"chrome\" reason=\"seccomp\" sig=0 syscall=4 compat=0 ip=0x7f0b83906215 code=0x50000";
// String nLine = "08/11/13 09:24:43.360 INFO: [ServiceEvent] Engine:CDCS192022-18:TaskAccepted:FENG_VAR_LITE-3626900278003326020-0:'489726/0/6/0'";
// String nLine = "Nov 14 11:09:23 battlestar kernel: [ 13.148848] type = 1400 audit(1384427363.907:14): apparmor=\"STATUS\" operation=\"profile_load\" name=\"/usr/sbin/tcpdump\" pid=950 comm=\"apparmor_parser\"";
// String nLine = "2009-04-22 18:40:24,109 WARN main (ORMapperFactory)123 Aa = bB Service";
//String nLine = "Jul 31 00:00String: cloudAdminHost=admin.opsourcecloud.net|two";
// String nLine = "23-Jan-2014 15:41:23\tAGT-LON-UBU3\troot\t7901\t0\t0.0\t00:00:03\t1.2\t37988\t102980\t?\tS\t2-09:07:28\truby\t/usr/bin/puppet_agent";
//String nLine = "2014-01-30 16:29:35,238 INFO netty-reply-10-1 (space.AggSpaceImpl) LOGGER - REGISTER SEARCH:0_0_0_0_0_0_0_1%0_62570-sysadmin-LLABS-13910993750640-alteredcarbon.local-20140130_162935 req:0_0_0_0_0_0_0_1%0_62570-sysadmin-LLABS-13910993750640-alteredcarbon.local-20140130_162935\n";
// returning numeric key 189957: no value was found....
//String nLine2 = "Jul 30 23:11:50 189957: [syslog@9 s_id=\"as2ag1ccnlabash01:514\"]: *Jul 19 06:26:19.290: %CDP-4-DUPLEX_MISMATCH: duplex mismatch discovered on GigabitEthernet0/21 (not half duplex), with fs2ag1ccnlabash01(AMS16460245) mgmt0 (half duplex).";
//String nLine2 = "Jul 31 00:00:02 10.162.0.25 TP-Processor822 ERROR handlers.AbstractRestHandler error.159 - Exception occurred. Reason Code: SERVER_DOES_NOT_EXIST. Reason Detail: Could not find server with Id f2c50d77-e523-4b51-9c74-c4b2f438878e : Text logged: Could not find Server with Id f2c50d77-e523-4b51-9c74-c4b2f438878e";
// Zs example
String zLine = "06-Mar-2014 14:59:22 GMT\t{ \"objectName\": \"java.lang:type=OperatingSystem\", \"OpenFileDescriptorCount\": \"183\", \"MaxFileDescriptorCount\": \"40960\", \"FreeSwapSpaceSize\": \"19019595776\", \"ProcessCpuLoad\": \"0.0\", \"SystemCpuLoad\": \"0.0\", \"namepsace\": \"_.group1\", \"host\": \"10.28.1.170\" } ";
String zLine1 = "25-Jul-13 17:38:00\tjava.lang:type=Memory.HeapMemoryUsage: committed:991744000\tinit:268435456\tmax:1065025536\tused:323971800\tpid:3930\tport:8989";
// String zLine = "19-Aug-13 14:09:34 vhostTS=2013-08-19T14:09:00+01:00 entity=\"2400\" name=\"VPN-Unicredit-Smart\" type=\"VirtualMachine?\",diskusage=\"0\",cpu=\"71\",mem=\"399\",maxNetUsage=\"1\",diskRead=\"0\",diskWrite=\"0\",diskUsageMax=\"0\",netRcvRate=\"1\",netTransRate=\"0\",memConsumed=\"3144804\",netUsage=\"1\"";
String zLine2 = "6/15/2013 10:17:12 AM Thread: Autobahn background (ID: 4996) 6022 Information - After service call: IStreamingSettingsProvider.GetStreamingSettings(). Response: \n" +
" {\n" +
" Type: StreamingSettings\n" +
" DefaultAmountsSettings = '\n" +
" {\n" +
" Type: AmountsSettings\n" +
" LeftMinAmount = '100'\n" +
" RightMinAmount = '100'\n" +
" LeftDefaultAmount = '1000000'\n" +
" service=\"WorkstationFiles\"" +
" RightDefaultAmount=\"1000000\"'\n" +
" }'\n" +
" DefaultMaxFarTenor = '120M'\n" +
" DefaultNdfTenor = '1M'\n" +
" MinNdfTenor = 'SN'\n" +
" DefaultFixingReferenceCode = 'MANUAL'\n" +
" Headers = '";
// damians example
String dLine = "2013-06-13 \n" +
"06:08:02,573 \n" +
"[Thread-0] \n" +
"INFO \n" +
"instrumentation \n" +
"- \n" +
" ODCThread[pool-2-thread-4] \n" +
" sessionId:2e7a6a57-4dc7-449e-a222-a0b0ca62a353, \n" +
" queryId:5d08de71-470c-432f-92d4-28bb363b7e52, \n" +
" user:vest, \n" +
" status:finished, \n" +
" partitionCount:100, \n" +
" objectCount:0, \n" +
" metric:ExecutePage, \n" +
" took:335, \n" +
" query:odc.query().fromTransaction().where(transaction().getId()).equalTo($0).joinToCashflows(); \n" +
" args:$0 \n" +
"= \n" +
"TransactionIdImpl \n" +
"[sourceSystemInstance=WSS \n" +
"GBLO \n" +
"RBS, \n" +
"sourceSystemTradeId=727272_PC_decr1]; \n" +
" ";
List<Pair> fields = kve.getFields(zLine);
System.out.println("Fields:" + fields.size());
for (Pair field : fields) {
System.out.println(field.key + " => " + field.value);
}
}
@Test
public void testSingleKey_KVSlider() throws Exception {
assertEquals("[Pair(2,9,14)]", scanLine(new KeySlider("[\t, ](A1.) = (^, )"), " a USER = NEIL "));
}
@Test
public void testThingy2() throws Exception {
String line = "2013-06-18 syslog.uri=\"udp://alteredcarbon.local:1514/syslog\"";
List<Pair> fields = kve.getFields(line);
System.out.println(fields);
Assert.assertEquals(1, fields.size());
Assert.assertEquals("udp://alteredcarbon.local:1514/syslog", fields.get(0).value);
}
@Test
public void testTibcoXMLMessageAttributes() throws Exception {
String msg = "<Trace Level=\"MIN\">\n" +
" <Time Millis=\"1444646759145\">2015-10-12 12:45:59.145+02:00</Time>\n" +
" <Server Format=\"IP\">EURV192D01.eu.rabodev.com</Server>\n" +
" <LogText><![CDATA[hardware outputFile=-480440992]]></LogText>\n" +
" <Source FileName=\"./../../../src/invscan/scanengine/wscanhw.cpp\" Method=\"wscanhw()\" Line=\"472\"/>\n" +
" <Thread>7868</Thread>\n" +
" <Process>10672</Process>\n" +
"</Trace>";
List<Pair> fields = kve.getFields(msg);
System.out.println(fields.toString());
Assert.assertTrue(fields.get(0).toString().contains("Level"));
}
@Test
public void testEMSXML() throws Exception {
String ems = "2013-07-10 15:00:39,763 [EMSDATA] <?xml version=\"1.0\" encoding=\"UTF-8\"?>[loyaltyrefdata][][/loyaltyrefdata] Request:<SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\"><SOAP-ENV:Header/><SOAP-ENV:Body><RetrieveReasonCodesResponse xmlns=\"http://soa.delta.com/loyaltyreferencedata/v1\"><ReasonCodesResponse Description=\"PREVIOUSLY REPORTED\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"A\"/><ReasonCodesResponse Description=\"NON-QUAL ACTIVITY\" MileageIndicator=\"Y\" PublishCode=\"Y\" ReasonCode=\"B\"/><ReasonCodesResponse Description=\"NEED DOCUMENTATION\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"C\"/><ReasonCodesResponse Description=\"CPN REFUNDED/XCHGD\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"D\"/><ReasonCodesResponse Description=\"POST TO ONE PROGRAM\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"F\"/><ReasonCodesResponse Description=\"PROMO CAP REACHED\" MileageIndicator=\"Y\" PublishCode=\"Y\" ReasonCode=\"H\"/><ReasonCodesResponse Description=\"PROMO CAP EXCEEDED\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"I\"/><ReasonCodesResponse Description=\"TRNSFR TO FLYINGBLUE\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"K\"/><ReasonCodesResponse Description=\"RETRO OUTSIDE 9 MO\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"L\"/><ReasonCodesResponse Description=\"DUPE ACT FROM MERGE\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"M\"/><ReasonCodesResponse Description=\"NON-QUAL ACTIVITY\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"N\"/><ReasonCodesResponse Description=\"DUPE ORIG OR DEST\" MileageIndicator=\"N\" PublishCode=\"Y\" ReasonCode=\"O\"/><ReasonCodesResponse Description=\"PENDING FROM PARTNER\" ";
List<Pair> fields = kve.getFields(ems);
System.out.println(fields);
}
//@Test DodgyTest - this test is bogus
public void testFromSOAPMessage() throws Exception {
//String line = "Jul 31 00:51:43 10.162.0.25 service.oec.vm.provision.submission.3 INFO transformer.CloudRequestJaxbTransformer info.89 - Got cloud request: datacenterIdentifier=null, oecRp=f47add5a-9e76-49d2-af07-02235ff98d4a,params={},payload=<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?><ns1:deployServerRequest ns1:serverId=\"f47add5a-9e76-49d2-af07-02235ff98d4a\" ns1:datacenterId=\"NA3\" xmlns:ns1=\"http://oec.messaging.opsource.net/schemas/messaging\"><ns1:organizationId>bc14fcd1-a3ae-4d51-b801-400d5c946adf</ns1:organizationId><ns1:serverName>serv6</ns1:serverName><ns1:machineName>10-226-181-11</ns1:machineName><ns1:serverImageVmwareId>vm-22621</ns1:serverImageVmwareId><ns1:cpuCount>2</ns1:cpuCount><ns1:memoryAmountMB>4096</ns1:memoryAmountMB><ns1:osStorageGb>50</ns1:osStorageGb><ns1:localStorageGb>0</ns1:localStorageGb><ns1:isStarted>false</ns1:isStarted><ns1:ipAddress>10.226.181.11</ns1:ipAddress><ns1:networkVirtualContextId>3b05b256-c922-11e2-b29c-001517c4643e</ns1:networkVirtualContextId><ns1:privateNet>10.";
// String line = " ns1:id=\"21000\"";
String line = " <ns1:ipAddress>10.226.181.11</ns1:ipAddress>";
//PARAMS !!
// ?option=com_content§ionid=-1&task=edit&cid[]=3"
//KeySlider s1 = new KeySlider("<(A1:.)>(^<)");
KeySlider s1 = new KeySlider("[\t, ](A1:.)=\"(^\")");
scanLine(new KeySlider[] { s1 }, line,false);
String[] kv2 = s1.results.get(0).getKeyValue(line);
assertEquals("ns1:ipAddress=10.226.181.11", kv2[0] +"="+ kv2[1]);
// kv2 = s1.results.get(1).getKeyValue(line);
// assertEquals("Key2:Value2", kv2[0] +":"+ kv2[1]);
//
// kv2 = s1.results.get(2).getKeyValue(line);
// assertEquals("aqs:chrome.0.57j0l3j60j62.641j0", kv2[0] +":"+ kv2[1]);
//
// kv2 = s1.results.get(3).getKeyValue(line);
// assertEquals("sourceid:chrome", kv2[0] +":"+ kv2[1]);
}
@Test
public void testFromURLParams() throws Exception {
String line = "https://www.google.co.uk/search?Key1=Value1&Key2=Value2&aqs=chrome.0.57j0l3j60j62.641j0&sourceid=chrome&ie=UTF-8";
//PARAMS !!
// ?option=com_content§ionid=-1&task=edit&cid[]=3"
KeySlider s1 = new KeySlider("[?&](A1.)=(A1.)");
scanLine(new KeySlider[] { s1 }, line,false);
String[] kv2 = s1.results.get(0).getKeyValue(line);
assertEquals("Key1:Value1", kv2[0] +":"+ kv2[1]);
kv2 = s1.results.get(1).getKeyValue(line);
assertEquals("Key2:Value2", kv2[0] +":"+ kv2[1]);
kv2 = s1.results.get(2).getKeyValue(line);
assertEquals("aqs:chrome.0.57j0l3j60j62.641j0", kv2[0] +":"+ kv2[1]);
kv2 = s1.results.get(3).getKeyValue(line);
assertEquals("sourceid:chrome", kv2[0] +":"+ kv2[1]);
}
@Test
public void testFromRealLINE_Weblogs() throws Exception {
String line = "147.114.226.182 - - [31/Mar/2010:12:02:29 -0400] \"GET /plu/bullist.gif HTTP/1.1\" 200 108 \"http://www.liquidlabs-cloud.com/administrator/index.php?option=com_content§ionid=-1&task=edit&cid[]=3\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2\"\n";
// "147.114.226.182 - - [31/Mar/2010:12:02:29 -0400] \"GET /plugins/editors/tinymce/jscripts/tiny_mce/themes/advanced/images/justifyfull.gif HTTP/1.1\" 200 71 \"http://www.liquidlabs-cloud.com/administrator/index.php?option=com_content§ionid=-1&task=edit&cid[]=3\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2\"\n" +
// "147.114.226.182 - - [31/Mar/2010:12:02:29 -0400] \"GET /plugins/editors/tinymce/jscripts/tiny_mce/themes/advanced/images/indent.gif HTTP/1.1\" 200 112 \"http://www.liquidlabs-cloud.com/administrator/index.php?option=com_content§ionid=-1&task=edit&cid[]=3\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2.2) Gecko/20100316 Firefox/3.6.2\"\n";
//PARAMS !!
// ?option=com_content§ionid=-1&task=edit&cid[]=3"
KeySlider s1 = new KeySlider("[?&](A1.)=(A1.)");
scanLine(new KeySlider[] { s1 }, line,false);
String[] kv2 = s1.results.get(0).getKeyValue(line);
assertEquals("option:com_content", kv2[0] +":"+ kv2[1]);
kv2 = s1.results.get(1).getKeyValue(line);
assertEquals("sectionid:-1", kv2[0] +":"+ kv2[1]);
}
@Test
public void testFromRealLINE_FACTSET() throws Exception {
// String line = "21-May-13 11:11:11\toType=\"VirtualMachine\",configStatus=\"GREEN\",guest.toolsStatus=\"TOOLS_NOT_INSTALLED\",name=\"darren-linux\",runtime.maxCpuUsage=\"3058\",runtime.maxMemoryUsage=\"2048\",runtime.powerState=\"POWERED_OFF\"\n" +
// "21-May-13 11:11:11\toType=\"VirtualMachine\",configStatus=\"GREEN\",guest.guestFamily=\"solarisGuest\",guest.hostName=\"soldev01\",guest.ipAddress=\"192.168.70.208\",guest.toolsStatus=\"TOOLS_OK\",name=\"soldev01\",runtime.maxCpuUsage=\"3058\",runtime.maxMemoryUsage=\"1024\",runtime.powerState=\"POWERED_ON\"";
// String line = "2013-04-03 12:40:04:70 service=\"WorkstationFiles\",serverName=\"Gateway\",feHost=\"cauthstagea02\",chainId=\"515C5B643267A439\",clientIp=\"192.168.1.1\",method=\"GET\",url=\"/services/WorkstationFiles/real_time/config/ic_bw.xmlss\",httpVer=\"HTTP/1.1\",hdrHost=\"lima-gateway-staging.factset.com\",hdrConnection=\"keep-alive\",hdrAcceptCharset=\"utf-8\",hdrIfModifiedSince=\"Fri, 04 May 2012 17:55:23 GMT\",hdrXFds3pAtlasVersion=\"2192.168.1.1\",hdrXFdsOverrideName=\"qa\",hdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",hdrXDatadirectAuthToken=\"724b192.168.1.1\",hdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",hdrAcceptEncoding=\"gzip,deflate\",hdrAcceptLanguage=\"en-US,en\",hdrXForwardedFor=\"192.168.1.1\",user=\"NKOCHAKIAN\",serial=\"QA\",authUser=\"OTP_SERVICES\",beRspTime=\"42\",beMethod=\"GET\",beUrl=\"/qa/service/WorkstationFiles/real_time/config/ic_bw.xmlss?username=NKOCHAKIAN&serial_number=QA\",beHttpVer=\"HTTP/1.1\",beHdrHost=\"services-staging.factset.com\",beHdrConnection=\"keep-alive\",beHdrAcceptCharset=\"utf-8\",beHdrIfModifiedSince=\"Fri, 04 May 2012 17:55:23 GMT\",beHdrXFds3pAtlasVersion=\"2192.168.1.1\",beHdrXFdsOverrideName=\"qa\",beHdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",beHdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",beHdrAcceptEncoding=\"gzip,deflate\",beHdrAcceptLanguage=\"en-US,en\",beHdrXForwardedFor=\"192.168.1.1\",beHdrXFdsaRequestKey=\"515C5B643267A439\",beHdrXFdsaProxyOrigClientAddr=\"192.168.1.1\",beRspCode=\"304\",beRspContentLength=\"0\",beRspHdrDate=\"Wed, 03 Apr 2013 16:40:04 GMT\",beRspHdrServer=\"Apache/2.2.3 (Red Hat)\",beRspHdrConnection=\"Keep-Alive\",beRspHdrKeepAlive=\"timeout=15, max=98\",beRspHdrCacheControl=\"private, max-age=86400, persistent-storage\",duration=\"58\",rspCode=\"304\",rspContentLength=\"0\",rspHdrDate=\"Wed, 03 Apr 2013 16:40:04 GMT\",rspHdrCacheControl=\"private, max-age=86400, persistent-storage\",rspHdrConnection=\"keep-alive\",rspHdrKeepAlive=\"timeout=30\",rspHdrXDatadirectRequestKey=\"515C5B643267A439\",rspHdrServer=\"FactSet Lima Proxy\",connLeased=\"0\",connAvailable=\"2\",connPending=\"0\",connMax=\"1024\",reqActive=\"0\",reqPoolSize=\"10\",reqMaxPoolSize=\"1024\",resActive=\"0\",resPoolSize=\"10\",resMaxPoolSize=\"1024\"";
//String line = "2013-04-03 12:40:03:899 service=\"WorkstationFiles\",rspHdrCacheControl=\"private, max-age=86400, persistent-storage\",serverName=\"Gateway\",feHost=\"cauthstagea02\",chainId=\"515C5B63EFBB1E46\",clientIp=\"192.168.1.1\",method=\"GET\",url=\"/services/WorkstationFiles/real_time/config/rt_fields.xml\",httpVer=\"HTTP/1.1\",hdrHost=\"lima-gateway-staging.factset.com\",hdrConnection=\"keep-alive\",hdrAcceptCharset=\"utf-8\",hdrIfModifiedSince=\"Thu, 10 Jan 2013 09:22:14 GMT\",hdrXFds3pAtlasVersion=\"2192.168.1.1\",hdrXFdsOverrideName=\"qa\",hdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",hdrXDatadirectAuthToken=\"724b192.168.1.1\",hdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",hdrAcceptEncoding=\"gzip,deflate\",hdrAcceptLanguage=\"en-US,en\",hdrXForwardedFor=\"192.168.1.1\",user=\"NKOCHAKIAN\",serial=\"QA\",authUser=\"OTP_SERVICES\",beRspTime=\"136\",beMethod=\"GET\",beUrl=\"/qa/service/WorkstationFiles/real_time/config/rt_fields.xml?username=NKOCHAKIAN&serial_number=QA\",beHttpVer=\"HTTP/1.1\",beHdrHost=\"services-staging.factset.com\",beHdrConnection=\"keep-alive\",beHdrAcceptCharset=\"utf-8\",beHdrIfModifiedSince=\"Thu, 10 Jan 2013 09:22:14 GMT\",beHdrXFds3pAtlasVersion=\"2192.168.1.1\",beHdrXFdsOverrideName=\"qa\",beHdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",beHdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",beHdrAcceptEncoding=\"gzip,deflate\",beHdrAcceptLanguage=\"en-US,en\",beHdrXForwardedFor=\"192.168.1.1\",beHdrXFdsaRequestKey=\"515C5B63EFBB1E46\",beHdrXFdsaProxyOrigClientAddr=\"192.168.1.1\",beRspCode=\"304\",beRspContentLength=\"0\",beRspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",beRspHdrServer=\"Apache/2.2.3 (Red Hat)\",beRspHdrConnection=\"Keep-Alive\",beRspHdrKeepAlive=\"timeout=15, max=99\",beRspHdrCacheControl=\"private, max-age=86400, persistent-storage\",duration=\"150\",rspCode=\"304\",rspContentLength=\"0\",rspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",rspHdrCacheControl=\"private, max-age=86400, persistent-storage\",rspHdrConnection=\"keep-alive\",rspHdrKeepAlive=\"timeout=30\",rspHdrXDatadirectRequestKey=\"515C5B63EFBB1E46\",rspHdrServer=\"FactSet Lima Proxy\",connLeased=\"0\",connAvailable=\"2\",connPending=\"0\",connMax=\"1024\",reqActive=\"0\",reqPoolSize=\"10\",reqMaxPoolSize=\"1024\",resActivne=\"0\",resPoolSize=\"10\",resMaxPoolSize=\"1024\"\n";
//String line = "2013-04-03 12:40:04:433 service=\"WorkstationFiles\",hdrIfModifiedSince=\"Wed, 24 Oct 2012 07:18:27 GMT\",serverName=\"Gateway\",feHost=\"cauthstagea02\",chainId=\"515C5B6448297805\",clientIp=\"192.168.1.1\",method=\"GET\",url=\"/services/WorkstationFiles/real_time/config/FloatFieldMapping.txt\",httpVer=\"HTTP/1.1\",hdrHost=\"lima-gateway-staging.factset.com\",hdrConnection=\"keep-alive\",hdrAcceptCharset=\"utf-8\",hdrXFds3pAtlasVersion=\"2192.168.1.1\",hdrXFdsOverrideName=\"qa\",hdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",hdrXDatadirectAuthToken=\"724b192.168.1.1\",hdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",hdrAcceptEncoding=\"gzip,deflate\",hdrAcceptLanguage=\"en-US,en\",hdrXForwardedFor=\"192.168.1.1\",user=\"NKOCHAKIAN\",serial=\"QA\",authUser=\"OTP_SERVICES\",beRspTime=\"46\",beMethod=\"GET\",beUrl=\"/qa/service/WorkstationFiles/real_time/config/FloatFieldMapping.txt?username=NKOCHAKIAN&serial_number=QA\",beHttpVer=\"HTTP/1.1\",beHdrHost=\"services-staging.factset.com\",beHdrConnection=\"keep-alive\",beHdrAcceptCharset=\"utf-8\",beHdrIfModifiedSince=\"Wed, 24 Oct 2012 07:18:27 GMT\",beHdrXFds3pAtlasVersion=\"2192.168.1.1\",beHdrXFdsOverrideName=\"qa\",beHdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",beHdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",beHdrAcceptEncoding=\"gzip,deflate\",beHdrAcceptLanguage=\"en-US,en\",beHdrXForwardedFor=\"192.168.1.1\",beHdrXFdsaRequestKey=\"515C5B6448297805\",beHdrXFdsaProxyOrigClientAddr=\"192.168.1.1\",beRspCode=\"304\",beRspContentLength=\"0\",beRspHdrDate=\"Wed, 03 Apr 2013 16:40:04 GMT\",beRspHdrServer=\"Apache/2.2.3 (Red Hat)\",beRspHdrConnection=\"Keep-Alive\",beRspHdrKeepAlive=\"timeout=15, max=96\",beRspHdrCacheControl=\"private, max-age=86400, persistent-storage\",duration=\"59\",rspCode=\"304\",rspContentLength=\"0\",rspHdrDate=\"Wed, 03 Apr 2013 16:40:04 GMT\",rspHdrCacheControl=\"private, max-age=86400, persistent-storage\",rspHdrConnection=\"keep-alive\",rspHdrKeepAlive=\"timeout=30\",rspHdrXDatadirectRequestKey=\"515C5B6448297805\",rspHdrServer=\"FactSet Lima Proxy\",connLeased=\"0\",connAvailable=\"2\",connPending=\"0\",connMax=\"1024\",reqActive=\"0\",reqPoolSize=\"10\",reqMaxPoolSize=\"1024\",resActive=\"0\",resPoolSize=\"10\",resMaxPoolSize=\"1024\"\n";
String line = "2013-04-03 12:40:04:433 service=\"WorkstationFiles\",beUrl=\"/qa/service/WorkstationFiles/real_time/config/rt_fields.xml?username=NKOCHAKIAN&serial_number=QA\",beHttpVer=\"HTTP/1.1\",beHd\n" +
"rHost=\"services-staging.factset.com\",beHdrConnection=\"keep-alive\",beHdrAcceptCharset=\"utf-8\",beHdrIfModifiedSince=\"Thu, 10 Jan 2013 09:22:14 GMT\",beHdrXFds3pAtlasVersion=\"2192.168.1.1\"\n" +
",beHdrXFdsOverrideName=\"qa\",beHdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",beHdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",beHdrAcceptEncoding=\"gzip,deflate\",beHdrAcceptLanguage=\"en-US,en\",beHdrXForwardedFor=\"192.168.1.1\",beHdrXFdsaRequestKey=\"515C5B63EFBB1E46\",beHdrXFdsaProxyOrigClientAddr=\"192.168.1.1\",beRspCode=\"304\",beRspContentLeng\n" +
"th=\"0\",beRspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",beRspHdrServer=\"Apache/2.2.3 (Red Hat)\",beRspHdrConnection=\"Keep-Alive\",beRspHdrKeepAlive=\"timeout=15, max=99\",beRspHdrCacheControl=\n" +
"\"private, max-age=86400, persistent-storage\",duration=\"150\",rspCode=\"304\",rspContentLength=\"0\",rspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",rspHdrCacheControl=\"private, max-age=86400, pe\n" +
"rsistent-storage\",rspHdrConnection=\"keep-alive\",rspHdrKeepAlive=\"timeout=30\",rspHdrXDatadirectRequestKey=\"515C5B63EFBB1E46\",rspHdrServer=\"FactSet Lima Proxy\",connLeased=\"0\",connAvailab\n" +
"le=\"2\",connPending=\"0\",connMax=\"1024\",reqActive=\"0\",reqPoolSize=\"10\",reqMaxPoolSize=\"1024\",resActive=\"0\",resPoolSize=\"10\",resMaxPoolSize=\"1024\"\n";
// List<Pair> fields = kve.getFields(line);
// System.out.println("Fields:" + fields);
// KeySlider s1 = new KeySlider(" (A1)=\"(^\")");
KeySlider s2 = new KeySlider("[\t, ](A1)=\"(^\")");
KeySlider[] ss = new KeySlider[] { s2 };
scanLine(ss, line,false);
// String[] kv1 = s1.results.get(0).getKeyValue(line);
// assertEquals("service:WorkstationFiles", kv1[0] +":"+ kv1[1]);
String[] kv2 = s2.results.get(0).getKeyValue(line);
assertEquals("service:WorkstationFiles", kv2[0] +":"+ kv2[1]);
//assertEquals("beUrl:/qa/service/WorkstationFiles/real_time/config/rt_fields.xml?username=NKOCHAKIAN&serial_number=QA", kv2[0] +":"+ kv2[1]);
}
@Test
public void shouldLearnTheConfig() throws Exception {
String line = "2013-04-03 12:40:04:433 service=\"WorkstationFiles\",beUrl=\"/qa/service/WorkstationFiles/real_time/config/rt_fields.xml?username=NKOCHAKIAN&serial_number=QA\",beHttpVer=\"HTTP/1.1\",beHd\n" +
// "rHost=\"services-staging.factset.com\",beHdrConnection=\"keep-alive\",beHdrAcceptCharset=\"utf-8\",beHdrIfModifiedSince=\"Thu, 10 Jan 2013 09:22:14 GMT\",beHdrXFds3pAtlasVersion=\"2192.168.1.1\"\n" +
// ",beHdrXFdsOverrideName=\"qa\",beHdrAuthorization=\"Basic T1RQX1NFUlZJQ0VTOmQ0M3lQdnFrcEp0RUlmUTI=\",beHdrUserAgent=\"Chrome/192.168.1.1 FactSet/2192.168.1.1\",beHdrAcceptEncoding=\"gzip,deflate\",beHdrAcceptLanguage=\"en-US,en\",beHdrXForwardedFor=\"192.168.1.1\",beHdrXFdsaRequestKey=\"515C5B63EFBB1E46\",beHdrXFdsaProxyOrigClientAddr=\"192.168.1.1\",beRspCode=\"304\",beRspContentLeng\n" +
// "th=\"0\",beRspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",beRspHdrServer=\"Apache/2.2.3 (Red Hat)\",beRspHdrConnection=\"Keep-Alive\",beRspHdrKeepAlive=\"timeout=15, max=99\",beRspHdrCacheControl=\n" +
// "\"private, max-age=86400, persistent-storage\",duration=\"150\",rspCode=\"304\",rspContentLength=\"0\",rspHdrDate=\"Wed, 03 Apr 2013 16:40:03 GMT\",rspHdrCacheControl=\"private, max-age=86400, pe\n" +
// "rsistent-storage\",rspHdrConnection=\"keep-alive\",rspHdrKeepAlive=\"timeout=30\",rspHdrXDatadirectRequestKey=\"515C5B63EFBB1E46\",rspHdrServer=\"FactSet Lima Proxy\",connLeased=\"0\",connAvailab\n" +
"le=\"2\",connPending=\"0\",resMaxPoolSize=\"1024\"\n";
List<Pair> fields1 = kve.getFields(line);
int pos = 0;
for (Pair fieldI : fields1) {
System.out.printf(pos++ + ") " + fieldI + "\n");
}
RulesKeyValueExtractor.Config config = kve.getConfig();
assertTrue(config.getRules().size() > 0);
config.learn = false;
kve = new RulesKeyValueExtractor(config);
List<Pair> fields2 = kve.getFields(line);
pos = 0;
for (Pair fieldI : fields2) {
System.out.printf(pos++ + ") " + fieldI + "\n");
}
Assert.assertEquals(fields1.size(),fields2.size());
}
@Test
public void shouldLearnTheConfigAgain() throws Exception {
String line = "2013-08-23 08:00:18 REPORT_SCHEDULE Schedule:Windows_Disk_Space_Alert Action:Triggered Events:2 ThresholdPassed[1]\n";
List<Pair> fields1 = kve.getFields(line);
RulesKeyValueExtractor.Config config1 = kve.getConfig();
assertEquals(1, config1.rules.size());
assertEquals(35, config1.startPos);
assertEquals(35, config1.rules.get(0).from);
// Assert.assertEquals(fields1.size(),fields2.size());
}
@Test
public void testFromRealLINE_WithQuotes() throws Exception {
String line = "2013-04-18 00:00:00,184 INFO manager-34-25 (tailer.TailerEmbeddedAggSpace)\tStarting SEARCH[all_X_System_Utilizati CPU=\"100\" on_X_LLABS-4a7c4b56-13e173f2eea-73d4-20130418-000000]";
KeySlider keySlider = new KeySlider(" (A1)=\"(1)");
scanLine(keySlider, line);
assertEquals("[Pair(113,118,122)]",keySlider.results().toString());
String[] keyValue = keySlider.results().get(0).getKeyValue(line);
assertEquals("CPU:100", keyValue[0] + ":" + keyValue[1]);
}
@Test
public void testFromRealLINE() throws Exception {
String line = "2013-04-18 00:00:00,184 INFO manager-34-25 (tailer.TailerEmbeddedAggSpace)\tStarting SEARCH[all_X_System_Utilizati CPU:100 on_X_LLABS-4a7c4b56-13e173f2eea-73d4-20130418-000000]";
KeySlider keySlider = new KeySlider(" (A1):(1)");
scanLine(keySlider, line);
assertEquals("[Pair(113,117,121)]",keySlider.results().toString());
String[] keyValue = keySlider.results().get(0).getKeyValue(line);
assertEquals("CPU:100", keyValue[0] + ":" + keyValue[1]);
}
@Test
public void testGrabMultipleKeys() throws Exception {
KeySlider keySlider = new KeySlider(" (A1):(1)");
scanLine(keySlider, " CPU:100 stuff CPU2:100 ");
assertEquals("[Pair(0,4,8), Pair(14,19,23)]",keySlider.results().toString());
}
@Test
public void testOffsetMoreKey_KVSlider() throws Exception {
KeySlider keySlider = new KeySlider(" (A1):(1)");
scanLine(keySlider, " CPU:100 stuff");
assertEquals("[Pair(2,6,10)]",keySlider.results().toString());
}
@Test
public void testOffsetKey_KVSlider() throws Exception {
KeySlider keySlider = new KeySlider(" (A1):(A1)");
scanLine(keySlider, " CPU:100 stuff");
assertEquals("[Pair(1,5,9)]",keySlider.results().toString());
}
@Test
public void testSingleKey_WithVQuotes() throws Exception {
String line = " host.cpu=\"100\"";
KeySlider slider = new KeySlider("[, ](A1.)=\"(^\")");
scanLine(slider, line);
System.out.println("GOT:" + slider.results().get(0).toString());
String[] keyValue = slider.results().get(0).getKeyValue(line);
Assert.assertEquals("host.cpu:100", keyValue[0] + ":" + keyValue[1]);
}
/**
* "key": "val"
* "key": 999
* "key": val,
* @throws Exception
*/
@Test
public void testSingleKey_WithJSonr() throws Exception {
assertEquals("[Pair(1,7,11)]", scanLine(new KeySlider("\"(A1)\": (1)"), " \"CPU\": 100, "));
}
@Test
public void testSingleKey_KVSliderEQ() throws Exception {
assertEquals("[Pair(0,4,8)]", scanLine(new KeySlider(" (A1):(1)"), " CPU:100 stuff"));
}
@Test
public void testSingleKey_KVSliderOptionalPre() throws Exception {
assertEquals("[Pair(0,4,8)]", scanLine(new KeySlider("[, ](A1):(1)"), " CPU:100 stuff"));
}
@Test
public void testGetSetupStuff() throws Exception {
String testLine = " \"stuff\": \"value\"";
KVRule.Rule preToken = kve.getPreText(" (A1):(A1)");
assertTrue(preToken.toString().contains("SingleChar"));
String postToken = kve.getPostText(" (A1)\":(A1)\"");
assertEquals("\":", postToken);
KVRule.Rule rule = kve.getKeyRule(" (A1) (1) ");
assertNotNull(rule);
KVRule.Rule vrule = kve.getValueRule(" (A1) (1) ");
assertNotNull(vrule);
}
@Test
public void shouldMatchFieldWithSpaces() {
final RulesKeyValueExtractor extractor = new RulesKeyValueExtractor("[\t, [](A1.)='(^')[']");
final List<Pair> fields = extractor.getFields(" FIELD1='abc' field2='def' field3='112 345' ");
//2015-08-23 20:43:24,473 INFO pool-2-thread-2 (license.TrialListener) Action:'Download' Email:'[email protected]' IpAddress:'173.219.61.203' Company:'AYS'
assertThat(fields.size(), is(3));
}
@Test
public void shouldFindkeyPrecededByTab() {
String foo = "2013-09-24 13:32:26,226 INFO long-running-11-22 (event.LoggingEventMonitor)\tevent:kvIndexRemove dbName:lut logId:242";
final RulesKeyValueExtractor extractor = new RulesKeyValueExtractor();
final List<Pair> fields = extractor.getFields(foo);
assertThat(fields.size(), is(3));
}
@Test
public void shouldDoSomething() {
KVRule.Rule preMatcher = kve.getPreText("[, ](A1):(A1)");
assertTrue(preMatcher.isValid(','));
assertTrue(preMatcher.isValid(' '));
}
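// feed the line one character at a time through each KeySlider and, when requested,
// return the stringified match positions recorded by the first slider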
private String scanLine(KeySlider keySlider, String line) {
return scanLine(new KeySlider[] { keySlider}, line, true);
}
private String scanLine(KeySlider[] slider, String line, boolean stringifyResults) {
char[] chars = line.toCharArray();
for (int i = 0; i < chars.length ; i++) {
for (int j = 0; j < slider.length; j++) {
slider[j].next(chars[i], i, chars.length);
}
}
if (stringifyResults) return slider[0].results().toString();
else return "";
}
} |