content
stringlengths 10
4.9M
|
---|
#ifndef INNER_NODE_H
#define INNER_NODE_H
#include <stdexcept>
#include "inode.hpp"
#include "ibackpropagation_strategy.hpp"
namespace mcts {
// ----------------------------------------------------------------------
/// @brief implements most methods of the inode interface. some methods
/// have to be implemented problem dependend. Inner nodes are
/// all nonterminal nodes.
///
/// @tparam Context @README
/// @tparam Config @README
// ----------------------------------------------------------------------
template <typename Context, typename Config>
class InnerNode : public INode<Context, Config> {
  typedef typename INode<Context, Config>::node_t node_t;
  typedef typename IVisitor<Context, Config>::visitor_t visitor_t;
  /// stores the context for the current node
  Context context_;
  /// stores a pointer to the config obj
  Config *config_;
  /// Non-owning pointer to the parent node.
  node_t *parent_;
  /// Child nodes; owned by this node and deleted in the destructor.
  // NOTE(review): 'vector' is used unqualified -- presumably a
  // using-declaration in inode.hpp brings std::vector into scope; confirm.
  vector<node_t *> children_;
public:
  /// strategy that is used for propagating values back in the tree.
  IBackpropagationStrategy *backprop_strat_;
  /// Constructs an inner node; all pointer arguments are stored, not owned.
  InnerNode(const Context &context, Config *config, node_t *parent,
  IBackpropagationStrategy *backprop_strat)
  : context_(context), config_(config), parent_(parent),
  backprop_strat_(backprop_strat) {}
  /// Creates this node's children (problem dependent).
  virtual void expand() = 0;
  /// Picks one child according to the tree policy (problem dependent).
  virtual node_t *select_child() = 0;
  /// Visitor dispatch.
  virtual void accept(visitor_t *visitor) { visitor->visit(this); }
  // NOTE(review): returns the children vector by value -- copies on each call.
  virtual const vector<node_t *> children() const { return children_; }
  /// Takes ownership of 'child'; it is deleted in ~InnerNode().
  virtual void add_child(node_t *child) { children_.push_back(child); }
  virtual Config *config() const { return config_; }
  // NOTE(review): returns the context by value.
  virtual const Context context() const { return context_; }
  virtual node_t *parent() const { return parent_; }
  /// Descends the tree, expanding an unexpanded node first, and recurses
  /// until a node type ends the recursion. (Method name typo "recusively"
  /// is part of the public interface and kept for compatibility.)
  virtual node_t *select_recusively() {
  if (children_.size() == 0)
  expand();
  node_t *selec = select_child();
  node_t *selectedChild = selec->select_recusively();
  return selectedChild;
  }
  /// Simulation is only meaningful on terminal/leaf nodes.
  virtual double simulate() const {
  throw std::runtime_error("not supported.");
  }
  virtual double ev() const { return backprop_strat_->ev(); }
  virtual double std_dev() const { return backprop_strat_->std_deviation(); }
  virtual double variance() const { return backprop_strat_->variance(); }
  virtual int nb_samples() const { return backprop_strat_->nb_samples(); }
  /// Updates this node's statistics and propagates the value toward the root.
  // NOTE(review): dereferences parent_ unconditionally -- the root is
  // presumably a node type whose backpropagate() ends the chain; confirm
  // against inode.hpp.
  virtual void backpropagate(const double &value) {
  backprop_strat_->on_backpropagate(value);
  parent_->backpropagate(value);
  }
  /// Deletes all children (owned). config_ and backprop_strat_ are NOT
  /// deleted here.
  virtual ~InnerNode() {
  for (unsigned i = 0; i < children_.size(); ++i) {
  delete children_[i];
  }
  }
};
}
#endif
|
/* Drain all the messages currently in 'sock''s receive queue.
 *
 * Returns 0 on success, otherwise the error from nl_sock_cow__().
 * NOTE(review): nl_sock_cow__() presumably gives this code a private copy
 * of a shared socket before mutating it -- confirm in the defining file. */
int
nl_sock_drain(struct nl_sock *sock)
{
int error = nl_sock_cow__(sock);
if (error) {
return error;
}
/* Discard everything pending on the socket's file descriptor. */
return drain_rcvbuf(sock->fd);
}
Short PLACE voting explanation
Jameson Quinn Blocked Unblock Follow Following Nov 2, 2017
Problem 1: gerrymandering has weaponized wasted votes.
Solution 1: Use a voting method that minimizes wasted votes. That’s the idea behind #PropRep (proportional representation) methods. In particular, transferable vote methods such as STV or PLACE eliminate candidates and transfer votes until each winner has the same number of votes, and there are fewer wasted votes than it would take to have one more winner. (Note: there are plenty of videos explaining the STV vote-transfer process, which is largely the same for PLACE. For instance, this Scottish one.)
Problem 2: PropRep methods that countries use now have flaws such as complex ballots, or lack of individual accountability for politicians (as opposed to parties). In particular, they’d be slow to implement in the US, requiring new machines and newly-drawn larger districts.
Solution 2: Use a PropRep method that doesn’t have these problems, such as PLACE voting. (PLACE stands for “Proportional, locally-accountable, candidate endorsement”.) PLACE uses the same districts and voting machines we have today. Here are the basics of how PLACE works and why.
As a voter, you should be as free as possible to vote for any candidate. But you don’t want the ballot to list all the dozens of candidates statewide. So on the PLACE ballot, you can either choose a candidate running in your district from the list on the ballot, or write in one from somewhere else. If you like a party but not the local candidate for that party, and you don’t want to bother writing in another candidate from somewhere else, you can simply vote for the party without the local candidate. Here’s a sample ballot:
Winners should have at least some degree of broad-based appeal in their home district. In PLACE, that’s ensured by eliminating any candidate who gets less than 25% locally. (Of course, in the unlikely case that there’s no candidate with 25% in some district, the top one is not eliminated.)
If your chosen candidate cannot use your full vote — either because they clearly don’t have enough votes to win, or because they have more than enough and so each of those ballot still has some leftover voting strength after the first candidate wins—then you probably want your vote to still have an impact. So PLACE transfers your vote. Where? Well, most voters would probably want their vote to transfer first to the most similar candidates in the same party; then to those candidates in the same party who aren’t especially similar; then to the closest allies from other parties. So in PLACE, your ballot passes through those 3 groups, where “similar” is defined by the official endorsements your chosen candidate made publicly before the election. Within each of those groups, your ballot transfers in order of direct vote total.
In order to win, a candidate needs to get almost a full district worth of votes. For instance, in a state with 19 districts, they’d need 95% of an average district, which is 5% of the state as a whole, leaving under 5% of wasted votes. Until a full set of winners gets such a “quota” each, you eliminate the weakest candidates (the ones furthest behind the strongest candidate in their district) and transfer votes.
When one candidate wins in a district, all the other candidates from that district are immediately eliminated. That ensures that there will be one winner per district.
But if you voted against the winner in your district, they’re not a good representative for you. To ensure that you’ll still have a rep, your district will be assigned as “extra territory” to one winner from each party except the party that won locally.
That’s it.
For a more detailed, legalistic version of the process, you can look at the PLACE voting FAQ. There, you can also find a list of the various advantages of PLACE, and responses to the common questions or concerns people bring up. |
// Sending response for an incoming message.
// If the message still carries a pending request handle, mark the message
// as answered and start streaming the response; otherwise do nothing.
void
HTTPServerMarlin::SendResponse(HTTPMessage* p_message)
{
// NOTE(review): the cast assumes the opaque request handle was originally
// created from an HTTPRequest* by this server -- confirm at the call sites.
HTTPRequest* request = reinterpret_cast<HTTPRequest*>(p_message->GetRequestHandle());
if(request)
{
// Flag the message first so it is not answered twice.
p_message->SetHasBeenAnswered();
request->StartResponse(p_message);
}
}
<reponame>gusolsso/node-sp-auth-config
import { IStrategyDictItem, IOnpremiseTmgCredentials } from '../interfaces';
import * as url from 'url';
/**
 * Returns true when the site URL does not belong to any SharePoint Online
 * cloud domain (com/cn/de/mil-us/us), i.e. it is an on-premise deployment.
 *
 * Bug fix: `url.parse(...).host` is null for relative/invalid URLs; the
 * original crashed calling `.indexOf` on null. An unparseable host is now
 * treated as on-premise (no cloud domain matched).
 */
function isOnPremUrl(siteUrl: string): boolean {
  const host: string = url.parse(siteUrl).host || '';
  const cloudDomains = [
    '.sharepoint.com',
    '.sharepoint.cn',
    '.sharepoint.de',
    '.sharepoint-mil.us',
    '.sharepoint.us'
  ];
  return cloudDomains.every(domain => host.indexOf(domain) === -1);
}
/**
 * Type guard: true when the target is an on-premise URL and the supplied
 * credentials look like TMG (form-based) credentials.
 *
 * Fix: the parameter was previously declared as `IOnpremiseTmgCredentials`,
 * which makes a `T is IOnpremiseTmgCredentials` predicate a no-op for the
 * type checker; widening to `any` (backward compatible) lets callers narrow
 * an untyped credentials object.
 */
function isTmgCredentialsOnpremise(siteUrl: string, T: any): T is IOnpremiseTmgCredentials {
  const isOnPrem: boolean = isOnPremUrl(siteUrl);
  if (isOnPrem && T.username !== undefined && T.tmg) {
    return true;
  }
  return false;
}
/**
 * Builds the strategy dictionary entry for on-premise Forefront TMG
 * (form-based) authentication.
 */
export const getStrategie = (): IStrategyDictItem => ({
  id: 'OnpremiseTmgCredentials',
  name: 'Form-based authentication (Forefront TMG)',
  withPassword: true,
  target: ['OnPremise'],
  verifyCallback: isTmgCredentialsOnpremise
});
|
/*********************************************************************
 Allocate memory for a copy of 'string', copy the string into it and
 return the pointer to the new memory.  Terminates the process with
 exit code 1 when the allocation fails, so callers always receive a
 non-NULL pointer (behavior preserved from the original).
*********************************************************************/
char *
strset (char *string)
{
  /* +1 for the terminating NUL. */
  char *copy = (char *) malloc (strlen (string) + 1);
  if (copy == NULL)
    exit (1);       /* out of memory: keep original fail-fast behavior */
  strcpy (copy, string);
  return copy;
}
/**
 * Resolves the string restrictions of a typedef against the referred
 * (base) type's restrictions, producing a merged restriction object.
 *
 * @param refStringRestriction referred string restriction of typedef
 *        (may be null when the base type carries no restriction)
 * @throws DataModelException a violation in data model rule, e.g. a range
 *         restriction present on a string-typed node
 */
private void resolveStringRestriction(YangStringRestriction refStringRestriction)
        throws DataModelException {
    YangStringRestriction curStringRestriction = null;
    YangRangeRestriction refRangeRestriction = null;
    YangPatternRestriction refPatternRestriction = null;
    /*
     * Check that range restriction should be null when built-in type is
     * string.
     */
    if (!Strings.isNullOrEmpty(getRangeRestrictionString())) {
        DataModelException dataModelException = new DataModelException("YANG file error: Range restriction " +
                "should't be present for string data type.");
        // Attach parse-position info so the error points at the YANG source.
        dataModelException.setLine(lineNumber);
        dataModelException.setCharPosition(charPositionInLine);
        throw dataModelException;
    }
    /*
     * If referred restriction and self restriction both are null, no
     * resolution is required.
     */
    if (refStringRestriction == null && Strings.isNullOrEmpty(getLengthRestrictionString())
            && getPatternRestriction() == null) {
        return;
    }
    /*
     * If referred string restriction is not null, take value of length and
     * pattern restriction and assign.
     */
    if (refStringRestriction != null) {
        refRangeRestriction = refStringRestriction.getLengthRestriction();
        refPatternRestriction = refStringRestriction.getPatternRestriction();
    }
    // Merge the referred restrictions with this node's own restrictions.
    YangRangeRestriction lengthRestriction = resolveLengthRestriction(refRangeRestriction);
    YangPatternRestriction patternRestriction = resolvePatternRestriction(refPatternRestriction);
    /*
     * Check if either of length or pattern restriction is present, if yes
     * create string restriction and assign value.
     */
    if (lengthRestriction != null || patternRestriction != null) {
        curStringRestriction = new YangStringRestriction();
        curStringRestriction.setLengthRestriction(lengthRestriction);
        curStringRestriction.setPatternRestriction(patternRestriction);
    }
    // NOTE(review): unchecked cast to the class's generic T -- presumably T
    // is the extended-info type for string typedefs; confirm in the class.
    setResolvedExtendedInfo((T) curStringRestriction);
}
import { Routes } from '@angular/router';
import { LoginComponent } from './login.component';
import { ByLocationComponent } from './byLocation/byLocation.component';
import { ByCodeModuleGuard } from './by-code.module.guard';
import { SocieModuleGuard } from './socie.module.guard';
// Route table for this feature module.
// NOTE: order matters -- the router uses the first matching entry, so the
// parameterized 'byLocation/:id' route is listed before the parameterless
// 'byLocation' fallback.
export const routes: Routes = [
  {
    // Default route: login, gated by the by-code guard.
    path: '',
    component: LoginComponent,
    canActivate: [ByCodeModuleGuard],
  },
  {
    // Login for a specific "socie" id, gated by its own guard.
    path: 'socie/:id',
    component: LoginComponent,
    canActivate: [SocieModuleGuard],
  },
  {
    path: 'byLocation/:id',
    component: ByLocationComponent,
  },
  {
    path: 'byLocation',
    component: ByLocationComponent,
  },
];
|
<reponame>marians20/people_manager
import { Injectable } from '@angular/core';
import { HttpClient, HttpErrorResponse } from '@angular/common/http';
import { Observable, throwError } from 'rxjs';
import { catchError, map } from 'rxjs/operators';
import { MatSnackBar } from '@angular/material/snack-bar';
@Injectable({
  providedIn: 'root'
})
export class RestService {
  /** Base URL all requests are issued against; assign via `baseUrl` before use. */
  private _baseUrl: string;

  public set baseUrl(value: string) {
    this._baseUrl = value;
  }

  constructor(
    private httpClient: HttpClient,
    private snackBar: MatSnackBar) { }

  /** GETs the collection and shows a snack bar with the retrieved count. */
  public get<T>(params?: any): Observable<T[]> {
    return this.httpClient.get<T[]>(this._baseUrl, { params }).pipe(
      map(response => {
        this.openSnackBar(`${response.length} item(s) retrieved.`);
        return response;
      })
    );
  }

  /** GETs the `/count` endpoint. */
  public getCount(params?: any): Observable<number> {
    return this.httpClient.get<number>(`${this._baseUrl}/count`, { params });
  }

  /** POSTs a new entity to the base URL. */
  public post(data?: any): Observable<any> {
    return this.httpClient.post(this._baseUrl, data);
  }

  /** PUTs an update for the entity with the given id. */
  public put(id: number, data?: any): Observable<any> {
    return this.httpClient.put(`${this._baseUrl}/${id}`, data);
  }

  /** DELETEs the entity with the given id. */
  public delete(id: number): Observable<any> {
    // Bug fix: the path previously used a backslash (`...\${id}`), producing
    // URLs like 'base\1' -- inconsistent with put() and broken REST routing.
    return this.httpClient.delete(`${this._baseUrl}/${id}`);
  }

  /** Shows a short top-centered snack bar notification. */
  private openSnackBar(message: string, duration: number = 500): void {
    this.snackBar.open(message, 'Close', {
      duration,
      horizontalPosition: 'center',
      verticalPosition: 'top',
    });
  }

  // NOTE(review): declared but not wired into any request pipeline; kept for
  // API compatibility -- consider piping catchError(this.handleError).
  private handleError(error: HttpErrorResponse): Observable<never> {
    return throwError(error);
  }
}
|
<filename>src/third_party/mozjs/extract/js/src/frontend/ParseNodeVerify.h<gh_stars>0
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
* vim: set ts=8 sts=2 et sw=2 tw=80:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef frontend_ParseNodeVerify_h
#define frontend_ParseNodeVerify_h
#include "ds/LifoAlloc.h" // LifoAlloc
#include "frontend/ParseNode.h" // ParseNode
#include "frontend/SyntaxParseHandler.h" // SyntaxParseHandler::Node
namespace js {
namespace frontend {
// In most builds, examine the given ParseNode and crash if it's not
// well-formed. (In late beta and shipping builds of Firefox, this does
// nothing.)
//
// This returns true on success, and false only if we hit the recursion limit.
// If the ParseNode is actually bad, we crash.
#ifdef DEBUG
// Debug builds: the real implementation lives in the .cpp file.
[[nodiscard]] extern bool CheckParseTree(JSContext* cx, const LifoAlloc& alloc,
                                         ParseNode* pn);
#else
// Non-debug builds: verification compiles away to an immediate success.
[[nodiscard]] inline bool CheckParseTree(JSContext* cx, const LifoAlloc& alloc,
                                         ParseNode* pn) {
  return true;
}
#endif
// Syntax-parse-handler nodes carry no tree structure to examine, so this
// overload always succeeds regardless of build configuration.
[[nodiscard]] inline bool CheckParseTree(JSContext* cx, const LifoAlloc& alloc,
                                         SyntaxParseHandler::Node pn) {
  return true;
}
} /* namespace frontend */
} /* namespace js */
#endif // frontend_ParseNodeVerify_h
|
A MAN was decapitated and killed by a model remote-controlled helicopter in Brooklyn, sources said.
The victim, Roman Pirozek, was on the corner of Shore Parkway and Bay 44th Street around 3:40 p.m., when his toy aircraft boomeranged and chopped off his head, law enforcement sources told the New York Post.
Mr Pirozek was slashed by the rotor of the remote controlled device during a stunt gone wrong. The blades cut off a piece of his skull, in turn scalping him, WNBC-TV reports.
An official confirmed Mr Pirozek also sheared off part of his shoulder.
When police arrived, Pirozek had already passed. Pictures show the body near the remote control.
Three people - members of his helicopter club, Seaview Rotary Wings - were with the victim at the time of the accident.
He was the vice president of his local helicopter hobby club, his Facebook page reveals. The page has since been removed.
There are reports his father, Roman Pirozek Sr., was also at the scene at the time.
Mr. Pirozek’s sister, Amy Pirozek, who said he worked as a cargo handler for an airline, described Pirozek as a brother who "would always protect me".
“He was the best person ever. He had the biggest heart,” she told the Wall Street Journal. “I think he was just doing it because it was his day off.”
“He was a good brother. I’ll never forget that.”
The fatal helicopter is reportedly a Taiwanese model, costing between $300 to more than $1000.
Neighbours described him as "an avid hobbyist who is frequently seen in the area flying model airplanes," reports the Daily Mail.
"He was known to be aggressive in his flying and often executed tricks. He was executing a trick when he was struck," a police source told the Wall Street Journal.
Pirozek developed a YouTube channel where he would post footage of various tricks and flights using a Trex 700 model helicopter. In one video, he drops the $1,500 model from above, only to restart the rotor just inches from his head.
The helicopter's blade span measures 62 inches, its rotor spins at more than 2,000 rpm.
“He went to good parts of the country to shows — he loved it. It was his father’s hobby and he picked it up from him. His father must be destroyed,” said neighbour Victor Tommaso, 68.
The field where he died is sanctioned by the Academy of Model Aeronautics, an unidentified friend told the Journal. In his final Facebook post on August 25, Pirozek wrote: "Great day for flying, the new frame brace is working great, breaking in some new packs i just got, they are the best that I have flown so far and loving them". Officials are investigating.
This is a similar model to the remote control helicopter that killed a 19 y/o hobbyist in Brooklyn. pic.twitter.com/vFUKd5q2Gm — Shimon Prokupecz (@shimon4ny) September 5, 2013
photo: detectives looking at remote control helicopter that killed 19 y/o near Coney Island. pic.twitter.com/SP6Hx17BjO — Shimon Prokupecz (@shimon4ny) September 5, 2013
Sources say 19 y/o killed by blades from model helicopter was doing "tricks" when something went wrong. Known for performing risky tricks. — Shimon Prokupecz (@shimon4ny) September 5, 2013
Scene at Calvert Vaux Park in Bklyn where 19 yo was killed flying his model helicopter. Live at 6pm on #cbs2news pic.twitter.com/iDA8sBb5jb — Jessica Schneider (@SchneiderJess) September 5, 2013 |
<reponame>kgb0255/deepCR<filename>deepCR/test/test_evaluate.py
import os
import numpy as np
import pytest
import deepCR.evaluate as evaluate
from deepCR.model import deepCR
def test_eval():
    """ROC evaluation on in-memory arrays, with and without mask dilation."""
    model = deepCR()
    frames = np.zeros((10, 24, 24))
    thresholds = np.linspace(0, 1, 10)
    tpr, fpr = evaluate.roc(model, image=frames, mask=frames, thresholds=thresholds)
    assert tpr.shape == (10,)
    (tpr, fpr), (tpr1, fpr1) = evaluate.roc(model, image=frames, mask=frames, thresholds=thresholds, dilate=True)
    assert tpr1.shape == (10,)
def test_eval_gen():
    """ROC evaluation with image/mask data supplied as lists of .npy paths.

    Fixes: the original deleted the temp tree with hand-rolled os.walk loops
    (duplicated three times) and skipped cleanup entirely when evaluate.roc
    raised; shutil.rmtree inside try/finally guarantees cleanup.
    """
    import shutil

    mdl = deepCR()
    cwd = os.getcwd() + '/'
    # Remove leftovers from a previous aborted run, if any.
    shutil.rmtree(cwd + 'temp', ignore_errors=True)
    os.mkdir('temp')
    os.mkdir('temp/image')
    os.mkdir('temp/dark')
    try:
        # Generate fake data files
        var = np.zeros((2, 24, 24))
        for i in range(6):
            np.save(cwd + 'temp/image/%d.npy' % i, var)
            np.save(cwd + 'temp/dark/%d.npy' % i, var)
        image_list = [cwd + 'temp/image/' + f for f in os.listdir(cwd + 'temp/image')]
        dark_list = [cwd + 'temp/dark/' + f for f in os.listdir(cwd + 'temp/dark')]
        # Evaluate
        tpr, fpr = evaluate.roc(mdl, image=image_list, mask=dark_list, sky=100, thresholds=np.linspace(0, 1, 10))
        assert tpr.shape == (10,)
    finally:
        # Always remove the generated files, even if the evaluation failed.
        shutil.rmtree(cwd + 'temp', ignore_errors=True)
def test_eval_gen_lacosmic():
    """ROC evaluation via roc_lacosmic with .npy file inputs.

    Fixes: same cleanup defects as test_eval_gen -- duplicated os.walk
    deletion loops and no cleanup when the evaluation raises.
    """
    import shutil

    cwd = os.getcwd() + '/'
    # Remove leftovers from a previous aborted run, if any.
    shutil.rmtree(cwd + 'temp', ignore_errors=True)
    os.mkdir('temp')
    os.mkdir('temp/image')
    os.mkdir('temp/dark')
    try:
        # Generate fake data files
        var = np.zeros((2, 24, 24))
        for i in range(6):
            np.save(cwd + 'temp/image/%d.npy' % i, var)
            np.save(cwd + 'temp/dark/%d.npy' % i, var)
        image_list = [cwd + 'temp/image/' + f for f in os.listdir(cwd + 'temp/image')]
        dark_list = [cwd + 'temp/dark/' + f for f in os.listdir(cwd + 'temp/dark')]
        # Evaluate
        tpr, fpr = evaluate.roc_lacosmic(image_list, dark_list, sigclip=np.linspace(5, 10, 10), ignore=None, sky=None,
                                         n_mask=1, seed=1, objlim=2, gain=1, dilate=False, rad=1)
        assert tpr.shape == (10,)
    finally:
        # Always remove the generated files, even if the evaluation failed.
        shutil.rmtree(cwd + 'temp', ignore_errors=True)
if __name__ == '__main__':
    # Allow running this test module directly, without pytest.
    test_eval()
    test_eval_gen()
    test_eval_gen_lacosmic()
|
# Read N (the starting number) and K (how many disliked digits follow).
nums = [int(x) for x in input().split()]
N,K = nums[0],nums[1]
# The set of disliked digits (ints 0-9).
D = set([int(x) for x in input().split()])
# Candidate answer; search counts upward from N.
ans = N
def solve(M, digits=None):
    """Return True when no decimal digit of M is a disliked digit.

    Args:
        M: non-negative integer to check.
        digits: optional set of disliked digits (ints 0-9). Defaults to the
            module-level set D, preserving the original solve(M) call form.
    """
    banned = D if digits is None else digits
    return all(int(ch) not in banned for ch in str(M))
# Count upward from the initial candidate until one passes the digit
# check, then print the first acceptable number.
while not solve(ans):
    ans += 1
print(ans)
def _get_files(self):
expanded_paths = set()
for path in self.options["path"]:
expanded_paths.update(glob.glob(path))
all_files = set()
for path in expanded_paths:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for filename in files:
if filename.endswith(".robot") or filename.endswith(
".resource"
):
all_files.add(os.path.join(root, filename))
else:
all_files.add(path)
return all_files |
import { useState, useCallback } from 'react';
import { resolveValue } from '../utils';
/**
 * Signature of the setters provided by `useCounter`.
 * Returned as the second element of the `useNumber` tuple.
 * @public
 */
export interface NumberSetter {
  /** Sets the state to a given value */
  set: React.Dispatch<React.SetStateAction<number>>;
  /** Increments the state by a given value. Defaults to `1` */
  inc: (value?: number) => void;
  /** Decrements the state by a given value. Defaults to `1` */
  dec: (value?: number) => void;
  /** Multiplies the state by a given value. */
  times: (value: number) => void;
  /** Divides the state by a given value. */
  divide: (value: number) => void;
  /** Resets the state back to its initial value */
  reset: () => void;
}
/**
 * Simple hook to keep a numeric state with some useful setters.
 *
 * @public
 * @param initialState - Either the initial value or a function that resolves to it for lazy loading.
 * @returns A tuple with the state value, and an object with its setters.
 */
export const useNumber = (
  initialState: number | (() => number),
): [number, NumberSetter] => {
  const [value, set] = useState<number>(initialState);
  // Each setter is memoized so consumers can safely use them as effect deps.
  const inc: NumberSetter['inc'] = useCallback(
    (increment = 1) => set(current => current + increment),
    [set],
  );
  const dec: NumberSetter['dec'] = useCallback(
    (decrement = 1) => set(current => current - decrement),
    [set],
  );
  const times: NumberSetter['times'] = useCallback(
    multiplier => set(current => current * multiplier),
    [set],
  );
  // Fix: was annotated as NumberSetter['times']; 'divide' is the matching member.
  const divide: NumberSetter['divide'] = useCallback(
    divisor => set(current => current / divisor),
    [set],
  );
  const reset: NumberSetter['reset'] = useCallback(
    () => set(resolveValue(initialState)),
    [set, initialState],
  );
  return [value, { set, inc, dec, times, divide, reset }];
};
|
#include <iostream>
using namespace std;

// Fixed: all arrays now share one size (the original mixed a[10000] with
// m[1001]/k[1001], so a was oversized while m/k could overflow first).
const int MAXN = 10000;
long long n, a[MAXN], m[MAXN], k[MAXN];

// Reads n pairs (m, k). Prints "YES" if there exist two indices i != j with
// m[i]+k[i] == m[j] and m[j]+k[j] == m[i], otherwise "NO".
// Fixed: `main(){` relied on implicit int, which is ill-formed C++.
int main() {
    cin >> n;
    for (long long i = 0; i < n; i++) {
        cin >> m[i] >> k[i];
        a[i] = m[i] + k[i];
    }
    for (long long i = 0; i < n; i++) {
        for (long long j = i + 1; j < n; j++) {
            if (a[i] == m[j] && a[j] == m[i]) {
                cout << "YES";
                return 0;
            }
        }
    }
    cout << "NO";
    return 0;
}
|
def transform(self, indata, depth, parameters=None, uritransform=None):
    """Run the wrapped transformer over indata and return the result tree.

    Args:
        indata: the parsed input document to transform.
        depth: document depth used to adapt the transformer config.
        parameters: optional dict of transform parameters (defaults to {}).
        uritransform: optional callable applied to links in the output tree.
    """
    if parameters is None:
        parameters = {}
    # Adapt the configured stylesheet settings to this document's depth.
    if self.config:
        adapted_config = self.t.getconfig(self.config, depth)
    else:
        adapted_config = None
    outdata = self.t.transform(indata, adapted_config, parameters)
    # Some transformer backends need a serialize/re-parse round-trip before
    # the result can be used as a regular tree.
    # NOTE(review): presumably normalizes the result object type -- confirm
    # against the transformer implementation.
    if self.t.reparse:
        outdata = etree.parse(BytesIO(etree.tostring(outdata)))
    if uritransform:
        self.transform_links(outdata.getroot(), uritransform)
    return outdata
/**
 * Creation Date: 30.05.2018 11:18<br/>
 * © Information Design One AG
 *
 * Registers a JSR-303 bean validator and hooks it into Spring Data REST's
 * repository event pipeline so entities are validated on create and save.
 *
 * @author Eduard Beutel
 */
@Configuration
public class ValidatorConfig extends RepositoryRestConfigurerAdapter
{
    /** Primary bean validator, backed by LocalValidatorFactoryBean. */
    @Bean
    @Primary
    Validator validator() {
        return new LocalValidatorFactoryBean();
    }

    /** Wires the validator into the beforeCreate and beforeSave REST events. */
    @Override
    public void configureValidatingRepositoryEventListener(ValidatingRepositoryEventListener validatingListener) {
        Validator validator = validator();
        validatingListener.addValidator("beforeCreate", validator);
        validatingListener.addValidator("beforeSave", validator);
    }
}
// Helper for numerical solution of tau from noise formula.
// x[0]   : trial value of tau
// par[0] : number of terms npar
// NOTE(review): parameter layout inferred from the indexing below --
// par[1..npar] are per-term factors multiplied by tau, par[npar+1..2*npar]
// the per-term weights qp; confirm with the caller that sets up this TF1.
Double_t taufunNoise( Double_t* x, Double_t* par ) {
  Double_t tau = x[0];
  Int_t npar = par[0];
  Double_t sum = 0.0;
  for( Int_t ipar = 0; ipar < npar; ipar++ ) {
    Double_t tausii = tau*par[ipar+1];
    Double_t qp = par[npar+ipar+1];
    // pow(v, 2) replaced by plain multiplication: identical value, avoids
    // the general transcendental pow() inside this per-evaluation loop.
    Double_t onePlusTausii = 1.0 + tausii;
    sum += ( qp*qp*( 2.0*tausii + tausii*tausii )/
             ( onePlusTausii*onePlusTausii ) - 1.0 )*( 3.0/( 3.0 + tausii ) );
  }
  return sum;
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Each optional policy pointer is freshly allocated and deep-copied so the
// copy shares no memory with the receiver.
func (in *Policies_StatusARM) DeepCopyInto(out *Policies_StatusARM) {
	// Shallow-copy all value fields first; pointer fields are fixed up below.
	*out = *in
	if in.ExportPolicy != nil {
		in, out := &in.ExportPolicy, &out.ExportPolicy
		*out = new(ExportPolicy_StatusARM)
		(*in).DeepCopyInto(*out)
	}
	if in.QuarantinePolicy != nil {
		in, out := &in.QuarantinePolicy, &out.QuarantinePolicy
		*out = new(QuarantinePolicy_StatusARM)
		(*in).DeepCopyInto(*out)
	}
	if in.RetentionPolicy != nil {
		in, out := &in.RetentionPolicy, &out.RetentionPolicy
		*out = new(RetentionPolicy_StatusARM)
		(*in).DeepCopyInto(*out)
	}
	if in.TrustPolicy != nil {
		in, out := &in.TrustPolicy, &out.TrustPolicy
		*out = new(TrustPolicy_StatusARM)
		(*in).DeepCopyInto(*out)
	}
}
def replace_reducer(next_reducer):
    """Swap the store's current reducer and re-dispatch the init action.

    NOTE(review): relies on the module-level `_state`, `dispatch` and
    `__INIT` defined elsewhere in this module.
    """
    _state["current_reducer"] = next_reducer
    dispatch({"type": __INIT})
Influence of Tumour Necrosis Factor‐α on the Expression of Fc IgG and IgA Receptors, and Other Markers by Cultured Human Blood Monocytes and U937 Cells
The expression of Fc receptors for IgG (FcγR) and IgA (FcαR) and of various other antigens on the human monocytic cell line U937 and peripheral blood monocytes, under stimulation with human recombinant tumour necrosis factor‐α (TNF‐α) and other cytokines, was investigated by flow cytometry. TNF‐α, as well as interferon‐γ (IFN‐γ) or interleukin‐6 (IL‐6) had a significant up‐regulating effect on U937 expression of FcγRI/CD64. Furthermore, the action of TNF‐α was augmented by IL‐6, and more evidently by IFN‐γ. IFN‐α alone had only a marginal effect, but was able to increase the TNF‐α‐driven FcγRI expression. In contrast to U937 cells, TNF‐α did not enhance significantly FcγRI expression on human monocytes. Interestingly, on both U937 cells and monocytes, FcαR was augmented markedly by TNF‐α. Furthermore, TNF‐α induced the expression of HLA‐DR and HLA‐DP antigens on monocytes and U937 cells. The expression of FcγRII/CD32, FcγRIII/CD16, CD14, complement receptor type 1 (CR1/CD35), CR4 (CD11c/CD18), and MHC class‐I antigens, was not influenced significantly by TNF‐α. The results of this study show that TNF‐α may act on human mononuclear phagocytes, alone or in combination with other cytokines, by modulating the expression of various cell‐surface antigens.
<gh_stars>10-100
from dpmModule.util.dpmgenerator import IndividualDPMGenerator
from dpmModule.util.configurations import export_configuration, export_enhancer_configuration
import json
import argparse
def get_args():
    """Build the CLI parser for the DPM test runner and parse sys.argv."""
    arg_parser = argparse.ArgumentParser("DPM Test argument")
    arg_parser.add_argument("--job", type=str, help="Target class' Korean name to test DPM")
    arg_parser.add_argument("--level", type=int, default=None, help="Character's level, default depends on ulevel")
    arg_parser.add_argument("--ulevel", type=int, default=6000, help="Union level, default is 6000")
    arg_parser.add_argument("--time", type=int, default=1800, help="Test time in seconds, default is 1800(30 min)")
    arg_parser.add_argument("--cdr", type=int, default=0, help="Cooltime reduce (hat potential) in seconds, default is 0")
    arg_parser.add_argument("--log", action="store_true", help="print the log of the test")
    arg_parser.add_argument("--stat", action="store_true", help="print the statistics of the test")
    arg_parser.add_argument("--task", default="dpm")
    return arg_parser.parse_args()
def test():
    """Entry point: dispatch to the handler that matches --task."""
    args = get_args()
    handlers = {"dpm": dpm, "conf": conf}
    handler = handlers.get(args.task)
    # Unknown tasks are silently ignored, matching the original if/elif chain.
    if handler is not None:
        handler(args)
def conf(args):
    """Export a job's skill configuration to '<job>.conf.json'.

    Pulls the raw skill and V-enhancer configurations for the job, prunes
    zero/no-op modifier entries, tags each skill with a type and its
    V-enhancement metadata, and dumps the result as pretty-printed JSON.

    NOTE(review): indentation was reconstructed from the control flow of a
    whitespace-stripped source; confirm against the original module.
    """
    # '-' is used in CLI job names where the data files use '/'.
    job_real = args.job[:].replace("-", "/")
    configuration = export_configuration(job_real)
    vEhc = export_enhancer_configuration(job_real)
    regularized_configuration = {}
    for k_name, v in configuration.items():
        new_v = v
        # 99999999 is apparently the in-data sentinel for "no cooldown".
        if new_v['cooltime'] == 99999999:
            new_v['cooltime'] = -1
        if 'static_character_modifier' in new_v:
            # Keep only non-zero entries, then drop the raw field.
            for modifier_k, modifier_v in new_v.items():
                if modifier_v != 0:
                    new_v[modifier_k] = modifier_v
            new_v.pop('static_character_modifier')
        # Classify the skill type from which fields are present.
        # NOTE(review): this re-check of 'static_character_modifier' can
        # never be true right after the pop above, so the BuffSkill branch
        # looks unreachable -- confirm intent.
        if 'static_character_modifier' in new_v:
            new_v['type'] = 'BuffSkill'
        elif 'summondelay' in new_v:
            new_v['type'] = 'SummonSkill'
        else:
            new_v['type'] = 'DamageSkill'
        new_v.pop('explanation')
        if '_static_skill_modifier' in new_v:
            if '0' in new_v['_static_skill_modifier']:
                # Keyed by enhancement level: prune zero entries per level.
                for k in new_v['_static_skill_modifier']:
                    pops = [k1 for k1, v in new_v['_static_skill_modifier'][k].items() if v == 0]
                    for k2 in pops:
                        new_v['_static_skill_modifier'][k].pop(k2)
            else:
                # Flat modifier dict: prune zero entries.
                pops = [k1 for k1, v in new_v['_static_skill_modifier'].items() if v == 0]
                for k2 in pops:
                    new_v['_static_skill_modifier'].pop(k2)
        regularized_configuration[k_name] = v
    # Attach V-enhancement metadata in priority order.
    for idx, wrps in enumerate(vEhc.enhancer_priority):
        for wrp in wrps:
            skill_name = wrp.name
            static_modifier_pos = sorted(regularized_configuration[skill_name]["_static_skill_modifier"].keys())[-1]
            # // 60: presumably the per-level increment over 60 levels -- confirm.
            v_increment = regularized_configuration[skill_name]["_static_skill_modifier"][static_modifier_pos]['pdamage_indep'] // 60
            regularized_configuration[wrp.name]["enhanced_by_v"] = True
            regularized_configuration[wrp.name]["upgrade_priority"] = idx
            regularized_configuration[wrp.name]["v_increment"] = v_increment
            regularized_configuration[wrp.name]["v_crit"] = regularized_configuration[skill_name]["_static_skill_modifier"][static_modifier_pos].get("crit", False)
    # Mark 5th-tier (V) skills with their use/upgrade priorities.
    for v_priority in vEhc.v_skill_priority:
        skill_name = v_priority['target'].name
        regularized_configuration[skill_name]['tier'] = 5
        regularized_configuration[skill_name]['use_priority'] = v_priority['useIdx']
        regularized_configuration[skill_name]['upgrade_priority'] = v_priority['upgIdx']
    # Rename '_static_skill_modifier' to 'modifier', dropping it when empty.
    for k_name in regularized_configuration:
        if '_static_skill_modifier' in regularized_configuration[k_name]:
            if "0" in regularized_configuration[k_name]['_static_skill_modifier']:
                regularized_configuration[k_name]['modifier'] = regularized_configuration[k_name]['_static_skill_modifier']["0"]
                regularized_configuration[k_name].pop('_static_skill_modifier')
            else:
                regularized_configuration[k_name]['modifier'] = regularized_configuration[k_name]['_static_skill_modifier']
                regularized_configuration[k_name].pop('_static_skill_modifier')
            if len(regularized_configuration[k_name]['modifier']) == 0:
                regularized_configuration[k_name].pop('modifier')
    with open(f"{args.job}.conf.json", "w", encoding="utf8") as f:
        json.dump(regularized_configuration, f, ensure_ascii=False, indent=4)
def dpm(args):
    """Run a DPM simulation for the requested job and print the result.

    Bug fix: the original wrapped get_dpm() in try/finally and printed `dpm`
    in the finally block; when get_dpm() raised, `dpm` was unbound and the
    finally clause raised a NameError that masked the real error. The result
    is now printed only after a successful run.
    """
    generator = IndividualDPMGenerator(args.job)
    generator.set_runtime(args.time * 1000)
    result = generator.get_dpm(
        spec_name=str(args.ulevel),
        ulevel=args.ulevel,
        cdr=args.cdr,
        printFlag=args.log,
        statistics=args.stat or args.log,
    )
    print(args.job, f"{result:,.3f}")
if __name__ == "__main__":
    # CLI entry point; see get_args() for the supported options.
    test()
|
Ocular vestibular evoked myogenic potentials (o-VEMPs) testing in cervicogenic vertigo and its relation to radiological findings: a correlation study
Abstract Background: The term cervicogenic vertigo is reserved for cases where the suspected mechanism is proprioceptive. For a diagnosis of cervicogenic vertigo, it is essential to exclude other causes of vertigo. Generally, the patient has no hearing complaints and characteristically the symptoms are elicited by neck massage or neck vibration. The ocular vestibular evoked myogenic potential (o-VEMP) is a test for utricular function that has been introduced and validated. Objectives: To assesses o-VEMPs in patients with cervicogenic vertigo using 500 Hz tone bust stimuli at 95 dB nHL. Consequently, correlate the relation between o-VEMPs test results and radiological imaging, in the form of MRI cervical spine and CT angiography of the vertebral artery. Subjects: Fifty two subjects participated in the study; the group comprised 32 patients with diagnosed cervicogenic vertigo and also included 20 healthy matched adults. Methods: A random sample of consenting case and control subjects were obtained. MRI cervical spine with CT angiography of vertebral artery was done with o-VEMPs recordings. Results: Abnormal MRI cervical spines were found in all study group participants. The results for the CT angiography were variable with abnormal o-VEMPs latencies. Conclusion: A link co-exists between utricle and superior vestibular nerve in patients with cervicogenic vertigo. This may play a role in the pathogenesis of vertigo in these affected patients. |
/**
 * Simplifies implementing a cell renderer, works around oddities in look and feels.
 *
 * @param <E> the type of values this renderer can be used for
 */
public abstract class DMDircListCellRenderer<E> implements ListCellRenderer<E> {

    /** A version number for this class. */
    private static final long serialVersionUID = 1;

    /** Renderer that supplies the component this class decorates. */
    private final ListCellRenderer<? super E> parentRenderer;

    /** Fallback label, created lazily when the parent supplies no JLabel. */
    private JLabel label;

    public DMDircListCellRenderer(final ListCellRenderer<? super E> parentRenderer) {
        this.parentRenderer = parentRenderer;
    }

    /**
     * Renders the cell on the given label.
     *
     * @param label      Label to render
     * @param value      Object to render
     * @param index      Index of the cell in the list
     * @param isSelected Is the cell selected
     * @param hasFocus   Does the cell have focus
     */
    protected abstract void renderValue(final JLabel label, final E value,
            final int index, final boolean isSelected, final boolean hasFocus);

    @Override
    public Component getListCellRendererComponent(final JList<? extends E> list,
            final E value, final int index, final boolean isSelected, final boolean cellHasFocus) {
        final Component delegate = parentRenderer.getListCellRendererComponent(list, value,
                index, isSelected, cellHasFocus);
        if (delegate instanceof JLabel) {
            // The look and feel gave us a label: render directly onto it.
            renderValue((JLabel) delegate, value, index, isSelected, cellHasFocus);
            return delegate;
        }
        // Otherwise fall back to our own lazily created label.
        if (label == null) {
            label = new JLabel();
        }
        renderValue(label, value, index, isSelected, cellHasFocus);
        return label;
    }
}
def meanPrecision(self, doc_IDs_ordered, query_ids, qrels, k):
    """Compute the mean precision@k over a set of queries.

    Builds a relevance set per query from the qrels judgements, scores each
    query with self.queryPrecision, and averages the results.

    :param doc_IDs_ordered: list of ranked document-ID lists, one per query
    :param query_ids: list of query identifiers (parallel to doc_IDs_ordered)
    :param qrels: iterable of judgement dicts with "query_num" and "id" keys
    :param k: rank cutoff passed through to queryPrecision
    :return: tuple of (mean precision across queries, list of per-query precisions)
    """
    # Map each query id to the set of relevant document ids.
    relevant = {qid: set() for qid in query_ids}
    for judgement in qrels:
        qnum = int(judgement["query_num"])
        # NOTE(review): membership test assumes query_ids contain ints;
        # string ids would never match int(qnum) — confirm against callers.
        if qnum in relevant:
            relevant[qnum].add(int(judgement["id"]))
    # Score every query; indexing (not zip) preserves the original
    # IndexError if doc_IDs_ordered is shorter than query_ids.
    per_query = [
        self.queryPrecision(doc_IDs_ordered[i], qid, relevant[qid], k)
        for i, qid in enumerate(query_ids)
    ]
    return sum(per_query) / len(query_ids), per_query
For reasons opaque and entangled, I am meeting Ridley Scott in Berlin, crashing the German junket for his new film Alien: Covenant. The location is a grand hotel in the old east of the city and I know I’m in the right place because of a large poster sitting on an easel. It shows the silhouetted image of a xenomorph – the classic “big boy” from Scott’s 1979 original Alien – with drool cascading from its unnerving gnashers. Only one word is written on the poster in block capitals: “LAUF”. Nope, Scott has not decided to make a zany space comedy. The injunction is a scream: “RUN”.
Covenant is, rightly, being touted as a strong, terrifying return to form for the 38-year-old Alien franchise, now in its sixth incarnation, and there’s certainly a buzz among the German handlers and journalists as we wait for our audience with Scott. “He’s a huge director here,” explains one. “It is Spielberg and him, really, that people know about. But you are from the UK, it must be the same there.”
Alien: Covenant review – Ridley Scott's latest space exploration feels all too familiar Read more
This makes me think, and leaves me a little unsure. It’s certainly not hard to make a case that Scott is Britain’s greatest living director. His work is era-defining, stylistically pioneering: Alien, Blade Runner, Thelma & Louise and Gladiator are all in the canon. And as he’s got older – he’ll turn 80 in November – he’s kept working at a demented pace, rarely letting his standards slip. His last film, The Martian, from 2015, starring Matt Damon as a homesteader on Mars, was actually his most successful: banking $630m worldwide and winning a Golden Globe for best comedy or musical (an exceptional feat, given that it was neither).
Some of it is art. But fundamentally, I entertain
Scott, who grew up in Stockton-on-Tees, was made a Sir in 2003 and Peter Blake honoured him a spot on an updated lineup of his Sgt Pepper’s cover in 2012. He’s a long way from underrated or starved of appreciation. But somehow he doesn’t receive the fond affection reserved for Ken Loach and Mike Leigh, or the reverence that Stephen Frears or even Danny Boyle get. “Most people in Britain don’t even know what Ridley looks like,” Alan Parker, the director of Bugsy Malone and The Commitments, once told me. “He doesn’t frequent the usual watering holes – he wouldn’t be seen dead there, wasting his time.”
Mulling this over, I get the nod that Scott is ready for me. Walking into the suite, I find him rearranging the furniture. “I can’t have those soft chairs,” he explains, his accent a bit Stockton, a bit Hollywood. “They’re no good for my back.” If, as Alan Parker suggests, you are struggling to place the face, Scott has something of the Bryan Cranston about him. His hair, once rusty red, is gradually turning metallic silver, his moustache holding out longest. He’s 5ft 7in, give or take, and is dressed today in his standard film-set attire: black fleece jacket and sensible shoes.
Why, I wonder aloud, don’t we see more of Scott? He looks perplexed by the inquiry, perhaps understandably, but answers gnomically: “I do enough.”
It’s a reasonable point. Already this year, Scott has been executive producer on Tom Hardy’s BBC drama Taboo and now there’s Alien: Covenant. Before the end of 2017, we’ll have the long-awaited follow-up to his 1982 film Blade Runner, based on a concept by Scott and writer Hampton Fancher. He says, “Alcon Entertainment were about to buy the title and they said to me, ‘Look, we’re about to pay God knows how many millions – do you think there’s a sequel here?’ And I said, ‘Absolutely.’ They said, ‘What is it?’ And I said, ‘I’ll tell you when you pay me!’”
Blade Runner 2049 is set 30 years on and follows a new LAPD blade runner – the special police operatives charged with “retiring” rogue replicants, androids indistinguishable from humans – played by Ryan Gosling. He sets out to track down the disillusioned, long-vanished blade runner Rick Deckard (Harrison Ford, reprising his role from the original). Scott couldn’t find the time to direct – Denis Villeneuve, who made Sicario and last year’s Arrival, has been anointed – but his fingerprints are all over the reboot. “It’s good,” he says, “very good.”
Is it funny, wry-not-haha, that there’s so much excitement about the new Blade Runner when the first one… “Bombed?” Scott says. “Yeah, I know, but I knew it was good. This goes on to what I learned from getting beaten up. Pauline Kael, do you know who that was?”
The New Yorker film critic from the 1970s and 80s, right? “Exactly. She spent three pages destroying Blade Runner and me. Even to the fact that I had a beard. I couldn’t believe it, it was personal. I never met her in my life and it was really distressing.”
Scott shakes his head, July 1982 suddenly seeming like a couple of hours ago – though, to be fair to Kael, reading her review now, while she clearly didn’t like the film, she kept any opinions she had on Scott’s facial hair to herself. “But after that moment, I never, ever read press again,” he continues. “Even if it’s glowing, best not read it, because you think you own the world. If it’s killer, best not read it because you think you’ve failed. You have to be your own critic.”
And, in a way, I’ve got my answer as to why Scott makes his films and then keeps his head down.
Scott has a fractious, on-off association with the Alien films. When the original script – about the crew of a spaceship who get picked off one by one by an extraterrestrial with anger-management issues – was doing the rounds in Hollywood in the 1970s, Scott found out that he was fifth in line to direct it. “I remember reading it: it took me an hour and 15 minutes, because I kind of speed-read – voom, voom…” says Scott, who at that time had made just one feature (1977’s The Duellists, set in the Napoleonic wars). “Then I had to wait four hours till Hollywood woke up to say, ‘Yes, I’ll do it.’ And I was anxious at that time because I knew there were others before me, including, for some bizarre reason, Robert Altman. But there was a gong in my head and I went ‘Daaaaamn! I know what to do.’”
The script that Scott read had an all-male crew, but there was a change of heart at 20th Century Fox and two of the characters were switched to women, including Ellen Ripley, the last surviving member of the ship. “I cast Ripley really late,” says Scott. “Two weeks out, we still hadn’t found her. Then somebody came up, I think it was Warren Beatty, and said, ‘There’s this woman, on off-Broadway, on the theatre boards called Sigourney Weaver, you should meet with her. Interesting.’ So I called her, went to New York, she walks in, she must have been 6ft 6in, with an afro, so she’s 7ft 2in. And I was like a midget, and I had dinner with her. That was it!”
Neither Scott nor Weaver, apparently, realised it, but Ripley would become an iconic heroine. “It hadn’t dawned on me,” admits Scott. “After the event, I went, ‘Oh, OK, yeah, absolutely,’ but I never thought about that.”
Facebook Twitter Pinterest Sigourney Weaver as Ellen Ripley with Jones the ship’s cat in the original Alien (1979). Photograph: Sportsphoto/Allstar/20th Century Fox
Empowered female leads have since become a trope in Scott’s work. Sometimes the results have been spectacular (notably Thelma & Louise, which led to the first of his three Oscar nominations for best director) and other times they have been less successful (the 1997 Demi Moore vehicle GI Jane). And it is part of the Alien DNA now, too. Scott returned to the franchise in 2012 after a long hiatus – Fox, to his annoyance, chose James Cameron to direct the first sequel, Aliens, in 1986. In the prequel Prometheus, Scott cast the Swedish actor Noomi Rapace as the archaeologist Dr Elizabeth Shaw, who, memorably, gives birth to an alien by C-section and survives. In the new film, Alien: Covenant, Ripley’s spiritual heir is Daniels, played by Fantastic Beasts and Where to Find Them’s Katherine Waterston. She even wears the famous singlet.
“It’s gratifying when people mention that,” says Scott, of the feminist undertones in several of his films. “I remember reading Thelma and the executives were saying, ‘Well, it’s two bitches in a car…’ And I said, ‘Actually, it’s a little bit more than that.’ Originally I was supposed to produce, and I offered it to four directors. One said, ‘I’ve got a problem with the women.’ And I said, ‘That’s the point, you dope! Clearly, you have big problems with women.’”
With Alien: Covenant, Scott set out to reprise the spirit of the original film (tagline: “In space no one can hear you scream”). “I wanted to really scare the shit out of people,” he says. “Totally, that’s the job. It’s like if I’m a comedian, I want to make you laugh like hell. My day job is to be an entertainer. Some of it is art, but fundamentally I entertain – never forget that.”
Scott was convinced that it made sense to resurrect Alien both artistically and, crucially, commercially. “Franchise always sounds like a vaguely not-very-nice word, because it means making money. And there’s nothing wrong with making money in the film industry – in fact it’s what it’s all about. If there’s a big film that’s a disaster, it’s bad for everybody. If there’s a little film that’s a huge success, it’s good for everybody. That’s the industry we’re in.”
Although technology and effects have moved on drastically in the four decades since Alien, Scott’s approach in many ways hasn’t. He uses computer-generated imagery and green screen only as a last resort. The Covenant spaceship, which contains a crew of settlers leaving Earth to make a new life on the far side of the galaxy, was effectively built from scratch on a soundstage in Australia. There were hundreds of switches and dials, more than 1,500 electrical circuits, and Scott insisted that they all work. The reason? If you really want to scare an audience, then he believes you need to create characters they care about and an atmosphere that feels real. That’s why many of the most enduring horror films are the age-old classics.
“I was always put off from swimming ever since I watched Jaws,” says Scott. “I quite liked swimming, actually, and I’d occasionally dive, but once I saw Jaws there was no way I was ever going to learn to surf – no way! Because I can imagine my legs, my little pinkies hanging underneath the surfboard and I know I’m going to be the one.”
Facebook Twitter Pinterest Michael Fassbender as ‘synthetic’ android David in Alien: Covenant. Photograph: Mark Rogers/Fox Film
Being in Berlin seems to have put Scott in a reflective mood and, without much prompting, he starts to talk about his childhood. His father Francis was a colonel in the Royal Engineers and the family spent five years in West Germany after the war as part of the Marshall plan. They lived in a house so grand it had a library and Scott went to boarding school in Wilhelmshaven, where he used to watch U-boats in the North Sea: “I loved it.” Francis was offered the prestigious position of head of port authority for the Elbe, based in Hamburg, but Scott’s mother wanted to return home to be near her relatives. Scott smiles, “I remember saying, at eight-and-a-half: ‘Take the job. You’re making the wrong decision.’ But it wasn’t to be, and we went back to Stockton-on-Tees and ended up on a council estate.
“My dad was a great guy, he did the right thing for the family, but he was not a happy man for the rest of his life,” Scott goes on. “And the interesting thing is that nobody said anything. We just looked at the house, pebble dash, move in and that’s it, dude. You’d get on with it.”
Back in the north-east, Scott went to a secondary modern school. He struggled academically; only later did he think that he might be slightly dyslexic. “After being bottom for five years, I decided that I wasn’t academically sound – for anything, truly. And I was really trying. I wasn’t lazy. I just couldn’t retain anything that I wasn’t interested in. If I’m interested, I’ve got a photographic brain. I could walk out of this room and in a year, I could draw it right down to the paintings on the wall.”
Scott painted and he played tennis: “a sissy’s game in those days”. His skill at the former earned him a place at the West Hartlepool College of Art and then the Royal College of Art, where he was a contemporary of David Hockney. “We have literally the same birthday, same year,” says Scott – though this, sadly, turns out not to be the case: Hockney was born in July 1937, Scott in November. “Have you seen his exhibition at the Tate? Fuck! David today, you have to compare him to Matisse. I know you can’t compare artists… he’s an entity now. Spectacular.”
It was at the Royal College that Scott made his first film: a black-and-white short, shot on 16mm, called Boy and Bicycle. “My brother, Tony Scott, was the actor and chief equipment carrier,” Scott smiles. “I remember it cost £65, and I took six weeks to bugger about on Redcar and Hartlepool beach to make this movie. Tony must have been 14, 15 and I ruined his summer holiday, but it was sinking in to him, so we were doing a thing that would inject lifelong dedication to making films.”
Facebook Twitter Pinterest Ridley Scott, right, with his brother Tony, at the Britannia awards, 2010. Photograph: Todd Williamson Archive/Getty Images
The Scott brothers could have been rivals: Tony, almost seven years younger, also became a director and his films include Top Gun, True Romance and Crimson Tide. But, in reality, they were close and rarely, if ever, went up for the same projects. “Ridley was tough but very protective, like an older brother should be,” Tony Scott told me in 2007. “We are very competitive, but this inspires us to do better and different work. I find myself stealing from Ridley a lot. He’s remarkably focused and immovable in terms of his vision, while I’m continually swayed by looking left rather than right. But we inspire each other.”
Tony Scott killed himself in 2012, jumping from a suspension bridge in Los Angeles. Ridley later revealed that Tony had cancer. “Being an elder brother, if ever there was an argument about anything, I’d back off, always,” says Scott now. “Nothing’s worth losing a relationship over. So I’d always back down, say, ‘OK, then.’” He laughs affectionately, “He was like the spoiled brat really.
“Tony was made for commercials: he was high-energy, fun, people loved him. We started our company together, Scott Free, and my biggest thing was to tell him, ‘Please don’t go for a job somewhere else, come with me. If you come with me, I guarantee you won’t be riding that bicycle in a year, you’ll be in a much better car. And if you really want a Ferrari, come with me.’ And he got several Ferraris.”
Family is Scott’s one great concern outside of films – his other big passion in life was tennis, which he played very competitively, always singles, well into his 70s, until he blew his knees. He is married to the Costa Rican actor Giannina Facio, his third wife, and they move between houses in Los Angeles, London and the Luberon in Provence. He has three grown-up children – Jake, Luke and Jordan – all of whom have followed him into the business, either in films or advertising. It is little surprise then that Scott has felt the loss of his brother keenly. He often called Tony the one person in Hollywood he could trust – and he still thinks of him every time he finishes a project.
“He’s the only one I’d show the film to,” says Scott. “I’d say, ‘What do you think of this?’ And he’d go, ‘Make it shorter.’ And I’d go, ‘Oh, OK. So it means he didn’t like it.’ I always remember showing him Legend” – a fantasy adventure made in 1985 and starring Tom Cruise (it crashed at the box office but went on to attain cult status) – “and I thought, ‘What am I going to do with Legend?’ It wasn’t his thing at all, but he was very polite, which means he really didn’t like it.”
Ridley Scott often says: “My plan is no plan.” He came to film-making relatively late: The Duellists, his debut, was released when he was 40. He’d already worked in advertising for almost 20 years, and he has made, by his estimate, something like 10,000 commercials, including the famous Hovis ad. This delayed start perhaps explains the furious work rate that he sustains to this day. When we meet in Berlin, Scott is itching to get back to Rome where he is about to start shooting his next film, about the kidnapping of 16-year-old John Paul Getty III in 1973; Kevin Spacey plays the boy’s father, who received an ear in the post (delayed by three weeks, because of a strike). After that, he wants to make a drama about the Battle of Britain. He also has to somehow fit in another Alien prequel, provisional title Alien: Awakening, and he has sketched out plans to make another three Alien films after that.
“It drives me crazy that an actor can do four movies in a year and I can’t,” he sighs. “I was saying that to Michael Fassbender.”
Alien: Covenant trailer: five things we've learned about the xenomorph saga Read more
Scott and Fassbender have made three films together (Prometheus, The Counsellor and now Alien: Covenant, where the actor plays two androids: sneaky David and benign Walter). He is becoming the director’s right hand in much the same way that Russell Crowe was in the 2000s. “Yeah, the two buddies really long-term are Michael and Russell, who I’ve made five films with,” says Scott. “You know whatever shouting and yelling and quarrels you’ve been through, after five movies, you’re definitely buddies. There’s nothing to hide, all the crap’s out the way.
“They are quite different as people, but they are both very smart. You better do your homework. Or you better have your point of view, because if you don’t, they’re going to stomp on you. When they say, ‘Why would I do that?’ you better have a bloody good answer.”
This, Scott believes, is the lesson that 40 years in Hollywood has taught him: you have to be decisive. He never makes cheap films, but he has earned a reputation for bringing them in on time and on budget. “It’s fatal to turn up on set and say, ‘What are we going to do?’” he says. “Fatal to discuss where the cameras are going to go. You cannot do that. That’s where it comes unhinged. A film like Alien: Covenant would normally be 100 days; we did it in 74. We made it for $111m, as opposed to $180m or $260m. It’s insane the amount of money spent. When you’re spending $250m on a movie, you should have been fired a year ago.
“It all comes full circle to starting out as a painter. You walk in the room in the morning, where you spent all day yesterday by yourself. You stare at the canvas and you go: ‘Bloody hell, I hate it.’ Painting is all about what you did yesterday, how you’re going to recorrect it, improve it, or go: ‘Holy shit, I got it.’” He pauses, perhaps remembering Pauline Kael and her real or imagined slights against his beard. “It’s being your own critic, that’s it. That’s the most important thing.”
Scott is well known for his time-keeping and, today, ours is up. After the current round of press requirements, he will again disappear. Back to his films, until the next time. It’s not that he doesn’t enjoy the attention, he just doesn’t need it. “Thanks,” he says, as I get up to leave, “but you know I won’t read it, don’t you?”
Alien: Covenant is out on 12 May |
Part of the Truthout Series Progressive Picks
Keeanga-Yamahtta Taylor. (Photo courtesy of Keeanga-Yamahtta Taylor) What led to the current movement insisting that Black lives do matter and demanding that Black people be treated accordingly? Find out in Keeanga-Yamahtta Taylor’s new book, which Michelle Alexander calls “a searching examination of the social, political and economic dimensions of the prevailing racial order.” From #BlackLivesMatter to Black Liberation also looks at what the future of the movement might hold – order the book today with a donation to Truthout!
As the Black Lives Matter movement hovers in the political imaginations of many across the United States amid a contentious election season, a book on the movement couldn’t be more timely. Keeanga-Yamahtta Taylor’s From #BlackLivesMatter to Black Liberation comes at a moment of thought, contemplation and uncertainty. The political atmosphere in the US has, for years, been dominated by a growing far-right base and a compromising liberal establishment. This environment has been conducive to disappointment and outrage, particularly on issues of racial injustice, and the Black Lives Matter movement has emerged as a current capturing the attention of the country. It’s for these very reasons that a book about where the movement is headed feels absolutely necessary amid election season. Taylor’s analysis of decisions that have been made and ones that need to be made point a liberatory way forward.
Keeanga-Yamahtta Taylor is a writer and activist with a reputation of being a dynamic public speaker. Just like a good speech, Taylor’s book builds in passion throughout. She gives us both a vital factual account of the history of Black liberation movements, and a sharp analysis that guides the reader through the corridors of critical thinking around issues of race and class. Taylor’s injection of a staunch left perspective on how to move forward centers her writing in a way many might not expect. The book demonstrates that the past is often one of our greatest guides when it comes to navigating the present.
I got the chance to speak with Taylor about her book. What follows are some key aspects of our conversation.
William C. Anderson: How does it feel to write your first book?
Keeanga-Yamahtta Taylor: I wrote this book really quickly. I was asked to write it in December of 2014. I didn’t think I’d be able to do it because I had just moved across the country. I was trying to adjust to a new job and I was actually supposed to be working on another book that, of course, I’m now behind on. When I was first asked I said “no” because I felt like I didn’t have time to do it. Maybe a week later I went to a coffee shop and just sat down to see if I could actually write anything about the movement … and there was so much.
“When I think of the history of Black people in this country, I think of endless struggle and resistance to oppression.”
I don’t know if you recall, but December of 2014 is really when Black Lives Matter, the movement, went from kind of being a kind of locally focused campaign in Ferguson to a national phenomenon over the course of a month. So it seemed like there was so much going on that it would be difficult to capture. But when I sat down to think about it and to really start to map out what I wanted to argue or what I could argue, I realized that there was quite a bit there to go through.
I probably spent between six and eight weeks writing the manuscript and at one point I was working maybe 15 to 16 hours a day around the clock. It really became almost like a math problem I was trying to figure out – how the history influenced the contemporary moment, trying to understand the politics of the movement, as it was something in development and evolving at the same time. All of that was pretty exciting. In the end, I feel like I wrote the book that I wanted to read – and that didn’t exist – and that answered a number of questions that I had about the history and politics of the movement. And I have to say I’m proud of the work that I produced.
Was it hard for you to relive a lot of that trauma that Black America has been through?
I think that if Black people’s history in this country was only about trauma, then yeah, that would be difficult. But, for as much trauma that has happened in the history of Black people in this country, it has often been met with the same amount of resistance. So, in that sense I was actually quite fascinated to sort of look at the history of different iterations of Black resistance or the different incarnations of the Black freedom struggle to understand how this contemporary moment fits into that overall history. To me, that was the most exciting but also the most important aspect of this. Especially when you get into the 1960s and really looking at the history and understanding the absolute centrality of the Black struggle to pushing all of American politics to the left, not just the Black movement itself. When we think back and look at the 1960s and the creation of the Johnson welfare state and all of the social policy that comes out of that era, there’s really an effort to keep pace and keep up with the movement as it is evolving. That to me is the centrality of the Black movement …
The reason why the state consistently overreacts to the Black movement – I think that we’re seeing that again today with Black Lives Matter – is because the movement itself really begins to explode all of the mythologies that the United States is deeply invested in: that this is a country of exception, the notion of American exceptionalism, the idea that the US is a place where anything can happen, boundless opportunity, and all of those lies that the United States really uses [in] its self-appointed role of policing the rest of the globe. It comes undone when you look at the history of Black people in this country … When I think of the history of Black people in this country, I think of endless struggle and resistance to oppression that has not only been important to our ability to survive in this country, but our struggle has been inspirational to people around the planet.
If Black people can survive the United States, then wherever we are around the world, we are able to struggle and potentially win as well.
You talk about the Black political class a lot. Can you talk about the Black political class’s conservative direction that you mention and their support for “law and order,” which is a very rich theme in your work? Can you explain the reason that you emphasized that for readers?
I think the most significant change in Black life over the last 50 years, since the last generation of the Black freedom struggle, has been the development of the Black middle class. That has many different elements to it. I think that there was an effort at the end of the ’60s to absorb a layer of African Americans into the mainstream of American society to create some success stories. This was in reaction to the massive Black insurgency that took place during the middle part of the 1960s. So from ’63 to ’68 you have almost 500,000 African Americans engaged in open rebellion in 500 cities in the US. So, there’s a recognition from the political establishment that this can’t go on. Part of the strategy is to create some success stories of African Americans who can live out their version of the American dream. Who can become homeowners, middle management types, and can have some level of investment in the current society. I think that was one aspect of it.
I think politically, there was also a need to approach the Black politics and really create a rift between radical elements who were pushing not just for Black political representation, but who were questioning the entire makeup and structure of American society. So, there was a concerted effort I think to really draw Black political operatives into the Democratic Party and to present the Democratic Party as a legitimate place for Black politics.
And so we see over time as the movement has increasingly less influence on Black political operatives and the deeper that they get into the political system, the more conservative they actually become. By today there is an enormous rift between the Black political class and the Black working class, but it’s mainly because Black political operatives have to organize themselves by the same principle every political operative must organize themselves by, which is fundraising and negotiation – and that’s it. And so to expect Black politicians to somehow act differently is really to engage in wishful thinking and to be naive.
In the book, you brought up the risk of the co-optation of Black Lives Matter and mentioned some weak spots. Can you talk about some of your critiques of the movement right now and expand on some of these risks?
There’s always an ongoing effort to try to undermine political movements, especially Black political movements – for the reasons that I said earlier – that the Black struggle, more than any other struggle in the United States, is always seen as the most threatening because it destroys all of the mythologies that are bound up in what the United States purports to be. So there’s always a particular focus on Black activists and Black activism. In a democracy, it’s very difficult for the state to just come out, denounce people and put people in jail – and do that sort of thing. So, there tends to be a sort of multipronged effort. One is from the foundations and funders and nongovernmental organizations [that] always find a way to insert themselves in the middle of political organizations around the question of funding and resource assistance. And that is certainly not a new phenomenon; it’s not something that has just happened just now with Black Lives Matter.
I think that probably the most concerted effort around these sorts of things really took place during the civil rights movement, when there was a concerted effort from funders working in concert with state operators or state actors – to try to warp the effects of the movement – to try to redirect the focus of the movement from a more broad and robust critique of society in general into a single-issue focus on voting. And you know voting is very important in terms of an expression of civil rights, but it’s not something that is going to challenge the basic order of society. So, I think there’s a historical record of interference and attempts to influence within movements, and I go through some of that history.
What do you want people to get out of your book?
I want people to understand why these movements happen in some circumstances and not others. I want people to understand the reason why police brutality is often a catalyst for Black protest because it serves as a constant reminder of the compromised citizenship that Black people have in this country. So, I want people to understand some of the issues affecting the movement, and not just understand them but really absorb some of the arguments that are made in the sixth chapter – so, for movement activists to debate and to talk about some of the issues that I raise among their fellow activists. And for different organizing groups to think about – and take on, and disagree or agree or whatever – with some of the things I’m raising, but to engage in those questions and begin to think about where is the movement going? And how do we gain control over directing it as opposed to being led by events where the next egregious police shooting will bring people out?
What does it mean for Black people to be free? It doesn’t mean that cops wear body cameras. It doesn’t mean that cops get sensitivity training. So, how do we think beyond the parameters of the existing society? That means raising the question of why we have police in the first place. And what function does racism play in this society. And how do we take and transform that knowledge into a deeper political commitment to a fight for an altogether different society.
When we talk about getting free, that’s a process and struggle that we must engage in directly. It can’t be outsourced to a city council person. It can’t be outsourced to the Black political class. It can’t be outsourced to any other entity.
In Chicago? Catch Keeanga-Yamahtta Taylor in conversation with activist and scholar Barbara Ransby on Wednesday, April 6. The event will begin at 7pm in the Chicago Cultural Center’s Preston Bradley Hall. Tickets are free and can be reserved in advance via Eventbrite. |
The Jets want to see what running back Denard Robinson has to offer. The former Michigan standout is working out for Gang Green this week, according to his agents (on Twitter).
[RELATED: Jets Release WR Eric Decker]
Robinson, 27 in September, will participate in the Jets’ three-day mandatory minicamp and try to make a solid impression on coaches. After making a name for himself as a quarterback at Michigan, Robinson converted to running back for the Jaguars.
The fifth-round pick appeared in 55 of the Jaguars’ 64 games in that span and made 13 starts, with the bulk of his career 263 carries coming in 2014. That season, Robinson piled up 135 rushes for 582 yards – good for a quality YPC of 4.3 – scored four touchdowns and totaled a personal-high 23 catches. Unfortunately, Robinson’s production has dipped since then, as he posted the second-worst YPC (3.5) of his career in 2016 off of 41 attempts.
The Jets have a pretty crowded group in the backfield, particularly after adding Jordan Todman on Monday. Still, Robinson is hoping to find his way on to the 90-man roster in New York.
Photo courtesy of USA Today Sports Images. |
package org.jasig.cas.mock;
import org.jasig.cas.authentication.Authentication;
import org.jasig.cas.authentication.BasicCredentialMetaData;
import org.jasig.cas.authentication.CredentialMetaData;
import org.jasig.cas.authentication.DefaultAuthenticationBuilder;
import org.jasig.cas.authentication.DefaultHandlerResult;
import org.jasig.cas.authentication.handler.support.SimpleTestUsernamePasswordAuthenticationHandler;
import org.jasig.cas.authentication.principal.DefaultPrincipalFactory;
import org.jasig.cas.authentication.principal.Service;
import org.jasig.cas.ticket.ExpirationPolicy;
import org.jasig.cas.ticket.ServiceTicket;
import org.jasig.cas.ticket.TicketGrantingTicket;
import org.jasig.cas.ticket.UniqueTicketIdGenerator;
import org.jasig.cas.ticket.proxy.ProxyGrantingTicket;
import org.jasig.cas.util.DefaultUniqueTicketIdGenerator;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
/**
* Mock ticket-granting ticket.
*
* @author <NAME>
* @since 3.0.0
*/
public class MockTicketGrantingTicket implements TicketGrantingTicket {

    // Shared generator used to mint ids for this mock TGT and for the default
    // service-ticket overload below.
    public static final UniqueTicketIdGenerator ID_GENERATOR = new DefaultUniqueTicketIdGenerator();

    private static final long serialVersionUID = 6546995681334670659L;

    // Immutable identity and state captured at construction time.
    private final String id;
    private final Authentication authentication;
    private final ZonedDateTime created;

    // Incremented each time a service ticket is granted; reported by getCountOfUses().
    private int usageCount;

    // Set by markTicketExpired(); reported verbatim by isExpired().
    private boolean expired;

    // Never assigned in this mock, so getProxiedBy() always returns null.
    private Service proxiedBy;

    // Exposed as-is by getServices()/getProxyGrantingTickets(); never populated here.
    private final Map<String, Service> services = new HashMap<>();
    private final HashSet<ProxyGrantingTicket> proxyGrantingTickets = new HashSet<>();

    /**
     * Builds a mock ticket-granting ticket for the given principal, backed by a
     * canned successful authentication (test username/password credential) and a
     * creation time of "now" in UTC.
     *
     * @param principal the principal id to embed in the authentication
     */
    public MockTicketGrantingTicket(final String principal) {
        id = ID_GENERATOR.getNewTicketId("TGT");
        final CredentialMetaData metaData = new BasicCredentialMetaData(
                org.jasig.cas.authentication.TestUtils.getCredentialsWithSameUsernameAndPassword());
        authentication = new DefaultAuthenticationBuilder(new DefaultPrincipalFactory().createPrincipal(principal))
                .addCredential(metaData)
                .addSuccess(SimpleTestUsernamePasswordAuthenticationHandler.class.getName(),
                        new DefaultHandlerResult(new SimpleTestUsernamePasswordAuthenticationHandler(), metaData))
                .build();
        created = ZonedDateTime.now(ZoneOffset.UTC);
    }

    @Override
    public Authentication getAuthentication() {
        return authentication;
    }

    /**
     * Convenience overload that generates a fresh "ST" id and delegates to the
     * interface method with credentialsProvided/onlyTrackMostRecentSession = true.
     */
    public ServiceTicket grantServiceTicket(final Service service) {
        return grantServiceTicket(ID_GENERATOR.getNewTicketId("ST"), service, null, true, true);
    }

    /**
     * Grants a {@link MockServiceTicket}. The expiration policy and both boolean
     * flags are ignored by this mock; only the usage counter is updated.
     */
    @Override
    public ServiceTicket grantServiceTicket(
            final String id,
            final Service service,
            final ExpirationPolicy expirationPolicy,
            final boolean credentialsProvided,
            final boolean onlyTrackMostRecentSession) {
        usageCount++;
        return new MockServiceTicket(id, service, this);
    }

    @Override
    public Service getProxiedBy() {
        return this.proxiedBy;
    }

    // This mock is always its own root: see getRoot() and getGrantingTicket().
    @Override
    public boolean isRoot() {
        return true;
    }

    @Override
    public TicketGrantingTicket getRoot() {
        return this;
    }

    @Override
    public List<Authentication> getSupplementalAuthentications() {
        return Collections.emptyList();
    }

    @Override
    public List<Authentication> getChainedAuthentications() {
        return Collections.emptyList();
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public boolean isExpired() {
        return expired;
    }

    @Override
    public TicketGrantingTicket getGrantingTicket() {
        return this;
    }

    @Override
    public ZonedDateTime getCreationTime() {
        return created;
    }

    @Override
    public int getCountOfUses() {
        return usageCount;
    }

    @Override
    public Map<String, Service> getServices() {
        return this.services;
    }

    @Override
    public Collection<ProxyGrantingTicket> getProxyGrantingTickets() {
        return this.proxyGrantingTickets;
    }

    // Intentionally a no-op: this mock never tracks granted services.
    @Override
    public void removeAllServices() {
    }

    @Override
    public void markTicketExpired() {
        expired = true;
    }
}
|
/**
* A very insecure MD5 password encoder.
* Please DO NOT use this anywhere in production !!!!!!!!
*/
public class MD5PasswordEncoder implements PasswordEncoder {
private static final Logger LOG = LoggerFactory.getLogger(MD5PasswordEncoder.class);
@Override
public String encode(CharSequence rawPassword) {
try {
MessageDigest md = MessageDigest.getInstance("MD5");
return (new HexBinaryAdapter()).marshal(md.digest(rawPassword.toString().getBytes(StandardCharsets.UTF_8)));
} catch (NoSuchAlgorithmException e) {
LOG.warn("Error in encoding", e);
return null;
}
}
@Override
public boolean matches(CharSequence rawPassword, String encodedPassword) {
try {
MessageDigest md = MessageDigest.getInstance("MD5");
String rawPasswordEncoded = (new HexBinaryAdapter()).marshal(md.digest(rawPassword.toString().getBytes(StandardCharsets.UTF_8)));
return digestEquals(encodedPassword, rawPasswordEncoded);
} catch (NoSuchAlgorithmException e) {
LOG.warn("Error in matching", e);
return false;
}
}
private boolean digestEquals(String expected, String actual) {
byte[] expectedBytes = bytesUtf8(expected);
byte[] actualBytes = bytesUtf8(actual);
return MessageDigest.isEqual(expectedBytes, actualBytes);
}
private byte[] bytesUtf8(String s) {
return (s != null) ? Utf8.encode(s) : null;
}
} |
/**
* Strip off trailing zeroes
* @param s input string
* @return pruned string
*/
public static String stripTrailingZeroes(String s) {
char[] chars = s.toCharArray();
int length,index;
length = s.length();
index = length -1;
for (; index >=0;index--)
{
if (chars[index] != '0'){
break;}
}
return (index == length-1) ? s :s.substring(0,index+1);
} |
/*
Copyright (c) 2007-2013 Contributors as noted in the AUTHORS file
This file is part of 0MQ.
0MQ is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
0MQ is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ZMQ_UTILS_H_INCLUDED__
#define __ZMQ_UTILS_H_INCLUDED__
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
/* Define integer types needed for event interface */
#if defined ZMQ_HAVE_SOLARIS || defined ZMQ_HAVE_OPENVMS
#   include <inttypes.h>
#elif defined _MSC_VER && _MSC_VER < 1600
/* MSVC before VS2010 ships no <stdint.h>; provide the two types we need. */
#   ifndef int32_t
typedef __int32 int32_t;
#   endif
#   ifndef uint16_t
typedef unsigned __int16 uint16_t;
#   endif
#else
#   include <stdint.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Handle DSO symbol visibility */
#if defined _WIN32
#   if defined ZMQ_STATIC
#       define ZMQ_EXPORT
#   elif defined DLL_EXPORT
#       define ZMQ_EXPORT __declspec(dllexport)
#   else
#       define ZMQ_EXPORT __declspec(dllimport)
#   endif
#else
#   if defined __SUNPRO_C || defined __SUNPRO_CC
#       define ZMQ_EXPORT __global
#   elif (defined __GNUC__ && __GNUC__ >= 4) || defined __INTEL_COMPILER
#       define ZMQ_EXPORT __attribute__ ((visibility("default")))
#   else
#       define ZMQ_EXPORT
#   endif
#endif
/* These functions are documented by man pages */
/* Encode data with Z85 encoding. Returns encoded data */
/* NOTE(review): caller supplies dest; required capacity is not stated here —
   confirm against the zmq_z85_encode man page (size * 5 / 4 + 1). */
ZMQ_EXPORT char *zmq_z85_encode (char *dest, uint8_t *data, size_t size);
/* Decode data with Z85 encoding. Returns decoded data */
ZMQ_EXPORT uint8_t *zmq_z85_decode (uint8_t *dest, char *string);
/* Generate z85-encoded public and private keypair with libsodium. */
/* Returns 0 on success. */
ZMQ_EXPORT int zmq_curve_keypair (char *z85_public_key, char *z85_secret_key);
/* Signature of the entry point accepted by zmq_threadstart below. */
typedef void (zmq_thread_fn) (void*);
/* These functions are not documented by man pages */
/* Helper functions are used by perf tests so that they don't have to care */
/* about minutiae of time-related functions on different OS platforms. */
/* Starts the stopwatch. Returns the handle to the watch. */
ZMQ_EXPORT void *zmq_stopwatch_start (void);
/* Stops the stopwatch. Returns the number of microseconds elapsed since */
/* the stopwatch was started. */
ZMQ_EXPORT unsigned long zmq_stopwatch_stop (void *watch_);
/* Sleeps for specified number of seconds. */
ZMQ_EXPORT void zmq_sleep (int seconds_);
/* Start a thread. Returns a handle to the thread. */
ZMQ_EXPORT void *zmq_threadstart (zmq_thread_fn* func, void* arg);
/* Wait for thread to complete then free up resources. */
ZMQ_EXPORT void zmq_threadclose (void* thread);
/* ZMQ_EXPORT is an implementation detail of this header; do not leak it. */
#undef ZMQ_EXPORT
#ifdef __cplusplus
}
#endif
#endif
|
This year saw an explosion of racism, Islamophobia, xenophobia, and violence. While there were some significant progressive victories in 2015, there were also many dangerous developments -- from the proliferation of war around the world; to the worst refugee crisis since World War II; to the growth of far-right, neo-fascist movements in the West, with the National Front and Golden Dawn in Europe and Donald Trump in the U.S.
Amid this climate of escalating bigotry and hatred, racists have been emboldened and led to believe that their prejudices are now mainstream. We all have that racist family member who spouts unsubstantiated myths at Christmas dinner; at your holiday feast this year, your racist uncle may feel particularly encouraged to air his intolerant ideas.
Advertisement:
These are some of the best ways to challenge the Islamophobic, racist, anti-immigrant, and homophobic myths you may hear.
Islamophobia
If there was one group demonized more than any other in 2015, it was Muslims. The frequency of anti-Muslim attacks in the U.S. has skyrocketed. Muslims have been shot, assaulted, and threatened; their places of worship have been burned and vandalized. The number of anti-Muslim hate crimes tripled in December.
Islamophobes like your racist uncle may justify their hatred saying they are defending themselves from a supposedly "inherently violent" religion -- but it is important to remember more than 1.6 billion people in the world are Muslim.
That is to say, almost one-fourth of the entire global population is Muslim. And the Middle East is by no means representative of global Islam. Just 20 percent of the world's Muslims are in the Middle East and North Africa. More than three-fifths (62 percent) of Muslims in the world are in the Asia-Pacific region, on the other hand.
Around 50 countries have Muslim-majority populations. The largest Muslim population in the world is in fact in Indonesia. And India has the second-largest population of Muslims.
Even if your racist uncle concedes that not all of the quarter of the human population that is Muslim are terrorists, you should also remind him that, in the U.S., right-wing terrorism is much more common than Islamic extremist terror -- and the former is growing.
Advertisement:
Besides, at the end of the day, Americans are much more likely to be killed by cars, suicide, bees, wasps, and even furniture than they are by Muslims.
The real question to ask your racist uncle, then, is when is the government going to do something about the real threat: insects and furniture?
Is it true that terrorism is more common among Muslims? Well, it depends on what you mean by terrorism. Your racist uncle will doubtless insist that the West was built on Christianity. If so, it's important to point out that the West has also been the most violent region of the world in human history.
It has carried out the largest genocides in history, against the indigenous peoples of the modern-day Americas -- which were committed partially in the name of Christianity. It created concentration camps and torture chambers. It has bombed countries all over the world, killing millions of people -- and continues to kill, injure, and displace millions of people in its wars around the world today. This is terror, according to any consistent definition of the term -- state terror.
Advertisement:
ISIS
If you are talking about Islamophobia, ISIS will inevitably come up. When your racist uncle mentions ISIS, it is important to remind him of a few things.
It is very important to stress that the vast majority of Muslims in the world have a very negative view of ISIS, and do not even consider the extremist group to truly be Islamic -- just as the vast majority of Christians do not consider the KKK to be Christian. Both extremist groups violate the most basic tenets of the religions they claim to represent. Saying ISIS is representative of Islam is like saying the KKK is representative of Christianity.
In September 2014, hundreds of Islamic scholars wrote an open letter to ISIS, its leaders, and its followers, detailing how the group is not Islamic and denouncing its violent extremism. The scholars quoted extensively from the Quran and used basic Islamic theology to pick apart ISIS' claims.
Advertisement:
Also, it must be remembered that the vast majority of the victims of ISIS are themselves Muslims.
ISIS is a horrific group that is similar in many ways to the fascist movements of the early 20th century. But in order to understand the rise of ISIS, we must also understand the gruesome crimes that gave birth to such a gruesome movement.
The U.S.-led war in Iraq, which killed at least one million people and destabilized the region, set the stage for the spread of extremism. Before the U.S. invaded Iraq in direct contravention of international law, al-Qaeda was not in the country (and ISIS eventually emerged out of Iraq's al-Qaeda branch). It was the U.S.-led war and occupation that brutalized and radicalized the Sunni minority of Iraq, which lived under an authoritarian, sectarian puppet government.
Advertisement:
It is also important to remember how the U.S. government supported sectarian Shia militias in Iraq, in order to fight the Sunni insurgency, which only further fueled the spread of extremism.
Trying to understand the rise of ISIS without addressing the catastrophic U.S. war in Iraq, and the decade-long foreign occupation of the country, would be like trying to understand biology without evolution. (If your racist uncle also refuses to accept the scientific validity of evolution — for which there is more evidence than there is for almost anything else in history — there may be no hope.)
Moreover, a question must be asked: If the U.S. is so concerned with combating Islamic extremism, why has it for so long remained allied with the most fundamentalist Islamist forces in the world? Saudi Arabia, among other theocratic and repressive Gulf monarchies, is one of the planet's most extreme religious forces. The Saudi regime has been described as the "fountainhead" of Sunni extremism, and has been called "an ISIS that has made it."
U.S. government cables released by WikiLeaks identify wealthy Saudis as the "chief funders" of al-Qaeda. And yet bending over backward to work with Saudi Arabia is a policy that has been pursued by both mainstream American political parties, without exception, since the early 20th century, when it was discovered that the Middle East has the largest oil reserves on the planet.
Advertisement:
Anti-black racism and police brutality
With the rise of the Black Lives Matter civil rights movement and increasing attention on police brutality against people of color, racism has become a hot topic. Republican presidential candidate Donald Trump, in particular, has exploited the white supremacy deeply embedded in the roots of America. Particularly egregious was Trump's insistence that 81 percent of white Americans who are murdered were killed by black Americans, which monitoring group PolitiFact deems a "pants on fire"-level lie.
If your racist uncle asks about so-called "black-on-black" crime, you should reply, "What about white-on-white crime?"
Yes, it is true that 90 percent of black Americans are killed by fellow black Americans — but 83 percent of white Americans are killed by fellow white Americans. The vast majority of shootings occur within one's race. People are more likely to kill people they know, and the U.S. remains a very segregated country, in which most Americans interact primarily with people of their same race.
Moreover, police brutality in the U.S. is completely off the charts. At least 1179 Americans were killed by police from the beginning of 2015 to Christmas, and victims of police brutality are disproportionately people of color.
Advertisement:
American police shot and killed more people in the month of November 2015 alone than British cops have in 95 years.
This is not to say the U.K. is some magical utopia where police are free from prejudice -- far from it. Structural racism is still a big problem among British police, but most are not armed, and they are trained to use de-escalation tactics, not to shoot first and ask questions later.
And police brutality is just one expression of the countless forms of structural racism black Americans endure.
Numerous studies show that, when black and white applicants who are equally qualified apply for the same job, white Americans are significantly more likely to get an interview. This is systemic racism at work.
Advertisement:
Recent college graduates who are black are almost twice as likely to be unemployed as recent college graduates overall. This is not because they are lazy; it is because racism makes it much harder for black Americans to get a job.
Anti-immigrant myths and welfare
On the subject of laziness, your racist uncle may also claim that immigrants are lazy slackers. Yet the myth of the lazy slacker immigrant that exploits the system is exactly that — a myth.
The vast majority of immigrants pay taxes, and not just sales taxes. Moreover, most immigrants pay taxes and receive few government services in return.
Immigrants spend 45 times more in taxes than they receive in public benefits, according to a study by the American Immigration Council. On average, immigrants in the U.S. also work more than non-immigrants.
Advertisement:
A Chamber of Commerce report determined that "more than half of undocumented immigrants have federal and state income, Social Security and Medicare taxes automatically deducted from their paychecks."
There were an estimated 11.4 million undocumented immigrants in the U.S. as of 2012, according to the non-partisan think tank, the Institute on Taxation and Economic Policy (ITEP). Together, these 11.4 million undocumented migrants paid close to $12 billion in taxes.
Undocumented immigrants pay eight percent of their income in taxes, according to ITEP's research. In the meantime, the richest 1 percent of Americans — the so-called "job creators" — pay only 5.4 percent of their income in taxes.
As the New Republic puts it, "immigrants don't drain welfare; they fund it."
Advertisement:
If your racist uncle wants to blame migrants and minority groups for welfare spending, you should point out a few things. One, welfare as we know it was drastically gutted by Bill Clinton in 1995, with the support of both the Republican Party and the right wing of the Democratic Party.
Moreover, as for the paltry welfare that still exists, research conducted by experts at the University of California at Berkeley found that 73 percent of Americans who receive welfare are members of working families.
"It's poor-paying jobs, not unemployment, that strains the welfare system," reports the Wall Street Journal. "In some industries, about half the workforce relies on welfare," including 52 percent of fast-food workers. That is to say, it is low wages that create the need for welfare, not immigration or laziness.
Same-sex marriage
2015 was a big year for LGBTQ rights. Same-sex marriage was legalized in the U.S., and discussion of trans rights has increased.
Yet family members like your racist uncle may express opposition to marriage equality. He may claim that being gay is not "natural," and that it challenges the nuclear family structure upon which contemporary society is built.
First of all, it is important to indicate that homosexuality is documented in more than 1,500 different species, not just humans. In order to say it is "not natural," one must ignore the countless other examples of it in nature.
Furthermore, studies show that children of same-sex couples are in fact happier and healthier than children of heterosexual couples.
Your racist uncle may then, nevertheless, fall back on morality. Being gay is wrong, he may insist.
In reality, however, numerous scientific studies show that homosexuality is not a choice. And even if being LGBTQ were a choice, why should he have the right to take away others' right to engage in romantic behavior with consenting adults?
Additionally, when it comes to marriage equality, the U.S. is really behind the times. Americans constantly claim that their country is "exceptional," but it tends to fall exceptionally behind.
The Netherlands legalized same-sex marriage way back in 2001. Belgium did in 2003. Canada and Spain followed in 2005. Uruguay, Brazil, the U.K., France, and many more countries also legalized same-sex marriage before the U.S. Of the fellow economically developed nations in the OECD, the U.S. took a long time to catch up on marriage equality.
Some of these facts may upset your racist uncle, but they are facts, and you can tell him to take it or leave it, to accept reality as it is or continue to deceive himself and others. Just make sure you get your fill before the plates start flying. |
package main
import (
"context"
"flag"
"fmt"
"io"
"log"
"time"
"github.com/sandokandias/grpc-web-research/backend/internal/grpc/api"
"google.golang.org/grpc"
"github.com/gosuri/uitable"
)
var (
address = flag.String("address", "localhost:9000", "Address of the server in format <ip/name:port>. e.g.: 0.0.0.0:9000")
)
func main() {
	flag.Parse()

	// Open a single plaintext gRPC connection shared by both service clients.
	conn, err := grpc.Dial(*address, grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()

	// Exercise each service in turn over the shared connection.
	getTime(api.NewTimeServiceClient(conn))
	pay(api.NewPaymentServiceClient(conn))
}
// getTime performs a single unary GetTime call, bounded by a 10 second
// timeout, and prints the returned UNIX and UTC timestamps as a small table.
// Any RPC failure is fatal.
func getTime(client api.TimeServiceClient) {
	fmt.Println("Time service")
	fmt.Println("---------------------------")

	// context.WithTimeout is the idiomatic form of "deadline = now + d";
	// it replaces the manual time.Now().Add(...) + WithDeadline dance.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := client.GetTime(ctx, &api.TimeRequest{})
	if err != nil {
		log.Fatalf("failed get time: %v", err)
	}

	table := uitable.New()
	table.MaxColWidth = 100
	table.AddRow("UNIX", "UTC")
	table.AddRow(resp.Unix, resp.Utc)
	fmt.Println(table)
	fmt.Println("---------------------------")
	fmt.Println()
}
// pay invokes the server-streaming Pay RPC with a fixed test payment and
// prints every streamed response until the server closes the stream with
// io.EOF. The whole exchange is bounded by a 10 second timeout; any RPC
// failure is fatal.
func pay(client api.PaymentServiceClient) {
	fmt.Println("Payment service")
	fmt.Println("---------------------------")

	// Idiomatic timeout context instead of a hand-computed deadline.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req := &api.PayRequest{Amount: 100, Description: "test"}
	stream, err := client.Pay(ctx, req)
	if err != nil {
		log.Fatalf("failed pay: %v", err)
	}

	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			// Server finished streaming responses.
			break
		}
		if err != nil {
			log.Fatalf("%v.Pay(_) = _, %v", client, err)
		}
		fmt.Printf("%v\n", resp)
	}
	fmt.Println("---------------------------")
}
|
// List lists all the elasticsearch clusters matching the filters
func List(params ListParams) (*models.ElasticsearchClustersInfo, error) {
params.fillValues()
if err := params.Validate(); err != nil {
return nil, err
}
res, err := params.V1API.ClustersElasticsearch.GetEsClusters(
clusters_elasticsearch.NewGetEsClustersParams().
WithSize(ec.Int64(params.Size)).
WithShowPlans(ec.Bool(true)).
WithQ(ec.String(params.Query)).
WithShowSystemAlerts(ec.Int64(params.SystemAlerts)).
WithTimeout(util.GetTimeoutFromSize(params.Size)).
WithShowMetadata(ec.Bool(params.ShowMetadata)),
params.AuthWriter,
)
if err != nil {
return nil, api.UnwrapError(err)
}
return filterVersion(res.Payload, params.Version), nil
} |
<gh_stars>1-10
package org.mockserver.cors;
import io.netty.handler.codec.http.HttpHeaderNames;
import org.mockserver.model.Headers;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
import static io.netty.handler.codec.http.HttpMethod.OPTIONS;
import static org.mockserver.configuration.ConfigurationProperties.enableCORSForAPIHasBeenSetExplicitly;
/**
* @author jamesdbloom
*/
public class CORSHeaders {

    private static final String ANY_ORIGIN = "*";
    private static final String NULL_ORIGIN = "null";

    /**
     * True when the request is a CORS preflight: an OPTIONS request that carries
     * both an Origin header and an Access-Control-Request-Method header.
     */
    public static boolean isPreflightRequest(HttpRequest request) {
        final Headers headers = request.getHeaders();
        return request.getMethod().getValue().equals(OPTIONS.name()) &&
            headers.containsEntry(HttpHeaderNames.ORIGIN.toString()) &&
            headers.containsEntry(HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD.toString());
    }

    /**
     * Adds permissive CORS headers to the response. Every header is added via
     * setHeaderIfNotAlreadyExists, so values already present on the response win.
     */
    public void addCORSHeaders(HttpRequest request, HttpResponse response) {
        String origin = request.getFirstHeader(HttpHeaderNames.ORIGIN.toString());
        if (NULL_ORIGIN.equals(origin)) {
            // "null" origins (e.g. sandboxed iframes, file:// pages) must be echoed literally.
            setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(), NULL_ORIGIN);
        } else if (!origin.isEmpty() && request.getFirstHeader(HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS.toString()).equals("true")) {
            // NOTE(review): this reads Access-Control-Allow-Credentials from the
            // REQUEST, although it is normally a response-only header — confirm
            // this is the intended trigger for a credentialed (echoed-origin) response.
            setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(), origin);
            setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS.toString(), "true");
        } else {
            // Non-credentialed case: any origin may read the response.
            setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN.toString(), ANY_ORIGIN);
        }
        setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS.toString(), "CONNECT, DELETE, GET, HEAD, OPTIONS, POST, PUT, PATCH, TRACE");
        // Same list is advertised for allowed request headers and exposed response headers.
        String headers = "Allow, Content-Encoding, Content-Length, Content-Type, ETag, Expires, Last-Modified, Location, Server, Vary, Authorization";
        setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS.toString(), headers);
        setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_EXPOSE_HEADERS.toString(), headers);
        setHeaderIfNotAlreadyExists(response, HttpHeaderNames.ACCESS_CONTROL_MAX_AGE.toString(), "300");
        if (!enableCORSForAPIHasBeenSetExplicitly()) {
            // Advertise how to switch the default-on CORS support off.
            setHeaderIfNotAlreadyExists(response, "x-cors", "MockServer CORS support enabled by default, to disable ConfigurationProperties.enableCORSForAPI(false) or -Dmockserver.enableCORSForAPI=false");
        }
    }

    // Only sets the header when the response does not already define it
    // (getFirstHeader returns the empty string for absent headers here).
    private void setHeaderIfNotAlreadyExists(HttpResponse response, String name, String value) {
        if (response.getFirstHeader(name).isEmpty()) {
            response.withHeader(name, value);
        }
    }
}
|
<filename>fdm/datasources/joinquant/fields.py
# Bar-level price/volume field names requested from the joinquant data API.
# 'money' is joinquant's name for traded turnover (amount) — TODO confirm
# against the joinquant get_price documentation.
price_fields = [
    'open',
    'close',
    'high',
    'low',
    'volume',
    'money',
]
fs_fields = {
'STK_INCOME_STATEMENT': [
'pub_date', 'start_date', 'end_date', 'total_operating_revenue',
'operating_revenue', 'total_operating_cost', 'operating_cost',
'operating_tax_surcharges', 'sale_expense', 'administration_expense',
'exploration_expense', 'financial_expense', 'asset_impairment_loss',
'fair_value_variable_income', 'investment_income',
'invest_income_associates', 'exchange_income',
'other_items_influenced_income', 'operating_profit', 'subsidy_income',
'non_operating_revenue', 'non_operating_expense',
'disposal_loss_non_current_liability', 'other_items_influenced_profit',
'total_profit', 'income_tax', 'other_items_influenced_net_profit',
'net_profit', 'np_parent_company_owners', 'minority_profit', 'eps',
'basic_eps', 'diluted_eps', 'other_composite_income',
'total_composite_income', 'ci_parent_company_owners',
'ci_minority_owners', 'interest_income', 'premiums_earned',
'commission_income', 'interest_expense', 'commission_expense',
'refunded_premiums', 'net_pay_insurance_claims',
'withdraw_insurance_contract_reserve', 'policy_dividend_payout',
'reinsurance_cost', 'non_current_asset_disposed', 'other_earnings'
],
'STK_CASHFLOW_STATEMENT': [
'pub_date', 'start_date', 'end_date', 'goods_sale_and_service_render_cash',
'tax_levy_refund', 'subtotal_operate_cash_inflow',
'goods_and_services_cash_paid', 'staff_behalf_paid', 'tax_payments',
'subtotal_operate_cash_outflow', 'net_operate_cash_flow',
'invest_withdrawal_cash', 'invest_proceeds',
'fix_intan_other_asset_dispo_cash', 'net_cash_deal_subcompany',
'subtotal_invest_cash_inflow', 'fix_intan_other_asset_acqui_cash',
'invest_cash_paid', 'impawned_loan_net_increase',
'net_cash_from_sub_company', 'subtotal_invest_cash_outflow',
'net_invest_cash_flow', 'cash_from_invest', 'cash_from_borrowing',
'cash_from_bonds_issue', 'subtotal_finance_cash_inflow',
'borrowing_repayment', 'dividend_interest_payment',
'subtotal_finance_cash_outflow', 'net_finance_cash_flow',
'exchange_rate_change_effect', 'other_reason_effect_cash',
'cash_equivalent_increase', 'cash_equivalents_at_beginning',
'cash_and_equivalents_at_end', 'net_profit',
'assets_depreciation_reserves', 'fixed_assets_depreciation',
'intangible_assets_amortization', 'defferred_expense_amortization',
'fix_intan_other_asset_dispo_loss', 'fixed_asset_scrap_loss',
'fair_value_change_loss', 'financial_cost', 'invest_loss',
'deffered_tax_asset_decrease', 'deffered_tax_liability_increase',
'inventory_decrease', 'operate_receivables_decrease',
'operate_payable_increase', 'others', 'net_operate_cash_flow_indirect',
'debt_to_capital', 'cbs_expiring_in_one_year',
'financial_lease_fixed_assets', 'cash_at_end', 'cash_at_beginning',
'equivalents_at_end', 'equivalents_at_beginning',
'other_reason_effect_cash_indirect',
'cash_equivalent_increase_indirect', 'net_deposit_increase',
'net_borrowing_from_central_bank', 'net_borrowing_from_finance_co',
'net_original_insurance_cash',
'net_cash_received_from_reinsurance_business',
'net_insurer_deposit_investment', 'net_deal_trading_assets',
'interest_and_commission_cashin', 'net_increase_in_placements',
'net_buyback', 'net_loan_and_advance_increase',
'net_deposit_in_cb_and_ib', 'original_compensation_paid',
'handling_charges_and_commission', 'policy_dividend_cash_paid',
'cash_from_mino_s_invest_sub', 'proceeds_from_sub_to_mino_s',
'investment_property_depreciation'
],
'STK_BALANCE_SHEET': [
'pub_date',
'start_date',
'end_date',
'cash_equivalents',
'trading_assets',
'bill_receivable',
'account_receivable',
'advance_payment',
'other_receivable',
'affiliated_company_receivable',
'interest_receivable',
'dividend_receivable',
'inventories',
'expendable_biological_asset',
'non_current_asset_in_one_year',
'total_current_assets',
'hold_for_sale_assets',
'hold_to_maturity_investments',
'longterm_receivable_account',
'longterm_equity_invest',
'investment_property',
'fixed_assets',
'constru_in_process',
'construction_materials',
'fixed_assets_liquidation',
'biological_assets',
'oil_gas_assets',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets',
'total_non_current_assets',
'total_assets',
'shortterm_loan',
'trading_liability',
'notes_payable',
'accounts_payable',
'advance_peceipts',
'salaries_payable',
'taxs_payable',
'interest_payable',
'dividend_payable',
'other_payable',
'affiliated_company_payable',
'non_current_liability_in_one_year',
'total_current_liability',
'longterm_loan',
'bonds_payable',
'longterm_account_payable',
'specific_account_payable',
'estimate_liability',
'deferred_tax_liability',
'total_non_current_liability',
'total_liability',
'paidin_capital',
'capital_reserve_fund',
'specific_reserves',
'surplus_reserve_fund',
'treasury_stock',
'retained_profit',
'equities_parent_company_owners',
'minority_interests',
'foreign_currency_report_conv_diff',
'irregular_item_adjustment',
'total_owner_equities',
'total_sheet_owner_equities',
'other_comprehensive_income',
'deferred_earning',
'settlement_provi',
'lend_capital',
'loan_and_advance_current_assets',
'derivative_financial_asset',
'insurance_receivables',
'reinsurance_receivables',
'reinsurance_contract_reserves_receivable',
'bought_sellback_assets',
'hold_sale_asset',
'loan_and_advance_noncurrent_assets',
'borrowing_from_centralbank',
'deposit_in_interbank',
'borrowing_capital',
'derivative_financial_liability',
'sold_buyback_secu_proceeds',
'commission_payable',
'reinsurance_payables',
'insurance_contract_reserves',
'proxy_secu_proceeds',
'receivings_from_vicariously_sold_securities',
'hold_sale_liability',
'estimate_liability_current',
'deferred_earning_current',
'preferred_shares_noncurrent',
'pepertual_liability_noncurrent',
'longterm_salaries_payable',
'other_equity_tools',
'preferred_shares_equity',
'pepertual_liability_equity'
],
'FINANCE_INCOME_STATEMENT': [
'pub_date',
'start_date',
'end_date',
'operating_revenue',
'interest_net_revenue',
'interest_income',
'interest_expense',
'commission_net_income',
'commission_income',
'commission_expense',
'agent_security_income',
'sell_security_income',
'manage_income',
'premiums_earned',
'assurance_income',
'premiums_income',
'premiums_expense',
'prepare_money',
'investment_income',
'invest_income_associates',
'fair_value_variable_income',
'exchange_income',
'other_income',
'operation_expense',
'refunded_premiums',
'compensate_loss',
'compensation_back',
'insurance_reserve',
'insurance_reserve_back',
'policy_dividend_payout',
'reinsurance_cost',
'operating_tax_surcharges',
'commission_expense2',
'operation_manage_fee',
'separate_fee',
'asset_impairment_loss',
'other_cost',
'operating_profit',
'subsidy_income',
'non_operating_revenue',
'non_operating_expense',
'other_items_influenced_profit',
'total_profit',
'income_tax_expense',
'other_influence_net_profit',
'net_profit',
'np_parent_company_owners',
'minority_profit',
'eps',
'basic_eps',
'diluted_eps',
'other_composite_income',
'total_composite_income',
'ci_parent_company_owners',
'ci_minority_owners'
],
'FINANCE_CASHFLOW_STATEMENT': [
'pub_date',
'start_date',
'end_date',
'operate_cash_flow',
'net_loan_and_advance_decrease',
'net_deposit_increase',
'net_borrowing_from_central_bank',
'net_deposit_in_cb_and_ib_de',
'net_borrowing_from_finance_co',
'interest_and_commission_cashin',
'trade_asset_increase',
'net_increase_in_placements',
'net_buyback',
'tax_levy_refund',
'net_original_insurance_cash',
'insurance_cash_amount',
'net_insurer_deposit_investment',
'subtotal_operate_cash_inflow',
'net_loan_and_advance_increase',
'saving_clients_decrease_amount',
'net_deposit_in_cb_and_ib',
'central_borrowing_decrease',
'other_money_increase',
'purchase_trade_asset_increase',
'repurchase_decrease',
'handling_charges_and_commission',
'goods_sale_and_service_render_cash',
'net_cash_re_insurance',
'reserve_investment_decrease',
'original_compensation_paid',
'policy_dividend_cash_paid',
'staff_behalf_paid',
'tax_payments',
'subtotal_operate_cash_outflow',
'net_operate_cash_flow',
'invest_cash_flow',
'invest_withdrawal_cash',
'invest_proceeds',
'gain_from_disposal',
'subtotal_invest_cash_inflow',
'invest_cash_paid',
'impawned_loan_net_increase',
'fix_intan_other_asset_acqui_cash',
'subtotal_invest_cash_outflow',
'net_invest_cash_flow',
'finance_cash_flow',
'cash_from_invest',
'cash_from_bonds_issue',
'cash_from_borrowing',
'subtotal_finance_cash_inflow',
'borrowing_repayment',
'dividend_interest_payment',
'subtotal_finance_cash_outflow',
'net_finance_cash_flow',
'exchange_rate_change_effect',
'other_reason_effect_cash',
'cash_equivalent_increase',
'cash_equivalents_at_beginning',
'cash_and_equivalents_at_end',
'net_profit_cashflow_adjustment',
'net_profit',
'assets_depreciation_reserves',
'fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization',
'fix_intan_other_asset_dispo_loss',
'fixed_asset_scrap_loss',
'fair_value_change_loss',
'financial_cost',
'invest_loss',
'deffered_tax_asset_decrease',
'deffered_tax_liability_increase',
'inventory_decrease',
'operate_receivables_decrease',
'operate_payable_increase',
'others',
'net_operate_cash_flow2',
'activities_not_relate_major',
'debt_to_capital',
'cbs_expiring_in_one_year',
'financial_lease_fixed_assets',
'change_info_cash',
'cash_at_end',
'cash_at_beginning',
'equivalents_at_end',
'equivalents_at_beginning',
'other_influence2',
'cash_equivalent_increase2',
'investment_property_depreciation'
],
'FINANCE_BALANCE_SHEET': [
'pub_date',
'start_date',
'end_date',
'deposit_in_ib',
'cash_equivalents',
'deposit_client',
'cash_in_cb',
'settlement_provi',
'settlement_provi_client',
'metal',
'lend_capital',
'fairvalue_fianancial_asset',
'other_grow_asset',
'bought_sellback_assets',
'interest_receivable',
'insurance_receivables',
'recover_receivable',
'separate_receivable',
'not_time_fund',
'not_decide_fund',
'response_fund',
'health_fund',
'margin_loan',
'deposit_period',
'loan_and_advance',
'margin_out',
'agent_asset',
'investment_reveiable',
'advance_payment',
'hold_for_sale_assets',
'hold_to_maturity_investments',
'longterm_equity_invest',
'finance_out',
'capital_margin_out',
'investment_property',
'other_operate_cash_paid',
'subtotal_operate_cash_outflow',
'net_operate_cash_flow',
'invest_cash_flow',
'invest_withdrawal_cash',
'invest_proceeds',
'other_cash_from_invest_act',
'gain_from_disposal',
'subtotal_invest_cash_inflow',
'inventories',
'fixed_assets',
'constru_in_process',
'intangible_assets',
'trade_fee',
'long_deferred_expense',
'fixed_assets_liquidation',
'independent_account_asset',
'deferred_tax_assets',
'other_asset',
'total_assets',
'borrowing_from_centralbank',
'deposit_in_ib_and_other',
'shortterm_loan',
'loan_pledge',
'borrowing_capital',
'fairvalue_financial_liability',
'derivative_financial_liability',
'sold_buyback_secu_proceeds',
'deposit_absorb',
'proxy_secu_proceeds',
'proxy_sell_proceeds',
'notes_payable',
'advance_peceipts',
'insurance_receive_early',
'commission_payable',
'insurance_payable',
'salaries_payable',
'taxs_payable',
'interest_payable',
'proxy_liability',
'estimate_liability',
'compensation_payable',
'interest_insurance_payable',
'investment_money',
'not_time_reserve',
'not_decide_reserve',
'live_reserve',
'longterm_reserve',
'longterm_loan',
'bonds_payable',
'independent_account',
'deferred_tax_liability',
'other_liability',
'total_liability',
'paidin_capital',
'capital_reserve_fund',
'treasury_stock',
'surplus_reserve_fund',
'equities_parent_company_owners',
'retained_profit',
'minority_interests',
'currency_mis',
'total_owner_equities',
'total_liability_equity',
'perferred_share_liability',
'account_receivable',
'other_equity_tools',
'perferred_share_equity',
'pep_debt_equity',
'other_comprehensive_income',
'good_will',
'shortterm_loan_payable',
'accounts_payable'
]
}
|
/**
* An implementation of the SSFIM algorithm for mining frequent itemsets from a
* transaction database.<br/><br/>
*
* It is based on the description in:<br/><br/>
*
* Djenouri, Y., Comuzzi, M. and Djenouri, D., 2017, May. SS-FIM: Single Scan for Frequent Itemsets Mining in Transactional Databases.
* In Pacific-Asia Conference on Knowledge Discovery and Data Mining (pp. 644-654). Springer.
* <br/><br/>
*
* @see Itemset
* @author Philippe Fournier-Viger, 2017
*/
public class AlgoSSFIM {
/** the time the algorithm started */
long startTimestamp = 0;
/** the time the algorithm terminated */
long endTimestamp = 0;
/** the number of patterns generated */
int patternCount =0;
/** writer to write the output file **/
BufferedWriter writer = null;
/** buffer for storing the current itemset that is mined when performing mining
* the idea is to always reuse the same buffer to reduce memory usage. **/
private int[] itemsetBuffer = null;
/** buffer for storing the current transaction that is read from the file to reduce memory usage. **/
private int[] transactionBuffer = null;
/** size of the buffers */
final int BUFFERS_SIZE = 200;
/** the minSupport threshold **/
int minSupportAbsolute = 0;
/**
* Default constructor
*/
public AlgoSSFIM() {
}
/**
* Run the algorithm
* @param input the input file path
* @param output the output file path
* @param minSupport the minimum support threshold
* @throws IOException exception if error while writing the file
*/
public void runAlgorithm(String input, String output, double minSupport) throws IOException {
// initialize the buffer for storing the current itemset
itemsetBuffer = new int[BUFFERS_SIZE];
// initialize the buffer for storing the current transaction
transactionBuffer = new int[BUFFERS_SIZE];
// reset memory logger
MemoryLogger.getInstance().reset();
// record the start time of the algorithm
startTimestamp = System.currentTimeMillis();
// create a writer object to write results to file
writer = new BufferedWriter(new FileWriter(output));
// create a map to store the support of each itemset
// (the hash table)
final Map<Itemset, Integer> mapItemsetToSupport = new HashMap<Itemset, Integer>();
// We scan the database a first time to calculate the support of each item.
BufferedReader myInput = null;
String thisLine;
// this variable will count the number of item occurence in the database
int itemOccurrencesCount = 0;
// this variable will count the number of transactions
int transactionCount = 0;
try {
// prepare the object for reading the file
myInput = new BufferedReader(new InputStreamReader( new FileInputStream(new File(input))));
// for each line (transaction) until the end of file
while ((thisLine = myInput.readLine()) != null) {
// if the line is a comment, is empty or is a
// kind of metadata
if (thisLine.isEmpty() == true ||
thisLine.charAt(0) == '#' || thisLine.charAt(0) == '%'
|| thisLine.charAt(0) == '@') {
continue;
}
// the first part is the list of items
String items[] = thisLine.split(" ");
int count = items.length;
// Copy each item from the transaction to a buffer
for(int i=0; i <items.length; i++){
transactionBuffer[i] = Integer.valueOf(items[i]);
}
// Generate all subsets of a transaction except the empty set
// and output them. We use bits to generate all subsets.
for (long i = 1, max = 1 << count; i < max; i++) {
//
int itemCount = 0;
// for each bit
for (int j = 0; j < count; j++) {
// check if the j bit is set to 1
int isSet = (int) i & (1 << j);
// if yes, add the bit position as an item to the new subset
if (isSet > 0) {
itemsetBuffer[itemCount] = transactionBuffer[j];
itemCount++;
}
}
// copy the itemset to a new object
int[] newItemset = new int[itemCount];
System.arraycopy(itemsetBuffer, 0, newItemset, 0, itemCount);
Itemset itemsetObject = new Itemset(newItemset);
// Update the support of that itemset
Integer support = mapItemsetToSupport.get(itemsetObject);
if(support == null){
mapItemsetToSupport.put(itemsetObject, 1);
}else{
mapItemsetToSupport.put(itemsetObject, support + 1);
}
}
// Arrays.deepHashCode(a)
// increase the number of transactions
transactionCount++;
}
} catch (Exception e) {
// catches exception if error while reading the input file
e.printStackTrace();
}finally {
if(myInput != null){
myInput.close();
}
}
// convert from an absolute minsup to a relative minsup by multiplying
// by the database size
this.minSupportAbsolute = (int) Math.ceil(minSupport * transactionCount);
// Save the frequent itemsets
for(Entry<Itemset, Integer> entry : mapItemsetToSupport.entrySet()){
int support = entry.getValue();
if(support >= minSupportAbsolute){
int[] itemset = entry.getKey().itemset;
writeOut(itemset, itemset.length, support);
}
}
// check the memory usage
MemoryLogger.getInstance().checkMemory();
// check the memory usage again and close the file.
MemoryLogger.getInstance().checkMemory();
// close output file
writer.close();
// record end time
endTimestamp = System.currentTimeMillis();
}
/**
* Method to write an itemset to the output file.
* @param itemset an itemset
* @param support the support of the itemset
* @param length the ote,set
*/
private void writeOut(int[] itemset, int length, int support) throws IOException {
patternCount++; // increase the number of high support itemsets found
//Create a string buffer
StringBuilder buffer = new StringBuilder();
// append the prefix
for (int i = 0; i < length; i++) {
buffer.append(itemset[i]);
buffer.append(' ');
}
// append the support value
buffer.append("#SUP: ");
buffer.append(support);
// write to file
writer.write(buffer.toString());
writer.newLine();
}
/**
* Print statistics about the latest execution to System.out.
*/
public void printStats() {
System.out.println("============= SSFIM ALGORITHM v2.19 - STATS =============");
System.out.println(" Total time ~ " + (endTimestamp - startTimestamp) + " ms");
System.out.println(" Max Memory ~ " +
MemoryLogger.getInstance().getMaxMemory() + " MB");
System.out.println(" Frequent itemsets count : " + patternCount);
System.out.println("===================================================");
}
} |
<gh_stars>10-100
package nl.weeaboo.vn.impl.image;
import com.badlogic.gdx.graphics.Pixmap;
import nl.weeaboo.common.Dim;
import nl.weeaboo.vn.gdx.res.NativeMemoryTracker;
/**
 * Screenshot which initially stores its pixel data in a compressed format, then decodes the binary data using
 * {@link Pixmap} if needed.
 */
public class PixmapDecodingScreenshot extends DecodingScreenshot {

    private static final long serialVersionUID = ImageImpl.serialVersionUID;

    public PixmapDecodingScreenshot(byte[] bytes) {
        super(bytes);
    }

    @Override
    protected void tryLoad(byte[] data) {
        Pixmap decoded = new Pixmap(data, 0, data.length);
        NativeMemoryTracker.get().register(decoded);

        // The serialized pixel data is already premultiplied, so wrap it directly.
        PixelTextureData textureData = PixelTextureData.fromPremultipliedPixmap(decoded);
        setPixels(textureData, Dim.of(textureData.getWidth(), textureData.getHeight()));
    }

}
|
def loadNode(self, nodepath=None):
    """Load and return the node at ``nodepath``.

    Falls back to this object's ``snpath`` when no explicit path is given.
    """
    target = nodepath if nodepath is not None else self.snpath
    return SNCRS.Node.LoadNode(target)
Why Trump Appears Irresistible: The Politics of Social Power
mista. frank. Blocked Unblock Follow Following Feb 1, 2017
Fight of the Century: Muhammad Ali vs. Joe Frazier
There are primarily two types of attacks currently being wielded against Donald Trump: the first is shame, the second is truth. Neither has proven to be very effective — but why?
Why Shame Fails
Trump opponents have expended enormous effort to try and shame him, his associates, and his supporters. None of it has been effective. Shame tactics include: emotional manipulation, moralistic finger-wagging, character assassinations, and ad hominem attacks. The weakness of shame as a political tactic is that it attempts to change behavior through psychological coercion, and it is always destructive even when it works. Trump sees shame for the manipulative tactic that it is, and he avoids it by either steadfast denial or by humanizing his flaws. He then immediately launches a strong counterattack by pointing out any hypocrisy in his opponents, making the accusation of shame itself shameful, which is pretty devastating.
Even when shame works, it is only temporary because it just buries the problem deeper. Shame relies on taboos that tend to shut down conversation without really solving them. It is an immensely regressive strategy that tends to obfuscate rather than clarify, allowing problems to quietly fester only to appear later in a more ferocious form. In order to create lasting change, we must honestly reveal and reckon with problems as openly as possible, and stop pretending shame is in any way constructive or progressive.
Why Truth Fails
Trump seemingly cannot be held accountable. Liberals believe the scientific method overrules human judgement, and thus reason and evidence-based truth is inherently attached to political thought and governance. Liberals have also wrongly assumed that facts cannot be challenged in a political environment. However, it is through debate that facts become politicized. A good analysis of the best data still requires a strong political argument, but unfortunately, there is no thesis offered by liberals against Trump other than character-based and moral-based accusations — simply data-driven shame tactics.
Trump easily rebukes these attacks by simply offering an opposing thesis and a different analysis of the same data, and by questioning the methods used. This counterattack works because he knows the liberal attack is weak and that science (even bad science) requires a period of serious review. Even if nobody ultimately buys what he is selling, it gives Trump plausible deniability, a way to deflect and stall, and affords him time to change position later. Trump knows how to use the benefit of the doubt to his advantage, even manufacturing doubt out of thin air if necessary.
Politics is about power
The fundamental mistake is that the left has forgotten what it means to be political. Instead of seeing politics as a struggle for social power, they attempt to dictate the rules and language of an imaginary political game. It is like a referee shouting out boxing rules during a knife fight and calling it progress. Nobody is listening.
Trump is reminding the world that politics is not about facts at all, but that it is a purely social construct. It is about power and how it is thrust upon others, whether they like it or not. Politics is only limited by our imagination about what is possible or not possible in a future society. Facts are irrelevant, because they represent the past.
Trump’s perspective on politics is accurate. The problem is liberals have completely forgotten that politics is a struggle for power, and are getting steamrolled by Trump because of it.
So where do we go from here?
First, we have to stop repeating the same mistakes and expecting a different outcome. The attitude many liberals have taken post-election is to continue shame tactics while simultaneously attempting to revive a broken Democratic Party. The denial and refusal to accept reality is not only futile, it has now become toxic, with reactionaries and conspiracy theorists proposing any number of explanations that cannot be concretely proven nor disproved. Meanwhile real people are suffering, real people are on the streets protesting, yet this powerful energy that could be harnessed for change is lacking any sort of unified leadership or direction.
To challenge Trump, a new political vanguard has to emerge that 1) inspires through imagination and critique 2) realizes politics is about gaining social power and 3) uses whatever tools (including but not limited to “facts”) available to lead an authentic people’s movement towards shared goals.
The challenge of any social transformation is to overcome the traumas of history and to avoid repeating past mistakes. Looking at history as a series of struggles, we see human beings continually transforming and liberating themselves from their own oppression: from tribalism to feudalism, and again from feudalism to capitalism. We remain simultaneously freed of a feudal past and yet oppressed in the present by capitalism, and we must resolve to continue struggling and imagining a better world beyond the present, a future where inequality and subjugation of one group by another does not exist.
To develop a strong left, we must realize that we are simply inheriting the unfinished mission of our ancestors — emancipating ourselves from the circumstances that enslave us. We must cast a relentless critical eye towards the past and present, and use historical consciousness to clarify the concrete political tasks needed in the present. It is this ongoing cycle of studying history, developing a strong thesis, clarifying political tasks, and applying them in praxis that creates the momentum for social change — the true driving force of political change.
<gh_stars>0
import { TypeCheckResult } from './interfaces';
/**
 * Returns a hash of the given file. `enableCache` presumably toggles use of a
 * previously computed value — TODO confirm against the implementation.
 */
export declare function getFileHash(file: string, enableCache: boolean): Promise<string>;
/** Persists the given type-check result to the cache. */
export declare function saveCache(typeCheckResult: TypeCheckResult): Promise<void>;
/**
 * Reads the stored type-check result; `enableCache` presumably allows skipping
 * the cache entirely — TODO confirm against the implementation.
 */
export declare function readCache(enableCache: boolean): Promise<TypeCheckResult>;
|
def increase_degree(self, by: int) -> None:
    """Raise this node's degree by ``by`` and mirror the new value in
    the ``attributes`` mapping under the ``"degree"`` key.
    """
    new_degree = self.degree + by
    self.degree = new_degree
    self.attributes["degree"] = new_degree
<gh_stars>1-10
# -*- coding: utf-8 -*-
import sys, re
from counter import Counter
from collections import defaultdict
from cncframework.events.dag import DAG
import cncframework.events.styles as styles
import cncframework.events.actions as actions
class EventGraph(DAG):
    '''
    Directed acyclic graph for a CnC-OCR event log. Assumes that execution is
    serialized, i.e. no two activities can be running at the same time.

    NOTE(review): this module is Python 2 ("print >>" statements and eager
    map() used for side effects below) — porting to Python 3 requires changes.
    '''

    def __init__(self, event_log, prescribe=True, post_process=True):
        """
        EventGraph: create a DAG representing an event log

        event_log parameter should be a list of strings, each element being a
        line in the event log.
        prescribe indicates whether prescribe edges will be added to the graph.
        post_process indicates whether to emit warnings for possible errors
        based on the graph.
        """
        super(EventGraph, self).__init__()
        self.init_vars(prescribe)
        # events must be processed in log order; process_event relies on the
        # state left behind by earlier events (see _activity_gets etc.)
        for event in event_log:
            self.process_event(event)
        if post_process:
            self.post_process()

    def init_vars(self, prescribe):
        """
        Initialize instance variables to track events.

        Initialize internal variables which track state changes during
        process_event. Also put the init step on the graph as node 0.
        """
        # whether to add prescribe edges
        self.prescribe = prescribe
        # id of the next node (start at 1 since init = 0)
        self._id_count = 1
        # "label tag" identifier -> node id (see create_node_id)
        self._cache_node_ids = {}
        # [(id, label, collection, tag) for get events on the next activity prescribed]
        self._activity_gets = []
        # list of node id's for steps that have entered running state
        self._steps_run = []
        # prescribe node id -> (prescriber, [items in get list])
        # NOTE(review): never populated in this file; post_process only reads
        # its keys, so the duplicate-prescribe warning can never fire — verify.
        self._steps_prescribed = {}
        # list of node id's for items that have been put
        self._items_put = []
        # put init on the graph and style it like a step
        self.add_node(0)
        self.set_property(0, "label", "init")
        self.style_step(0, "init", "0")
        self.mark_running_time(0, 1)
        # (id, tag) of last step to enter running state
        self._last_running_activity_id_tag = (0, "0")
        # the id of the finalize node
        self.finalize_node = None

    def process_event(self, event):
        """
        Add event, a line from the event log, to the DAG.

        Requires that init_vars be called first, and expects that the rest of
        the event log up to this event has already been processed.
        """
        # skip anything without an @
        if "@" not in event:
            return
        # action is one of the things defined in actions
        # label is either the collection or the step name, depending on action
        # tag is the tag of the step or collection
        # format: ACTION LABEL @ TAG
        pattern = re.compile(r'([^\s]+) ([^\s]+) @ (.+)')
        match = pattern.match(event)
        action, label, tag = [match.group(i + 1) for i in range(3)]
        # make sure that cncPrescribe_StepName and StepName are treated the same
        node_id = self.create_node_id(action, label, tag)
        node_label = self.create_node_label(action, label, tag)
        if action == actions.PRESCRIBED:
            # connect the items this step got (recorded by earlier GET_DEP
            # events) to the newly prescribed step
            self.add_get_edges(node_id, node_label, self._activity_gets)
            if self.prescribe:
                self.add_prescribe_edge(self._last_running_activity_id_tag[0], node_id)
            self.style_step(node_id, label, tag)
            # clear out the activity get list to prepare for next prescribe
            self._activity_gets = []
            # track the finalize node
            if label.endswith("_finalize"):
                self.finalize_node = node_id
        elif action == actions.RUNNING:
            # create a new id which notes when this node runs
            self.mark_running_time(node_id,
                self.create_node_id(action, label, tag, force = True))
            # record this tag as being the currently running activity
            self._last_running_activity_id_tag = (node_id, tag)
            self._steps_run.append(node_id)
        elif action == actions.DONE:
            # nothing to do for this action
            pass
        elif action == actions.GET_DEP:
            # happens before a step is prescribed, so we keep track of these items
            self._activity_gets.append((node_id, node_label, label, tag))
        elif action == actions.PUT:
            self.add_put_edges(node_id, node_label,
                label, *self._last_running_activity_id_tag)
            self._items_put.append(node_id)
        else:
            print >>sys.stderr, "Unrecognized action %s" % action

    def create_node_id(self, action, label, tag, force=False):
        """
        Return a node id for the given action, label, tag.

        Node id's are integers in the order of appearance in the log.
        If create_node_id is called more than once with the same parameters,
        the same id will be returned each time.
        However if force is True, then a new id will be created even if we have
        seen these arguments before.
        """
        # NOTE: the cache key ignores `action`, so different actions on the
        # same label/tag intentionally resolve to the same node.
        ident = "%s %s" % (label, tag)
        if ident not in self._cache_node_ids or force:
            self._cache_node_ids[ident] = self._id_count
            self._id_count += 1
        return self._cache_node_ids[ident]

    def create_node_label(self, action, label, tag):
        """Return human-readable label for given action, label, tag."""
        return "%s: %s" % (label, tag.replace(", ", ","))

    def mark_running_time(self, step_id, running_time):
        """Do something to step_id to indicate that it runs at running_time."""
        self.set_property(step_id, "href", "{}".format(running_time))
        self.set_property(step_id, "run_time", running_time)

    def style_step(self, step_id, step_name, step_tag):
        """Style the node for a step."""
        self.set_property(step_id, "color", styles.color('step'))
        self.set_property(step_id, "tag", step_tag)
        self.set_property(step_id, "name", step_name)
        self.set_property(step_id, "type", "step")

    def style_item(self, item_id, label, collection_name, item_tag):
        """Style the node for an item."""
        self.set_property(item_id, "shape", styles.shape('item'))
        self.set_property(item_id, "color", styles.color('item', collection_name))
        self.set_property(item_id, "label", label)
        self.set_property(item_id, "tag", item_tag)
        self.set_property(item_id, "name", collection_name)
        self.set_property(item_id, "type", "item")

    def add_get_edges(self, step, step_label, items):
        """
        Add get edges from each item node (id, label, collection, tag) to step node id.

        Adds, styles, and labels item nodes if they are not already on the
        graph.
        """
        self.add_node_with_parents(step, [i[0] for i in items])
        self.set_property(step, "label", step_label)
        # these are collection nodes, so make them green boxes
        for n, label, collection, tag in items:
            self.style_item(n, label, collection, tag)

    def add_put_edges(self, item_id, item_label, collection, step_id, tag):
        """
        Add put edge from step node id to the item of given collection

        Also style and label the item node.
        Note: Both add_get_edges and add_put_edges will add and style item
        nodes. This is done because PUT ⇏ GET.
        """
        self.add_node_with_parents(item_id, [step_id])
        self.style_item(item_id, item_label, collection, tag)

    def add_prescribe_edge(self, parent, child):
        """Add and style a prescribe edge from the parent id to the child id."""
        self.add_child(parent, child)
        self.set_edge_property(parent, child, "style", styles.style("prescribe"))

    def gotten_without_put(self):
        """Return set of item node id's with indegree = 0."""
        return {n for n in self if self.property(n, "type") == "item" and not self.in_degree(n)}

    def put_without_get(self):
        """Return set of item node id's with outdegree = 0."""
        return {n for n in self if self.property(n, "type") == "item" and not self.out_degree(n)}

    def post_process(self):
        """
        Perform some post processing tasks on the completed graph.

        Emit warnings and/or highlight occurrences of possible bugs:
        -- Items put multiple times
        -- Steps prescribed multiple times
        -- Steps prescribed without being run
        -- Items gotten without a put and items put without a get
        -- Nodes with no path to the final finalize.
        """
        # warn for items in sequence of node ids appearing more than once
        def warn_on_duplicates(sequence, verb):
            # print a warning on duplicates in a sequence
            counts = Counter(sequence)
            for k in counts:
                if counts[k] > 1:
                    print >>sys.stderr, "Warning: %s %s %d times." % (
                        self.property(k, 'label', ''), verb, counts[k])

        # emit warning if set of node id's is nonempty, and set color if given
        def warn_on_existence(s, msg, color=None):
            if len(s) > 0:
                # NOTE(review): relies on Python 2 eager map() for its side
                # effects; under Python 3 these set_property calls would never
                # run because map() is lazy.
                if color:
                    map(lambda n: self.set_property(n, 'color', color), s)
                else:
                    map(lambda n: self.set_property(n, 'penwidth', 2.5), s)
                print >>sys.stderr, "Highlighting in %s:\n%s: %s" % (
                    color if color else 'bold',
                    msg,
                    ', '.join(map(lambda i: self.property(i, 'label', ''), s)))

        warn_on_duplicates(self._steps_prescribed.keys(), "prescribed")
        warn_on_duplicates(self._items_put, "put")
        # warn on items gotten but not put or put without get
        gotten_without_put = self.gotten_without_put()
        put_without_get = self.put_without_get()
        warn_on_existence(gotten_without_put, "Items with GET without PUT",
                          styles.color('get_without_put'))
        warn_on_existence(put_without_get, "Items with PUT without GET",
                          styles.color('put_without_get'))
        # warn on nodes that have no path to the finalize
        if self.finalize_node is not None:
            # BFS backwards from finalize over the transposed graph; anything
            # not visited cannot reach finalize
            trans = self.transpose()
            visits = {self.finalize_node}
            trans.bfs(self.finalize_node, visitor = visits.add)
            warn_on_existence(set(self).difference(visits),
                              "Nodes without path to FINALIZE")
Serralles Notches New Lap Record of 196.374 mph on Dallara-Mazda Indy Debut
Indianapolis, Ind. – Englishman Jack Harvey has been too often the bridesmaid in his year and a half of competition in Indy Lights Presented by Cooper Tires, but he was out in front when it mattered in this afternoon’s Freedom 100 Presented by Allied Building Products at the Indianapolis Motor Speedway. The race was held in glorious weather conditions in front of a sizable Coors Light Carb Day crowd, part of the build-up toward Sunday’s 99th running of the Indianapolis 500.
Schmidt Peterson Motorsports with Curb-Agajanian teammates Ethan Ringel, Scott Anderson and RC Enerson completed a sweep of the top four positions as the race finished under caution after series points leader Ed Jones (Carlin) crashed out of seventh place in Turn Four with just three laps remaining.
For Ringel and Anderson, the finishes represented the best of their careers in Indy Lights. The race also established a new Indy Lights race lap record for Felix Serralles (Belardi Auto Racing) as he circulated the 2.5-mile speedway at an average speed of 196.374 mph, eclipsing the previous mark of 192.301 mph set by Brandon Wagner in 2011.
Harvey and Ringel, who qualified on the pole yesterday at a new record average speed of 197.684 mph, quickly established themselves as the pacesetters. Harvey immediately grasped the advantage by sling-shotting past Ringel at Turn One on the opening lap, as the teammates soon began to inch clear of Kyle Kaiser, who had vaulted from sixth to third on the first lap for Juncos Racing earning the RePlay XD Move of the Race Award.
The two leaders exchanged places several times during the first 10 laps, while Anderson worked his past both Sean Rayhall (8Star Motorsports) and Kaiser to move into third. Enerson also drafted into contention as the four Schmidt Peterson cars edged away at the front of the field.
Harvey was content to settle in behind Ringel through the middle stages of the 100-mile race, but as the leaders circulated relatively conservatively in the 190 mph range – as opposed to the 194 mph pace in the early laps – Kaiser and a train of cars behind him began to inch closer to the leaders.
The pace quickened again on Lap 23 – back above 192 mph – whereupon Ringel and Harvey again edged clear of Enerson, who had drafted past Anderson for third on Lap 22. Harvey, who began the weekend second in the points table (after also finishing second as a rookie in 2014, losing out only on a tie-break to eventual champion Gabby Chaves), grasped the lead on Lap 25, only for Ringel to regain the upper hand next time around.
Finally, on Lap 34, Harvey took advantage of the draft from Spencer Pigot’s lapped Juncos Racing Dallara-Mazda to slip back into the lead at Turn One. It proved to be the decisive move, as the yellow flags waved on Lap 38 and there was insufficient time for the race to be restarted. The sixth win of Harvey’s career elevates him to the championship lead by 192-179 over Jones.
Anderson repassed Enerson for third on Lap 36, while Kaiser finished fifth ahead of Rayhall and Juan Piedrahita (Belardi Auto Racing.)
The second half of the 16-race Indy Lights Presented by Cooper Tires season will commence with a pair of races on the streets of Toronto, Ont., Canada, on June 12-14.
Jack Harvey #42 Racing Steps Foundation/Curb Records-Schmidt Peterson Motorsports with Curb-Agajanian): “I was happy Ethan was on the pole, but I wanted to win the race. We started to establish a gap but Ethan was trying to save his tires and it let RC and Scott back into it. It’s a chess game: you don’t really want to lead going into the last lap but when the caution comes out, all you want to do is lead. I’m sorry for the fans that we didn’t have a four-wide finish, but for me, this is what I wanted to achieve. I think everyone knew last year that I had the pace on road and street courses but the ovals needed a little work. I worked really hard over the winter and the first one we come to, we get our first win – and it’s in the Freedom 100 at Indianapolis. This is the one race I wanted to win, because I understand the importance of it, what it means. To achieve two out of three wins here, I’m incredibly proud of my team for giving me the car – and that I didn’t hit any walls! I still haven’t found the words to fully describe it but the first thing I said to (team owner) Sam (Schmidt) was ‘can I drive your Indy car?’” |
<filename>bqueryd/messages.py<gh_stars>1-10
import cPickle
import json
import time
def msg_factory(msg):
    """
    Reconstruct the appropriate Message subclass from a raw message.

    :param msg: either a JSON-encoded string, an already-decoded dict,
        or something falsy.
    :return: an instance of the Message subclass whose ``msg_type`` matches
        the message's ``msg_type`` entry; a plain ``Message`` when ``msg``
        is empty or unparseable.
    """
    if type(msg) is str:
        try:
            msg = json.loads(msg)
        except ValueError:
            # BUG FIX: this previously read ``msg is None`` -- a no-op
            # identity comparison -- so an unparseable string stayed in
            # ``msg`` and crashed on ``msg.get`` below.  Discard it instead.
            msg = None
    if not msg:
        return Message()
    msg_mapping = {'calc': CalcMessage, 'rpc': RPCMessage, 'error': ErrorMessage,
                   'worker_register': WorkerRegisterMessage,
                   'busy': BusyMessage, 'done': DoneMessage,
                   'ticketdone': TicketDoneMessage,
                   'stop': StopMessage, None: Message}
    # NOTE(review): an unrecognized msg_type yields None here and a
    # TypeError on the next line -- consider defaulting to Message.
    msg_class = msg_mapping.get(msg.get('msg_type'))
    return msg_class(msg)
class MalformedMessage(Exception):
    # Exception type for messages that cannot be interpreted.  Not raised
    # anywhere in this module -- presumably raised by callers; confirm.
    pass
class Message(dict):
    """
    Base class for all bqueryd wire messages.

    A Message is a plain dict with a few conventions: a class-level
    ``msg_type`` discriminator (used by ``msg_factory`` to re-create the
    right subclass), a ``payload``, a protocol ``version`` (default 1) and
    a ``created`` timestamp.  Binary values are pickled and base64-encoded
    so the whole message stays JSON-serializable.
    """

    # Discriminator stored into the message itself; overridden by subclasses.
    msg_type = None

    def __init__(self, datadict=None):
        # BUG-PRONE DEFAULT FIX: the original used a mutable default
        # (``datadict={}``); ``None`` is safer and the None case was
        # already handled below, so caller-visible behavior is unchanged.
        if datadict is None:
            datadict = {}
        self.update(datadict)
        self['payload'] = datadict.get('payload')
        self['version'] = datadict.get('version', 1)
        self['msg_type'] = self.msg_type
        self['created'] = time.time()

    def copy(self):
        """Return a copy re-wrapped in the correct Message subclass."""
        newme = super(Message, self).copy()
        return msg_factory(newme)

    def isa(self, payload_or_instance):
        """True if the argument matches this message's type or its payload."""
        if self.msg_type == getattr(payload_or_instance, 'msg_type', '_'):
            return True
        if self.get('payload') == payload_or_instance:
            return True
        return False

    def add_as_binary(self, key, value):
        # Pickle + base64 keeps arbitrary objects JSON-safe.
        # NOTE(review): cPickle and str.encode('base64') are Python 2 only.
        self[key] = cPickle.dumps(value).encode('base64')

    def get_from_binary(self, key, default=None):
        """Inverse of add_as_binary; returns ``default`` when key is unset."""
        buf = self.get(key)
        if not buf:
            return default
        return cPickle.loads(buf.decode('base64'))

    def to_json(self):
        # We could do some serialization fixes in here for things like
        # datetime or other binary non-json-serializable members.
        return json.dumps(self)

    def set_args_kwargs(self, args, kwargs):
        """Store RPC call arguments as a pickled ``params`` entry."""
        params = {'args': args, 'kwargs': kwargs}
        self.add_as_binary('params', params)

    def get_args_kwargs(self):
        """Inverse of set_args_kwargs; returns ``(args, kwargs)``."""
        params = self.get_from_binary('params', {})
        kwargs = params.get('kwargs', {})
        args = params.get('args', [])
        return args, kwargs
# Concrete message types.  Each subclass only overrides ``msg_type``, the
# discriminator ``msg_factory`` uses to rebuild the right class from the wire.
class WorkerRegisterMessage(Message):
    msg_type = 'worker_register'
class CalcMessage(Message):
    msg_type = 'calc'
class RPCMessage(Message):
    msg_type = 'rpc'
class ErrorMessage(Message):
    msg_type = 'error'
class BusyMessage(Message):
    msg_type = 'busy'
class DoneMessage(Message):
    msg_type = 'done'
class StopMessage(Message):
    msg_type = 'stop'
class TicketDoneMessage(Message):
    msg_type = 'ticketdone'
|
You'd be hard-pressed to tell sometimes, but the United States Congress actually maintains its own in-house think tank, staffed by a seasoned team of lawyers, economists, and policy experts: the Congressional Research Service. Founded in 1914 as the Legislative Reference Service, CRS now commands an annual budget in excess of $100 million. But while its funding is public, the reports CRS produces—covering 150 distinct issue areas, according to its 2007 annual report—aren't. While members of Congress frequently choose to make reports they've commissioned public, CRS itself has resisted multiple efforts to amend the law to make its research public as a matter of course.
As that 2007 report puts it, CRS's "policy of confidentiality" is meant to reassure legislators "that they can come to CRS to explore issues, and they can do so without question, challenge, or disclosure." The idea is to let members inquire into controversial topics without exposing themselves to controversy—but the result is also that legislators get to pick and choose which findings they want to share with the rest of the world.
The Center for Democracy and Technology has been doing its best to pierce that veil of confidentiality with OpenCRS, a repository of all the reports that have been released or leaked. This weekend, they got a treasure trove of fresh documents thanks to Wikileaks, which has passed along 6,780 CRS reports—the vast majority of which weren't previously available. Almost 2,300 of them were produced or updated in the past year, and many address such timely issues as the DTV transition, the powers of the Director of National Intelligence, economic stimulus, the Fairness Doctrine, consumer privacy legislation, and the future of the Office of Science and Technology Policy.
Congress, of course, could choose to make all this redundant by simply instructing CRS to publicly post its own reports. In the meantime, you can BitTorrent the whole cache from, believe it or not, The Pirate Bay. |
<reponame>kartaggen/logistics-company
package com.f97808.logisticscompany.jpa;
import com.f97808.logisticscompany.entity.Employee;
import com.f97808.logisticscompany.entity.Office;
import com.f97808.logisticscompany.entity.User;
import org.springframework.data.repository.CrudRepository;
import java.util.List;
import java.util.Optional;
/**
 * Spring Data repository for {@link Employee} entities keyed by their
 * integer id. Query methods are derived from method names by Spring Data.
 */
public interface EmployeeRepository extends CrudRepository<Employee, Integer> {
    /** Returns all employees assigned to the given office. */
    List<Employee> findByOffice(Office office);
}
/**
* Created by tlanders on 5/9/2017.
*/
public abstract class BaseService<TE, T> {
private Repository<TE, T> repository;
public BaseService(Repository<TE, T> repository) {
this.repository = repository;
}
public TE findById(T id) {
return repository.get(id);
}
} |
//!
//! @file main.h
//! @brief
//! @version 0.1
//!
//! @date 2020-09-24
//! @author <NAME> (<EMAIL>)
//!
//! @institution University of Colorado Boulder (UCB)
//! @course ECEN 5823-001: IoT Embedded Firmware (Fall 2020)
//! @instructor <NAME>
//!
//! @assignment ecen5823-assignment7-baquerrj
//!
//! @resources Utilized Silicon Labs' EMLIB peripheral libraries to implement functionality
//!
//! @copyright All rights reserved. Distribution allowed only for the use of assignment grading. Use of code excerpts allowed at the discretion of author. Contact for permission.
//!
// NOTE(review): identifiers containing double underscores (__MAIN_H___) are
// reserved for the implementation in C; consider a guard such as MAIN_H_
// (confirm nothing else tests this macro before renaming).
#ifndef __MAIN_H___
#define __MAIN_H___
#include "stdint.h"
#include "sleep.h"
//! Constants for time conversions.
//! NOTE(review): `static const` objects in a header give every translation
//! unit its own private copy; harmless here, but an enum or #define avoids it.
static const uint16_t USEC_PER_MSEC = 1000;
static const uint16_t MSEC_PER_SEC = 1000;
//! Constant specifying LETIMER0 COMP0 interrupt period in milliseconds.
//! Changing this value changes the period, e.g. setting it to 2250
//! would set the period to 2.25 seconds
static const uint16_t TIMER_PERIOD_MS = 3000;
//! Uncomment one of the following lines to define the operating energy mode
// #define SLEEP_MODE sleepEM0
// #define SLEEP_MODE sleepEM1
// #define SLEEP_MODE sleepEM2
#define SLEEP_MODE sleepEM3
//! Comment out the following line to wait for interrupts in while-loop
#define ENABLE_SLEEPING
//! Alias used by the application for the configured sleep mode.
#define LOWEST_ENERGY_MODE (SLEEP_MODE)
#endif // __MAIN_H___
|
def write_calibration_file(filename, frequency_array, calibration_draws, calibration_parameter_draws=None):
    """
    Write calibration response draws to an HDF5 file.

    The complex ``calibration_draws`` are split into relative amplitude
    (``abs``) and phase (``angle``) arrays stored under a ``deltaR`` group,
    alongside the frequency array.  Optional parameter draws are appended to
    the same file by pandas under the ``CalParams`` key.

    :param filename: path of the HDF5 file to create (overwritten).
    :param frequency_array: frequencies corresponding to the draws.
    :param calibration_draws: complex array of calibration response draws.
    :param calibration_parameter_draws: optional pandas DataFrame of the
        parameter draws -- assumed to support ``to_hdf``; confirm at call site.
    """
    # BUG FIX: numpy was used below (np.abs / np.angle) but never imported;
    # import locally, matching the file's local-import style for `tables`.
    import numpy as np
    import tables

    calibration_file = tables.open_file(filename, 'w')
    try:
        deltaR_group = calibration_file.create_group(calibration_file.root, 'deltaR')
        calibration_file.create_carray(deltaR_group, 'draws_amp_rel', obj=np.abs(calibration_draws))
        calibration_file.create_carray(deltaR_group, 'draws_phase', obj=np.angle(calibration_draws))
        calibration_file.create_carray(deltaR_group, 'freq', obj=frequency_array)
    finally:
        # Ensure the HDF5 handle is released even if a write above fails.
        calibration_file.close()
    if calibration_parameter_draws is not None:
        # pandas re-opens the (now closed) file in append mode.
        calibration_parameter_draws.to_hdf(filename, key='CalParams', data_columns=True, format='table')
Only a few people (9 percent) insist that 1991 was the year of "victory of a democratic revolution which put an end to Communist Party rule."
Tens of thousands of Yeltsin's (center) supporters gathered around the president’s residence at the White House in Moscow. Source: ITAR-TASS
A relative majority of Russians (41 percent) see the August 19, 1991, putsch in the former Soviet Union as a tragedy which had harmful implications for the country and its people.
The number has grown by 14 percentage points over the past 20 years, from 27 percent in 1994, the Levada Center pollster told Interfax, referring to a nationwide poll held in late July.
The events are seen as a tragedy mostly by businessmen (52 percent), pensioners (48 percent), Russians older than 55 (49 percent), people with secondary education (46 percent), respondents with a low and high consumer status (46 percent in each category) and people in cities with a population less than 100,000 (51 percent).
Some 36 percent described the August 1991 events as an episode in the struggle for power and their number has considerably reduced since 1994 (53 percent). This opinion is mostly shared by salaried workers (44 percent), the unemployed (43 percent), Russians younger than 25 (44 percent), people with less than secondary education (42 percent), people with a low consumer status who can afford only food and clothes (42 percent) and villagers (44 percent), the sociologists said.
Only a few people (9 percent) insist that 1991 was the year of "victory of a democratic revolution which put an end to Communist Party rule." The opinion is mostly asserted by present-day executives and managers (22 percent), businessmen and specialists (17 percent in each category), Russians aged 25 to 40 (12 percent), people with higher education (14 percent), people with a high consumer status who can easily afford a car (21 percent) and Muscovites (33 percent).
In the opinion of 74 percent of the respondents, the country took a wrong turn after the putsch. This was mostly said by businessmen (64 percent), salaried workers (55 percent), pensioners (54 percent), Russians older than 55 (55 percent), people with a low consumer status (50 percent) and people in cities with a population from 100,000 to 500,000 (58 percent).
Twenty-seven percent of the respondents argued the opposite, and 26 percent were undecided.
The political crisis of August 1991 was triggered by the attempt of some senior Soviet officials to disrupt the signing of a new union treaty. Soviet President Mikhail Gorbachev was isolated in his Crimean residence for that purpose.
Boris Yeltsin, then president of the Russian Soviet Federative Socialist Republic, and other leaders of the Russian Federation headed the resistance to the attempted coup, which ended on August 22 with the arrest of the conspirators, one of whom, Interior Minister Boris Pugo, committed suicide.
Soviet President Mikhail Gorbachev declared his resignation from the position of the Soviet Communist Party Central Committee Secretary General and asked Central Committee members for voluntary dissolution of the Soviet Union on August 24, 1991.
All rights reserved by Rossiyskaya Gazeta. |
<gh_stars>0
import { Observable } from 'rxjs';
/**
 * Type declaration for a service that tracks minimized window items and
 * broadcasts the current item through an Observable.
 */
export declare class MinimizeService {
    // NOTE(review): the property name is misspelled ("cureentWidnowData",
    // presumably "currentWindowData"), but it is part of this public
    // declaration -- renaming it would break consumers.
    cureentWidnowData: any[];
    private messageSource;
    /** Stream of the most recently minimized/closed item. */
    currentMessage: Observable<any>;
    constructor();
    /** Records `itemData` as minimized -- behavior defined in the implementation. */
    onMinimizeClick(itemData: any): void;
    /** Handles closing of a minimized item -- behavior defined in the implementation. */
    onCloseClick(data: any): void;
}
|
Guest contributor Michael Coats takes a trip back to where it all began…
I’ve always (sliiight fabrication there) wished that I could have watched the classic episodes of Doctor Who. Unfortunately, there were a few obstacles in my way, not least the small matter being born 3 years too late to have watched any. Actually, I don’t think being born in 1989 would’ve helped much; I doubt my newborn self would’ve been able to quite wrap his head around it. Maybe make that 7 years too late!
Eventually, I realised that this situation would have to be rectified before the 50th anniversary, or I couldn’t live with the shame and would’ve had to fly a spaceship into the sun to incinerate myself. Sun filter descending…where was I? I’ve seen enough commenters on this site say that they’re not interested in watching the Classic episodes, so I’m writing this in order to convince some of the “Nu Whovians” to do so. To paraphrase the Ninth Doctor: it won’t be quiet, it won’t be safe, and I can’t promise it won’t sometimes be irreverent, but I’ll tell you what it will be: the trip of a lifetime.
First, I’ll be looking at the serials of Season 1, though (even partly) missing serials won’t be covered. I cannot watch incomplete stories.
An Unearthly Child
It all begins with An Unearthly Child. Having seen it now, I see it shares an unfortunate similarity with Rose: that despite both being the two most crucial stories in the show’s history, both come across as rather average stories (“Burn the witch!”). Whilst the first part of An Unearthly Child is a strong episode, full of mystery and intrigue, the fire in it seems to splutter out (ironic considering the subject matter). I feel that it doesn’t do a good enough job in establishing the Doctor (William Hartnell) and Susan (Carol Ann Ford) as characters and the concept is stretched a couple of episodes too long. As a result, it’s my second least favourite serial of the series.
The Daleks
Next, my two favourites of the series, the first of which is The Daleks. I understand why they used to leave children cowering behind the sofa, because despite their slightly unorthodox design, they are clever, cunning and ever so slightly eerie (like their first two appearances of the revived series in Dalek and The Parting of the Ways). The methods used to to defeat the Daleks are ingenious, especially the disabling and Ian’s subsequent impersonation of one. It was also nicely fuelled by the tension between The Doctor, Ian (William Russell) and Barbara (Jacqueline Hill), something which came to a head in the next serial.
The Edge of Destruction
And as if I’d just hopped in the TARDIS, here we are. And speaking of the TARDIS; yes, The Edge of Destruction is notable for; amongst other reasons: it is one of only two full-length stories set entirely within the TARDIS, the other being Amy’s Choice (dreams don’t count). The episode plays out like a cross between elements of 42 and The Doctor’s Wife, which certainly is no bad thing in my book, and should be in your book too (and I’ll chuck your book into a supernova if it says otherwise). But, it’s the atmosphere of distrust which really makes this story; everyone is blaming each other for what’s going wrong which results in a really satisfying ending I’m not going to spoil for you. River would kill me. Basically, this is what drama is all about.
The Keys of Marinus & The Aztecs
Skipping the missing Marco Polo, we arrive at what I feel is the weakest story of the series, The Keys of Marinus. While having a different threat each week did little to establish the Voord as threatening creatures, they come across as men in suits and little else. Also, like An Unearthly Child, the episode feels stretched. After that, we had The Aztecs, which did a rather nice job with Barbara. Her being mistaken for an Aztec God was certainly an interesting concept, and I enjoyed watching her attempts to change the history of a culture, ultimately futile though they were. It also gave us the First Doctor’s amazingly brilliant line; ‘Yes, I made some cocoa and got engaged.’
The Sensorites
Lastly, we have The Sensorites, which had a significant impact on the revived series. Russell T Davies stated that the eponymous creatures were the inspiration for the Ood, and the Ood Sphere is apparently located in the same galaxy as the Sense Sphere. In addition, Susan's description of Gallifrey in the final part of the episode is paraphrased by the Tenth Doctor in Gridlock, and it also introduces the Doctor's dislike of weapons, a prevalent theme in the RTD era. RTD was quite the fan. It is a good story, involving treachery, telepathy and probably other things beginning with "T".
Some may find some things jarring, the First Doctor’s cranky grandfather characterisation for example. He’s not quite as mellow as Nine, nor quite as angry/arrogant as Ten in Time Lord Victorious mode. He mellows out over the course of the series, but remains quick to anger. The main problem I have with is with Susan. I know that sometimes in early Doctor Who female characters were often quite weak; a product of the times. However, a constantly shrieking and mostly useless character is not what I expected of the Doctor’s granddaughter. She is more useful in The Sensorites, but the damage has already been done.
Apart from that though, fans introduced by the revived series will feel reassured by how familiar it all feels, apart from the male companion filling the action man role. Ian Chesterton is absolutely awesome, a man with both brains and brawn. Barbara Wright might not be the typical action girl like the revived series companions, but as a schoolteacher and wilful character, she’s one of the strongest female characters you’re likely to have seen in 1963. Best of all, there isn’t a single serial that I’d rate worse than average. Now if you’ll excuse me, I’m off to watch Planet of Giants. Quite so. |
<filename>Codeforces-Code/Codeforces Round 327 (Div. 1)/C.cpp
#include <cstdio>
#include <algorithm>
// Maximum grid dimension (with slack).
const int MAXN = 1111;
// Capacity of the array backing the 0-1 BFS deque (see solve()).
const int MAXQ = 6666666;
// "Infinity" small enough that three of them can be summed in an int.
const int INF = ~0u >> 3;
// 4-neighborhood offsets (down, up, right, left).
const int DX[] = {1, -1, 0, 0};
const int DY[] = {0, 0, 1, -1};
// n x m grid; d[a][i][j] = min '.' cells to pave to reach (i,j) from state a.
int n, m, d[4][MAXN][MAXN];
char map[MAXN][MAXN];
// Backing storage for the double-ended BFS queue; solve() starts mid-array.
std::pair<int, int> Q[MAXQ];
// v[i][j]: true while cell (i,j) is currently enqueued (reset on pop).
bool v[MAXN][MAXN];
// Multi-source 0-1 BFS for state `a` (cells marked '1'..'3').
// Afterwards d[a][i][j] is the minimum number of empty ('.') cells that must
// be paved to walk from any cell of state `a` to (i, j): stepping onto '.'
// costs 1, stepping onto any state's cell costs 0, '#' is impassable.
void solve(int a) {
    int left = 0, right = 0;
    // Initialize all distances to "unreachable".
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++)
            d[a][i][j] = INF;
    // Deque simulated on a flat array: cost-0 relaxations are pushed to the
    // front (queue[left--]), cost-1 to the back (queue[++right]); starting
    // from the middle of Q leaves room to grow in both directions.
    std::pair<int, int> *queue = Q + 3333333;
    // Seed with every cell already belonging to state `a` (distance 0).
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++) {
            if (map[i][j] == '0' + a) {
                d[a][i][j] = 0;
                queue[++right] = std::make_pair(i, j);
                v[i][j] = true;
            }
        }
    while (left < right) {
        left++;
        std::pair<int, int> now = queue[left];
        for (int dir = 0; dir < 4; dir++) {
            int nx = now.first + DX[dir];
            int ny = now.second + DY[dir];
            // Skip out-of-bounds and blocked cells.
            if (nx < 1 || nx > n || ny < 1 || ny > m || map[nx][ny] == '#') continue;
            int cost = (map[nx][ny] == '.');  // paving an empty cell costs 1
            if (d[a][nx][ny] > d[a][now.first][now.second] + cost) {
                d[a][nx][ny] = d[a][now.first][now.second] + cost;
                if (!v[nx][ny]) {
                    // 0-1 BFS: zero-cost edges go to the deque front,
                    // unit-cost edges to the back.
                    if (cost == 0) queue[left--] = std::make_pair(nx, ny);
                    else queue[++right] = std::make_pair(nx, ny);
                    v[nx][ny] = true;
                }
            }
        }
        // Mark popped so the cell may re-enter the queue if improved later.
        v[now.first][now.second] = false;
    }
}
int main() {
    // Contest-style redirected input: read the grid from "C.in".
    freopen("C.in", "r", stdin);
    scanf("%d%d", &n, &m);
    for (int i = 1; i <= n; i++) {
        scanf("%s", map[i] + 1);  // 1-based columns
    }
    int answer = INF;
    // One 0-1 BFS per state '1'..'3'.
    for (int a = 1; a <= 3; a++) solve(a);
    // Try every passable cell as the meeting point of the three states.
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++) {
            if (map[i][j] == '.') {
                // An empty meeting cell is paved once but counted in all
                // three distances, hence the -2 correction.
                answer = std::min(answer, d[1][i][j] + d[2][i][j] + d[3][i][j] - 2);
            } else if (map[i][j] != '#') {
                answer = std::min(answer, d[1][i][j] + d[2][i][j] + d[3][i][j]);
            }
        }
    // If any state is unreachable, every sum stays >= INF and answer keeps INF.
    if (answer == INF) puts("-1");
    else printf("%d\n", answer);
    return 0;
}
|
def make_split_idx(args):
    """
    Build train/val/test image-index files for the ReferIt dataset.

    Loads the ReferIt annotations from ``<root>/data/refer/data/``, obtains
    the per-split annotation ids from ``referit_parser``, reduces each split
    to its unique image indexes (the part of the id before the first ``_``)
    and writes them, one per line, to
    ``<root>/data/referit_raw/{train,val,test}.txt``.

    :param args: namespace with a ``root`` attribute pointing at the project
        root (assumed to end with a path separator -- TODO confirm).
    """
    print("Make idx list. ")
    root = args.root
    data_root = root + 'data/refer/data/'
    output_folder = root + 'data/referit_raw/'
    ref_ann, ref_inst_ann = referit_parser.referit_load_data(data_root)
    annotations = referit_parser.get_annotations(ref_ann, ref_inst_ann)
    splits = referit_parser.referit_get_idx_split(annotations)
    # BUG-PRONE CODE REMOVED: a `tmp` dict was populated here but never read;
    # its comprehension also reused `i` as the loop variable, which leaks in
    # Python 2 and would have corrupted the dict keys.  Dropped entirely.
    for i, name in enumerate(['train', 'val', 'test']):
        split_name = output_folder + name + '.txt'
        with open(split_name, 'w') as file:
            # Reduce ids like '1234_5' to the image id '1234', de-duplicated.
            curr_indexes = list(set(ann_id.split('_')[0] for ann_id in splits[i]))
            file.writelines('\n'.join(curr_indexes))
        print('Idx {} saved: {} '.format(name, split_name))
    print('End.')
#include<bits/stdc++.h>
using namespace std;
// --- Competitive-programming template: short type aliases ---
typedef long long ll;
typedef long double ld;
typedef double db;
typedef string str;
typedef pair<int, int> pi;
typedef pair<ll,ll> pl;
typedef pair<ld,ld> pd;
typedef vector<int> vi;
typedef vector<ll> vl;
typedef vector<ld> vd;
typedef vector<str> vs;
typedef vector<pi> vpi;
typedef vector<pl> vpl;
// --- Loop helpers: FOR/F0R count up, ROF/R0F count down, trav iterates ---
#define FOR(i,a,b) for (int i = (a); i < (b); ++i)
#define F0R(i,a) FOR(i,0,a)
#define ROF(i,a,b) for (int i = (b)-1; i >= (a); --i)
#define R0F(i,a) ROF(i,0,a)
#define trav(a,x) for (auto& a: x)
// rep iterates begin..end in either direction; bsearch halves i down to b.
#define rep(i, begin, end) for (auto i = (begin) - ((begin) > (end)); i != (end) - ((begin) > (end)); i += 1 - 2 * ((begin) > (end)))
#define bsearch(i, b, a) for (auto i = a; i >= b; i /= 2)
// --- Container shorthands ---
#define sz(x) (int)x.size()
#define all(x) begin(x), end(x)
#define rall(x) rbegin(x), rend(x)
#define rsz resize
#define ins insert
#define mp make_pair
#define pb push_back
#define eb emplace_back
#define ff first
#define ss second
#define lb lower_bound
#define ub upper_bound
// --- Common constants ---
const int MOD = 1e9+7;
const int MX = 1e6+5;
const ll INF = 1e18;
const ld PI = 4*atan((ld)1);
// Base case for the variadic sum() below.
auto sum() {return 0;}
// Sets a = min(a, b); returns true iff a changed.
template<class T> bool ckmin(T& a, const T& b) {
    return a > b ? a = b, 1 : 0; }
// Sets a = max(a, b); returns true iff a changed.
template<class T> bool ckmax(T& a, const T& b) {
    return a < b ? a = b, 1 : 0; }
// sum(a, b, c, ...): recursive addition over all arguments.
template<typename T, typename... Args> auto sum(T a, Args... args) {
    return a + sum(args...); }
// Variadic console-input helpers: in(a, b, c) reads each argument from cin.
// double/long double are read as strings and converted via sto{d,ld}.
namespace input {
    template<class T> void in(vector<T>& a);
    template<class T, size_t SZ> void in(array<T,SZ>& a);
    template<class T> void in(T& x) { cin >> x; }
    void in(double& x) { string t; in(t); x = stod(t); }
    void in(ld& x) { string t; in(t); x = stold(t); }
    template<class T, class... Ts> void in(T& t, Ts&... ts) { in(t); in(ts...); }
    template<class T> void in(vector<T>& a) { F0R(i,sz(a)) in(a[i]); }
    template<class T, size_t SZ> void in(array<T,SZ>& a) { F0R(i,SZ) in(a[i]); }
} using namespace input;
// Variadic console-output helpers:
//   out(...)  -- print all arguments, no separator
//   ous(...)  -- print arguments space-separated, newline-terminated
//   ouc(...)  -- print arguments comma-separated, closed with "]\n"
//   dbg(x...) -- print "[names] = [values]" for quick debugging
namespace output {
    void out(int x) { cout << x; }
    void out(long x) { cout << x; }
    void out(ll x) { cout << x; }
    void out(unsigned x) { cout << x; }
    void out(unsigned long x) { cout << x; }
    void out(unsigned long long x) { cout << x; }
    void out(float x) { cout << x; }
    void out(double x) { cout << x; }
    void out(ld x) { cout << x; }
    void out(char x) { cout << x; }
    void out(const char* x) { cout << x; }
    void out(const string& x) { cout << x; }
    void out(bool x) { out(x ? "true" : "false"); }
    template<class T> void out(const complex<T>& x) { cout << x; }
    template<class T, class... Ts> void out(const T& t, const Ts&... ts) {
        out(t); out(ts...);
    }
    void ous() { out("\n"); }
    template<class T, class... Ts> void ous(const T& t, const Ts&... ts) {
        out(t); if (sizeof...(ts)) out(" "); ous(ts...);
    }
    void ouc() { out("]\n"); }
    template<class T, class... Ts> void ouc(const T& t, const Ts&... ts) {
        out(t); if (sizeof...(ts)) out(", "); ouc(ts...);
    }
#define dbg(x...) out("[",#x,"] = ["), ouc(x);
} using namespace output;
// Input buffer shared by all test cases.
int a[200005];

// Handles one test case: find any adjacent pair whose values differ by more
// than 1, print its 1-based positions after "YES", otherwise print "NO".
void lol() {
    int n;
    in(n);
    for (int i = 0; i < n; ++i) in(a[i]);
    for (int i = 0; i + 1 < n; ++i) {
        int gap = a[i] - a[i + 1];
        // Equivalent to: a[i] is not within +/-1 of a[i+1].
        if (gap > 1 || gap < -1) {
            cout << "YES\n" << i + 1 << " " << i + 2 << "\n";
            return;
        }
    }
    cout << "NO\n";
}
// Entry point: read the test-case count, then run lol() once per case.
int main() {
    int cases;
    in(cases);
    for (int i = 0; i < cases; ++i) lol();
}
|
/**
 * Loads previously saved quiz results from the XML file at
 * {@code quizResultFullPath} into this object's {@code results} field.
 * <p>
 * If the file does not exist, {@code this.results} is left unchanged (a
 * fresh empty {@link QuizResults} is created locally but never assigned).
 * A JAXB failure is printed to stderr and otherwise swallowed.
 * <p>
 * NOTE(review): the previous Javadoc documented a {@code @return} value,
 * but this method is {@code void}; it communicates its result through the
 * {@code results} field instead. The declared {@code JAXBException} is
 * caught internally, not thrown to callers.
 */
public void LoadQuizResults() {
    try {
        // File representation of the stored quiz-results XML.
        File file = new File(quizResultFullPath);
        QuizResults quizResults = new QuizResults();
        if (file.exists()) {
            // JAXBContext provides access to the XML binding machinery.
            JAXBContext jc = JAXBContext.newInstance(QuizResults.class);
            // Unmarshaller reads the XML back into a QuizResults instance.
            Unmarshaller unmarshaller = jc.createUnmarshaller();
            quizResults = (QuizResults) unmarshaller.unmarshal(file);
            this.results = quizResults.results;
        }
    } catch (JAXBException e) {
        // XML unmarshalling failure: log and keep the existing results.
        e.printStackTrace();
    }
}
<filename>tests/ui/pin_project/unaligned_references.rs
#![forbid(unaligned_references)]
// Refs: https://github.com/rust-lang/rust/issues/82523
#[repr(packed)] // fully byte-packed: references to `f` are unaligned
struct Packed {
    f: u32,
}
#[repr(packed(2))] // alignment capped at 2: u32 field still under-aligned
struct PackedN {
    f: u32,
}
fn main() { // each reference below must be rejected under forbid(unaligned_references)
    let a = Packed { f: 1 }; // field at offset 0 of a 1-aligned struct
    let _ = &a.f; //~ ERROR reference to packed field is unaligned
    let b = PackedN { f: 1 }; // 2-aligned struct, 4-byte field
    let _ = &b.f; //~ ERROR reference to packed field is unaligned
}
|
import {
ChannelTypes,
Client,
Embed,
Intents,
Message,
MessageReaction,
User
} from 'https://raw.githubusercontent.com/harmony-org/harmony/main/mod.ts'
import { DISCORD_TOKEN } from '../config.ts'
import { Game, PropEmoji } from './structures/game.ts'
import {
PortalgunStates,
MovementEmojis,
BluePortalEmoji,
OrangePortalEmoji,
PortalEmojis,
translatePerType,
Position
} from './types/other.ts'
import { Directions, Player } from './types/player.ts'
import {
ActivatableProps,
ActivatedButtonProps,
AllButtonProps,
AllDoorProps,
Button,
CubeButtonProps,
CubeDropper,
Door,
Portals,
PropTypes
} from './types/props.ts'
import { World, WorldData, WorldGenerator } from './types/world.ts'
// Active game session per Discord user id; entries are deleted again by the
// 'dead' / 'worldFinished' handlers when a game ends.
const GamePerUser: {
  [key: string]: Game
} = {}
/**
 * Waits for the next 'messageCreate' event accepted by `checker` and resolves
 * with that message. If `timeout` (ms) elapses first, resolves with
 * `undefined`. The listener is always detached before resolving.
 */
const getMessageInPromise = (
  eventEmitter: Client,
  checker: (message: Message) => boolean,
  timeout?: number
): Promise<Message | undefined> => {
  return new Promise((resolve, _reject) => {
    let timerHandle: number | undefined
    const onMessage = (incoming: Message) => {
      if (!checker(incoming)) return
      resolve(incoming)
      eventEmitter.off('messageCreate', onMessage)
      if (timerHandle !== undefined) clearTimeout(timerHandle)
    }
    if (timeout !== undefined) {
      timerHandle = setTimeout(() => {
        eventEmitter.off('messageCreate', onMessage)
        resolve(undefined)
      }, timeout)
    }
    eventEmitter.on('messageCreate', onMessage)
  })
}
// World definitions loaded synchronously at startup from ../maps.json.
// Shape: { lastSavedID, worlds: { [id]: WorldData } } -- matches the lookups
// performed in the '!play' command handler below.
const worldDatas: {
  lastSavedID: number
  worlds: { [id: string]: WorldData }
} = JSON.parse(Deno.readTextFileSync('../maps.json'))
/**
 * Builds the game-status embed: the rendered world, current health, and an
 * emoji indicator for which portals the player's portalgun can fire.
 */
const embedMaker = (game: Game) => {
  const portalgunState = game.player.portalgun
  // Map the portalgun state to its display emoji(s).
  let portalgunDisplay: string
  switch (portalgunState) {
    case PortalgunStates.NONE:
      portalgunDisplay = '⚫️'
      break
    case PortalgunStates.BLUE_ONLY:
      portalgunDisplay = BluePortalEmoji
      break
    case PortalgunStates.ORANGE_ONLY:
      portalgunDisplay = OrangePortalEmoji
      break
    default:
      // Both portals available.
      portalgunDisplay = PortalEmojis.join(' ')
  }
  return new Embed({
    title: 'Portal: The Discord Version',
    description: game.toString(),
    fields: [
      {
        name: 'Health',
        value: game.health.toString(),
        inline: true
      },
      {
        name: 'Portalgun',
        value: portalgunDisplay,
        inline: true
      }
    ]
  })
}
class Portal extends Client {
constructor(...args: any[]) {
super(...args)
this.on('ready', this.ready)
this.on('messageCreate', this.message)
this.on('messageReactionAdd', this.messageReactionAdd)
this.on('messageReactionRemove', this.messageReactionRemove)
}
ready() {
console.log('Running!')
}
async message(message: Message) {
if (message.author.bot) return
if (message.content.startsWith('!play')) {
if (message.channel.type === ChannelTypes.DM) {
message.channel.send(
'이 명령어는 리엑션 삭제 기능이 필요하게 때문에, DM에서 사용하실 수 없습니다.'
)
return
}
const args = message.content.split(' ')
const worldData = worldDatas.worlds[args[1]]
if (worldData === undefined) {
message.channel.send('월드가 없습니다!')
return
}
const { world, player, portals, props } = worldData
const game = new Game(world, player, portals, props)
GamePerUser[message.author.id] = game
const embed = embedMaker(game)
const sentMessage = await message.channel.send(embed)
game.on('healthChange', (_health: number) => {
const embed = embedMaker(game)
sentMessage.edit(embed)
})
game.on('dead', () => {
game.close()
delete GamePerUser[message.author.id]
setTimeout(sentMessage.delete.bind(sentMessage), 1000)
message.channel.send('당신은 죽었습니다! 그리고 게임은 끝났습니다.')
})
game.on('timerDone', () => {
const embed = embedMaker(game)
sentMessage.edit(embed)
})
for (const emoji of MovementEmojis) {
await sentMessage.addReaction(emoji)
}
if (game.player.portalgun === PortalgunStates.BLUE_ONLY) {
await sentMessage.addReaction(BluePortalEmoji)
} else if (game.player.portalgun === PortalgunStates.ORANGE_ONLY) {
await sentMessage.addReaction(OrangePortalEmoji)
} else if (game.player.portalgun === PortalgunStates.ALL) {
await sentMessage.addReaction(BluePortalEmoji)
await sentMessage.addReaction(OrangePortalEmoji)
}
await sentMessage.addReaction('🔘')
await sentMessage.addReaction('🛑')
game.on('worldFinished', () => {
game.close()
delete GamePerUser[message.author.id]
setTimeout(sentMessage.delete.bind(sentMessage), 1000)
message.channel.send('축하드립니다! 게임을 깨셨습니다!')
})
} else if (message.content === '!make') {
if (message.channel.type !== ChannelTypes.DM) {
message.channel.send(
'이 명령어는 많은 메시지를 주고 받아야하기 때문에, DM에서만 사용 가능합니다.'
)
return
}
const user = message.author
message.channel.send(
'안녕하세요! 맵 메이커에 오신것을 환영합니다.\n좌표의 맨 왼쪽 위는 `(0, 0)` 입니다.\n**이 이하에 있는 것들은 한번 진행후 수정 할 수 없습니다!!**'
)
const getWorldSize = async (): Promise<[number, number] | undefined> => {
message.channel.send(
'맵 만들기를 시작해보죠. 자, 월드의 크기는 어떻습니까? `너비 x 높이` 형식으로 적어주세요. (곱했을 때 200이 넘어가면 안됩니다!)'
)
const returnMessage = await getMessageInPromise(
this,
(message) => message.author.id === user.id,
30000
)
if (returnMessage === undefined) {
message.channel.send(
'시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
)
return
}
const worldSize = returnMessage.content
const [width, height] = worldSize
.split('x')
.map((size) => parseInt(size))
if (isNaN(width) || isNaN(height)) {
message.channel.send('숫자가 아닌것이 왔어요! 다시 해봅시다.')
return await getWorldSize()
}
if (width * height > 200) {
message.channel.send('월드 프롭이 200개가 넘어요! 좀 더 작게 해봐요.')
return await getWorldSize()
}
return [width, height]
}
const size = await getWorldSize()
if (size === undefined) {
return
}
const [width, height] = size
let worldTemplate: string = ''
for (let l = 0; l < height; l++) {
let row: string[] = []
for (let i = 0; i < width; i++) {
row.push('⬛')
}
worldTemplate += `${row.join(',')}\n`
}
message.channel.send(
`월드 템플릿을 생성했습니다! 밑에 있는 양식을 복사하여 수정 후 보내주세요.\n어떤 프롭들이 있는지 궁금하시면, \`!props\`를 입력하면 알려드리겠습니다!\n그리고 디스코드에서 너무 길다며 파일로 보내려 하면, 나눠서 복사 붙여넣기를 하면 됩니다!\n\n${worldTemplate}`
)
const getWorld = async (): Promise<WorldGenerator | undefined> => {
const worldMessage = await getMessageInPromise(
this,
(message) => message.author.id === user.id,
3600000
)
if (worldMessage === undefined) {
message.channel.send(
'시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
)
return
}
const customWorld: PropTypes[][] = []
const props: Array<Door | Button | CubeDropper> = []
const portals: Portals = {
blue: null,
orange: null
}
let playerPosition: Position | null = null
let goalExist = false
let content = worldMessage.content
if (content === '!props') {
message.channel.send(
`⬛: 아무것도 없음
⬜: 월석 벽
🟫: 벽
🟠: 주황 포탈
🔵: 파랑 포탈
🟥: 큐브 버튼
🟩: 활성화된 큐브 버튼
🔴: 버튼
🟢: 활성화된 버튼
🟦: 큐브
🔷: 포탈 청소 필드
🧑: 플레이어
🔒: 문
🔓: 열린 문
🟪: 독극물
✅: 도착 지점`
)
return await getWorld()
}
const splited = content.split('\n')
if (splited.length !== height) {
message.channel.send(
'월드가 설정된 값 보다 작거나 큽니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
}
for (let l = 0; l < splited.length; l++) {
const propLine = splited[l].split(',')
if (propLine.length !== width) {
message.channel.send(
'월드가 설정된 값 보다 작거나 큽니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
}
const row = []
for (let i = 0; i < propLine.length; i++) {
const prop = propLine[i].trim()
const temp = PropEmoji.findIndex((emoji) => emoji === prop.trim())
if (temp === -1) {
message.channel.send(
'프롭 리스트에 없는 프롭이 감지되었습니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
}
const propType: PropTypes = temp
if (AllButtonProps.includes(propType)) {
props.push({
type: 'button',
activate: null,
activated: ActivatedButtonProps.includes(propType),
cube: CubeButtonProps.includes(propType),
position: {
x: i,
y: l
}
})
row.push(PropTypes.NONE)
} else if (AllDoorProps.includes(propType)) {
props.push({
type: 'door',
activated: propType === PropTypes.OPENED_DOOR,
position: {
x: i,
y: l
}
})
row.push(PropTypes.NONE)
} else if (propType === PropTypes.BLUE_PORTAL) {
if (portals.blue !== null) {
message.channel.send(
'주황 포탈이 여러 개 발견되었습니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
} else {
portals.blue = {
x: i,
y: l
}
}
row.push(PropTypes.NONE)
} else if (propType === PropTypes.ORANGE_PORTAL) {
if (portals.orange !== null) {
message.channel.send(
'주황 포탈이 여러 개 발견되었습니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
} else {
portals.orange = {
x: i,
y: l
}
}
row.push(PropTypes.NONE)
} else if (propType === PropTypes.PLAYER) {
playerPosition = {
x: i,
y: l
}
row.push(PropTypes.NONE)
} else {
row.push(propType)
}
if (propType === PropTypes.GOAL) {
goalExist = true
}
}
customWorld.push(row)
}
if (playerPosition === null) {
message.channel.send(
'플레이어가 월드에 없습니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
}
if (!goalExist) {
message.channel.send(
'플레이어가 월드에 없습니다! 수정 후 다시 보내주세요.'
)
return await getWorld()
}
return {
world: {
size: {
width,
height
},
field: customWorld
},
props,
portals,
playerPosition
}
}
const testWorld = await getWorld()
if (testWorld === undefined) {
return
}
const { world, props, portals, playerPosition } = testWorld
message.channel.send('월드 인식이 끝났습니다!')
if (props.length !== 0) {
message.channel.send('버튼 설정이 남아있군요. 한번 설정해볼까요?')
        // Interactive editor for a single button prop. Repeatedly shows the
        // button's current settings and applies the user's chosen change
        // until the user saves ('s'/'save'). Returns the edited prop, or
        // undefined when any prompt times out (1 hour per prompt).
        const buttonSetting = async (
          prop: Button
        ): Promise<Button | undefined> => {
          let loop = true
          while (loop) {
            // Resolve the prop this button activates: null = no target set,
            // undefined = stored coordinates no longer match any prop.
            const target =
              prop.activate === null
                ? null
                : props.find(
                    (p) =>
                      p.position.x === prop.activate?.x &&
                      p.position.y === prop.activate?.y
                  )
            message.channel.send(
              `버튼 (${prop.position.x}, ${prop.position.y})
1. 활성화 하는 것: ${
                target === null
                  ? '없음'
                  : target === undefined
                  ? '찾을 수 없음'
                  : `${translatePerType[target.type]} (${target.position.x}, ${
                      target.position.y
                    })`
              }
2. 활성화 여부: ${prop.activated ? 'O' : 'X'}
3. 큐브 버튼 여부: ${prop.cube ? 'O' : 'X'}
4. 타이머(ms): ${prop.timer !== undefined ? prop.timer : '영구적'}
5. 위치: (${prop.position.x}, ${prop.position.y})`
            )
            message.channel.send(
              '변경하고 싶은 설정의 번호를 골라 입력해주세요.\n변경이 끝났다면, `s` 또는 `save`를 입력해주세요.'
            )
            const propSetting = await getMessageInPromise(
              this,
              (message) => message.author.id === user.id,
              3600000
            )
            if (propSetting === undefined) {
              message.channel.send(
                '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
              )
              return
            }
            // Applies one menu choice. Returns true to save-and-exit, false
            // to show the menu again, undefined on a nested prompt timeout.
            // Invalid sub-inputs retry recursively via readSettings().
            const readSettings = async (): Promise<boolean | undefined> => {
              switch (propSetting.content.toLowerCase()) {
                case '1': {
                  // Re-target the button at another prop's coordinates.
                  message.channel.send(
                    '활성화 할 것의 좌표를 `x, y` 형식으로 입력해주세요.'
                  )
                  const setting = await getMessageInPromise(
                    this,
                    (message) => message.author.id === user.id,
                    3600000
                  )
                  if (setting === undefined) {
                    message.channel.send(
                      '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
                    )
                    return
                  }
                  const [x, y] = setting.content
                    .split(',')
                    .map((s) => parseInt(s))
                  if (isNaN(x) || isNaN(y)) {
                    message.channel.send(
                      '숫자가 아닌것이 왔어요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  // Only existing props (buttons/doors) can be activated.
                  if (
                    props.find(
                      (p) => p.position.x === x && p.position.y === y
                    ) === undefined
                  ) {
                    message.channel.send(
                      '활성화가 가능한 프롭이 아니에요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  prop.activate = {
                    x,
                    y
                  }
                  return false
                }
                case '2': {
                  // Toggle the initial activation state via O/X input.
                  message.channel.send(
                    '활성화 여부를 `O` 또는 `X`로 입력해주세요.'
                  )
                  const setting = await getMessageInPromise(
                    this,
                    (message) => message.author.id === user.id,
                    3600000
                  )
                  if (setting === undefined) {
                    message.channel.send(
                      '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
                    )
                    return
                  }
                  if (!['o', 'x'].includes(setting.content.toLowerCase())) {
                    message.channel.send(
                      '잘못된 값이 들어왔어요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  prop.activated = setting.content.toLowerCase() === 'o'
                  return false
                }
                case '3': {
                  // Toggle whether this is a cube button via O/X input.
                  message.channel.send(
                    '큐브 버튼 여부를 `O` 또는 `X`로 입력해주세요.'
                  )
                  const setting = await getMessageInPromise(
                    this,
                    (message) => message.author.id === user.id,
                    3600000
                  )
                  if (setting === undefined) {
                    message.channel.send(
                      '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
                    )
                    return
                  }
                  if (!['o', 'x'].includes(setting.content.toLowerCase())) {
                    message.channel.send(
                      '잘못된 값이 들어왔어요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  prop.cube = setting.content.toLowerCase() === 'o'
                  return false
                }
                case '4': {
                  // Set the button's auto-deactivate timer (milliseconds).
                  message.channel.send('초를 숫자로만 ms 기준으로 적어주세요.')
                  const setting = await getMessageInPromise(
                    this,
                    (message) => message.author.id === user.id,
                    3600000
                  )
                  if (setting === undefined) {
                    message.channel.send(
                      '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
                    )
                    return
                  }
                  const time = parseInt(setting.content)
                  if (isNaN(time)) {
                    message.channel.send(
                      '숫자가 아닌것이 왔어요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  prop.timer = time
                  return false
                }
                case '5': {
                  // Move the button to an empty, unoccupied field cell.
                  message.channel.send('좌표를 `x, y` 형식으로 입력해주세요.')
                  const setting = await getMessageInPromise(
                    this,
                    (message) => message.author.id === user.id,
                    3600000
                  )
                  if (setting === undefined) {
                    message.channel.send(
                      '시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
                    )
                    return
                  }
                  const [x, y] = setting.content
                    .split(',')
                    .map((s) => parseInt(s))
                  if (isNaN(x) || isNaN(y)) {
                    message.channel.send(
                      '숫자가 아닌것이 왔어요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  // Reject out-of-bounds cells, non-empty tiles, and cells
                  // already holding another prop.
                  if (
                    world.field[y] === undefined ||
                    world.field[y][x] !== PropTypes.NONE ||
                    props.find(
                      (p) => p.position.x === x && p.position.y === y
                    ) !== undefined
                  ) {
                    message.channel.send(
                      '설치가 가능한 좌표가 아니에요! 다시 입력해주세요.'
                    )
                    return await readSettings()
                  }
                  prop.position = {
                    x,
                    y
                  }
                  return false
                }
                case 's':
                case 'save': {
                  message.channel.send('저장 중...')
                  return true
                }
                default: {
                  message.channel.send(
                    '잘못된 값이 들어왔어요! 다시 입력해주세요.'
                  )
                  return false
                }
              }
            }
            const result = await readSettings()
            if (result === undefined) {
              // A nested prompt timed out: abort the whole dialog.
              loop = false
              return
            } else {
              loop = !result
            }
          }
          return prop
        }
        // Run the interactive dialog for every button prop (doors are
        // skipped). An undefined result means a prompt timed out, which
        // aborts the whole '!make' command.
        for (let i = 0; i < props.length; i++) {
          const prop = props[i]
          if (prop.type !== 'button') continue
          const result = await buttonSetting(prop)
          if (result === undefined) return
          props[i] = result
        }
}
message.channel.send('이제 거의 다 끝났습니다!')
const getPlayerPortalgun = async (): Promise<
PortalgunStates | undefined
> => {
message.channel.send(
'플레이어의 포탈건 모드를 숫자로 입력해주세요.\n\n1. 없음\n2. 파란 포탈만\n3. 주황 포탈만\n4. 모두'
)
const positionSetting = await getMessageInPromise(
this,
(message) => message.author.id === user.id,
3600000
)
if (positionSetting === undefined) {
message.channel.send(
'시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
)
return
}
const mode = parseInt(positionSetting.content)
if (isNaN(mode) || mode < 1 || mode > 4) {
message.channel.send('잘못된 값이 왔어요! 다시 입력해주세요.')
return await getPlayerPortalgun()
}
return mode - 1
}
const portalgunMode = await getPlayerPortalgun()
if (portalgunMode === undefined) return
message.channel.send('마지막 단계입니다!')
const getPlayerHealth = async (): Promise<number | undefined> => {
message.channel.send('플레이어의 체력을 숫자로만 입력해주세요.')
const positionSetting = await getMessageInPromise(
this,
(message) => message.author.id === user.id,
3600000
)
if (positionSetting === undefined) {
message.channel.send(
'시간 초과! 다시 생성 하고 싶으시다면 다시 명령해주십시오.'
)
return
}
const health = parseInt(positionSetting.content)
if (isNaN(health)) {
message.channel.send('잘못된 값이 왔어요! 다시 입력해주세요.')
return await getPlayerHealth()
}
return health
}
      const playerHealth = await getPlayerHealth()
      if (playerHealth === undefined) return
      message.channel.send('저장 중...')
      // Persist the finished world under the next free numeric ID.
      const lastSavedID = worldDatas.lastSavedID
      worldDatas.worlds[(lastSavedID + 1).toString()] = {
        world,
        portals,
        props,
        player: {
          position: playerPosition,
          portalgun: portalgunMode,
          health: playerHealth,
          holding: false
        },
        id: lastSavedID + 1
      }
      worldDatas.lastSavedID = lastSavedID + 1
      // NOTE(review): '../maps.json' resolves relative to the process CWD,
      // not this module's directory — confirm it matches where maps.json
      // is read from.
      const encoder = new TextEncoder()
      Deno.writeFileSync(
        '../maps.json',
        encoder.encode(JSON.stringify(worldDatas, null, 2))
      )
      message.channel.send(`완료! 월드 ID: ${lastSavedID + 1}`)
} else if (message.content === '!help') {
message.channel.send(
new Embed({
title: 'Portal: The Discord Version 도움말',
fields: [
{
name: '!play <맵 ID>',
value: '<맵 ID>을 플레이 하는 커맨드'
},
{
name: '!make',
value: '맵 만드는 커맨드'
}
]
})
)
}
}
async messageReactionAdd(reaction: MessageReaction, user: User) {
if (
user.bot ||
!(user.id in GamePerUser) ||
GamePerUser[user.id].messageID === reaction.message.id
) {
return
}
const game = GamePerUser[user.id]
if (reaction.emoji.name === '🛑') {
game.close()
delete GamePerUser[user.id]
reaction.message.delete()
reaction.message.channel.send('게임이 끝났습니다.')
return
}
if (reaction.emoji.name === '🔘' && !game.player.holding) {
game.toggleUse()
if (game.player.holding) {
reaction.message.removeReaction(reaction.emoji.name)
reaction.message.addReaction('🔻')
}
reaction.message.removeReaction(reaction.emoji.name, user)
const embed = embedMaker(game)
reaction.message.edit(embed)
return
}
if (reaction.emoji.name === '🔻' && game.player.holding) {
game.waitingForDropDirection = true
return
}
if (
reaction.emoji.name === BluePortalEmoji &&
!game.waitingForBluePortalDirection
) {
if (game.waitingForOrangePortalDirection) {
game.waitingForOrangePortalDirection = false
reaction.message.removeReaction(reaction.emoji.name, user)
}
game.waitingForBluePortalDirection = true
return
}
if (
reaction.emoji.name === OrangePortalEmoji &&
!game.waitingForOrangePortalDirection
) {
if (game.waitingForBluePortalDirection) {
game.waitingForBluePortalDirection = false
reaction.message.removeReaction(reaction.emoji.name, user)
}
game.waitingForOrangePortalDirection = true
return
}
if (
game.waitingForDropDirection &&
MovementEmojis.includes(reaction.emoji.name)
) {
let direction: Directions
switch (reaction.emoji.name) {
case '⬆': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.UP
break
}
case '⬇': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.DOWN
break
}
case '⬅': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.LEFT
break
}
case '➡': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.RIGHT
break
}
default:
return
}
game.drop(direction)
if (!game.player.holding) {
reaction.message.removeReaction('🔻', user)
reaction.message.removeReaction('🔻')
reaction.message.addReaction('🔘')
}
const embed = embedMaker(game)
reaction.message.edit(embed)
return
}
if (
(game.waitingForBluePortalDirection ||
game.waitingForOrangePortalDirection) &&
MovementEmojis.includes(reaction.emoji.name)
) {
let direction: Directions
switch (reaction.emoji.name) {
case '⬆': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.UP
break
}
case '⬇': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.DOWN
break
}
case '⬅': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.LEFT
break
}
case '➡': {
reaction.message.removeReaction(reaction.emoji, user)
direction = Directions.RIGHT
break
}
default:
return
}
if (game.waitingForBluePortalDirection) {
reaction.message.removeReaction(BluePortalEmoji, user)
} else if (game.waitingForOrangePortalDirection) {
reaction.message.removeReaction(OrangePortalEmoji, user)
}
game.shootPortal(direction)
const embed = embedMaker(game)
reaction.message.edit(embed)
return
}
if (MovementEmojis.includes(reaction.emoji.name)) {
switch (reaction.emoji.name) {
case '⬆': {
reaction.message.removeReaction(reaction.emoji, user)
game.goTo(Directions.UP)
break
}
case '⬇': {
reaction.message.removeReaction(reaction.emoji, user)
game.goTo(Directions.DOWN)
break
}
case '⬅': {
reaction.message.removeReaction(reaction.emoji, user)
game.goTo(Directions.LEFT)
break
}
case '➡': {
reaction.message.removeReaction(reaction.emoji, user)
game.goTo(Directions.RIGHT)
break
}
default:
return
}
const embed = embedMaker(game)
reaction.message.edit(embed)
}
}
async messageReactionRemove(reaction: MessageReaction, user: User) {
if (
user.bot ||
!(user.id in GamePerUser) ||
GamePerUser[user.id].messageID === reaction.message.id
) {
return
}
const game = GamePerUser[user.id]
if (
reaction.emoji.name === BluePortalEmoji &&
game.waitingForBluePortalDirection
) {
game.waitingForBluePortalDirection = false
return
}
if (
reaction.emoji.name === OrangePortalEmoji &&
game.waitingForOrangePortalDirection
) {
game.waitingForOrangePortalDirection = false
return
}
if (reaction.emoji.name === '🔻' && game.waitingForDropDirection) {
game.waitingForDropDirection = false
return
}
}
}
// Instantiate the Portal game client and open the gateway connection.
// NOTE(review): Intents.None presumably suffices because this client only
// consumes reaction add/remove events — confirm against the library's
// intent requirements.
const bot = new Portal()
bot.connect(DISCORD_TOKEN, Intents.None)
|
Deletion of Indian hedgehog gene causes dominant semi-lethal Creeper trait in chicken
The Creeper trait, a classical monogenic phenotype of chicken, is controlled by a dominant semi-lethal gene. This trait has been widely cited in the genetics and molecular biology textbooks for illustrating autosomal dominant semi-lethal inheritance over decades. However, the genetic basis of the Creeper trait remains unknown. Here we have utilized ultra-deep sequencing and extensive analysis for targeting causative mutation controlling the Creeper trait. Our results indicated that the deletion of Indian hedgehog (IHH) gene was only found in the whole-genome sequencing data of lethal embryos and Creeper chickens. Large scale segregation analysis demonstrated that the deletion of IHH was fully linked with early embryonic death and the Creeper trait. Expression analysis showed a much lower expression of IHH in Creeper than wild-type chickens. We therefore suggest the deletion of IHH to be the causative mutation for the Creeper trait in chicken. Our findings unravel the genetic basis of the longstanding Creeper phenotype mystery in chicken as the same gene also underlies bone dysplasia in human and mouse, and thus highlight the significance of IHH in animal development and human haploinsufficiency disorders.
Table 1. Summary of embryonic mortality during the entire period of incubation. 1 Fertile eggs were determined using the candling method. 2 Early embryonic death was measured by candling at E4. 3 Total embryonic death was determined by counting all dead embryos during the incubation period.
shorter/smaller than those of wild-type chickens (Fig. 1c-e and Supplementary Fig. S1), while body weight of the Creeper chickens was also lower than that of wild-type chickens from postnatal to adult stages ( Supplementary Fig. S1). The observed phenotypes of Xingyi bantam and segregation analysis results in the present study were consistent with previous experiments 7, 17 . These findings further confirmed that Xingyi bantam breed carries the Cp gene.
Deletion of IHH exists only in the Creeper chickens.
In order to decipher the genetic basis of the semi-lethal Creeper trait in chicken, we constructed a segregation population from the pedigreed Creeper chicken mating (Cp/+ × Cp/+ ) and performed whole-genome sequencing of 6 pairs of full-sibs (6 Creeper chickens, Cp/+ ; 6 wild-type chickens, + /+ ) (Fig. 2). Average sequence coverage of 12-16X for all 12 individuals was obtained (Supplementary Table S2).
To identify the causative mutations responsible for the semi-lethal Creeper trait, we used a set of algorithms to detect various structural variations (SVs; including SNP, indel, rearrangement, translocation, inversion, tandem duplication, copy number variation) from the whole-genome sequencing data and directly probed the causative mutation from the SVs (Fig. 2). As the Cp and Rose-comb genes are closely linked 14 , we therefore prioritized the SVs on chicken chromosome 7 for investigation. We did not find any group-specific rearrangement, translocation, inversion, tandem duplication, medium-size indel or copy number variation (Supplementary Table S4-S6). There are six SNPs, five small indels and one large deletion which were solely presented in all Creeper individuals (Supplementary Table S3-S6). Among all the variations, only a 11,896 bp large deletion region (chr7: 21,798,705-21,810,600) covering the entire Indian hedgehog (IHH) gene (Fig. 3a) was top-ranked in terms of the genetic effects, chromosome position, and mutation type in our analysis pipeline (Fig. 2). Read depth was significantly lower in the deletion region than in both sides of the deletion region in the Creeper chickens, and the read depth in the deletion region of the Creeper chickens was almost half of that in the wild-type chickens (Fig. 3a). All 6 Creeper chickens were shown to carry the same IHH deletion. The IHH gene is adjacent to the MNR2 gene which is a causative gene for Rose-comb mutation in the chicken 14 (Fig. 3a). Our results strongly suggest that the deletion of IHH is the causative mutation for the semi-lethal Creeper trait. Six pairs of full-sibs were obtained for wholegenome sequencing. Several current known major mutations were detected using various algorithms/software. Once the mutations were obtained, the mutations were assessed and ranked from high to low by their potential genetic effects.
Scientific RepoRts | 6:30172 | DOI: 10.1038/srep30172 Deletion of IHH is completely associated with the semi-lethal Creeper trait. The breakpoint of the deletion region was confirmed by a diagnostic PCR test using a forward primer in the upstream and a reverse primer in the downstream of the deletion region (Fig. 4a). An expected, 224 bp PCR product was obtained from the lethal embryos ( Fig. 4b and Supplementary Fig. S2). In comparison, an amplicon positioned within the deletion region yielded the expected 438 bp product in the wild-type chickens ( Fig. 4b and Supplementary Fig. S2). PCR products from the heterozygotes had both 224 bp and 438 bp bands. PCR products of lethal and wild-type embryos were further confirmed by Sanger sequencing (Supplementary Fig. S2). It is clear that three different genotypic individuals can be clearly classified by the diagnostic PCR test for further analysis (Fig. 4b).
Copy number of IHH gene in three genotypic individuals was examined by SYBR Green qPCR analysis with genomic DNA as the template. The results showed 0:1:2 ratio for lethal embryos (Cp/Cp), Creeper (Cp/+ ) and wild-type (+ /+ ) chickens, respectively (Fig. 5). To further explore the IHH deletion in a wide range of the Creeper populations by the diagnostic PCR test, a large segregated population from a mating of Creepers (Cp/+ × Cp/+ ) was constructed to test the association of genotypes with phenotypes. In total, 511 samples (embryos, n = 130; chickens, n = 381) of Creeper progeny were collected for association testing. Our result suggested that the complete association between the IHH deletion and the Creeper phenotype was observed in all the tested samples (p > 0.05, Table 2). The results of the complete association between deletion of IHH and the Creeper phenotype further suggest that this deletion is the causative mutation for the semi-lethal Creeper trait in chicken. Chi-square test χ 2 = 0.0509, p = 0.8215 Table 2. Diagnostic genotyping of individuals from the Creeper intercross population. 1 N: the number of embryos and chickens.
We genotyped all embryos found to be dead at E4 by the diagnostic PCR test and randomly chose 6 homozygous Cp/Cp samples for further whole genome sequencing (sequence coverage: 15-18X), as shown in Supplementary Table S2. No sequencing read was found in the deletion region (Fig. 3a), confirming the complete deletion of IHH in the early lethal embryos. The ratio of normalized read number of the three genotypes was close to 0:1:2 for lethal embryos, Creeper and wild-type phenotypes, respectively (Fig. 3b). Taken together, these lines of evidence allowed us to further conclude that a complete deletion of IHH causes a fully penetrant and dominant inheritance of the Creeper trait in chicken.
Expression analysis reveals decreased IHH quantity responsible for the Creeper trait in chicken.
To better understand the molecular mechanism underlying the semi-lethal Creeper trait in chicken, we investigated the expression pattern of IHH in early embryos and tibial cartilages from three genotypic individuals. E4 embryos were clearly divided into three genotypes using the diagnostic PCR test, then randomly chosen to perform expression analysis. Our qPCR analysis indicated that IHH was expressed at much lower levels at different embryonic developmental stages in the Creeper than in the wild-type chickens (Fig. 6a). Western blot analyses also detected lower IHH levels in tibial cartilages from the Creeper as compared to wild-type chickens (Fig. 6b-e). Our results showed that this deletion resulted in quantitative reduction of IHH expression, suggesting that decreased expression of IHH was incapable of providing sufficient protein product to maintain normal function and thus led to the Creeper trait in the heterozygotes while dominant homozygotes were lethal owing to loss of the whole gene product.
Discussion
The Creeper trait is a well-known monogenic phenotype in chicken following Mendelian autosomal dominant inheritance 7 . However, the molecular basis of this trait remains poorly understood. It will be of great importance to identify genes and/or causative mutations affecting the Creeper trait, understand the biological and medical significance of the genes, and determine the gene regulatory mechanisms underlying this trait. In the present study, we demonstrate that deletion of IHH is responsible for the Creeper trait and associated early embryo death in chicken.
By extensive bioinformatics analyses, we identify that a large deletion region ranging from 21,798,705 to 21,810,600 on GGA7 harbors the entire IHH gene in the Creeper chickens. IHH is a member of the hedgehog family, which is a conserved signaling family in vertebrates and some invertebrates 18 (Fig. 3c). In higher vertebrates, there are at least three highly similar hedgehog genes including Sonic hedgehog (SHH), Desert hedgehog (DHH), and IHH 19,20 . IHH is mainly expressed in the developing cartilage elements, indicating that it plays pivotal roles in regulating numerous developmental processes of bone formation 21 . It has been suggested that IHH is essential for endochondral bone formation and coordinates the proliferation and differentiation of chondrocytes, and osteoblast differentiation . Single-point mutations in the IHH gene can cause the brachydactyly type A-1(BDA-1) with shortening or missing of middle phalanges 26,27 and the severe skeletal dysplasia named acrocapitofemoral dysplasia (ACFD) in humans 28 . Genetic studies also demonstrated that deletion of one amino acid in IHH resulted in mild BDA-1 in a small Dutch family 29 . In the mouse model, most knock-out mouse embryos (IHH −/− ) die before birth while the heterozygotes (IHH +/− ) survive but exhibit foreshortened forelimbs and unsegmented or uncalcified digits after birth 30 , which is highly similar to what we observed in the Creeper chickens. As one of the key genes driving animal body development, IHH is conserved in gene function and signaling pathway in the major animal clades 25 and is required for embryonic bone formation in development 31 . Moreover, we also found that the deletion of IHH is closely associated with the Creeper trait in chicken following the complete Mendelian segregation. These pieces of evidence strongly suggest that IHH is a causative gene responsible for the Creeper trait.
Although IHH plays significant roles in bone formation, skeletal morphogenesis, and gut development, and mutations in IHH cause abnormal digital development and morphogenesis problem in human and mouse 30 , little is known about the effect of IHH deletion in chicken cartilages. In the present study, we demonstrated that decreased expression of IHH in the Creeper chicken cartilages affected bone development. Effects of allele deletion in the heterozygous progeny cannot be masked by one wild-type allele in this case. It has been widely accepted that haploinsufficiency is the genetic mechanism for loss-of-function mutations in most autosomal dominant disorders, which is discovered in all eukaryotes from yeast to humans 32 . Genetically, haploinsufficiency refers to a dominant phenotype in diploid organisms which are heterozygous for the deletion of one functional gene copy, leading to an abnormal phenotype or disease states 33 . In humans, several reported disorders are caused by haploinsufficiency mutations, such as autoimmune lymphoproliferative syndrome 34 , immune dysregulation 35 , cognitive abnormality 36 , and even enhanced cancer susceptibility 37,38 . Previous studies have also shown the effect of haploinsufficiency mutations for abnormal phenotypes in model organisms. In Drosophila, the Minute mutations causing numerous developmental abnormalities are good examples to interpret the effects of ribosomal haploinsufficiency in multicellular eukaryotes 39,40 . A study on zebrafish demonstrated that 11 ribosomal genes haploinsufficiency mutations resulted in increased susceptibility to tumor formation 41 . Therefore we assume that IHH haploinsufficiency is insufficient to provide enough gene product to sustain normal function, thus resulted in the Creeper trait in the heterozygotes while dominant homogenous embryos were lethal during the early stage of embryonic development in chicken. 
Further experimental studies on heterozygotes are necessary to determine whether the Creeper trait is a haploinsufficiency disorder in large populations.
Autosomal dominant disorders are detrimental and rare in domestic animals. Examples of such diseases are wattles in swine 42 ; epidermolysis bullosa in Danish Hereford calves 43 ; congenital myotonia in goat 44 ; hyperkalemic periodic paralysis in horse 45 ; autosomal dominant progressive retinal atrophies in Bullmastiff and English mastiff dogs 46,47 ; and collagen dysplasia in cat 48 . Here we add our Creeper phenotype to this list and provide another important case for illustrating autosomal dominant inheritance in animal genetics. It is considered that chickens have been used as a good and traditional model for studying embryonic vertebrate development as their embryos can be easily manipulated in vitro 49 . Using the Creeper chicken as a model, the expression of IHH and the status of bone formation can be dynamically monitored. The Creeper chicken thus provides another significant feature to serve as an attractive and unique model for studying IHH function and haploinsufficiency disorders.
To the best of our knowledge, this is the first report answering the longstanding riddle about the Creeper trait in animal genetics. Our data strongly suggest that the deletion of IHH is the causative mutation for the Creeper trait and associated early embryo death in chicken. This study will not only highlight the biological role of IHH in animal development, but also shed light on why chicken provides a valuable and unique model to examine the genetic basis and biological processes that likely underpin phenotypic mutations in humans and other species. Fig. 1c-e) was performed using DicomPACS Digital X-ray System and Imaging Solutions (SEDECAL, Madrid, Spain).
Materials and Methods
We constructed a cross between Creeper chickens to generate different phenotypic progenies for association testing, incubation experiments, and tissue sampling (Supplementary Table S1). We conducted two trials for phenotyping and DNA/RNA sample collection. All chicks were pedigreed according to intercross information. In the incubation experiment (first trial), we incubated fertile eggs from the Creeper population and a control cross between Creeper and wild-type birds (Table 1 and Supplementary Table S1). In the second trial, we collected all 511 fertilized eggs with viable embryos from the Creeper cross for association study (Table 2). DNA was extracted from embryos at different incubation periods. We did three manual candlings to identify early death embryos at E4, mid-term death at E10 and late death at E19. We measured shank length using a digital vernier caliper at every two weeks from hatch to 20 weeks of age. Creeper chickens can be clearly classified by shank length at day of hatch. We chose chicks from full-sibs with different phenotypes (Creeper and wild-type chickens). Twelve birds derived from a mating of Creeper chickens, including 6 Creeper chickens (Cp/+ ) and 6 wild-type chickens (+ /+ ), were chosen for whole-genome sequencing, as phenotyped by shank length at 4 weeks of age (Creeper chicken: < 4.08 cm (mean-SD); wild-type chicken: > 5.18 cm (mean + SD)). This design decreased the noise of genetic background, which may complicate the following analyses. In total, 3 pairs of full-sib females and 3 pairs of full-sib males from 4 sire families were chosen for whole-genome sequencing.
Blood samples were collected from the wing vein and stored in acid citrate dextrose (ACD) anticoagulant at − 20 °C prior to DNA extraction. Genomic DNA from whole blood was extracted by standard phenol-chloroform methods. Embryonic DNA was extracted from whole embryos using PureLink ® Genomic DNA Kits (Invitrogen).
Whole-genome library construction and sequencing. Paired-end libraries with average insert size of approximately 500 bp were constructed for each sample according to the manufacturer's instructions (Illumina, San Diego, CA). Library quality and concentration were determined using an Agilent 2100 Bioanalyzer (Agilent Technologies, Palo Alto, CA) and Qubit 3.0 Fluorometer (Life Technologies, CA, USA). These libraries were subjected to 2 × 100 bp paired-end (PE100) sequencing on a HiSeq2000 instrument (Illumina). A standard Illumina base-calling pipeline was used to process the raw fluorescent images and the called sequences. Read quality was evaluated using the FastQC package (www.bioinformatics.babraham.ac.uk/projects/fastqc/). For genome re-sequencing data, short-reads were trimmed 15 bp from the 3′ -end according to the base quality distributions. The raw sequencing data reported in this paper have been publicly deposited in the NCBI Short Reads Archive (SRA) with accession number SRP047477.
Whole-genome sequencing data analysis. Paired-end short reads were aligned to the Gallus gallus reference genome (Galgal 4) using the Burrows-Wheeler Aligner (BWA, version 0.6.2) algorithm with default parameters 50 . SAMTools (version 0.1.19) 51 was used to remove duplicate reads that might have been caused by PCR. To improve the accuracy of reads alignment, aligned reads were realigned at putative SNPs and indel positions using the Genome Analysis Toolkit (GATK, version 2.5.2) realigner algorithm 52 . Base quality scores were recalibrated using the GATK recalibration algorithm. The options used for SNP and indel calling were a minimum 5-read mapping depth and mapping quality of 20. As indels ranged from 1 to 1000 bp, we used three different algorithms to search for indels. The GATK software is suitable for detecting small indels. Mate-Clever is more suitable to probe the medium-size indels from the resequencing data than GATK 53 and was thus used to find the medium-size indels. We used Pindel (version 0.2.4t) 54 to detect large structural variations (SVs). Copy number variation (CNV) was called using the CNVnator (version 0.2.7) 55 . Window bin-size was set to 100 bp with the GC-content adjustment, and the mean-shift algorithm was used to infer the CNV with the p < 0.01. Once we obtained all the SVs from the different algorithms/software, we used the VCFTools (version 0.12b) 56 to extract the common variations for Creeper or wild-type group. SV was filtered by 20 reads for large indels and 8 reads for small SVs. The final SV supported in all 6 individuals of each group was considered to be the potential causative mutations for the Creeper trait. Genetic effects of the SV was assessed by their variation type and marked as HIGH, MODERATE and LOW effects for each SV suggested by SnpEff (version 3.4) 57 . In general, large chromosomal deletions, exon deletions, frame shifts and lost/gained stops were thought to have strong genetic effects on phenotypes. 
Among these variations with potentially strong genetic effects, large deletion/insertion was ranked on the top. In addition, SV on chromosome 7 was set to first-class for further genetic analysis, as previous studies showed that structural rearrangement of MNR2 gene causing Rose-comb in chicken is closely linked with the semi-lethal Creeper trait (Fig. 3a).
Diagnostic genotyping and Sanger sequencing validation. The delF and delR primers located upstream and downstream of the deleted region were utilized to amplify 224 bp fragment, while IHH-F and IHH-R primers annealing to IHH were used to produce 438 bp fragment. Two pairs of primers were designed to amplify a series of specific bands (Supplementary Table S7). The PCR reactions were carried out in a reaction volume of 20.0 μ l containing 100 ng genomic DNA, 2.0 μ l of 10 × Taq polymerase buffer, 400 μ M dNTPs, 1.5 U Taq DNA polymerase (Tiangen Biotech, Beijing, China), 2.0 μ M of each delF and delR, and 4.0 μ M of each IHH-F and IHH-R primer. The diagnostic PCR protocols included 94 °C for 5 min, 35 cycles of 94 °C for 30 s, 57 °C for 30 s, 72 °C for 35 s, and a final extension at 72 °C for 10 min. The PCR products were separated by 2.0% agarose gel electrophoresis. Sanger sequencing validation was performed for two different PCR products, which were analyzed using BLAT (http://genome.ucsc.edu/cgi-bin/hgBlat) tools to cross-validate the expected sequences and reference sequences.
Quantitative PCR confirmation of IHH deletion. A total of 48 DNA samples, including 16 lethal embryos, 16 Creeper and 16 wild-type birds, were examined using qPCR to quantify DNA in the three different genotypes. All lethal embryos were further validated to be homozygotes of Cp/Cp by the diagnostic PCR test. PCR primers were designed using the Primer3web software (http://primer3.ut.ee/) and UCSC In-Silico PCR to examine the specificity and sensitivity (http://genome.ucsc.edu/). The single-copy PCCA gene, previously validated as a non-CNV locus, was selected as an internal control 58 . The relative copy number was calculated using the 2 −ΔCt method. mRNA expression analysis by qPCR. Tibial cartilages were taken from heterozygous (Cp/+ ) and wild-type (+ /+ ) male birds at day of hatch (D1, N = 8) and day 84 post-hatch (D84, N = 8). Early lethal embryos were collected and validated by genotyping at the fourth day of incubation (E4, N = 8). All the tissue samples and embryos were stored in RNAlater (Ambion, Austin, USA). Total RNA was extracted using RNA Mini kit (Life Technologies, Carlsbad, USA) and purified by RNeasy Mini Kit (Qiagen, Hilden, Germany). Briefly, 1 μ g total RNA was used for first strand cDNA synthesis using TransScript gDNA Remover RT kit (TransGen, Beijing, China) according to the manufacturer's protocols. All primers spanning at least one intron were designed for qPCR by Primer Premier 5.0 software (Premier Biosoft, Canada). The primer sequences are listed in Supplementary Table S7. All samples were run in triplicate using cDNA for qPCR using an ABI Prism 7500 instrument (Applied Biosystems, Carlsbad, CA). The expression data were normalized using GAPDH as an endogenous reference gene and calculated using the 2 −ΔΔCt method.
Western blot. Tibial cartilages were obtained from Creeper and wild-type male chickens at D1 and D84 (N = 3 for each group at each developmental stage), respectively. Total proteins were extracted using RIPA buffer (Beyotime, Nanjing, China) following the manufacturer's protocols. Total protein concentrations were measured using the BCA protein kit assay (Sigma-Aldrich, St. Louis, USA). Protein samples were separated by 12.0% SDS-PAGE and electro-transferred to PVDF membrane (Millipore, Billerica, USA). The membranes were blocked for 1 h at room temperature and incubated overnight at 4 °C with rabbit anti-IHH (1:1000, Novus Biologicals, Littleton, USA) and rabbit anti-β -actin (1:1000, Cell Signaling Technology, Beverly, USA), followed by HRP-conjugated anti-rabbit IgG (1:1000, Novus Biologicals, Littleton, USA) for 1 h. The enhanced chemiluminescence detection kit (Beyotime, Nanjing, China) was used to visualize the immunoreactive proteins. Quantification and data analysis were conducted using Image J software 59 .
Statistical analyses. Statistical analyses were conducted under the R computation environment (www.r-project.org). The two-tailed student's t-test was used to compare the mRNA expression level and DNA quantification between samples. Segregation ratios obtained from different matings were analyzed by Chi-square tests. Data were expressed as mean ± SD. Differences were considered to be statistically significant at p value < 0.05. |
Thanks to Target for sponsoring this post!
While Disney films have brought us songs, tears, and cheer, they’ve also brought us groan-inducing chuckles in the form of a good (or bad) pun from time to time. Here are some of our favorites!
The Lion King
It’s our problem-free, pun philosophy.
Beauty and the Beast
This subtle reference was a real work of art and got our gears turning.
Winnie the Pooh
Rabbit: “Tie them together Piglet can you tie a knot?”
Piglet: “I cannot.”
Rabbit: “Uh, so you can knot?”
Piglet: “No, I cannot knot.”
Adorable.
Frozen
We’ve all been there. Case in point: After a full day of walking at the Disney Parks.
Finding Nemo
Marlin: “There was a mollusk and a sea cucumber. The sea cucumber walks over to the mollusk and says ‘with fronds like these, who needs anemones?’”
Dad jokes … are we right?
Aladdin
Genie: “Well I feel sheepish! Okay you baaa’d boy, but no more freebies.”
Since this moment occurred after Genie realized Aladdin had tricked him, we guess you could say Aladdin pulled the wool over his eyes.
“LAVA”
Singer: I wish that the earth, sea, the sky up above
will send me someone to lava.
I lava you.
Be right back, overflowing with feels.
Hercules
“Zero to Hero”: “From appearance fees and royalties, our Herc had cash to burn, now nouveau riche and famous, he could tell what the Grecian “earn”!”
Someone better call IX I I cause this pun is on intellectual fire!
If you love Disney puns, you’re going to love Zootopia (Walt Disney Animation Studios 55th animated feature coming out March 4). It takes place in a world of anthropomorphic animals that enjoy getting rides with Zuber and shopping at Targoat. Speaking of which, be sure to stock up on exclusive Zootopia gear. Only at Target.
Posted 3 years Ago |
package com.x.organization.assemble.control.wrapout;
import java.util.ArrayList;
import java.util.List;
import com.x.base.core.entity.JpaObject;
import com.x.base.core.http.annotation.Wrap;
import com.x.organization.core.entity.Person;
@Wrap(Person.class)
public class WrapOutPerson extends Person {

    private static final long serialVersionUID = -8456354949288335211L;

    /**
     * Field names stripped from serialized output, seeded with the
     * JPA-invisible field set.
     */
    public static List<String> Excludes = new ArrayList<>(JpaObject.FieldsInvisible);

    static {
        // Credentials must never be exposed to API clients.
        Excludes.add("password");
    }

    /** Identities attached to this person. */
    private List<WrapOutIdentity> identityList;

    /** Company-level duties held by this person. */
    private List<WrapOutCompanyDuty> companyDutyList;

    /** Department-level duties held by this person. */
    private List<WrapOutDepartmentDuty> departmentDutyList;

    /** Current online status indicator. */
    private String onlineStatus;

    /** Sort weight used when ranking people in listings. */
    private Long rank;

    public Long getRank() {
        return rank;
    }

    public void setRank(Long rank) {
        this.rank = rank;
    }

    public List<WrapOutIdentity> getIdentityList() {
        return identityList;
    }

    public void setIdentityList(List<WrapOutIdentity> identityList) {
        this.identityList = identityList;
    }

    public List<WrapOutCompanyDuty> getCompanyDutyList() {
        return companyDutyList;
    }

    public void setCompanyDutyList(List<WrapOutCompanyDuty> companyDutyList) {
        this.companyDutyList = companyDutyList;
    }

    public List<WrapOutDepartmentDuty> getDepartmentDutyList() {
        return departmentDutyList;
    }

    public void setDepartmentDutyList(List<WrapOutDepartmentDuty> departmentDutyList) {
        this.departmentDutyList = departmentDutyList;
    }

    public String getOnlineStatus() {
        return onlineStatus;
    }

    public void setOnlineStatus(String onlineStatus) {
        this.onlineStatus = onlineStatus;
    }
}
|
/**
* Creates a new Knight Character and adds it to the Party HashMap, it needs all the
* parameters to create a Knight object.
* @param name
* The name of the Character
* @param healthpoints
* The health point of the Character
* @param attack
* The attack value of the Character
* @param defense
* The defense value of the Character
*/
public void createKnight(@NotNull String name, int healthpoints, int attack, int defense){
if (playerParty.size()<maxPartyNumber){
Knight knight =new Knight(name, turns,healthpoints, attack,
defense);
knight.addListener(handler2);
knight.addListener(handler3);
playerParty.put(name,knight);
livingPlayers+=1;
}
} |
def create_l2_gateway_connection_precommit(self, ctx, gw_connection):
    """Validate a new L2 gateway connection before it is committed.

    Resolves the bridge endpoint profile referenced by the gateway's
    device and rejects the connection if the profile cannot be found on
    the NSX backend, or if another connection already exists for the
    same (profile, transport zone, VLAN) combination.

    :param ctx: neutron request context.
    :param gw_connection: dict describing the connection being created.
    :raises n_exc.InvalidInput: if the bridge endpoint profile cannot be
        resolved, endpoints cannot be listed, or a duplicate connection
        with the same segmentation id exists.
    """
    admin_ctx = ctx.elevated()
    nsxlib = self._core_plugin.nsxlib
    l2gw_id = gw_connection.get(l2gw_const.L2GATEWAY_ID)
    devices = self._get_l2_gateway_devices(ctx, l2gw_id)
    # The device name carries the bridge endpoint profile name/id.
    bep_id = devices[0].get('device_name')
    try:
        nsxlib.bridge_endpoint_profile.get_id_by_name_or_id(bep_id)
    except nsxlib_exc.ManagerError as e:
        msg = (_("Error while retrieving bridge endpoint profile "
                 "%(bep_id)s from NSX backend. Check that the profile "
                 "exists and there are not multiple profiles with "
                 "the given name. Exception: %(exc)s") %
               {'bep_id': bep_id, 'exc': e})
        raise n_exc.InvalidInput(error_message=msg)
    interface_name, seg_id = self._get_conn_parameters(
        admin_ctx, gw_connection)
    try:
        # Collect every bridge endpoint already defined for this profile
        # on the same transport zone and VLAN.
        endpoints = nsxlib.search_all_resource_by_attributes(
            nsxlib.bridge_endpoint.resource_type,
            bridge_endpoint_profile_id=bep_id,
            vlan_transport_zone_id=interface_name,
            vlan=seg_id)
        endpoint_map = {endpoint['id']:
                        endpoint['bridge_endpoint_profile_id']
                        for endpoint in endpoints}
    except nsxlib_exc.ManagerError as e:
        msg = (_("Error while retrieving endpoints for bridge endpoint "
                 "profile %(bep_id)s from NSX backend. "
                 "Exception: %(exc)s") % {'bep_id': bep_id, 'exc': e})
        raise n_exc.InvalidInput(error_message=msg)
    with db_api.CONTEXT_WRITER.using(admin_ctx):
        # A bridge endpoint port's device_id is the endpoint id; if any
        # existing port maps back to this profile, the (profile, vlan)
        # pair is already in use.
        port_filters = {'device_owner': [nsx_constants.BRIDGE_ENDPOINT]}
        ports = self._core_plugin.get_ports(
            admin_ctx, filters=port_filters)
        for port in ports:
            device_id = port.get('device_id')
            if endpoint_map.get(device_id) == bep_id:
                msg = (_("Cannot create multiple connections with the "
                         "same segmentation id %(seg_id)s for bridge "
                         "endpoint profile %(bep_id)s") %
                       {'seg_id': seg_id,
                        'bep_id': bep_id})
                raise n_exc.InvalidInput(error_message=msg)
/**
 * Regression test: ZKUtil.getChildDataAndWatchForNewChildren must not throw a
 * NullPointerException when invoked with a path to a nonexistent znode.
 * The call is only expected to complete; its return value is not checked.
 */
@Test
@SuppressWarnings("deprecation")
public void testGetChildDataAndWatchForNewChildrenShouldNotThrowNPE() throws Exception {
ZKUtil.getChildDataAndWatchForNewChildren(ZKW, "/wrongNode");
}
/*
* Copyright (c) 2003, Adam Dunkels.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* This file is part of the Contiki desktop OS.
*
*
*/
#include "program-handler.h"
#include "contiki-net.h"
#include "lib/petsciiconv.h"
#include "shell.h"
#include "telnetd.h"
#include <string.h>
/* ASCII line-terminator codes (newline / carriage return). */
#define ISO_nl 0x0a
#define ISO_cr 0x0d
/* Dimensions (columns x rows) of the scrolling log window. */
#define XSIZE 78
#define YSIZE 30
static struct ctk_window window;
/* Flat character buffer holding YSIZE rows of XSIZE columns each. */
static char log[XSIZE * YSIZE];
/* Single CTK label that renders the entire log buffer in the window. */
static struct ctk_label loglabel =
{CTK_LABEL(0, 0, XSIZE, YSIZE, log)};
/*-----------------------------------------------------------------------------------*/
void
telnetd_gui_output(const char *str1, int len1, const char *str2, int len2)
{
  /* 'i' is static as a code-size/speed idiom for the 8-bit targets this
     CTK GUI targets; it makes the function non-reentrant, which is fine
     for the single-threaded event loop. */
  static unsigned int i;
  /* Scroll the log buffer up by one row of XSIZE characters. */
  for(i = 1; i < YSIZE; ++i) {
    memcpy(&log[(i - 1) * XSIZE], &log[i * XSIZE], XSIZE);
  }
  /* Write the new line (str1 followed by str2) into the bottom row. */
  strncpy(&log[(YSIZE - 1) * XSIZE], str1, XSIZE);
  if(len1 < XSIZE) {
    strncpy(&log[(YSIZE - 1) * XSIZE] + len1, str2, XSIZE - len1);
    if(len1 + len2 < XSIZE) {
      /* NUL-terminate only when the combined line fits; a full-width row
         is left unterminated -- presumably the fixed-size CTK label stops
         at XSIZE characters, but confirm against the CTK label renderer. */
      log[(YSIZE - 1) * XSIZE + len1 + len2] = 0;
    }
  }
  CTK_WIDGET_REDRAW(&loglabel);
}
/*-----------------------------------------------------------------------------------*/
/* Tears down the shell-server GUI by closing its CTK window. */
void
telnetd_gui_quit(void)
{
  ctk_window_close(&window);
}
/*-----------------------------------------------------------------------------------*/
/* Registers the built-in shell commands, creates the log window with a
   single full-size label backed by the 'log' buffer, clears the buffer,
   and opens the window. */
void
telnetd_gui_init(void)
{
  shell_file_init();
  shell_ps_init();
  shell_run_init();
  shell_text_init();
  shell_time_init();
  shell_wget_init();
  ctk_window_new(&window, XSIZE, YSIZE, "Shell server");
  CTK_WIDGET_ADD(&window, &loglabel);
  /* Clear the backing buffer before the window is first drawn. */
  memset(log, 0, sizeof(log));
  ctk_window_open(&window);
}
/*-----------------------------------------------------------------------------------*/
/* CTK event hook: shuts the telnet daemon down when the user closes the
   log window. 'data' is unused here but required by the handler signature. */
void
telnetd_gui_eventhandler(process_event_t ev, process_data_t data)
{
  if(ev == ctk_signal_window_close) {
    telnetd_quit();
  }
}
/*-----------------------------------------------------------------------------------*/
|
The evolution of alliances' relevance in the USA
Over the last two decades, important contributions on alliances have shown the increasing relevance of this type of organisation. However, while working with the Thomson Financial database on alliances, we found a decrease in the number of alliances created since 1994-1995. This unexpected result is also the first to document this new trend in alliances, and it calls into question the relevance of this mode of organisation relative to others. Transaction cost theory, which explains the choice between these different types of organisation, can therefore be useful on this topic. At the same time, the nature of this evolution is itself in question, which is why evolutionary economics can also be helpful. Several hypotheses can be advanced: is the development of alliances transitory, or is it periodic? Is it a stop-and-go evolution?
// Next loads the next word
func (source HunspellWordSource) Next() bool {
hasToken := source.scanner.Scan()
if hasToken && !isValidLine(source.scanner.Text()) {
hasToken = source.Next()
}
return hasToken
} |
"""
Extract reported SubT artifacts and generate JSON file
"""
import os
import json
from osgar.logger import LogReader, lookup_stream_names
from osgar.lib.serialize import deserialize
def get_annotation_item(filename, result):
    """Build one VIA (VGG Image Annotator) image-metadata record.

    `result` is a list of detections, each of the form
    [['phone', 0.24000608921051025, [563, 71, 617, 107]]] i.e.
    [artifact_name, confidence, [x1, y1, x2, y2]].
    Returns a dict with the image path (prefixed '../md/'), a sentinel
    size of -1 and one rectangular region per detection.
    """
    regions = []
    for artifact_name, confidence, bbox in result:
        left, top, right, bottom = bbox
        regions.append({
            "shape_attributes": {
                "name": "rect",
                "x": left,
                "y": top,
                "width": right - left,
                "height": bottom - top
            },
            "region_attributes": {
                "artifact": artifact_name
            }
        })
    return {
        "filename": '../md/' + filename,
        "size": -1,
        "regions": regions,
        "file_attributes": {}
    }
def debug2dir(filename, out_dir, detector_name):
    """Extract detection debug images from an OSGAR log file.

    Writes one JPEG per detection into `out_dir` plus a VIA-compatible
    'annotation.json' describing the detected bounding boxes.
    """
    names = lookup_stream_names(filename)
    # The log must contain every debug stream of the selected detector.
    assert detector_name + '.debug_rgbd' in names, names
    assert detector_name + '.debug_camera' in names, names
    assert detector_name + '.localized_artf' in names, names
    assert detector_name + '.debug_cv_result' in names, names
    assert 'rosmsg.sim_time_sec' in names, names
    # Stream IDs are 1-based in the log reader, hence the +1.
    rgbd_id = names.index(detector_name + '.debug_rgbd') + 1
    camera_id = names.index(detector_name + '.debug_camera') + 1
    artf_id = names.index(detector_name + '.localized_artf') + 1
    result_id = names.index(detector_name + '.debug_cv_result') + 1
    sim_sec_id = names.index('rosmsg.sim_time_sec') + 1
    # Most recent values seen on the auxiliary streams; each image message
    # is annotated with whatever arrived here last.
    sim_time_sec = None
    artf = None
    last_result = None
    # Skeleton of the VIA project file; per-image records are filled in below.
    out_json = {
      "_via_settings": {
        "ui": {
          "annotation_editor_height": 25,
          "annotation_editor_fontsize": 0.8,
          "leftsidebar_width": 18,
          "image_grid": {
            "img_height": 80,
            "rshape_fill": "none",
            "rshape_fill_opacity": 0.3,
            "rshape_stroke": "yellow",
            "rshape_stroke_width": 2,
            "show_region_shape": True,
            "show_image_policy": "all"
          },
          "image": {
            "region_label": "artifact",
            "region_color": "artifact",
            "region_label_font": "10px Sans",
            "on_image_annotation_editor_placement": "NEAR_REGION"
          }
        },
        "core": {
          "buffer_size": 18,
          "filepath": {},
          "default_filepath": ""
        },
        "project": {
          "name": "subt2020"
        }
      },
      "_via_img_metadata": {
      },
      "_via_attributes": {
        "region": {
          "artifact": {
            "type": "dropdown",
            "description": "",
            "options": {
              "backpack": "",
              "phone": "",
              "survivor": "",
              "robot": "",
              "nothing": "",
              "vent": "",
              "fire_extinguisher": "",
              "rope": "",
              "helmet": "",
              "breadcrumb": "",
              "drill": "",
              "cube": ""
            },
            "default_options": {
              "helmet": True
            }
          }
        },
        "file": {}
      }
    }
    stream_ids = [rgbd_id, camera_id, artf_id, result_id, sim_sec_id]
    for dt, channel, data in LogReader(filename, only_stream_id=stream_ids):
        data = deserialize(data)
        if channel == sim_sec_id:
            sim_time_sec = data
        elif channel == artf_id:
            artf = data
        elif channel == result_id:
            last_result = data
        elif channel in [rgbd_id, camera_id]:
            # 'debug_rgbd' is the last published topic for given detection
            if channel == rgbd_id:
                robot_pose, camera_pose, rgb_compressed, depth_compressed = data
            else:  # channel == camera_id
                # image stereo artefact localization
                # expects localized pair of images [camera_name, [robot_pose, camera_pose, image], [robot_pose, camera_pose, image]]
                assert len(data) == 3, data[0]
                rgb_compressed = data[1][2]  # first image of stereo pair
            image = rgb_compressed
            assert artf is not None
            # Prefer simulation time for the file name; fall back to log time.
            time_sec = sim_time_sec if sim_time_sec is not None else int(dt.total_seconds())
            name = os.path.basename(filename)[:-4] + '-' + artf[0] + '-' + str(time_sec) + '.jpg'
            print(name, last_result)
            with open(os.path.join(out_dir, name), 'wb') as f:
                f.write(image)
            # VIA keys metadata records by "<filename><filesize>"; size is -1.
            out_json["_via_img_metadata"][name + '-1'] = get_annotation_item(name, last_result)
    with open(os.path.join(out_dir, 'annotation.json'), 'w') as f:
        json.dump(out_json, f, indent=2)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    # The positional argument is an OSGAR log file; the previous help text
    # incorrectly described it as a "JPEG filename".
    parser.add_argument('filename', help='OSGAR log filename')
    # debug2dir() writes images and annotation.json into out_dir, so it is
    # effectively mandatory; require it for a clear CLI error instead of a
    # TypeError from os.path.join(None, ...) deep inside debug2dir().
    parser.add_argument('--out-dir', required=True,
                        help='dump classified debug images into directory')
    parser.add_argument('--detector-name', help='detector module name (detector, detector_rear)', default='detector')
    args = parser.parse_args()
    debug2dir(args.filename, args.out_dir, args.detector_name)
# vim: expandtab sw=4 ts=4
|
<gh_stars>10-100
package goterators
import (
"fmt"
"testing"
)
// TestSum verifies that Sum adds up every element of an int slice.
func TestSum(t *testing.T) {
	input := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
	const want = 210
	if got := Sum(input); got != want {
		t.Errorf("Expected = %v , got = %v", want, got)
	}
}
// ExampleSum demonstrates summing a slice. The Output comment makes the
// example executable by `go test`; without it the example was compiled
// but never run, so it verified nothing.
func ExampleSum() {
	testSource := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
	fmt.Println("Sum: ", Sum(testSource))
	// Output: Sum:  210
}
|
/**
* Given a certain three cell indices, returns the cell with the max amount of a given pheremone
*
* @param lower
* @param upper
* @param mid
* @param pherType - desired pheremone
* @param neighbors
* @return
*/
private int findMaxPher (int lower, int upper, int mid, int pherType, Cell[] neighbors) {
double low = getPher(neighbors[lower], pherType);
double midd = getPher(neighbors[mid], pherType);
double up = getPher(neighbors[upper], pherType);
double max = Math.max(Math.max(low, midd), up);
if (max <= 0) {
return findNextOrientation(pherType, neighbors);
}
if (max == low) {
return lower;
}
else if (max == midd) {
return mid;
}
return upper;
} |
/**
* @file Kinematics.cpp
* @author jonas
* @date 9/18/20
* Description here TODO
*/
#include "Kinematics.hpp"
#include "Angle.hpp" // for Angle
#include "KinematicExceptions.hpp" // for UnreachableException
#include "MathUtil.hpp" // for cosineLawAngle, squareAngle
#include <cmath> // for atan2, sqrt, M_PI, M_PI_2
#include <fmt/format.h> // for format, print
#include <stdexcept> // for invalid_argument
// Result pair of calcInLegPlane(): the solved hip angle and knee servo
// angle within the 2D leg plane.
struct HipKneeAngles {
    Angle hipAngle;
    Angle kneeAngle;
};
// Solves the planar (2D) inverse kinematics inside the leg plane: given the
// foot position (x, z) relative to the hip, returns the hip angle and the
// knee *servo* angle (the knee is driven through a four-bar linkage, hence
// the squareAngle() correction). Throws UnreachableException when the
// linkage has no solution for the requested pose.
static HipKneeAngles calcInLegPlane(double x, double z, const LegConfig &config) {
    // Distance hip -> foot in the leg plane.
    const auto l = sqrt(x * x + z * z);
    // Angle between vector to foot and upper leg
    const auto innerHipAngle = math::cosineLawAngle(config.upperLegLength, l, config.lowerLegLength);
    // Angle -x axis to foot vector
    const auto footAngle = Angle{atan2(z, -x)};
    // -x -> footVector -> upper leg
    const auto hipAngle = footAngle + innerHipAngle;
    // Angle between upper and lower leg
    const auto innerKneeAngle = math::cosineLawAngle(config.lowerLegLength, config.upperLegLength, l);
    // Apply viereck (four-bar linkage): beta is the exterior knee angle.
    const auto beta = Angle{M_PI} - innerKneeAngle;
    try {
        const auto alpha = math::squareAngle(config.upperLegLength, config.lowerLegLeverLength, config.wireLength,
                                             config.servoArmLength, beta);
        const auto kneeServoAngle = (hipAngle + alpha) - Angle{M_PI_2};
        return HipKneeAngles{.hipAngle=hipAngle, .kneeAngle=kneeServoAngle};
    } catch (std::invalid_argument &e) {
        // squareAngle() signals geometric impossibility via invalid_argument;
        // translate it into the kinematics-level exception.
        throw UnreachableException(
                fmt::format("Error in computing knee servo angle for config {}, x={}, z={}, beta={}",
                            config, x, z, beta));
    }
}
// Full 3D inverse kinematics for one leg: first solves the abduction angle
// in the YZ plane, then projects the foot into the leg plane and delegates
// the remaining 2D problem to calcInLegPlane().
LegPose Kinematics::legPoseFromFootPosition(const Vector3d &footPos, const LegConfig &config) {
    // NOTE(review): unconditional debug print on every IK solve; consider
    // removing or routing through a log level before shipping.
    fmt::print("Solving leg pose for {}\n", footPos);
    // Solve abduction angle in YZ plane
    // Distance of foot to origin in YZ plane
    const auto footDistanceYZSquare = footPos.y * footPos.y + footPos.z * footPos.z;
    // foot Z coordinate in leg plane
    const auto lzSquared = footDistanceYZSquare - config.abductionOffset * config.abductionOffset;
    if (lzSquared < 0) {
        throw UnreachableException{"Distance to foot smaller than abduction offset."};
    }
    // Restore the sign lost by squaring: foot below hip => negative Z.
    const auto legPlaneZ = sqrt(lzSquared) *
                           (footPos.z > 0 ? 1 : -1);
    // Angle between abduction axis and foot vector, positive = foot above hip in leg plane
    const auto phi = Angle{atan2(legPlaneZ, config.abductionOffset)};
    // Angle between robot y axis and foot vector
    const auto hfa = Angle{atan2(footPos.z, footPos.y)};
    // Abduction angle, positive = tilt upwards
    const auto abductionAngle = hfa - phi;
    auto planeResults = calcInLegPlane(footPos.x, legPlaneZ, config);
    return LegPose{.abductionAngle=abductionAngle, .hipAngle=planeResults.hipAngle, .kneeAngle=planeResults.kneeAngle};
}
|
/**
* Converts Java values to a script language values.
*
* @param values
* the java values
* @return the script values
*/
public static Object[] fromJava(Object... values) {
Object[] converted = new Object[values.length];
for (int i = 0; i < values.length; ++i) {
converted[i] = fromJava(values[i]);
}
return converted;
} |
def updateAvailableLicenseSeats(installinfo):
    '''Queries the configured LicenseInfoURL server for available license
    seat counts and annotates matching optional_installs items with a
    boolean 'licensed_seats_available' key. No-op when no server is
    configured or there are no optional installs.'''
    license_info_url = munkicommon.pref('LicenseInfoURL')
    if not license_info_url:
        # No license server configured; nothing to do.
        return
    if not installinfo.get('optional_installs'):
        return
    license_info = {}
    # Only ask about items that advertise seat info and are not installed yet.
    items_to_check = [item['name']
                      for item in installinfo['optional_installs']
                      if item.get('licensed_seat_info_available')
                      and not item['installed']]
    # Item names go into the query string; use '&' as the joining character
    # when the configured URL already contains a query component.
    start_index = 0
    q_char = "?"
    if "?" in license_info_url:
        q_char = "&"
    while start_index < len(items_to_check):
        # Batch names so the resulting URL stays under 256 characters,
        # shrinking the batch from the end until it fits.
        end_index = len(items_to_check)
        while True:
            query_items = ['name=' + quote_plus(item)
                           for item in items_to_check[start_index:end_index]]
            querystring = q_char + '&'.join(query_items)
            url = license_info_url + querystring
            if len(url) < 256:
                break
            end_index = end_index - 1
        munkicommon.display_debug1('Fetching licensed seat data from %s', url)
        try:
            license_data = getDataFromURL(url)
            munkicommon.display_debug1('Got: %s', license_data)
            license_dict = FoundationPlist.readPlistFromString(
                license_data)
        except (fetch.MunkiDownloadError, fetch.GurlDownloadError), err:
            # Download failed; log and continue with the next batch.
            munkicommon.display_error('Error from %s: %s', url, err)
        except FoundationPlist.FoundationPlistException:
            # Server answered but the plist did not parse.
            munkicommon.display_error(
                'Bad license data from %s: %s', url, license_data)
        else:
            # Merge this batch's seat counts into the running result.
            license_info.update(license_dict)
        start_index = end_index
    for item in installinfo['optional_installs']:
        if item['name'] in items_to_check:
            munkicommon.display_debug2(
                'Looking for license info for %s', item['name'])
            # Items the server did not report default to 0 seats available.
            seats_available = False
            seat_info = license_info.get(item['name'], 0)
            try:
                seats_available = int(seat_info) > 0
                munkicommon.display_debug1(
                    'Recording available seats for %s: %s',
                    item['name'], seats_available)
            except ValueError:
                munkicommon.display_warning(
                    'Bad license data for %s: %s', item['name'], seat_info)
            item['licensed_seats_available'] = seats_available
def as_tabular(elements, var, wkt=False, wkb=False, path=None):
    """Dump `elements` to a comma-separated text file and return its path.

    Each row contains: 1-based index, timestamp, the property named by
    `var`, optionally the 'level' property, the geometry area (in the
    EPSG:3005 projection so GetArea() is metric), and optionally the WKB
    and/or WKT geometry representations.

    Fixes over the previous version: removed the redundant f.close()
    inside the `with` block (the context manager already closes the file)
    and replaced the `.keys()` membership test with the idiomatic `in`.
    """
    import osgeo.ogr as ogr
    if path is None:
        path = get_temp_path(suffix='.txt')
    # Source geometries are assumed WGS84 lat/lon -- TODO confirm upstream.
    sr = ogr.osr.SpatialReference()
    sr.ImportFromEPSG(4326)
    # BC Albers equal-area projection, used so GetArea() yields real areas.
    sr2 = ogr.osr.SpatialReference()
    sr2.ImportFromEPSG(3005)
    with open(path, 'w') as f:
        for ii, element in enumerate(elements):
            geo = ogr.CreateGeometryFromWkb(element['geometry'].wkb)
            geo.AssignSpatialReference(sr)
            geo.TransformTo(sr2)
            area = geo.GetArea()
            f.write(','.join([repr(ii + 1),
                              element['properties']['timestamp'].strftime("%Y-%m-%d %H:%M:%S"),
                              repr(element['properties'][var])]))
            if 'level' in element['properties']:
                f.write(',' + repr(element['properties']['level']))
            f.write(',' + repr(area))
            if wkb:
                f.write(',' + repr(element['geometry'].wkb))
            if wkt:
                f.write(',' + repr(element['geometry'].wkt))
            f.write('\n')
    return path
<reponame>connectedcustomer/Project1<filename>tests_python/tests/test_mempool.py
import time
import pytest
from tools import utils
BAKE_ARGS = ['--max-priority', '512', '--minimal-timestamp']
@pytest.mark.mempool
@pytest.mark.multinode
@pytest.mark.slow
@pytest.mark.incremental
class TestMempool:
    """Tests mempool propagation across three nodes, one of which is
    started with --disable-mempool. Marked incremental: the test methods
    run in declaration order and share sandbox state."""
    def test_init(self, sandbox):
        # Node 3 runs with the mempool disabled, so it should never start
        # a prevalidator or see pending operations.
        sandbox.add_node(1)
        sandbox.add_node(2)
        sandbox.add_node(3, params=['--disable-mempool'])
        utils.activate_alpha(sandbox.client(1))
    def test_running_prevalidators(self, sandbox):
        assert sandbox.client(1).get_prevalidator()
        assert sandbox.client(2).get_prevalidator()
        assert not sandbox.client(3).get_prevalidator()
    def test_mempool_empty(self, sandbox):
        for i in range(1, 4):
            assert sandbox.client(i).mempool_is_empty()
    def test_transfer(self, sandbox):
        sandbox.client(1).transfer(1.000, 'bootstrap1', 'bootstrap2')
    def test_sleep_3s(self):
        # Give the pending transfer time to propagate between nodes.
        time.sleep(3)
    def test_mempool_include_transfer(self, sandbox):
        # Nodes 1 and 2 should have picked up the pending transfer; node 3
        # (mempool disabled) should not.
        assert not sandbox.client(1).mempool_is_empty()
        assert not sandbox.client(2).mempool_is_empty()
        assert sandbox.client(3).mempool_is_empty()
    def test_bake_for(self, sandbox):
        sandbox.client(1).bake('bootstrap1')
    def test_sleep_2s(self):
        time.sleep(2)
    def test_mempools_are_empty(self, sandbox):
        # Baking included the operation in a block, emptying all mempools.
        for i in range(1, 4):
            assert sandbox.client(i).mempool_is_empty()
    def test_injection_fails_on_mempool_disabled_node(self, sandbox):
        with pytest.raises(Exception):
            sandbox.client(3).transfer(2.000, 'bootstrap2', 'bootstrap3')
|
/*****************************************************************************/
/** Adds a core morph animation.
  *
  * Appends the given core morph animation to the core model instance and
  * returns the slot index it was stored at, which becomes its morph
  * animation ID.
  *
  * @param pCoreMorphAnimation A pointer to the core morph animation that
  *                            should be added.
  *
  * @return One of the following values:
  *         \li the assigned morph animation \b ID of the added core morph animation
  *         \li \b -1 if an error happend
  *****************************************************************************/
int CalCoreModel::addCoreMorphAnimation(CalCoreMorphAnimation *pCoreMorphAnimation)
{
  const int morphAnimationId = (int)m_vectorCoreMorphAnimation.size();
  m_vectorCoreMorphAnimation.push_back(pCoreMorphAnimation);
  return morphAnimationId;
}
<filename>slycore/src/io/memorystream.cpp
#include "sly/io/memorystream.h"
using namespace sly;
// Default-constructs an empty stream at position 0.
MemoryStream::MemoryStream() {
}
// Pre-reserves `length` bytes of capacity; the stream still starts empty.
MemoryStream::MemoryStream(size_t length) {
    _buffer.reserve(length);
}
MemoryStream::~MemoryStream() {}
// Copies up to `size` bytes from the current position into `buffer` and
// advances the position by the number of bytes copied.
retval<size_t> MemoryStream::read(vptr_t buffer, size_t size) {
    // Clamp the request to what is actually available. Guard the case of
    // _position already past the end (setPosition() does not validate),
    // which previously made `_buffer.size() - _position` underflow to a
    // huge unsigned count.
    if (_position >= _buffer.size()) {
        size = 0;
    } else if ((_position + size) > _buffer.size()) {
        size = _buffer.size() - _position;
    }
    if (size > 0) {
        auto span = gsl::span((byte_t*)buffer, size);
        std::copy(_buffer.begin() + _position, _buffer.begin() + _position + size, span.begin());
        _position += size;
    }
    // NOTE(review): this returns the new stream position, not the number of
    // bytes read -- kept as-is for existing callers, but the two values
    // differ whenever reading does not start at position 0; confirm which
    // contract callers actually expect.
    return _position;
}
// Writes `size` bytes at the current position, growing the backing buffer
// as needed, and advances the position past the written data.
retval<void> MemoryStream::write(vptr_t data, size_t size) {
    const size_t endOfWrite = _position + size;
    if (endOfWrite > _buffer.size()) {
        _buffer.resize(endOfWrite);
    }
    const auto source = gsl::span((byte_t*)data, size);
    std::copy(source.begin(), source.end(), _buffer.begin() + _position);
    _position = endOfWrite;
    return success();
}
// Number of bytes currently stored in the stream.
size_t MemoryStream::size() const {
    return _buffer.size();
}
// Current read/write cursor.
size_t MemoryStream::position() const {
    return _position;
}
// Moves the cursor by a signed `offset` relative to the current position.
// Fails (without moving) when the target would fall outside [0, size()).
retval<void> MemoryStream::seek(s64 offset) {
    // Compute the target in signed arithmetic. The original compared
    // `_position + offset < 0`, but size_t + s64 promotes to an unsigned
    // type, so that check could never fire and a large negative offset
    // silently wrapped around to a huge position.
    const s64 target = static_cast<s64>(_position) + offset;
    if (target < 0 || target >= static_cast<s64>(_buffer.size())) {
        return failed();
    }
    _position = static_cast<size_t>(target);
    return success();
}
// Sets the cursor to an absolute position.
// NOTE(review): unlike seek(), no bounds check is done here; a position
// past size() makes read() return nothing and write() zero-fill the gap --
// confirm this is intended.
retval<void> MemoryStream::setPosition(size_t position) {
    _position = position;
    return success();
}
// Nothing to flush for an in-memory buffer.
retval<void> MemoryStream::flush() {
    return success();
}
// Nothing to release; the buffer is freed by the destructor.
retval<void> MemoryStream::close() {
    return success();
}
The view of the red light area in Budhwar Peth on Saturday evening was different from the usual one. Shehnai could be heard playing, rangolis were being drawn and Diwali lamps had lit up the alley otherwise crowded by commercial sex workers (CSWs) and their customers.
Arguably for the first time, the CSWs, police and citizens from 'outside' were celebrating Diwali together in Budhwar Peth. In an attempt to bridge the gap, Faraskhana Police, who have jurisdiction over the red light area, had invited over 3,500 CSWs to celebrate Diwali over 'faral' (refreshments).
A 50-year-old brothel keeper from Budhwar Peth said, "Some of us have no families, some have been ostracised by our kin. We do celebrate every year but we miss our families. It really feels great when someone from outside joins you in the celebrations. We come in contact with the police when there are raids or there is some crime. But this is different."
A 25-year-old CSW said, "I had drawn a rangoli last year in front of our building. But this is a different feeling. We know that society will never accept us back but such events come as a pleasant change."
Senior police inspector Bhanupratap Barge, whose initiative it was to organise the programme, said, "When I took charge at Faraskhana police station, I had called for a meeting of CSWs. I had asked them for three things - that they will not keep underage girls, that they will inform us about forced prostitution and that they will not stand on the roads outside Budhwar Peth. There has been commendable cooperation from them on all three fronts. We also wanted to take a step further to build their trust and reciprocate their support."
He added, "We decided to invite them for Faral. We also invited dignitaries from various walks of life so that there is interaction. It is our feeling that better communication leads to better policing."
A large number of transgender CSWs also attended the programme.
Bindumadhav Khire, president of Samapathik Trust, an organisation working with gays, lesbians, transgenders and the intersex community on health and advocacy issues said, "Such events will help reduce the fear of police in the minds of CSWs." Rajesh Taru, who owns a tea stall in the heart of Budhwar Peth, said, "They should organise more such programmes."
Deputy commissioner of police Makarand Ranade and ACP Prashant Khaire were present for the programme.
E-learning set donated to area school
PUNE: Nutan Samartha Vidyalaya in Budhwar Peth, which caters to educational needs of children of commercial sex workers, among other underprivileged young ones, received an e-learning set as a donation. Donated by Rotary Club Pune Katraj and Navchaitanya Hasya Pariwar, the state-of-the-art teaching aid will be beneficial for students up to class IX. Bharatkumar Sancheti, Prakash Dhoka and school secretary Chandrashekhar Pansare were amongst those who were present on the occasion. Hundreds of underprivileged children have benefited from the education offered by the Nutan Samartha Vidyalaya, which does not take a single penny from these children towards educational expenditure. ENS
ALSO READ PMPML decides to appoint experts for dedicated BRTS cell
Please read our terms of use before posting comments |
Photo illustration by Slate. Photo by iStock/Thinkstock.
The best place to live in America, according to Money’s 2017 ranking, is Fishers, Indiana (population: 90,000). The little-known Indianapolis suburb, we’re told, is not only “safe and quaint and full of young families,” it also boasts a growing economy and a farmer’s market that was recently voted “one of Indiana’s best.” Are you convinced yet?
Not so fast. According to 24/7 Wall Street’s rankings, Money got it wrong—albeit by only a few miles. That outlet crowned Fishers’ next-door neighbor Carmel, Indiana (population 91,000), the best American city to live in, while Fishers rated nary a mention. But at least we can all agree that the Greater Indianapolis area is America’s garden spot, right?
As it turns out, we cannot. A ranking by Livability, developed in conjunction with New York University and famed urbanist Richard Florida, claims to be the most scientific of all—and it puts Rochester, Minnesota, at the top of the list. No place in Indiana cracks its top 50, though our old friend Fishers makes a cameo at No. 99. U.S. News’ top 50 is also notably Indiana-less. And as for Rochester, U.S. News and World Report doesn’t rank it as even the best Rochester in America: That honor goes to Rochester, New York, which checks in at No. 39 on its list. (Austin is its big winner.)
If you’re beginning to suspect that perhaps these city rankings are rather arbitrary, well, that would be one reasonable takeaway. But before you dismiss them altogether, it’s worth considering what we can learn from their discrepancies—not only about U.S. cities, but about ranking algorithms in general, and how they can mislead us about all kinds of other topics.
First, a point in defense of the city rankings. The wild variation from one “best cities” list to the next might seem like an indictment because it implies that each ranking reflects its peculiar methodology more than any real-world consensus. Yet it would actually be more suspicious if they all agreed with each other.
Take, for example, the broad agreement exhibited by popular magazine rankings of the best colleges in the United States. With few exceptions, they inevitably place such famous institutions as Harvard, Princeton, Stanford, Yale, and MIT near the top. This gives them a veneer of plausibility—which is exactly the point. As critics have long pointed out, rankings pioneer U.S. News knew its list had to reflect conventional wisdom to be taken seriously so it developed a methodology that would achieve that. The publication’s rankings have since become a sort of self-fulfilling prophecy.
Let’s count it as a virtue, then, that city rankings tend to exhibit more diversity in the specific data they consider and the weights they assign them: It implies that the lists’ creators probably haven’t reverse-engineered their models to match a preconceived output. It also means that they often shine a light on places that would otherwise pass unknown or underappreciated. Rochester really does have a lot going for it!
That said, it should give readers pause that the same cities they find at the top of one magazine’s list are often nowhere to be found on another—even as they both purport to be prioritizing roughly the same traits. So why can’t our professional list–makers agree on what makes for the optimal city?
One factor is relatively boring, but bears mentioning anyway. It’s that even more than the college rankings, the city rankings tend to shuffle their criteria in significant ways on a yearly basis. Money, for instance, confined its 2017 list to cities with populations between 10,000 and 100,000, whereas in 2016 it considered cities ranging from 50,000 to 300,000. Livability set the upper bound for its 2017 ranking at 350,000, while U.S. News and 24/7 Wall Street imposed no upper bound but ruled out cities smaller than 50,000 and 65,000, respectively. These kinds of constraints might detract from the rankings’ comprehensiveness—and belie the unqualified superlatives in their titles—but there is some sense in them: As Malcolm Gladwell once pointed out in a New Yorker critique of college rankings, the more diverse the entities you’re trying to compare, the less meaningful your comparison will be.
Yet those population cutoffs hardly begin to explain the huge disparities in outcome from one magazine to the next. Fishers, for instance, qualified for all four of the lists mentioned above, but made the top 50 in just one of them. And here is where we come to the more interesting questions of methodology.
The four “best places to live” lists mentioned above—Money, 24/7 Wall Street, Livability, and U.S. News—all include metrics designed to represent cost of living, quality of life, and economic opportunity among other criteria. But the thing to remember is that the data they’re feeding into their formulas is, in most cases, only a proxy for what they’re really trying to measure. (This is true of many forms of data.) And it isn’t always a good proxy.
For instance, U.S. News uses a metropolitan area’s per-capita crime rate as a proxy for how safe its residents are. That sounds reasonable enough, until you consider that metropolitan areas have more or less arbitrary boundaries; that crime is not evenly distributed within them; and that crime rates themselves vary in methodology. You might live in a perfectly safe neighborhood, yet your city might suffer in U.S. News’ rankings because of a high-crime pocket 20 miles away that happens to fall within the same metropolitan area. (The complexity of crime scores was one of several issues raised in a 2015 report by the Chicago Council on Global Affairs on how to interpret city rankings.)
Money, for its part, nods to consideration of ethnic diversity in its 2017 “Best Places to Live” list. Yet it doesn’t factor into the rankings in quite the way you might expect. In compiling the list of cities to evaluate, Money says it eliminated “any place that had … a lack of ethnic diversity.” In other words, it set an arbitrary threshold for what counts as “diverse.” But as long as a city cleared that undisclosed bar, Money didn’t seem to care whether it was extremely diverse, somewhat diverse, or just a tiny bit diverse. Hence the victory for Fishers, which, per the 2010 census, is 86 percent white.
Cultural amenities, which Money’s list and others also claim to consider, are notoriously hard to quantify. Money tries to do it by counting the “number of leisure activities in the town and surrounding area, including bars, restaurants, museums, sports complexes, and green spaces.” It’s nice to have three small museums nearby, sure—but is it better than having one great one? And does history matter to a city’s culture at all? Not according to this methodology: Fishers barely existed as recently as 1990, when it had just 7,500 people. It has since boomed to more than 10 times that size, thanks to a flood of new development.
Livability’s list makes perhaps the most earnest effort to weigh the things that real people—at least, some relatively affluent subset of real people—find appealing in cities. It developed its scoring system with the help of New York University’s Initiative for Creativity and Innovation in Cities, directed by urbanists Richard Florida and Steven Pedigo, and used data from the economic data consultancy Emsi. As such, it appears tilted toward the sorts of factors that might appeal to Florida’s “creative class:” a highly ranked school system, ethnic diversity, a “thriving arts scene,” and even the prevalence of farmers markets.
That sort of open bias can be a blessing in a list like this. All such models represent normative judgments, and it’s better for readers to know upfront what biases are encoded in them. Yet even the most thoughtful attempts to quantify something as subjective as “livability” are bound to end up quantifying something rather different. Often, if you look closely enough at a list and its attendant methodology, you can discern a crude pattern in the rankings that belies the complexity of the model that produced them.
For instance, Livability’s rankings weigh 40 different data points, yet they turn out to be thoroughly dominated by smallish cities that are home to disproportionately large colleges, hospitals, and/or tech-company headquarters (but especially colleges and hospitals). Fifth-ranked Charlottesville, Virginia, third-ranked Ann Arbor, Michigan, and second-ranked Iowa City are all best known for their major public universities; top-ranked Rochester is home to the famous Mayo Clinic, along with the University of Minnesota–Rochester and several other colleges. Seventh-ranked Palo Alto, California, and eighth-ranked Madison host Stanford University and the University of Wisconsin, respectively. Expedia and T-Mobile have headquarters in sixth-ranked Bellevue, Washington; ninth-ranked Overland Park, Kansas, is Sprint’s home base; and so on. All fine places to make one’s home, no doubt. Together, however, they embody a version of “livability” that has more to do with the kinds of data you can readily plug into a spreadsheet than the experience of living there.
An even more glaring pattern jumps out from Money’s 2017 rankings. Recall that Fishers, its overall winner, is essentially brand-new. Well, it isn’t the only one. Second-ranked Allen, Texas, has boomed from about 18,000 people in 1990 to nearly 100,000 today. Fourth-ranked Franklin, Tennessee, has gone from 20,000 to 75,000 in that same period. Fifth-ranked Olive Branch, Mississippi, grew from just 3,600 people in 1990 to nearly 10 times that size by 2010, earning a Bloomberg profile for being the “fastest-growing city in the U.S.” And so on. This is a list of boomtowns, not best places.
What you’re beginning to get a feel for when you find these commonalities is what the model is actually measuring, as opposed to what it purports to measure. Livability went looking for dynamic places to live and found a bunch of company towns where dynamic people happen to live (not always by choice). Money sought small cities with job growth, but its algorithm spat out a list of new-build suburbs and exurbs where jobs can’t help but grow (because there were none before). These lists, in the end, are best regarded as the outputs of various more-or-less arbitrary ways of slicing and dicing the specific types of data that tend to be available for the majority of cities.
Such exercises in number-crunching can have their value provided one understands that this is what they are. Unfortunately, the editors who explain and frame these lists to readers usually go out of their way to pretend otherwise, penning jaunty ex post facto justifications for each city’s inclusion. These are often unintentionally funny: Did you know that Money’s No. 5 place to live, Olive Branch, is home to a popular bonsai nursery? Or that its residents “no longer have to leave the town’s borders now for basic services and shopping?” Back up the moving truck!
The blatant mismatch between the model’s output and reality is what makes these lists an instructive case study. They’re textbook examples of how ranking algorithms can end up producing outputs far afield from their purported goals. That’s crucial to understand in an age like ours, when weighted models of this kind underpin not only special issues of otherwise irrelevant newsmagazines, but things like credit scores and no-fly lists that directly affect people’s lives. (These weightier applications are the sort catalogued in Cathy O’Neil’s trenchant Weapons of Math Destruction.)
It’s a cruel irony that the methodologies behind patently silly city rankings are often more accessible to the public than those that go into the scores used to deny people loans or set their prison sentences. But then, it’s probably no accident that such models are kept opaque by the people who create and use them. Otherwise, we could all see for ourselves just how arbitrary they are.
Home page photo of Fishers, Indiana, by Scott Morris via Flickr/Creative Commons. |
/*PLEASE DO NOT EDIT THIS CODE*/
/*This code was generated using the UMPLE 1.31.1.5860.78bb27cc6 modeling language!*/
package cruise.associations.compositions;
import java.util.*;
// line 74 "../../../../src/TestHarnessCompositionsLeft.ump"
// NOTE(review): Umple-generated harness class for the "X" end of a
// bidirectional X(0..*) <-> Y(0..3) association (the Y-side cap is
// maximumNumberOfY0_n__m_n()). Change the .ump model and regenerate
// instead of editing this file by hand.
public class X0_n__m_n
{
//------------------------
// MEMBER VARIABLES
//------------------------
//X0_n__m_n Attributes
private int num;
//X0_n__m_n Associations
// Y-partners currently linked to this X; size never exceeds maximumNumberOfY0_n__m_n()
private List<Y0_n__m_n> y0_n__m_n;
//------------------------
// CONSTRUCTOR
//------------------------
public X0_n__m_n(int aNum)
{
num = aNum;
y0_n__m_n = new ArrayList<Y0_n__m_n>();
}
//------------------------
// INTERFACE
//------------------------
// Setter always succeeds; the boolean result mirrors Umple's uniform setter API.
public boolean setNum(int aNum)
{
boolean wasSet = false;
num = aNum;
wasSet = true;
return wasSet;
}
public int getNum()
{
return num;
}
/* Code from template association_GetMany */
public Y0_n__m_n getY0_n__m_n(int index)
{
Y0_n__m_n aY0_n__m_n = y0_n__m_n.get(index);
return aY0_n__m_n;
}
// Returns a read-only view; callers cannot mutate the association through it.
public List<Y0_n__m_n> getY0_n__m_n()
{
List<Y0_n__m_n> newY0_n__m_n = Collections.unmodifiableList(y0_n__m_n);
return newY0_n__m_n;
}
public int numberOfY0_n__m_n()
{
int number = y0_n__m_n.size();
return number;
}
public boolean hasY0_n__m_n()
{
boolean has = y0_n__m_n.size() > 0;
return has;
}
public int indexOfY0_n__m_n(Y0_n__m_n aY0_n__m_n)
{
int index = y0_n__m_n.indexOf(aY0_n__m_n);
return index;
}
/* Code from template association_MinimumNumberOfMethod */
public static int minimumNumberOfY0_n__m_n()
{
return 0;
}
/* Code from template association_MaximumNumberOfMethod */
public static int maximumNumberOfY0_n__m_n()
{
return 3;
}
/* Code from template association_AddManyToManyMethod */
// Links both ends of the association. Returns false (without change) when the
// link already exists or this side is full; if the reverse add is rejected,
// our half of the link is rolled back.
public boolean addY0_n__m_n(Y0_n__m_n aY0_n__m_n)
{
boolean wasAdded = false;
if (y0_n__m_n.contains(aY0_n__m_n)) { return false; }
if (numberOfY0_n__m_n() >= maximumNumberOfY0_n__m_n())
{
return wasAdded;
}
y0_n__m_n.add(aY0_n__m_n);
if (aY0_n__m_n.indexOfXVar(this) != -1)
{
wasAdded = true;
}
else
{
wasAdded = aY0_n__m_n.addXVar(this);
if (!wasAdded)
{
// reverse end refused the link: undo our half
y0_n__m_n.remove(aY0_n__m_n);
}
}
return wasAdded;
}
/* Code from template association_RemoveMany */
// Unlinks both ends; if the reverse removal fails, the element is restored
// at its old position to keep both sides consistent.
public boolean removeY0_n__m_n(Y0_n__m_n aY0_n__m_n)
{
boolean wasRemoved = false;
if (!y0_n__m_n.contains(aY0_n__m_n))
{
return wasRemoved;
}
int oldIndex = y0_n__m_n.indexOf(aY0_n__m_n);
y0_n__m_n.remove(oldIndex);
if (aY0_n__m_n.indexOfXVar(this) == -1)
{
wasRemoved = true;
}
else
{
wasRemoved = aY0_n__m_n.removeXVar(this);
if (!wasRemoved)
{
y0_n__m_n.add(oldIndex,aY0_n__m_n);
}
}
return wasRemoved;
}
/* Code from template association_AddIndexControlFunctions */
// Add then reposition; out-of-range indices are clamped to the list bounds.
public boolean addY0_n__m_nAt(Y0_n__m_n aY0_n__m_n, int index)
{
boolean wasAdded = false;
if(addY0_n__m_n(aY0_n__m_n))
{
if(index < 0 ) { index = 0; }
if(index > numberOfY0_n__m_n()) { index = numberOfY0_n__m_n() - 1; }
y0_n__m_n.remove(aY0_n__m_n);
y0_n__m_n.add(index, aY0_n__m_n);
wasAdded = true;
}
return wasAdded;
}
// Moves an existing link to @index, or adds it there when absent.
public boolean addOrMoveY0_n__m_nAt(Y0_n__m_n aY0_n__m_n, int index)
{
boolean wasAdded = false;
if(y0_n__m_n.contains(aY0_n__m_n))
{
if(index < 0 ) { index = 0; }
if(index > numberOfY0_n__m_n()) { index = numberOfY0_n__m_n() - 1; }
y0_n__m_n.remove(aY0_n__m_n);
y0_n__m_n.add(index, aY0_n__m_n);
wasAdded = true;
}
else
{
wasAdded = addY0_n__m_nAt(aY0_n__m_n, index);
}
return wasAdded;
}
// Composition delete: cascades delete() to every linked Y object.
public void delete()
{
while (y0_n__m_n.size() > 0)
{
Y0_n__m_n aY0_n__m_n = y0_n__m_n.get(y0_n__m_n.size() - 1);
aY0_n__m_n.delete();
y0_n__m_n.remove(aY0_n__m_n);
}
}
public String toString()
{
return super.toString() + "["+
"num" + ":" + getNum()+ "]";
}
} |
// Repository: sadema/GildedRose-Refactoring-Kata
package com.gildedrose.item_types.backstage;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
 * Parameterized tests for the backstage-pass item: sellIn always ticks
 * down by one, while quality rises as the concert approaches and drops
 * to zero once the sellIn date has passed.
 */
class BackstageItemTest {

    @ParameterizedTest
    @CsvSource({"2,1", "1,0", "0,-1", "-1,-2", "-2,-3"})
    void updateSellIn(int sellIn, int expected) {
        // sellIn must decrease by exactly one on every update, even below zero
        final BackstageItem item = BackstageItem.of(sellIn, 0);
        item.updateSellIn();
        assertEquals(expected, item.getSellIn());
    }

    @ParameterizedTest
    @CsvSource({"1,28", "0,28", "-1,0"})
    void updateQuality(int sellIn, int expected) {
        // close to the concert quality jumps by 3; afterwards it collapses to 0
        final BackstageItem backstage = BackstageItem.of(sellIn, 25);
        backstage.updateQuality();
        assertEquals(expected, backstage.getQuality());
    }
}
|
// SPDX-License-Identifier: GPL-2.0+
//
// soc-pcm.c -- ALSA SoC PCM
//
// Copyright 2005 Wolfson Microelectronics PLC.
// Copyright 2005 Openedhand Ltd.
// Copyright (C) 2010 Slimlogic Ltd.
// Copyright (C) 2010 Texas Instruments Inc.
//
// Authors: <NAME> <<EMAIL>>
// <NAME> <<EMAIL>>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-link.h>
#include <sound/initval.h>
/* Serialise DPCM (re)configuration against other PCM operations on the card. */
static inline void snd_soc_dpcm_mutex_lock(struct snd_soc_pcm_runtime *rtd)
{
	mutex_lock_nested(&rtd->card->pcm_mutex, rtd->card->pcm_subclass);
}

static inline void snd_soc_dpcm_mutex_unlock(struct snd_soc_pcm_runtime *rtd)
{
	mutex_unlock(&rtd->card->pcm_mutex);
}

/* Lockdep-only assertion that the caller holds the card's pcm_mutex. */
#define snd_soc_dpcm_mutex_assert_held(rtd) \
	lockdep_assert_held(&(rtd)->card->pcm_mutex)

/* Take/release the ALSA stream lock of the substream behind @rtd/@stream. */
static inline void snd_soc_dpcm_stream_lock_irq(struct snd_soc_pcm_runtime *rtd,
						int stream)
{
	snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}

#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \
	snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags)

static inline void snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd,
						  int stream)
{
	snd_pcm_stream_unlock_irq(snd_soc_dpcm_get_substream(rtd, stream));
}

#define snd_soc_dpcm_stream_unlock_irqrestore(rtd, stream, flags) \
	snd_pcm_stream_unlock_irqrestore(snd_soc_dpcm_get_substream(rtd, stream), flags)

/* Maximum number of front ends that may share one back end. */
#define DPCM_MAX_BE_USERS 8

/* Human-readable DAI names for log output; multi-DAI links get a placeholder. */
static inline const char *soc_cpu_dai_name(struct snd_soc_pcm_runtime *rtd)
{
	return (rtd)->num_cpus == 1 ? asoc_rtd_to_cpu(rtd, 0)->name : "multicpu";
}

static inline const char *soc_codec_dai_name(struct snd_soc_pcm_runtime *rtd)
{
	return (rtd)->num_codecs == 1 ? asoc_rtd_to_codec(rtd, 0)->name : "multicodec";
}
#ifdef CONFIG_DEBUG_FS
/* Translate a DPCM state into its fixed name for debugfs output. */
static const char *dpcm_state_string(enum snd_soc_dpcm_state state)
{
	static const struct {
		enum snd_soc_dpcm_state state;
		const char *name;
	} state_names[] = {
		{ SND_SOC_DPCM_STATE_NEW,	"new"		},
		{ SND_SOC_DPCM_STATE_OPEN,	"open"		},
		{ SND_SOC_DPCM_STATE_HW_PARAMS,	"hw_params"	},
		{ SND_SOC_DPCM_STATE_PREPARE,	"prepare"	},
		{ SND_SOC_DPCM_STATE_START,	"start"		},
		{ SND_SOC_DPCM_STATE_STOP,	"stop"		},
		{ SND_SOC_DPCM_STATE_SUSPEND,	"suspend"	},
		{ SND_SOC_DPCM_STATE_PAUSED,	"paused"	},
		{ SND_SOC_DPCM_STATE_HW_FREE,	"hw_free"	},
		{ SND_SOC_DPCM_STATE_CLOSE,	"close"		},
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(state_names); i++)
		if (state_names[i].state == state)
			return state_names[i].name;

	/* value outside the enum range */
	return "unknown";
}
/*
 * Render the state of one FE stream direction, plus all of its connected
 * BEs, into @buf for the debugfs "state" file. Returns bytes written.
 */
static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
			       int stream, char *buf, size_t size)
{
	struct snd_pcm_hw_params *params = &fe->dpcm[stream].hw_params;
	struct snd_soc_dpcm *dpcm;
	ssize_t offset = 0;

	/* FE state */
	offset += scnprintf(buf + offset, size - offset,
			    "[%s - %s]\n", fe->dai_link->name,
			    stream ? "Capture" : "Playback");

	offset += scnprintf(buf + offset, size - offset, "State: %s\n",
			    dpcm_state_string(fe->dpcm[stream].state));

	/* hw_params are only meaningful between HW_PARAMS and STOP */
	if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
	    (fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
		offset += scnprintf(buf + offset, size - offset,
				    "Hardware Params: "
				    "Format = %s, Channels = %d, Rate = %d\n",
				    snd_pcm_format_name(params_format(params)),
				    params_channels(params),
				    params_rate(params));

	/* BEs state */
	offset += scnprintf(buf + offset, size - offset, "Backends:\n");

	if (list_empty(&fe->dpcm[stream].be_clients)) {
		offset += scnprintf(buf + offset, size - offset,
				    " No active DSP links\n");
		goto out;
	}

	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		params = &dpcm->hw_params;

		offset += scnprintf(buf + offset, size - offset,
				    "- %s\n", be->dai_link->name);

		offset += scnprintf(buf + offset, size - offset,
				    " State: %s\n",
				    dpcm_state_string(be->dpcm[stream].state));

		if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
		    (be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
			offset += scnprintf(buf + offset, size - offset,
					    " Hardware Params: "
					    "Format = %s, Channels = %d, Rate = %d\n",
					    snd_pcm_format_name(params_format(params)),
					    params_channels(params),
					    params_rate(params));
	}
out:
	return offset;
}

/* debugfs read(): dump both stream directions of an FE into one page. */
static ssize_t dpcm_state_read_file(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct snd_soc_pcm_runtime *fe = file->private_data;
	ssize_t out_count = PAGE_SIZE, offset = 0, ret = 0;
	int stream;
	char *buf;

	if (fe->num_cpus > 1) {
		dev_err(fe->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* hold the DPCM mutex so the FE/BE topology cannot change mid-dump */
	snd_soc_dpcm_mutex_lock(fe);
	for_each_pcm_streams(stream)
		if (snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream))
			offset += dpcm_show_state(fe, stream,
						  buf + offset,
						  out_count - offset);
	snd_soc_dpcm_mutex_unlock(fe);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, offset);

	kfree(buf);
	return ret;
}

static const struct file_operations dpcm_state_fops = {
	.open = simple_open,
	.read = dpcm_state_read_file,
	.llseek = default_llseek,
};

/* Create <card>/<dai_link>/state in debugfs for dynamic (FE) links only. */
void soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd)
{
	if (!rtd->dai_link->dynamic)
		return;

	if (!rtd->card->debugfs_card_root)
		return;

	rtd->debugfs_dpcm_root = debugfs_create_dir(rtd->dai_link->name,
						    rtd->card->debugfs_card_root);

	debugfs_create_file("state", 0444, rtd->debugfs_dpcm_root,
			    rtd, &dpcm_state_fops);
}

/* Expose one FE<->BE connection's state as <fe>/<be>:<direction>/state. */
static void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm, int stream)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "%s:%s", dpcm->be->dai_link->name,
			 stream ? "capture" : "playback");
	/* on allocation failure the debugfs entry is simply not created */
	if (name) {
		dpcm->debugfs_state = debugfs_create_dir(
			name, dpcm->fe->debugfs_dpcm_root);
		debugfs_create_u32("state", 0644, dpcm->debugfs_state,
				   &dpcm->state);
		kfree(name);
	}
}

static void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm)
{
	debugfs_remove_recursive(dpcm->debugfs_state);
}

#else
/* !CONFIG_DEBUG_FS: per-connection state files compile away to nothing. */
static inline void dpcm_create_debugfs_state(struct snd_soc_dpcm *dpcm,
					     int stream)
{
}

static inline void dpcm_remove_debugfs_state(struct snd_soc_dpcm *dpcm)
{
}
#endif
/* Set FE's runtime_update state; the state is protected via PCM stream lock
 * for avoiding the race with trigger callback.
 * If the state is unset and a trigger is pending while the previous operation,
 * process the pending trigger action here.
 */
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
				     int stream, enum snd_soc_dpcm_update state)
{
	struct snd_pcm_substream *substream =
		snd_soc_dpcm_get_substream(fe, stream);

	snd_soc_dpcm_stream_lock_irq(fe, stream);
	/* leaving an update window: replay a trigger that arrived meanwhile */
	if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
		/* trigger_pending stores cmd + 1; 0 means "none pending" */
		dpcm_fe_dai_do_trigger(substream,
				       fe->dpcm[stream].trigger_pending - 1);
		fe->dpcm[stream].trigger_pending = 0;
	}
	fe->dpcm[stream].runtime_update = state;
	snd_soc_dpcm_stream_unlock_irq(fe, stream);
}

/* BE update state needs no trigger replay; a plain assignment suffices. */
static void dpcm_set_be_update_state(struct snd_soc_pcm_runtime *be,
				     int stream, enum snd_soc_dpcm_update state)
{
	be->dpcm[stream].runtime_update = state;
}
/**
 * snd_soc_runtime_action() - Increment/Decrement active count for
 * PCM runtime components
 * @rtd: ASoC PCM runtime that is activated
 * @stream: Direction of the PCM stream
 * @action: Activate stream if 1. Deactivate if -1.
 *
 * Increments/Decrements the active count for all the DAIs and components
 * attached to a PCM runtime.
 * Should typically be called when a stream is opened.
 *
 * Must be called with the rtd->card->pcm_mutex being held
 */
void snd_soc_runtime_action(struct snd_soc_pcm_runtime *rtd,
			    int stream, int action)
{
	struct snd_soc_dai *dai;
	int i;

	snd_soc_dpcm_mutex_assert_held(rtd);

	for_each_rtd_dais(rtd, i, dai)
		snd_soc_dai_action(dai, stream, action);
}
EXPORT_SYMBOL_GPL(snd_soc_runtime_action);

/**
 * snd_soc_runtime_ignore_pmdown_time() - Check whether to ignore the power down delay
 * @rtd: The ASoC PCM runtime that should be checked.
 *
 * This function checks whether the power down delay should be ignored for a
 * specific PCM runtime. Returns true if the delay is 0, if the DAI link has
 * been configured to ignore the delay, or if none of the components benefits
 * from having the delay.
 */
bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_component *component;
	bool ignore = true;
	int i;

	if (!rtd->pmdown_time || rtd->dai_link->ignore_pmdown_time)
		return true;

	/* the delay is only worthwhile if at least one component wants it */
	for_each_rtd_components(rtd, i, component)
		ignore &= !component->driver->use_pmdown_time;

	return ignore;
}

/**
 * snd_soc_set_runtime_hwparams - set the runtime hardware parameters
 * @substream: the pcm substream
 * @hw: the hardware parameters
 *
 * Sets the substream runtime hardware parameters.
 */
int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream,
				 const struct snd_pcm_hardware *hw)
{
	substream->runtime->hw = *hw;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_set_runtime_hwparams);
/* DPCM stream event, send event to FE and all active BEs. */
int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
			   int event)
{
	struct snd_soc_dpcm *dpcm;

	snd_soc_dpcm_mutex_assert_held(fe);

	for_each_dpcm_be(fe, dir, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;

		dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
			be->dai_link->name, event, dir);

		/* skip the STOP event for a BE that still has users */
		if ((event == SND_SOC_DAPM_STREAM_STOP) &&
		    (be->dpcm[dir].users >= 1))
			continue;

		snd_soc_dapm_stream_event(be, dir, event);
	}

	snd_soc_dapm_stream_event(fe, dir, event);

	return 0;
}
/*
 * Cache the negotiated stream parameters on the DAI; a NULL @params
 * clears them (the DAI is going inactive).
 */
static void soc_pcm_set_dai_params(struct snd_soc_dai *dai,
				   struct snd_pcm_hw_params *params)
{
	unsigned int rate = 0;
	unsigned int channels = 0;
	int sample_bits = 0;

	if (params) {
		rate = params_rate(params);
		channels = params_channels(params);
		sample_bits = snd_pcm_format_physical_width(params_format(params));
	}

	dai->rate = rate;
	dai->channels = channels;
	dai->sample_bits = sample_bits;
}
/*
 * Constrain the new stream to the rate/channels/sample_bits that @soc_dai
 * is already running with, when the DAI driver or the link requests
 * symmetric configuration between playback and capture.
 */
static int soc_pcm_apply_symmetry(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *soc_dai)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	/* only an already-active DAI imposes its current configuration */
	if (!snd_soc_dai_active(soc_dai))
		return 0;

#define __soc_pcm_apply_symmetry(name, NAME) \
	if (soc_dai->name && (soc_dai->driver->symmetric_##name || \
			      rtd->dai_link->symmetric_##name)) { \
		dev_dbg(soc_dai->dev, "ASoC: Symmetry forces %s to %d\n",\
			#name, soc_dai->name); \
		\
		ret = snd_pcm_hw_constraint_single(substream->runtime, \
						   SNDRV_PCM_HW_PARAM_##NAME,\
						   soc_dai->name); \
		if (ret < 0) { \
			dev_err(soc_dai->dev, \
				"ASoC: Unable to apply %s constraint: %d\n",\
				#name, ret); \
			return ret; \
		} \
	}

	__soc_pcm_apply_symmetry(rate, RATE);
	__soc_pcm_apply_symmetry(channels, CHANNELS);
	__soc_pcm_apply_symmetry(sample_bits, SAMPLE_BITS);

	return 0;
}

/*
 * Reject hw_params that disagree with the rate/channels/sample_bits a
 * symmetric link is already running at.
 */
static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai d;
	struct snd_soc_dai *dai;
	struct snd_soc_dai *cpu_dai;
	unsigned int symmetry, i;

	/* d is a throwaway DAI that just carries the requested params */
	d.name = __func__;
	soc_pcm_set_dai_params(&d, params);

#define __soc_pcm_params_symmetry(xxx) \
	symmetry = rtd->dai_link->symmetric_##xxx; \
	for_each_rtd_dais(rtd, i, dai) \
		symmetry |= dai->driver->symmetric_##xxx; \
	\
	if (symmetry) \
		for_each_rtd_cpu_dais(rtd, i, cpu_dai) \
			if (!snd_soc_dai_is_dummy(cpu_dai) && \
			    cpu_dai->xxx && cpu_dai->xxx != d.xxx) { \
				dev_err(rtd->dev, "ASoC: unmatched %s symmetry: %s:%d - %s:%d\n", \
					#xxx, cpu_dai->name, cpu_dai->xxx, d.name, d.xxx); \
				return -EINVAL; \
			}

	/* reject unmatched parameters when applying symmetry */
	__soc_pcm_params_symmetry(rate);
	__soc_pcm_params_symmetry(channels);
	__soc_pcm_params_symmetry(sample_bits);

	return 0;
}

/* Mark the runtime JOINT_DUPLEX when any DAI or the link requests symmetry. */
static void soc_pcm_update_symmetry(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai_link *link = rtd->dai_link;
	struct snd_soc_dai *dai;
	unsigned int symmetry, i;

	symmetry = link->symmetric_rate ||
		link->symmetric_channels ||
		link->symmetric_sample_bits;

	for_each_rtd_dais(rtd, i, dai)
		symmetry = symmetry ||
			dai->driver->symmetric_rate ||
			dai->driver->symmetric_channels ||
			dai->driver->symmetric_sample_bits;

	if (symmetry)
		substream->runtime->hw.info |= SNDRV_PCM_INFO_JOINT_DUPLEX;
}
/* Apply an msbits constraint; @bits == 0 means "no constraint known". */
static void soc_pcm_set_msb(struct snd_pcm_substream *substream, int bits)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	if (!bits)
		return;

	ret = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 0, bits);
	if (ret != 0)
		dev_warn(rtd->dev, "ASoC: Failed to set MSB %d: %d\n",
			 bits, ret);
}

/*
 * Constrain significant bits to what the CODEC and CPU DAIs advertise.
 * A single DAI reporting sig_bits == 0 ("unknown") voids that side's
 * constraint entirely, hence the reset-and-break.
 */
static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_dai *cpu_dai;
	struct snd_soc_dai *codec_dai;
	int stream = substream->stream;
	int i;
	unsigned int bits = 0, cpu_bits = 0;

	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		struct snd_soc_pcm_stream *pcm_codec = snd_soc_dai_get_pcm_stream(codec_dai, stream);

		if (pcm_codec->sig_bits == 0) {
			bits = 0;
			break;
		}
		bits = max(pcm_codec->sig_bits, bits);
	}

	for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
		struct snd_soc_pcm_stream *pcm_cpu = snd_soc_dai_get_pcm_stream(cpu_dai, stream);

		if (pcm_cpu->sig_bits == 0) {
			cpu_bits = 0;
			break;
		}
		cpu_bits = max(pcm_cpu->sig_bits, cpu_bits);
	}

	/* both constraints apply; the effective limit is their intersection */
	soc_pcm_set_msb(substream, bits);
	soc_pcm_set_msb(substream, cpu_bits);
}

/* Start from "anything goes"; the update helpers below only narrow it. */
static void soc_pcm_hw_init(struct snd_pcm_hardware *hw)
{
	hw->rates = UINT_MAX;
	hw->rate_min = 0;
	hw->rate_max = UINT_MAX;
	hw->channels_min = 0;
	hw->channels_max = UINT_MAX;
	hw->formats = ULLONG_MAX;
}

/* Intersect @hw's rate capabilities with those of stream @p. */
static void soc_pcm_hw_update_rate(struct snd_pcm_hardware *hw,
				   struct snd_soc_pcm_stream *p)
{
	hw->rates = snd_pcm_rate_mask_intersect(hw->rates, p->rates);

	/* setup hw->rate_min/max via hw->rates first */
	snd_pcm_hw_limit_rates(hw);

	/* update hw->rate_min/max by snd_soc_pcm_stream */
	hw->rate_min = max(hw->rate_min, p->rate_min);
	hw->rate_max = min_not_zero(hw->rate_max, p->rate_max);
}

/* Narrow the channel range to what stream @p supports. */
static void soc_pcm_hw_update_chan(struct snd_pcm_hardware *hw,
				   struct snd_soc_pcm_stream *p)
{
	hw->channels_min = max(hw->channels_min, p->channels_min);
	hw->channels_max = min(hw->channels_max, p->channels_max);
}

/* Intersect the sample-format masks. */
static void soc_pcm_hw_update_format(struct snd_pcm_hardware *hw,
				     struct snd_soc_pcm_stream *p)
{
	hw->formats &= p->formats;
}
/**
 * snd_soc_runtime_calc_hw() - Calculate hw limits for a PCM stream
 * @rtd: ASoC PCM runtime
 * @hw: PCM hardware parameters (output)
 * @stream: Direction of the PCM stream
 *
 * Calculates the subset of stream parameters supported by all DAIs
 * associated with the PCM stream.
 */
int snd_soc_runtime_calc_hw(struct snd_soc_pcm_runtime *rtd,
			    struct snd_pcm_hardware *hw, int stream)
{
	struct snd_soc_dai *codec_dai;
	struct snd_soc_dai *cpu_dai;
	struct snd_soc_pcm_stream *codec_stream;
	struct snd_soc_pcm_stream *cpu_stream;
	unsigned int cpu_chan_min = 0, cpu_chan_max = UINT_MAX;
	int i;

	soc_pcm_hw_init(hw);

	/* first calculate min/max only for CPUs in the DAI link */
	for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
		/*
		 * Skip CPUs which don't support the current stream type.
		 * Otherwise, since the rate, channel, and format values will
		 * zero in that case, we would have no usable settings left,
		 * causing the resulting setup to fail.
		 */
		if (!snd_soc_dai_stream_valid(cpu_dai, stream))
			continue;

		cpu_stream = snd_soc_dai_get_pcm_stream(cpu_dai, stream);

		soc_pcm_hw_update_chan(hw, cpu_stream);
		soc_pcm_hw_update_rate(hw, cpu_stream);
		soc_pcm_hw_update_format(hw, cpu_stream);
	}
	/* remember the CPU-only channel range for the multi-codec case below */
	cpu_chan_min = hw->channels_min;
	cpu_chan_max = hw->channels_max;

	/* second calculate min/max only for CODECs in the DAI link */
	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		/*
		 * Skip CODECs which don't support the current stream type.
		 * Otherwise, since the rate, channel, and format values will
		 * zero in that case, we would have no usable settings left,
		 * causing the resulting setup to fail.
		 */
		if (!snd_soc_dai_stream_valid(codec_dai, stream))
			continue;

		codec_stream = snd_soc_dai_get_pcm_stream(codec_dai, stream);

		soc_pcm_hw_update_chan(hw, codec_stream);
		soc_pcm_hw_update_rate(hw, codec_stream);
		soc_pcm_hw_update_format(hw, codec_stream);
	}

	/* Verify both a valid CPU DAI and a valid CODEC DAI were found */
	if (!hw->channels_min)
		return -EINVAL;

	/*
	 * chan min/max cannot be enforced if there are multiple CODEC DAIs
	 * connected to CPU DAI(s), use CPU DAI's directly and let
	 * channel allocation be fixed up later
	 */
	if (rtd->num_codecs > 1) {
		hw->channels_min = cpu_chan_min;
		hw->channels_max = cpu_chan_max;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_runtime_calc_hw);

/*
 * Fill runtime->hw from the DAIs' intersected capabilities, preserving
 * any format restriction that was set before this call.
 */
static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
{
	struct snd_pcm_hardware *hw = &substream->runtime->hw;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	u64 formats = hw->formats;

	/*
	 * At least one CPU and one CODEC should match. Otherwise, we should
	 * have bailed out on a higher level, since there would be no CPU or
	 * CODEC to support the transfer direction in that case.
	 */
	snd_soc_runtime_calc_hw(rtd, hw, substream->stream);

	if (formats)
		hw->formats &= formats;
}
/* Open all components; stops at the first failure (the caller rolls back). */
static int soc_pcm_components_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_component *component;
	int i, ret = 0;

	for_each_rtd_components(rtd, i, component) {
		/* pin the component's module while the substream is open */
		ret = snd_soc_component_module_get_when_open(component, substream);
		if (ret < 0)
			break;

		ret = snd_soc_component_open(component, substream);
		if (ret < 0)
			break;
	}

	return ret;
}

/* Close every component regardless of individual errors; report the last. */
static int soc_pcm_components_close(struct snd_pcm_substream *substream,
				    int rollback)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_soc_component *component;
	int i, ret = 0;

	for_each_rtd_components(rtd, i, component) {
		int r = snd_soc_component_close(component, substream, rollback);
		if (r < 0)
			ret = r; /* use last ret */

		snd_soc_component_module_put_when_close(component, substream, rollback);
	}

	return ret;
}

/*
 * Tear down a (possibly half-opened) stream: shut down DAIs, link and
 * components, drop PM references and put idle pins to sleep.
 * @rollback marks cleanup after a failed open.
 */
static int soc_pcm_clean(struct snd_soc_pcm_runtime *rtd,
			 struct snd_pcm_substream *substream, int rollback)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *dai;
	int i;

	snd_soc_dpcm_mutex_assert_held(rtd);

	/* only a fully-opened stream was ever activated */
	if (!rollback)
		snd_soc_runtime_deactivate(rtd, substream->stream);

	for_each_rtd_dais(rtd, i, dai)
		snd_soc_dai_shutdown(dai, substream, rollback);

	snd_soc_link_shutdown(substream, rollback);

	soc_pcm_components_close(substream, rollback);

	snd_soc_pcm_component_pm_runtime_put(rtd, substream, rollback);

	for_each_rtd_components(rtd, i, component)
		if (!snd_soc_component_active(component))
			pinctrl_pm_select_sleep_state(component->dev);

	return 0;
}
/*
 * Called by ALSA when a PCM substream is closed. Private data can be
 * freed here. The cpu DAI, codec DAI, machine and components are also
 * shutdown.
 */
static int __soc_pcm_close(struct snd_soc_pcm_runtime *rtd,
			   struct snd_pcm_substream *substream)
{
	return soc_pcm_clean(rtd, substream, 0);
}

/* PCM close ops for non-DPCM streams */
static int soc_pcm_close(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);

	snd_soc_dpcm_mutex_lock(rtd);
	soc_pcm_clean(rtd, substream, 0);
	snd_soc_dpcm_mutex_unlock(rtd);

	return 0;
}

/*
 * Verify that the intersected CPU/CODEC capabilities are usable; logs
 * which parameter class (rates/formats/channels) failed to match.
 */
static int soc_hw_sanity_check(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct snd_pcm_hardware *hw = &substream->runtime->hw;
	const char *name_cpu = soc_cpu_dai_name(rtd);
	const char *name_codec = soc_codec_dai_name(rtd);
	const char *err_msg;
	struct device *dev = rtd->dev;

	err_msg = "rates";
	if (!hw->rates)
		goto config_err;

	err_msg = "formats";
	if (!hw->formats)
		goto config_err;

	err_msg = "channels";
	if (!hw->channels_min || !hw->channels_max ||
	    hw->channels_min > hw->channels_max)
		goto config_err;

	dev_dbg(dev, "ASoC: %s <-> %s info:\n", name_codec,
		name_cpu);
	dev_dbg(dev, "ASoC: rate mask 0x%x\n", hw->rates);
	dev_dbg(dev, "ASoC: ch min %d max %d\n", hw->channels_min,
		hw->channels_max);
	dev_dbg(dev, "ASoC: rate min %d max %d\n", hw->rate_min,
		hw->rate_max);

	return 0;

config_err:
	dev_err(dev, "ASoC: %s <-> %s No matching %s\n",
		name_codec, name_cpu, err_msg);
	return -EINVAL;
}
/*
 * Called by ALSA when a PCM substream is opened, the runtime->hw record is
 * then initialized and any private data can be allocated. This also calls
 * startup for the cpu DAI, component, machine and codec DAI.
 */
static int __soc_pcm_open(struct snd_soc_pcm_runtime *rtd,
			  struct snd_pcm_substream *substream)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *dai;
	int i, ret = 0;

	snd_soc_dpcm_mutex_assert_held(rtd);

	for_each_rtd_components(rtd, i, component)
		pinctrl_pm_select_default_state(component->dev);

	ret = snd_soc_pcm_component_pm_runtime_get(rtd, substream);
	if (ret < 0)
		goto err;

	ret = soc_pcm_components_open(substream);
	if (ret < 0)
		goto err;

	ret = snd_soc_link_startup(substream);
	if (ret < 0)
		goto err;

	/* startup the audio subsystem */
	for_each_rtd_dais(rtd, i, dai) {
		ret = snd_soc_dai_startup(dai, substream);
		if (ret < 0)
			goto err;

		/* reset any stale TDM mask for the new stream direction */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			dai->tx_mask = 0;
		else
			dai->rx_mask = 0;
	}

	/* Dynamic PCM DAI links compat checks use dynamic capabilities */
	if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm)
		goto dynamic;

	/* Check that the codec and cpu DAIs are compatible */
	soc_pcm_init_runtime_hw(substream);

	soc_pcm_update_symmetry(substream);

	ret = soc_hw_sanity_check(substream);
	if (ret < 0)
		goto err;

	soc_pcm_apply_msb(substream);

	/* Symmetry only applies if we've already got an active stream. */
	for_each_rtd_dais(rtd, i, dai) {
		ret = soc_pcm_apply_symmetry(substream, dai);
		if (ret != 0)
			goto err;
	}
dynamic:
	snd_soc_runtime_activate(rtd, substream->stream);
	ret = 0;
err:
	if (ret < 0) {
		/* rollback: close whatever was opened before the failure */
		soc_pcm_clean(rtd, substream, 1);
		dev_err(rtd->dev, "%s() failed (%d)", __func__, ret);
	}

	return ret;
}

/* PCM open ops for non-DPCM streams */
static int soc_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);
	ret = __soc_pcm_open(rtd, substream);
	snd_soc_dpcm_mutex_unlock(rtd);

	return ret;
}

static void codec2codec_close_delayed_work(struct snd_soc_pcm_runtime *rtd)
{
	/*
	 * Currently nothing to do for c2c links
	 * Since c2c links are internal nodes in the DAPM graph and
	 * don't interface with the outside world or application layer
	 * we don't have to do any special handling on close.
	 */
}
/*
 * Called by ALSA when the PCM substream is prepared, can set format, sample
 * rate, etc. This function is non atomic and can be called multiple times,
 * it can refer to the runtime info.
 */
static int __soc_pcm_prepare(struct snd_soc_pcm_runtime *rtd,
			     struct snd_pcm_substream *substream)
{
	struct snd_soc_dai *dai;
	int i, ret = 0;

	snd_soc_dpcm_mutex_assert_held(rtd);

	ret = snd_soc_link_prepare(substream);
	if (ret < 0)
		goto out;

	ret = snd_soc_pcm_component_prepare(substream);
	if (ret < 0)
		goto out;

	ret = snd_soc_pcm_dai_prepare(substream);
	if (ret < 0)
		goto out;

	/* cancel any delayed stream shutdown that is pending */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
	    rtd->pop_wait) {
		rtd->pop_wait = 0;
		cancel_delayed_work(&rtd->delayed_work);
	}

	snd_soc_dapm_stream_event(rtd, substream->stream,
				  SND_SOC_DAPM_STREAM_START);

	/* unmute all DAIs now that the stream is fully set up */
	for_each_rtd_dais(rtd, i, dai)
		snd_soc_dai_digital_mute(dai, 0, substream->stream);

out:
	if (ret < 0)
		dev_err(rtd->dev, "ASoC: %s() failed (%d)\n", __func__, ret);

	return ret;
}

/* PCM prepare ops for non-DPCM streams */
static int soc_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);
	ret = __soc_pcm_prepare(rtd, substream);
	snd_soc_dpcm_mutex_unlock(rtd);

	return ret;
}

/* Pin the channel count to the number of bits set in a TDM slot mask. */
static void soc_pcm_codec_params_fixup(struct snd_pcm_hw_params *params,
				       unsigned int mask)
{
	struct snd_interval *interval;
	int channels = hweight_long(mask);

	interval = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
	interval->min = channels;
	interval->max = channels;
}
/*
 * Undo hw_params: mute and clear DAI parameters for DAIs going inactive,
 * send the stream-stop event and free link/component/DAI hw resources.
 * @rollback marks cleanup after a failed hw_params.
 */
static int soc_pcm_hw_clean(struct snd_soc_pcm_runtime *rtd,
			    struct snd_pcm_substream *substream, int rollback)
{
	struct snd_soc_dai *dai;
	int i;

	snd_soc_dpcm_mutex_assert_held(rtd);

	/* clear the corresponding DAIs parameters when going to be inactive */
	for_each_rtd_dais(rtd, i, dai) {
		if (snd_soc_dai_active(dai) == 1)
			soc_pcm_set_dai_params(dai, NULL);

		if (snd_soc_dai_stream_active(dai, substream->stream) == 1)
			snd_soc_dai_digital_mute(dai, 1, substream->stream);
	}

	/* run the stream event */
	snd_soc_dapm_stream_stop(rtd, substream->stream);

	/* free any machine hw params */
	snd_soc_link_hw_free(substream, rollback);

	/* free any component resources */
	snd_soc_pcm_component_hw_free(substream, rollback);

	/* now free hw params for the DAIs */
	for_each_rtd_dais(rtd, i, dai)
		if (snd_soc_dai_stream_valid(dai, substream->stream))
			snd_soc_dai_hw_free(dai, substream, rollback);

	return 0;
}

/*
 * Frees resources allocated by hw_params, can be called multiple times
 */
static int __soc_pcm_hw_free(struct snd_soc_pcm_runtime *rtd,
			     struct snd_pcm_substream *substream)
{
	return soc_pcm_hw_clean(rtd, substream, 0);
}

/* hw_free PCM ops for non-DPCM streams */
static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);
	ret = __soc_pcm_hw_free(rtd, substream);
	snd_soc_dpcm_mutex_unlock(rtd);

	return ret;
}

/*
 * Called by ALSA when the hardware params are set by application. This
 * function can also be called multiple times and can allocate buffers
 * (using snd_pcm_lib_* ). It's non-atomic.
 */
static int __soc_pcm_hw_params(struct snd_soc_pcm_runtime *rtd,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params)
{
	struct snd_soc_dai *cpu_dai;
	struct snd_soc_dai *codec_dai;
	int i, ret = 0;

	snd_soc_dpcm_mutex_assert_held(rtd);

	ret = soc_pcm_params_symmetry(substream, params);
	if (ret)
		goto out;

	ret = snd_soc_link_hw_params(substream, params);
	if (ret < 0)
		goto out;

	for_each_rtd_codec_dais(rtd, i, codec_dai) {
		struct snd_pcm_hw_params codec_params;

		/*
		 * Skip CODECs which don't support the current stream type,
		 * the idea being that if a CODEC is not used for the currently
		 * set up transfer direction, it should not need to be
		 * configured, especially since the configuration used might
		 * not even be supported by that CODEC. There may be cases
		 * however where a CODEC needs to be set up although it is
		 * actually not being used for the transfer, e.g. if a
		 * capture-only CODEC is acting as an LRCLK and/or BCLK master
		 * for the DAI link including a playback-only CODEC.
		 * If this becomes necessary, we will have to augment the
		 * machine driver setup with information on how to act, so
		 * we can do the right thing here.
		 */
		if (!snd_soc_dai_stream_valid(codec_dai, substream->stream))
			continue;

		/* copy params for each codec */
		codec_params = *params;

		/* fixup params based on TDM slot masks */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
		    codec_dai->tx_mask)
			soc_pcm_codec_params_fixup(&codec_params,
						   codec_dai->tx_mask);

		if (substream->stream == SNDRV_PCM_STREAM_CAPTURE &&
		    codec_dai->rx_mask)
			soc_pcm_codec_params_fixup(&codec_params,
						   codec_dai->rx_mask);

		ret = snd_soc_dai_hw_params(codec_dai, substream,
					    &codec_params);
		if (ret < 0)
			goto out;

		/* store the parameters for each DAI */
		soc_pcm_set_dai_params(codec_dai, &codec_params);
		snd_soc_dapm_update_dai(substream, &codec_params, codec_dai);
	}

	for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
		/*
		 * Skip CPUs which don't support the current stream
		 * type. See soc_pcm_init_runtime_hw() for more details
		 */
		if (!snd_soc_dai_stream_valid(cpu_dai, substream->stream))
			continue;

		ret = snd_soc_dai_hw_params(cpu_dai, substream, params);
		if (ret < 0)
			goto out;

		/* store the parameters for each DAI */
		soc_pcm_set_dai_params(cpu_dai, params);
		snd_soc_dapm_update_dai(substream, params, cpu_dai);
	}

	ret = snd_soc_pcm_component_hw_params(substream, params);
out:
	if (ret < 0) {
		/* rollback whatever was configured before the failure */
		soc_pcm_hw_clean(rtd, substream, 1);
		dev_err(rtd->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
	}

	return ret;
}

/* hw_params PCM ops for non-DPCM streams */
static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
			     struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);
	ret = __soc_pcm_hw_params(rtd, substream, params);
	snd_soc_dpcm_mutex_unlock(rtd);

	return ret;
}
/*
 * PCM trigger op: starts link -> component -> DAI; on a failed start the
 * command is remapped to its stop counterpart and the chain is rolled back
 * (with rollback=1 so each layer knows it is undoing a partial start).
 */
static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	int ret = -EINVAL, _ret = 0;
	int rollback = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		/* start order: machine link, then components, then DAIs */
		ret = snd_soc_link_trigger(substream, cmd, 0);
		if (ret < 0)
			goto start_err;

		ret = snd_soc_pcm_component_trigger(substream, cmd, 0);
		if (ret < 0)
			goto start_err;

		ret = snd_soc_pcm_dai_trigger(substream, cmd, 0);
start_err:
		if (ret < 0)
			rollback = 1;
	}

	if (rollback) {
		/* remember the original error; remap cmd to the matching stop */
		_ret = ret;
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
			cmd = SNDRV_PCM_TRIGGER_STOP;
			break;
		case SNDRV_PCM_TRIGGER_RESUME:
			cmd = SNDRV_PCM_TRIGGER_SUSPEND;
			break;
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
			cmd = SNDRV_PCM_TRIGGER_PAUSE_PUSH;
			break;
		}
	}

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		/* stop order is configurable: some links must stop DMA first */
		if (rtd->dai_link->stop_dma_first) {
			ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
			if (ret < 0)
				break;

			ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
			if (ret < 0)
				break;
		} else {
			ret = snd_soc_pcm_dai_trigger(substream, cmd, rollback);
			if (ret < 0)
				break;

			ret = snd_soc_pcm_component_trigger(substream, cmd, rollback);
			if (ret < 0)
				break;
		}
		ret = snd_soc_link_trigger(substream, cmd, rollback);
		break;
	}

	/* a start failure takes precedence over any rollback result */
	if (_ret)
		ret = _ret;

	return ret;
}
/*
 * soc level wrapper for pointer callback
 * If cpu_dai, codec_dai, component driver has the delay callback, then
 * the runtime->delay will be updated via snd_soc_pcm_component/dai_delay().
 */
static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_sframes_t delay_cpu = 0;
	snd_pcm_sframes_t delay_codec = 0;
	snd_pcm_uframes_t pos;

	pos = snd_soc_pcm_component_pointer(substream);

	/* must be called *after* snd_soc_pcm_component_pointer() */
	snd_soc_pcm_dai_delay(substream, &delay_cpu, &delay_codec);
	snd_soc_pcm_component_delay(substream, &delay_cpu, &delay_codec);

	/* total stream delay is the sum of CPU and CODEC contributions */
	runtime->delay = delay_cpu + delay_codec;

	return pos;
}
/*
 * connect a FE and BE
 *
 * Returns 1 for a newly created connection, 0 if the FE/BE pair was
 * already connected, or a negative errno on failure.
 */
static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
		struct snd_soc_pcm_runtime *be, int stream)
{
	struct snd_pcm_substream *fe_substream;
	struct snd_pcm_substream *be_substream;
	struct snd_soc_dpcm *dpcm;

	snd_soc_dpcm_mutex_assert_held(fe);

	/* only add new dpcms */
	for_each_dpcm_be(fe, stream, dpcm) {
		if (dpcm->be == be && dpcm->fe == fe)
			return 0;
	}

	fe_substream = snd_soc_dpcm_get_substream(fe, stream);
	be_substream = snd_soc_dpcm_get_substream(be, stream);

	/* an atomic FE must never drive a nonatomic BE */
	if (!fe_substream->pcm->nonatomic && be_substream->pcm->nonatomic) {
		dev_err(be->dev, "%s: FE is atomic but BE is nonatomic, invalid configuration\n",
			__func__);
		return -EINVAL;
	}
	/* the reverse mismatch is recoverable: force the BE nonatomic */
	if (fe_substream->pcm->nonatomic && !be_substream->pcm->nonatomic) {
		dev_warn(be->dev, "%s: FE is nonatomic but BE is not, forcing BE as nonatomic\n",
			 __func__);
		be_substream->pcm->nonatomic = 1;
	}

	/* GFP_ATOMIC: may be called from atomic (stream-locked) contexts */
	dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_ATOMIC);
	if (!dpcm)
		return -ENOMEM;

	dpcm->be = be;
	dpcm->fe = fe;
	/* BE shares the FE's runtime for this stream direction */
	be->dpcm[stream].runtime = fe->dpcm[stream].runtime;
	dpcm->state = SND_SOC_DPCM_LINK_STATE_NEW;

	/* publish the link on both client lists under the stream lock */
	snd_soc_dpcm_stream_lock_irq(fe, stream);
	list_add(&dpcm->list_be, &fe->dpcm[stream].be_clients);
	list_add(&dpcm->list_fe, &be->dpcm[stream].fe_clients);
	snd_soc_dpcm_stream_unlock_irq(fe, stream);

	dev_dbg(fe->dev, "connected new DPCM %s path %s %s %s\n",
			stream ? "capture" : "playback",  fe->dai_link->name,
			stream ? "<-" : "->", be->dai_link->name);

	dpcm_create_debugfs_state(dpcm, stream);

	return 1;
}
/*
 * reparent a BE onto another FE
 *
 * When a FE goes away but the BE is still referenced by other FEs, the
 * BE substream must point at a live runtime; pick the first remaining FE.
 */
static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
			struct snd_soc_pcm_runtime *be, int stream)
{
	struct snd_soc_dpcm *dpcm;
	struct snd_pcm_substream *fe_substream, *be_substream;

	/* reparent if BE is connected to other FEs */
	if (!be->dpcm[stream].users)
		return;

	be_substream = snd_soc_dpcm_get_substream(be, stream);

	for_each_dpcm_fe(be, stream, dpcm) {
		/* skip the FE that is being disconnected */
		if (dpcm->fe == fe)
			continue;

		dev_dbg(fe->dev, "reparent %s path %s %s %s\n",
			stream ? "capture" : "playback",
			dpcm->fe->dai_link->name,
			stream ? "<-" : "->", dpcm->be->dai_link->name);

		/* adopt the first surviving FE's runtime and stop */
		fe_substream = snd_soc_dpcm_get_substream(dpcm->fe, stream);
		be_substream->runtime = fe_substream->runtime;
		break;
	}
}
/*
 * disconnect a BE and FE
 *
 * Links marked SND_SOC_DPCM_LINK_STATE_FREE are unlinked under the stream
 * lock, then actually freed after the lock is dropped (kfree and debugfs
 * teardown must not run in atomic context).
 */
void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm, *d;
	LIST_HEAD(deleted_dpcms);

	snd_soc_dpcm_mutex_assert_held(fe);

	snd_soc_dpcm_stream_lock_irq(fe, stream);
	for_each_dpcm_be_safe(fe, stream, dpcm, d) {
		dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
				stream ? "capture" : "playback",
				dpcm->be->dai_link->name);

		/* only links flagged free may be torn down */
		if (dpcm->state != SND_SOC_DPCM_LINK_STATE_FREE)
			continue;

		dev_dbg(fe->dev, "freed DSP %s path %s %s %s\n",
			stream ? "capture" : "playback", fe->dai_link->name,
			stream ? "<-" : "->", dpcm->be->dai_link->name);

		/* BEs still alive need new FE */
		dpcm_be_reparent(fe, dpcm->be, stream);

		/* unlink now, free after the lock is released */
		list_del(&dpcm->list_be);
		list_move(&dpcm->list_fe, &deleted_dpcms);
	}
	snd_soc_dpcm_stream_unlock_irq(fe, stream);

	while (!list_empty(&deleted_dpcms)) {
		dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm,
					list_fe);
		list_del(&dpcm->list_fe);
		dpcm_remove_debugfs_state(dpcm);
		kfree(dpcm);
	}
}
/* get BE for DAI widget and stream */
static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
		struct snd_soc_dapm_widget *widget, int stream)
{
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_dai *dai;
	int i;

	dev_dbg(card->dev, "ASoC: find BE for widget %s\n", widget->name);

	for_each_card_rtds(card, rtd) {
		/* only no_pcm links act as back ends */
		if (!rtd->dai_link->no_pcm)
			continue;

		/* match the widget against every DAI of this candidate BE */
		for_each_rtd_dais(rtd, i, dai) {
			struct snd_soc_dapm_widget *w;

			w = snd_soc_dai_get_widget(dai, stream);

			dev_dbg(card->dev, "ASoC: try BE : %s\n",
				w ? w->name : "(not set)");

			if (w == widget)
				return rtd;
		}
	}

	/* Widget provided is not a BE */
	return NULL;
}
static int widget_in_list(struct snd_soc_dapm_widget_list *list,
struct snd_soc_dapm_widget *widget)
{
struct snd_soc_dapm_widget *w;
int i;
for_each_dapm_widgets(list, i, w)
if (widget == w)
return 1;
return 0;
}
bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget, enum snd_soc_dapm_direction dir)
{
struct snd_soc_card *card = widget->dapm->card;
struct snd_soc_pcm_runtime *rtd;
int stream;
/* adjust dir to stream */
if (dir == SND_SOC_DAPM_DIR_OUT)
stream = SNDRV_PCM_STREAM_PLAYBACK;
else
stream = SNDRV_PCM_STREAM_CAPTURE;
rtd = dpcm_get_be(card, widget, stream);
if (rtd)
return true;
return false;
}
EXPORT_SYMBOL_GPL(dpcm_end_walk_at_be);
/*
 * Build the list of active DAPM widgets reachable from the FE CPU DAI.
 *
 * Returns the number of valid paths found (may be 0) or a negative errno.
 * The returned *list must be released with dpcm_path_put().
 */
int dpcm_path_get(struct snd_soc_pcm_runtime *fe,
	int stream, struct snd_soc_dapm_widget_list **list)
{
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(fe, 0);
	int paths;

	/* DPCM path walking starts from a single FE CPU DAI only */
	if (fe->num_cpus > 1) {
		dev_err(fe->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	/*
	 * get number of valid DAI paths and their widgets;
	 * with component_chaining the walk continues past BEs
	 */
	paths = snd_soc_dapm_dai_get_connected_widgets(cpu_dai, stream, list,
			fe->card->component_chaining ?
				NULL : dpcm_end_walk_at_be);

	if (paths > 0)
		dev_dbg(fe->dev, "ASoC: found %d audio %s paths\n", paths,
			stream ? "capture" : "playback");
	else if (paths == 0)
		dev_dbg(fe->dev, "ASoC: %s no valid %s path\n", fe->dai_link->name,
			 stream ? "capture" : "playback");

	return paths;
}
/* release a widget list previously obtained via dpcm_path_get() */
void dpcm_path_put(struct snd_soc_dapm_widget_list **list)
{
	snd_soc_dapm_dai_free_widgets(list);
}
/*
 * Return true if the BE behind @dpcm still has at least one DAI widget
 * present in the active widget @list for @stream.
 */
static bool dpcm_be_is_active(struct snd_soc_dpcm *dpcm, int stream,
			      struct snd_soc_dapm_widget_list *list)
{
	struct snd_soc_dai *dai;
	unsigned int i;

	/*
	 * The BE is pruned only if none of the dai
	 * widgets are in the active list.
	 */
	for_each_rtd_dais(dpcm->be, i, dai) {
		struct snd_soc_dapm_widget *w;

		w = snd_soc_dai_get_widget(dai, stream);
		if (w && widget_in_list(list, w))
			return true;
	}

	return false;
}
/*
 * Drop FE<->BE links whose BE is no longer on any active widget path.
 * Returns the number of links flagged for removal.
 */
static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
			    struct snd_soc_dapm_widget_list **list_)
{
	struct snd_soc_dpcm *dpcm;
	int pruned = 0;

	/* Destroy any old FE <--> BE connections */
	for_each_dpcm_be(fe, stream, dpcm) {
		if (dpcm_be_is_active(dpcm, stream, *list_))
			continue;

		dev_dbg(fe->dev, "ASoC: pruning %s BE %s for %s\n",
			stream ? "capture" : "playback",
			dpcm->be->dai_link->name, fe->dai_link->name);

		/* mark the link free and flag the BE for a runtime update */
		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
		dpcm_set_be_update_state(dpcm->be, stream, SND_SOC_DPCM_UPDATE_BE);
		pruned++;
	}

	dev_dbg(fe->dev, "ASoC: found %d old BE paths for pruning\n", pruned);
	return pruned;
}
/*
 * Create FE<->BE connections for every BE reachable via the widget list.
 * Returns the number of newly connected BEs.
 */
static int dpcm_add_paths(struct snd_soc_pcm_runtime *fe, int stream,
	struct snd_soc_dapm_widget_list **list_)
{
	struct snd_soc_card *card = fe->card;
	struct snd_soc_dapm_widget_list *list = *list_;
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dapm_widget *widget;
	int i, new = 0, err;

	/* Create any new FE <--> BE connections */
	for_each_dapm_widgets(list, i, widget) {

		/* only dai_in/dai_out widgets matching the direction qualify */
		switch (widget->id) {
		case snd_soc_dapm_dai_in:
			if (stream != SNDRV_PCM_STREAM_PLAYBACK)
				continue;
			break;
		case snd_soc_dapm_dai_out:
			if (stream != SNDRV_PCM_STREAM_CAPTURE)
				continue;
			break;
		default:
			continue;
		}

		/* is there a valid BE rtd for this widget */
		be = dpcm_get_be(card, widget, stream);
		if (!be) {
			dev_dbg(fe->dev, "ASoC: no BE found for %s\n",
				widget->name);
			continue;
		}

		/* don't connect if FE is not running */
		if (!fe->dpcm[stream].runtime && !fe->fe_compr)
			continue;

		/*
		 * Filter for systems with 'component_chaining' enabled.
		 * This helps to avoid unnecessary re-configuration of an
		 * already active BE on such systems.
		 */
		if (fe->card->component_chaining &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
			continue;

		/* newly connected FE and BE */
		err = dpcm_be_connect(fe, be, stream);
		if (err < 0) {
			dev_err(fe->dev, "ASoC: can't connect %s\n",
				widget->name);
			break;
		} else if (err == 0) /* already connected */
			continue;

		/* new */
		dpcm_set_be_update_state(be, stream, SND_SOC_DPCM_UPDATE_BE);
		new++;
	}

	dev_dbg(fe->dev, "ASoC: found %d new BE paths\n", new);
	return new;
}
/*
 * Find the corresponding BE DAIs that source or sink audio to this
 * FE substream.
 */
int dpcm_process_paths(struct snd_soc_pcm_runtime *fe,
	int stream, struct snd_soc_dapm_widget_list **list, int new)
{
	/* either add fresh connections or prune the stale ones */
	if (new)
		return dpcm_add_paths(fe, stream, list);

	return dpcm_prune_paths(fe, stream, list);
}
/* reset the update flag of every BE connected to this FE stream */
void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm;

	for_each_dpcm_be(fe, stream, dpcm)
		dpcm_set_be_update_state(dpcm->be, stream, SND_SOC_DPCM_UPDATE_NO);
}
/*
 * Close (and optionally hw_free) the BEs of this FE stream, stopping at
 * @last (exclusive) so a partially started chain can be unwound.
 */
void dpcm_be_dai_stop(struct snd_soc_pcm_runtime *fe, int stream,
		      int do_hw_free, struct snd_soc_dpcm *last)
{
	struct snd_soc_dpcm *dpcm;

	/* disable any enabled and non active backends */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);

		/* stop before the BE where the rollback started */
		if (dpcm == last)
			return;

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* refcount underflow would mean broken open/close pairing */
		if (be->dpcm[stream].users == 0) {
			dev_err(be->dev, "ASoC: no users %s at close - state %d\n",
				stream ? "capture" : "playback",
				be->dpcm[stream].state);
			continue;
		}

		/* only the last user actually closes the BE */
		if (--be->dpcm[stream].users != 0)
			continue;

		if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) {
			if (!do_hw_free)
				continue;

			/* free hw first if the BE is past the OPEN state */
			if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) {
				__soc_pcm_hw_free(be, be_substream);
				be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
			}
		}

		__soc_pcm_close(be, be_substream);
		be_substream->runtime = NULL;
		be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
	}
}
/*
 * Open every BE connected to this FE stream. Uses a per-BE user refcount
 * so a BE shared by multiple FEs is only opened once. Returns the number
 * of BEs opened by this call, or a negative errno after rolling back.
 */
int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;
	int err, count = 0;

	/* only startup BE DAIs that are either sinks or sources to this FE DAI */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_pcm_substream *be_substream;

		be = dpcm->be;
		be_substream = snd_soc_dpcm_get_substream(be, stream);

		if (!be_substream) {
			dev_err(be->dev, "ASoC: no backend %s stream\n",
				stream ? "capture" : "playback");
			continue;
		}

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* first time the dpcm is open ? */
		if (be->dpcm[stream].users == DPCM_MAX_BE_USERS) {
			dev_err(be->dev, "ASoC: too many users %s at open %d\n",
				stream ? "capture" : "playback",
				be->dpcm[stream].state);
			continue;
		}

		/* only the first user actually opens the BE */
		if (be->dpcm[stream].users++ != 0)
			continue;

		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_NEW) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_CLOSE))
			continue;

		dev_dbg(be->dev, "ASoC: open %s BE %s\n",
			stream ? "capture" : "playback", be->dai_link->name);

		/* BE substream runs on the runtime shared with the FE */
		be_substream->runtime = be->dpcm[stream].runtime;
		err = __soc_pcm_open(be, be_substream);
		if (err < 0) {
			/* undo the refcount taken above before unwinding */
			be->dpcm[stream].users--;
			if (be->dpcm[stream].users < 0)
				dev_err(be->dev, "ASoC: no users %s at unwind %d\n",
					stream ? "capture" : "playback",
					be->dpcm[stream].state);

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
			goto unwind;
		}
		be->dpcm[stream].be_start = 0;
		be->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
		count++;
	}

	return count;

unwind:
	/* close all BEs opened so far, up to the one that failed */
	dpcm_be_dai_startup_rollback(fe, stream, dpcm);

	dev_err(fe->dev, "ASoC: %s() failed at %s (%d)\n",
		__func__, be->dai_link->name, err);

	return err;
}
/*
 * Initialize the FE runtime hardware caps and narrow them with the
 * capabilities of every CPU DAI that supports this stream direction.
 */
static void dpcm_runtime_setup_fe(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_pcm_hardware *hw = &substream->runtime->hw;
	struct snd_soc_dai *cpu_dai;
	int stream = substream->stream;
	int i;

	soc_pcm_hw_init(hw);

	for_each_rtd_cpu_dais(fe, i, cpu_dai) {
		struct snd_soc_pcm_stream *pcm_stream;

		/*
		 * Skip CPUs which don't support the current stream
		 * type. See soc_pcm_init_runtime_hw() for more details
		 */
		if (!snd_soc_dai_stream_valid(cpu_dai, stream))
			continue;

		pcm_stream = snd_soc_dai_get_pcm_stream(cpu_dai, stream);

		/* intersect rate, channel and format capabilities */
		soc_pcm_hw_update_rate(hw, pcm_stream);
		soc_pcm_hw_update_chan(hw, pcm_stream);
		soc_pcm_hw_update_format(hw, pcm_stream);
	}
}
/* narrow the FE format caps with the BE CODEC DAIs when requested */
static void dpcm_runtime_setup_be_format(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	struct snd_soc_dpcm *dpcm;
	struct snd_soc_dai *dai;
	int stream = substream->stream;

	if (!fe->dai_link->dpcm_merged_format)
		return;

	/*
	 * It returns merged BE codec format
	 * if FE want to use it (= dpcm_merged_format)
	 */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_soc_pcm_stream *codec_stream;
		int i;

		for_each_rtd_codec_dais(be, i, dai) {
			/*
			 * Skip CODECs which don't support the current stream
			 * type. See soc_pcm_init_runtime_hw() for more details
			 */
			if (!snd_soc_dai_stream_valid(dai, stream))
				continue;

			codec_stream = snd_soc_dai_get_pcm_stream(dai, stream);

			soc_pcm_hw_update_format(hw, codec_stream);
		}
	}
}
/* narrow the FE channel caps with the BE DAIs when requested */
static void dpcm_runtime_setup_be_chan(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	struct snd_soc_dpcm *dpcm;
	int stream = substream->stream;

	if (!fe->dai_link->dpcm_merged_chan)
		return;

	/*
	 * It returns merged BE codec channel
	 * if FE want to use it (= dpcm_merged_chan)
	 */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_soc_pcm_stream *cpu_stream;
		struct snd_soc_dai *dai;
		int i;

		for_each_rtd_cpu_dais(be, i, dai) {
			/*
			 * Skip CPUs which don't support the current stream
			 * type. See soc_pcm_init_runtime_hw() for more details
			 */
			if (!snd_soc_dai_stream_valid(dai, stream))
				continue;

			cpu_stream = snd_soc_dai_get_pcm_stream(dai, stream);

			soc_pcm_hw_update_chan(hw, cpu_stream);
		}

		/*
		 * chan min/max cannot be enforced if there are multiple CODEC
		 * DAIs connected to a single CPU DAI, use CPU DAI's directly
		 */
		if (be->num_codecs == 1) {
			struct snd_soc_pcm_stream *codec_stream = snd_soc_dai_get_pcm_stream(
				asoc_rtd_to_codec(be, 0), stream);

			soc_pcm_hw_update_chan(hw, codec_stream);
		}
	}
}
/* narrow the FE rate caps with the BE DAIs when requested */
static void dpcm_runtime_setup_be_rate(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_pcm_hardware *hw = &runtime->hw;
	struct snd_soc_dpcm *dpcm;
	int stream = substream->stream;

	if (!fe->dai_link->dpcm_merged_rate)
		return;

	/*
	 * It returns merged BE DAI rate
	 * if FE want to use it (= dpcm_merged_rate)
	 */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_soc_pcm_stream *pcm;
		struct snd_soc_dai *dai;
		int i;

		for_each_rtd_dais(be, i, dai) {
			/*
			 * Skip DAIs which don't support the current stream
			 * type. See soc_pcm_init_runtime_hw() for more details
			 */
			if (!snd_soc_dai_stream_valid(dai, stream))
				continue;

			pcm = snd_soc_dai_get_pcm_stream(dai, stream);

			soc_pcm_hw_update_rate(hw, pcm);
		}
	}
}
/*
 * Propagate rate/channel/format symmetry constraints to the FE CPU DAIs
 * and every connected BE that does not do its own hw_params fixup.
 */
static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
			       int stream)
{
	struct snd_soc_dpcm *dpcm;
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
	struct snd_soc_dai *fe_cpu_dai;
	int err = 0;
	int i;

	/* apply symmetry for FE */
	soc_pcm_update_symmetry(fe_substream);

	for_each_rtd_cpu_dais (fe, i, fe_cpu_dai) {
		/* Symmetry only applies if we've got an active stream. */
		err = soc_pcm_apply_symmetry(fe_substream, fe_cpu_dai);
		if (err < 0)
			goto error;
	}

	/* apply symmetry for BE */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);
		struct snd_soc_pcm_runtime *rtd;
		struct snd_soc_dai *dai;

		/* A backend may not have the requested substream */
		if (!be_substream)
			continue;

		rtd = asoc_substream_to_rtd(be_substream);
		/* fixup BEs negotiate their own params; leave them alone */
		if (rtd->dai_link->be_hw_params_fixup)
			continue;

		soc_pcm_update_symmetry(be_substream);

		/* Symmetry only applies if we've got an active stream. */
		for_each_rtd_dais(rtd, i, dai) {
			err = soc_pcm_apply_symmetry(fe_substream, dai);
			if (err < 0)
				goto error;
		}
	}
error:
	if (err < 0)
		dev_err(fe->dev, "ASoC: %s failed (%d)\n", __func__, err);

	return err;
}
/*
 * FE open op: start the BEs first, then the FE itself, then derive the
 * FE runtime hw caps and apply symmetry across the whole path.
 */
static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
	int stream = fe_substream->stream, ret = 0;

	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

	ret = dpcm_be_dai_startup(fe, stream);
	if (ret < 0)
		goto be_err;

	dev_dbg(fe->dev, "ASoC: open FE %s\n", fe->dai_link->name);

	/* start the DAI frontend */
	ret = __soc_pcm_open(fe, fe_substream);
	if (ret < 0)
		goto unwind;

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;

	/* build the FE hw caps from CPU DAIs and merged BE constraints */
	dpcm_runtime_setup_fe(fe_substream);

	dpcm_runtime_setup_be_format(fe_substream);
	dpcm_runtime_setup_be_chan(fe_substream);
	dpcm_runtime_setup_be_rate(fe_substream);

	ret = dpcm_apply_symmetry(fe_substream, stream);

unwind:
	if (ret < 0)
		dpcm_be_dai_startup_unwind(fe, stream);
be_err:
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);

	if (ret < 0)
		dev_err(fe->dev, "%s() failed (%d)\n", __func__, ret);

	return ret;
}
/* FE close op: shut the BEs down first, then close the FE itself */
static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;

	snd_soc_dpcm_mutex_assert_held(fe);

	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

	/* shutdown the BEs */
	dpcm_be_dai_shutdown(fe, stream);

	dev_dbg(fe->dev, "ASoC: close FE %s\n", fe->dai_link->name);

	/* now shutdown the frontend */
	__soc_pcm_close(fe, substream);

	/* run the stream stop event */
	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
	return 0;
}
/* hw_free every BE of this FE stream that is no longer in use */
void dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm;

	/* only hw_params backends that are either sinks or sources
	 * to this frontend DAI */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* only free hw when no longer used - check all FEs */
		if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
			continue;

		/* do not free hw if this BE is used by other FE */
		if (be->dpcm[stream].users > 1)
			continue;

		/* skip BEs in states where hw_free is not meaningful */
		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
			continue;

		dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
			be->dai_link->name);

		__soc_pcm_hw_free(be, be_substream);

		be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
	}
}
/* FE hw_free op: free the FE hardware first, then the connected BEs */
static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;

	snd_soc_dpcm_mutex_lock(fe);
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

	dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);

	/* call hw_free on the frontend */
	soc_pcm_hw_clean(fe, substream, 0);

	/* only hw_params backends that are either sinks or sources
	 * to this frontend DAI */
	dpcm_be_dai_hw_free(fe, stream);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);

	snd_soc_dpcm_mutex_unlock(fe);
	return 0;
}
/*
 * Apply the FE hw_params (after per-link fixups) to every connected BE.
 * On failure, walks the BE list backwards and frees the hw of every BE
 * configured so far.
 */
int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_pcm_runtime *be;
	struct snd_pcm_substream *be_substream;
	struct snd_soc_dpcm *dpcm;
	int ret;

	for_each_dpcm_be(fe, stream, dpcm) {
		be = dpcm->be;
		be_substream = snd_soc_dpcm_get_substream(be, stream);

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* copy params for each dpcm */
		memcpy(&dpcm->hw_params, &fe->dpcm[stream].hw_params,
				sizeof(struct snd_pcm_hw_params));

		/* perform any hw_params fixups */
		ret = snd_soc_link_be_hw_params_fixup(be, &dpcm->hw_params);
		if (ret < 0)
			goto unwind;

		/* copy the fixed-up hw params for BE dai */
		memcpy(&be->dpcm[stream].hw_params, &dpcm->hw_params,
		       sizeof(struct snd_pcm_hw_params));

		/* only allow hw_params() if no connected FEs are running */
		if (!snd_soc_dpcm_can_be_params(fe, be, stream))
			continue;

		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
			continue;

		dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
			be->dai_link->name);

		ret = __soc_pcm_hw_params(be, be_substream, &dpcm->hw_params);
		if (ret < 0)
			goto unwind;

		be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
	}
	return 0;

unwind:
	dev_dbg(fe->dev, "ASoC: %s() failed at %s (%d)\n",
		__func__, be->dai_link->name, ret);

	/* disable any enabled and non active backends */
	for_each_dpcm_be_rollback(fe, stream, dpcm) {
		be = dpcm->be;
		be_substream = snd_soc_dpcm_get_substream(be, stream);

		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		/* only allow hw_free() if no connected FEs are running */
		if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
			continue;

		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
			continue;

		__soc_pcm_hw_free(be, be_substream);
	}

	return ret;
}
/* FE hw_params op: configure the BEs first, then the FE itself */
static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int ret, stream = substream->stream;

	snd_soc_dpcm_mutex_lock(fe);
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);

	/* keep a copy so BEs can be (re)configured from it later */
	memcpy(&fe->dpcm[stream].hw_params, params,
			sizeof(struct snd_pcm_hw_params));
	ret = dpcm_be_dai_hw_params(fe, stream);
	if (ret < 0)
		goto out;

	dev_dbg(fe->dev, "ASoC: hw_params FE %s rate %d chan %x fmt %d\n",
			fe->dai_link->name, params_rate(params),
			params_channels(params), params_format(params));

	/* call hw_params on the frontend */
	ret = __soc_pcm_hw_params(fe, substream, params);
	if (ret < 0)
		/* FE failed: release the hw already taken by the BEs */
		dpcm_be_dai_hw_free(fe, stream);
	else
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;

out:
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
	snd_soc_dpcm_mutex_unlock(fe);

	if (ret < 0)
		dev_err(fe->dev, "ASoC: %s failed (%d)\n", __func__, ret);

	return ret;
}
/*
 * Forward a trigger command to every BE of this FE stream.
 *
 * Each BE keeps a be_start counter so that, when a BE is shared between
 * several FEs, only the first start and the last stop actually reach the
 * hardware; intermediate triggers just adjust the counter. On a failed
 * trigger the counter change is reverted so the state stays consistent.
 */
int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
			int cmd)
{
	struct snd_soc_pcm_runtime *be;
	struct snd_soc_dpcm *dpcm;
	unsigned long flags;
	int ret = 0;

	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_pcm_substream *be_substream;

		be = dpcm->be;
		be_substream = snd_soc_dpcm_get_substream(be, stream);

		/* counter and state must be checked/changed atomically */
		snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags);

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			goto next;

		dev_dbg(be->dev, "ASoC: trigger BE %s cmd %d\n",
			be->dai_link->name, cmd);

		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
			if (!be->dpcm[stream].be_start &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
				goto next;

			/* only the first starter triggers the hardware */
			be->dpcm[stream].be_start++;
			if (be->dpcm[stream].be_start != 1)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				be->dpcm[stream].be_start--;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
			break;
		case SNDRV_PCM_TRIGGER_RESUME:
			if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
				goto next;

			be->dpcm[stream].be_start++;
			if (be->dpcm[stream].be_start != 1)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				be->dpcm[stream].be_start--;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
			break;
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
			if (!be->dpcm[stream].be_start &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
				goto next;

			be->dpcm[stream].be_start++;
			if (be->dpcm[stream].be_start != 1)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				be->dpcm[stream].be_start--;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
			break;
		case SNDRV_PCM_TRIGGER_STOP:
			if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_START) &&
			    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
				goto next;

			/* paused streams did not bump be_start; don't drop it */
			if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START)
				be->dpcm[stream].be_start--;

			/* only the last stopper triggers the hardware */
			if (be->dpcm[stream].be_start != 0)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_START)
					be->dpcm[stream].be_start++;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
			break;
		case SNDRV_PCM_TRIGGER_SUSPEND:
			if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
				goto next;

			be->dpcm[stream].be_start--;
			if (be->dpcm[stream].be_start != 0)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				be->dpcm[stream].be_start++;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_SUSPEND;
			break;
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
				goto next;

			be->dpcm[stream].be_start--;
			if (be->dpcm[stream].be_start != 0)
				goto next;

			ret = soc_pcm_trigger(be_substream, cmd);
			if (ret) {
				be->dpcm[stream].be_start++;
				goto next;
			}

			be->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
			break;
		}
next:
		snd_soc_dpcm_stream_unlock_irqrestore(be, stream, flags);
		if (ret)
			break;
	}
	if (ret < 0)
		dev_err(fe->dev, "ASoC: %s() failed at %s (%d)\n",
			__func__, be->dai_link->name, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
/*
 * Trigger the FE and its BEs in the order requested by @fe_first,
 * aborting as soon as either side fails.
 */
static int dpcm_dai_trigger_fe_be(struct snd_pcm_substream *substream,
				  int cmd, bool fe_first)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int ret;

	if (fe_first) {
		/* call trigger on the frontend before the backend. */
		dev_dbg(fe->dev, "ASoC: pre trigger FE %s cmd %d\n",
			fe->dai_link->name, cmd);

		ret = soc_pcm_trigger(substream, cmd);
		if (ret < 0)
			return ret;

		return dpcm_be_dai_trigger(fe, substream->stream, cmd);
	}

	/* call trigger on the frontend after the backend. */
	ret = dpcm_be_dai_trigger(fe, substream->stream, cmd);
	if (ret < 0)
		return ret;

	dev_dbg(fe->dev, "ASoC: post trigger FE %s cmd %d\n",
		fe->dai_link->name, cmd);

	return soc_pcm_trigger(substream, cmd);
}
/*
 * Run a trigger command for an FE, honouring the dai_link's trigger
 * ordering (FE before BEs, BEs before FE, or a bespoke handler), then
 * update the FE DPCM state on success.
 */
static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;
	int ret = 0;
	enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	switch (trigger) {
	case SND_SOC_DPCM_TRIGGER_PRE:
		/* FE first on start-type commands, last on stop-type */
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
		case SNDRV_PCM_TRIGGER_RESUME:
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		case SNDRV_PCM_TRIGGER_DRAIN:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
			break;
		case SNDRV_PCM_TRIGGER_STOP:
		case SNDRV_PCM_TRIGGER_SUSPEND:
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case SND_SOC_DPCM_TRIGGER_POST:
		/* mirror image of the PRE ordering above */
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_START:
		case SNDRV_PCM_TRIGGER_RESUME:
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		case SNDRV_PCM_TRIGGER_DRAIN:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
			break;
		case SNDRV_PCM_TRIGGER_STOP:
		case SNDRV_PCM_TRIGGER_SUSPEND:
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		break;
	case SND_SOC_DPCM_TRIGGER_BESPOKE:
		/* bespoke trigger() - handles both FE and BEs */

		dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd %d\n",
				fe->dai_link->name, cmd);

		ret = snd_soc_pcm_dai_bespoke_trigger(substream, cmd);
		break;
	default:
		dev_err(fe->dev, "ASoC: invalid trigger cmd %d for %s\n", cmd,
				fe->dai_link->name);
		ret = -EINVAL;
		goto out;
	}

	if (ret < 0) {
		dev_err(fe->dev, "ASoC: trigger FE cmd: %d failed: %d\n",
			cmd, ret);
		goto out;
	}

	/* command succeeded: record the new FE DPCM state */
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
		break;
	}

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	return ret;
}
/*
 * FE trigger op. If a runtime update is in flight on this stream we are
 * racing with it: record the command (offset by one so 0 means "none")
 * and let the update path run it later; otherwise trigger immediately.
 */
static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream;

	if (fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_NO)
		/* we're alone, let's trigger */
		return dpcm_fe_dai_do_trigger(substream, cmd);

	/* in race with an update; defer and report success optimistically */
	fe->dpcm[stream].trigger_pending = cmd + 1;
	return 0;
}
/* prepare every BE of this FE stream that is in a preparable state */
int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_soc_dpcm *dpcm;
	int ret = 0;

	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		struct snd_pcm_substream *be_substream =
			snd_soc_dpcm_get_substream(be, stream);

		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;

		if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
		    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
			continue;

		dev_dbg(be->dev, "ASoC: prepare BE %s\n",
			be->dai_link->name);

		ret = __soc_pcm_prepare(be, be_substream);
		if (ret < 0)
			break;

		be->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
	}

	if (ret < 0)
		dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, ret);

	return ret;
}
/*
 * FE "prepare" handler for a DPCM front-end PCM.
 *
 * Marks the FE as under FE-driven update, prepares every eligible BE first
 * and then the FE itself, and finally records the PREPARE state. Returns 0
 * on success or a negative errno.
 */
static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(substream);
	int stream = substream->stream, ret = 0;
	snd_soc_dpcm_mutex_lock(fe);
	dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
	/* flag the FE as being updated so BE operations are permitted */
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
	/* there is no point preparing this FE if there are no BEs */
	if (list_empty(&fe->dpcm[stream].be_clients)) {
		dev_err(fe->dev, "ASoC: no backend DAIs enabled for %s\n",
			fe->dai_link->name);
		ret = -EINVAL;
		goto out;
	}
	/* BEs must be prepared before the FE */
	ret = dpcm_be_dai_prepare(fe, stream);
	if (ret < 0)
		goto out;
	/* call prepare on the frontend */
	ret = __soc_pcm_prepare(fe, substream);
	if (ret < 0)
		goto out;
	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
out:
	/* always clear the update flag, even on failure */
	dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
	snd_soc_dpcm_mutex_unlock(fe);
	if (ret < 0)
		dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
	return ret;
}
/*
 * Tear down the runtime paths of an FE during a DPCM runtime update.
 *
 * Stops the BEs (or lets a bespoke FE trigger handle it), frees their
 * hardware and shuts them down, then emits a NOP DAPM stream event so the
 * widget graph is re-evaluated. Returns the result of the stop trigger.
 */
static int dpcm_run_update_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_pcm_substream *substream =
		snd_soc_dpcm_get_substream(fe, stream);
	enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
	int err;
	dev_dbg(fe->dev, "ASoC: runtime %s close on FE %s\n",
		stream ? "capture" : "playback", fe->dai_link->name);
	if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) {
		/* call bespoke trigger - FE takes care of all BE triggers */
		dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd stop\n",
			fe->dai_link->name);
		err = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_STOP);
	} else {
		dev_dbg(fe->dev, "ASoC: trigger FE %s cmd stop\n",
			fe->dai_link->name);
		err = dpcm_be_dai_trigger(fe, stream, SNDRV_PCM_TRIGGER_STOP);
	}
	/* hw_free/shutdown still run even if the stop trigger failed */
	dpcm_be_dai_hw_free(fe, stream);
	dpcm_be_dai_shutdown(fe, stream);
	/* run the stream event for each BE */
	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_NOP);
	if (err < 0)
		dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, err);
	return err;
}
/*
 * Bring up the runtime paths of an FE during a DPCM runtime update.
 *
 * Walks the newly routed BEs through startup -> hw_params -> prepare ->
 * trigger, but never takes a BE past the stage the FE itself has already
 * reached. On error the BEs are unwound in reverse order and any BE that
 * never left NEW/CLOSE is marked for pruning by the caller.
 */
static int dpcm_run_update_startup(struct snd_soc_pcm_runtime *fe, int stream)
{
	struct snd_pcm_substream *substream =
		snd_soc_dpcm_get_substream(fe, stream);
	struct snd_soc_dpcm *dpcm;
	enum snd_soc_dpcm_trigger trigger = fe->dai_link->trigger[stream];
	int ret = 0;
	dev_dbg(fe->dev, "ASoC: runtime %s open on FE %s\n",
		stream ? "capture" : "playback", fe->dai_link->name);
	/* Only start the BE if the FE is ready */
	if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_HW_FREE ||
	    fe->dpcm[stream].state == SND_SOC_DPCM_STATE_CLOSE) {
		dev_err(fe->dev, "ASoC: FE %s is not ready %d\n",
			fe->dai_link->name, fe->dpcm[stream].state);
		ret = -EINVAL;
		goto disconnect;
	}
	/* startup must always be called for new BEs */
	ret = dpcm_be_dai_startup(fe, stream);
	if (ret < 0)
		goto disconnect;
	/* keep going if FE state is > open */
	if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_OPEN)
		return 0;
	ret = dpcm_be_dai_hw_params(fe, stream);
	if (ret < 0)
		goto close;
	/* keep going if FE state is > hw_params */
	if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_HW_PARAMS)
		return 0;
	ret = dpcm_be_dai_prepare(fe, stream);
	if (ret < 0)
		goto hw_free;
	/* run the stream event for each BE */
	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_NOP);
	/* keep going if FE state is > prepare */
	if (fe->dpcm[stream].state == SND_SOC_DPCM_STATE_PREPARE ||
	    fe->dpcm[stream].state == SND_SOC_DPCM_STATE_STOP)
		return 0;
	if (trigger == SND_SOC_DPCM_TRIGGER_BESPOKE) {
		/* call trigger on the frontend - FE takes care of all BE triggers */
		dev_dbg(fe->dev, "ASoC: bespoke trigger FE %s cmd start\n",
			fe->dai_link->name);
		ret = snd_soc_pcm_dai_bespoke_trigger(substream, SNDRV_PCM_TRIGGER_START);
		if (ret < 0)
			goto hw_free;
	} else {
		dev_dbg(fe->dev, "ASoC: trigger FE %s cmd start\n",
			fe->dai_link->name);
		ret = dpcm_be_dai_trigger(fe, stream,
					  SNDRV_PCM_TRIGGER_START);
		if (ret < 0)
			goto hw_free;
	}
	return 0;
	/* error unwind: reverse order of the bring-up above */
hw_free:
	dpcm_be_dai_hw_free(fe, stream);
close:
	dpcm_be_dai_shutdown(fe, stream);
disconnect:
	/* disconnect any pending BEs */
	for_each_dpcm_be(fe, stream, dpcm) {
		struct snd_soc_pcm_runtime *be = dpcm->be;
		/* is this op for this BE ? */
		if (!snd_soc_dpcm_be_can_update(fe, be, stream))
			continue;
		/* BEs that never left NEW/CLOSE are flagged for pruning */
		if (be->dpcm[stream].state == SND_SOC_DPCM_STATE_CLOSE ||
		    be->dpcm[stream].state == SND_SOC_DPCM_STATE_NEW)
			dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
	}
	if (ret < 0)
		dev_err(fe->dev, "ASoC: %s() failed (%d)\n", __func__, ret);
	return ret;
}
/*
 * Re-evaluate the DSP paths of one FE after a DAPM change.
 *
 * @new selects the direction: 1 connects and starts newly routed BEs,
 * 0 tears down BEs whose route has gone away. Only active, dynamic FEs
 * are considered. Returns 0 on success or a negative errno.
 */
static int soc_dpcm_fe_runtime_update(struct snd_soc_pcm_runtime *fe, int new)
{
	struct snd_soc_dapm_widget_list *list;
	int stream;
	int count, paths;
	/* only DPCM front-ends have dynamically routed back-ends */
	if (!fe->dai_link->dynamic)
		return 0;
	if (fe->num_cpus > 1) {
		dev_err(fe->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}
	/* only check active links */
	if (!snd_soc_dai_active(asoc_rtd_to_cpu(fe, 0)))
		return 0;
	/* DAPM sync will call this to update DSP paths */
	dev_dbg(fe->dev, "ASoC: DPCM %s runtime update for FE %s\n",
		new ? "new" : "old", fe->dai_link->name);
	for_each_pcm_streams(stream) {
		/* skip if FE doesn't have playback/capture capability */
		if (!snd_soc_dai_stream_valid(asoc_rtd_to_cpu(fe, 0), stream) ||
		    !snd_soc_dai_stream_valid(asoc_rtd_to_codec(fe, 0), stream))
			continue;
		/* skip if FE isn't currently playing/capturing */
		if (!snd_soc_dai_stream_active(asoc_rtd_to_cpu(fe, 0), stream) ||
		    !snd_soc_dai_stream_active(asoc_rtd_to_codec(fe, 0), stream))
			continue;
		paths = dpcm_path_get(fe, stream, &list);
		if (paths < 0)
			return paths;
		/* update any playback/capture paths */
		count = dpcm_process_paths(fe, stream, &list, new);
		if (count) {
			/* mark a BE-side update while (dis)connecting BEs */
			dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
			if (new)
				dpcm_run_update_startup(fe, stream);
			else
				dpcm_run_update_shutdown(fe, stream);
			dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
			/* clear any state parked by a racing trigger */
			dpcm_clear_pending_state(fe, stream);
			dpcm_be_disconnect(fe, stream);
		}
		dpcm_path_put(&list);
	}
	return 0;
}
/* Called by DAPM mixer/mux changes to update audio routing between PCMs and
* any DAI links.
*/
int snd_soc_dpcm_runtime_update(struct snd_soc_card *card)
{
	struct snd_soc_pcm_runtime *fe;
	int pass, ret = 0;

	mutex_lock_nested(&card->pcm_mutex, card->pcm_subclass);

	/*
	 * Two passes over every runtime: pass 0 shuts down all old paths
	 * first, pass 1 brings the new paths up. Stop at the first error.
	 */
	for (pass = 0; pass <= 1 && !ret; pass++) {
		for_each_card_rtds(card, fe) {
			ret = soc_dpcm_fe_runtime_update(fe, pass);
			if (ret)
				break;
		}
	}

	mutex_unlock(&card->pcm_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_runtime_update);
/*
 * Detach an FE from all of its BEs and drop its runtime reference.
 * Caller must hold the FE's DPCM mutex (asserted below).
 */
static void dpcm_fe_dai_cleanup(struct snd_pcm_substream *fe_substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
	struct snd_soc_dpcm *dpcm;
	int stream = fe_substream->stream;
	snd_soc_dpcm_mutex_assert_held(fe);
	/* mark FE's links ready to prune */
	for_each_dpcm_be(fe, stream, dpcm)
		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
	dpcm_be_disconnect(fe, stream);
	fe->dpcm[stream].runtime = NULL;
}
/* FE "close" handler: shut the FE (and its BEs) down, then prune links */
static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
	int err;

	snd_soc_dpcm_mutex_lock(fe);

	err = dpcm_fe_dai_shutdown(fe_substream);

	/* cleanup always runs, even if shutdown reported an error */
	dpcm_fe_dai_cleanup(fe_substream);

	snd_soc_dpcm_mutex_unlock(fe);

	return err;
}
/*
 * FE "open" handler for a DPCM front-end PCM.
 *
 * Queries DAPM for the currently routed BEs, connects them to the FE and
 * runs the FE/BE startup sequence. On startup failure the just-created
 * links are cleaned up again. Returns 0 or a negative errno.
 */
static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
{
	struct snd_soc_pcm_runtime *fe = asoc_substream_to_rtd(fe_substream);
	struct snd_soc_dapm_widget_list *list;
	int ret;
	int stream = fe_substream->stream;
	snd_soc_dpcm_mutex_lock(fe);
	fe->dpcm[stream].runtime = fe_substream->runtime;
	ret = dpcm_path_get(fe, stream, &list);
	if (ret < 0)
		goto open_end;
	/* calculate valid and active FE <-> BE dpcms */
	dpcm_process_paths(fe, stream, &list, 1);
	ret = dpcm_fe_dai_startup(fe_substream);
	if (ret < 0)
		dpcm_fe_dai_cleanup(fe_substream);
	dpcm_clear_pending_state(fe, stream);
	dpcm_path_put(&list);
open_end:
	snd_soc_dpcm_mutex_unlock(fe);
	return ret;
}
/*
 * Work out which stream directions a PCM for @rtd must expose.
 *
 * For DPCM links (dynamic FEs and no_pcm BEs) a direction is enabled when
 * the link requests it (dpcm_playback/dpcm_capture) and at least one CPU
 * DAI supports it. For normal and codec2codec links a direction is enabled
 * when a codec DAI and its paired CPU DAI both support it; codec2codec
 * links swap the CPU direction since the CPU's capture feeds the codec's
 * playback and vice versa. playback_only/capture_only force the final
 * result. Returns 0 on success or -EINVAL on an unsupported topology.
 */
static int soc_get_playback_capture(struct snd_soc_pcm_runtime *rtd,
				    int *playback, int *capture)
{
	struct snd_soc_dai *cpu_dai;
	int i;
	/* multi-CPU front-ends are not supported by DPCM yet */
	if (rtd->dai_link->dynamic && rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"DPCM doesn't support Multi CPU for Front-Ends yet\n");
		return -EINVAL;
	}
	if (rtd->dai_link->dynamic || rtd->dai_link->no_pcm) {
		int stream;
		if (rtd->dai_link->dpcm_playback) {
			stream = SNDRV_PCM_STREAM_PLAYBACK;
			/* one capable CPU DAI is enough to enable playback */
			for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
				if (snd_soc_dai_stream_valid(cpu_dai, stream)) {
					*playback = 1;
					break;
				}
			}
			if (!*playback) {
				dev_err(rtd->card->dev,
					"No CPU DAIs support playback for stream %s\n",
					rtd->dai_link->stream_name);
				return -EINVAL;
			}
		}
		if (rtd->dai_link->dpcm_capture) {
			stream = SNDRV_PCM_STREAM_CAPTURE;
			/* likewise, one capable CPU DAI enables capture */
			for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
				if (snd_soc_dai_stream_valid(cpu_dai, stream)) {
					*capture = 1;
					break;
				}
			}
			if (!*capture) {
				dev_err(rtd->card->dev,
					"No CPU DAIs support capture for stream %s\n",
					rtd->dai_link->stream_name);
				return -EINVAL;
			}
		}
	} else {
		struct snd_soc_dai *codec_dai;
		/* Adapt stream for codec2codec links */
		int cpu_capture = rtd->dai_link->params ?
			SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
		int cpu_playback = rtd->dai_link->params ?
			SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
		for_each_rtd_codec_dais(rtd, i, codec_dai) {
			/* pair each codec DAI with its CPU DAI (1:1 or N:N) */
			if (rtd->num_cpus == 1) {
				cpu_dai = asoc_rtd_to_cpu(rtd, 0);
			} else if (rtd->num_cpus == rtd->num_codecs) {
				cpu_dai = asoc_rtd_to_cpu(rtd, i);
			} else {
				dev_err(rtd->card->dev,
					"N cpus to M codecs link is not supported yet\n");
				return -EINVAL;
			}
			if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
			    snd_soc_dai_stream_valid(cpu_dai, cpu_playback))
				*playback = 1;
			if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) &&
			    snd_soc_dai_stream_valid(cpu_dai, cpu_capture))
				*capture = 1;
		}
	}
	/* explicit single-direction links override the detection above */
	if (rtd->dai_link->playback_only) {
		*playback = 1;
		*capture = 0;
	}
	if (rtd->dai_link->capture_only) {
		*playback = 0;
		*capture = 1;
	}
	return 0;
}
/*
 * Allocate the ALSA PCM for @rtd: internal (invisible to userspace) for
 * codec2codec and DPCM back-end links, regular otherwise.
 */
static int soc_create_pcm(struct snd_pcm **pcm,
			  struct snd_soc_pcm_runtime *rtd,
			  int playback, int capture, int num)
{
	bool internal = rtd->dai_link->params || rtd->dai_link->no_pcm;
	char new_name[64];
	int ret;

	/* pick the PCM name first, then create it in one place */
	if (rtd->dai_link->params)
		snprintf(new_name, sizeof(new_name), "codec2codec(%s)",
			 rtd->dai_link->stream_name);
	else if (rtd->dai_link->no_pcm)
		snprintf(new_name, sizeof(new_name), "(%s)",
			 rtd->dai_link->stream_name);
	else if (rtd->dai_link->dynamic)
		snprintf(new_name, sizeof(new_name), "%s (*)",
			 rtd->dai_link->stream_name);
	else
		snprintf(new_name, sizeof(new_name), "%s %s-%d",
			 rtd->dai_link->stream_name,
			 soc_codec_dai_name(rtd), num);

	if (internal)
		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
					   playback, capture, pcm);
	else
		ret = snd_pcm_new(rtd->card->snd_card, new_name, num, playback,
				  capture, pcm);
	if (ret < 0) {
		dev_err(rtd->card->dev, "ASoC: can't create pcm %s for dailink %s: %d\n",
			new_name, rtd->dai_link->name, ret);
		return ret;
	}

	dev_dbg(rtd->card->dev, "ASoC: registered pcm #%d %s\n",num, new_name);
	return 0;
}
/* create a new pcm */
/*
 * Builds the ALSA PCM for @rtd, wires up the ASoC PCM operations (DPCM
 * front-end handlers for dynamic links, plain soc_pcm_* otherwise) and
 * merges in any per-component ops. Internal PCMs (back-ends and
 * codec2codec) only get their substream private_data set. Returns 0 or a
 * negative errno.
 */
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
	struct snd_soc_component *component;
	struct snd_pcm *pcm;
	int ret = 0, playback = 0, capture = 0;
	int i;
	/* decide which stream directions this PCM exposes */
	ret = soc_get_playback_capture(rtd, &playback, &capture);
	if (ret < 0)
		return ret;
	ret = soc_create_pcm(&pcm, rtd, playback, capture, num);
	if (ret < 0)
		return ret;
	/* DAPM dai link stream work */
	if (rtd->dai_link->params)
		rtd->close_delayed_work_func = codec2codec_close_delayed_work;
	else
		rtd->close_delayed_work_func = snd_soc_close_delayed_work;
	rtd->pcm = pcm;
	pcm->nonatomic = rtd->dai_link->nonatomic;
	pcm->private_data = rtd;
	/* internal PCMs need no userspace-facing ops - just link back to rtd */
	if (rtd->dai_link->no_pcm || rtd->dai_link->params) {
		if (playback)
			pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
		if (capture)
			pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
		goto out;
	}
	/* ASoC PCM operations */
	if (rtd->dai_link->dynamic) {
		/* DPCM front-end: route through the dpcm_fe_* handlers */
		rtd->ops.open = dpcm_fe_dai_open;
		rtd->ops.hw_params = dpcm_fe_dai_hw_params;
		rtd->ops.prepare = dpcm_fe_dai_prepare;
		rtd->ops.trigger = dpcm_fe_dai_trigger;
		rtd->ops.hw_free = dpcm_fe_dai_hw_free;
		rtd->ops.close = dpcm_fe_dai_close;
		rtd->ops.pointer = soc_pcm_pointer;
	} else {
		/* normal link: plain soc_pcm_* handlers */
		rtd->ops.open = soc_pcm_open;
		rtd->ops.hw_params = soc_pcm_hw_params;
		rtd->ops.prepare = soc_pcm_prepare;
		rtd->ops.trigger = soc_pcm_trigger;
		rtd->ops.hw_free = soc_pcm_hw_free;
		rtd->ops.close = soc_pcm_close;
		rtd->ops.pointer = soc_pcm_pointer;
	}
	/* let components override/extend the optional ops they implement */
	for_each_rtd_components(rtd, i, component) {
		const struct snd_soc_component_driver *drv = component->driver;
		if (drv->ioctl)
			rtd->ops.ioctl = snd_soc_pcm_component_ioctl;
		if (drv->sync_stop)
			rtd->ops.sync_stop = snd_soc_pcm_component_sync_stop;
		if (drv->copy_user)
			rtd->ops.copy_user = snd_soc_pcm_component_copy_user;
		if (drv->page)
			rtd->ops.page = snd_soc_pcm_component_page;
		if (drv->mmap)
			rtd->ops.mmap = snd_soc_pcm_component_mmap;
		if (drv->ack)
			rtd->ops.ack = snd_soc_pcm_component_ack;
	}
	if (playback)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &rtd->ops);
	if (capture)
		snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &rtd->ops);
	ret = snd_soc_pcm_component_new(rtd);
	if (ret < 0)
		return ret;
	pcm->no_device_suspend = true;
out:
	dev_dbg(rtd->card->dev, "%s <-> %s mapping ok\n",
		soc_codec_dai_name(rtd), soc_cpu_dai_name(rtd));
	return ret;
}
/* is the current PCM operation for this FE ? */
int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream)
{
	/* only the FE currently flagged as being updated may be modified */
	return fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_FE;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_fe_can_update);
/* is the current PCM operation for this BE ? */
int snd_soc_dpcm_be_can_update(struct snd_soc_pcm_runtime *fe,
			       struct snd_soc_pcm_runtime *be, int stream)
{
	/* an FE-driven update always covers all of its BEs */
	if (fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_FE)
		return 1;

	/* a BE-driven update only covers BEs flagged for update */
	if (fe->dpcm[stream].runtime_update == SND_SOC_DPCM_UPDATE_BE &&
	    be->dpcm[stream].runtime_update)
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_be_can_update);
/* get the substream for this BE */
/* Each BE runtime owns one internal PCM; return its directional substream. */
struct snd_pcm_substream *
snd_soc_dpcm_get_substream(struct snd_soc_pcm_runtime *be, int stream)
{
	return be->pcm->streams[stream].substream;
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_get_substream);
/*
 * Check whether an operation on @be is safe with respect to every other FE
 * connected to it for @stream: returns 0 as soon as any FE other than @fe
 * is found in one of @states, 1 otherwise.
 *
 * Improvement over the previous version: return immediately on the first
 * match instead of continuing to scan the remaining FEs - once the result
 * is 0 it can never change back.
 */
static int snd_soc_dpcm_check_state(struct snd_soc_pcm_runtime *fe,
				    struct snd_soc_pcm_runtime *be,
				    int stream,
				    const enum snd_soc_dpcm_state *states,
				    int num_states)
{
	struct snd_soc_dpcm *dpcm;
	int state;
	int i;
	for_each_dpcm_fe(be, stream, dpcm) {
		/* the caller's own FE does not block the operation */
		if (dpcm->fe == fe)
			continue;
		state = dpcm->fe->dpcm[stream].state;
		for (i = 0; i < num_states; i++)
			if (state == states[i])
				return 0;
	}
	/* it's safe to do this BE DAI */
	return 1;
}
/*
 * We can only hw_free, stop, pause or suspend a BE DAI if none of its FEs
 * are running, paused or suspended for the specified stream direction.
 */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
				  struct snd_soc_pcm_runtime *be, int stream)
{
	/* states in which another FE still needs the BE to stay active */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_SUSPEND,
	};
	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_free_stop);
/*
 * We can only change the hw params of a BE DAI if none of its FEs are
 * prepared, running, paused or suspended for the specified stream direction.
 */
int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
			       struct snd_soc_pcm_runtime *be, int stream)
{
	/* states in which another FE still depends on the current hw params */
	const enum snd_soc_dpcm_state state[] = {
		SND_SOC_DPCM_STATE_START,
		SND_SOC_DPCM_STATE_PAUSED,
		SND_SOC_DPCM_STATE_SUSPEND,
		SND_SOC_DPCM_STATE_PREPARE,
	};
	return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
}
EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
|
/// Remove the branching code at the end of the specific MBB.
/// This is only invoked in cases where AnalyzeBranch returns success. It
/// returns the number of instructions that were removed.
unsigned TVMInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                    int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  // All four branch opcodes were handled by identical, copy-pasted
  // per-opcode blocks; fold them into a single path.
  const unsigned Opc = I->getOpcode();
  if (Opc != TVM::JMPX && Opc != TVM::IFELSE && Opc != TVM::IFJMP &&
      Opc != TVM::IFNOTJMP)
    return 0;

  // NOTE(review): only the last terminator is erased. If analyzeBranch can
  // report a conditional + unconditional terminator pair, both should be
  // removed here - confirm against TVM's analyzeBranch implementation.
  if (BytesRemoved)
    *BytesRemoved = 8; // every TVM branch removed here is accounted 8 bytes
  I->eraseFromParent();
  return 1;
}
def update_user_settings():
    """Persist the settings submitted from the user's settings form."""
    if admin_session_timedout():
        # The admin session expired: send the user back through login.
        # POST data is lost on redirect, so warn that the action must be
        # retried.
        if flask.request.method == "POST":
            flask.flash("Action canceled, try it again", "error")
        return flask.redirect(
            flask.url_for("auth_login", next=flask.request.url)
        )

    user = _get_user(username=flask.g.fas_user.username)
    form = pagure.forms.ConfirmationForm()

    if form.validate_on_submit():
        # Forward every submitted field except the CSRF token.
        settings = {
            key: flask.request.form[key]
            for key in flask.request.form
            if key != "csrf_token"
        }

        try:
            message = pagure.lib.query.update_user_settings(
                flask.g.session, settings=settings, user=user.username
            )
            flask.g.session.commit()
            flask.flash(message)
        except pagure.exceptions.PagureException as msg:
            flask.g.session.rollback()
            flask.flash(msg, "error")
        except SQLAlchemyError as err:
            flask.g.session.rollback()
            flask.flash(str(err), "error")

    return flask.redirect(flask.url_for("ui_ns.user_settings"))
def _workflow_inputs(activity_gateway: IActivityGateway, paths: List[str] = None):
usage_paths = activity_gateway.get_all_usage_paths()
if not paths:
return usage_paths
return _lookup_paths_in_paths(lookup_paths=paths, target_paths=usage_paths) |
package procstats
// collectProcMetrics gathers per-process statistics for the process
// identified by pid.
//
// NOTE(review): not implemented yet - it returns the zero-value proc struct
// and a nil error, so callers currently see empty metrics rather than a
// failure. Confirm that is the intended interim behavior.
func collectProcMetrics(pid int) (m proc, err error) {
	// TODO
	return
}
|
/*input
3
2
5
17
*/
import java.util.Scanner;
public class CF1 {
    /**
     * For each test case, reads n and prints the largest value of the form
     * 2^k - 1 that does not exceed n's highest set bit doubled minus one,
     * i.e. (highest power of two <= n) - 1.
     */
    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        int tests = in.nextInt(); // number of test cases
        while (tests-- > 0) {
            int n = in.nextInt();
            // Integer.highestOneBit isolates the top set bit, which is
            // exactly what the original bit-stripping loop computed.
            System.out.println(Integer.highestOneBit(n) - 1);
        }
    }
}
#include <iostream>
#include <vector>
#include <cmath>
using namespace std;
typedef long long int lint;

// For each test case: a monster with x hit points and n heroes, each dealing
// d damage per attack while the monster regenerates h afterwards. Prints the
// minimum number of attacks needed, or -1 if the monster can never die.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        lint x;
        cin >> n >> x;
        lint maxD = 0, maxDH = 0; // best single hit / best net damage per round
        for (int i = 0; i < n; i++) {
            lint d, h;
            cin >> d >> h;
            maxD = max(d, maxD);
            maxDH = max(d - h, maxDH);
        }
        if (maxD >= x) {
            cout << "1\n";  // one hit is enough
        } else if (maxDH <= 0) {
            cout << "-1\n"; // nobody outdamages the regeneration
        } else {
            // Integer ceiling division instead of ceil(double(...)):
            // doubles lose precision once x - maxD exceeds 2^53, which a
            // 64-bit lint allows. (x - maxD) > 0 is guaranteed here.
            lint rounds = (x - maxD + maxDH - 1) / maxDH;
            cout << rounds + 1 << '\n';
        }
    }
    return 0;
}
// Source: gurubamal/integration-simulators-ran-simulator - ransim/ransimctrlr/RANSIM-CTRLR/src/main/java/org/onap/ransim/rest/api/controller/RANSliceConfigController.java
/*
* ============LICENSE_START=======================================================
* Ran Simulator Controller
* ================================================================================
* Copyright (C) 2020-2021 Wipro Limited.
* ================================================================================
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ============LICENSE_END=========================================================
*/
package org.onap.ransim.rest.api.controller;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.onap.ransim.rest.api.models.NSSAIConfig;
import org.onap.ransim.rest.api.services.RANSliceConfigService;
import org.onap.ransim.rest.web.mapper.GNBCUCPModel;
import org.onap.ransim.rest.web.mapper.GNBCUUPModel;
import org.onap.ransim.rest.web.mapper.GNBDUModel;
import org.onap.ransim.rest.web.mapper.NRCellCUModel;
import org.onap.ransim.rest.web.mapper.NRCellDUModel;
import org.onap.ransim.rest.web.mapper.NearRTRICModel;
import org.onap.ransim.rest.web.mapper.RANSliceInfoModel;
import org.onap.ransim.rest.web.mapper.RRMPolicyRatioModel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PatchMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.PutMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;
@RestController
@RequestMapping(path = "/api/ransim-db/v4")
public class RANSliceConfigController {
private static final Logger logger = LoggerFactory.getLogger(RANSliceConfigController.class);
@Autowired
private RANSliceConfigService ranSliceConfigService;
// SDN-R APIs
/**
* This method updates the slice details, config details of CUCP
*
* @param GNBCUCPModel
* @return ResponseEntity<GNBCUCPModel>
*/
@PutMapping(path = "/gNBCUCP")
public ResponseEntity<GNBCUCPModel> updateGNBCUCPFunction(@RequestBody GNBCUCPModel gNBCUCPModel) {
logger.info("Request Received");
try {
return new ResponseEntity<GNBCUCPModel>(ranSliceConfigService.saveGNBCUCP(gNBCUCPModel), HttpStatus.OK);
} catch (Exception e) {
logger.error("Error while updating GNBCUCP:" + e.getMessage());
return new ResponseEntity<GNBCUCPModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To fetch the CUCP details
*
* @param gNBCUCPName
* @return ResponseEntity<GNBCUCPModel>
*/
@GetMapping(path = "/gNBCUCP/{gNBCUCPName}")
public ResponseEntity<GNBCUCPModel> findGNBCUCPFunction(@PathVariable String gNBCUCPName) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchGNBCUCPData(gNBCUCPName) != null) {
return new ResponseEntity<GNBCUCPModel>(ranSliceConfigService.fetchGNBCUCPData(gNBCUCPName),
HttpStatus.OK);
} else {
return new ResponseEntity<GNBCUCPModel>(ranSliceConfigService.fetchGNBCUCPData(gNBCUCPName),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching GNBCUCP:" + e.getMessage());
return new ResponseEntity<GNBCUCPModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* This method updates the slice details, config details of CUUP
*
* @param GNBCUUPModel
* @return ResponseEntity<GNBCUUPModel>
*/
@PutMapping(path = "/gNBCUUP")
public ResponseEntity<GNBCUUPModel> updateGNBCUUPFunction(@RequestBody GNBCUUPModel gNBCUUPModel) {
logger.info("Request Received");
try {
return new ResponseEntity<GNBCUUPModel>(ranSliceConfigService.saveGNBCUUP(gNBCUUPModel), HttpStatus.OK);
} catch (Exception e) {
logger.error("Error while updating GNBCUUP:" + e.getMessage());
return new ResponseEntity<GNBCUUPModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To fetch the CUCP details
*
* @param gNBCUCPName
* @return ResponseEntity<GNBCUCPModel>
*/
@GetMapping(path = "/gNBCUUP/{gNBCUUPId}")
public ResponseEntity<GNBCUUPModel> findGNBCUUPFunction(@PathVariable Integer gNBCUUPId) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchGNBCUUPData(gNBCUUPId) != null) {
return new ResponseEntity<GNBCUUPModel>(ranSliceConfigService.fetchGNBCUUPData(gNBCUUPId),
HttpStatus.OK);
} else {
return new ResponseEntity<GNBCUUPModel>(ranSliceConfigService.fetchGNBCUUPData(gNBCUUPId),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching GNBCUCP:" + e.getMessage());
return new ResponseEntity<GNBCUUPModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* This method updates the slice details, config details of gNBDU
*
* @param GNBDUModel
* @return ResponseEntity<GNBDUModel>
*/
@PutMapping(path = "/gNBDU")
public ResponseEntity<GNBDUModel> updateGNBDUFunction(@RequestBody GNBDUModel gNBDUModel) {
logger.info("Request Received");
try {
return new ResponseEntity<GNBDUModel>(ranSliceConfigService.saveGNBDU(gNBDUModel), HttpStatus.OK);
} catch (Exception e) {
logger.error("Error while updating GNBDU:" + e.getMessage());
return new ResponseEntity<GNBDUModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To fetch the gNBDU details
*
* @param gNBDUId
* @return ResponseEntity<GNBDUModel>
*/
@GetMapping(path = "/gNBDU/{gNBDUId}")
public ResponseEntity<GNBDUModel> findGNBDUFunction(@PathVariable Integer gNBDUId) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchGNBDUData(gNBDUId) != null) {
return new ResponseEntity<GNBDUModel>(ranSliceConfigService.fetchGNBDUData(gNBDUId), HttpStatus.OK);
} else {
return new ResponseEntity<GNBDUModel>(ranSliceConfigService.fetchGNBDUData(gNBDUId),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching GNBDU:" + e.getMessage());
return new ResponseEntity<GNBDUModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* This method updates the NearRTRIC details
*
* @param nearRTRICModel
* @return ResponseEntity<NearRTRICModel>
*/
@PutMapping(path = "/nearRTRIC")
public ResponseEntity<NearRTRICModel> updateNearRTRIC(@RequestBody NearRTRICModel nearRTRICModel) {
logger.info("Request Received");
try {
return new ResponseEntity<NearRTRICModel>(ranSliceConfigService.saveNearRTRIC(nearRTRICModel),
HttpStatus.OK);
} catch (Exception e) {
logger.error("Error while updating nearRTRIC:" + e.getMessage());
return new ResponseEntity<NearRTRICModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To fetch the nearRTRIC details
*
* @param nearRTRICId
* @return ResponseEntity<GNBDUModel>
*/
@GetMapping(path = "/nearRTRIC/{nearRTRICId}")
public ResponseEntity<NearRTRICModel> findNearRTRICFunction(@PathVariable Integer nearRTRICId) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchNearRTRICData(nearRTRICId) != null) {
return new ResponseEntity<NearRTRICModel>(ranSliceConfigService.fetchNearRTRICData(nearRTRICId),
HttpStatus.OK);
} else {
return new ResponseEntity<NearRTRICModel>(ranSliceConfigService.fetchNearRTRICData(nearRTRICId),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching nearRTRIC:" + e.getMessage());
return new ResponseEntity<NearRTRICModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To fetch the RRMPolicy of CU/DU
*
* @param resourceType
* @param resourceId
* @return
*/
@GetMapping(path = "/rrmPolicy/{resourceType}/{resourceId}")
public ResponseEntity<RRMPolicyRatioModel> findRRMPolicyOfNE(@PathVariable String resourceType,
@PathVariable String resourceId) {
logger.debug("Request Received");
try {
if (ranSliceConfigService.fetchRRMPolicyOfNE(resourceType, resourceId) != null) {
return new ResponseEntity<RRMPolicyRatioModel>(
ranSliceConfigService.fetchRRMPolicyOfNE(resourceType, resourceId), HttpStatus.OK);
} else {
return new ResponseEntity<RRMPolicyRatioModel>(
ranSliceConfigService.fetchRRMPolicyOfNE(resourceType, resourceId), HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching RRMPolicy:" + e.getMessage());
return new ResponseEntity<RRMPolicyRatioModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* This method updates the RRM policy of a network function
*
* @param nearRTRICModel
* @return ResponseEntity<NearRTRICModel>
*/
@PostMapping(path = "/rrmPolicy")
public ResponseEntity<RRMPolicyRatioModel> updateRRMPolicy(@RequestBody RRMPolicyRatioModel rrmPolicy) {
logger.info("Request Received");
try {
return new ResponseEntity<RRMPolicyRatioModel>(ranSliceConfigService.updateRRMPolicy(rrmPolicy),
HttpStatus.OK);
} catch (Exception e) {
logger.error("Error while updating RRM Policy:" + e.getMessage());
return new ResponseEntity<RRMPolicyRatioModel>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To find the list of RICs from tracking area
* 1. Find Cells from TA
* 2. find List of RICs of the cells
*
* @param trackingArea
* @return
*/
@GetMapping(path = "/nearrtric-list/{trackingArea}")
public ResponseEntity<List<NearRTRICModel>> findNearRTRICofCellsFromTA(@PathVariable int trackingArea) {
logger.info("Request Received");
try {
List<String> cellIds = this.findListOfCells(trackingArea).getBody();
List<Integer> cellIdList = cellIds.stream().map(Integer::parseInt).collect(Collectors.toList());
if (ranSliceConfigService.findNearRTRICofCells(cellIdList).size() > 0) {
return new ResponseEntity<List<NearRTRICModel>>(ranSliceConfigService.findNearRTRICofCells(cellIdList),
HttpStatus.OK);
} else {
return new ResponseEntity<List<NearRTRICModel>>(ranSliceConfigService.findNearRTRICofCells(cellIdList),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching the RICs:" + e.getMessage());
return new ResponseEntity<List<NearRTRICModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To find the list of CUs in a tracking area
*
* @param trackingArea
* @return
*/
@GetMapping(path = "/cell-list/{trackingArea}")
public ResponseEntity<List<String>> findListOfCells(@PathVariable int trackingArea) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchCellsofTA(trackingArea).size() > 0) {
return new ResponseEntity<List<String>>(ranSliceConfigService.fetchCellsofTA(trackingArea),
HttpStatus.OK);
} else {
return new ResponseEntity<List<String>>(ranSliceConfigService.fetchCellsofTA(trackingArea),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching the Cells:" + e.getMessage());
return new ResponseEntity<List<String>>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To find the list of CU-Cells
*
* @param nearRTRICId
* @return
*/
@GetMapping(path = "/cu-cell-list/{nearRTRICId}")
public ResponseEntity<List<NRCellCUModel>> findCUCellsofRIC(@PathVariable Integer nearRTRICId) {
logger.info("Request Received");
try {
if (ranSliceConfigService.fetchCUCellsofRIC(nearRTRICId).size() > 0) {
return new ResponseEntity<List<NRCellCUModel>>(ranSliceConfigService.fetchCUCellsofRIC(nearRTRICId),
HttpStatus.OK);
} else {
return new ResponseEntity<List<NRCellCUModel>>(ranSliceConfigService.fetchCUCellsofRIC(nearRTRICId),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
e.printStackTrace();
logger.error("Error while fetching the Cells-CU:" + e.getMessage());
return new ResponseEntity<List<NRCellCUModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
* To find the nearRTRIC of NSSI
*
* This API can be used in Terminate/activate/deactivate to find the RIC from ranNFNSSIId in SO request
*
* @param ranNFNSSIId
* @return List<NearRTRICModel>
*/
@GetMapping(path = "/nearrtric/{ranNFNSSIId}")
public ResponseEntity<List<NearRTRICModel>> findNearRTRICByNSSI(@PathVariable String ranNFNSSIId) {
logger.info("Request Received");
try {
if (ranSliceConfigService.findNearRTRICByNSSI(ranNFNSSIId).size() > 0) {
return new ResponseEntity<List<NearRTRICModel>>(ranSliceConfigService.findNearRTRICByNSSI(ranNFNSSIId),
HttpStatus.OK);
} else {
return new ResponseEntity<List<NearRTRICModel>>(ranSliceConfigService.findNearRTRICByNSSI(ranNFNSSIId),
HttpStatus.NO_CONTENT);
}
} catch (Exception e) {
logger.error("Error while fetching the nearRTRIC by RANNFNSSI:" + e.getMessage());
return new ResponseEntity<List<NearRTRICModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
}
}
/**
 * Finds the DU-cells serving the given sNSSAI, grouped by RIC id.
 *
 * @param sNSSAI single network slice selection assistance information id
 *               (the previous javadoc wrongly documented a nearRTRICId param)
 * @return 200 OK with the map of DU-cells, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/du-cell-list/{sNSSAI}")
public ResponseEntity<Map<Integer, List<NRCellDUModel>>> findDUCellsofRIC(@PathVariable String sNSSAI) {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        Map<Integer, List<NRCellDUModel>> duCells = ranSliceConfigService.fetchDUCellsofRIC(sNSSAI);
        HttpStatus status = duCells.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<Map<Integer, List<NRCellDUModel>>>(duCells, status);
    } catch (Exception e) {
        // printStackTrace() removed: the logger already records the failure.
        logger.error("Error while fetching the Cells-DU:" + e.getMessage());
        return new ResponseEntity<Map<Integer, List<NRCellDUModel>>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
// Inventory APIs
/**
 * Updates the RAN slice details in the inventory.
 *
 * @param ranSliceInfoModel RAN slice record to persist
 * @return 200 OK with the updated record, 500 on failure
 */
@PutMapping(path = "/ranslice-details")
public ResponseEntity<RANSliceInfoModel> updateRANInventory(@RequestBody RANSliceInfoModel ranSliceInfoModel) {
    logger.info("Request Received");
    try {
        RANSliceInfoModel updated = ranSliceConfigService.updateRANInventory(ranSliceInfoModel);
        return new ResponseEntity<RANSliceInfoModel>(updated, HttpStatus.OK);
    } catch (Exception e) {
        logger.error("Error while updating RAN Inventory:" + e.getMessage());
        return new ResponseEntity<RANSliceInfoModel>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the RAN slice details for a RAN NF NSSI.
 *
 * @param ranNFNSSIId RAN NF NSSI identifier
 * @return 200 OK with the slice details, 204 NO_CONTENT when not found, 500 on failure
 */
@GetMapping(path = "/ranslice-details/{ranNFNSSIId}")
public ResponseEntity<RANSliceInfoModel> findRANSlice(@PathVariable String ranNFNSSIId) {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        RANSliceInfoModel ranSlice = ranSliceConfigService.fetchRANSlice(ranNFNSSIId);
        HttpStatus status = (ranSlice != null) ? HttpStatus.OK : HttpStatus.NO_CONTENT;
        return new ResponseEntity<RANSliceInfoModel>(ranSlice, status);
    } catch (Exception e) {
        logger.error("Error while fetching the RAN slice Details:" + e.getMessage());
        return new ResponseEntity<RANSliceInfoModel>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
// Slice Analysis MS APIs
/**
 * Fetches the NearRTRICs serving the sNSSAI.
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the nearRTRIC list, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/nearrtric/snssai/{sNSSAI}")
public ResponseEntity<List<NearRTRICModel>> findRICsofNSSAI(@PathVariable String sNSSAI) {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        List<NearRTRICModel> rics = ranSliceConfigService.findRICsByNSSAI(sNSSAI);
        HttpStatus status = rics.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<List<NearRTRICModel>>(rics, status);
    } catch (Exception e) {
        logger.error("Error while fetching the nearRTRIC by sNSSAI:" + e.getMessage());
        return new ResponseEntity<List<NearRTRICModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the configuration requested for a slice.
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the configuration map, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/profile-config/{sNSSAI}")
public ResponseEntity<Map<String, Integer>> fetchSliceProfileConfiguration(@PathVariable String sNSSAI) {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        Map<String, Integer> profileConfig = ranSliceConfigService.findSliceProfileconfig(sNSSAI);
        HttpStatus status = profileConfig.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<Map<String, Integer>>(profileConfig, status);
    } catch (Exception e) {
        logger.error("Error while fetching the Requested Configuration:" + e.getMessage());
        return new ResponseEntity<Map<String, Integer>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the configuration of a slice in the RIC.
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the per-RIC configuration, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/slice-config/{sNSSAI}")
public ResponseEntity<Map<Integer, NSSAIConfig>> fetchSliceConfiguration(@PathVariable String sNSSAI) {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        Map<Integer, NSSAIConfig> sliceConfig = ranSliceConfigService.findSliceConfig(sNSSAI);
        HttpStatus status = sliceConfig.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<Map<Integer, NSSAIConfig>>(sliceConfig, status);
    } catch (Exception e) {
        // printStackTrace() removed: the logger already records the failure.
        logger.error("Error while fetching the Configuration of a Slice at RIC:" + e.getMessage());
        return new ResponseEntity<Map<Integer, NSSAIConfig>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the DU details for an sNSSAI.
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the DU list, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/du-list/{sNSSAI}")
public ResponseEntity<List<GNBDUModel>> fetchDUFunctionsOfNSSAI(@PathVariable String sNSSAI) {
    logger.info("Request Received::" + sNSSAI);
    try {
        // Single service call instead of the previous duplicated invocation.
        List<GNBDUModel> dus = ranSliceConfigService.findDUsofSNssai(sNSSAI);
        HttpStatus status = dus.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<List<GNBDUModel>>(dus, status);
    } catch (Exception e) {
        // printStackTrace() removed: the logger already records the failure.
        logger.error("Error while fetching the DU details of NSSAI:" + e.getMessage());
        return new ResponseEntity<List<GNBDUModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the CU details for an sNSSAI.
 * (Previous javadoc wrongly described this as the DU endpoint.)
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the CU-CP list, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/cucp-list/{sNSSAI}")
public ResponseEntity<List<GNBCUCPModel>> fetchCUFunctionsOfNSSAI(@PathVariable String sNSSAI) {
    logger.info("Request Received::" + sNSSAI);
    try {
        // BUG FIX: the emptiness check previously queried findDUsofSNssai(...)
        // while the response body used findCUsofSNssai(...), so the HTTP status
        // reflected the DU list rather than the CU list actually returned.
        // Query the CU service once and derive the status from that result.
        List<GNBCUCPModel> cus = ranSliceConfigService.findCUsofSNssai(sNSSAI);
        HttpStatus status = cus.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<List<GNBCUCPModel>>(cus, status);
    } catch (Exception e) {
        logger.error("Error while fetching the CU details of NSSAI:" + e.getMessage());
        return new ResponseEntity<List<GNBCUCPModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches the customer (subscriber) details for an sNSSAI.
 * NOTE: method name keeps the historical "Subsciber" spelling — it is part of
 * the public interface and may be referenced elsewhere; do not rename here.
 *
 * @param sNSSAI single network slice selection assistance information id
 * @return 200 OK with the subscriber map, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/subscriber-details/{sNSSAI}")
public ResponseEntity<Map<String, String>> fetchSubsciberDetailsOfNSSAI(@PathVariable String sNSSAI) {
    logger.info("Request Received::" + sNSSAI);
    try {
        // Single service call instead of the previous duplicated invocation.
        Map<String, String> subscribers = ranSliceConfigService.getSubscriberDetails(sNSSAI);
        HttpStatus status = subscribers.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<Map<String, String>>(subscribers, status);
    } catch (Exception e) {
        logger.error("Error while fetching the Customer details of NSSAI:" + e.getMessage());
        return new ResponseEntity<Map<String, String>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches all CU-CP function details.
 *
 * @return 200 OK with the CU-CP list, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/cucp-list")
public ResponseEntity<List<GNBCUCPModel>> fetchCUCPFunctions() {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        List<GNBCUCPModel> cucps = ranSliceConfigService.findAllCUCPFunctions();
        HttpStatus status = cucps.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<List<GNBCUCPModel>>(cucps, status);
    } catch (Exception e) {
        logger.error("Error while fetching the CU details:" + e.getMessage());
        return new ResponseEntity<List<GNBCUCPModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
/**
 * Fetches all DU function details.
 *
 * @return 200 OK with the DU list, 204 NO_CONTENT when empty, 500 on failure
 */
@GetMapping(path = "/du-list")
public ResponseEntity<List<GNBDUModel>> fetchDUFunctions() {
    logger.info("Request Received");
    try {
        // Single service call instead of the previous duplicated invocation.
        List<GNBDUModel> dus = ranSliceConfigService.findAllDUFunctions();
        HttpStatus status = dus.isEmpty() ? HttpStatus.NO_CONTENT : HttpStatus.OK;
        return new ResponseEntity<List<GNBDUModel>>(dus, status);
    } catch (Exception e) {
        logger.error("Error while fetching the DU details:" + e.getMessage());
        return new ResponseEntity<List<GNBDUModel>>(HttpStatus.INTERNAL_SERVER_ERROR);
    }
}
}
|
# Minimum number of taxis (capacity 4) needed for groups of size 1..4.
# Reads: n (number of groups), then the n group sizes on one line.
n = int(input())
sizes = [int(tok) for tok in input().split()]

# count[g-1] = how many groups of size g there are.
count = [sizes.count(g) for g in (1, 2, 3, 4)]

# Every group of 4 fills a taxi; every group of 3 gets its own taxi
# (optionally topped up with one leftover group of 1).
cabs = count[3] + count[2]
ones = max(count[0] - count[2], 0)  # 1s remaining after riding with the 3s

# Groups of 2 pair up two per taxi.
cabs += count[1] // 2
if count[1] % 2:
    # One leftover group of 2 takes its own taxi and up to two 1s with it.
    cabs += 1
    ones = max(ones - 2, 0)

# Remaining 1s travel four to a taxi (round up).
cabs += (ones + 3) // 4
print(cabs)
|
Dissecting the Wjj Anomaly: Diagnostic Tests of a Leptophobic Z'
We examine the scenario where a leptophobic Z' boson accounts for the excess of events in the Wjj channel as observed by CDF. We assume generation independent couplings for the Z' and obtain allowed regions for the four hadronic couplings using the cross section range quoted by CDF as well as constraints from dijet production at UA2. These coupling regions translate into well-determined rates for the associated production of Z/\gamma+Z' at the Tevatron and LHC, as well as W+Z' at the LHC,that are directly correlated with the Wjj rate observed at the Tevatron. The Wjj rate at the LHC is large and this channel should be observed soon once the SM backgrounds are under control. The rates for Z/\gamma+Z' associated production are smaller, and these processes should not yet have been observed at the Tevatron given the expected SM backgrounds. In addition, we also show that valuable coupling information is obtainable from the distributions of other kinematic variables, e.g., M_{WZ'}, p_T^W, and \cos \theta_W^*. Once detected, these associated production processes and the corresponding kinematic distributions examined here will provide further valuable information on the Z' boson couplings.
Introduction and Background
The CDF Collaboration has reported the observation of an excess of events in the ℓνjj channel with a statistical significance of 3.2σ corresponding to 4.3 fb −1 of integrated luminosity. Recently, CDF has included an additional 3 fb −1 to their data sample , for a total of 7.3 fb −1 , and the significance of this anomaly has grown to ∼4.8σ (∼ 4.1σ including systematics). This is now a serious situation. An examination of the m jj distribution for these events reveals a peak that is compatible with Standard Model (SM) W W + W Z production, as well as a second peak that is compatible with a new resonance at m jj ∼ 150 GeV.
This state of affairs has gathered much attention, even before the inclusion of the additional data sample. Skeptics have been concerned about the detailed shape of the Monte Carlo simulation modeling of the SM background, the jet-energy scale, as well as possible contamination from top-quark production . However, the CDF Collaboration has shown that neither the top background nor changes to the jet-energy scale is likely to account for this excess. Optimists have offered several new physics explanations, including a new Z ′ boson , technicolor , Supersymmetry with and without R-parity conservation , color octet production , and more . More recently, the D0 Collaboration has weighed in on this anomaly and does not observe a signal at the same level as claimed by CDF in a luminosity sample of 4.3 fb −1 . An understanding of this discrepancy between CDF and D0 has not yet been reached, and the situation most likely will only be clarified with results from the LHC. Certainly, if new physics is really present, its cross section is most likely to be at the low end of the range discussed by CDF.
Here, we will assume the excess observed by CDF is due to new physics, and we will further examine the possibility of Z ′ production, pp → W + Z ′ → ℓν + jj, as the potential source. Interestingly, we note that the CDF data shows a sharp dip, or valley, in the m jj spectrum between the first peak (i.e., SM W W + W Z production) and the second peak (the hypothetical Z ′ boson); this is the behavior that one might expect due to the destructive interference between the SM W and Z and a new gauge boson . Clearly, this new Z ′ boson must have very leptophobic couplings in order to evade direct production at LEPII as well as the Tevatron and LHC Z ′ Drell-Yan dilepton searches. In addition, there must be some mechanism which prohibits any significant Z − Z ′ mixing ( < ∼ 10 −3 ) in order to be consistent with precision electroweak data and to avoid any 'leakage' of the SM Z leptonic couplings to the Z ′ . CDF reports that there is no particular excess of b-quarks in the events near m jj ∼ 150 GeV, and thus we will assume that the Z ′ decays democratically to all kinematically accessible hadronic states, i.e., the Z ′ has generation-independent couplings ‡ .
It has been known since long ago (in preparation for the SSC) , that the associated production W/Z/γ + Z ′ provides an excellent opportunity to perform diagnostic tests on the coupling structure of a new gauge boson. In particular, if the Z ′ explanation for the CDF ‡ Note, however, that a significant b-quark content for these jets, < ∼ 20 − 30%, is consistent with the existing CDF data . excess is correct, then one should at some point also observe Z + Z ′ and γ + Z ′ associated production. As we will show below, given the CDF result, one can make relatively definitive predictions for the rates of these processes at both the Tevatron and the LHC.
In what follows, we will perform an analysis of the possible coupling structure and strength for the Z ′ that is consistent with the data and will determine the allowed regions for the left-and right-handed Z ′ couplings. We will then be armed to compute the predictions for W/Z/γ + jj production. We find that the rates for Z/γ + jj are likely too small to be observed at the Tevatron with current data samples, and that a Z ′ in W/Z/γ + jj could be detected at the LHC with integrated luminosities of order a few fb −1 once SM backgrounds are under control. We provide the most general expressions for these cross sections. We also examine the M W Z ′ , as well as other, kinematic distributions and show that they can yield additional valuable coupling information, particularly for the left-handed quarks. Our main conclusion is that the allowed regions of the Z ′ couplings are relatively restricted, allowing for reasonably firm predictions for the associated production rates and the rates for other kinematic distributions. If the CDF anomaly is due to a new ∼ 150 GeV leptophobic Z ′ boson, the LHC should confirm this signal relatively soon.
Analysis
We define the couplings of the Z ′ to the SM quarks in a manner similar to that for the conventional SM Z boson, in order to make contact with our earlier analyses . It will also be convenient to define the chiral coupling combinations u L,R = v ′ u ± a ′ u (and similarly for u → d) for the analysis below. For simplicity, and to avoid possible issues with Flavor Changing Neutral Currents (FCNC), we will assume that these couplings are generation-independent; this assumption has very little (if any) direct impact in what follows as it is essentially only the Z ′ couplings to the first generation quarks that determine its production cross sections at the Tevatron and LHC. We will, however, return to this point later below when discussing the Z ′ total decay width.
Since the observed excess is in the proposed W ± + Z ′ channel, let us first examine the differential cross section for this process; it is easily obtained from the expressions in the original Refs. , which describe the corresponding SM process with suitable simple modifications: where K W (taken to be 1.3 in our numerical analysis) is a NLO K-factor, c W = cos θ W , β W (z) is the speed of the W boson in the center of mass (CM) frame, z is the CM scattering angle cos θ * , Y = (ût − M 2 W M 2 Z ′ )/ŝ 2 and the quantity X is given by the expression Since the SM W is purely left-handed, the right-handed couplings of the Z ′ to the SM quarks are projected out in this amplitude so that only the left-handed couplings of both u and d appear in this expression for the cross section. It is important to note that for large values ofŝ, X behaves as ∼ŝ 2 /M 2 W M 2 Z ′ >> 1 and can provide a very significant cross section enhancement when u L = d L as was noted numerically by some previous authors , , . In contrast, the other terms in the cross section are of order unity (or parametrically smaller) in the same limit. As we will see below, the presence of this term will allow for a large W +Z ′ production rate, without necessarily enhancing the corresponding Z/γ + Z ′ cross sections. However, we note that the possibility of u L = d L implies that the group generator, Q ′ , to which the Z ′ couples does not commute with the usual SU(2) L isospin generators, i.e., = 0. This can have a number of implications elsewhere .
Requiring that the Z ′ decays only to two jets, integration of the above expression over z = cos θ * and the relevant parton densities leads to the numerical value for the (pre-cut) W + Z ′ cross sections at the Tevatron and LHC for arbitrary couplings given by These results explicitly show the enhancement arising in the case of u L = d L . In performing these numerical calculations, and the ones found below, we make use of the CTEQ6.6M parton density functions . Since the apparent excess in the Tevatron W + Z ′ cross section is observed , prior to acceptance and analysis cuts, to be in the range of ∼ 1 − 4 pb, this results in an ellipse of potentially allowed values in the u L − d L plane § . This is displayed in the top panel of Fig. 1, assuming M Z ′ = 150 GeV; in this figure we show the allowed ellipses for W + Z ′ cross section values ranging from 1.5-4.0 pb. Of course, given the results from D0, the lower end of this range will be likely to be of interest to us in what follows.
Of course, a leptophobic Z ′ boson will also be produced directly and contribute to dijet production and may be observed as a resonance in the dijet invariant mass spectrum. Due to kinematics, the data from SppS has the best signal to background ratio for searches in the dijet channel in this low mass region. UA2 performed such a search in the dijet channel and constrained the cross section to be less than roughly ≃ 150 pb for a ∼ 150 GeV resonance. This places an additional constraint on the Z ′ couplings that needs to be satisfied. Employing the narrow width approximation (which we will justify below), the dijet rate induced by a Z ′ at UA2 (recall the CM energy for the SppS was 630 GeV) resulting from the process qq → Z ′ → jj can be written numerically as making use of the same procedure and assumptions as above. Given this result and the UA2 bound on the cross section, the largest corresponding constraint ellipse that can be drawn in the u L − d L plane denoting the UA2 allowed region is obviously obtained when the Z ′ has only left-handed quark couplings, i.e., u R = d R = 0. This bound is shown as the red ellipse in the top panel of Fig. 1. Clearly, if non-zero values of u R or d R are also present, then this constraint ellipse will only contract. Here we note that the UA2-allowed coupling ellipse intersects the corresponding ones obtained by evaluating the W ± Z ′ cross section at the Tevatron at different values of the Z ′ couplings depending upon the assumed value of σ W ± Z ′ . Note that the simultaneous consistency of the CDF result with the UA2 dijet data forbids very large u L couplings of either sign and allows for the possibility of u L = d L . The segments of these ellipses that are simultaneously allowed by both cross section constraints are highlighted in the lower panel of Fig. 1 and are color coded for comparisons with results to be shown in later figures.
For the case u R = d R = 0, the upper panel in Fig. 2 shows the predicted UA2 dijet cross section along the allowed coupling line segments of Fig. 1. The curves in this figure correspond to the upper set of arcs in Fig. 1; a corresponding set of curves can also be obtained representing the bottom arcs and is obtained by flipping the values u L → −u L in Fig. 2. In all cases we see that the values along the parabolic shaped curves can lead to a dijet cross section that is significantly far from the quoted upper bound. However, this still implies that the possible values of u R , d R must be restricted or the UA2 dijet bound would be exceeded. Of course, for any arbitrary point along these parabolas one can perform a scan of the u R − d R plane to obtain the corresponding region which is allowed by UA2; the weakest bounds on the right-handed couplings are clearly obtained when the predicted dijet cross section is minimized. These constraints on the maximal values of the right-handed couplings are shown in the u R − d R plane in the lower panel of Fig. 2 for various values of the Tevatron W + Z ′ cross section. Note that while the largest u R − d R allowed region is obtained at the minimum of the parabolas in the top panel, the region will shrink substantially at points where the dijet cross section arising from the left-handed couplings alone almost saturates the UA2 bound. Now that we have obtained constraints on the left-and right-handed Z ′ couplings, let us turn to the other relevant processes for associated production, namely Z + Z ′ and γ + Z ′ . 
In analogy with our W ± + Z ′ result above, the qq → Z + Z ′ differential cross section can be obtained by a suitable modification of the corresponding result in the SM given by where (v, a) q are the couplings of the quarks to the SM Z boson, K Z , β Z are the K-factor (=1.3 here) for this process and speed of the SM Z in the CM frame, and P represents the same kinematics as in the W ± + Z ′ case above in the limit of equal couplings and with the replacement M W → M Z , i.e., Since the SM Z couplings are known, and folding in the SM Z decay to lepton pairs (with B = 0.03366 for e or µ and then summing over both) this expression can be numerically evaluated for arbitrary Z ′ couplings after integration over z and the relevant parton densities at either the Tevatron or the LHC. Writing we obtain, in units of fb and before any cuts, α Z = 381.5 (1109) Analogously, we can obtain the corresponding numerical result for the case of the γ + Z ′ final state; the analytic expression for the differential cross section can be obtained from that for Z + Z ′ production by taking M Z → 0 in P and by a setting v Q ∼ Q q with a q = 0. In this case we impose the experimental cuts |η γ | < 1.1(2.5) and p γ T > 25(50) GeV at the Tevatron(LHC) and obtain numerically after integration where f γ u = 767(533) fb and f γ d = 72.7(114) fb at the Tevatron(LHC). We are now ready to calculate the expected values of σ ZZ ′ ,γZ ′ at both colliders. In evaluating the Z ′ couplings, we proceed as follows: we select a point on one of the line segments in the bottom panel of Fig. 1 which tells us the specific values of u L , d L . We then locate that point on the upper panel in Fig. 2 and scan over the possible values in the u R − d R plane which are consistent with the UA2 upper bound on the dijet cross section for those u L , d L couplings and obtain the maximum and minimum values for both σ ZZ ′ ,γZ ′ at the Tevatron and the LHC. 
The minimum values in all cases correspond, of course, to the situation when u R = d R = 0 as contributions arising from non-zero values of these couplings always add constructively. The results of this analysis for the Tevatron and LHC are shown in Fig. 3 and Fig. 4, respectively, employing the same color coding as before. We see that these cross sections are much smaller than that for W ± + Z ′ at the Tevatron (as well as for the LHC) and are possibly too small to be observed at the Tevatron with present integrated luminosities given SM backgrounds. These cross sections are, of course, much larger at the LHC and should be observable with roughly 1 fb −1 of integrated luminosity once SM backgrounds are sufficiently understood. The predicted results for σ W ± Z ′ at the LHC, which are independent of the possible values of u R , d R as was the case for the Tevatron, can be found in Fig. 5. Note that the branching fraction for the leptonic decays of the SM W are included in these results. We see that the cross section is quite large and should be detectable soon.
We learn a number of things from examining these Figures: (i) The predicted values for σ γZ ′ ,ZZ ′ at the Tevatron (and the LHC) are always substantially lower than the corresponding ones for σ W ± Z ′ . These processes should not yet have been observed at the Tevatron but will eventually provide a test of the Z ′ hypothesis once enough data accumulates at the LHC. ¶ (ii) The predicted values of σ γZ ′ ,ZZ ′ are relatively constrained and are determined by the CDF W + Z ′ cross section itself, as well as by the UA2 dijet constraints, except for possible NLO contributions. (iii) The γZ ′ , ZZ ′ cross sections at the LHC and the ZZ ′ cross section at the Tevatron are found to be relatively insensitive to the specific values of u R , d R due to the rather strong constraints arising from the UA2 data. (iv) The γZ ′ process at the Tevatron could potentially be used to obtain further constraints on the values of u R , d R given sufficient integrated luminosity. (v) The W ± Z ′ cross section at the LHC is large and is well-predicted apart from possible NLO contributions. Lastly, (vi) we see that the ratio of the W + Z ′ and W − Z ′ cross sections at the LHC also has a weak coupling dependence which may also be useful as an additional handle on the left-handed quark couplings of the Z ′ . Thus we see that even with four free coupling parameters, the Z ′ explanation of the W jj excess seen by CDF leads to a very predictive scenario that can be further tested quite soon at both the Tevatron and the LHC.
To be consistent, we need to demonstrate that our use of the narrow width approximation is valid in the Z ′ scenario. Essentially, it suffices to show that the Z ′ total width, Γ, assuming decays to only SM particles, is always substantially smaller than the CDF dijet mass resolution, ≃ 14.3 GeV , for M Z ′ ∼ 150 GeV. Clearly, this condition will be most difficult to satisfy when the Z ′ couples in a generation-independent manner to all 3 generations (as we have assumed here) instead of, e.g., only to the first generation which can then lead to significant flavor physics issues. Using the Z ′ coupling parameter scans above, we can calculate the allowed regions for the predicted value of Γ; the results are shown in Fig. 6, using the same color coding as before. Here we see that in the generation-independent coupling scenario, Γ always remains in the range 0.5 − 5.6 GeV, i.e., a set of values significantly below the CDF dijet mass resolution. Thus the Z ′ will always appear to be quite narrow and, in particular, with Γ/M Z ′ < ∼ 3.3%, validates our use of the narrow width approximation above. It is also of some interest to notice that the corresponding branching fractions for the decay Z ′ → bb, under the assumption of 3-generation coupling universality, are always found to lie in the approximate range ∼ 0 − 0.33, which is consistent with the W jj data from CDF . The coupling dependence of this branching fraction can be seen in detail in the lower panel of Fig. 6. If lower values for this branching fraction are favored this would be an indication for couplings with |u L | > |d L |. ¶ In fact, their observation at the Tevatron at relatively low luminosity would likely have ruled out the Z ′ hypothesis. The predicted values for the sum of the cross section for W ± Z ′ at the LHC based on the corresponding cross section observed by CDF at the Tevatron along the parameter space arcs described above. The W leptonic branching fraction is included. 
Again, note that another set of solutions exist with u L → −u L . (Bottom) The ratio of the W + Z ′ to the W − Z ′ cross sections at the LHC, with the same color coding. Figure 6: (Top) Predicted ranges for the value of the Z ′ width, Γ, arising from the parameter space along the color-coded arcs described above. (Bottom) The b-quark branching fraction of the Z ′ for the corresponding range of coupling parameters.
Since the above analysis restricts the allowed values of u L , d L for the new Z ′ boson (while u R , d R play a lesser role and may in fact be zero) one would like to attempt to constrain these couplings further. Clearly a better determination of σ W Z ′ at the Tevatron and a measurement of σ γ,Z+Z ′ at both the Tevatron and LHC will be useful in this regard. However, it may be possible to obtain additional information from the W + Z ′ kinematic distributions themselves. To this end, we return to our discussion of the W + Z ′ differential cross section above. There, we saw that in the case of u L = d L an additional term contributes to the cross section, i.e., the term denoted by X, which grows with increasingŝ. If this term is absent, the differential distribution for dσ/dM W Z ′ will peak at low values of M W Z ′ , not far above threshold and then fall rapidly. However, the presence of this term will push this peak in this distribution to significantly larger values of M W Z ′ and the corresponding fall off of this differential cross section will be far slower. Thus, in principle, a measurement of the M W Z ′ distribution could provide an additional useful handle on the u L , d L coupling relationship which is independent of the values for u R , d R .
Since the 'discovery' channel, W ± Z ′ , has the largest cross section, a detailed study of this reaction can provide us a way to pin down the values of the u L , d L couplings which will then further restrict u R , d R . In Fig. 7 we show the dσ/dM W Z ′ distribution at the Tevatron and the LHC for several different representative values of u L , d L lying within the allowed coupling ellipses shown in Fig. 1. In the top panel for W + Z ′ production at the Tevatron, we see that this distribution is quite sensitive to the choice of these couplings. In particular, we see that when u L = d L the peak in the distribution is at very low M W Z ′ values, not far from threshold, as expected. However, the peak occurs at larger values of M W Z ′ when u L , d L take on significantly different values. We especially note the strong differences between the cases of u L , d L = (−0.5, 0.5) and u L , d L = (−0.5, −0.5). Fig. 7 also shows the corresponding results for this distribution at the LHC which show similar coupling sensitivity since the shape of the distributions is quite similar to those found at the Tevatron.
Further information can be obtained by examining other kinematic distributions involving the W ± or the dijet system. Fig. 8 shows the angular distribution of the W ± at both the Tevatron and the LHC. We notice several things: (i) The dσ/dz distribution is very sensitive to the values of the u L , d L couplings which can be traced back to the various terms in Eq.(2) above. First, we see that when u L = d L the distribution is forward and backward peaked (due to the u− and t−channel 'poles') and is z → −z symmetric. In the other extreme, where the term proportional to X in Eq.(2) dominates, we still have z → −z symmetry but the distribution is much flatter being proportional to ∼ût. In the intermediate cases where all terms are comparable, the z → −z symmetry is now lost and some forward and backward peaking is possible. However, the distributions are generally fairly flat for central values of z. (ii) The angular distributions are quite different from what one would expect from scalar production. (iii) As in the case of the dσ/dM W Z ′ spectrum, we note that the W ± angular distributions look very similar at both colliders. This will remain true for the other distributions we display below and so we will only show the results for the LHC. Figure 9 shows both the W ± rapidity (y) and p T distributions at the LHC for the same set of u L , d L couplings as examined above. (As noted above, very similar results are obtained at the Tevatron.) The rapidity distribution shows only a relatively weak dependence on the couplings. However, it is easy to see that when u L = d L it is quite flat in the central region, whereas, when u L and d L are very different it is much more peaked near y = 0. On the other hand, the p T spectrum of the W ± is seen to be highly sensitive to the u L , d L couplings as we might have expected based on the shapes of the dσ/dM W Z ′ and the dσ/dz distributions discussed above.
In particular we see that when u L = d L the W ± p T spectrum is somewhat harder, growing more so as the difference in couplings gets larger. Clearly information from this distribution will help in the determination of the left-handed quark couplings to the Z ′ . Figure. 10 displays the velocity distribution of the Z ′ in the CM frame; this is of particular importance in the determination of the boost required to go to the dijet CM frame in order to obtain the dijet angular distribution. A measurement of this quantity is necessary if one wants to verify the spin-1 nature of the Z ′ . Again, we see that this distribution is quite sensitive to the u L , d L couplings and peaks at significantly larger values when u L = d L .
Lastly, we have also examined the possibility of observing Z ′ bremsstrahlung in qq production in e + e − annihilation, i.e., e + e − → qqZ ′ . We found that the rate for this process is hopelessly small at LEPII energies and thus does not provide a constraint on this scenario.
Summary and Conclusions
In summary, we have examined the hypothesis that a leptophobic Z ′ boson accounts for the excess of events in the W jj channel as observed by CDF. The quoted range for the production cross section places constraints on the left-handed couplings of the Z ′ to the up- and down-quarks. Consistency with the lack of observation by D0 forces us to the lower end of this cross section range. Further consistency with the non-observation of dijet resonances at m jj ∼ 150 GeV at UA2 constrains these couplings, and severely limits the possible values of the Z ′ right-handed couplings to the light quarks. Assuming that these couplings are generation independent, these results provide a relatively restrictive allowed region for the four hadronic couplings of the Z ′ .
These allowed coupling regions translate into well-determined rates for the associated production of Z/γ+Z ′ at the Tevatron and LHC, as well as for W +Z ′ at the LHC, apart from NLO corrections. The W jj rate at the LHC is large and this channel should be observed soon once the SM backgrounds are under control. The rates for Z/γ + Z ′ associated production are smaller, and these processes should not yet have been observed at the Tevatron given the expected SM backgrounds. Once detected, these processes will provide valuable information on the Z ′ boson couplings. Further information on the u L − d L coupling relationship was shown to also be obtainable from measurements of the dσ/dM W Z ′ as well as other kinematic Even with four free coupling parameters, this scenario is predictive, even more so once the W + Z ′ cross section is better determined at the Tevatron, and can be further tested at both the Tevatron itself as well as at the LHC in the near future. In particular, the LHC should confirm (or not) this scenario soon. |
package org.javasimon.callback.logging;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
 * Log template implementation backed by {@code java.util.logging} ({@link Logger}).
 *
 * @author gquintana
 */
public class JULLogTemplate<C> extends LogTemplate<C> {
	/** Underlying JUL logger that messages are written to. */
	private final Logger logger;
	/** Level used when writing messages; mutable via {@link #setLevel}. */
	private Level level;
	/**
	 * Constructor.
	 *
	 * @param logger target logger
	 * @param level initial logging level
	 */
	public JULLogTemplate(Logger logger, Level level) {
		this.logger = logger;
		this.level = level;
	}
	/**
	 * Constructor resolving the logger by its name.
	 *
	 * @param loggerName name of the target logger
	 * @param level initial logging level
	 */
	public JULLogTemplate(String loggerName, Level level) {
		this(Logger.getLogger(loggerName), level);
	}
	/**
	 * Returns the current log level.
	 *
	 * @return log level
	 */
	public Level getLevel() {
		return level;
	}
	/**
	 * Changes the log level used for subsequent messages.
	 *
	 * @param level new log level
	 */
	public void setLevel(Level level) {
		this.level = level;
	}
	/**
	 * Returns the underlying logger.
	 *
	 * @return logger
	 */
	public Logger getLogger() {
		return logger;
	}
	/**
	 * {@inheritDoc }
	 * <p/>
	 * Enabled whenever {@link Logger#isLoggable} returns true for the current level.
	 */
	protected boolean isEnabled(C context) {
		return logger.isLoggable(level);
	}
	/** Writes the message to the underlying logger at the current level. */
	protected void log(String message) {
		logger.log(level, message);
	}
}
|
#include <bits/stdc++.h>
using namespace std;
// For each test case: given an n x m matrix, we may (implicitly) flip signs in
// pairs, so the maximum achievable sum is the sum of absolute values, minus
// twice the smallest absolute value when the count of negative entries is odd
// (one element must then remain negative).
int main() {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(NULL);
    int t;
    std::cin >> t;
    while (t--) {
        int n, m;
        std::cin >> n >> m;
        // 64-bit accumulators: n*m ints can overflow a 32-bit sum.
        long long sumAbs = 0;          // sum of |a[i][j]|
        long long minAbs = LLONG_MAX;  // smallest absolute value in the matrix
        int negCount = 0;              // number of strictly negative entries
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < m; j++) {
                long long k;
                std::cin >> k;
                if (k < 0) negCount++;
                long long a = std::llabs(k);
                sumAbs += a;
                minAbs = std::min(minAbs, a);
            }
        }
        // Odd number of negatives: the cheapest element stays negative,
        // costing 2*minAbs relative to the all-positive total.
        if (negCount % 2) sumAbs -= 2 * minAbs;
        std::cout << sumAbs << '\n';  // '\n' instead of endl: no flush per case
    }
    return 0;
}
import { PropertiesVolume } from '../main/modules/properties-volume';
import { Application } from 'express';
// Pull secrets from the properties volume unless the environment already
// supplies them (TEST_PASSWORD acts as the sentinel for a configured env).
if (!process.env.TEST_PASSWORD) {
  new PropertiesVolume().enableFor({ locals: { developmentMode: true } } as unknown as Application);
}

// Headless by default; only TEST_HEADLESS === 'true' keeps it headless when the
// variable is set, any other value shows the browser.
const runHeadless = process.env.TEST_HEADLESS ? process.env.TEST_HEADLESS === 'true' : true;

export const config = {
  TestUrl: process.env.TEST_URL || 'http://localhost:3001',
  TestHeadlessBrowser: runHeadless,
  TestSlowMo: 250,
  WaitForTimeout: 10000,
  Gherkin: {
    features: './features/**/*.feature',
    steps: ['../e2e/step_definitions/steps.ts'],
  },
  helpers: {},
};

// The Playwright helper is attached after the config object exists so that it
// can reference the resolved values above.
config.helpers = {
  Playwright: {
    url: config.TestUrl,
    show: !config.TestHeadlessBrowser,
    browser: 'chromium',
    waitForTimeout: config.WaitForTimeout,
    waitForAction: 1000,
    waitForNavigation: 'networkidle0',
    ignoreHTTPSErrors: true,
  },
};
|
def _systemctl_action(self, package: Package, action: str):
    """Apply a systemctl *action* (start/stop/enable/...) to the service(s) of *package*.

    A host service (or an asic service on a single-NPU platform) is driven as a
    single systemd unit; an asic service on a multi-NPU platform is driven once
    per NPU via templated units ``name@<npu>``. No-op inside a chroot, where
    systemd is not running.
    """
    service_info = package.manifest['service']
    service_name = service_info['name']
    runs_on_host = service_info['host-service']
    runs_per_asic = service_info['asic-service']
    # Evaluate instance flags before the chroot guard (matches original order).
    single_unit = runs_on_host or (runs_per_asic and not self.is_multi_npu)
    templated_units = runs_per_asic and self.is_multi_npu
    if in_chroot():
        return
    if single_unit:
        run_command(f'systemctl {action} {service_name}')
    if templated_units:
        for npu_id in range(self.num_npus):
            run_command(f'systemctl {action} {service_name}@{npu_id}')
// Load loads config options from a toml file.
func (c *Config) Load(confFile string) error {
metaData, err := toml.DecodeFile(confFile, c)
undecoded := metaData.Undecoded()
if len(undecoded) > 0 && err == nil {
var undecodedItems []string
for _, item := range undecoded {
undecodedItems = append(undecodedItems, item.String())
}
err = &ErrConfigValidationFailed{confFile, undecodedItems}
}
return err
} |
<reponame>majinjing3/neaten-checkin<filename>vendor/github.com/chanxuehong/util/xml_test.go
package util
import (
"reflect"
"strings"
"testing"
)
// TestDecodeXMLToMap checks that DecodeXMLToMap flattens simple XML into a
// map[string]string, and — per the expected maps below — that elements with
// child elements (or mixed character data) are omitted from the result
// rather than mapped. Inputs and expectations are parallel arrays: xmlArr[i]
// must decode to mapArr[i].
func TestDecodeXMLToMap(t *testing.T) {
// Test inputs, from plain leaf-only XML to nested and mixed-content cases.
var xmlArr = []string{
`<xml>
<a>a</a>
<b>b</b>
</xml>`,
`<xml>
<a>a</a>
<b>
<ba>ba</ba>
</b>
<c>c</c>
</xml>`,
`<xml>
<a>a</a>
<b>
bchara
<ba>ba</ba>
</b>
<c>c</c>
</xml>`,
`<xml>
<a>a</a>
<b>
bchara
<ba>ba</ba>
bchara
<bb>bb</bb>
bchara
</b>
<c>c</c>
</xml>`,
`<xml>
chara
<a>a</a>
<b>
<ba>ba</ba>
bchara
</b>
<c>c</c>
</xml>`,
}
// Expected results: note <b> disappears whenever it has child elements,
// regardless of any interleaved character data.
var mapArr = []map[string]string{
{
"a": "a",
"b": "b",
},
{
"a": "a",
"c": "c",
},
{
"a": "a",
"c": "c",
},
{
"a": "a",
"c": "c",
},
{
"a": "a",
"c": "c",
},
}
for i, src := range xmlArr {
m, err := DecodeXMLToMap(strings.NewReader(src))
if err != nil {
t.Errorf("DecodeXMLToMap(%s) failed: %s\n", src, err.Error())
continue
}
// Deep equality: same keys and same values, nothing extra.
if !reflect.DeepEqual(m, mapArr[i]) {
t.Errorf("DecodeXMLToMap(%s) failed:\nhave %+v\nwant %+v\n", src, m, mapArr[i])
continue
}
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.